changeset 733:81cb6e4a9fcf cacao

2008-03-04  Gary Benson  <gbenson@redhat.com>

	* patches/icedtea-hotspot-6b06-7b24.patch: New file.
	* patches/icedtea-hotspot7-build-fixes.patch: Likewise.
	* patches/icedtea-bytecodeInterpreter.patch: Likewise.
	* patches/icedtea-bytecodeInterpreterWithChecks.patch: Likewise.
	* Makefile.am: Apply the above patches when zero is being built.
	* Makefile.in: Regenerated.

	* patches/icedtea-linker-options.patch: Reinstate missing hunks.
	* patches/icedtea-ports.patch: Likewise.
	* patches/icedtea-zero.patch: Likewise.
author Gary Benson <gbenson@redhat.com>
date Tue, 04 Mar 2008 07:02:39 -0500
parents 4210155c180e
children c165c63fcb0e
files ChangeLog Makefile.am patches/icedtea-bytecodeInterpreter.patch patches/icedtea-bytecodeInterpreterWithChecks.patch patches/icedtea-hotspot-6b06-7b24.patch patches/icedtea-hotspot7-build-fixes.patch patches/icedtea-linker-options.patch patches/icedtea-ports.patch patches/icedtea-zero.patch
diffstat 9 files changed, 201498 insertions(+), 87 deletions(-)
--- a/ChangeLog	Tue Mar 04 00:06:10 2008 -0500
+++ b/ChangeLog	Tue Mar 04 07:02:39 2008 -0500
@@ -1,3 +1,16 @@
+2008-03-04  Gary Benson  <gbenson@redhat.com>
+
+	* patches/icedtea-hotspot-6b06-7b24.patch: New file.
+	* patches/icedtea-hotspot7-build-fixes.patch: Likewise.
+	* patches/icedtea-bytecodeInterpreter.patch: Likewise.
+	* patches/icedtea-bytecodeInterpreterWithChecks.patch: Likewise.
+	* Makefile.am: Apply the above patches when zero is being built.
+	* Makefile.in: Regenerated.
+
+	* patches/icedtea-linker-options.patch: Reinstate missing hunks.
+	* patches/icedtea-ports.patch: Likewise.
+	* patches/icedtea-zero.patch: Likewise.
+
 2008-03-04  Lillian Angel  <langel@redhat.com>
 
 	* generated/*: Updated generated files.
--- a/Makefile.am	Tue Mar 04 00:06:10 2008 -0500
+++ b/Makefile.am	Tue Mar 04 07:02:39 2008 -0500
@@ -204,7 +204,20 @@
   GCC_PATCH = patches/icedtea-gcc-4.3.patch
 endif
 
+# If CORE_BUILD is set then we are building zero and need
+# to patch up to OpenJDK 7 HotSpot for the C++ interpreter.
+if CORE_BUILD
+  HOTSPOT_7_PATCHES = \
+	patches/icedtea-hotspot-6b06-7b24.patch \
+	patches/icedtea-hotspot7-build-fixes.patch \
+	patches/icedtea-bytecodeInterpreter.patch \
+	patches/icedtea-bytecodeInterpreterWithChecks.patch
+else
+  HOTSPOT_7_PATCHES = 
+endif
+
 ICEDTEA_PATCHES = \
+	$(HOTSPOT_7_PATCHES) \
 	patches/icedtea-copy-plugs.patch \
 	patches/icedtea-version.patch \
 	patches/icedtea-text-relocations.patch \
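
A note on the hunk above: HOTSPOT_7_PATCHES is populated only under the automake conditional CORE_BUILD (the zero build); every other configuration gets an empty variable. Because it is prepended to ICEDTEA_PATCHES, the OpenJDK 7 HotSpot patches are applied before the rest of the series when zero is built, and the patch list is unchanged otherwise.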
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/patches/icedtea-bytecodeInterpreter.patch	Tue Mar 04 07:02:39 2008 -0500
@@ -0,0 +1,36 @@
+diff -r b3238230c1ef openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.hpp
+--- openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.hpp	Fri Nov 02 10:14:32 2007 +0000
++++ openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.hpp	Fri Nov 02 10:15:45 2007 +0000
+@@ -60,7 +60,6 @@
+ };
+ 
+ class BytecodeInterpreter : StackObj {
+-friend class SharedRuntime;
+ friend class AbstractInterpreterGenerator;
+ friend class CppInterpreterGenerator;
+ friend class InterpreterGenerator;
+diff -r bae119bcbcd0 openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp
+--- openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	Fri Nov 02 15:08:47 2007 +0000
++++ openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	Fri Nov 02 15:21:08 2007 +0000
+@@ -518,16 +518,16 @@
+ 
+ /* 0xC0 */ &&opc_checkcast,   &&opc_instanceof,     &&opc_monitorenter, &&opc_monitorexit,
+ /* 0xC4 */ &&opc_wide,        &&opc_multianewarray, &&opc_ifnull,       &&opc_ifnonnull,
+-/* 0xC8 */ &&opc_goto_w,      &&opc_jsr_w,          &&opc_breakpoint,   &&opc_fast_igetfield,
+-/* 0xCC */ &&opc_fast_agetfield,&&opc_fast_aload_0,  &&opc_fast_iaccess_0, &&opc_fast_aaccess_0,
+-
+-/* 0xD0 */ &&opc_fast_linearswitch, &&opc_fast_binaryswitch, &&opc_return_register_finalizer,      &&opc_default,
++/* 0xC8 */ &&opc_goto_w,      &&opc_jsr_w,          &&opc_breakpoint,   &&opc_default,
++/* 0xCC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
++
++/* 0xD0 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
+ /* 0xD4 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
+ /* 0xD8 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
+ /* 0xDC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
+ 
+ /* 0xE0 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
+-/* 0xE4 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
++/* 0xE4 */ &&opc_default,     &&opc_return_register_finalizer, &&opc_default, &&opc_default,
+ /* 0xE8 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
+ /* 0xEC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
+ 
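
Some context for the table edited above: bytecodeInterpreter.cpp dispatches bytecodes through GCC's labels-as-values extension, a 256-entry array of label addresses indexed by the current opcode, with every unassigned slot pointing at a shared opc_default handler. The hunk retires the HotSpot 6 fast-path entries (0xCB through 0xD1) and moves opc_return_register_finalizer from slot 0xD2 to slot 0xE5, where OpenJDK 7's bytecode numbering expects it. The following toy interpreter is a minimal sketch of the same dispatch pattern, illustrative only (a four-opcode machine, not HotSpot source); it assumes a GCC-compatible compiler, since &&label and goto * are GNU extensions.

// toy_dispatch.cpp -- illustrative sketch, not HotSpot source.
// Shows the computed-goto pattern used by bytecodeInterpreter.cpp:
// a table of label addresses indexed by opcode, with unimplemented
// slots routed to a default handler.  Build with g++ or clang++.
#include <cstdio>

static int interpret(const unsigned char* pc) {
    // 0x00 push1, 0x01 add, 0x02 halt; 0x03 stands in for opc_default.
    static const void* table[4] = {
        &&opc_push1, &&opc_add, &&opc_halt, &&opc_default
    };
    int stack[8];
    int sp = 0;
    goto *table[*pc];                            // initial dispatch
opc_push1:
    stack[sp++] = 1;
    goto *table[*++pc];
opc_add:
    --sp;
    stack[sp - 1] += stack[sp];
    goto *table[*++pc];
opc_halt:
    return stack[sp - 1];
opc_default:                                     // unassigned slot
    std::printf("unimplemented opcode 0x%02x\n", *pc);
    return -1;
}

int main() {
    const unsigned char program[] = { 0x00, 0x00, 0x01, 0x02 };  // 1 + 1
    std::printf("%d\n", interpret(program));                     // prints 2
    return 0;
}

In the real interpreter the table always has 256 entries, one per possible bytecode, so retiring an opcode means rewriting its slot as &&opc_default rather than shrinking the table, which is exactly the shape of the hunk above.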
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/patches/icedtea-bytecodeInterpreterWithChecks.patch	Tue Mar 04 07:02:39 2008 -0500
@@ -0,0 +1,18 @@
+--- openjdk.old/hotspot/src/share/vm/interpreter/bytecodeInterpreterWithChecks.xsl      2007-10-12 03:46:08.000000000 -0400
++++ openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreterWithChecks.xsl      2007-10-29 11:57:33.000000000 -0400
+@@ -6,6 +6,7 @@
+
+ <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+
++<xsl:output method="text" indent="no" omit-xml-declaration="yes"/>
+ <xsl:template match="processcode">
+ <xsl:text>
+ #define VM_JVMTI
+@@ -15,7 +16,6 @@
+
+ </xsl:text>
+
+-<xsl:output method="text" indent="no" omit-xml-declaration="yes"/>
+ </xsl:template>
+
+ </xsl:stylesheet>
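
This one is a well-formedness fix rather than a behavioral change: in XSLT 1.0, xsl:output is a top-level element, legal only as a direct child of xsl:stylesheet. The original stylesheet declared it inside the processcode template, which stricter XSLT processors reject, so the hunk hoists the declaration out of the template.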
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/patches/icedtea-hotspot-6b06-7b24.patch	Tue Mar 04 07:02:39 2008 -0500
@@ -0,0 +1,201186 @@
+diff -ruN openjdk{6,}/hotspot/src/share
+diff -ruN openjdk{6,}/hotspot/src/os/linux 
+
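+
+Worth knowing before reading further: the MakeDeps hunks reproduced below are whitespace-only; each removed line reappears with tabs expanded to spaces and trailing blanks stripped. Reindentation of this kind across the tree accounts for much of the patch's roughly 201,000 inserted lines.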
+diff -ruN openjdk6/hotspot/src/share/tools/MakeDeps/ArgsParser.java openjdk/hotspot/src/share/tools/MakeDeps/ArgsParser.java
+--- openjdk6/hotspot/src/share/tools/MakeDeps/ArgsParser.java	2008-02-28 05:02:31.000000000 -0500
++++ openjdk/hotspot/src/share/tools/MakeDeps/ArgsParser.java	2008-01-31 09:19:00.000000000 -0500
+@@ -19,67 +19,67 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class ArgIterator {
+     String[] args;
+     int i;
+     ArgIterator(String[] args) {
+-	this.args = args;
+-	this.i = 0;
++        this.args = args;
++        this.i = 0;
+     }
+-    String get() { return args[i]; }	
++    String get() { return args[i]; }
+     boolean hasMore() { return args != null && i  < args.length; }
+-    boolean next() { return ++i < args.length; }   
++    boolean next() { return ++i < args.length; }
+ }
+ 
+-abstract class ArgHandler {	
++abstract class ArgHandler {
+     public abstract void handle(ArgIterator it);
+-    
++
+ }
+ 
+ class ArgRule {
+     String arg;
+     ArgHandler handler;
+     ArgRule(String arg, ArgHandler handler) {
+-	this.arg = arg;
+-	this.handler = handler;
++        this.arg = arg;
++        this.handler = handler;
+     }
+ 
+     boolean process(ArgIterator it) {
+-	if (match(it.get(), arg)) {
+-	    handler.handle(it);
+-	    return true;
+-	}
+-	return false;
++        if (match(it.get(), arg)) {
++            handler.handle(it);
++            return true;
++        }
++        return false;
+     }
+     boolean match(String rule_pattern, String arg) {
+-	return arg.equals(rule_pattern);
++        return arg.equals(rule_pattern);
+     }
+ }
+ 
+ class ArgsParser {
+-    ArgsParser(String[] args, 
+-	       ArgRule[] rules, 
+-	       ArgHandler defaulter) {
+-	ArgIterator ai = new ArgIterator(args);
+-	while (ai.hasMore()) {
+-	    boolean processed = false;
+-	    for (int i=0; i<rules.length; i++) {
+-		processed |= rules[i].process(ai);
+-		if (processed) {
+-		    break;
+-		}
+-	    }
+-	    if (!processed) {
+-		if (defaulter != null) {
+-		    defaulter.handle(ai);
+-		} else {
+-		    System.err.println("ERROR: unparsed \""+ai.get()+"\"");
+-		    ai.next();
+-		}
+-	    }
+-	}
++    ArgsParser(String[] args,
++               ArgRule[] rules,
++               ArgHandler defaulter) {
++        ArgIterator ai = new ArgIterator(args);
++        while (ai.hasMore()) {
++            boolean processed = false;
++            for (int i=0; i<rules.length; i++) {
++                processed |= rules[i].process(ai);
++                if (processed) {
++                    break;
++                }
++            }
++            if (!processed) {
++                if (defaulter != null) {
++                    defaulter.handle(ai);
++                } else {
++                    System.err.println("ERROR: unparsed \""+ai.get()+"\"");
++                    ai.next();
++                }
++            }
++        }
+     }
+ }
+diff -ruN openjdk6/hotspot/src/share/tools/MakeDeps/BuildConfig.java openjdk/hotspot/src/share/tools/MakeDeps/BuildConfig.java
+--- openjdk6/hotspot/src/share/tools/MakeDeps/BuildConfig.java	2008-02-28 05:02:31.000000000 -0500
++++ openjdk/hotspot/src/share/tools/MakeDeps/BuildConfig.java	2008-01-31 09:19:00.000000000 -0500
+@@ -19,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ import java.util.*;
+@@ -32,343 +32,343 @@
+ 
+     static CompilerInterface ci;
+     static CompilerInterface getCI() {
+-	if (ci == null) {
+-	    String comp = (String)getField(null, "CompilerVersion");
+-	    try {
+-		ci = (CompilerInterface)Class.forName("CompilerInterface" + comp).newInstance();
+-	    } catch (Exception cnfe) {
+-		System.err.println("Cannot find support for compiler " + comp);
+-		throw new RuntimeException(cnfe.toString());
+-	    }
+-	}
+-	return ci;
++        if (ci == null) {
++            String comp = (String)getField(null, "CompilerVersion");
++            try {
++                ci = (CompilerInterface)Class.forName("CompilerInterface" + comp).newInstance();
++            } catch (Exception cnfe) {
++                System.err.println("Cannot find support for compiler " + comp);
++                throw new RuntimeException(cnfe.toString());
++            }
++        }
++        return ci;
+     }
+ 
+     protected void initNames(String flavour, String build, String outDll) {
+-	if (vars == null) vars = new Hashtable();
+-	
+-	String flavourBuild =  flavour + "_" + build;
+-	put("Name", getCI().makeCfgName(flavourBuild));
+-	put("Flavour", flavour);
+-	put("Build", build);
+-
+-	// ones mentioned above were needed to expand format
+-	String buildBase = expandFormat(getFieldString(null, "BuildBase"));
+-	String jdkDir =  getFieldString(null, "JdkTargetRoot");
+-	String sourceBase = getFieldString(null, "SourceBase");	
+-	String outDir = buildBase;
+-	
+-	put("Id", flavourBuild);
+-	put("OutputDir", outDir);
+-	put("SourceBase", sourceBase);
+-	put("BuildBase", buildBase);
+-	put("OutputDll", jdkDir + Util.sep + outDll);
++        if (vars == null) vars = new Hashtable();
++
++        String flavourBuild =  flavour + "_" + build;
++        put("Name", getCI().makeCfgName(flavourBuild));
++        put("Flavour", flavour);
++        put("Build", build);
++
++        // ones mentioned above were needed to expand format
++        String buildBase = expandFormat(getFieldString(null, "BuildBase"));
++        String jdkDir =  getFieldString(null, "JdkTargetRoot");
++        String sourceBase = getFieldString(null, "SourceBase");
++        String outDir = buildBase;
++
++        put("Id", flavourBuild);
++        put("OutputDir", outDir);
++        put("SourceBase", sourceBase);
++        put("BuildBase", buildBase);
++        put("OutputDll", jdkDir + Util.sep + outDll);
+ 
+-	context = new String [] {flavourBuild, flavour, build, null};
++        context = new String [] {flavourBuild, flavour, build, null};
+     }
+-    
++
+     protected void init(Vector includes, Vector defines) {
+-	initDefaultDefines(defines);
+-	initDefaultCompilerFlags(includes);
+-	initDefaultLinkerFlags();
+-	handleDB((String)getFieldInContext("IncludeDB"));
++        initDefaultDefines(defines);
++        initDefaultCompilerFlags(includes);
++        initDefaultLinkerFlags();
++        handleDB((String)getFieldInContext("IncludeDB"));
+     }
+ 
+ 
+-    protected void initDefaultCompilerFlags(Vector includes) {	
+-	Vector compilerFlags = new Vector();
++    protected void initDefaultCompilerFlags(Vector includes) {
++        Vector compilerFlags = new Vector();
++
++        compilerFlags.addAll(getCI().getBaseCompilerFlags(getV("Define"),
++                                                          includes,
++                                                          get("OutputDir")));
+ 
+-	compilerFlags.addAll(getCI().getBaseCompilerFlags(getV("Define"), 
+-							  includes, 
+-							  get("OutputDir")));
+-	
+-	put("CompilerFlags", compilerFlags);
++        put("CompilerFlags", compilerFlags);
+     }
+ 
+     protected void initDefaultLinkerFlags() {
+-	Vector linkerFlags = new Vector();
++        Vector linkerFlags = new Vector();
+ 
+-	linkerFlags.addAll(getCI().getBaseLinkerFlags( get("OutputDir"), get("OutputDll")));
++        linkerFlags.addAll(getCI().getBaseLinkerFlags( get("OutputDir"), get("OutputDll")));
+ 
+-	put("LinkerFlags", linkerFlags);
++        put("LinkerFlags", linkerFlags);
+     }
+-    
++
+     DirectoryTree getSourceTree(String sourceBase, String startAt) {
+-	DirectoryTree tree = new DirectoryTree();
++        DirectoryTree tree = new DirectoryTree();
+ 
+-	tree.addSubdirToIgnore("Codemgr_wsdata");
+-	tree.addSubdirToIgnore("deleted_files");
+-	tree.addSubdirToIgnore("SCCS");
+-	tree.setVerbose(true);
+-	if (startAt != null) {
+-	    tree.readDirectory(sourceBase + File.separator + startAt);
+-	} else {
+-	    tree.readDirectory(sourceBase);
+-	}
++        tree.addSubdirToIgnore("Codemgr_wsdata");
++        tree.addSubdirToIgnore("deleted_files");
++        tree.addSubdirToIgnore("SCCS");
++        tree.setVerbose(true);
++        if (startAt != null) {
++            tree.readDirectory(sourceBase + File.separator + startAt);
++        } else {
++            tree.readDirectory(sourceBase);
++        }
+ 
+-	return tree;
++        return tree;
+     }
+ 
+ 
+     Vector getPreferredPaths(Database currentDB) {
+-	Vector preferredPaths = new Vector();
+-    	// In the case of multiple files with the same name in
+-	// different subdirectories, prefer the versions specified in
+-	// the platform file as the "os_family" and "arch" macros.
+-	for (Iterator iter = currentDB.getMacros(); iter.hasNext(); ) {
+-	    Macro macro = (Macro) iter.next();
+-	    if (macro.name.equals("os_family") ||
+-		macro.name.equals("arch")) {
+-		preferredPaths.add(macro.contents);
+-	    }
+-	}
+-	// Also prefer "opto" over "adlc" for adlcVMDeps.hpp
+-	preferredPaths.add("opto");
+-	
+-	return preferredPaths;
+-    }
+-
+-
+-    void handleDB(String dbFile) {	
+-	WinGammaPlatform platform = (WinGammaPlatform)getField(null, "PlatformObject");
+-	Database db = new Database(platform, platform.defaultGrandIncludeThreshold());
+-
+-	try {
+-	    File incls = new File(get("OutputDir")+Util.sep+"incls");
+-	    FileName oldInclTempl = platform.getInclFileTemplate();
+-	    FileName oldGITempl = platform.getGIFileTemplate();
+-	    FileName oldGDTempl = platform.getGDFileTemplate();
+-	    
+-	    platform.setInclFileTemplate(new FileName(platform, incls.getPath()+Util.sep, 
+-						      "_", "", ".incl", "", ""));
+-	    platform.setGIFileTemplate(new FileName(platform, incls.getPath()+Util.sep, 
+-						    "",  "_precompiled", ".incl", "", ""));
+-	    
+-	    incls.mkdirs();
+-	    
+-	    db.get(getFieldString(null, "Platform"), dbFile);
+-	    db.compute();
+-	    
+-	    db.put();
+-
+-	    //platform.setInclFileTemplate(oldInclTempl);
+-	    //platform.setGIFileTemplate(oldInclTempl);
+-	} catch (Exception e) {
+-	    e.printStackTrace();
+-	    throw new RuntimeException("cannot do db: "+e);
+-	}
+-	
+-	putSpecificField("AllFilesHash", computeAllFiles(platform, db));
+-    }
+-
+-
+-    void addAll(Iterator i, Hashtable hash, 
+-		WinGammaPlatform platform, DirectoryTree tree, 
+-		Vector preferredPaths, Vector filesNotFound, Vector filesDuplicate) {
+-	for (; i.hasNext(); ) {
+-	    String fileName = (String) i.next();
+-	    if (lookupHashFieldInContext("IgnoreFile", fileName) == null) {		
+-		String prefixedName = platform.envVarPrefixedFileName(fileName,
+-								      0, /* ignored */
+-								      tree,
+-								      preferredPaths,
+-								      filesNotFound,
+-								      filesDuplicate);
+-		if (prefixedName != null) {
+-		    addTo(hash, Util.normalize(prefixedName), fileName);
+-		}
+-	    }
+-	}
++        Vector preferredPaths = new Vector();
++        // In the case of multiple files with the same name in
++        // different subdirectories, prefer the versions specified in
++        // the platform file as the "os_family" and "arch" macros.
++        for (Iterator iter = currentDB.getMacros(); iter.hasNext(); ) {
++            Macro macro = (Macro) iter.next();
++            if (macro.name.equals("os_family") ||
++                macro.name.equals("arch")) {
++                preferredPaths.add(macro.contents);
++            }
++        }
++        // Also prefer "opto" over "adlc" for adlcVMDeps.hpp
++        preferredPaths.add("opto");
++
++        return preferredPaths;
++    }
++
++
++    void handleDB(String dbFile) {
++        WinGammaPlatform platform = (WinGammaPlatform)getField(null, "PlatformObject");
++        Database db = new Database(platform, platform.defaultGrandIncludeThreshold());
++
++        try {
++            File incls = new File(get("OutputDir")+Util.sep+"incls");
++            FileName oldInclTempl = platform.getInclFileTemplate();
++            FileName oldGITempl = platform.getGIFileTemplate();
++            FileName oldGDTempl = platform.getGDFileTemplate();
++
++            platform.setInclFileTemplate(new FileName(platform, incls.getPath()+Util.sep,
++                                                      "_", "", ".incl", "", ""));
++            platform.setGIFileTemplate(new FileName(platform, incls.getPath()+Util.sep,
++                                                    "",  "_precompiled", ".incl", "", ""));
++
++            incls.mkdirs();
++
++            db.get(getFieldString(null, "Platform"), dbFile);
++            db.compute();
++
++            db.put();
++
++            //platform.setInclFileTemplate(oldInclTempl);
++            //platform.setGIFileTemplate(oldInclTempl);
++        } catch (Exception e) {
++            e.printStackTrace();
++            throw new RuntimeException("cannot do db: "+e);
++        }
++
++        putSpecificField("AllFilesHash", computeAllFiles(platform, db));
++    }
++
++
++    void addAll(Iterator i, Hashtable hash,
++                WinGammaPlatform platform, DirectoryTree tree,
++                Vector preferredPaths, Vector filesNotFound, Vector filesDuplicate) {
++        for (; i.hasNext(); ) {
++            String fileName = (String) i.next();
++            if (lookupHashFieldInContext("IgnoreFile", fileName) == null) {
++                String prefixedName = platform.envVarPrefixedFileName(fileName,
++                                                                      0, /* ignored */
++                                                                      tree,
++                                                                      preferredPaths,
++                                                                      filesNotFound,
++                                                                      filesDuplicate);
++                if (prefixedName != null) {
++                    addTo(hash, Util.normalize(prefixedName), fileName);
++                }
++            }
++        }
+     }
+ 
+     void addTo(Hashtable ht, String key, String value) {
+-	ht.put(expandFormat(key), expandFormat(value));
++        ht.put(expandFormat(key), expandFormat(value));
+     }
+ 
+     Hashtable computeAllFiles(WinGammaPlatform platform, Database db) {
+-	Hashtable rv = new Hashtable();
+-	DirectoryTree tree = getSourceTree(get("SourceBase"), getFieldString(null, "StartAt"));
+-	Vector preferredPaths = getPreferredPaths(db);
+-	
+-	// Hold errors until end
+-	Vector filesNotFound = new Vector();
+-	Vector filesDuplicate = new Vector();
+-
+-
+-	// find all files
+-	Vector dbFiles = new Vector();
+-	for (Iterator i=db.getAllFiles().iterator(); i.hasNext(); ) {
+-	    FileList fl = (FileList) i.next();	    
+-	    dbFiles.add(fl.getName());
+-	}
+-	addAll(dbFiles.iterator(), rv,   
+-	       platform, tree, 
+-	       preferredPaths, filesNotFound, filesDuplicate);
+-
+-	Vector addFiles = new Vector();	
+-	collectRelevantVectors(addFiles, "AdditionalFile");
+-	addAll(addFiles.iterator(), rv,
+-	       platform, tree, 
+-	       preferredPaths, filesNotFound, filesDuplicate);
+-	
+-	collectRelevantHashes(rv, "AdditionalGeneratedFile");	 
+-		
+-	if ((filesNotFound.size() != 0) ||
+-	    (filesDuplicate.size() != 0)) {
+-	    System.err.println("Error: some files were not found or " +
+-			       "appeared in multiple subdirectories of " +
+-			       "directory " + get("SourceBase") + " and could not " +
+-			       "be resolved with the os_family and arch " +
+-			       "macros in the platform file.");
+-	    if (filesNotFound.size() != 0) {
+-		System.err.println("Files not found:");
+-		for (Iterator iter = filesNotFound.iterator();
+-		     iter.hasNext(); ) {
+-		    System.err.println("  " + (String) iter.next());
+-		}
+-	    }
+-	    if (filesDuplicate.size() != 0) {
+-		System.err.println("Duplicate files:");
+-		for (Iterator iter = filesDuplicate.iterator();
+-		     iter.hasNext(); ) {
+-		    System.err.println("  " + (String) iter.next());
+-		}
+-	    }
+-	    throw new RuntimeException();
+-	}
+-
+-	return rv;
+-    }
+-
+-    void initDefaultDefines(Vector defines) { 
+-    	Vector sysDefines = new Vector();
+-	sysDefines.add("WIN32");
+-	sysDefines.add("_WINDOWS");
+-	sysDefines.add("HOTSPOT_BUILD_USER="+System.getProperty("user.name"));
+-	sysDefines.add("HOTSPOT_BUILD_TARGET=\\\""+get("Build")+"\\\"");
+-	sysDefines.add("_JNI_IMPLEMENTATION_");
+-
+-	sysDefines.addAll(defines);
+-	
+-	put("Define", sysDefines);
+-    }	
++        Hashtable rv = new Hashtable();
++        DirectoryTree tree = getSourceTree(get("SourceBase"), getFieldString(null, "StartAt"));
++        Vector preferredPaths = getPreferredPaths(db);
++
++        // Hold errors until end
++        Vector filesNotFound = new Vector();
++        Vector filesDuplicate = new Vector();
++
++
++        // find all files
++        Vector dbFiles = new Vector();
++        for (Iterator i=db.getAllFiles().iterator(); i.hasNext(); ) {
++            FileList fl = (FileList) i.next();
++            dbFiles.add(fl.getName());
++        }
++        addAll(dbFiles.iterator(), rv,
++               platform, tree,
++               preferredPaths, filesNotFound, filesDuplicate);
++
++        Vector addFiles = new Vector();
++        collectRelevantVectors(addFiles, "AdditionalFile");
++        addAll(addFiles.iterator(), rv,
++               platform, tree,
++               preferredPaths, filesNotFound, filesDuplicate);
++
++        collectRelevantHashes(rv, "AdditionalGeneratedFile");
++
++        if ((filesNotFound.size() != 0) ||
++            (filesDuplicate.size() != 0)) {
++            System.err.println("Error: some files were not found or " +
++                               "appeared in multiple subdirectories of " +
++                               "directory " + get("SourceBase") + " and could not " +
++                               "be resolved with the os_family and arch " +
++                               "macros in the platform file.");
++            if (filesNotFound.size() != 0) {
++                System.err.println("Files not found:");
++                for (Iterator iter = filesNotFound.iterator();
++                     iter.hasNext(); ) {
++                    System.err.println("  " + (String) iter.next());
++                }
++            }
++            if (filesDuplicate.size() != 0) {
++                System.err.println("Duplicate files:");
++                for (Iterator iter = filesDuplicate.iterator();
++                     iter.hasNext(); ) {
++                    System.err.println("  " + (String) iter.next());
++                }
++            }
++            throw new RuntimeException();
++        }
++
++        return rv;
++    }
++
++    void initDefaultDefines(Vector defines) {
++        Vector sysDefines = new Vector();
++        sysDefines.add("WIN32");
++        sysDefines.add("_WINDOWS");
++        sysDefines.add("HOTSPOT_BUILD_USER="+System.getProperty("user.name"));
++        sysDefines.add("HOTSPOT_BUILD_TARGET=\\\""+get("Build")+"\\\"");
++        sysDefines.add("_JNI_IMPLEMENTATION_");
++
++        sysDefines.addAll(defines);
++
++        put("Define", sysDefines);
++    }
+ 
+     String get(String key) {
+-	return (String)vars.get(key);
++        return (String)vars.get(key);
+     }
+ 
+     Vector getV(String key) {
+-	return (Vector)vars.get(key);
++        return (Vector)vars.get(key);
+     }
+ 
+     Object getO(String key) {
+-	return vars.get(key);
++        return vars.get(key);
+     }
+-    
++
+     Hashtable getH(String key) {
+-	return (Hashtable)vars.get(key);
++        return (Hashtable)vars.get(key);
+     }
+ 
+     Object getFieldInContext(String field) {
+-	for (int i=0; i<context.length; i++) {
+-	    Object rv = getField(context[i], field);
+-	    if (rv != null) {
+-		return rv;
+-	    }
+-	}
+-	return null;
++        for (int i=0; i<context.length; i++) {
++            Object rv = getField(context[i], field);
++            if (rv != null) {
++                return rv;
++            }
++        }
++        return null;
+     }
+ 
+     Object lookupHashFieldInContext(String field, String key) {
+-	for (int i=0; i<context.length; i++) {
+-	    Hashtable ht = (Hashtable)getField(context[i], field);
+-	    if (ht != null) {
+-		Object rv = ht.get(key);		
+-		if (rv != null) {
+-		    return rv;
+-		}
+-	    }
+-	}
+-	return null;
++        for (int i=0; i<context.length; i++) {
++            Hashtable ht = (Hashtable)getField(context[i], field);
++            if (ht != null) {
++                Object rv = ht.get(key);
++                if (rv != null) {
++                    return rv;
++                }
++            }
++        }
++        return null;
+     }
+ 
+     void put(String key, String value) {
+-	vars.put(key, value);
++        vars.put(key, value);
+     }
+ 
+     void put(String key, Vector vvalue) {
+-	vars.put(key, vvalue);
++        vars.put(key, vvalue);
+     }
+ 
+     void add(String key, Vector vvalue) {
+-	getV(key).addAll(vvalue);
+-    }   
++        getV(key).addAll(vvalue);
++    }
+ 
+     String flavour() {
+-	return get("Flavour");
++        return get("Flavour");
+     }
+-    
++
+     String build() {
+-	return get("Build");
++        return get("Build");
+     }
+ 
+     Object getSpecificField(String field) {
+-	return getField(get("Id"), field);
++        return getField(get("Id"), field);
+     }
+ 
+     void putSpecificField(String field, Object value) {
+-	putField(get("Id"), field, value);
++        putField(get("Id"), field, value);
+     }
+-    
++
+     void collectRelevantVectors(Vector rv, String field) {
+-	for (int i = 0; i < context.length; i++) {
+-	    Vector v = getFieldVector(context[i], field);
+-	    if (v != null) {
+-		for (Iterator j=v.iterator(); j.hasNext(); ) {
+-		    String val = (String)j.next();
+-		    rv.add(expandFormat(val));
+-		}
+-	    }
+-	}
++        for (int i = 0; i < context.length; i++) {
++            Vector v = getFieldVector(context[i], field);
++            if (v != null) {
++                for (Iterator j=v.iterator(); j.hasNext(); ) {
++                    String val = (String)j.next();
++                    rv.add(expandFormat(val));
++                }
++            }
++        }
+     }
+ 
+     void collectRelevantHashes(Hashtable rv, String field) {
+-	for (int i = 0; i < context.length; i++) {
+-	    Hashtable v = (Hashtable)getField(context[i], field);
+-	    if (v != null) {
+-		for (Enumeration e=v.keys(); e.hasMoreElements(); ) {
+-		    String key = (String)e.nextElement();
+-		    String val =  (String)v.get(key);
+-		    addTo(rv, key, val);
+-		}
+-	    }
+-	}
++        for (int i = 0; i < context.length; i++) {
++            Hashtable v = (Hashtable)getField(context[i], field);
++            if (v != null) {
++                for (Enumeration e=v.keys(); e.hasMoreElements(); ) {
++                    String key = (String)e.nextElement();
++                    String val =  (String)v.get(key);
++                    addTo(rv, key, val);
++                }
++            }
++        }
+     }
+ 
+ 
+     Vector getDefines() {
+-	Vector rv = new Vector();
+-	collectRelevantVectors(rv, "Define");	
+-	return rv;
++        Vector rv = new Vector();
++        collectRelevantVectors(rv, "Define");
++        return rv;
+     }
+ 
+     Vector getIncludes() {
+-	Vector rv = new Vector();
+-	
+-	// for generated includes
+-	rv.add(get("OutputDir"));
+-
+-       	collectRelevantVectors(rv, "AbsoluteInclude");
+-	
+-	Vector ri = new Vector();
+-	String sourceBase = getFieldString(null, "SourceBase");
+-	collectRelevantVectors(ri, "RelativeInclude");
+-	for (Iterator i = ri.iterator(); i.hasNext(); ) {
+-	    String f = (String)i.next();
+-	    rv.add(sourceBase + Util.sep + f);
+-	}
++        Vector rv = new Vector();
++
++        // for generated includes
++        rv.add(get("OutputDir"));
++
++        collectRelevantVectors(rv, "AbsoluteInclude");
+ 
+-	return rv;
++        Vector ri = new Vector();
++        String sourceBase = getFieldString(null, "SourceBase");
++        collectRelevantVectors(ri, "RelativeInclude");
++        for (Iterator i = ri.iterator(); i.hasNext(); ) {
++            String f = (String)i.next();
++            rv.add(sourceBase + Util.sep + f);
++        }
++
++        return rv;
+     }
+ 
+     static Hashtable cfgData = new Hashtable();
+@@ -394,22 +394,22 @@
+     }
+ 
+     static Object getField(String cfg, String field) {
+-	if (cfg == null) {
+-	    return globalData.get(field);
+-	}
++        if (cfg == null) {
++            return globalData.get(field);
++        }
+ 
+-	Hashtable ht =  (Hashtable)cfgData.get(cfg);
+-	return ht == null ? null : ht.get(field);
++        Hashtable ht =  (Hashtable)cfgData.get(cfg);
++        return ht == null ? null : ht.get(field);
+     }
+ 
+     static String getFieldString(String cfg, String field) {
+-	return (String)getField(cfg, field);
++        return (String)getField(cfg, field);
+     }
+ 
+     static Vector getFieldVector(String cfg, String field) {
+-	return (Vector)getField(cfg, field);
++        return (Vector)getField(cfg, field);
+     }
+-    
++
+     static void putField(String cfg, String field, Object value) {
+         putFieldImpl(cfg, field, value);
+         if (appliesToTieredBuild(cfg, field)) {
+@@ -418,25 +418,25 @@
+     }
+ 
+     private static void putFieldImpl(String cfg, String field, Object value) {
+-	if (cfg == null) {
+-	    globalData.put(field, value);
+-	    return;
+-	}
+-
+-	Hashtable ht = (Hashtable)cfgData.get(cfg);
+-	if (ht == null) {
+-	    ht = new Hashtable();
+-	    cfgData.put(cfg, ht);
+-	}
++        if (cfg == null) {
++            globalData.put(field, value);
++            return;
++        }
++
++        Hashtable ht = (Hashtable)cfgData.get(cfg);
++        if (ht == null) {
++            ht = new Hashtable();
++            cfgData.put(cfg, ht);
++        }
+ 
+-	ht.put(field, value);	
++        ht.put(field, value);
+     }
+ 
+     static Object getFieldHash(String cfg, String field, String name) {
+-	Hashtable ht = (Hashtable)getField(cfg, field);
++        Hashtable ht = (Hashtable)getField(cfg, field);
+ 
+-	return ht == null ? null : ht.get(name);
+-    }    
++        return ht == null ? null : ht.get(name);
++    }
+ 
+     static void putFieldHash(String cfg, String field, String name, Object val) {
+         putFieldHashImpl(cfg, field, name, val);
+@@ -446,14 +446,14 @@
+     }
+ 
+     private static void putFieldHashImpl(String cfg, String field, String name, Object val) {
+-	Hashtable ht = (Hashtable)getField(cfg, field);
++        Hashtable ht = (Hashtable)getField(cfg, field);
+ 
+-	if (ht == null) {
+-	    ht = new Hashtable();
+-	    putFieldImpl(cfg, field, ht);
+-	}
++        if (ht == null) {
++            ht = new Hashtable();
++            putFieldImpl(cfg, field, ht);
++        }
+ 
+-	ht.put(name, val);
++        ht.put(name, val);
+     }
+ 
+     static void addFieldVector(String cfg, String field, String element) {
+@@ -464,52 +464,52 @@
+     }
+ 
+     private static void addFieldVectorImpl(String cfg, String field, String element) {
+-	Vector v = (Vector)getField(cfg, field);
++        Vector v = (Vector)getField(cfg, field);
+ 
+-	if (v == null) {
+-	    v = new Vector();
+-	    putFieldImpl(cfg, field, v);
+-	}
++        if (v == null) {
++            v = new Vector();
++            putFieldImpl(cfg, field, v);
++        }
+ 
+-	v.add(element);
++        v.add(element);
+     }
+ 
+     String expandFormat(String format) {
+-	if (format == null) {
+-	    return null;
+-	}
+-
+-	if (format.indexOf('%') == -1) {
+-	    return format;
+-	}
+-
+-	StringBuffer sb = new StringBuffer();
+-	int len = format.length();
+-	for (int i=0; i<len; i++) {
+-	    char ch = format.charAt(i);
+-	    if (ch == '%') {
+-		char ch1 = format.charAt(i+1);
+-		switch (ch1) {
+-		case '%':
+-		    sb.append(ch1);
+-		    break;
+-		case 'b':
+-		    sb.append(build());
+-		    break;
+-		case 'f':
+-		    sb.append(flavour());
+-		    break;
+-		default:
+-		    sb.append(ch);
+-		    sb.append(ch1);
+-		}
+-		i++;
+-	    } else {
+-		sb.append(ch);
+-	    }
+-	}
+-	
+-	return sb.toString();
++        if (format == null) {
++            return null;
++        }
++
++        if (format.indexOf('%') == -1) {
++            return format;
++        }
++
++        StringBuffer sb = new StringBuffer();
++        int len = format.length();
++        for (int i=0; i<len; i++) {
++            char ch = format.charAt(i);
++            if (ch == '%') {
++                char ch1 = format.charAt(i+1);
++                switch (ch1) {
++                case '%':
++                    sb.append(ch1);
++                    break;
++                case 'b':
++                    sb.append(build());
++                    break;
++                case 'f':
++                    sb.append(flavour());
++                    break;
++                default:
++                    sb.append(ch);
++                    sb.append(ch1);
++                }
++                i++;
++            } else {
++                sb.append(ch);
++            }
++        }
++
++        return sb.toString();
+     }
+ }
+ 
+@@ -517,176 +517,176 @@
+     abstract String getOptFlag();
+ 
+     protected void init(Vector includes, Vector defines) {
+-	defines.add("_DEBUG");
+-	defines.add("ASSERT");
++        defines.add("_DEBUG");
++        defines.add("ASSERT");
+ 
+-	super.init(includes, defines);
++        super.init(includes, defines);
+ 
+-	getV("CompilerFlags").addAll(getCI().getDebugCompilerFlags(getOptFlag()));
+-	getV("LinkerFlags").addAll(getCI().getDebugLinkerFlags());
++        getV("CompilerFlags").addAll(getCI().getDebugCompilerFlags(getOptFlag()));
++        getV("LinkerFlags").addAll(getCI().getDebugLinkerFlags());
+    }
+ }
+ 
+ class C1DebugConfig extends GenericDebugConfig {
+     String getOptFlag() {
+-	return getCI().getNoOptFlag();
++        return getCI().getNoOptFlag();
+     }
+ 
+     C1DebugConfig() {
+-	initNames("compiler1", "debug", "fastdebug\\jre\\bin\\client\\jvm.dll");
+-	init(getIncludes(), getDefines());
++        initNames("compiler1", "debug", "fastdebug\\jre\\bin\\client\\jvm.dll");
++        init(getIncludes(), getDefines());
+     }
+ }
+ 
+ class C1FastDebugConfig extends GenericDebugConfig {
+     String getOptFlag() {
+-	return getCI().getOptFlag();
++        return getCI().getOptFlag();
+     }
+ 
+     C1FastDebugConfig() {
+-	initNames("compiler1", "fastdebug", "fastdebug\\jre\\bin\\client\\jvm.dll");
+-	init(getIncludes(), getDefines());
++        initNames("compiler1", "fastdebug", "fastdebug\\jre\\bin\\client\\jvm.dll");
++        init(getIncludes(), getDefines());
+     }
+ }
+ 
+ class C2DebugConfig extends GenericDebugConfig {
+     String getOptFlag() {
+-	return getCI().getNoOptFlag();
++        return getCI().getNoOptFlag();
+     }
+ 
+     C2DebugConfig() {
+-	initNames("compiler2", "debug", "fastdebug\\jre\\bin\\server\\jvm.dll");
+-	init(getIncludes(), getDefines());
++        initNames("compiler2", "debug", "fastdebug\\jre\\bin\\server\\jvm.dll");
++        init(getIncludes(), getDefines());
+     }
+ }
+ 
+ class C2FastDebugConfig extends GenericDebugConfig {
+     String getOptFlag() {
+-	return getCI().getOptFlag();
++        return getCI().getOptFlag();
+     }
+ 
+     C2FastDebugConfig() {
+-	initNames("compiler2", "fastdebug", "fastdebug\\jre\\bin\\server\\jvm.dll");
+-	init(getIncludes(), getDefines());
++        initNames("compiler2", "fastdebug", "fastdebug\\jre\\bin\\server\\jvm.dll");
++        init(getIncludes(), getDefines());
+     }
+ }
+ 
+ class TieredDebugConfig extends GenericDebugConfig {
+     String getOptFlag() {
+-	return getCI().getNoOptFlag();
++        return getCI().getNoOptFlag();
+     }
+ 
+     TieredDebugConfig() {
+-	initNames("tiered", "debug", "fastdebug\\jre\\bin\\server\\jvm.dll");
+-	init(getIncludes(), getDefines());
++        initNames("tiered", "debug", "fastdebug\\jre\\bin\\server\\jvm.dll");
++        init(getIncludes(), getDefines());
+     }
+ }
+ 
+ class TieredFastDebugConfig extends GenericDebugConfig {
+     String getOptFlag() {
+-	return getCI().getOptFlag();
++        return getCI().getOptFlag();
+     }
+ 
+     TieredFastDebugConfig() {
+-	initNames("tiered", "fastdebug", "fastdebug\\jre\\bin\\server\\jvm.dll");
+-	init(getIncludes(), getDefines());
++        initNames("tiered", "fastdebug", "fastdebug\\jre\\bin\\server\\jvm.dll");
++        init(getIncludes(), getDefines());
+     }
+ }
+ 
+ 
+ abstract class ProductConfig extends BuildConfig {
+     protected void init(Vector includes, Vector defines) {
+-	defines.add("NDEBUG");
+-	defines.add("PRODUCT");
++        defines.add("NDEBUG");
++        defines.add("PRODUCT");
+ 
+-	super.init(includes, defines);
++        super.init(includes, defines);
+ 
+-	getV("CompilerFlags").addAll(getCI().getProductCompilerFlags());
+-	getV("LinkerFlags").addAll(getCI().getProductLinkerFlags());
++        getV("CompilerFlags").addAll(getCI().getProductCompilerFlags());
++        getV("LinkerFlags").addAll(getCI().getProductLinkerFlags());
+     }
+ }
+ 
+ class C1ProductConfig extends ProductConfig {
+     C1ProductConfig() {
+-	initNames("compiler1", "product", "jre\\bin\\client\\jvm.dll");
+-	init(getIncludes(), getDefines());
++        initNames("compiler1", "product", "jre\\bin\\client\\jvm.dll");
++        init(getIncludes(), getDefines());
+     }
+ }
+ 
+ class C2ProductConfig extends ProductConfig {
+     C2ProductConfig() {
+-	initNames("compiler2", "product", "jre\\bin\\server\\jvm.dll");
+-        init(getIncludes(), getDefines());        
++        initNames("compiler2", "product", "jre\\bin\\server\\jvm.dll");
++        init(getIncludes(), getDefines());
+     }
+ }
+ 
+ class TieredProductConfig extends ProductConfig {
+     TieredProductConfig() {
+-	initNames("tiered", "product", "jre\\bin\\server\\jvm.dll");
+-        init(getIncludes(), getDefines());        
++        initNames("tiered", "product", "jre\\bin\\server\\jvm.dll");
++        init(getIncludes(), getDefines());
+     }
+ }
+ 
+ 
+ class CoreDebugConfig extends GenericDebugConfig {
+     String getOptFlag() {
+-	return getCI().getNoOptFlag();
++        return getCI().getNoOptFlag();
+     }
+ 
+     CoreDebugConfig() {
+-	initNames("core", "debug", "fastdebug\\jre\\bin\\core\\jvm.dll");
+-	init(getIncludes(), getDefines());
++        initNames("core", "debug", "fastdebug\\jre\\bin\\core\\jvm.dll");
++        init(getIncludes(), getDefines());
+     }
+ }
+ 
+ 
+ class CoreFastDebugConfig extends GenericDebugConfig {
+     String getOptFlag() {
+-	return getCI().getOptFlag();
++        return getCI().getOptFlag();
+     }
+ 
+     CoreFastDebugConfig() {
+-	initNames("core", "fastdebug", "fastdebug\\jre\\bin\\core\\jvm.dll");
+-	init(getIncludes(), getDefines());
++        initNames("core", "fastdebug", "fastdebug\\jre\\bin\\core\\jvm.dll");
++        init(getIncludes(), getDefines());
+     }
+ }
+ 
+ 
+ class CoreProductConfig extends ProductConfig {
+     CoreProductConfig() {
+-	initNames("core", "product", "jre\\bin\\core\\jvm.dll");
+-        init(getIncludes(), getDefines());        
++        initNames("core", "product", "jre\\bin\\core\\jvm.dll");
++        init(getIncludes(), getDefines());
+     }
+ }
+ 
+ class KernelDebugConfig extends GenericDebugConfig {
+     String getOptFlag() {
+-	return getCI().getNoOptFlag();
++        return getCI().getNoOptFlag();
+     }
+ 
+     KernelDebugConfig() {
+-	initNames("kernel", "debug", "fastdebug\\jre\\bin\\kernel\\jvm.dll");
+-	init(getIncludes(), getDefines());
++        initNames("kernel", "debug", "fastdebug\\jre\\bin\\kernel\\jvm.dll");
++        init(getIncludes(), getDefines());
+     }
+ }
+ 
+ 
+ class KernelFastDebugConfig extends GenericDebugConfig {
+     String getOptFlag() {
+-	return getCI().getOptFlag();
++        return getCI().getOptFlag();
+     }
+ 
+     KernelFastDebugConfig() {
+-	initNames("kernel", "fastdebug", "fastdebug\\jre\\bin\\kernel\\jvm.dll");
+-	init(getIncludes(), getDefines());
++        initNames("kernel", "fastdebug", "fastdebug\\jre\\bin\\kernel\\jvm.dll");
++        init(getIncludes(), getDefines());
+     }
+ }
+ 
+ 
+ class KernelProductConfig extends ProductConfig {
+     KernelProductConfig() {
+-	initNames("kernel", "product", "jre\\bin\\kernel\\jvm.dll");
+-        init(getIncludes(), getDefines());        
++        initNames("kernel", "product", "jre\\bin\\kernel\\jvm.dll");
++        init(getIncludes(), getDefines());
+     }
+ }
+ abstract class CompilerInterface {
+@@ -701,9 +701,6 @@
+     abstract String makeCfgName(String flavourBuild);
+ 
+     void addAttr(Vector receiver, String attr, String value) {
+-	receiver.add(attr); receiver.add(value); 
++        receiver.add(attr); receiver.add(value);
+     }
+ }
+-
+-
+-
+diff -ruN openjdk6/hotspot/src/share/tools/MakeDeps/Database.java openjdk/hotspot/src/share/tools/MakeDeps/Database.java
+--- openjdk6/hotspot/src/share/tools/MakeDeps/Database.java	2008-02-28 05:02:31.000000000 -0500
++++ openjdk/hotspot/src/share/tools/MakeDeps/Database.java	2008-01-31 09:19:00.000000000 -0500
+@@ -19,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ import java.io.*;
+@@ -32,7 +32,7 @@
+   // files that have implicit dependency on platform files
+   // e.g. os.hpp: os_<os_family>.hpp os_<os_arch>.hpp but only
+   // recorded if the platform file was seen.
+-  private FileList platformFiles; 
++  private FileList platformFiles;
+   private FileList outerFiles;
+   private FileList indivIncludes;
+   private FileList grandInclude; // the results for the grand include file
+@@ -175,7 +175,7 @@
+               // derive generic name from platform specific name
+               // e.g. os_<arch_os>.hpp => os.hpp. We enforce the
+               // restriction (imperfectly) noted in includeDB_core
+-              // that platform specific files will have an underscore 
++              // that platform specific files will have an underscore
+               // preceding the macro invocation.
+ 
+               // First expand macro as null string.
+@@ -240,7 +240,7 @@
+ 
+               if (includee.equals(plat.noGrandInclude())) {
+                 p.setUseGrandInclude(false);
+-              } else {            
++              } else {
+                 FileList q = allFiles.listForFile(includee);
+                 p.addIfAbsent(q);
+               }
+@@ -260,13 +260,13 @@
+       if (firstFile != null) {
+         FileList p = allFiles.listForFile(firstFile);
+         allFiles.setFirstFile(p);
+-        outerFiles.setFirstFile(p);	  
++        outerFiles.setFirstFile(p);
+       }
+ 
+       if (lastFile != null) {
+         FileList p = allFiles.listForFile(lastFile);
+         allFiles.setLastFile(p);
+-        outerFiles.setLastFile(p);	  
++        outerFiles.setLastFile(p);
+       }
+     }
+ 
+@@ -345,7 +345,7 @@
+   }
+ 
+   private void writeGrandUnixMakefile() throws IOException {
+-    if (!plat.writeDeps()) 
++    if (!plat.writeDeps())
+       return;
+ 
+     System.out.println("\twriting dependencies file\n");
+@@ -383,7 +383,7 @@
+       // write Obj_Files = ...
+       gd.println("Obj_Files = \\");
+       gd.println(firstName + plat.objFileSuffix() + " \\");
+-      for (Iterator iter = sortList.iterator(); iter.hasNext(); ) {            
++      for (Iterator iter = sortList.iterator(); iter.hasNext(); ) {
+         gd.println(iter.next() + plat.objFileSuffix() + " \\");
+       }
+       gd.println(lastName + plat.objFileSuffix() + " \\");
+@@ -471,7 +471,7 @@
+         }
+ 
+         if (plat.includeGIDependencies()
+-            && nPrecompiledFiles > 0 
++            && nPrecompiledFiles > 0
+             && anII.getUseGrandInclude()) {
+           gd.println("    $(Precompiled_Files) \\");
+         }
+diff -ruN openjdk6/hotspot/src/share/tools/MakeDeps/DirectoryTree.java openjdk/hotspot/src/share/tools/MakeDeps/DirectoryTree.java
+--- openjdk6/hotspot/src/share/tools/MakeDeps/DirectoryTree.java	2008-02-28 05:02:31.000000000 -0500
++++ openjdk/hotspot/src/share/tools/MakeDeps/DirectoryTree.java	2008-01-31 09:19:00.000000000 -0500
+@@ -19,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ /** Encapsulates a notion of a directory tree. Designed to allow fast
+@@ -43,8 +43,8 @@
+     private boolean verbose;
+ 
+     public DirectoryTree() {
+-	subdirsToIgnore = new Vector();
+-	verbose = false;
++        subdirsToIgnore = new Vector();
++        verbose = false;
+     }
+ 
+     /** Takes an absolute path to the root directory of this
+@@ -52,22 +52,22 @@
+         string represents a plain file or nonexistent directory. */
+ 
+     public DirectoryTree(String baseDirectory) {
+-	this();
+-	readDirectory(baseDirectory);
++        this();
++        readDirectory(baseDirectory);
+     }
+ 
+     public void addSubdirToIgnore(String subdir) {
+-	subdirsToIgnore.add(subdir);
++        subdirsToIgnore.add(subdir);
+     }
+ 
+     /** Output "."'s to System.out as directories are read. Defaults
+         to false. */
+     public void setVerbose(boolean newValue) {
+-	verbose = newValue;
++        verbose = newValue;
+     }
+ 
+     public boolean getVerbose() {
+-	return verbose;
++        return verbose;
+     }
+ 
+     public String getRootNodeName() {
+@@ -79,82 +79,82 @@
+         string represents a plain file or nonexistent directory. */
+ 
+     public void readDirectory(String baseDirectory)
+-	throws IllegalArgumentException {
+-	File root = new File(baseDirectory);
+-	if (!root.isDirectory()) {
+-	    throw new IllegalArgumentException("baseDirectory \"" +
+-					       baseDirectory +
+-					       "\" does not exist or " +
+-					       "is not a directory");
+-	}
+-	try {
+-	    root = root.getCanonicalFile();
+-	}
+-	catch (IOException e) {
+-	    throw new RuntimeException(e.toString());
+-	}
+-	rootNode = new Node(root);
+-	readDirectory(rootNode, root);
++        throws IllegalArgumentException {
++        File root = new File(baseDirectory);
++        if (!root.isDirectory()) {
++            throw new IllegalArgumentException("baseDirectory \"" +
++                                               baseDirectory +
++                                               "\" does not exist or " +
++                                               "is not a directory");
++        }
++        try {
++            root = root.getCanonicalFile();
++        }
++        catch (IOException e) {
++            throw new RuntimeException(e.toString());
++        }
++        rootNode = new Node(root);
++        readDirectory(rootNode, root);
+     }
+ 
+     /** Queries the DirectoryTree for a file or directory name. Takes
+-	only the name of the file or directory itself (i.e., no parent
+-	directory information should be in the passed name). Returns a
+-	List of DirectoryTreeNodes specifying the full paths of all of
+-	the files or directories of this name in the DirectoryTree.
+-	Returns null if the directory tree has not been read from disk
+-	yet or if the file was not found in the tree. */
++        only the name of the file or directory itself (i.e., no parent
++        directory information should be in the passed name). Returns a
++        List of DirectoryTreeNodes specifying the full paths of all of
++        the files or directories of this name in the DirectoryTree.
++        Returns null if the directory tree has not been read from disk
++        yet or if the file was not found in the tree. */
+     public List findFile(String name) {
+-	if (rootNode == null) {
+-	    return null;
+-	}
+-
+-	if (nameToNodeListTable == null) {
+-	    nameToNodeListTable = new Hashtable();
+-	    try {
+-		buildNameToNodeListTable(rootNode);
+-	    } catch (IOException e) {
+-		e.printStackTrace();
+-		return null;
+-	    }
+-	}
++        if (rootNode == null) {
++            return null;
++        }
++
++        if (nameToNodeListTable == null) {
++            nameToNodeListTable = new Hashtable();
++            try {
++                buildNameToNodeListTable(rootNode);
++            } catch (IOException e) {
++                e.printStackTrace();
++                return null;
++            }
++        }
+ 
+-	return (List) nameToNodeListTable.get(name);
++        return (List) nameToNodeListTable.get(name);
+     }
+-    
++
+     private void buildNameToNodeListTable(Node curNode)
+       throws IOException {
+-	String fullName = curNode.getName();
+-	String parent = curNode.getParent();
+-	String separator = System.getProperty("file.separator");
++        String fullName = curNode.getName();
++        String parent = curNode.getParent();
++        String separator = System.getProperty("file.separator");
+ 
+         if (parent != null) {
+           if (!fullName.startsWith(parent)) {
+-	    throw new RuntimeException(
+-	        "Internal error: parent of file name \"" + fullName +
+-		"\" does not match file name \"" + parent + "\""
+-	    );
++            throw new RuntimeException(
++                "Internal error: parent of file name \"" + fullName +
++                "\" does not match file name \"" + parent + "\""
++            );
+           }
+ 
+           int len = parent.length();
+           if (!parent.endsWith(separator)) {
+-	    len += separator.length();
++            len += separator.length();
+           }
+-	
++
+           String fileName = fullName.substring(len);
+ 
+           if (fileName == null) {
+-	    throw new RuntimeException(
+-	        "Internal error: file name was empty"
+-	    );
++            throw new RuntimeException(
++                "Internal error: file name was empty"
++            );
+           }
+ 
+           List nodeList = (List) nameToNodeListTable.get(fileName);
+           if (nodeList == null) {
+-	    nodeList = new Vector();
+-	    nameToNodeListTable.put(fileName, nodeList);
++            nodeList = new Vector();
++            nameToNodeListTable.put(fileName, nodeList);
+           }
+-	
++
+           nodeList.add(curNode);
+         } else {
+           if (curNode != rootNode) {
+@@ -165,14 +165,14 @@
+           }
+         }
+ 
+-	if (curNode.isDirectory()) {
++        if (curNode.isDirectory()) {
+           Iterator iter = curNode.getChildren();
+           if (iter != null) {
+             while (iter.hasNext()) {
+               buildNameToNodeListTable((Node) iter.next());
+             }
+           }
+-	}
++        }
+     }
+ 
+     /** Reads all of the files in the given directory and adds them as
+@@ -180,78 +180,78 @@
+         node represents a directory. */
+ 
+     private void readDirectory(Node parentNode, File parentDir) {
+-	File[] children = parentDir.listFiles();
+-	if (children == null)
+-	    return;
+-	if (verbose) {
+-	    System.out.print(".");
+-	    System.out.flush();
+-	}
+-	for (int i = 0; i < children.length; i++) {
+-	    File child = children[i];
+-	    children[i] = null;
+-	    boolean isDir = child.isDirectory();
+-	    boolean mustSkip = false;
+-	    if (isDir) {
+-		for (Iterator iter = subdirsToIgnore.iterator();
+-		     iter.hasNext(); ) {
+-		    if (child.getName().equals((String) iter.next())) {
+-			mustSkip = true;
+-			break;
+-		    }
+-		}
+-	    }
+-	    if (!mustSkip) {
+-		Node childNode = new Node(child);
+-		parentNode.addChild(childNode);
+-		if (isDir) {
+-		    readDirectory(childNode, child);
+-		}
+-	    }
+-	}
++        File[] children = parentDir.listFiles();
++        if (children == null)
++            return;
++        if (verbose) {
++            System.out.print(".");
++            System.out.flush();
++        }
++        for (int i = 0; i < children.length; i++) {
++            File child = children[i];
++            children[i] = null;
++            boolean isDir = child.isDirectory();
++            boolean mustSkip = false;
++            if (isDir) {
++                for (Iterator iter = subdirsToIgnore.iterator();
++                     iter.hasNext(); ) {
++                    if (child.getName().equals((String) iter.next())) {
++                        mustSkip = true;
++                        break;
++                    }
++                }
++            }
++            if (!mustSkip) {
++                Node childNode = new Node(child);
++                parentNode.addChild(childNode);
++                if (isDir) {
++                    readDirectory(childNode, child);
++                }
++            }
++        }
+     }
+ 
+     private class Node implements DirectoryTreeNode {
+-	private File file;
+-	private Vector children;
+-	
+-	/** file must be a canonical file */
+-	Node(File file) {
+-	    this.file = file;
+-	    children = new Vector();
+-	}
+-
+-	public boolean isFile() {
+-	    return file.isFile();
+-	}
+-
+-	public boolean isDirectory() {
+-	    return file.isDirectory();
+-	}
+-
+-	public String getName() {
+-	    return file.getPath();
+-	}
+-
+-	public String getParent() {
+-	    return file.getParent();
+-	}
+-
+-	public void addChild(Node n) {
+-	    children.add(n);
+-	}
+-
+-	public Iterator getChildren() throws IllegalArgumentException {
+-	    return children.iterator();
+-	}
+-
+-	public int getNumChildren() throws IllegalArgumentException {
+-	    return children.size();
+-	}
+-	
+-	public DirectoryTreeNode getChild(int i)
+-	    throws IllegalArgumentException, ArrayIndexOutOfBoundsException {
+-	    return (DirectoryTreeNode) children.get(i);
+-	}
++        private File file;
++        private Vector children;
++
++        /** file must be a canonical file */
++        Node(File file) {
++            this.file = file;
++            children = new Vector();
++        }
++
++        public boolean isFile() {
++            return file.isFile();
++        }
++
++        public boolean isDirectory() {
++            return file.isDirectory();
++        }
++
++        public String getName() {
++            return file.getPath();
++        }
++
++        public String getParent() {
++            return file.getParent();
++        }
++
++        public void addChild(Node n) {
++            children.add(n);
++        }
++
++        public Iterator getChildren() throws IllegalArgumentException {
++            return children.iterator();
++        }
++
++        public int getNumChildren() throws IllegalArgumentException {
++            return children.size();
++        }
++
++        public DirectoryTreeNode getChild(int i)
++            throws IllegalArgumentException, ArrayIndexOutOfBoundsException {
++            return (DirectoryTreeNode) children.get(i);
++        }
+     }
+ }
+diff -ruN openjdk6/hotspot/src/share/tools/MakeDeps/DirectoryTreeNode.java openjdk/hotspot/src/share/tools/MakeDeps/DirectoryTreeNode.java
+--- openjdk6/hotspot/src/share/tools/MakeDeps/DirectoryTreeNode.java	2008-02-28 05:02:31.000000000 -0500
++++ openjdk/hotspot/src/share/tools/MakeDeps/DirectoryTreeNode.java	2008-01-31 09:19:00.000000000 -0500
+@@ -19,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ import java.util.*;
+@@ -32,5 +32,5 @@
+     public Iterator getChildren() throws IllegalArgumentException;
+     public int getNumChildren() throws IllegalArgumentException;
+     public DirectoryTreeNode getChild(int i)
+-	throws IllegalArgumentException, ArrayIndexOutOfBoundsException;
++        throws IllegalArgumentException, ArrayIndexOutOfBoundsException;
+ }
+diff -ruN openjdk6/hotspot/src/share/tools/MakeDeps/FileFormatException.java openjdk/hotspot/src/share/tools/MakeDeps/FileFormatException.java
+--- openjdk6/hotspot/src/share/tools/MakeDeps/FileFormatException.java	2008-02-28 05:02:31.000000000 -0500
++++ openjdk/hotspot/src/share/tools/MakeDeps/FileFormatException.java	2008-01-31 09:19:00.000000000 -0500
+@@ -19,15 +19,15 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ public class FileFormatException extends Exception {
+     public FileFormatException() {
+-	super();
++        super();
+     }
+ 
+     public FileFormatException(String s) {
+-	super(s);
++        super(s);
+     }
+ }
+diff -ruN openjdk6/hotspot/src/share/tools/MakeDeps/FileList.java openjdk/hotspot/src/share/tools/MakeDeps/FileList.java
+--- openjdk6/hotspot/src/share/tools/MakeDeps/FileList.java	2008-02-28 05:02:31.000000000 -0500
++++ openjdk/hotspot/src/share/tools/MakeDeps/FileList.java	2008-01-31 09:19:00.000000000 -0500
+@@ -19,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ import java.io.*;
+@@ -43,13 +43,13 @@
+     private Platform plat;
+ 
+     public FileList(String n, Platform plat) {
+-	super();
+-	this.plat = plat;
+-	beenHere = mayBeCycle = isCycle = false;
+-	platformDependentInclude = null;
+-	name = n;
+-	count = 0;
+-	useGrandInclude = plat.haveGrandInclude();
++        super();
++        this.plat = plat;
++        beenHere = mayBeCycle = isCycle = false;
++        platformDependentInclude = null;
++        name = n;
++        count = 0;
++        useGrandInclude = plat.haveGrandInclude();
+     }
+ 
+     // Change definition of equality from AbstractList so remove() works properly
+@@ -59,98 +59,98 @@
+ 
+     // Necessary accessors
+     public String getName() {
+-	return name;
++        return name;
+     }
+ 
+     public void setPlatformDependentInclude(String arg) {
+-	platformDependentInclude = arg;
++        platformDependentInclude = arg;
+     }
+ 
+     public String getPlatformDependentInclude() {
+-	return platformDependentInclude;
++        return platformDependentInclude;
+     }
+ 
+     public boolean getUseGrandInclude() {
+-	return useGrandInclude;
++        return useGrandInclude;
+     }
+ 
+     public void setUseGrandInclude(boolean arg) {
+-	useGrandInclude = arg;
++        useGrandInclude = arg;
+     }
+ 
+     public void incrementCount() {
+-	count++;
++        count++;
+     }
+ 
+     public int getCount() {
+-	return count;
++        return count;
+     }
+ 
+     public FileList listForFile(String fileName) {
+-	for (Iterator iter = iterator(); iter.hasNext(); ) {
+-	    FileList fl = (FileList) iter.next();
+-	    if (plat.fileNameStringEquality(fl.name, fileName)) {
+-		plat.fileNamePortabilityCheck(fl.name, fileName);
+-		return fl;
+-	    }
+-	}
+-	plat.fileNamePortabilityCheck(fileName);
+-	FileList newList = new FileList(fileName, plat);
+-	add(newList);
+-	return newList;
++        for (Iterator iter = iterator(); iter.hasNext(); ) {
++            FileList fl = (FileList) iter.next();
++            if (plat.fileNameStringEquality(fl.name, fileName)) {
++                plat.fileNamePortabilityCheck(fl.name, fileName);
++                return fl;
++            }
++        }
++        plat.fileNamePortabilityCheck(fileName);
++        FileList newList = new FileList(fileName, plat);
++        add(newList);
++        return newList;
+     }
+ 
+     public boolean hasListForFile(String fileName) {
+-	for (Iterator iter = iterator(); iter.hasNext(); ) {
+-	    FileList fl = (FileList) iter.next();
+-	    if (plat.fileNameStringEquality(fl.name, fileName)) {
+-		plat.fileNamePortabilityCheck(fl.name, fileName);
+-		return true;
+-	    }
+-	}
+-	return false;
++        for (Iterator iter = iterator(); iter.hasNext(); ) {
++            FileList fl = (FileList) iter.next();
++            if (plat.fileNameStringEquality(fl.name, fileName)) {
++                plat.fileNamePortabilityCheck(fl.name, fileName);
++                return true;
++            }
++        }
++        return false;
+     }
+ 
+     public boolean compareLists(FileList s) {
+-	Iterator myIter = iterator();
+-	Iterator hisIter = s.iterator();
++        Iterator myIter = iterator();
++        Iterator hisIter = s.iterator();
+ 
+-	while (myIter.hasNext() &&
+-	       hisIter.hasNext()) {
+-	    // crude: order dependent
+-	    FileList myElement = (FileList) myIter.next();
+-	    FileList hisElement = (FileList) hisIter.next();
+-	    if (!plat.fileNameStringEquality(myElement.name,
+-					     hisElement.name)) {
+-		return false;
+-	    }
+-	}
+-	
+-	if (myIter.hasNext() != hisIter.hasNext()) {
+-	    // One ended earlier
+-	    return false;
+-	}
+-	
+-	return true;
++        while (myIter.hasNext() &&
++               hisIter.hasNext()) {
++            // crude: order dependent
++            FileList myElement = (FileList) myIter.next();
++            FileList hisElement = (FileList) hisIter.next();
++            if (!plat.fileNameStringEquality(myElement.name,
++                                             hisElement.name)) {
++                return false;
++            }
++        }
++
++        if (myIter.hasNext() != hisIter.hasNext()) {
++            // One ended earlier
++            return false;
++        }
++
++        return true;
+     }
+ 
+     public void addIfAbsent(FileList s) {
+-	for (Iterator iter = iterator(); iter.hasNext(); ) {
+-	    if (iter.next() == s) {
+-		return;
+-	    }
+-	}
+-	add(s);
++        for (Iterator iter = iterator(); iter.hasNext(); ) {
++            if (iter.next() == s) {
++                return;
++            }
++        }
++        add(s);
+     }
+ 
+     public void sortByName() {
+-	Collections.sort(this, new Comparator() {
+-		public int compare(Object o1, Object o2) {
+-		    FileList fl1 = (FileList) o1;
+-		    FileList fl2 = (FileList) o2;
+-		    return fl1.getName().compareTo(fl2.getName());
+-		}
+-	    });
++        Collections.sort(this, new Comparator() {
++                public int compare(Object o1, Object o2) {
++                    FileList fl1 = (FileList) o1;
++                    FileList fl2 = (FileList) o2;
++                    return fl1.getName().compareTo(fl2.getName());
++                }
++            });
+     }
+ 
+     public void setFirstFile(FileList s) {
+@@ -166,98 +166,98 @@
+     }
+ 
+     public boolean doFiles(FileList s) {
+-	boolean result = true;
+-	for (Iterator iter = iterator(); iter.hasNext(); ) {
+-	    FileList h = (FileList) iter.next();
+-	    if (h.platformDependentInclude != null) {
+-		System.err.println("Error: the source for " +
+-				   h.platformDependentInclude +
+-				   " is " + h.name + ".");
+-		System.err.println("\tIt shouldn't be included directly by " +
+-				   name + ".");
+-		h.platformDependentInclude = null; // report once per file
+-		result = false;
+-	    }
+-	    h.doHFile(s);
+-	}
+-	return result;
++        boolean result = true;
++        for (Iterator iter = iterator(); iter.hasNext(); ) {
++            FileList h = (FileList) iter.next();
++            if (h.platformDependentInclude != null) {
++                System.err.println("Error: the source for " +
++                                   h.platformDependentInclude +
++                                   " is " + h.name + ".");
++                System.err.println("\tIt shouldn't be included directly by " +
++                                   name + ".");
++                h.platformDependentInclude = null; // report once per file
++                result = false;
++            }
++            h.doHFile(s);
++        }
++        return result;
+     }
+-    
++
+     public void traceCycle(FileList s) {
+-	if (isCycle) // already traced
+-	    return;
+-	isCycle = true;
+-	System.err.println("\ttracing cycle for " + name);
+-	// FIXME: must return status in caller routine
+-	// exitCode = 1;
+-	for (Iterator iter = iterator(); iter.hasNext(); ) {
+-	    FileList q = (FileList) iter.next();
+-	    if (q.mayBeCycle) {
+-		if (s == q) {
+-		    plat.fatalError("\tend of cycle for " + s.getName());
+-		} else {
+-		    q.traceCycle(s);
+-		}
+-	    }
+-	}
++        if (isCycle) // already traced
++            return;
++        isCycle = true;
++        System.err.println("\ttracing cycle for " + name);
++        // FIXME: must return status in caller routine
++        // exitCode = 1;
++        for (Iterator iter = iterator(); iter.hasNext(); ) {
++            FileList q = (FileList) iter.next();
++            if (q.mayBeCycle) {
++                if (s == q) {
++                    plat.fatalError("\tend of cycle for " + s.getName());
++                } else {
++                    q.traceCycle(s);
++                }
++            }
++        }
+     }
+-    
++
+     public void doHFile(FileList s) {
+-	if (beenHere) {
+-	    if (mayBeCycle) {
+-		traceCycle(this);
+-	    }
+-	    return;
+-	}
+-	beenHere = true;
+-	mayBeCycle = true;
+-	doFiles(s);
+-	mayBeCycle = false;
+-	s.add(this);
++        if (beenHere) {
++            if (mayBeCycle) {
++                traceCycle(this);
++            }
++            return;
++        }
++        beenHere = true;
++        mayBeCycle = true;
++        doFiles(s);
++        mayBeCycle = false;
++        s.add(this);
+     }
+ 
+     public FileList doCFile() {
+-	FileList s = new FileList(name, plat);
+-	s.useGrandInclude = useGrandInclude; // propagate this
+-	doFiles(s);
+-	for (Iterator iter = s.iterator(); iter.hasNext(); ) {
+-	    FileList l = (FileList) iter.next();
+-	    l.beenHere = false;
+-	}
+-	return s;
++        FileList s = new FileList(name, plat);
++        s.useGrandInclude = useGrandInclude; // propagate this
++        doFiles(s);
++        for (Iterator iter = s.iterator(); iter.hasNext(); ) {
++            FileList l = (FileList) iter.next();
++            l.beenHere = false;
++        }
++        return s;
+     }
+-    
++
+     /** if .h file is included thresh times, put it in the grand
+         include file */
+     public void putInclFile(Database db)
+-	throws IOException {
++        throws IOException {
+         boolean needline = true;
+-	FileName inclName = plat.getInclFileTemplate().copyStem(name);
+-	PrintWriter inclFile =
+-	    new PrintWriter(new FileWriter(inclName.dirPreStemSuff()));
+-	if (plat.haveGrandInclude() && plat.includeGIInEachIncl()) {
+-	    inclFile.println("# include \"" +
+-			     plat.getGIFileTemplate().dirPreStemAltSuff() +
+-			     "\"");
+-	    needline = false;
+-	}
+-	for (Iterator iter = iterator(); iter.hasNext(); ) {
+-	    FileList hfile = (FileList) iter.next();
+-	    if (!db.hfileIsInGrandInclude(hfile, this)) {
+-		inclFile.println("# include \"" +
+-				 plat.getInclFileTemplate().getInvDir() +
+-				 hfile.name +
+-				 "\"");
+-	        needline = false;
+-	    }
+-	}
+-
+-	// Solaris C++ in strict mode warns about empty files
+-
+-	if(needline) {
+-	    inclFile.println();
+-	}
++        FileName inclName = plat.getInclFileTemplate().copyStem(name);
++        PrintWriter inclFile =
++            new PrintWriter(new FileWriter(inclName.dirPreStemSuff()));
++        if (plat.haveGrandInclude() && plat.includeGIInEachIncl()) {
++            inclFile.println("# include \"" +
++                             plat.getGIFileTemplate().dirPreStemAltSuff() +
++                             "\"");
++            needline = false;
++        }
++        for (Iterator iter = iterator(); iter.hasNext(); ) {
++            FileList hfile = (FileList) iter.next();
++            if (!db.hfileIsInGrandInclude(hfile, this)) {
++                inclFile.println("# include \"" +
++                                 plat.getInclFileTemplate().getInvDir() +
++                                 hfile.name +
++                                 "\"");
++                needline = false;
++            }
++        }
++
++        // Solaris C++ in strict mode warns about empty files
++
++        if(needline) {
++            inclFile.println();
++        }
+ 
+-	inclFile.close();
++        inclFile.close();
+     }
+ }
+diff -ruN openjdk6/hotspot/src/share/tools/MakeDeps/FileName.java openjdk/hotspot/src/share/tools/MakeDeps/FileName.java
+--- openjdk6/hotspot/src/share/tools/MakeDeps/FileName.java	2008-02-28 05:02:31.000000000 -0500
++++ openjdk/hotspot/src/share/tools/MakeDeps/FileName.java	2008-01-31 09:19:00.000000000 -0500
+@@ -19,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ public class FileName {
+@@ -40,80 +40,80 @@
+     /** None of the passed strings may be null. */
+ 
+     public FileName(Platform plat, String dir, String prefix,
+-		    String stem, String suffix,
+-		    String inverseDir, String altSuffix) {
+-	if ((dir == null) ||
+-	    (prefix == null) ||
+-	    (stem == null) ||
+-	    (suffix == null) ||
+-	    (inverseDir == null) ||
+-	    (altSuffix == null)) {
+-	    throw new NullPointerException("All arguments must be non-null");
+-	}
+-
+-	this.plat = plat;
+-
+-	this.dir = dir;
+-	this.prefix = prefix;
+-	this.stem = stem;
+-	this.suffix = suffix;
+-	this.inverseDir = inverseDir;
+-	this.altSuffix = altSuffix;
+-
+-	pss = prefix + stem + suffix;
+-	dpss = dir + prefix + stem + suffix;
+-	psa = prefix + stem + altSuffix;
+-	dpsa = dir + prefix + stem + altSuffix;
++                    String stem, String suffix,
++                    String inverseDir, String altSuffix) {
++        if ((dir == null) ||
++            (prefix == null) ||
++            (stem == null) ||
++            (suffix == null) ||
++            (inverseDir == null) ||
++            (altSuffix == null)) {
++            throw new NullPointerException("All arguments must be non-null");
++        }
++
++        this.plat = plat;
++
++        this.dir = dir;
++        this.prefix = prefix;
++        this.stem = stem;
++        this.suffix = suffix;
++        this.inverseDir = inverseDir;
++        this.altSuffix = altSuffix;
++
++        pss = prefix + stem + suffix;
++        dpss = dir + prefix + stem + suffix;
++        psa = prefix + stem + altSuffix;
++        dpsa = dir + prefix + stem + altSuffix;
+ 
+-	checkLength(plat);
++        checkLength(plat);
+     }
+ 
+     public void checkLength(Platform p) {
+-	int len;
+-	String s;
+-	int suffLen = suffix.length();
+-	int altSuffLen = altSuffix.length();
+-	if (suffLen >= altSuffLen) {
+-	    len = suffLen;
+-	    s = suffix;
+-	} else {
+-	    len = altSuffLen;
+-	    s = altSuffix;
+-	}
+-	len += prefix.length() + stem.length();
+-	int lim = p.fileNameLengthLimit();
+-	if (len > lim) {
+-	    p.fatalError(prefix + stem + s + " is too long: " +
+-			 len + " >= " + lim);
+-	}
++        int len;
++        String s;
++        int suffLen = suffix.length();
++        int altSuffLen = altSuffix.length();
++        if (suffLen >= altSuffLen) {
++            len = suffLen;
++            s = suffix;
++        } else {
++            len = altSuffLen;
++            s = altSuffix;
++        }
++        len += prefix.length() + stem.length();
++        int lim = p.fileNameLengthLimit();
++        if (len > lim) {
++            p.fatalError(prefix + stem + s + " is too long: " +
++                         len + " >= " + lim);
++        }
+     }
+ 
+     public String dirPreStemSuff() {
+-	return dpss;
++        return dpss;
+     }
+ 
+     public String preStemSuff() {
+-	return pss;
++        return pss;
+     }
+ 
+     public String dirPreStemAltSuff() {
+-	return dpsa;
++        return dpsa;
+     }
+ 
+     public String preStemAltSuff() {
+-	return psa;
++        return psa;
+     }
+ 
+     public FileName copyStem(String newStem) {
+-	return new FileName(plat, dir, prefix, newStem,
+-			    suffix, inverseDir, altSuffix);
++        return new FileName(plat, dir, prefix, newStem,
++                            suffix, inverseDir, altSuffix);
+     }
+ 
+     String nameOfList() {
+-	return stem;
++        return stem;
+     }
+ 
+     String getInvDir() {
+-	return inverseDir;
++        return inverseDir;
+     }
+ }
+diff -ruN openjdk6/hotspot/src/share/tools/MakeDeps/MacroDefinitions.java openjdk/hotspot/src/share/tools/MakeDeps/MacroDefinitions.java
+--- openjdk6/hotspot/src/share/tools/MakeDeps/MacroDefinitions.java	2008-02-28 05:02:31.000000000 -0500
++++ openjdk/hotspot/src/share/tools/MakeDeps/MacroDefinitions.java	2008-01-31 09:19:00.000000000 -0500
+@@ -19,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ import java.io.*;
+@@ -29,228 +29,228 @@
+     private Vector macros;
+ 
+     public MacroDefinitions() {
+-	macros = new Vector();
++        macros = new Vector();
+     }
+ 
+     private String lookup(String name) throws NoSuchElementException {
+-	for (Iterator iter = macros.iterator(); iter.hasNext(); ) {
+-	    Macro macro = (Macro) iter.next();
+-	    if (macro.name.equals(name)) {
+-		return macro.contents;
+-	    }
+-	}
+-	throw new NoSuchElementException(name);
++        for (Iterator iter = macros.iterator(); iter.hasNext(); ) {
++            Macro macro = (Macro) iter.next();
++            if (macro.name.equals(name)) {
++                return macro.contents;
++            }
++        }
++        throw new NoSuchElementException(name);
+     }
+-    
++
+     public void addMacro(String name, String contents) {
+-	Macro macro = new Macro();
+-	macro.name = name;
+-	macro.contents = contents;
+-	macros.add(macro);
++        Macro macro = new Macro();
++        macro.name = name;
++        macro.contents = contents;
++        macros.add(macro);
+     }
+ 
+     private boolean lineIsEmpty(String s) {
+-	for (int i = 0; i < s.length(); i++) {
+-	    if (!Character.isWhitespace(s.charAt(i))) {
+-		return false;
+-	    }
+-	}
+-	return true;
++        for (int i = 0; i < s.length(); i++) {
++            if (!Character.isWhitespace(s.charAt(i))) {
++                return false;
++            }
++        }
++        return true;
+     }
+ 
+     public void readFrom(String fileName, boolean missingOk)
+-	throws FileNotFoundException, FileFormatException, IOException {
+-	BufferedReader reader = null;
+-	try {
+-	    reader = new BufferedReader(new FileReader(fileName));
+-	} catch (FileNotFoundException e) {
+-	    if (missingOk) {
+-		return;
+-	    } else {
+-		throw(e);
+-	    }
+-	}
+-	String line;
+-	do {
+-	    line = reader.readLine();
+-	    if (line != null) {
+-		// This had to be rewritten (compare to Database.java)
+-		// because the Solaris platform file has been
+-		// repurposed and now contains "macros" with spaces in
+-		// them.
+-		
+-		if ((!line.startsWith("//")) &&
+-		    (!lineIsEmpty(line))) {
+-		    int nameBegin = -1;
+-		    int nameEnd = -1;
+-		    boolean gotEquals = false;
+-		    int contentsBegin = -1;
+-		    int contentsEnd = -1;
+-
+-		    int i = 0;
+-		    // Scan forward for beginning of name
+-		    while (i < line.length()) {
+-			if (!Character.isWhitespace(line.charAt(i))) {
+-			    break;
+-			}
+-			i++;
+-		    }
+-		    nameBegin = i;
+-
+-		    // Scan forward for end of name
+-		    while (i < line.length()) {
+-			if (Character.isWhitespace(line.charAt(i))) {
+-			    break;
+-			}
+-			i++;
+-		    }
+-		    nameEnd = i;
+-
+-		    // Scan forward for equals sign
+-		    while (i < line.length()) {
+-			if (line.charAt(i) == '=') {
+-			    gotEquals = true;
+-			    break;
+-			}
+-			i++;
+-		    }
+-
+-		    // Scan forward for start of contents
+-		    i++;
+-		    while (i < line.length()) {
+-			if (!Character.isWhitespace(line.charAt(i))) {
+-			    break;
+-			}
+-			i++;
+-		    }
+-		    contentsBegin = i;
+-
+-		    // Scan *backward* for end of contents
+-		    i = line.length() - 1;
+-		    while (i >= 0) {
+-			if (!Character.isWhitespace(line.charAt(i))) {
+-			    break;
+-			}
+-		    }
+-		    contentsEnd = i+1;
+-
+-		    // Now do consistency check
+-		    if (!((nameBegin < nameEnd) &&
+-			  (nameEnd < contentsBegin) &&
+-			  (contentsBegin < contentsEnd) &&
+-			  (gotEquals == true))) {
+-			throw new FileFormatException(
+-			    "Expected \"macroname = value\", " +
+-			    "but found: " + line
+-			);
+-		    }
+-
+-		    String name = line.substring(nameBegin, nameEnd);
+-		    String contents = line.substring(contentsBegin,
+-						     contentsEnd);
+-		    addMacro(name, contents);
+-		}
+-	    }
+-	} while (line != null);
+-	reader.close();
++        throws FileNotFoundException, FileFormatException, IOException {
++        BufferedReader reader = null;
++        try {
++            reader = new BufferedReader(new FileReader(fileName));
++        } catch (FileNotFoundException e) {
++            if (missingOk) {
++                return;
++            } else {
++                throw(e);
++            }
++        }
++        String line;
++        do {
++            line = reader.readLine();
++            if (line != null) {
++                // This had to be rewritten (compare to Database.java)
++                // because the Solaris platform file has been
++                // repurposed and now contains "macros" with spaces in
++                // them.
++
++                if ((!line.startsWith("//")) &&
++                    (!lineIsEmpty(line))) {
++                    int nameBegin = -1;
++                    int nameEnd = -1;
++                    boolean gotEquals = false;
++                    int contentsBegin = -1;
++                    int contentsEnd = -1;
++
++                    int i = 0;
++                    // Scan forward for beginning of name
++                    while (i < line.length()) {
++                        if (!Character.isWhitespace(line.charAt(i))) {
++                            break;
++                        }
++                        i++;
++                    }
++                    nameBegin = i;
++
++                    // Scan forward for end of name
++                    while (i < line.length()) {
++                        if (Character.isWhitespace(line.charAt(i))) {
++                            break;
++                        }
++                        i++;
++                    }
++                    nameEnd = i;
++
++                    // Scan forward for equals sign
++                    while (i < line.length()) {
++                        if (line.charAt(i) == '=') {
++                            gotEquals = true;
++                            break;
++                        }
++                        i++;
++                    }
++
++                    // Scan forward for start of contents
++                    i++;
++                    while (i < line.length()) {
++                        if (!Character.isWhitespace(line.charAt(i))) {
++                            break;
++                        }
++                        i++;
++                    }
++                    contentsBegin = i;
++
++                    // Scan *backward* for end of contents
++                    i = line.length() - 1;
++                    while (i >= 0) {
++                        if (!Character.isWhitespace(line.charAt(i))) {
++                            break;
++                        }
++                    }
++                    contentsEnd = i+1;
++
++                    // Now do consistency check
++                    if (!((nameBegin < nameEnd) &&
++                          (nameEnd < contentsBegin) &&
++                          (contentsBegin < contentsEnd) &&
++                          (gotEquals == true))) {
++                        throw new FileFormatException(
++                            "Expected \"macroname = value\", " +
++                            "but found: " + line
++                        );
++                    }
++
++                    String name = line.substring(nameBegin, nameEnd);
++                    String contents = line.substring(contentsBegin,
++                                                     contentsEnd);
++                    addMacro(name, contents);
++                }
++            }
++        } while (line != null);
++        reader.close();
+     }
+ 
+     /** Throws IllegalArgumentException if passed token is illegally
+         formatted */
+     public String expand(String token)
+-	throws IllegalArgumentException {
+-	// the token may contain one or more <macroName>'s
++        throws IllegalArgumentException {
++        // the token may contain one or more <macroName>'s
++
++        String out = "";
+ 
+-	String out = "";
++        // emacs lingo
++        int mark = 0;
++        int point = 0;
++
++        int len = token.length();
++
++        if (len == 0)
++            return out;
++
++        do {
++            // Scan "point" forward until hitting either the end of
++            // the string or the beginning of a macro
++            if (token.charAt(point) == '<') {
++                // Append (point - mark) to out
++                if ((point - mark) != 0) {
++                    out += token.substring(mark, point);
++                }
++                mark = point + 1;
++                // Scan forward from point for right bracket
++                point++;
++                while ((point < len) &&
++                       (token.charAt(point) != '>')) {
++                    point++;
++                }
++                if (point == len) {
++                    throw new IllegalArgumentException(
++                        "Could not find right angle-bracket in token " + token
++                    );
++                }
++                String name = token.substring(mark, point);
++                if (name == null) {
++                    throw new IllegalArgumentException(
++                        "Empty macro in token " + token
++                    );
++                }
++                try {
++                    String contents = lookup(name);
++                    out += contents;
++                    point++;
++                    mark = point;
++                } catch (NoSuchElementException e) {
++                    throw new IllegalArgumentException(
++                        "Unknown macro " + name + " in token " + token
++                    );
++                }
++            } else {
++                point++;
++            }
++        } while (point != len);
++
++        if (mark != point) {
++            out += token.substring(mark, point);
++        }
+ 
+-	// emacs lingo
+-	int mark = 0;
+-	int point = 0;
+-	
+-	int len = token.length();
+-
+-	if (len == 0)
+-	    return out;
+-
+-	do {
+-	    // Scan "point" forward until hitting either the end of
+-	    // the string or the beginning of a macro
+-	    if (token.charAt(point) == '<') {
+-		// Append (point - mark) to out
+-		if ((point - mark) != 0) {
+-		    out += token.substring(mark, point);
+-		}
+-		mark = point + 1;
+-		// Scan forward from point for right bracket
+-		point++;
+-		while ((point < len) &&
+-		       (token.charAt(point) != '>')) {
+-		    point++;
+-		}
+-		if (point == len) {
+-		    throw new IllegalArgumentException(
+-		        "Could not find right angle-bracket in token " + token
+-		    );
+-		}
+-		String name = token.substring(mark, point);
+-		if (name == null) {
+-		    throw new IllegalArgumentException(
+-		        "Empty macro in token " + token
+-		    );
+-		}
+-		try {
+-		    String contents = lookup(name);
+-		    out += contents;
+-		    point++;
+-		    mark = point;
+-		} catch (NoSuchElementException e) {
+-		    throw new IllegalArgumentException(
+-		        "Unknown macro " + name + " in token " + token
+-		    );
+-		}
+-	    } else {
+-		point++;
+-	    }
+-	} while (point != len);
+-	
+-	if (mark != point) {
+-	    out += token.substring(mark, point);
+-	}
+-	
+-	return out;
++        return out;
+     }
+ 
+     public MacroDefinitions copy() {
+-	MacroDefinitions ret = new MacroDefinitions();
+-	for (Iterator iter = macros.iterator();
+-	     iter.hasNext(); ) {
+-	    Macro orig = (Macro) iter.next();
+-	    Macro macro = new Macro();
+-	    macro.name = orig.name;
+-	    macro.contents = orig.contents;
+-	    ret.macros.add(macro);
+-	}
+-	return ret;
++        MacroDefinitions ret = new MacroDefinitions();
++        for (Iterator iter = macros.iterator();
++             iter.hasNext(); ) {
++            Macro orig = (Macro) iter.next();
++            Macro macro = new Macro();
++            macro.name = orig.name;
++            macro.contents = orig.contents;
++            ret.macros.add(macro);
++        }
++        return ret;
+     }
+ 
+     public void setAllMacroBodiesTo(String s) {
+-	for (Iterator iter = macros.iterator();
+-	     iter.hasNext(); ) {
+-	    Macro macro = (Macro) iter.next();
+-	    macro.contents = s;
+-	}
++        for (Iterator iter = macros.iterator();
++             iter.hasNext(); ) {
++            Macro macro = (Macro) iter.next();
++            macro.contents = s;
++        }
+     }
+ 
+     /** This returns an Iterator of Macros. You should not mutate the
+         returned Macro objects or use the Iterator to remove
+         macros. */
+     public Iterator getMacros() {
+-	return macros.iterator();
++        return macros.iterator();
+     }
+ 
+     private void error(String text) throws FileFormatException {
+-	throw new FileFormatException(
+-	    "Expected \"macroname = value\", but found: " + text
+-	);
++        throw new FileFormatException(
++            "Expected \"macroname = value\", but found: " + text
++        );
+     }
+ }
+diff -ruN openjdk6/hotspot/src/share/tools/MakeDeps/Macro.java openjdk/hotspot/src/share/tools/MakeDeps/Macro.java
+--- openjdk6/hotspot/src/share/tools/MakeDeps/Macro.java	2008-02-28 05:02:31.000000000 -0500
++++ openjdk/hotspot/src/share/tools/MakeDeps/Macro.java	2008-01-31 09:19:00.000000000 -0500
+@@ -19,11 +19,10 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ public class Macro {
+     public String name;
+     public String contents;
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/tools/MakeDeps/MakeDeps.java openjdk/hotspot/src/share/tools/MakeDeps/MakeDeps.java
+--- openjdk6/hotspot/src/share/tools/MakeDeps/MakeDeps.java	2008-02-28 05:02:31.000000000 -0500
++++ openjdk/hotspot/src/share/tools/MakeDeps/MakeDeps.java	2008-01-31 09:19:00.000000000 -0500
+@@ -19,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This program reads an include file database.
+@@ -27,7 +27,7 @@
+ //   but not files in /usr/include
+ // The database consists of pairs of nonblank words, where the first word is
+ //   the filename that needs to include the file named by the second word.
+-// For each .c file, this program generates a fooIncludes.h file that 
++// For each .c file, this program generates a fooIncludes.h file that
+ //  the .c file may include to include all the needed files in the right order.
+ // It also generates a foo.dep file to include in the makefile.
+ // Finally it detects cycles, and can work with two files, an old and a new one.
+@@ -51,186 +51,186 @@
+ public class MakeDeps {
+ 
+     public static void usage() {
+-	System.out.println("usage:");
+-	System.out.println("\tmakeDeps platform-name     platform-file     database-file [MakeDeps args] [platform args]");
+-	System.out.println("\tmakeDeps diffs platform-name old-platform-file old-database-file new-platform-file new-database-file [MakeDeps args] [platform args]");
+-	System.out.println("where platform-name is the name of a platform MakeDeps supports");
+-	System.out.println("(currently \"WinGammaPlatform\" or \"UnixPlatform\")");
+-	System.out.println("MakeDeps options:");
+-	System.out.println("  -firstFile [filename]: Specify the first file in link order (i.e.,");
+-	System.out.println("   to have a well-known function at the start of the output file)");
+-	System.out.println("  -lastFile [filename]: Specify the last file in link order (i.e.,");
+-	System.out.println("   to have a well-known function at the end of the output file)");
+-	System.err.println("WinGammaPlatform platform-specific options:");
+-	System.err.println("  -sourceBase <path to directory (workspace) " +
+-			   "containing source files; no trailing slash>");
+-	System.err.println("  -dspFileName <full pathname to which .dsp file " +
+-			   "will be written; all parent directories must " +
+-			   "already exist>");
+-	System.err.println("  -envVar <environment variable to be inserted " +
+-			   "into .dsp file, substituting for path given in " +
+-			   "-sourceBase. Example: HotSpotWorkSpace>");
+-	System.err.println("  -dllLoc <path to directory in which to put " +
+-			   "jvm.dll and jvm_g.dll; no trailing slash>");
+-	System.err.println("  If any of the above are specified, "+
+-			   "they must all be.");
+-	System.err.println("  Additional, optional arguments, which can be " +
+-			   "specified multiple times:");
+-	System.err.println("    -absoluteInclude <string containing absolute " +
+-			   "path to include directory>");
+-	System.err.println("    -relativeInclude <string containing include " +
+-			   "directory relative to -envVar>");
+-	System.err.println("    -define <preprocessor flag to be #defined " +
+-			   "(note: doesn't yet support " +
+-			   "#define (flag) (value))>");
+-	System.err.println("    -perFileLine <file> <line>");
+-	System.err.println("    -conditionalPerFileLine <file> <line for " +
+-			   "release build> <line for debug build>");
+-	System.err.println("  (NOTE: To work around a bug in nmake, where " +
+-			   "you can't have a '#' character in a quoted " +
+-			   "string, all of the lines outputted have \"#\"" +
+-			   "prepended)");
+-	System.err.println("    -startAt <subdir of sourceBase>");
+-	System.err.println("    -ignoreFile <file which won't be able to be " +
+-			   "found in the sourceBase because it's generated " +
+-			   "later>");
+-	System.err.println("    -additionalFile <file not in database but " +
+-			   "which should show up in .dsp file, like " +
+-			   "includeDB_core>");
+-	System.err.println("    -additionalGeneratedFile <environment variable of " +
+-			   "generated file's location> <relative path to " +
+-			   "directory containing file; no trailing slash> " +
+-			   "<name of file generated later in the build process>");
+-	System.err.println("    -prelink <build> <desc> <cmds>:");
+-	System.err.println(" Generate a set of prelink commands for the given BUILD");
+-	System.err.println(" (\"Debug\" or \"Release\"). The prelink description and commands");
+-	System.err.println(" are both quoted strings.");
+-	System.err.println("    Default includes: \".\"");
+-	System.err.println("    Default defines: WIN32, _WINDOWS, \"HOTSPOT_BUILD_USER=$(USERNAME)\"");
++        System.out.println("usage:");
++        System.out.println("\tmakeDeps platform-name     platform-file     database-file [MakeDeps args] [platform args]");
++        System.out.println("\tmakeDeps diffs platform-name old-platform-file old-database-file new-platform-file new-database-file [MakeDeps args] [platform args]");
++        System.out.println("where platform-name is the name of a platform MakeDeps supports");
++        System.out.println("(currently \"WinGammaPlatform\" or \"UnixPlatform\")");
++        System.out.println("MakeDeps options:");
++        System.out.println("  -firstFile [filename]: Specify the first file in link order (i.e.,");
++        System.out.println("   to have a well-known function at the start of the output file)");
++        System.out.println("  -lastFile [filename]: Specify the last file in link order (i.e.,");
++        System.out.println("   to have a well-known function at the end of the output file)");
++        System.err.println("WinGammaPlatform platform-specific options:");
++        System.err.println("  -sourceBase <path to directory (workspace) " +
++                           "containing source files; no trailing slash>");
++        System.err.println("  -dspFileName <full pathname to which .dsp file " +
++                           "will be written; all parent directories must " +
++                           "already exist>");
++        System.err.println("  -envVar <environment variable to be inserted " +
++                           "into .dsp file, substituting for path given in " +
++                           "-sourceBase. Example: HotSpotWorkSpace>");
++        System.err.println("  -dllLoc <path to directory in which to put " +
++                           "jvm.dll and jvm_g.dll; no trailing slash>");
++        System.err.println("  If any of the above are specified, "+
++                           "they must all be.");
++        System.err.println("  Additional, optional arguments, which can be " +
++                           "specified multiple times:");
++        System.err.println("    -absoluteInclude <string containing absolute " +
++                           "path to include directory>");
++        System.err.println("    -relativeInclude <string containing include " +
++                           "directory relative to -envVar>");
++        System.err.println("    -define <preprocessor flag to be #defined " +
++                           "(note: doesn't yet support " +
++                           "#define (flag) (value))>");
++        System.err.println("    -perFileLine <file> <line>");
++        System.err.println("    -conditionalPerFileLine <file> <line for " +
++                           "release build> <line for debug build>");
++        System.err.println("  (NOTE: To work around a bug in nmake, where " +
++                           "you can't have a '#' character in a quoted " +
++                           "string, all of the lines outputted have \"#\"" +
++                           "prepended)");
++        System.err.println("    -startAt <subdir of sourceBase>");
++        System.err.println("    -ignoreFile <file which won't be able to be " +
++                           "found in the sourceBase because it's generated " +
++                           "later>");
++        System.err.println("    -additionalFile <file not in database but " +
++                           "which should show up in .dsp file, like " +
++                           "includeDB_core>");
++        System.err.println("    -additionalGeneratedFile <environment variable of " +
++                           "generated file's location> <relative path to " +
++                           "directory containing file; no trailing slash> " +
++                           "<name of file generated later in the build process>");
++        System.err.println("    -prelink <build> <desc> <cmds>:");
++        System.err.println(" Generate a set of prelink commands for the given BUILD");
++        System.err.println(" (\"Debug\" or \"Release\"). The prelink description and commands");
++        System.err.println(" are both quoted strings.");
++        System.err.println("    Default includes: \".\"");
++        System.err.println("    Default defines: WIN32, _WINDOWS, \"HOTSPOT_BUILD_USER=$(USERNAME)\"");
+     }
+ 
+     public static void main(String[] args) {
+-	try {
+-	    if (args.length < 3) {
+-		usage();
+-		System.exit(1);
+-	    }
+-	
+-	    int argc = 0;
+-	    boolean diffMode = false;
+-	    if (args[argc].equals("diffs")) {
+-		diffMode = true;
+-		++argc;
+-	    }
+-
+-	    String platformName = args[argc++];
+-	    Class platformClass = Class.forName(platformName);
+-
+-	    String plat1 = null;
+-	    String db1 = null;
+-	    String plat2 = null;
+-	    String db2 = null;
+-
+-	    String firstFile = null;
+-	    String lastFile = null;
+-
+-	    int numOptionalArgs =
+-		(diffMode ? (args.length - 6) : (args.length - 3));
+-	    if (numOptionalArgs < 0) {
+-		usage();
+-		System.exit(1);
+-	    }
+-
+-	    plat1 = args[argc++];
+-	    db1   = args[argc++];
+-
+-	    if (diffMode) {
+-	      plat2 = args[argc++];
+-	      db2   = args[argc++];
+-	    }
+-
+-	    // argc now points at start of optional arguments, if any
+-
+-	    try {
+-	      boolean gotOne = true;
+-	      while (gotOne && (argc < args.length - 1)) {
+-		gotOne = false;
+-		String arg = args[argc];
+-		if (arg.equals("-firstFile")) {
+-		  firstFile = args[argc + 1];
+-		  argc += 2;
+-		  gotOne = true;
+-		} else if (arg.equals("-lastFile")) {
+-		  lastFile = args[argc + 1];
+-		  argc += 2;
+-		  gotOne = true;
+-		}
+-	      }
+-	    }
+-	    catch (Exception e) {
+-	      e.printStackTrace();
+-	      usage();
+-	      System.exit(1);
+-	    }
+-
+-	    Platform platform = (Platform) platformClass.newInstance();
+-	    platform.setupFileTemplates();
+-	    long t = platform.defaultGrandIncludeThreshold();
+-	    
+-	    String[] platformArgs = null;
+-	    int numPlatformArgs = args.length - argc;
+-	    if (numPlatformArgs > 0) {
+-		platformArgs = new String[numPlatformArgs];
+-		int offset = argc;
+-		while (argc < args.length) {
+-		  platformArgs[argc - offset] = args[argc];
+-		  ++argc;
+-		}
+-	    }
+-
+-	    // If you want to change the threshold, change the default
+-	    // "grand include" threshold in Platform.java, or override
+-	    // it in the platform-specific file like UnixPlatform.java
+-
+-	    Database previous = new Database(platform, t);
+-	    Database current = new Database(platform, t);
+-
+-	    previous.canBeMissing();
+-	    
+-	    if (firstFile != null) {
+-	      previous.setFirstFile(firstFile);
+-	      current.setFirstFile(firstFile);
+-	    }
+-	    if (lastFile != null) {
+-	      previous.setLastFile(lastFile);
+-	      current.setLastFile(lastFile);
+-	    }
+-
+-	    if (diffMode) {
+-		System.out.println("Old database:");
+-		previous.get(plat1, db1);
+-		previous.compute();
+-		System.out.println("New database:");
+-		current.get(plat2, db2);
+-		current.compute();
+-		System.out.println("Deltas:");
+-		current.putDiffs(previous);
+-	    } else {
+-		System.out.println("New database:");
+-		current.get(plat1, db1);
+-		current.compute();
+-		current.put();
+-	    }
+-
+-	    if (platformArgs != null) {
+-		// Allow the platform to write platform-specific files
+-		platform.writePlatformSpecificFiles(previous, current,
+-						    platformArgs);
+-	    }
+-	}
+-	catch (Exception e) {
+-	    e.printStackTrace();
+-	      System.exit(1);
+-	}
++        try {
++            if (args.length < 3) {
++                usage();
++                System.exit(1);
++            }
++
++            int argc = 0;
++            boolean diffMode = false;
++            if (args[argc].equals("diffs")) {
++                diffMode = true;
++                ++argc;
++            }
++
++            String platformName = args[argc++];
++            Class platformClass = Class.forName(platformName);
++
++            String plat1 = null;
++            String db1 = null;
++            String plat2 = null;
++            String db2 = null;
++
++            String firstFile = null;
++            String lastFile = null;
++
++            int numOptionalArgs =
++                (diffMode ? (args.length - 6) : (args.length - 3));
++            if (numOptionalArgs < 0) {
++                usage();
++                System.exit(1);
++            }
++
++            plat1 = args[argc++];
++            db1   = args[argc++];
++
++            if (diffMode) {
++              plat2 = args[argc++];
++              db2   = args[argc++];
++            }
++
++            // argc now points at start of optional arguments, if any
++
++            try {
++              boolean gotOne = true;
++              while (gotOne && (argc < args.length - 1)) {
++                gotOne = false;
++                String arg = args[argc];
++                if (arg.equals("-firstFile")) {
++                  firstFile = args[argc + 1];
++                  argc += 2;
++                  gotOne = true;
++                } else if (arg.equals("-lastFile")) {
++                  lastFile = args[argc + 1];
++                  argc += 2;
++                  gotOne = true;
++                }
++              }
++            }
++            catch (Exception e) {
++              e.printStackTrace();
++              usage();
++              System.exit(1);
++            }
++
++            Platform platform = (Platform) platformClass.newInstance();
++            platform.setupFileTemplates();
++            long t = platform.defaultGrandIncludeThreshold();
++
++            String[] platformArgs = null;
++            int numPlatformArgs = args.length - argc;
++            if (numPlatformArgs > 0) {
++                platformArgs = new String[numPlatformArgs];
++                int offset = argc;
++                while (argc < args.length) {
++                  platformArgs[argc - offset] = args[argc];
++                  ++argc;
++                }
++            }
++
++            // If you want to change the threshold, change the default
++            // "grand include" threshold in Platform.java, or override
++            // it in the platform-specific file like UnixPlatform.java
++
++            Database previous = new Database(platform, t);
++            Database current = new Database(platform, t);
++
++            previous.canBeMissing();
++
++            if (firstFile != null) {
++              previous.setFirstFile(firstFile);
++              current.setFirstFile(firstFile);
++            }
++            if (lastFile != null) {
++              previous.setLastFile(lastFile);
++              current.setLastFile(lastFile);
++            }
++
++            if (diffMode) {
++                System.out.println("Old database:");
++                previous.get(plat1, db1);
++                previous.compute();
++                System.out.println("New database:");
++                current.get(plat2, db2);
++                current.compute();
++                System.out.println("Deltas:");
++                current.putDiffs(previous);
++            } else {
++                System.out.println("New database:");
++                current.get(plat1, db1);
++                current.compute();
++                current.put();
++            }
++
++            if (platformArgs != null) {
++                // Allow the platform to write platform-specific files
++                platform.writePlatformSpecificFiles(previous, current,
++                                                    platformArgs);
++            }
++        }
++        catch (Exception e) {
++            e.printStackTrace();
++              System.exit(1);
++        }
+     }
+ }
+diff -ruN openjdk6/hotspot/src/share/tools/MakeDeps/MetroWerksMacPlatform.java openjdk/hotspot/src/share/tools/MakeDeps/MetroWerksMacPlatform.java
+--- openjdk6/hotspot/src/share/tools/MakeDeps/MetroWerksMacPlatform.java	2008-02-28 05:02:31.000000000 -0500
++++ openjdk/hotspot/src/share/tools/MakeDeps/MetroWerksMacPlatform.java	2008-01-31 09:19:00.000000000 -0500
+@@ -19,52 +19,52 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ import java.io.*;
+ 
+ public class MetroWerksMacPlatform extends Platform {
+     public void setupFileTemplates() {
+-	inclFileTemplate = new FileName(this,
+-	    ":incls:", "_", "",                  ".incl", "", ""
+-	);
++        inclFileTemplate = new FileName(this,
++            ":incls:", "_", "",                  ".incl", "", ""
++        );
+         giFileTemplate = new FileName(this,
+-	    "",        "",  "precompiledHeader", ".pch",  "", ""
+-	);
++            "",        "",  "precompiledHeader", ".pch",  "", ""
++        );
+         gdFileTemplate = dummyFileTemplate;
+     }
+ 
+     private static String[] suffixes = { ".cpp", ".c", ".s" };
+ 
+     public String[] outerSuffixes() {
+-	return suffixes;
++        return suffixes;
+     }
+ 
+     public boolean includeGIInEachIncl() {
+-	return true;
++        return true;
+     }
+ 
+     public int defaultGrandIncludeThreshold() {
+-	return 150;
++        return 150;
+     }
+ 
+     public void writeGIPragma(PrintWriter out) {
+-	out.println("#pragma precompile_target \"" +
+-		    giFileTemplate.preStemAltSuff() +
+-		    "\"");
+-	out.println();
++        out.println("#pragma precompile_target \"" +
++                    giFileTemplate.preStemAltSuff() +
++                    "\"");
++        out.println();
+     }
+-    
++
+     public String objFileSuffix() {
+-	throw new RuntimeException("Unimplemented in original makeDeps");
++        throw new RuntimeException("Unimplemented in original makeDeps");
+     }
+ 
+     public String asmFileSuffix() {
+-	throw new RuntimeException("Unimplemented in original makeDeps");
++        throw new RuntimeException("Unimplemented in original makeDeps");
+     }
+ 
+     public String dependentPrefix() {
+-	throw new RuntimeException("Unimplemented in original makeDeps");
++        throw new RuntimeException("Unimplemented in original makeDeps");
+     }
+ }
+diff -ruN openjdk6/hotspot/src/share/tools/MakeDeps/Platform.java openjdk/hotspot/src/share/tools/MakeDeps/Platform.java
+--- openjdk6/hotspot/src/share/tools/MakeDeps/Platform.java	2008-02-28 05:02:31.000000000 -0500
++++ openjdk/hotspot/src/share/tools/MakeDeps/Platform.java	2008-01-31 09:19:00.000000000 -0500
+@@ -19,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ /** Defines what must be specified for each platform. This class must
+@@ -30,33 +30,33 @@
+ public abstract class Platform {
+     /** file name templates capture naming conventions */
+     protected FileName dummyFileTemplate =
+-	new FileName(this, "", "", "", "", "", "");
++        new FileName(this, "", "", "", "", "", "");
+ 
+     // The next three must be instantiated in subclasses' constructors
+ 
+     /** An incl file is produced per .c file and contains all the
+-	includes it needs */
++        includes it needs */
+     protected FileName inclFileTemplate;
+ 
+     /** A GI (grand-include) file has any file used more than N times
+-	for precompiled headers */
++        for precompiled headers */
+     protected FileName giFileTemplate;
+ 
+     /** A GD (grand-dependencies) file that tells Unix make all the
+-	.o's needed for linking and the include dependencies */
++        .o's needed for linking and the include dependencies */
+     protected FileName gdFileTemplate;
+ 
+     // Accessors
+     public FileName getInclFileTemplate() {
+-	return inclFileTemplate;
++        return inclFileTemplate;
+     }
+ 
+     public FileName getGIFileTemplate() {
+-	return giFileTemplate;
++        return giFileTemplate;
+     }
+ 
+     public FileName getGDFileTemplate() {
+-	return gdFileTemplate;
++        return gdFileTemplate;
+     }
+ 
+     // an incl file is the file included by each.c file that includes
+@@ -67,37 +67,37 @@
+ 
+     /** empty file name -> no grand include file */
+     public boolean haveGrandInclude() {
+-	return (giFileTemplate.nameOfList().length() > 0);
++        return (giFileTemplate.nameOfList().length() > 0);
+     }
+ 
+     public boolean writeDeps() {
+-	return (gdFileTemplate.nameOfList().length() > 0);
++        return (gdFileTemplate.nameOfList().length() > 0);
+     }
+ 
+     /** <p> A gi file is the grand-include file. It includes in one
+-	file any file that is included more than a certain number of
+-	times. </p>
++        file any file that is included more than a certain number of
++        times. </p>
+ 
+-	<p> It is used for precompiled header files. </p>
++        <p> It is used for precompiled header files. </p>
+ 
+-	<p> It has a source name, that is the file that this program
+-	generates, and a compiled name; that is the file that is
+-	included by other files. </p>
++        <p> It has a source name, that is the file that this program
++        generates, and a compiled name; that is the file that is
++        included by other files. </p>
+ 
+-	<p> Some platforms have this program actually explictly
+-	include the preprocessed gi file-- see includeGIInEachIncl().
+-	</p>
++        <p> Some platforms have this program actually explictly
++        include the preprocessed gi file-- see includeGIInEachIncl().
++        </p>
+ 
+-	<p> Also, some platforms need a pragma in the GI file. </p> */
++        <p> Also, some platforms need a pragma in the GI file. </p> */
+     public boolean includeGIInEachIncl() {
+-	return false;
++        return false;
+     }
+ 
+     /** For some platforms, e.g. Solaris, include the grand-include
+-	dependencies in the makefile. For others, e.g. Windows, do
+-	not. */
++        dependencies in the makefile. For others, e.g. Windows, do
++        not. */
+     public boolean includeGIDependencies() {
+-	return false;
++        return false;
+     }
+ 
+     /** Should C/C++ source file be dependent on a file included
+@@ -105,27 +105,27 @@
+     public boolean writeDependenciesOnHFilesFromGI() {
+         return false;
+     }
+-    
++
+     /** Default implementation does nothing */
+     public void writeGIPragma(PrintWriter out) {
+     }
+ 
+     /** A line with a filename and the noGrandInclude string means
+-	that this file cannot use the precompiled header. */
++        that this file cannot use the precompiled header. */
+     public String noGrandInclude() {
+-	return "no_precompiled_headers";
++        return "no_precompiled_headers";
+     }
+ 
+     /** A line with a filename and the
+-	generatePlatformDependentInclude means that an include file
+-	for the header file must be generated. This file generated include
+-	file is directly included by the non-platform dependent include file
+-	(e.g os.hpp includes _os_pd.hpp.incl. So while we notice files that
+-	are directly dependent on non-platform dependent files from the database
+-	we must infer the dependence on platform specific files to generate correct
+-	dependences on the platform specific files. */
++        generatePlatformDependentInclude means that an include file
++        for the header file must be generated. This file generated include
++        file is directly included by the non-platform dependent include file
++        (e.g os.hpp includes _os_pd.hpp.incl. So while we notice files that
++        are directly dependent on non-platform dependent files from the database
++        we must infer the dependence on platform specific files to generate correct
++        dependences on the platform specific files. */
+     public String generatePlatformDependentInclude() {
+-	return "generate_platform_dependent_include";
++        return "generate_platform_dependent_include";
+     }
+ 
+     /** Prefix and suffix strings for emitting Makefile rules */
+@@ -137,49 +137,49 @@
+ 
+     /** Abort means an internal error */
+     public void abort() {
+-	throw new RuntimeException("Internal error");
++        throw new RuntimeException("Internal error");
+     }
+ 
+     /** fatalError is used by clients to stop the system */
+     public void fatalError(String msg) {
+-	System.err.println(msg);
+-	System.exit(1);
++        System.err.println(msg);
++        System.exit(1);
+     }
+ 
+     /** Default implementation performs case-sensitive comparison */
+     public boolean fileNameStringEquality(String s1, String s2) {
+-	return s1.equals(s2);
++        return s1.equals(s2);
+     }
+ 
+     public void fileNamePortabilityCheck(String name) {
+-	if (Character.isUpperCase(name.charAt(0))) {
+-	    fatalError("Error: for the sake of portability we have chosen\n" +
+-		       "to avoid files starting with an uppercase letter.\n" +
+-		       "Please rename " + name + ".");
+-	}
++        if (Character.isUpperCase(name.charAt(0))) {
++            fatalError("Error: for the sake of portability we have chosen\n" +
++                       "to avoid files starting with an uppercase letter.\n" +
++                       "Please rename " + name + ".");
++        }
+     }
+ 
+     public void fileNamePortabilityCheck(String name, String matchingName) {
+-	if (!name.equals(matchingName)) {
+-	    fatalError("Error: file " + name + " also appears as " +
+-		       matchingName + ".  Case must be consistent for " +
+-		       "portability.");
+-	}
++        if (!name.equals(matchingName)) {
++            fatalError("Error: file " + name + " also appears as " +
++                       matchingName + ".  Case must be consistent for " +
++                       "portability.");
++        }
+     }
+ 
+     /** max is 31 on mac, so warn */
+     public int fileNameLengthLimit() {
+-	return 40;
++        return 45;
+     }
+ 
+     public int defaultGrandIncludeThreshold() {
+-	return 30;
++        return 30;
+     }
+ 
+     /** Not very general, but this is a way to get platform-specific
+         files to be written. Default implementation does nothing. */
+     public void writePlatformSpecificFiles(Database previousDB,
+-					   Database currentDB, String[] args)
+-	throws IllegalArgumentException, IOException {
++                                           Database currentDB, String[] args)
++        throws IllegalArgumentException, IOException {
+     }
+ }
+diff -ruN openjdk6/hotspot/src/share/tools/MakeDeps/UnixPlatform.java openjdk/hotspot/src/share/tools/MakeDeps/UnixPlatform.java
+--- openjdk6/hotspot/src/share/tools/MakeDeps/UnixPlatform.java	2008-02-28 05:02:31.000000000 -0500
++++ openjdk/hotspot/src/share/tools/MakeDeps/UnixPlatform.java	2008-01-31 09:19:00.000000000 -0500
+@@ -19,42 +19,42 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ public class UnixPlatform extends Platform {
+     public void setupFileTemplates() {
+-	inclFileTemplate = new FileName(this,
+-	    "incls/", "_", "",             ".incl", "", ""
+-	);
+-	giFileTemplate = new FileName(this,
+-	    "incls/", "",  "_precompiled", ".incl", "", ""
+-	);
+-	gdFileTemplate = new FileName(this,
+-	    "",       "",  "Dependencies", "",      "", ""
+-	);
++        inclFileTemplate = new FileName(this,
++            "incls/", "_", "",             ".incl", "", ""
++        );
++        giFileTemplate = new FileName(this,
++            "incls/", "",  "_precompiled", ".incl", "", ""
++        );
++        gdFileTemplate = new FileName(this,
++            "",       "",  "Dependencies", "",      "", ""
++        );
+     }
+-    
++
+     private static String[] suffixes = { ".cpp", ".c", ".s" };
+ 
+     public String[] outerSuffixes() {
+-	return suffixes;
++        return suffixes;
+     }
+ 
+     public String objFileSuffix() {
+-	return ".o";
++        return ".o";
+     }
+ 
+     public String asmFileSuffix() {
+-	return ".i";
++        return ".i";
+     }
+ 
+     public String dependentPrefix() {
+-	return "";
++        return "";
+     }
+ 
+     /** Do not change this; unless you fix things so precompiled
+-	header files get translated into make dependencies. - Ungar */
++        header files get translated into make dependencies. - Ungar */
+     public int defaultGrandIncludeThreshold() {
+        if (System.getProperty("USE_PRECOMPILED_HEADER") != null)
+           return 30;
+@@ -65,9 +65,9 @@
+     /** For Unix make, include the dependencies for precompiled header
+         files. */
+     public boolean includeGIDependencies() {
+-	return false;
++        return false;
+     }
+-  
++
+     /** Should C/C++ source file be dependent on a file included
+         into the grand-include file.
+         On Unix with precompiled headers we don't want each file to be
+diff -ruN openjdk6/hotspot/src/share/tools/MakeDeps/Util.java openjdk/hotspot/src/share/tools/MakeDeps/Util.java
+--- openjdk6/hotspot/src/share/tools/MakeDeps/Util.java	2008-02-28 05:02:31.000000000 -0500
++++ openjdk/hotspot/src/share/tools/MakeDeps/Util.java	2008-01-31 09:19:00.000000000 -0500
+@@ -19,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ import java.util.*;
+@@ -27,62 +27,62 @@
+ 
+ public class Util {
+     static String join(String padder, Vector v) {
+-	return join(padder, v, false);
++        return join(padder, v, false);
+     }
+-    
++
+     static String join(String padder, Vector v, boolean quoted) {
+-	StringBuffer sb = new StringBuffer();
+-	
+-	for (Iterator iter = v.iterator(); iter.hasNext(); ) {
+-	    if (quoted) {
+-		sb.append('"');
+-	    }
+-	    sb.append((String)iter.next());
+-	    if (quoted) {
+-		sb.append('"');
+-	    }
+-	    if (iter.hasNext()) sb.append(padder);
+-	}
++        StringBuffer sb = new StringBuffer();
++
++        for (Iterator iter = v.iterator(); iter.hasNext(); ) {
++            if (quoted) {
++                sb.append('"');
++            }
++            sb.append((String)iter.next());
++            if (quoted) {
++                sb.append('"');
++            }
++            if (iter.hasNext()) sb.append(padder);
++        }
+ 
+-	return sb.toString();
++        return sb.toString();
+     }
+-    
++
+      static String join(String padder, String v[]) {
+-	StringBuffer sb = new StringBuffer();
+-	
+-	for (int i=0; i<v.length; i++) {
+-	    sb.append(v[i]);
+-	    if (i < (v.length  - 1)) sb.append(padder);
+-	}
++        StringBuffer sb = new StringBuffer();
++
++        for (int i=0; i<v.length; i++) {
++            sb.append(v[i]);
++            if (i < (v.length  - 1)) sb.append(padder);
++        }
+ 
+-	return sb.toString();
++        return sb.toString();
+     }
+ 
+-    
++
+ 
+     static String prefixed_join(String padder, Vector v, boolean quoted) {
+-	StringBuffer sb = new StringBuffer();
+-	
+-	for (Iterator iter = v.iterator(); iter.hasNext(); ) {
+-	    sb.append(padder);
+-
+-	    if (quoted) {
+-		sb.append('"');
+-	    }
+-	    sb.append((String)iter.next());
+-	    if (quoted) {
+-		sb.append('"');
+-	    }
+-	}
++        StringBuffer sb = new StringBuffer();
++
++        for (Iterator iter = v.iterator(); iter.hasNext(); ) {
++            sb.append(padder);
+ 
+-	return sb.toString();
++            if (quoted) {
++                sb.append('"');
++            }
++            sb.append((String)iter.next());
++            if (quoted) {
++                sb.append('"');
++            }
++        }
++
++        return sb.toString();
+     }
+ 
+ 
+     static String normalize(String file) {
+-	return file.replace('\\', '/');
++        return file.replace('\\', '/');
+     }
+-    
++
+     static String sep = File.separator;
+     static String os = "Win32"; //System.getProperty("os.name");
+ }
+diff -ruN openjdk6/hotspot/src/share/tools/MakeDeps/WinGammaPlatform.java openjdk/hotspot/src/share/tools/MakeDeps/WinGammaPlatform.java
+--- openjdk6/hotspot/src/share/tools/MakeDeps/WinGammaPlatform.java	2008-02-28 05:02:31.000000000 -0500
++++ openjdk/hotspot/src/share/tools/MakeDeps/WinGammaPlatform.java	2008-01-31 09:19:00.000000000 -0500
+@@ -19,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ import java.io.*;
+@@ -31,43 +31,43 @@
+     static final int HASH   = 3;
+ 
+     boolean nextNotKey(ArgIterator it) {
+-	if (it.next()) {
+-	    String s = it.get();
+-	    return (s.length() == 0) || (s.charAt(0) != '-');
+-	} else {
+-	    return false;
+-	}
++        if (it.next()) {
++            String s = it.get();
++            return (s.length() == 0) || (s.charAt(0) != '-');
++        } else {
++            return false;
++        }
+     }
+ 
+     void empty(String key, String message) {
+-	if (key != null) {
+-	    System.err.println("** Error: empty " + key);
+-	}
+-	if (message != null) {
+-	    System.err.println(message);
+-	}
+-	WinGammaPlatform.usage();
++        if (key != null) {
++            System.err.println("** Error: empty " + key);
++        }
++        if (message != null) {
++            System.err.println(message);
++        }
++        WinGammaPlatform.usage();
+     }
+ 
+     static String getCfg(String val) {
+-	int under = val.indexOf('_');
+-	int len = val.length();
+-	if (under != -1 && under < len - 1) {
+-	    return val.substring(under+1, len);
+-	} else {
+-	    return null;
+-	}
++        int under = val.indexOf('_');
++        int len = val.length();
++        if (under != -1 && under < len - 1) {
++            return val.substring(under+1, len);
++        } else {
++            return null;
++        }
+     }
+ }
+ 
+ class ArgRuleSpecific extends ArgRule {
+     ArgRuleSpecific(String arg, ArgHandler handler) {
+-	super(arg, handler);
++        super(arg, handler);
+     }
+ 
+     boolean match(String rulePattern, String arg) {
+-	return rulePattern.startsWith(arg);
+-    } 
++        return rulePattern.startsWith(arg);
++    }
+ }
+ 
+ 
+@@ -77,147 +77,147 @@
+     int type;
+ 
+     public void handle(ArgIterator it) {
+-	String cfg = getCfg(it.get());
+-	if (nextNotKey(it)) {
+-	    String val = it.get();	    
+-	    switch (type) {
+-	    case VECTOR:
+-		BuildConfig.addFieldVector(cfg, valKey, val);
+-		break;
+-	    case HASH:
+-		BuildConfig.putFieldHash(cfg, valKey, val, "1");
+-		break;
+-	    case STRING:
+-		BuildConfig.putField(cfg, valKey, val);
+-		break;
+-	    default:
+-		empty(valKey, "Unknown type: "+type);		 
+-	    }
+-	    it.next();
+-	    
+-	} else {
+-	    empty(argKey, message);
+-	}
++        String cfg = getCfg(it.get());
++        if (nextNotKey(it)) {
++            String val = it.get();
++            switch (type) {
++            case VECTOR:
++                BuildConfig.addFieldVector(cfg, valKey, val);
++                break;
++            case HASH:
++                BuildConfig.putFieldHash(cfg, valKey, val, "1");
++                break;
++            case STRING:
++                BuildConfig.putField(cfg, valKey, val);
++                break;
++            default:
++                empty(valKey, "Unknown type: "+type);
++            }
++            it.next();
++
++        } else {
++            empty(argKey, message);
++        }
+     }
+ 
+     SpecificHsArgHandler(String argKey, String valKey, String message, int type) {
+-	this.argKey = argKey;	
+-	this.valKey = valKey;
+-	this.message = message;
+-	this.type = type;
++        this.argKey = argKey;
++        this.valKey = valKey;
++        this.message = message;
++        this.type = type;
+     }
+ }
+ 
+ 
+-class HsArgRule extends ArgRuleSpecific {    
++class HsArgRule extends ArgRuleSpecific {
+ 
+     HsArgRule(String argKey, String valKey, String message, int type) {
+-	super(argKey, new SpecificHsArgHandler(argKey, valKey, message, type));
++        super(argKey, new SpecificHsArgHandler(argKey, valKey, message, type));
+     }
+ 
+ }
+ 
+ public abstract class WinGammaPlatform extends Platform {
+     public void setupFileTemplates() {
+-	inclFileTemplate = new FileName(this,
+-	    "incls\\", "_", "",                      ".incl", "", ""
+-	);
+-	giFileTemplate = new FileName(this,
+-	    "incls\\", "",  "_precompiled", ".incl", "", ""
+-	);
+-	gdFileTemplate = new FileName(this,
+-	    "", "",  "Dependencies",         "",      "", ""
+-	);
++        inclFileTemplate = new FileName(this,
++            "incls\\", "_", "",                      ".incl", "", ""
++        );
++        giFileTemplate = new FileName(this,
++            "incls\\", "",  "_precompiled", ".incl", "", ""
++        );
++        gdFileTemplate = new FileName(this,
++            "", "",  "Dependencies",         "",      "", ""
++        );
+     }
+ 
+     private static String[] suffixes = { ".cpp", ".c" };
+ 
+     public String[] outerSuffixes() {
+-	return suffixes;
++        return suffixes;
+     }
+ 
+     public String objFileSuffix() {
+-	return ".obj";
++        return ".obj";
+     }
+ 
+     public String asmFileSuffix() {
+-	return ".i";
++        return ".i";
+     }
+ 
+     public String dependentPrefix() {
+-	return "$(VM_PATH)";
++        return "$(VM_PATH)";
+     }
+ 
+     public boolean includeGIInEachIncl() {
+-	return false;
++        return false;
+     }
+ 
+     public boolean fileNameStringEquality(String s1, String s2) {
+-	return s1.equalsIgnoreCase(s2);
++        return s1.equalsIgnoreCase(s2);
+     }
+ 
+     static void usage() throws IllegalArgumentException {
+-	System.err.println("WinGammaPlatform platform-specific options:");
+-	System.err.println("  -sourceBase <path to directory (workspace) " +
+-			   "containing source files; no trailing slash>");
+-	System.err.println("  -projectFileName <full pathname to which project file " +
+-			   "will be written; all parent directories must " +
+-			   "already exist>");	
+-	System.err.println("  If any of the above are specified, "+
+-			   "they must all be.");
+-	System.err.println("  Additional, optional arguments, which can be " +
+-			   "specified multiple times:");
+-	System.err.println("    -absoluteInclude <string containing absolute " +
+-			   "path to include directory>");
+-	System.err.println("    -relativeInclude <string containing include " +
+-			   "directory relative to -sourceBase>");
+-	System.err.println("    -define <preprocessor flag to be #defined " +
+-			   "(note: doesn't yet support " +
+-			   "#define (flag) (value))>");
+-	System.err.println("    -startAt <subdir of sourceBase>");
+-	System.err.println("    -additionalFile <file not in database but " +
+-			   "which should show up in project file, like " +
+-			   "includeDB_core>");
+-	System.err.println("    -additionalGeneratedFile <absolute path to " +
+-			   "directory containing file; no trailing slash> " +
+-			   "<name of file generated later in the build process>");
+-	throw new IllegalArgumentException();
++        System.err.println("WinGammaPlatform platform-specific options:");
++        System.err.println("  -sourceBase <path to directory (workspace) " +
++                           "containing source files; no trailing slash>");
++        System.err.println("  -projectFileName <full pathname to which project file " +
++                           "will be written; all parent directories must " +
++                           "already exist>");
++        System.err.println("  If any of the above are specified, "+
++                           "they must all be.");
++        System.err.println("  Additional, optional arguments, which can be " +
++                           "specified multiple times:");
++        System.err.println("    -absoluteInclude <string containing absolute " +
++                           "path to include directory>");
++        System.err.println("    -relativeInclude <string containing include " +
++                           "directory relative to -sourceBase>");
++        System.err.println("    -define <preprocessor flag to be #defined " +
++                           "(note: doesn't yet support " +
++                           "#define (flag) (value))>");
++        System.err.println("    -startAt <subdir of sourceBase>");
++        System.err.println("    -additionalFile <file not in database but " +
++                           "which should show up in project file, like " +
++                           "includeDB_core>");
++        System.err.println("    -additionalGeneratedFile <absolute path to " +
++                           "directory containing file; no trailing slash> " +
++                           "<name of file generated later in the build process>");
++        throw new IllegalArgumentException();
+     }
+ 
+-    
++
+     public void addPerFileLine(Hashtable table,
+-			       String fileName,
+-			       String line) {
+-	Vector v = (Vector) table.get(fileName);
+-	if (v != null) {
+-	    v.add(line);
+-	} else {
+-	    v = new Vector();
+-	    v.add(line);
+-	    table.put(fileName, v);
+-	}
++                               String fileName,
++                               String line) {
++        Vector v = (Vector) table.get(fileName);
++        if (v != null) {
++            v.add(line);
++        } else {
++            v = new Vector();
++            v.add(line);
++            table.put(fileName, v);
++        }
+     }
+-			       
++
+     protected static class PerFileCondData {
+-	public String releaseString;
+-	public String debugString;
++        public String releaseString;
++        public String debugString;
+     }
+ 
+     protected void addConditionalPerFileLine(Hashtable table,
+-					   String fileName,
+-					   String releaseLine,
+-					   String debugLine) {
+-	PerFileCondData data = new PerFileCondData();
+-	data.releaseString = releaseLine;
+-	data.debugString = debugLine;
+-	Vector v = (Vector) table.get(fileName);
+-	if (v != null) {
+-	    v.add(data);
+-	} else {
+-	    v = new Vector();
+-	    v.add(data);
+-	    table.put(fileName, v);
+-	}
++                                           String fileName,
++                                           String releaseLine,
++                                           String debugLine) {
++        PerFileCondData data = new PerFileCondData();
++        data.releaseString = releaseLine;
++        data.debugString = debugLine;
++        Vector v = (Vector) table.get(fileName);
++        if (v != null) {
++            v.add(data);
++        } else {
++            v = new Vector();
++            v.add(data);
++            table.put(fileName, v);
++        }
+     }
+ 
+     protected static class PrelinkCommandData {
+@@ -226,9 +226,9 @@
+     }
+ 
+     protected void addPrelinkCommand(Hashtable table,
+-				     String build,
+-				     String description,
+-				     String commands) {
++                                     String build,
++                                     String description,
++                                     String commands) {
+       PrelinkCommandData data = new PrelinkCommandData();
+       data.description = description;
+       data.commands = commands;
+@@ -236,13 +236,13 @@
+     }
+ 
+     public boolean findString(Vector v, String s) {
+-	for (Iterator iter = v.iterator(); iter.hasNext(); ) {
+-	    if (((String) iter.next()).equals(s)) {
+-		return true;
+-	    }
+-	}
++        for (Iterator iter = v.iterator(); iter.hasNext(); ) {
++            if (((String) iter.next()).equals(s)) {
++                return true;
++            }
++        }
+ 
+-	return false;
++        return false;
+     }
+ 
+     /* This returns a String containing the full path to the passed
+@@ -251,515 +251,515 @@
+        preferred paths, the file name is added to the appropriate
+        Vector of Strings. */
+     private String findFileInDirectory(String fileName,
+-				       DirectoryTree directory,
+-				       Vector preferredPaths,
+-				       Vector filesNotFound,
+-				       Vector filesDuplicate) {
+-	List locationsInTree = directory.findFile(fileName);
++                                       DirectoryTree directory,
++                                       Vector preferredPaths,
++                                       Vector filesNotFound,
++                                       Vector filesDuplicate) {
++        List locationsInTree = directory.findFile(fileName);
+         int  rootNameLength = directory.getRootNodeName().length();
+-	String name = null;
+-	if ((locationsInTree == null) ||
+-	    (locationsInTree.size() == 0)) {
+-	    filesNotFound.add(fileName);
+-	} else if (locationsInTree.size() > 1) {
+-            // We shouldn't have duplicate file names in our workspace. 
++        String name = null;
++        if ((locationsInTree == null) ||
++            (locationsInTree.size() == 0)) {
++            filesNotFound.add(fileName);
++        } else if (locationsInTree.size() > 1) {
++            // We shouldn't have duplicate file names in our workspace.
+             System.err.println();
+             System.err.println("There are multiple files named as: " + fileName);
+             System.exit(-1);
+             // The following code could be safely removed if we don't need duplicate
+             // file names.
+ 
+-	    // Iterate through them, trying to find one with a
+-	    // preferred path
+-	search:
+-	    {
+-		for (Iterator locIter = locationsInTree.iterator();
+-		     locIter.hasNext(); ) {
+-		    DirectoryTreeNode node =
+-			(DirectoryTreeNode) locIter.next();
+-		    String tmpName = node.getName();
+-		    for (Iterator prefIter = preferredPaths.iterator();
+-			 prefIter.hasNext(); ) {
+-                        // We need to make sure the preferred path is 
++            // Iterate through them, trying to find one with a
++            // preferred path
++        search:
++            {
++                for (Iterator locIter = locationsInTree.iterator();
++                     locIter.hasNext(); ) {
++                    DirectoryTreeNode node =
++                        (DirectoryTreeNode) locIter.next();
++                    String tmpName = node.getName();
++                    for (Iterator prefIter = preferredPaths.iterator();
++                         prefIter.hasNext(); ) {
++                        // We need to make sure the preferred path is
+                         // found from the file path not including the root node name.
+-			if (tmpName.indexOf((String)prefIter.next(), 
++                        if (tmpName.indexOf((String)prefIter.next(),
+                                             rootNameLength) != -1) {
+-			    name = tmpName;
+-			    break search;
+-			}
+-		    }
+-		}
+-	    }
+-	    
+-	    if (name == null) {
+-		filesDuplicate.add(fileName);
+-	    }
+-	} else {
+-	    name = ((DirectoryTreeNode) locationsInTree.get(0)).getName();
+-	}
+-
+-	return name;
+-    }    
+-				   
++                            name = tmpName;
++                            break search;
++                        }
++                    }
++                }
++            }
++
++            if (name == null) {
++                filesDuplicate.add(fileName);
++            }
++        } else {
++            name = ((DirectoryTreeNode) locationsInTree.get(0)).getName();
++        }
++
++        return name;
++    }
++
+     protected boolean databaseAllFilesEqual(Database previousDB,
+-					    Database currentDB) {
+-	Iterator i1 = previousDB.getAllFiles().iterator();
+-	Iterator i2 = currentDB.getAllFiles().iterator();
+-	
+-	while (i1.hasNext() && i2.hasNext()) {
+-	    FileList fl1 = (FileList) i1.next();
+-	    FileList fl2 = (FileList) i2.next();
+-	    if (!fl1.getName().equals(fl2.getName())) {
+-		return false;
+-	    }
+-	}
+-
+-	if (i1.hasNext() != i2.hasNext()) {
+-	    // Different lengths
+-	    return false;
+-	}
++                                            Database currentDB) {
++        Iterator i1 = previousDB.getAllFiles().iterator();
++        Iterator i2 = currentDB.getAllFiles().iterator();
++
++        while (i1.hasNext() && i2.hasNext()) {
++            FileList fl1 = (FileList) i1.next();
++            FileList fl2 = (FileList) i2.next();
++            if (!fl1.getName().equals(fl2.getName())) {
++                return false;
++            }
++        }
++
++        if (i1.hasNext() != i2.hasNext()) {
++            // Different lengths
++            return false;
++        }
+ 
+-	return true;
++        return true;
+     }
+ 
+     protected String envVarPrefixedFileName(String fileName,
+-					    int sourceBaseLen,
+-					    DirectoryTree tree,
+-					    Vector preferredPaths,
+-					    Vector filesNotFound,
+-					    Vector filesDuplicate) {
+-	String fullName = findFileInDirectory(fileName,
+-					      tree,
+-					      preferredPaths,
+-					      filesNotFound,
+-					      filesDuplicate);
+-	return fullName;
++                                            int sourceBaseLen,
++                                            DirectoryTree tree,
++                                            Vector preferredPaths,
++                                            Vector filesNotFound,
++                                            Vector filesDuplicate) {
++        String fullName = findFileInDirectory(fileName,
++                                              tree,
++                                              preferredPaths,
++                                              filesNotFound,
++                                              filesDuplicate);
++        return fullName;
+     }
+-    
++
+      String getProjectName(String fullPath, String extension)
+-	throws IllegalArgumentException, IOException {
+-	File file = new File(fullPath).getCanonicalFile();
+-	fullPath = file.getCanonicalPath();
+-	String parent = file.getParent();
+-
+-	if (!fullPath.endsWith(extension)) {
+-	    throw new IllegalArgumentException("project file name \"" +
+-					       fullPath +
+-					       "\" does not end in "+extension);
+-	}
+-
+-	if ((parent != null) &&
+-	    (!fullPath.startsWith(parent))) {
+-	    throw new RuntimeException(
+-	        "Internal error: parent of file name \"" + parent +
+-		"\" does not match file name \"" + fullPath + "\""
+-	    );
+-	}
+-
+-	int len = parent.length();
+-	if (!parent.endsWith(Util.sep)) {
+-	    len += Util.sep.length();
+-	}
+-	
+-	int end = fullPath.length() - extension.length();
+-
+-	if (len == end) {
+-	    throw new RuntimeException(
+-	        "Internal error: file name was empty"
+-	    );
+-	}
++        throws IllegalArgumentException, IOException {
++        File file = new File(fullPath).getCanonicalFile();
++        fullPath = file.getCanonicalPath();
++        String parent = file.getParent();
++
++        if (!fullPath.endsWith(extension)) {
++            throw new IllegalArgumentException("project file name \"" +
++                                               fullPath +
++                                               "\" does not end in "+extension);
++        }
++
++        if ((parent != null) &&
++            (!fullPath.startsWith(parent))) {
++            throw new RuntimeException(
++                "Internal error: parent of file name \"" + parent +
++                "\" does not match file name \"" + fullPath + "\""
++            );
++        }
++
++        int len = parent.length();
++        if (!parent.endsWith(Util.sep)) {
++            len += Util.sep.length();
++        }
++
++        int end = fullPath.length() - extension.length();
++
++        if (len == end) {
++            throw new RuntimeException(
++                "Internal error: file name was empty"
++            );
++        }
+ 
+-	return fullPath.substring(len, end);
++        return fullPath.substring(len, end);
+     }
+ 
+     protected abstract String getProjectExt();
+ 
+     public void writePlatformSpecificFiles(Database previousDB,
+-					   Database currentDB, String[] args)	
+-	throws IllegalArgumentException, IOException {
+-	
+-	parseArguments(args);
+-	
+-	String projectFileName = BuildConfig.getFieldString(null, "ProjectFileName");
+-	String ext = getProjectExt();	
+-	
+-	// Compare contents of allFiles of previousDB and includeDB.
+-	// If these haven't changed, then skip writing the .vcproj file.
+-	if (false && databaseAllFilesEqual(previousDB, currentDB) && 
+-	    new File(projectFileName).exists()) {
+-	    System.out.println(
+-			       "    Databases unchanged; skipping overwrite of "+ext+" file."
+-			       );
+-	    return;
+-	} 
+-	
+-	String projectName = getProjectName(projectFileName, ext);
++                                           Database currentDB, String[] args)
++        throws IllegalArgumentException, IOException {
+ 
+-	writeProjectFile(projectFileName, projectName, createAllConfigs());
++        parseArguments(args);
++
++        String projectFileName = BuildConfig.getFieldString(null, "ProjectFileName");
++        String ext = getProjectExt();
++
++        // Compare contents of allFiles of previousDB and includeDB.
++        // If these haven't changed, then skip writing the .vcproj file.
++        if (false && databaseAllFilesEqual(previousDB, currentDB) &&
++            new File(projectFileName).exists()) {
++            System.out.println(
++                               "    Databases unchanged; skipping overwrite of "+ext+" file."
++                               );
++            return;
++        }
++
++        String projectName = getProjectName(projectFileName, ext);
++
++        writeProjectFile(projectFileName, projectName, createAllConfigs());
+     }
+ 
+     protected void writePrologue(String[] args) {
+-	System.err.println("WinGammaPlatform platform-specific arguments:");
+-	for (int i = 0; i < args.length; i++) {
+-	    System.err.print(args[i] + " ");
+-	}
+-	System.err.println();
++        System.err.println("WinGammaPlatform platform-specific arguments:");
++        for (int i = 0; i < args.length; i++) {
++            System.err.print(args[i] + " ");
++        }
++        System.err.println();
+     }
+ 
+ 
+     void setInclFileTemplate(FileName val) {
+-	this.inclFileTemplate = val;
++        this.inclFileTemplate = val;
+     }
+ 
+     void setGIFileTemplate(FileName val) {
+-	this.giFileTemplate = val;
++        this.giFileTemplate = val;
+     }
+ 
+ 
+     void parseArguments(String[] args) {
+-	new ArgsParser(args, 
+-		       new ArgRule[]
+-	    {
+-		new HsArgRule("-sourceBase",
+-			      "SourceBase",
+-			      "   (Did you set the HotSpotWorkSpace environment variable?)",
+-			      HsArgHandler.STRING
+-			      ),
+-
+-		new HsArgRule("-buildBase",
+-			      "BuildBase",
+-			      "   (Did you set the HotSpotBuildSpace environment variable?)",
+-			      HsArgHandler.STRING
+-			      ),
+-
+-		new HsArgRule("-projectFileName",
+-			      "ProjectFileName",
+-			      null,
+-			      HsArgHandler.STRING
+-			      ),
+-		
+-		new HsArgRule("-jdkTargetRoot",
+-			      "JdkTargetRoot",
+-			      "   (Did you set the HotSpotJDKDist environment variable?)",
+-			      HsArgHandler.STRING
+-			      ),
+-
+-		new HsArgRule("-compiler",
+-			      "CompilerVersion",
+-			      "   (Did you set the VcVersion correctly?)",
+-			      HsArgHandler.STRING
+-			      ),
+-
+-		new HsArgRule("-platform",
+-			      "Platform",
+-			      null,
+-			      HsArgHandler.STRING
+-			      ),
+-
+-		new HsArgRule("-absoluteInclude",
+-			      "AbsoluteInclude",
+-			      null,
+-			      HsArgHandler.VECTOR
+-			      ),
+-
+-		new HsArgRule("-relativeInclude",
+-			      "RelativeInclude",
+-			      null,
+-			      HsArgHandler.VECTOR
+-			      ),
+-		
+-		new HsArgRule("-define",
+-			      "Define",
+-			      null,
+-			      HsArgHandler.VECTOR
+-			      ),	
+-
+-		new HsArgRule("-useToGeneratePch",
+-			      "UseToGeneratePch",
+-			      null,
+-			      HsArgHandler.STRING
+-			      ),
+-		
+-		new ArgRuleSpecific("-perFileLine",
+-			    new HsArgHandler() {
+-				public void handle(ArgIterator it) {
+-				    String cfg = getCfg(it.get());
+-				    if (nextNotKey(it)) {
+-					String fileName = it.get();
+-					if (nextNotKey(it)) {
+-					    String line = it.get();
+-					    BuildConfig.putFieldHash(cfg, "PerFileLine", fileName, line);
+-					    it.next();
+-					    return;
+-					}
+-				    }
+-				    empty(null, "** Error: wrong number of args to -perFileLine");
+-				}
+-			    }
+-			    ),
+-
+-		new ArgRuleSpecific("-conditionalPerFileLine",
+-			    new HsArgHandler() {
+-				public void handle(ArgIterator it) {
+-				    String cfg = getCfg(it.get());
+-				    if (nextNotKey(it)) {
+-					String fileName = it.get();
+-					if (nextNotKey(it)) {
+-					    String productLine = it.get();
+-					    if (nextNotKey(it)) {
+-						String debugLine = it.get();
+-						BuildConfig.putFieldHash(cfg+"_debug", "CondPerFileLine", 
+-									 fileName, debugLine);
+-						BuildConfig.putFieldHash(cfg+"_product", "CondPerFileLine", 
+-									 fileName, productLine);
+-						it.next();
+-						return;
+-					    }
+-					}
+-				    }
+-				    
+-				    empty(null, "** Error: wrong number of args to -conditionalPerFileLine");
+-				}
+-			    }
+-			    ),
+-
+-		new HsArgRule("-disablePch",
+-			      "DisablePch",
+-			      null,
+-			      HsArgHandler.HASH
+-			      ),	
+-		
+-		new ArgRule("-startAt",
+-			    new HsArgHandler() {
+-				public void handle(ArgIterator it) {
+-				    if (BuildConfig.getField(null, "StartAt") != null) {
+-					empty(null, "** Error: multiple -startAt");
+-				    }
+-				    if (nextNotKey(it)) {
+-					BuildConfig.putField(null, "StartAt", it.get());
+-					it.next();		       
+-				    } else {
+-					empty("-startAt", null);
+-				    }
+-				}
+-			    }
+-			    ),
+-
+-		new HsArgRule("-ignoreFile",
+-				      "IgnoreFile",
+-				      null,
+-				      HsArgHandler.HASH
+-				      ),
+-	
+-		new HsArgRule("-additionalFile",
+-			      "AdditionalFile",
+-			      null,
+-			      HsArgHandler.VECTOR
+-			      ),
+-
+-		new ArgRuleSpecific("-additionalGeneratedFile",
+-			    new HsArgHandler() {
+-				public void handle(ArgIterator it) {
+-				    String cfg = getCfg(it.get());
+-				    if (nextNotKey(it)) {
+-					String dir = it.get();
+-					if (nextNotKey(it)) {
+-					    String fileName = it.get();
+-					    // we ignore files that we know are generated, so we coudn't 
+-					    // find them in sources
+-					    BuildConfig.putFieldHash(cfg, "IgnoreFile",  fileName, "1");
+-					    BuildConfig.putFieldHash(cfg, "AdditionalGeneratedFile", 
+-								     Util.normalize(dir + Util.sep + fileName), 
+-								     fileName);
+-					    it.next();
+-					    return;
+-					}
+-				    }
+-				    empty(null, "** Error: wrong number of args to -additionalGeneratedFile");
+-				}
+-			    }
+-			    ),	
+-
+-		new HsArgRule("-includeDB",
+-			      "IncludeDB",
+-			      null,
+-			      HsArgHandler.STRING
+-			      ),
+-
+-		new ArgRule("-prelink",
+-			    new HsArgHandler() {
+-				public void handle(ArgIterator it) {
+-				    if (nextNotKey(it)) {
+-					String build = it.get();
+-					if (nextNotKey(it)) {
+-					    String description = it.get();
+-					    if (nextNotKey(it)) {
+-						String command = it.get();
+-						BuildConfig.putField(null, "PrelinkDescription", description);
+-						BuildConfig.putField(null, "PrelinkCommand", command);
+-						it.next();
+-						return;
+-					    }
+-					}
+-				    }
+-
+-				    empty(null,  "** Error: wrong number of args to -prelink");
+-				}
+-			    }
+-			    )		
+-	    },
+-				       new ArgHandler() {
+-					   public void handle(ArgIterator it) {
+-					       
+-					       throw new RuntimeException("Arg Parser: unrecognized option "+it.get());
+-					   }
+-				       }
+-				       );
+-	if (BuildConfig.getField(null, "SourceBase") == null      || 
+-	    BuildConfig.getField(null, "BuildBase") == null       || 
+-	    BuildConfig.getField(null, "ProjectFileName") == null ||
+-	    BuildConfig.getField(null, "CompilerVersion") == null) {
+-	    usage();
+-	}
+-	
+-	if (BuildConfig.getField(null, "UseToGeneratePch") == null) {
+-	    throw new RuntimeException("ERROR: need to specify one file to compute PCH, with -useToGeneratePch flag");
+-	}       
++        new ArgsParser(args,
++                       new ArgRule[]
++            {
++                new HsArgRule("-sourceBase",
++                              "SourceBase",
++                              "   (Did you set the HotSpotWorkSpace environment variable?)",
++                              HsArgHandler.STRING
++                              ),
++
++                new HsArgRule("-buildBase",
++                              "BuildBase",
++                              "   (Did you set the HotSpotBuildSpace environment variable?)",
++                              HsArgHandler.STRING
++                              ),
++
++                new HsArgRule("-projectFileName",
++                              "ProjectFileName",
++                              null,
++                              HsArgHandler.STRING
++                              ),
++
++                new HsArgRule("-jdkTargetRoot",
++                              "JdkTargetRoot",
++                              "   (Did you set the HotSpotJDKDist environment variable?)",
++                              HsArgHandler.STRING
++                              ),
++
++                new HsArgRule("-compiler",
++                              "CompilerVersion",
++                              "   (Did you set the VcVersion correctly?)",
++                              HsArgHandler.STRING
++                              ),
++
++                new HsArgRule("-platform",
++                              "Platform",
++                              null,
++                              HsArgHandler.STRING
++                              ),
++
++                new HsArgRule("-absoluteInclude",
++                              "AbsoluteInclude",
++                              null,
++                              HsArgHandler.VECTOR
++                              ),
++
++                new HsArgRule("-relativeInclude",
++                              "RelativeInclude",
++                              null,
++                              HsArgHandler.VECTOR
++                              ),
++
++                new HsArgRule("-define",
++                              "Define",
++                              null,
++                              HsArgHandler.VECTOR
++                              ),
++
++                new HsArgRule("-useToGeneratePch",
++                              "UseToGeneratePch",
++                              null,
++                              HsArgHandler.STRING
++                              ),
++
++                new ArgRuleSpecific("-perFileLine",
++                            new HsArgHandler() {
++                                public void handle(ArgIterator it) {
++                                    String cfg = getCfg(it.get());
++                                    if (nextNotKey(it)) {
++                                        String fileName = it.get();
++                                        if (nextNotKey(it)) {
++                                            String line = it.get();
++                                            BuildConfig.putFieldHash(cfg, "PerFileLine", fileName, line);
++                                            it.next();
++                                            return;
++                                        }
++                                    }
++                                    empty(null, "** Error: wrong number of args to -perFileLine");
++                                }
++                            }
++                            ),
++
++                new ArgRuleSpecific("-conditionalPerFileLine",
++                            new HsArgHandler() {
++                                public void handle(ArgIterator it) {
++                                    String cfg = getCfg(it.get());
++                                    if (nextNotKey(it)) {
++                                        String fileName = it.get();
++                                        if (nextNotKey(it)) {
++                                            String productLine = it.get();
++                                            if (nextNotKey(it)) {
++                                                String debugLine = it.get();
++                                                BuildConfig.putFieldHash(cfg+"_debug", "CondPerFileLine",
++                                                                         fileName, debugLine);
++                                                BuildConfig.putFieldHash(cfg+"_product", "CondPerFileLine",
++                                                                         fileName, productLine);
++                                                it.next();
++                                                return;
++                                            }
++                                        }
++                                    }
++
++                                    empty(null, "** Error: wrong number of args to -conditionalPerFileLine");
++                                }
++                            }
++                            ),
++
++                new HsArgRule("-disablePch",
++                              "DisablePch",
++                              null,
++                              HsArgHandler.HASH
++                              ),
++
++                new ArgRule("-startAt",
++                            new HsArgHandler() {
++                                public void handle(ArgIterator it) {
++                                    if (BuildConfig.getField(null, "StartAt") != null) {
++                                        empty(null, "** Error: multiple -startAt");
++                                    }
++                                    if (nextNotKey(it)) {
++                                        BuildConfig.putField(null, "StartAt", it.get());
++                                        it.next();
++                                    } else {
++                                        empty("-startAt", null);
++                                    }
++                                }
++                            }
++                            ),
++
++                new HsArgRule("-ignoreFile",
++                                      "IgnoreFile",
++                                      null,
++                                      HsArgHandler.HASH
++                                      ),
++
++                new HsArgRule("-additionalFile",
++                              "AdditionalFile",
++                              null,
++                              HsArgHandler.VECTOR
++                              ),
++
++                new ArgRuleSpecific("-additionalGeneratedFile",
++                            new HsArgHandler() {
++                                public void handle(ArgIterator it) {
++                                    String cfg = getCfg(it.get());
++                                    if (nextNotKey(it)) {
++                                        String dir = it.get();
++                                        if (nextNotKey(it)) {
++                                            String fileName = it.get();
++                                            // we ignore files that we know are generated, so we couldn't
++                                            // find them in sources
++                                            BuildConfig.putFieldHash(cfg, "IgnoreFile",  fileName, "1");
++                                            BuildConfig.putFieldHash(cfg, "AdditionalGeneratedFile",
++                                                                     Util.normalize(dir + Util.sep + fileName),
++                                                                     fileName);
++                                            it.next();
++                                            return;
++                                        }
++                                    }
++                                    empty(null, "** Error: wrong number of args to -additionalGeneratedFile");
++                                }
++                            }
++                            ),
++
++                new HsArgRule("-includeDB",
++                              "IncludeDB",
++                              null,
++                              HsArgHandler.STRING
++                              ),
++
++                new ArgRule("-prelink",
++                            new HsArgHandler() {
++                                public void handle(ArgIterator it) {
++                                    if (nextNotKey(it)) {
++                                        String build = it.get();
++                                        if (nextNotKey(it)) {
++                                            String description = it.get();
++                                            if (nextNotKey(it)) {
++                                                String command = it.get();
++                                                BuildConfig.putField(null, "PrelinkDescription", description);
++                                                BuildConfig.putField(null, "PrelinkCommand", command);
++                                                it.next();
++                                                return;
++                                            }
++                                        }
++                                    }
++
++                                    empty(null,  "** Error: wrong number of args to -prelink");
++                                }
++                            }
++                            )
++            },
++                                       new ArgHandler() {
++                                           public void handle(ArgIterator it) {
++
++                                               throw new RuntimeException("Arg Parser: unrecognized option "+it.get());
++                                           }
++                                       }
++                                       );
++        if (BuildConfig.getField(null, "SourceBase") == null      ||
++            BuildConfig.getField(null, "BuildBase") == null       ||
++            BuildConfig.getField(null, "ProjectFileName") == null ||
++            BuildConfig.getField(null, "CompilerVersion") == null) {
++            usage();
++        }
++
++        if (BuildConfig.getField(null, "UseToGeneratePch") == null) {
++            throw new RuntimeException("ERROR: need to specify one file to compute PCH, with -useToGeneratePch flag");
++        }
+ 
+-	BuildConfig.putField(null, "PlatformObject", this);
++        BuildConfig.putField(null, "PlatformObject", this);
+     }
+ 
+     Vector createAllConfigs() {
+-	Vector allConfigs = new Vector();
+-	
+-	allConfigs.add(new C1DebugConfig());
+-
+-	boolean b = true;
+-	if (b) {
+-	    allConfigs.add(new C1FastDebugConfig());
+-	    allConfigs.add(new C1ProductConfig());
+-	    
+-	    allConfigs.add(new C2DebugConfig());
+-	    allConfigs.add(new C2FastDebugConfig());
+-	    allConfigs.add(new C2ProductConfig());
+-
+-	    allConfigs.add(new TieredDebugConfig());
+-	    allConfigs.add(new TieredFastDebugConfig());
+-	    allConfigs.add(new TieredProductConfig());
+-	    
+-	    allConfigs.add(new CoreDebugConfig());
+-	    allConfigs.add(new CoreFastDebugConfig());
+-	    allConfigs.add(new CoreProductConfig());
+-
+-	    allConfigs.add(new KernelDebugConfig());
+-	    allConfigs.add(new KernelFastDebugConfig());
+-	    allConfigs.add(new KernelProductConfig());
+-	}
++        Vector allConfigs = new Vector();
++
++        allConfigs.add(new C1DebugConfig());
++
++        boolean b = true;
++        if (b) {
++            allConfigs.add(new C1FastDebugConfig());
++            allConfigs.add(new C1ProductConfig());
++
++            allConfigs.add(new C2DebugConfig());
++            allConfigs.add(new C2FastDebugConfig());
++            allConfigs.add(new C2ProductConfig());
+ 
+-	return allConfigs;
++            allConfigs.add(new TieredDebugConfig());
++            allConfigs.add(new TieredFastDebugConfig());
++            allConfigs.add(new TieredProductConfig());
++
++            allConfigs.add(new CoreDebugConfig());
++            allConfigs.add(new CoreFastDebugConfig());
++            allConfigs.add(new CoreProductConfig());
++
++            allConfigs.add(new KernelDebugConfig());
++            allConfigs.add(new KernelFastDebugConfig());
++            allConfigs.add(new KernelProductConfig());
++        }
++
++        return allConfigs;
+     }
+ 
+     class FileAttribute {
+-	int     numConfigs;
+-	Vector  configs;
+-	String  shortName;
+-	boolean noPch, pchRoot;
+-	
+-	FileAttribute(String shortName, BuildConfig cfg, int numConfigs) {
+-	    this.shortName = shortName;
+-	    this.noPch =  (cfg.lookupHashFieldInContext("DisablePch", shortName) != null);
+-	    this.pchRoot = shortName.equals(BuildConfig.getFieldString(null, "UseToGeneratePch"));
+-	    this.numConfigs = numConfigs;
+-
+-	    configs = new Vector();
+-	    add(cfg.get("Name"));
+-	}
+-	
+-	void add(String confName) {
+-	    configs.add(confName);
+-	    
+-	    // if presented in all configs
+-	    if (configs.size() == numConfigs) {
+-		configs = null; 
+-	    }
+-	}
++        int     numConfigs;
++        Vector  configs;
++        String  shortName;
++        boolean noPch, pchRoot;
++
++        FileAttribute(String shortName, BuildConfig cfg, int numConfigs) {
++            this.shortName = shortName;
++            this.noPch =  (cfg.lookupHashFieldInContext("DisablePch", shortName) != null);
++            this.pchRoot = shortName.equals(BuildConfig.getFieldString(null, "UseToGeneratePch"));
++            this.numConfigs = numConfigs;
++
++            configs = new Vector();
++            add(cfg.get("Name"));
++        }
++
++        void add(String confName) {
++            configs.add(confName);
++
++            // if present in all configs
++            if (configs.size() == numConfigs) {
++                configs = null;
++            }
++        }
+     }
+-    
++
+     class FileInfo implements Comparable {
+-	String        full;
+-	FileAttribute attr;
+-	
+-	FileInfo(String full, FileAttribute  attr) {
+-	    this.full = full;
+-	    this.attr = attr;
+-	}
++        String        full;
++        FileAttribute attr;
+ 
+-	public int compareTo(Object o) {
+-	    FileInfo oo = (FileInfo)o;
++        FileInfo(String full, FileAttribute  attr) {
++            this.full = full;
++            this.attr = attr;
++        }
++
++        public int compareTo(Object o) {
++            FileInfo oo = (FileInfo)o;
+             // Don't squelch identical short file names where the full
+             // paths are different
+             if (!attr.shortName.equals(oo.attr.shortName))
+               return attr.shortName.compareTo(oo.attr.shortName);
+             return full.compareTo(oo.full);
+-	}
++        }
+ 
+-	boolean isHeader() {
+-	    return attr.shortName.endsWith(".h") || attr.shortName.endsWith(".hpp");
+-	}
++        boolean isHeader() {
++            return attr.shortName.endsWith(".h") || attr.shortName.endsWith(".hpp");
++        }
+     }
+-    
++
+ 
+     TreeSet sortFiles(Hashtable allFiles) {
+-	TreeSet rv = new TreeSet();
+-    	Enumeration e = allFiles.keys();	
+-	while (e.hasMoreElements()) {
+-	    String fullPath = (String)e.nextElement();
+-	    rv.add(new FileInfo(fullPath, (FileAttribute)allFiles.get(fullPath)));
+-	}
+-	return rv;
++        TreeSet rv = new TreeSet();
++        Enumeration e = allFiles.keys();
++        while (e.hasMoreElements()) {
++            String fullPath = (String)e.nextElement();
++            rv.add(new FileInfo(fullPath, (FileAttribute)allFiles.get(fullPath)));
++        }
++        return rv;
+     }
+-    
++
+     Hashtable computeAttributedFiles(Vector allConfigs) {
+-	Hashtable ht = new Hashtable();
+-	int numConfigs = allConfigs.size();
+-	
+-	for (Iterator i = allConfigs.iterator(); i.hasNext(); ) {
+-	    BuildConfig bc = (BuildConfig)i.next();
+-	    Hashtable  confFiles = (Hashtable)bc.getSpecificField("AllFilesHash");
+-	    String confName = bc.get("Name");
+-	    
+-	    for (Enumeration e=confFiles.keys(); e.hasMoreElements(); ) {
+-		String filePath = (String)e.nextElement();
+-		FileAttribute fa = (FileAttribute)ht.get(filePath);
+-		
+-		if (fa == null) {
+-		    fa = new FileAttribute((String)confFiles.get(filePath), bc, numConfigs);
+-		    ht.put(filePath, fa);
+-		} else {
+-		    fa.add(confName);
+-		}
+-	    }
+-	}
+-	
+-	return ht;
+-    }    
+-    
++        Hashtable ht = new Hashtable();
++        int numConfigs = allConfigs.size();
++
++        for (Iterator i = allConfigs.iterator(); i.hasNext(); ) {
++            BuildConfig bc = (BuildConfig)i.next();
++            Hashtable  confFiles = (Hashtable)bc.getSpecificField("AllFilesHash");
++            String confName = bc.get("Name");
++
++            for (Enumeration e=confFiles.keys(); e.hasMoreElements(); ) {
++                String filePath = (String)e.nextElement();
++                FileAttribute fa = (FileAttribute)ht.get(filePath);
++
++                if (fa == null) {
++                    fa = new FileAttribute((String)confFiles.get(filePath), bc, numConfigs);
++                    ht.put(filePath, fa);
++                } else {
++                    fa.add(confName);
++                }
++            }
++        }
++
++        return ht;
++    }
++
+      Hashtable computeAttributedFiles(BuildConfig bc) {
+-	Hashtable ht = new Hashtable();
+-	Hashtable confFiles = (Hashtable)bc.getSpecificField("AllFilesHash");
+-	    
+-	for (Enumeration e = confFiles.keys(); e.hasMoreElements(); ) {
+-	    String filePath = (String)e.nextElement();
+-	    ht.put(filePath,  new FileAttribute((String)confFiles.get(filePath), bc, 1));
+-	}
+-	
+-	return ht;
+-    }    
++        Hashtable ht = new Hashtable();
++        Hashtable confFiles = (Hashtable)bc.getSpecificField("AllFilesHash");
++
++        for (Enumeration e = confFiles.keys(); e.hasMoreElements(); ) {
++            String filePath = (String)e.nextElement();
++            ht.put(filePath,  new FileAttribute((String)confFiles.get(filePath), bc, 1));
++        }
++
++        return ht;
++    }
+ 
+     PrintWriter printWriter;
+ 
+-    public void writeProjectFile(String projectFileName, String projectName, 
+-				 Vector allConfigs) throws IOException {
+-	throw new RuntimeException("use compiler version specific version");
++    public void writeProjectFile(String projectFileName, String projectName,
++                                 Vector allConfigs) throws IOException {
++        throw new RuntimeException("use compiler version specific version");
+     }
+ }
+diff -ruN openjdk6/hotspot/src/share/tools/MakeDeps/WinGammaPlatformVC6.java openjdk/hotspot/src/share/tools/MakeDeps/WinGammaPlatformVC6.java
+--- openjdk6/hotspot/src/share/tools/MakeDeps/WinGammaPlatformVC6.java	2008-02-28 05:02:31.000000000 -0500
++++ openjdk/hotspot/src/share/tools/MakeDeps/WinGammaPlatformVC6.java	2008-01-31 09:19:00.000000000 -0500
+@@ -19,273 +19,273 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ import java.io.*;
+ import java.util.*;
+ 
+-public class WinGammaPlatformVC6 extends WinGammaPlatform {      
+-    public void writeProjectFile(String projectFileName, String projectName, 
+-				 Vector allConfigs) throws IOException {
+-	Vector allConfigNames = new Vector();
+-
+-	printWriter = new PrintWriter(new FileWriter(projectFileName));
+-	String cfg = ((BuildConfig)allConfigs.get(0)).get("Name");
+-
+-	printWriter.println("# Microsoft Developer Studio Project File - Name=\"" + projectName + "\" - Package Owner=<4>");
+-	printWriter.println("# Microsoft Developer Studio Generated Build File, Format Version 6.00");
+-	printWriter.println("# ** DO NOT EDIT **");
+-	printWriter.println("");
+-	printWriter.println("# TARGTYPE \"Win32 (x86) Dynamic-Link Library\" 0x0102");
+-	printWriter.println("CFG=" + cfg);
+-	printWriter.println("");
+-
+-	printWriter.println("!MESSAGE This is not a valid makefile. To build this project using NMAKE,");
+-	printWriter.println("!MESSAGE use the Export Makefile command and run");
+-	printWriter.println("!MESSAGE ");
+-	printWriter.println("!MESSAGE NMAKE /f \"" + projectName + ".mak\".");
+-	printWriter.println("!MESSAGE ");
+-	printWriter.println("!MESSAGE You can specify a configuration when running NMAKE");
+-	printWriter.println("!MESSAGE by defining the macro CFG on the command line. For example:");
+-	printWriter.println("!MESSAGE ");
+-	printWriter.println("!MESSAGE NMAKE /f \"" + projectName + ".mak\" CFG=\"" + cfg + "\"");
+-	printWriter.println("!MESSAGE ");
+-	printWriter.println("!MESSAGE Possible choices for configuration are:");
+-	printWriter.println("!MESSAGE ");
+-	for (Iterator i = allConfigs.iterator(); i.hasNext(); ) {
+-	    String name = ((BuildConfig)i.next()).get("Name");	    
+-	    printWriter.println("!MESSAGE \""+ name + "\" (based on \"Win32 (x86) Dynamic-Link Library\")");
+-	    allConfigNames.add(name);
+-	}
+-	printWriter.println("!MESSAGE ");
+-	printWriter.println("");
+-	
+-	printWriter.println("# Begin Project");
+-	printWriter.println("# PROP AllowPerConfigDependencies 0");
+-	printWriter.println("# PROP Scc_ProjName \"\"");
+-	printWriter.println("# PROP Scc_LocalPath \"\"");
+-	printWriter.println("CPP=cl.exe");
+-	printWriter.println("MTL=midl.exe");
+-	printWriter.println("RSC=rc.exe");
+-
+-	
+-	String keyword = "!IF";
+-	for (Iterator i = allConfigs.iterator(); i.hasNext(); ) {	    
+-	    BuildConfig bcfg = (BuildConfig)i.next();
+-	    printWriter.println(keyword + "  \"$(CFG)\" == \"" + bcfg.get("Name") + "\"");
+-	    writeConfigHeader(bcfg);
+-	    keyword = "!ELSEIF";
+-	    if (!i.hasNext()) printWriter.println("!ENDIF");
+-	}
+-
+-	
+-	TreeSet sortedFiles = sortFiles(computeAttributedFiles(allConfigs));	
+-	
+-	printWriter.println("# Begin Target");	
+-	
+-	for (Iterator i = allConfigs.iterator(); i.hasNext(); ) {	    
+-	    printWriter.println("# Name \"" + ((BuildConfig)i.next()).get("Name") + "\"");
+-	}
+-	printWriter.println("# Begin Group \"Header Files\"");
+-	printWriter.println("# PROP Default_Filter \"h;hpp;hxx;hm;inl;fi;fd\"");
+-	
+-	Iterator i = sortedFiles.iterator();
+-	
+-	while (i.hasNext()) {
+-	    FileInfo fi = (FileInfo)i.next();
+-	    
+-	    // skip sources
+-	    if (!fi.isHeader()) {
+-		continue;
+-	    }
+-	    
+-	    printFile(fi, allConfigNames);
+-	}
+-	printWriter.println("# End Group");
+-	printWriter.println("");
+-	
+-	printWriter.println("# Begin Group \"Source Files\"");
+-	printWriter.println("# PROP Default_Filter \"cpp;c;cxx;rc;def;r;odl;hpj;bat;for;f90\"");
+-	
+-	i = sortedFiles.iterator();
+-	while (i.hasNext()) {
+-	    FileInfo fi = (FileInfo)i.next();
+-	    
+-	    // skip headers
+-	    if (fi.isHeader()) {
+-		continue;
+-	    }
+-	    
+-	    printFile(fi, allConfigNames);
+-	}	
+-	printWriter.println("# End Group");
+-	printWriter.println("");
+-	
+-	
+-	printWriter.println("# Begin Group \"Resource Files\"");
+-	printWriter.println("# PROP Default_Filter \"ico;cur;bmp;dlg;rc2;rct;bin;cnt;rtf;gif;jpg;jpeg;jpe\"");
+-	printWriter.println("# End Group");
+-	printWriter.println("");
+-	printWriter.println("# End Target");
+-	
+-	printWriter.println("# End Project");	
+-	
+-	printWriter.close();
++public class WinGammaPlatformVC6 extends WinGammaPlatform {
++    public void writeProjectFile(String projectFileName, String projectName,
++                                 Vector allConfigs) throws IOException {
++        Vector allConfigNames = new Vector();
++
++        printWriter = new PrintWriter(new FileWriter(projectFileName));
++        String cfg = ((BuildConfig)allConfigs.get(0)).get("Name");
++
++        printWriter.println("# Microsoft Developer Studio Project File - Name=\"" + projectName + "\" - Package Owner=<4>");
++        printWriter.println("# Microsoft Developer Studio Generated Build File, Format Version 6.00");
++        printWriter.println("# ** DO NOT EDIT **");
++        printWriter.println("");
++        printWriter.println("# TARGTYPE \"Win32 (x86) Dynamic-Link Library\" 0x0102");
++        printWriter.println("CFG=" + cfg);
++        printWriter.println("");
++
++        printWriter.println("!MESSAGE This is not a valid makefile. To build this project using NMAKE,");
++        printWriter.println("!MESSAGE use the Export Makefile command and run");
++        printWriter.println("!MESSAGE ");
++        printWriter.println("!MESSAGE NMAKE /f \"" + projectName + ".mak\".");
++        printWriter.println("!MESSAGE ");
++        printWriter.println("!MESSAGE You can specify a configuration when running NMAKE");
++        printWriter.println("!MESSAGE by defining the macro CFG on the command line. For example:");
++        printWriter.println("!MESSAGE ");
++        printWriter.println("!MESSAGE NMAKE /f \"" + projectName + ".mak\" CFG=\"" + cfg + "\"");
++        printWriter.println("!MESSAGE ");
++        printWriter.println("!MESSAGE Possible choices for configuration are:");
++        printWriter.println("!MESSAGE ");
++        for (Iterator i = allConfigs.iterator(); i.hasNext(); ) {
++            String name = ((BuildConfig)i.next()).get("Name");
++            printWriter.println("!MESSAGE \""+ name + "\" (based on \"Win32 (x86) Dynamic-Link Library\")");
++            allConfigNames.add(name);
++        }
++        printWriter.println("!MESSAGE ");
++        printWriter.println("");
++
++        printWriter.println("# Begin Project");
++        printWriter.println("# PROP AllowPerConfigDependencies 0");
++        printWriter.println("# PROP Scc_ProjName \"\"");
++        printWriter.println("# PROP Scc_LocalPath \"\"");
++        printWriter.println("CPP=cl.exe");
++        printWriter.println("MTL=midl.exe");
++        printWriter.println("RSC=rc.exe");
++
++
++        String keyword = "!IF";
++        for (Iterator i = allConfigs.iterator(); i.hasNext(); ) {
++            BuildConfig bcfg = (BuildConfig)i.next();
++            printWriter.println(keyword + "  \"$(CFG)\" == \"" + bcfg.get("Name") + "\"");
++            writeConfigHeader(bcfg);
++            keyword = "!ELSEIF";
++            if (!i.hasNext()) printWriter.println("!ENDIF");
++        }
++
++
++        TreeSet sortedFiles = sortFiles(computeAttributedFiles(allConfigs));
++
++        printWriter.println("# Begin Target");
++
++        for (Iterator i = allConfigs.iterator(); i.hasNext(); ) {
++            printWriter.println("# Name \"" + ((BuildConfig)i.next()).get("Name") + "\"");
++        }
++        printWriter.println("# Begin Group \"Header Files\"");
++        printWriter.println("# PROP Default_Filter \"h;hpp;hxx;hm;inl;fi;fd\"");
++
++        Iterator i = sortedFiles.iterator();
++
++        while (i.hasNext()) {
++            FileInfo fi = (FileInfo)i.next();
++
++            // skip sources
++            if (!fi.isHeader()) {
++                continue;
++            }
++
++            printFile(fi, allConfigNames);
++        }
++        printWriter.println("# End Group");
++        printWriter.println("");
++
++        printWriter.println("# Begin Group \"Source Files\"");
++        printWriter.println("# PROP Default_Filter \"cpp;c;cxx;rc;def;r;odl;hpj;bat;for;f90\"");
++
++        i = sortedFiles.iterator();
++        while (i.hasNext()) {
++            FileInfo fi = (FileInfo)i.next();
++
++            // skip headers
++            if (fi.isHeader()) {
++                continue;
++            }
++
++            printFile(fi, allConfigNames);
++        }
++        printWriter.println("# End Group");
++        printWriter.println("");
++
++
++        printWriter.println("# Begin Group \"Resource Files\"");
++        printWriter.println("# PROP Default_Filter \"ico;cur;bmp;dlg;rc2;rct;bin;cnt;rtf;gif;jpg;jpeg;jpe\"");
++        printWriter.println("# End Group");
++        printWriter.println("");
++        printWriter.println("# End Target");
++
++        printWriter.println("# End Project");
++
++        printWriter.close();
+     }
+-    
++
+ 
+     void printFile(FileInfo fi, Vector allConfigNames) {
+-	printWriter.println("# Begin Source File");
+-	printWriter.println("");
+-	printWriter.println("SOURCE=\"" + fi.full + "\"");
+-	FileAttribute attr = fi.attr;
+-
+-	if (attr.noPch) {
+-	    printWriter.println("# SUBTRACT CPP /YX /Yc /Yu");
+-	}
+-
+-	if (attr.pchRoot) {
+-	    printWriter.println("# ADD CPP /Yc\"incls/_precompiled.incl\"");
+-	}	
+-	if (attr.configs != null) {
+-	    String keyword = "!IF";
+-	    for (Iterator j=allConfigNames.iterator(); j.hasNext();) {
+-		String cfg = (String)j.next();
+-		if (!attr.configs.contains(cfg)) {
+-		    printWriter.println(keyword+" \"$(CFG)\" == \"" + cfg +"\"");
+-		    printWriter.println("# PROP BASE Exclude_From_Build 1");
+-		    printWriter.println("# PROP Exclude_From_Build 1");
+-		    keyword = "!ELSEIF"; 
+-		}
+-	    }
+-	    printWriter.println("!ENDIF");
+-	}
++        printWriter.println("# Begin Source File");
++        printWriter.println("");
++        printWriter.println("SOURCE=\"" + fi.full + "\"");
++        FileAttribute attr = fi.attr;
++
++        if (attr.noPch) {
++            printWriter.println("# SUBTRACT CPP /YX /Yc /Yu");
++        }
++
++        if (attr.pchRoot) {
++            printWriter.println("# ADD CPP /Yc\"incls/_precompiled.incl\"");
++        }
++        if (attr.configs != null) {
++            String keyword = "!IF";
++            for (Iterator j=allConfigNames.iterator(); j.hasNext();) {
++                String cfg = (String)j.next();
++                if (!attr.configs.contains(cfg)) {
++                    printWriter.println(keyword+" \"$(CFG)\" == \"" + cfg +"\"");
++                    printWriter.println("# PROP BASE Exclude_From_Build 1");
++                    printWriter.println("# PROP Exclude_From_Build 1");
++                    keyword = "!ELSEIF";
++                }
++            }
++            printWriter.println("!ENDIF");
++        }
+ 
+-	printWriter.println("# End Source File");
++        printWriter.println("# End Source File");
+     }
+ 
+     void writeConfigHeader(BuildConfig cfg) {
+-	printWriter.println("# Begin Special Build Tool");	
+-	printWriter.println("SOURCE=\"$(InputPath)\"");
+-	printWriter.println("PreLink_Desc=" +  BuildConfig.getFieldString(null, "PrelinkDescription"));
+-	printWriter.println("PreLink_Cmds=" + 
+-			    cfg.expandFormat(BuildConfig.getFieldString(null, "PrelinkCommand")));
+-	printWriter.println("# End Special Build Tool");
+-	printWriter.println("");
+-
+-	for (Iterator i = cfg.getV("CompilerFlags").iterator(); i.hasNext(); ) {
+-	    printWriter.println("# "+(String)i.next());
+-	}
+-
+-
+-	printWriter.println("LINK32=link.exe");
+-
+-	for (Iterator i = cfg.getV("LinkerFlags").iterator(); i.hasNext(); ) {
+-	    printWriter.println("# "+(String)i.next());
+-	}
+-		
+-	printWriter.println("ADD BASE MTL /nologo /D \"_DEBUG\" /mktyplib203 /win32");
+-	printWriter.println("ADD MTL /nologo /D \"_DEBUG\" /mktyplib203 /win32");
+-	printWriter.println("ADD BASE RSC /l 0x409 /d \"_DEBUG\"");
+-	printWriter.println("ADD RSC /l 0x409 /d \"_DEBUG\"");
+-	printWriter.println("BSC32=bscmake.exe");
+-	printWriter.println("ADD BASE BSC32 /nologo");
+-	printWriter.println("ADD BSC32 /nologo");
+-	printWriter.println("");
++        printWriter.println("# Begin Special Build Tool");
++        printWriter.println("SOURCE=\"$(InputPath)\"");
++        printWriter.println("PreLink_Desc=" +  BuildConfig.getFieldString(null, "PrelinkDescription"));
++        printWriter.println("PreLink_Cmds=" +
++                            cfg.expandFormat(BuildConfig.getFieldString(null, "PrelinkCommand")));
++        printWriter.println("# End Special Build Tool");
++        printWriter.println("");
++
++        for (Iterator i = cfg.getV("CompilerFlags").iterator(); i.hasNext(); ) {
++            printWriter.println("# "+(String)i.next());
++        }
++
++
++        printWriter.println("LINK32=link.exe");
++
++        for (Iterator i = cfg.getV("LinkerFlags").iterator(); i.hasNext(); ) {
++            printWriter.println("# "+(String)i.next());
++        }
++
++        printWriter.println("ADD BASE MTL /nologo /D \"_DEBUG\" /mktyplib203 /win32");
++        printWriter.println("ADD MTL /nologo /D \"_DEBUG\" /mktyplib203 /win32");
++        printWriter.println("ADD BASE RSC /l 0x409 /d \"_DEBUG\"");
++        printWriter.println("ADD RSC /l 0x409 /d \"_DEBUG\"");
++        printWriter.println("BSC32=bscmake.exe");
++        printWriter.println("ADD BASE BSC32 /nologo");
++        printWriter.println("ADD BSC32 /nologo");
++        printWriter.println("");
+     }
+ 
+     protected String getProjectExt() {
+-	return ".dsp";
++        return ".dsp";
+     }
+ }
+ 
+ 
+ class CompilerInterfaceVC6  extends CompilerInterface {
+     Vector getBaseCompilerFlags(Vector defines, Vector includes, String outDir) {
+-	Vector rv = new Vector();
+-	
+-	rv.add("PROP BASE Use_MFC 0");
+-	rv.add("PROP Use_MFC 0");
+-	rv.add("ADD CPP /nologo /MT /W3 /WX /GX /YX /Fr /FD /c");
+-	rv.add("PROP BASE Output_Dir \""+outDir+"\"");
+-	rv.add("PROP Output_Dir \""+outDir+"\"");	
+-	rv.add("PROP BASE Intermediate_Dir \""+outDir+"\"");
+-	rv.add("PROP Intermediate_Dir \""+outDir+"\"");	
+-	rv.add("PROP BASE Target_Dir \"\"");
+-	rv.add("PROP Target_Dir \"\"");
+-	rv.add("ADD BASE CPP "+Util.prefixed_join(" /I ", includes, true));
+-	rv.add("ADD CPP "+Util.prefixed_join(" /I ", includes, true));
+-	rv.add("ADD BASE CPP "+Util.prefixed_join(" /D ", defines, true));
+-	rv.add("ADD CPP "+Util.prefixed_join(" /D ", defines, true));
+-	rv.add("ADD CPP /Yu\"incls/_precompiled.incl\"");
++        Vector rv = new Vector();
++
++        rv.add("PROP BASE Use_MFC 0");
++        rv.add("PROP Use_MFC 0");
++        rv.add("ADD CPP /nologo /MT /W3 /WX /GX /YX /Fr /FD /c");
++        rv.add("PROP BASE Output_Dir \""+outDir+"\"");
++        rv.add("PROP Output_Dir \""+outDir+"\"");
++        rv.add("PROP BASE Intermediate_Dir \""+outDir+"\"");
++        rv.add("PROP Intermediate_Dir \""+outDir+"\"");
++        rv.add("PROP BASE Target_Dir \"\"");
++        rv.add("PROP Target_Dir \"\"");
++        rv.add("ADD BASE CPP "+Util.prefixed_join(" /I ", includes, true));
++        rv.add("ADD CPP "+Util.prefixed_join(" /I ", includes, true));
++        rv.add("ADD BASE CPP "+Util.prefixed_join(" /D ", defines, true));
++        rv.add("ADD CPP "+Util.prefixed_join(" /D ", defines, true));
++        rv.add("ADD CPP /Yu\"incls/_precompiled.incl\"");
+ 
+-	return rv;
++        return rv;
+     }
+ 
+     Vector getBaseLinkerFlags(String outDir, String outDll) {
+-	Vector rv = new Vector();
++        Vector rv = new Vector();
+ 
+-	rv.add("PROP Ignore_Export_Lib 0");
+-	rv.add("ADD BASE CPP /MD");
+-	rv.add("ADD CPP /MD");
+-	rv.add("ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib " + 
+-	       "           advapi32.lib shell32.lib ole32.lib oleaut32.lib winmm.lib");	
+-	rv.add("ADD LINK32      /out:\""+outDll+"\" "+
+-	       "                /nologo /subsystem:windows /machine:I386" +
+-	       "                /nologo /base:\"0x8000000\" /subsystem:windows /dll" +
+-	       "                /export:JNI_GetDefaultJavaVMInitArgs /export:JNI_CreateJavaVM /export:JNI_GetCreatedJavaVMs "+ 
+-	       "                /export:jio_snprintf /export:jio_printf /export:jio_fprintf /export:jio_vfprintf "+
+-	       "                /export:jio_vsnprintf /export:JVM_EnqueueOperation ");
+-	rv.add("SUBTRACT LINK32 /pdb:none /map");
++        rv.add("PROP Ignore_Export_Lib 0");
++        rv.add("ADD BASE CPP /MD");
++        rv.add("ADD CPP /MD");
++        rv.add("ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib " +
++               "           advapi32.lib shell32.lib ole32.lib oleaut32.lib winmm.lib");
++        rv.add("ADD LINK32      /out:\""+outDll+"\" "+
++               "                /nologo /subsystem:windows /machine:I386" +
++               "                /nologo /base:\"0x8000000\" /subsystem:windows /dll" +
++               "                /export:JNI_GetDefaultJavaVMInitArgs /export:JNI_CreateJavaVM /export:JNI_GetCreatedJavaVMs "+
++               "                /export:jio_snprintf /export:jio_printf /export:jio_fprintf /export:jio_vfprintf "+
++               "                /export:jio_vsnprintf ");
++        rv.add("SUBTRACT LINK32 /pdb:none /map");
+ 
+-	return rv;
++        return rv;
+     }
+-    
++
+     Vector getDebugCompilerFlags(String opt) {
+-	Vector rv = new Vector();
+-	
+-	rv.add("ADD BASE CPP /Gm /Zi /O"+opt);
++        Vector rv = new Vector();
++
++        rv.add("ADD BASE CPP /Gm /Zi /O"+opt);
+ 
+-	return rv;
++        return rv;
+     }
+ 
+     Vector getDebugLinkerFlags() {
+-	Vector rv = new Vector();
++        Vector rv = new Vector();
+ 
+-	rv.add("PROP BASE Use_Debug_Libraries 1");
+-	rv.add("PROP Use_Debug_Libraries 1");
+-	rv.add("ADD LINK32 /debug");
+-	
+-	return rv;
++        rv.add("PROP BASE Use_Debug_Libraries 1");
++        rv.add("PROP Use_Debug_Libraries 1");
++        rv.add("ADD LINK32 /debug");
++
++        return rv;
+     }
+ 
+     Vector getProductCompilerFlags() {
+-	Vector rv = new Vector();       	
+-	
+-	rv.add("ADD CPP /O"+getOptFlag());
++        Vector rv = new Vector();
++
++        rv.add("ADD CPP /O"+getOptFlag());
+ 
+-	return rv;
++        return rv;
+     }
+ 
+     Vector getProductLinkerFlags() {
+-	Vector rv = new Vector();
++        Vector rv = new Vector();
+ 
+-	rv.add("PROP BASE Use_Debug_Libraries 0");
+-	rv.add("PROP Use_Debug_Libraries 0");	
++        rv.add("PROP BASE Use_Debug_Libraries 0");
++        rv.add("PROP Use_Debug_Libraries 0");
+ 
+-	return rv;
++        return rv;
+     }
+ 
+     String getOptFlag() {
+-	return "2";
++        return "2";
+     }
+ 
+     String getNoOptFlag() {
+-	return "d";
++        return "d";
+     }
+ 
+     String makeCfgName(String flavourBuild) {
+-	return "vm - "+ Util.os + " " + flavourBuild;
++        return "vm - "+ Util.os + " " + flavourBuild;
+     }
+ }
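
Besides the tab-to-space reindentation, the VC6 hunk makes one functional change: the replacement getBaseLinkerFlags no longer passes /export:JVM_EnqueueOperation in the ADD LINK32 line (compare the removed block with its replacement above), so the updated sources stop exporting that symbol from the generated VC6 project. The writer also leans on a small keyword-rotation idiom when emitting the per-configuration !IF blocks; a self-contained sketch of just that idiom (illustrative names, the body of each block omitted):

    import java.util.*;

    class DspConfigBlocks {
        // First block opens with !IF, later ones with !ELSEIF, and !ENDIF
        // is printed immediately after the last block has been opened --
        // the same rotation writeProjectFile and printFile use above.
        static String emit(List<String> configNames) {
            StringBuilder out = new StringBuilder();
            String keyword = "!IF";
            for (Iterator<String> i = configNames.iterator(); i.hasNext(); ) {
                out.append(keyword).append("  \"$(CFG)\" == \"").append(i.next()).append("\"\n");
                keyword = "!ELSEIF";
                if (!i.hasNext()) out.append("!ENDIF\n");
            }
            return out.toString();
        }
    }

Rotating the keyword through a local variable avoids special-casing the first iteration, at the cost of the slightly awkward !ENDIF print inside the loop.
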
+diff -ruN openjdk6/hotspot/src/share/tools/MakeDeps/WinGammaPlatformVC7.java openjdk/hotspot/src/share/tools/MakeDeps/WinGammaPlatformVC7.java
+--- openjdk6/hotspot/src/share/tools/MakeDeps/WinGammaPlatformVC7.java	2008-02-28 05:02:31.000000000 -0500
++++ openjdk/hotspot/src/share/tools/MakeDeps/WinGammaPlatformVC7.java	2008-01-31 09:19:00.000000000 -0500
+@@ -19,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ import java.io.*;
+@@ -27,474 +27,476 @@
+ 
+ public class WinGammaPlatformVC7 extends WinGammaPlatform {
+ 
+-    public void writeProjectFile(String projectFileName, String projectName, 
+-				 Vector allConfigs) throws IOException {
+-	System.out.println();
+-	System.out.println("    Writing .vcproj file...");
+-	// If we got this far without an error, we're safe to actually
+-	// write the .vcproj file
+-	printWriter = new PrintWriter(new FileWriter(projectFileName));
+-	
+-	printWriter.println("<?xml version=\"1.0\" encoding=\"windows-1251\"?>");
+-	startTag(
+-	    "VisualStudioProject", 
+-	    new String[] {
+-		"ProjectType", "Visual C++",
+-		"Version", "7.10",
+-		"Name", projectName,
+-		"ProjectGUID", "{8822CB5C-1C41-41C2-8493-9F6E1994338B}",
+-		"SccProjectName", "",
+-		"SccLocalPath", ""
+-	    }
+-	    );
+-	
+-	startTag("Platforms", null);
+-	tag("Platform", new String[] {"Name", Util.os});
+-	endTag("Platforms");	
+-	
+-	startTag("Configurations", null);
+-    
+-	for (Iterator i = allConfigs.iterator(); i.hasNext(); ) {	    
+-	    writeConfiguration((BuildConfig)i.next());  
+-	}
+-
+-	endTag("Configurations");
+-
+-	tag("References", null);       	
+-
+-	writeFiles(allConfigs);	
+-	
+-	tag("Globals", null);
+-	
+-	endTag("VisualStudioProject");
+-	printWriter.close();
++    public void writeProjectFile(String projectFileName, String projectName,
++                                 Vector allConfigs) throws IOException {
++        System.out.println();
++        System.out.println("    Writing .vcproj file...");
++        // If we got this far without an error, we're safe to actually
++        // write the .vcproj file
++        printWriter = new PrintWriter(new FileWriter(projectFileName));
++
++        printWriter.println("<?xml version=\"1.0\" encoding=\"windows-1251\"?>");
++        startTag(
++            "VisualStudioProject",
++            new String[] {
++                "ProjectType", "Visual C++",
++                "Version", "7.10",
++                "Name", projectName,
++                "ProjectGUID", "{8822CB5C-1C41-41C2-8493-9F6E1994338B}",
++                "SccProjectName", "",
++                "SccLocalPath", ""
++            }
++            );
++
++        startTag("Platforms", null);
++        tag("Platform", new String[] {"Name", Util.os});
++        endTag("Platforms");
++
++        startTag("Configurations", null);
++
++        for (Iterator i = allConfigs.iterator(); i.hasNext(); ) {
++            writeConfiguration((BuildConfig)i.next());
++        }
++
++        endTag("Configurations");
++
++        tag("References", null);
+ 
+-	System.out.println("    Done.");
++        writeFiles(allConfigs);
++
++        tag("Globals", null);
++
++        endTag("VisualStudioProject");
++        printWriter.close();
++
++        System.out.println("    Done.");
+     }
+ 
+ 
+     abstract class NameFilter {
+-	protected String fname;
++        protected String fname;
+ 
+-	abstract boolean match(FileInfo fi);
++        abstract boolean match(FileInfo fi);
+ 
+-	String  filterString() { return ""; }
+-	String name() { return this.fname;}
++        String  filterString() { return ""; }
++        String name() { return this.fname;}
+     }
+ 
+     class DirectoryFilter extends NameFilter {
+-	String dir;
+-	int baseLen, dirLen;
++        String dir;
++        int baseLen, dirLen;
++
++        DirectoryFilter(String dir, String sbase) {
++            this.dir = dir;
++            this.baseLen = sbase.length();
++            this.dirLen = dir.length();
++            this.fname = dir;
++        }
++
++        DirectoryFilter(String fname, String dir, String sbase) {
++            this.dir = dir;
++            this.baseLen = sbase.length();
++            this.dirLen = dir.length();
++            this.fname = fname;
++        }
+ 
+-	DirectoryFilter(String dir, String sbase) {
+-	    this.dir = dir;
+-	    this.baseLen = sbase.length();
+-	    this.dirLen = dir.length();
+-	    this.fname = dir;
+-	}
+-
+-	DirectoryFilter(String fname, String dir, String sbase) {
+-	    this.dir = dir;
+-	    this.baseLen = sbase.length();
+-	    this.dirLen = dir.length();
+-	    this.fname = fname;
+-	}
+-	
+-
+-	boolean match(FileInfo fi) {
+-	    return fi.full.regionMatches(true, baseLen, dir, 0, dirLen);
+-	}
++
++        boolean match(FileInfo fi) {
++            return fi.full.regionMatches(true, baseLen, dir, 0, dirLen);
++        }
+     }
+ 
+     class TypeFilter extends NameFilter {
+-	String[] exts;
++        String[] exts;
+ 
+-	TypeFilter(String fname, String[] exts) {
+-	    this.fname = fname;
+-	    this.exts = exts;
+-	}
+-
+-	boolean match(FileInfo fi) {
+-	    for (int i=0; i<exts.length; i++) {
+-		if (fi.full.endsWith(exts[i])) {
+-		    return true;
+-		}
+-	    }
+-	    return false;
+-	}
+-
+-	String  filterString() {
+-	    return Util.join(";", exts);
+-	}
+-    } 
++        TypeFilter(String fname, String[] exts) {
++            this.fname = fname;
++            this.exts = exts;
++        }
++
++        boolean match(FileInfo fi) {
++            for (int i=0; i<exts.length; i++) {
++                if (fi.full.endsWith(exts[i])) {
++                    return true;
++                }
++            }
++            return false;
++        }
++
++        String  filterString() {
++            return Util.join(";", exts);
++        }
++    }
+ 
+     class TerminatorFilter extends NameFilter {
+-	TerminatorFilter(String fname) {
+-	    this.fname = fname;
+-	    
+-	}
+-	boolean match(FileInfo fi) {
+-	    return true;
+-	}
+-	
++        TerminatorFilter(String fname) {
++            this.fname = fname;
++
++        }
++        boolean match(FileInfo fi) {
++            return true;
++        }
++
+     }
+ 
+     class SpecificNameFilter extends NameFilter {
+-	String pats[];
++        String pats[];
++
++        SpecificNameFilter(String fname, String[] pats) {
++            this.fname = fname;
++            this.pats = pats;
++        }
++
++        boolean match(FileInfo fi) {
++            for (int i=0; i<pats.length; i++) {
++                if (fi.attr.shortName.matches(pats[i])) {
++                    return true;
++                }
++            }
++            return false;
++        }
+ 
+-	SpecificNameFilter(String fname, String[] pats) {
+-	    this.fname = fname;
+-	    this.pats = pats;
+-	}
+-
+-	boolean match(FileInfo fi) {
+-	    for (int i=0; i<pats.length; i++) {
+-		if (fi.attr.shortName.matches(pats[i])) {
+-		    return true;
+-		}
+-	    }
+-	    return false;
+-	}
+-	
+     }
+ 
+     class ContainerFilter extends NameFilter {
+-	Vector children;
+-	
+-	ContainerFilter(String fname) {
+-	    this.fname = fname;
+-	    children = new Vector();
+-	    
+-	}
+-	boolean match(FileInfo fi) {	 
+-	    return false;
+-	}
+-	
+-	Iterator babies() { return children.iterator(); }
+-
+-	void add(NameFilter f) {
+-	    children.add(f);
+-	}
+-    }
+-
+-    
+-    void writeCustomToolConfig(Vector configs, String[] customToolAttrs) {	
+-	for (Iterator i = configs.iterator(); i.hasNext(); ) {
+-	    startTag("FileConfiguration",
+-		     new String[] {
+-			 "Name",  (String)i.next()
+-		     }
+-		     );
+-	    tag("Tool", customToolAttrs);
++        Vector children;
++
++        ContainerFilter(String fname) {
++            this.fname = fname;
++            children = new Vector();
++
++        }
++        boolean match(FileInfo fi) {
++            return false;
++        }
++
++        Iterator babies() { return children.iterator(); }
++
++        void add(NameFilter f) {
++            children.add(f);
++        }
++    }
+ 
+-	    endTag("FileConfiguration");
+-	}
++
++    void writeCustomToolConfig(Vector configs, String[] customToolAttrs) {
++        for (Iterator i = configs.iterator(); i.hasNext(); ) {
++            startTag("FileConfiguration",
++                     new String[] {
++                         "Name",  (String)i.next()
++                     }
++                     );
++            tag("Tool", customToolAttrs);
++
++            endTag("FileConfiguration");
++        }
+     }
+ 
+     // here we define filters, which define layout of what can be seen in 'Solution View' of MSVC
+     // Basically there are two types of entities - container filters and real filters
+     //   - container filter just provides a container to group together real filters
+-    //   - real filter can select elements from the set according to some rule, put it into XML 
+-    //     and remove from the list 
++    //   - real filter can select elements from the set according to some rule, put it into XML
++    //     and remove from the list
+     Vector makeFilters(TreeSet files) {
+-	Vector rv = new Vector();
+-	String sbase = Util.normalize(BuildConfig.getFieldString(null, "SourceBase")+"/src/");
++        Vector rv = new Vector();
++        String sbase = Util.normalize(BuildConfig.getFieldString(null, "SourceBase")+"/src/");
++
++        ContainerFilter rt = new ContainerFilter("Runtime");
++        rt.add(new DirectoryFilter("share/vm/prims", sbase));
++        rt.add(new DirectoryFilter("share/vm/runtime", sbase));
++        rt.add(new DirectoryFilter("share/vm/oops", sbase));
++        rv.add(rt);
++
++        ContainerFilter gc = new ContainerFilter("GC");
++        gc.add(new DirectoryFilter("share/vm/memory", sbase));
++        gc.add(new DirectoryFilter("share/vm/gc_interface", sbase));
++
++        ContainerFilter gc_impl = new ContainerFilter("Implementations");
++        gc_impl.add(new DirectoryFilter("CMS",
++                                        "share/vm/gc_implementation/concurrentMarkSweep",
++                                        sbase));
++        gc_impl.add(new DirectoryFilter("Parallel Scavenge",
++                                        "share/vm/gc_implementation/parallelScavenge",
++                                        sbase));
++        gc_impl.add(new DirectoryFilter("Shared",
++                                        "share/vm/gc_implementation/shared",
++                                        sbase));
++        // for all leftovers
++        gc_impl.add(new DirectoryFilter("Misc",
++                                        "share/vm/gc_implementation",
++                                        sbase));
++
++        gc.add(gc_impl);
++        rv.add(gc);
++
++        rv.add(new DirectoryFilter("C1", "share/vm/c1", sbase));
++
++        ContainerFilter c2 = new ContainerFilter("C2");
++        //c2.add(new DirectoryFilter("share/vm/adlc", sbase));
++        c2.add(new DirectoryFilter("share/vm/opto", sbase));
++        c2.add(new SpecificNameFilter("Generated", new String[] {"^ad_.+", "^dfa_.+", "^adGlobals.+"}));
++        rv.add(c2);
++
++        ContainerFilter comp = new ContainerFilter("Compiler Common");
++        comp.add(new DirectoryFilter("share/vm/asm", sbase));
++        comp.add(new DirectoryFilter("share/vm/ci", sbase));
++        comp.add(new DirectoryFilter("share/vm/code", sbase));
++        comp.add(new DirectoryFilter("share/vm/compiler", sbase));
++        rv.add(comp);
++
++        rv.add(new DirectoryFilter("Interpreter",
++                                   "share/vm/interpreter",
++                                   sbase));
++
++        ContainerFilter misc = new ContainerFilter("Misc");
++        //misc.add(new DirectoryFilter("share/vm/launch", sbase));
++        misc.add(new DirectoryFilter("share/vm/libadt", sbase));
++        misc.add(new DirectoryFilter("share/vm/services", sbase));
++        misc.add(new DirectoryFilter("share/vm/utilities", sbase));
++        rv.add(misc);
++
++        rv.add(new DirectoryFilter("os_cpu", sbase));
+ 
+-	ContainerFilter rt = new ContainerFilter("Runtime");	
+-	rt.add(new DirectoryFilter("share/vm/prims", sbase));
+-	rt.add(new DirectoryFilter("share/vm/runtime", sbase));
+-	rt.add(new DirectoryFilter("share/vm/oops", sbase));
+-	rv.add(rt);
+-
+-	ContainerFilter gc = new ContainerFilter("GC");
+-	gc.add(new DirectoryFilter("share/vm/memory", sbase));
+-	gc.add(new DirectoryFilter("share/vm/gc_interface", sbase));
+-
+-	ContainerFilter gc_impl = new ContainerFilter("Implementations");
+-	gc_impl.add(new DirectoryFilter("CMS", 
+-					"share/vm/gc_implementation/concurrentMarkSweep", 
+-					sbase));
+-	gc_impl.add(new DirectoryFilter("Parallel Scavenge", 
+-					"share/vm/gc_implementation/parallelScavenge", 
+-					sbase));
+-	gc_impl.add(new DirectoryFilter("Shared",
+-					"share/vm/gc_implementation/shared", 
+-					sbase));		
+-	// for all leftovers
+-	gc_impl.add(new DirectoryFilter("Misc",
+-					"share/vm/gc_implementation", 
+-					sbase));
+-	
+-	gc.add(gc_impl);
+-	rv.add(gc);
+-	
+-	rv.add(new DirectoryFilter("C1", "share/vm/c1", sbase));
+-	
+-	ContainerFilter c2 = new ContainerFilter("C2");
+-	//c2.add(new DirectoryFilter("share/vm/adlc", sbase));
+-	c2.add(new DirectoryFilter("share/vm/opto", sbase));
+-	c2.add(new SpecificNameFilter("Generated", new String[] {"^ad_.+", "^dfa_.+", "^adGlobals.+"}));
+-	rv.add(c2);
+-	
+-	ContainerFilter comp = new ContainerFilter("Compiler Common");
+-	comp.add(new DirectoryFilter("share/vm/asm", sbase));
+-	comp.add(new DirectoryFilter("share/vm/ci", sbase));
+-	comp.add(new DirectoryFilter("share/vm/code", sbase));
+-	comp.add(new DirectoryFilter("share/vm/compiler", sbase));
+-	rv.add(comp);
+-	
+-	rv.add(new DirectoryFilter("Interpreter", 
+-				   "share/vm/interpreter", 
+-				   sbase));
+-
+-	ContainerFilter misc = new ContainerFilter("Misc");
+-	//misc.add(new DirectoryFilter("share/vm/launch", sbase));
+-	misc.add(new DirectoryFilter("share/vm/libadt", sbase));	
+-	misc.add(new DirectoryFilter("share/vm/services", sbase));
+-	misc.add(new DirectoryFilter("share/vm/utilities", sbase));
+-	rv.add(misc);
+-
+-	rv.add(new DirectoryFilter("os_cpu", sbase));
+-
+-	rv.add(new DirectoryFilter("cpu", sbase));
+-
+-	rv.add(new DirectoryFilter("os", sbase));
+-
+-	rv.add(new SpecificNameFilter("JVMTI Generated", new String[] {"^jvmti.+"}));
+-	
+-	rv.add(new SpecificNameFilter("Include DBs", new String[] {"^includeDB_.+"}));
+-
+-	// this one is to catch files not caught by other filters
+-	//rv.add(new TypeFilter("Header Files", new String[] {"h", "hpp", "hxx", "hm", "inl", "fi", "fd"}));
+-	rv.add(new TerminatorFilter("Source Files"));
+-
+-	return rv;
+-    }
+-
+-    void writeFiles(Vector allConfigs) {	
+-
+-	Hashtable allFiles = computeAttributedFiles(allConfigs); 
+-
+-	Vector allConfigNames = new Vector();
+-	for (Iterator i = allConfigs.iterator(); i.hasNext(); ) {
+-	    allConfigNames.add(((BuildConfig)i.next()).get("Name"));
+-	}
+-
+-	TreeSet sortedFiles = sortFiles(allFiles);
+-
+-	startTag("Files", null);
+-	
+-	for (Iterator i = makeFilters(sortedFiles).iterator(); i.hasNext(); ) {
+-	    doWriteFiles(sortedFiles, allConfigNames, (NameFilter)i.next());
+-	}
+-
+-
+-	startTag("Filter",
+-		 new String[] {
+-		     "Name", "Resource Files",
++        rv.add(new DirectoryFilter("cpu", sbase));
++
++        rv.add(new DirectoryFilter("os", sbase));
++
++        rv.add(new SpecificNameFilter("JVMTI Generated", new String[] {"^jvmti.+"}));
++
++        rv.add(new SpecificNameFilter("C++ Interpreter Generated", new String[] {"^bytecodeInterpreterWithChecks.+"}));
++
++        rv.add(new SpecificNameFilter("Include DBs", new String[] {"^includeDB_.+"}));
++
++        // this one is to catch files not caught by other filters
++        //rv.add(new TypeFilter("Header Files", new String[] {"h", "hpp", "hxx", "hm", "inl", "fi", "fd"}));
++        rv.add(new TerminatorFilter("Source Files"));
++
++        return rv;
++    }
++
++    void writeFiles(Vector allConfigs) {
++
++        Hashtable allFiles = computeAttributedFiles(allConfigs);
++
++        Vector allConfigNames = new Vector();
++        for (Iterator i = allConfigs.iterator(); i.hasNext(); ) {
++            allConfigNames.add(((BuildConfig)i.next()).get("Name"));
++        }
++
++        TreeSet sortedFiles = sortFiles(allFiles);
++
++        startTag("Files", null);
++
++        for (Iterator i = makeFilters(sortedFiles).iterator(); i.hasNext(); ) {
++            doWriteFiles(sortedFiles, allConfigNames, (NameFilter)i.next());
++        }
++
++
++        startTag("Filter",
++                 new String[] {
++                     "Name", "Resource Files",
+                      "Filter", "ico;cur;bmp;dlg;rc2;rct;bin;cnt;rtf;gif;jpg;jpeg;jpe"
+-		 }
+-		 );
+-	endTag("Filter");	
++                 }
++                 );
++        endTag("Filter");
+ 
+-	endTag("Files");
++        endTag("Files");
+     }
+-   
++
+     void doWriteFiles(TreeSet allFiles, Vector allConfigNames, NameFilter filter) {
+-	startTag("Filter",
+-		 new String[] {
+-		     "Name",   filter.name(),
++        startTag("Filter",
++                 new String[] {
++                     "Name",   filter.name(),
+                      "Filter", filter.filterString()
+-		 }
+-		 );
++                 }
++                 );
++
++        if (filter instanceof ContainerFilter) {
+ 
+-	if (filter instanceof ContainerFilter) {
++            Iterator i = ((ContainerFilter)filter).babies();
++            while (i.hasNext()) {
++                doWriteFiles(allFiles, allConfigNames, (NameFilter)i.next());
++            }
+ 
+-	    Iterator i = ((ContainerFilter)filter).babies();
+-	    while (i.hasNext()) {
+-		doWriteFiles(allFiles, allConfigNames, (NameFilter)i.next());
+-	    }
+-
+-	} else {
+-
+-	    Iterator i = allFiles.iterator();
+-	    while (i.hasNext()) {
+-		FileInfo fi = (FileInfo)i.next();
+-		
+-		if (!filter.match(fi)) {
+-		    continue;
+-		}
+-		
+-		startTag("File",
+-			 new String[] {
+-			     "RelativePath", fi.full.replace('/', '\\')
+-			 }
+-			 );
+-		
+-		FileAttribute a = fi.attr; 
+-		if (a.pchRoot) {
+-		    writeCustomToolConfig(allConfigNames,
+-					  new String[] {
+-					      "Name", "VCCLCompilerTool",
+-					      "UsePrecompiledHeader", "1"
+-					  }); 
+-		}
+-		
+-		if (a.noPch) {
+-		    writeCustomToolConfig(allConfigNames,
+-					  new String[] {
+-					      "Name", "VCCLCompilerTool",
+-					      "UsePrecompiledHeader", "0"
+-					  }); 
+-		}
+-		
+-		if (a.configs != null) {
+-		    for (Iterator j=allConfigNames.iterator(); j.hasNext();) {
+-			String cfg = (String)j.next();
+-			if (!a.configs.contains(cfg)) {
+-			    startTag("FileConfiguration",
+-				     new String[] { 
+-					 "Name", cfg,
+-					 "ExcludedFromBuild", "TRUE" 
+-				     });
+-			    tag("Tool", new String[] {"Name", "VCCLCompilerTool"});
+-			    endTag("FileConfiguration");
+-			
+-			}
+-		    }
+-		}
+-		
+-		endTag("File");
+-	    
+-		// we not gonna look at this file anymore
+-		i.remove();
+-	    }
+-	}
++        } else {
+ 
+-	endTag("Filter");
++            Iterator i = allFiles.iterator();
++            while (i.hasNext()) {
++                FileInfo fi = (FileInfo)i.next();
++
++                if (!filter.match(fi)) {
++                    continue;
++                }
++
++                startTag("File",
++                         new String[] {
++                             "RelativePath", fi.full.replace('/', '\\')
++                         }
++                         );
++
++                FileAttribute a = fi.attr;
++                if (a.pchRoot) {
++                    writeCustomToolConfig(allConfigNames,
++                                          new String[] {
++                                              "Name", "VCCLCompilerTool",
++                                              "UsePrecompiledHeader", "1"
++                                          });
++                }
++
++                if (a.noPch) {
++                    writeCustomToolConfig(allConfigNames,
++                                          new String[] {
++                                              "Name", "VCCLCompilerTool",
++                                              "UsePrecompiledHeader", "0"
++                                          });
++                }
++
++                if (a.configs != null) {
++                    for (Iterator j=allConfigNames.iterator(); j.hasNext();) {
++                        String cfg = (String)j.next();
++                        if (!a.configs.contains(cfg)) {
++                            startTag("FileConfiguration",
++                                     new String[] {
++                                         "Name", cfg,
++                                         "ExcludedFromBuild", "TRUE"
++                                     });
++                            tag("Tool", new String[] {"Name", "VCCLCompilerTool"});
++                            endTag("FileConfiguration");
++
++                        }
++                    }
++                }
++
++                endTag("File");
++
++                // we not gonna look at this file anymore
++                i.remove();
++            }
++        }
++
++        endTag("Filter");
+     }
+ 
+-    
++
+     void writeConfiguration(BuildConfig cfg) {
+-	startTag("Configuration", 
+-		 new String[] {
+-		     "Name", cfg.get("Name"),
+-		     "OutputDirectory",  cfg.get("OutputDir"),
+-		     "IntermediateDirectory",  cfg.get("OutputDir"),
+-		     "ConfigurationType", "2",
+-		     "UseOfMFC", "0",
+-		     "ATLMinimizesCRunTimeLibraryUsage", "FALSE"
+-		 }
+-		 );
++        startTag("Configuration",
++                 new String[] {
++                     "Name", cfg.get("Name"),
++                     "OutputDirectory",  cfg.get("OutputDir"),
++                     "IntermediateDirectory",  cfg.get("OutputDir"),
++                     "ConfigurationType", "2",
++                     "UseOfMFC", "0",
++                     "ATLMinimizesCRunTimeLibraryUsage", "FALSE"
++                 }
++                 );
+ 
+ 
+ 
+-	tagV("Tool", cfg.getV("CompilerFlags"));
+-
+-	tag("Tool", 
+-	    new String[] {
+-		"Name", "VCCustomBuildTool"
+-	    }
+-	    );
+-	
+-	tagV("Tool", cfg.getV("LinkerFlags"));
+-
+-	tag("Tool", 
+-	    new String[] {
+-		"Name", "VCPostBuildEventTool"
+-	    }
+-	    );
+-
+-	tag("Tool",
+-	    new String[] {
+-		"Name", "VCPreBuildEventTool"
+-	    }
+-	    );
+-
+-	tag("Tool",
+-	    new String[] {
+-		"Name", "VCPreLinkEventTool",
+-		"Description", BuildConfig.getFieldString(null, "PrelinkDescription"),
+-		"CommandLine", cfg.expandFormat(BuildConfig.getFieldString(null, "PrelinkCommand").replace('\t', '\n'))
+-	    }
+-	    );
+-
+-	tag("Tool",
+-	    new String[] {
+-		"Name", "VCResourceCompilerTool",
+-		// XXX???
+-		"PreprocessorDefinitions", "NDEBUG",
+-		"Culture", "1033"
+-	    }
+-	    );
+-	tag("Tool", 
+-	    new String[] {
++        tagV("Tool", cfg.getV("CompilerFlags"));
++
++        tag("Tool",
++            new String[] {
++                "Name", "VCCustomBuildTool"
++            }
++            );
++
++        tagV("Tool", cfg.getV("LinkerFlags"));
++
++        tag("Tool",
++            new String[] {
++                "Name", "VCPostBuildEventTool"
++            }
++            );
++
++        tag("Tool",
++            new String[] {
++                "Name", "VCPreBuildEventTool"
++            }
++            );
++
++        tag("Tool",
++            new String[] {
++                "Name", "VCPreLinkEventTool",
++                "Description", BuildConfig.getFieldString(null, "PrelinkDescription"),
++                "CommandLine", cfg.expandFormat(BuildConfig.getFieldString(null, "PrelinkCommand").replace('\t', '\n'))
++            }
++            );
++
++        tag("Tool",
++            new String[] {
++                "Name", "VCResourceCompilerTool",
++                // XXX???
++                "PreprocessorDefinitions", "NDEBUG",
++                "Culture", "1033"
++            }
++            );
++        tag("Tool",
++            new String[] {
+               "Name", "VCWebServiceProxyGeneratorTool"
+-	    }
+-	    );
++            }
++            );
+ 
+-	tag ("Tool",
+-	     new String[] {
++        tag ("Tool",
++             new String[] {
+               "Name", "VCXMLDataGeneratorTool"
+-	     }
+-	     );
+-	
+-	tag("Tool",
+-	    new String[] {
++             }
++             );
++
++        tag("Tool",
++            new String[] {
+               "Name", "VCWebDeploymentTool"
+-	    }
+-	    );
+-	tag("Tool",
+-	     new String[] {
+-	    "Name", "VCManagedWrapperGeneratorTool"
+-	     }
+-	    );
+-	tag("Tool",
+-	    new String[] {
++            }
++            );
++        tag("Tool",
++             new String[] {
++            "Name", "VCManagedWrapperGeneratorTool"
++             }
++            );
++        tag("Tool",
++            new String[] {
+               "Name", "VCAuxiliaryManagedWrapperGeneratorTool"
+-	    }
+-	    );
++            }
++            );
+ 
+-	tag("Tool",
+-	    new String[] {
+-		"Name", "VCMIDLTool",
+-		"PreprocessorDefinitions", "NDEBUG",
+-		"MkTypLibCompatible", "TRUE",
+-		"SuppressStartupBanner", "TRUE",
+-		"TargetEnvironment", "1",		
+-		"TypeLibraryName", cfg.get("OutputDir") + Util.sep + "vm.tlb",
+-		"HeaderFileName", ""
+-	    }
+-	    );
++        tag("Tool",
++            new String[] {
++                "Name", "VCMIDLTool",
++                "PreprocessorDefinitions", "NDEBUG",
++                "MkTypLibCompatible", "TRUE",
++                "SuppressStartupBanner", "TRUE",
++                "TargetEnvironment", "1",
++                "TypeLibraryName", cfg.get("OutputDir") + Util.sep + "vm.tlb",
++                "HeaderFileName", ""
++            }
++            );
+ 
+-	endTag("Configuration");
++        endTag("Configuration");
+     }
+-    
++
+     int indent;
+ 
+-    private void startTagPrim(String name, 
+-			      String[] attrs, 
+-			      boolean close) {
+-	doIndent();
+-        printWriter.print("<"+name);	
+-	indent++;
+-	
++    private void startTagPrim(String name,
++                              String[] attrs,
++                              boolean close) {
++        doIndent();
++        printWriter.print("<"+name);
++        indent++;
++
+         if (attrs != null) {
+-	    printWriter.println();
+-            for (int i=0; i<attrs.length; i+=2) {	
+-		doIndent();
++            printWriter.println();
++            for (int i=0; i<attrs.length; i+=2) {
++                doIndent();
+                 printWriter.println(" " + attrs[i]+"=\""+attrs[i+1]+"\"");
+             }
+         }
+-        
+-	if (close) {
+-	    indent--;
+-	    //doIndent();
+-	    printWriter.println("/>");
+-	} else {
+-	    //doIndent();
+-	    printWriter.println(">");
+-	}
++
++        if (close) {
++            indent--;
++            //doIndent();
++            printWriter.println("/>");
++        } else {
++            //doIndent();
++            printWriter.println(">");
++        }
+     }
+ 
+     void startTag(String name, String[] attrs) {
+@@ -502,16 +504,16 @@
+     }
+ 
+     void startTagV(String name, Vector attrs) {
+-	String s[] = new String [attrs.size()];
+-	 for (int i=0; i<attrs.size(); i++) {
+-	     s[i] = (String)attrs.elementAt(i);
+-	 }
++        String s[] = new String [attrs.size()];
++         for (int i=0; i<attrs.size(); i++) {
++             s[i] = (String)attrs.elementAt(i);
++         }
+         startTagPrim(name, s, false);
+     }
+-    
++
+     void endTag(String name) {
+-	indent--;
+-	doIndent();
++        indent--;
++        doIndent();
+         printWriter.println("</"+name+">");
+     }
+ 
+@@ -520,127 +522,126 @@
+     }
+ 
+      void tagV(String name, Vector attrs) {
+-	 String s[] = new String [attrs.size()];
+-	 for (int i=0; i<attrs.size(); i++) {
+-	     s[i] = (String)attrs.elementAt(i);
+-	 }
+-	 startTagPrim(name, s, true);
++         String s[] = new String [attrs.size()];
++         for (int i=0; i<attrs.size(); i++) {
++             s[i] = (String)attrs.elementAt(i);
++         }
++         startTagPrim(name, s, true);
+     }
+ 
+ 
+     void doIndent() {
+-	for (int i=0; i<indent; i++) {
+-	    printWriter.print("    ");
+-	}
++        for (int i=0; i<indent; i++) {
++            printWriter.print("    ");
++        }
+     }
+ 
+     protected String getProjectExt() {
+-	return ".vcproj";
++        return ".vcproj";
+     }
+ }
+ 
+-class CompilerInterfaceVC7 extends CompilerInterface {   
++class CompilerInterfaceVC7 extends CompilerInterface {
+     Vector getBaseCompilerFlags(Vector defines, Vector includes, String outDir) {
+-	Vector rv = new Vector();
++        Vector rv = new Vector();
+ 
+-	// advanced M$ IDE (2003) can only recognize name if it's first or 
+-	// second attribute in the tag - go guess
+-	addAttr(rv, "Name", "VCCLCompilerTool");     	
+-	addAttr(rv, "AdditionalIncludeDirectories", Util.join(",", includes));
+-	addAttr(rv, "PreprocessorDefinitions", Util.join(";", defines).replace("\"","&quot;"));
++        // advanced M$ IDE (2003) can only recognize name if it's first or
++        // second attribute in the tag - go guess
++        addAttr(rv, "Name", "VCCLCompilerTool");
++        addAttr(rv, "AdditionalIncludeDirectories", Util.join(",", includes));
++        addAttr(rv, "PreprocessorDefinitions", Util.join(";", defines).replace("\"","&quot;"));
+         addAttr(rv, "UsePrecompiledHeader", "3");
+-	addAttr(rv, "PrecompiledHeaderThrough", "incls"+Util.sep+"_precompiled.incl");
+-	addAttr(rv, "PrecompiledHeaderFile", outDir+Util.sep+"vm.pch");
+-	addAttr(rv, "AssemblerListingLocation", outDir);
+-	addAttr(rv, "ObjectFile", outDir+Util.sep);
+-	addAttr(rv, "ProgramDataBaseFileName", outDir+Util.sep+"vm.pdb");
+-	addAttr(rv, "SuppressStartupBanner", "TRUE");
+-	addAttr(rv, "CompileAs", "0");
+-	addAttr(rv, "WarningLevel", "3");
+-	addAttr(rv, "WarnAsError", "TRUE");
+-	addAttr(rv, "BufferSecurityCheck", "FALSE");
+-	addAttr(rv, "ExceptionHandling", "FALSE");
++        addAttr(rv, "PrecompiledHeaderThrough", "incls"+Util.sep+"_precompiled.incl");
++        addAttr(rv, "PrecompiledHeaderFile", outDir+Util.sep+"vm.pch");
++        addAttr(rv, "AssemblerListingLocation", outDir);
++        addAttr(rv, "ObjectFile", outDir+Util.sep);
++        addAttr(rv, "ProgramDataBaseFileName", outDir+Util.sep+"vm.pdb");
++        addAttr(rv, "SuppressStartupBanner", "TRUE");
++        addAttr(rv, "CompileAs", "0");
++        addAttr(rv, "WarningLevel", "3");
++        addAttr(rv, "WarnAsError", "TRUE");
++        addAttr(rv, "BufferSecurityCheck", "FALSE");
++        addAttr(rv, "ExceptionHandling", "FALSE");
+ 
+-	return rv;
++        return rv;
+     }
+ 
+     Vector getBaseLinkerFlags(String outDir, String outDll) {
+-	Vector rv = new Vector();
++        Vector rv = new Vector();
+ 
+-	addAttr(rv, "Name", "VCLinkerTool");
+-	addAttr(rv, "AdditionalOptions", 
+-		"/export:JNI_GetDefaultJavaVMInitArgs " +
+-		"/export:JNI_CreateJavaVM " + 
+-		"/export:JNI_GetCreatedJavaVMs "+
+-		"/export:jio_snprintf /export:jio_printf "+
+-		"/export:jio_fprintf /export:jio_vfprintf "+
+-		"/export:jio_vsnprintf "+
+-		"/export:JVM_EnqueueOperation ");	
+-	addAttr(rv, "AdditionalDependencies", "Wsock32.lib winmm.lib");
+-	addAttr(rv, "OutputFile", outDll);
++        addAttr(rv, "Name", "VCLinkerTool");
++        addAttr(rv, "AdditionalOptions",
++                "/export:JNI_GetDefaultJavaVMInitArgs " +
++                "/export:JNI_CreateJavaVM " +
++                "/export:JNI_GetCreatedJavaVMs "+
++                "/export:jio_snprintf /export:jio_printf "+
++                "/export:jio_fprintf /export:jio_vfprintf "+
++                "/export:jio_vsnprintf ");
++        addAttr(rv, "AdditionalDependencies", "Wsock32.lib winmm.lib");
++        addAttr(rv, "OutputFile", outDll);
+         addAttr(rv, "LinkIncremental", "1");
+-	addAttr(rv, "SuppressStartupBanner", "TRUE");
+-	addAttr(rv, "ModuleDefinitionFile", outDir+Util.sep+"vm.def");
+-	addAttr(rv, "ProgramDatabaseFile", outDir+Util.sep+"vm.pdb");
+-	addAttr(rv, "SubSystem", "2");
+-	addAttr(rv, "BaseAddress", "0x8000000");
+-	addAttr(rv, "ImportLibrary", outDir+Util.sep+"jvm.lib");
+-	addAttr(rv, "TargetMachine", "1");
++        addAttr(rv, "SuppressStartupBanner", "TRUE");
++        addAttr(rv, "ModuleDefinitionFile", outDir+Util.sep+"vm.def");
++        addAttr(rv, "ProgramDatabaseFile", outDir+Util.sep+"vm.pdb");
++        addAttr(rv, "SubSystem", "2");
++        addAttr(rv, "BaseAddress", "0x8000000");
++        addAttr(rv, "ImportLibrary", outDir+Util.sep+"jvm.lib");
++        addAttr(rv, "TargetMachine", "1");
+ 
+-	return rv;
++        return rv;
+     }
+-    
++
+     Vector getDebugCompilerFlags(String opt) {
+-	Vector rv = new Vector();
++        Vector rv = new Vector();
+ 
+-	addAttr(rv, "Optimization", opt);
+-	addAttr(rv, "OptimizeForProcessor", "1");
+-	addAttr(rv, "DebugInformationFormat", "3");
+-	addAttr(rv, "RuntimeLibrary", "2");
+-	addAttr(rv, "BrowseInformation", "1");
+-	addAttr(rv, "BrowseInformationFile", "$(IntDir)" + Util.sep);
++        addAttr(rv, "Optimization", opt);
++        addAttr(rv, "OptimizeForProcessor", "1");
++        addAttr(rv, "DebugInformationFormat", "3");
++        addAttr(rv, "RuntimeLibrary", "2");
++        addAttr(rv, "BrowseInformation", "1");
++        addAttr(rv, "BrowseInformationFile", "$(IntDir)" + Util.sep);
+ 
+-	return rv;
++        return rv;
+     }
+ 
+     Vector getDebugLinkerFlags() {
+-	Vector rv = new Vector();
+-	
+-	addAttr(rv, "GenerateDebugInformation", "TRUE");
+-	
+-	return rv;
++        Vector rv = new Vector();
++
++        addAttr(rv, "GenerateDebugInformation", "TRUE");
++
++        return rv;
+     }
+ 
+     Vector getProductCompilerFlags() {
+-	Vector rv = new Vector();
+-	
+-	addAttr(rv, "Optimization", "2");
+-	addAttr(rv, "InlineFunctionExpansion", "1");
+-	addAttr(rv, "StringPooling", "TRUE");
+-	addAttr(rv, "RuntimeLibrary", "2");
+-	addAttr(rv, "EnableFunctionLevelLinking", "TRUE");
+-	
+-	return rv;
++        Vector rv = new Vector();
++
++        addAttr(rv, "Optimization", "2");
++        addAttr(rv, "InlineFunctionExpansion", "1");
++        addAttr(rv, "StringPooling", "TRUE");
++        addAttr(rv, "RuntimeLibrary", "2");
++        addAttr(rv, "EnableFunctionLevelLinking", "TRUE");
++
++        return rv;
+     }
+ 
+     Vector getProductLinkerFlags() {
+-	Vector rv = new Vector();
++        Vector rv = new Vector();
+ 
+-	addAttr(rv, "OptimizeReferences", "2");
+-	addAttr(rv, "EnableCOMDATFolding", "2");
++        addAttr(rv, "OptimizeReferences", "2");
++        addAttr(rv, "EnableCOMDATFolding", "2");
+ 
+-	return rv;
++        return rv;
+     }
+ 
+     String getOptFlag() {
+-	return "2";
++        return "2";
+     }
+ 
+     String getNoOptFlag() {
+-	return "0";
++        return "0";
+     }
+ 
+     String makeCfgName(String flavourBuild) {
+-	return  flavourBuild + "|" + Util.os;
++        return  flavourBuild + "|" + Util.os;
+     }
+ }
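
Two functional changes ride along with the VC7 whitespace cleanup. First, makeFilters gains a "C++ Interpreter Generated" SpecificNameFilter for ^bytecodeInterpreterWithChecks.+, giving the generated bytecodeInterpreterWithChecks files their own folder in the MSVC solution view. Second, as in the VC6 file, /export:JVM_EnqueueOperation is dropped from the linker's AdditionalOptions. Filter dispatch itself is first-match-wins over an ordered list, with TerminatorFilter("Source Files") as the catch-all; a minimal stand-alone sketch of that dispatch, simplified to name -> regex pairs rather than the real NameFilter classes:

    import java.util.*;

    class FilterDispatchSketch {
        // LinkedHashMap keeps declaration order, which matters because the
        // first matching filter claims the file (doWriteFiles removes each
        // matched file from the set, so later filters never see it).
        static String place(String shortName, LinkedHashMap<String, String> filters) {
            for (Map.Entry<String, String> e : filters.entrySet()) {
                if (shortName.matches(e.getValue())) return e.getKey();
            }
            return "Source Files";   // the TerminatorFilter catch-all
        }

        public static void main(String[] args) {
            LinkedHashMap<String, String> filters = new LinkedHashMap<String, String>();
            filters.put("JVMTI Generated", "^jvmti.+");
            filters.put("C++ Interpreter Generated", "^bytecodeInterpreterWithChecks.+");
            System.out.println(place("bytecodeInterpreterWithChecks.cpp", filters));
            // prints: C++ Interpreter Generated
        }
    }

In the real code the regex is applied to fi.attr.shortName, and ContainerFilter nodes simply recurse into their children before any leaf filter runs.
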
+diff -ruN openjdk6/hotspot/src/share/vm/adlc/adlc.hpp openjdk/hotspot/src/share/vm/adlc/adlc.hpp
+--- openjdk6/hotspot/src/share/vm/adlc/adlc.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/adlc/adlc.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)adlc.hpp	1.28 07/05/05 17:04:59 JVM"
+-#endif
+ /*
+  * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+@@ -80,7 +77,7 @@
+   #include <inttypes.h>
+ #endif // LINUX
+ 
+-// Macros 
++// Macros
+ #define uint32 unsigned int
+ #define uint   unsigned int
+ 
+@@ -90,7 +87,7 @@
+ #define max(a, b)   (((a)>(b)) ? (a) : (b))
+ 
+ // VM components
+-#include "opcodes.hpp" 
++#include "opcodes.hpp"
+ 
+ // ADLC components
+ #include "arena.hpp"
+diff -ruN openjdk6/hotspot/src/share/vm/adlc/adlparse.cpp openjdk/hotspot/src/share/vm/adlc/adlparse.cpp
+--- openjdk6/hotspot/src/share/vm/adlc/adlparse.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/adlc/adlparse.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)adlparse.cpp	1.205 07/05/05 17:05:00 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ADLPARSE.CPP - Architecture Description Language Parser
+@@ -32,7 +29,7 @@
+ //----------------------------ADLParser----------------------------------------
+ // Create a new ADL parser
+ ADLParser::ADLParser(FileBuff& buffer, ArchDesc& archDesc)
+-  : _buf(buffer), _AD(archDesc), 
++  : _buf(buffer), _AD(archDesc),
+     _globalNames(archDesc.globalNames()) {
+   _AD._syntax_errs = _AD._semantic_errs = 0; // No errors so far this file
+   _AD._warnings    = 0;                      // No warnings either
+@@ -87,20 +84,20 @@
+ 
+ //------------------------------parse------------------------------------------
+ // Each top-level keyword should appear as the first non-whitespace on a line.
+-// 
++//
+ void ADLParser::parse() {
+   char *ident;
+ 
+   // Iterate over the lines in the file buffer parsing Level 1 objects
+   for( next_line(); _curline != NULL; next_line()) {
+     _ptr = _curline;             // Reset ptr to start of new line
+-    skipws();                    // Skip any leading whitespace 
++    skipws();                    // Skip any leading whitespace
+     ident = get_ident();         // Get first token
+     if (ident == NULL) {         // Empty line
+       continue;                  // Get the next line
+     }
+     if (!strcmp(ident, "instruct"))        instr_parse();
+-    else if (!strcmp(ident, "operand"))    oper_parse();	
++    else if (!strcmp(ident, "operand"))    oper_parse();
+     else if (!strcmp(ident, "opclass"))    opclass_parse();
+     else if (!strcmp(ident, "ins_attrib")) ins_attr_parse();
+     else if (!strcmp(ident, "op_attrib"))  op_attr_parse();
+@@ -166,8 +163,8 @@
+   else get_oplist(instr->_parameters, instr->_localNames);
+   skipws();                        // Skip leading whitespace
+   // Check for block delimiter
+-  if ( (_curchar != '%') 
+-       || ( next_char(),  (_curchar != '{')) ) { 
++  if ( (_curchar != '%')
++       || ( next_char(),  (_curchar != '{')) ) {
+     parse_err(SYNERR, "missing '%{' in instruction definition\n");
+     return;
+   }
+@@ -202,7 +199,7 @@
+         // Add the new match rule to the list
+         rule->_next = match_parse(instr->_localNames);
+         if (rule->_next) {
+-          rule = rule->_next; 
++          rule = rule->_next;
+           if( instr->is_ideal_control() ) {
+             parse_err(SYNERR, "unique match rule expected for %s\n", rule->_name);
+             return;
+@@ -261,11 +258,11 @@
+   adjust_set_rule(instr);
+   if (_AD._pipeline ) {
+     if( instr->expands() ) {
+-      if( instr->_ins_pipe ) 
+-	parse_err(WARN, "ins_pipe and expand rule both specified for instruction \"%s\"; ins_pipe will be unused\n", instr->_ident); 
++      if( instr->_ins_pipe )
++        parse_err(WARN, "ins_pipe and expand rule both specified for instruction \"%s\"; ins_pipe will be unused\n", instr->_ident);
+     } else {
+-      if( !instr->_ins_pipe ) 
+-	parse_err(WARN, "No ins_pipe specified for instruction \"%s\"\n", instr->_ident); 
++      if( !instr->_ins_pipe )
++        parse_err(WARN, "No ins_pipe specified for instruction \"%s\"\n", instr->_ident);
+     }
+   }
+   // Add instruction to tail of instruction list
+@@ -293,7 +290,7 @@
+ }
+ 
+ //------------------------------matchrule_clone_and_swap-----------------------
+-// Check for commutative operations with subtree operands, 
++// Check for commutative operations with subtree operands,
+ // create clones and swap operands.
+ void ADLParser::matchrule_clone_and_swap(MatchRule* rule, const char* instr_ident, int& match_rules_cnt) {
+   // Check for commutative operations with tree operands.
+@@ -302,7 +299,7 @@
+   if (count > 0) {
+     // Clone match rule and swap commutative operation's operands.
+     rule->swap_commutative_op(instr_ident, count, match_rules_cnt);
+-  } 
++  }
+ }
+ 
+ //------------------------------adjust_set_rule--------------------------------
+@@ -327,7 +324,7 @@
+       // Can not have additional base operands in right side of match!
+       if ( ! right->base_operand( position, _globalNames, result2, name2, optype2) ) {
+         assert( instr->_predicate == NULL, "ADLC does not support instruction chain rules with predicates");
+-        // Chain from input  _ideal_operand_type_, 
++        // Chain from input  _ideal_operand_type_,
+         // Needed for shared roots of match-trees
+         ChainList *lst = (ChainList *)_AD._chainRules[optype];
+         if (lst == NULL) {
+@@ -339,7 +336,7 @@
+           if (cost == NULL) {
+             cost = ((AttributeForm*)_globalNames[AttributeForm::_ins_cost])->_attrdef;
+           }
+-          // The ADLC does not support chaining from the ideal operand type 
++          // The ADLC does not support chaining from the ideal operand type
+           // of a predicated user-defined operand
+           if( frm->is_operand() == NULL || frm->is_operand()->_predicate == NULL ) {
+             lst->insert(instr->_matrule->_lChild->_opType,cost,instr->_ident);
+@@ -357,7 +354,7 @@
+             cost = ((AttributeForm*)_globalNames[AttributeForm::_ins_cost])->_attrdef;
+           }
+           // It is safe to chain from the top-level user-defined operand even
+-          // if it has a predicate, since the predicate is checked before 
++          // if it has a predicate, since the predicate is checked before
+           // the user-defined type is available.
+           lst->insert(instr->_matrule->_lChild->_opType,cost,instr->_ident);
+         }
+@@ -459,7 +456,7 @@
+     }
+     else if (!strcmp(ident, "opcode"))    {
+       parse_err(SYNERR, "Operands do not specify an opcode\n");
+-    }	
++    }
+     else if (!strcmp(ident, "effect"))    {
+       parse_err(SYNERR, "Operands do not specify an effect\n");
+     }
+@@ -613,9 +610,9 @@
+     return;
+   }
+   next_char();                    // Advance past the ';'
+-  
++
+   // Construct the attribute, record global name, and store in ArchDesc
+-  attrib = new AttributeForm(ident, OP_ATTR, aexpr); 
++  attrib = new AttributeForm(ident, OP_ATTR, aexpr);
+   _globalNames.Insert(ident, attrib);
+   _AD.addForm(attrib);
+ }
+@@ -645,10 +642,10 @@
+ }
+ 
+ //------------------------------int_def_parse----------------------------------
+-// Parse Example: 
++// Parse Example:
+ // int_def    MEMORY_REF_COST      (         200,  DEFAULT_COST * 2);
+ // <keyword>  <name>               ( <int_value>,   <description>  );
+-// 
++//
+ void ADLParser::int_def_parse(void) {
+   char *name        = NULL;         // Name of definition
+   char *value       = NULL;         // its value,
+@@ -709,10 +706,10 @@
+ 
+   // Debug Stuff
+   if (_AD._adl_debug > 1) {
+-    fprintf(stderr,"int_def: %s ( %s, %s )\n", name, 
++    fprintf(stderr,"int_def: %s ( %s, %s )\n", name,
+             (value), (description ? description : ""));
+   }
+-  
++
+   // Record new definition.
+   Expr *expr     = new Expr(name, description, int_value, int_value);
+   const Expr *old_expr = _AD.globalDefs().define(name, expr);
+@@ -800,7 +797,7 @@
+     parse_err(SYNERR, "Missing %c{ ... %c} block after register keyword.\n",'%','%');
+     return;
+   }
+-  
++
+   // Add reg_class spill_regs
+   regBlock->addSpillRegClass();
+ }
+@@ -862,7 +859,7 @@
+       next_char();               // skip open paren & comma characters
+       skipws();
+       if (_curchar == ')') break;
+-      
++
+       // Get parameter type
+       pType = get_ident();
+       if (pType == NULL) {
+@@ -913,12 +910,12 @@
+     sprintf(location, "#line %d \"%s\"\n", line, file);
+     encoding->add_code(location);
+   }
+-      
++
+   // Collect the parts of the encode description
+   // (1) strings that are passed through to output
+   // (2) replacement/substitution variable, preceeded by a '$'
+   while ( (_curchar != '%') && (*(_ptr+1) != '}') ) {
+-      
++
+     // (1)
+     // Check if there is a string to pass through to output
+     char *start = _ptr;       // Record start of the next string
+@@ -936,9 +933,9 @@
+       *_ptr  = '\0';          // Terminate the string
+       encoding->add_code(start);
+     }
+-        
++
+     // (2)
+-    // If we are at a replacement variable, 
++    // If we are at a replacement variable,
+     // copy it and record in EncClass
+     if ( _curchar == '$' ) {
+       // Found replacement Variable
+@@ -975,7 +972,7 @@
+             parse_err(SYNERR, "missing identifier inside frame block.\n");
+             return;
+       }
+-      if (strcmp(token,"stack_direction")==0) { 
++      if (strcmp(token,"stack_direction")==0) {
+         stack_dir_parse(frame);
+       }
+       if (strcmp(token,"sync_stack_slots")==0) {
+@@ -1350,8 +1347,8 @@
+ 
+   skipws();                       // Skip leading whitespace
+   // Check for block delimiter
+-  if ( (_curchar != '%') 
+-       || ( next_char(),  (_curchar != '{')) ) { 
++  if ( (_curchar != '%')
++       || ( next_char(),  (_curchar != '{')) ) {
+     parse_err(SYNERR, "missing '%{' in pipeline definition\n");
+     return;
+   }
+@@ -1367,8 +1364,8 @@
+     else if (!strcmp(ident, "pipe_class")) pipe_class_parse(*pipeline);
+     else if (!strcmp(ident, "define")) {
+       skipws();
+-      if ( (_curchar != '%') 
+-           || ( next_char(),  (_curchar != '{')) ) { 
++      if ( (_curchar != '%')
++           || ( next_char(),  (_curchar != '{')) ) {
+         parse_err(SYNERR, "expected '%{'\n");
+         return;
+       }
+@@ -1396,11 +1393,11 @@
+         parse_err(SYNERR, "expected `;`, found '%c'\n", _curchar);
+         break;
+       }
+-      next_char();		// Skip over semi-colon
++      next_char();              // Skip over semi-colon
+ 
+       skipws();
+-      if ( (_curchar != '%') 
+-           || ( next_char(),  (_curchar != '}')) ) { 
++      if ( (_curchar != '%')
++           || ( next_char(),  (_curchar != '}')) ) {
+         parse_err(SYNERR, "expected '%%}', found \"%c\"\n", _curchar);
+       }
+       next_char();
+@@ -1423,8 +1420,8 @@
+       bool vsi_seen = false, bhds_seen = false;
+ 
+       skipws();
+-      if ( (_curchar != '%') 
+-           || ( next_char(),  (_curchar != '{')) ) { 
++      if ( (_curchar != '%')
++           || ( next_char(),  (_curchar != '{')) ) {
+         parse_err(SYNERR, "expected '%{'\n");
+         return;
+       }
+@@ -1613,8 +1610,8 @@
+         parse_err(SYNERR, "unknown specifier \"%s\"\n", ident);
+       }
+ 
+-      if ( (_curchar != '%') 
+-           || ( next_char(),  (_curchar != '}')) ) { 
++      if ( (_curchar != '%')
++           || ( next_char(),  (_curchar != '}')) ) {
+         parse_err(SYNERR, "expected '%}', found \"%c\"\n", _curchar);
+       }
+       next_char(); skipws();
+@@ -1645,7 +1642,7 @@
+     return;
+   }
+ 
+-  next_char(); 
++  next_char();
+ }
+ 
+ //------------------------------resource_parse----------------------------
+@@ -1694,19 +1691,19 @@
+       while (_curchar == '|') {
+         next_char(); skipws();
+ 
+-	expr = get_ident();          // Grab next identifier
+-	if (expr == NULL) {
+-	  parse_err(SYNERR, "keyword identifier expected at \"%c\"\n", _curchar);
+-	  return;
+-	}
+-
+-	resource = (ResourceForm *) pipeline._resdict[expr];   // Look up the value
+-	if (resource == NULL) {
+-	  parse_err(SYNERR, "resource \"%s\" is not defined\n", expr);
+-	  return;
+-	}
++        expr = get_ident();          // Grab next identifier
++        if (expr == NULL) {
++          parse_err(SYNERR, "keyword identifier expected at \"%c\"\n", _curchar);
++          return;
++        }
+ 
+-	mask |= resource->mask();
++        resource = (ResourceForm *) pipeline._resdict[expr];   // Look up the value
++        if (resource == NULL) {
++          parse_err(SYNERR, "resource \"%s\" is not defined\n", expr);
++          return;
++        }
++
++        mask |= resource->mask();
+         skipws();
+       }
+     }
+@@ -1732,7 +1729,7 @@
+   char * ident;
+ 
+   skipws();                       // Skip leading whitespace
+-  
++
+   if (_curchar != '(') {
+     parse_err(SYNERR, "missing \"(\" in pipe_desc definition\n");
+     return;
+@@ -1774,7 +1771,7 @@
+   OperandForm  *oper;
+ 
+   skipws();                       // Skip leading whitespace
+-  
++
+   ident = get_ident();            // Grab next identifier
+ 
+   if (ident == NULL) {
+@@ -2313,13 +2310,13 @@
+     return;
+   }
+   next_char();                   // move past ';'
+-  
++
+   // Debug Stuff
+   if (_AD._adl_debug > 1) {
+-    fprintf(stderr,"Register Definition: %s ( %s, %s %s )\n", rname, 
++    fprintf(stderr,"Register Definition: %s ( %s, %s %s )\n", rname,
+             (callconv ? callconv : ""), (c_conv ? c_conv : ""), concrete);
+   }
+-  
++
+   // Record new register definition.
+   _AD._register->addRegDef(rname, callconv, c_conv, idealtype, encoding, concrete);
+   return;
+@@ -2374,7 +2371,7 @@
+   next_char();                    // Skip trailing ';'
+ 
+   // Check RegClass size, must be <= 32 registers in class.
+-  
++
+   return;
+ }
+ 
+@@ -2433,7 +2430,7 @@
+     return;
+   }
+   next_char();                    // Skip trailing ';'
+-  
++
+   return;
+ }
+ 
+@@ -2442,10 +2439,10 @@
+   char      *token  = NULL;
+   int        lparen = 0;          // keep track of parenthesis nesting depth
+   int        rparen = 0;          // position of instruction at this depth
+-  InstructForm *inst_seen  = NULL;  
++  InstructForm *inst_seen  = NULL;
+   InstructForm *child_seen = NULL;
+ 
+-  // Walk the match tree, 
++  // Walk the match tree,
+   // Record <parent, position, instruction name, input position>
+   while ( lparen >= rparen ) {
+     skipws();
+@@ -2478,7 +2475,7 @@
+       if (form) {
+         InstructForm *inst = form->is_instruction();
+         // Record the first instruction at this level
+-        if( inst_seen == NULL ) { 
++        if( inst_seen == NULL ) {
+           inst_seen = inst;
+         }
+         if (inst) {
+@@ -2500,7 +2497,7 @@
+       parse_err(SYNERR, "missing identifier in peepmatch rule.\n");
+       return NULL;
+     }
+-      
++
+   } // end while
+ 
+   assert( false, "ShouldNotReachHere();");
+@@ -2508,7 +2505,7 @@
+ }
+ 
+ //------------------------------peep_match_parse-------------------------------
+-// Syntax for a peepmatch rule 
++// Syntax for a peepmatch rule
+ //
+ // peepmatch ( root_instr_name [(instruction subtree)] [,(instruction subtree)]* );
+ //
+@@ -2533,7 +2530,7 @@
+     parse_err(SYNERR, "missing instruction-name at start of peepmatch.\n");
+     return;
+   }
+-  
++
+   if( _curchar != ')' ) {
+     parse_err(SYNERR, "missing ')' at end of peepmatch.\n");
+     return;
+@@ -2558,13 +2555,13 @@
+ // A parenthesized list of relations between operands in peepmatch subtree
+ //
+ // peepconstraint %{
+-// (instruction_number.operand_name 
+-//     relational_op 
++// (instruction_number.operand_name
++//     relational_op
+ //  instruction_number.operand_name OR register_name
+ //  [, ...] );
+-// 
++//
+ // // instruction numbers are zero-based using topological order in peepmatch
+-// 
++//
+ void ADLParser::peep_constraint_parse(Peephole &peep) {
+ 
+   skipws();
+@@ -2616,12 +2613,12 @@
+     char *right_op = get_ident_dup();
+ 
+     // Construct the next PeepConstraint
+-    PeepConstraint *constraint = new PeepConstraint( left_inst, left_op, 
+-                                                     relation, 
++    PeepConstraint *constraint = new PeepConstraint( left_inst, left_op,
++                                                     relation,
+                                                      right_inst, right_op );
+     // And append it to the list for this peephole rule
+     peep.append_constraint( constraint );
+-    
++
+     // Check for another constraint, or end of rule
+     skipws();
+     if( _curchar == ',' ) {
+@@ -2647,12 +2644,12 @@
+ 
+ //------------------------------peep_replace_parse-----------------------------
+ // Syntax for a peepreplace rule
+-// root instruction name followed by a 
++// root instruction name followed by a
+ // parenthesized list of whitespace separated instruction.operand specifiers
+ //
+ // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
+-// 
+-// 
++//
++//
+ void ADLParser::peep_replace_parse(Peephole &peep) {
+   int          lparen = 0;        // keep track of parenthesis nesting depth
+   int          rparen = 0;        // keep track of parenthesis nesting depth
+@@ -2815,7 +2812,7 @@
+ 
+ 
+ //------------------------------ins_encode_parse-------------------------------
+-// Encode rules have the form 
++// Encode rules have the form
+ //   ins_encode( encode_class_name(parameter_list), ... );
+ //
+ // The "encode_class_name" must be defined in the encode section
+@@ -2826,12 +2823,12 @@
+ //   ins_encode %{
+ //      ... // body
+ //   %}
+-// 
++//
+ // which synthesizes a new encoding class taking the same arguments as
+ // the InstructForm, and automatically prefixes the definition with:
+-// 
++//
+ //    MacroAssembler masm(&cbuf);\n");
+-// 
++//
+ //  making it more compact to take advantage of the MacroAssembler and
+ //  placing the assembly closer to it's use by instructions.
+ InsEncode *ADLParser::ins_encode_parse(InstructForm &inst) {
+@@ -2878,7 +2875,7 @@
+     skipws();
+     if ( _curchar == '(' ) {
+       next_char();                 // move past '(' for parameters
+-      
++
+       // Parse the encode method's parameters
+       while (_curchar != ')') {
+         char *param = get_ident_or_literal_constant("encoding operand");
+@@ -2889,8 +2886,8 @@
+           // New: allow parenthesized expressions as parameters.
+           // New: allow "primary", "secondary", "tertiary" as parameters.
+           // New: allow user-defined register name as parameter
+-          if ( (inst._localNames[param] == NULL) && 
+-               !ADLParser::is_literal_constant(param) && 
++          if ( (inst._localNames[param] == NULL) &&
++               !ADLParser::is_literal_constant(param) &&
+                (Opcode::as_opcode_type(param) == Opcode::NOT_AN_OPCODE) &&
+                ((_AD._register == NULL ) || (_AD._register->getRegDef(param) == NULL)) ) {
+             parse_err(SYNERR, "Using non-locally defined parameter %s for encoding %s.\n", param, ec_name);
+@@ -2924,7 +2921,7 @@
+           if (_curchar != ')') {
+             parse_err(SYNERR, "Expected ')' after encode parameters.\n");
+             return NULL;
+-          }            
++          }
+         }
+       } // WHILE loop collecting parameters
+       next_char();                   // move past ')' at end of parameters
+@@ -2963,7 +2960,7 @@
+   }
+   next_char();                     // move past ';'
+   skipws();                        // be friendly to oper_parse()
+-  
++
+   // Debug Stuff
+   if (_AD._adl_debug > 1) fprintf(stderr,"Instruction Encode: %s\n", ec_name);
+ 
+@@ -2977,7 +2974,7 @@
+ 
+   // Get value of the instruction's size
+   skipws();
+-  
++
+   // Parse size
+   sizeOfInstr = get_paren_expr("size expression");
+   if (sizeOfInstr == NULL) {
+@@ -2998,10 +2995,10 @@
+   // Debug Stuff
+   if (_AD._adl_debug > 1) {
+     if (sizeOfInstr != NULL) {
+-      fprintf(stderr,"size of opcode: %s\n", sizeOfInstr);    
++      fprintf(stderr,"size of opcode: %s\n", sizeOfInstr);
+     }
+   }
+-  
++
+   return sizeOfInstr;
+ }
+ 
+@@ -3021,7 +3018,7 @@
+     parse_err(SYNERR, "missing '(' in expand instruction declaration\n");
+     return NULL;
+   }
+-  next_char();                   // skip open paren 
++  next_char();                   // skip open paren
+   skipws();
+   if (_curchar != ')') {
+     // Parse primary, secondary, and tertiary opcodes, if provided.
+@@ -3104,7 +3101,7 @@
+   }
+   next_char();                    // move past ')'
+ 
+-  // Get details of the interface, 
++  // Get details of the interface,
+   // for the type of interface indicated by iface_name.
+   Interface *inter = NULL;
+   skipws();
+@@ -3116,7 +3113,7 @@
+       inter = cond_interface_parse();
+     }
+     // The parse routines consume the "%}"
+-      
++
+     // Check for probable extra ';' after defining block.
+     if ( _curchar == ';' ) {
+       parse_err(SYNERR, "Extra ';' after defining interface block.\n");
+@@ -3206,7 +3203,7 @@
+   char *greater_equal;
+   char *less_equal;
+   char *greater;
+-  
++
+   if (_curchar != '%') {
+     parse_err(SYNERR, "Missing '%{' for 'cond_interface' block.\n");
+     return NULL;
+@@ -3321,17 +3318,17 @@
+               "Missing ';' or invalid '%{' and '%}' constructor\n");
+     return NULL;                  // No MatchRule to return
+   }
+-  if (_AD._adl_debug > 1) 
++  if (_AD._adl_debug > 1)
+     if (cnstr) fprintf(stderr,"Match Constructor: %s\n", cnstr);
+   // Build new MatchRule object
+-  match = new MatchRule(_AD, mnode, depth, cnstr, numleaves); 
++  match = new MatchRule(_AD, mnode, depth, cnstr, numleaves);
+   skipws();                       // Skip any trailing whitespace
+   return match;                   // Return MatchRule object
+ }
+ 
+ //------------------------------format_parse-----------------------------------
+ FormatRule* ADLParser::format_parse(void) {
+-  char       *desc   = NULL;    
++  char       *desc   = NULL;
+   FormatRule *format = (new FormatRule(desc));
+ 
+   // Without expression form, MUST have a code block;
+@@ -3358,19 +3355,19 @@
+       // (2) replacement/substitution variable, preceeded by a '$'
+       // (3) multi-token ANSIY C style strings
+       while ( true ) {
+-	if ( _curchar == '%' || _curchar == '\n' ) {
+-	  if ( _curchar != '"' ) {
+-	    parse_err(SYNERR, "missing '\"' at end of format block");
+-	    return NULL;
+-	  }
+-	}
++        if ( _curchar == '%' || _curchar == '\n' ) {
++          if ( _curchar != '"' ) {
++            parse_err(SYNERR, "missing '\"' at end of format block");
++            return NULL;
++          }
++        }
+ 
+         // (1)
+         // Check if there is a string to pass through to output
+         char *start = _ptr;       // Record start of the next string
+         while ((_curchar != '$') && (_curchar != '"') && (_curchar != '%') && (_curchar != '\n')) {
+-	  if (_curchar == '\\')  next_char();  // superquote
+-	  if (_curchar == '\n')  parse_err(SYNERR, "newline in string");  // unimplemented!
++          if (_curchar == '\\')  next_char();  // superquote
++          if (_curchar == '\n')  parse_err(SYNERR, "newline in string");  // unimplemented!
+           next_char();
+         }
+         // If a string was found, terminate it and record in FormatRule
+@@ -3378,12 +3375,12 @@
+           *_ptr  = '\0';          // Terminate the string
+           format->_strings.addName(start);
+         }
+-        
++
+         // (2)
+-        // If we are at a replacement variable, 
++        // If we are at a replacement variable,
+         // copy it and record in FormatRule
+         if ( _curchar == '$' ) {
+-          next_char();		// Move past the '$'
++          next_char();          // Move past the '$'
+           char* rep_var = get_ident(); // Nil terminate the variable name
+           rep_var = strdup(rep_var);// Copy the string
+           *_ptr   = _curchar;     // and replace Nil with original character
+@@ -3392,19 +3389,19 @@
+           format->_strings.addName(NameList::_signal);
+         }
+ 
+-	// (3)
+-	// Allow very long strings to be broken up,
+-	// using the ANSI C syntax "foo\n" <newline> "bar"
+-	if ( _curchar == '"') {
+-	  next_char();           // Move past the '"'
+-	  skipws();              // Skip white space before next string token
+-	  if ( _curchar != '"') {
+-	    break;
+-	  } else {
+-	    // Found one.  Skip both " and the whitespace in between.
+-	    next_char();
+-	  }
+-	}
++        // (3)
++        // Allow very long strings to be broken up,
++        // using the ANSI C syntax "foo\n" <newline> "bar"
++        if ( _curchar == '"') {
++          next_char();           // Move past the '"'
++          skipws();              // Skip white space before next string token
++          if ( _curchar != '"') {
++            break;
++          } else {
++            // Found one.  Skip both " and the whitespace in between.
++            next_char();
++          }
++        }
+       } // end while part of format description
+ 
+       // Check for closing '"' and '%}' in format description
+@@ -3413,7 +3410,7 @@
+         parse_err(SYNERR, "non-blank characters between closing '\"' and '%' in format");
+         return NULL;
+       }
+-    } // Done with format description inside 
++    } // Done with format description inside
+ 
+     skipws();
+     // Past format description, at '%'
+@@ -3430,7 +3427,7 @@
+   }
+   // Debug Stuff
+   if (_AD._adl_debug > 1) fprintf(stderr,"Format Rule: %s\n", desc);
+-  
++
+   skipws();
+   return format;
+ }
+@@ -3438,7 +3435,7 @@
+ 
+ //------------------------------effect_parse-----------------------------------
+ void ADLParser::effect_parse(InstructForm *instr) {
+-  char* desc   = NULL;    
++  char* desc   = NULL;
+ 
+   skipws();                      // Skip whitespace
+   if (_curchar != '(') {
+@@ -3454,7 +3451,7 @@
+     parse_err(SYNERR, "missing ';' in Effect definition\n");
+   }
+   next_char();                  // Skip ';'
+-  
++
+ }
+ 
+ //------------------------------expand_parse-----------------------------------
+@@ -3469,7 +3466,7 @@
+   // which has an ordered list of operands.
+   // Check for block delimiter
+   skipws();                        // Skip leading whitespace
+-  if ((_curchar != '%') 
++  if ((_curchar != '%')
+       || (next_char(), (_curchar != '{')) ) { // If not open block
+     parse_err(SYNERR, "missing '%{' in expand definition\n");
+     return(NULL);
+@@ -3561,7 +3558,7 @@
+       }
+       next_char();
+ 
+-      // Record both instruction name and its operand list 
++      // Record both instruction name and its operand list
+       exp->add_instruction(instr_and_operands);
+ 
+       skipws();
+@@ -3585,7 +3582,7 @@
+ //------------------------------rewrite_parse----------------------------------
+ RewriteRule* ADLParser::rewrite_parse(void) {
+   char* params = NULL;
+-  char* desc   = NULL;    
++  char* desc   = NULL;
+ 
+ 
+   // This feature targeted for second generation description language.
+@@ -3598,7 +3595,7 @@
+   }
+   // Debug Stuff
+   if (_AD._adl_debug > 1) fprintf(stderr,"Rewrite parameters: %s\n", params);
+-  
++
+   // For now, grab entire block;
+   skipws();
+   if ( (desc = find_cpp_block("rewrite block")) == NULL ) {
+@@ -3607,7 +3604,7 @@
+   }
+   // Debug Stuff
+   if (_AD._adl_debug > 1) fprintf(stderr,"Rewrite Rule: %s\n", desc);
+-  
++
+   skipws();
+   return (new RewriteRule(params,desc));
+ }
+@@ -3654,7 +3651,7 @@
+   next_char();                    // advance past '('
+ 
+   // Parse the opcode
+-  token = get_ident();            // Get identifier, opcode 
++  token = get_ident();            // Get identifier, opcode
+   if (token == NULL) {
+     parse_err(SYNERR, "missing opcode in match expression\n");
+     return NULL;
+@@ -3690,7 +3687,7 @@
+   // Parse the operands
+   skipws();
+   if (cur_char() != ')') {
+-    
++
+     // Parse the left child
+     if (strcmp(operation,"Set"))
+       lChild = matchChild_parse(operands, lParens, numleaves, false);
+@@ -3725,7 +3722,7 @@
+ 
+   return mroot;
+ }
+-  
++
+ 
+ //------------------------------matchChild_parse-------------------------------
+ MatchNode *ADLParser::matchChild_parse(FormDict &operands, int &parens, int &numleaves, bool atroot) {
+@@ -3823,10 +3820,10 @@
+     }
+     else if (_curchar == ')') {   // Up one level of nesting
+       if (paren == 0) {
+-	// Paren underflow:  We didn't encounter the required stop-char.
+-	parse_err(SYNERR, "too many )'s, did not find %s after %s\n",
+-		  stop_chars, desc);
+-	return NULL;
++        // Paren underflow:  We didn't encounter the required stop-char.
++        parse_err(SYNERR, "too many )'s, did not find %s after %s\n",
++                  stop_chars, desc);
++        return NULL;
+       }
+       paren--;                    // Drop the parenthesis counter
+       next_char();                // Maintain the invariant
+@@ -3834,13 +3831,13 @@
+     else if (_curchar == '"' || _curchar == '\'') {
+       int qchar = _curchar;
+       while (true) {
+-	next_char();
+-	if (_curchar == qchar) { next_char(); break; }
+-	if (_curchar == '\\')  next_char();  // superquote
+-	if (_curchar == '\n' || _curchar == '\0') {
+-	  parse_err(SYNERR, "newline in string in %s\n", desc);
+-	  return NULL;
+-	}
++        next_char();
++        if (_curchar == qchar) { next_char(); break; }
++        if (_curchar == '\\')  next_char();  // superquote
++        if (_curchar == '\n' || _curchar == '\0') {
++          parse_err(SYNERR, "newline in string in %s\n", desc);
++          return NULL;
++        }
+       }
+     }
+     else if (_curchar == '%' && (_ptr[1] == '{' || _ptr[1] == '}')) {
+@@ -3859,9 +3856,9 @@
+       // If the parser declined to make progress on whitespace,
+       // skip the next character, which is therefore NOT whitespace.
+       if (pre_skip_ptr == _ptr) {
+-	next_char();
++        next_char();
+       } else if (pre_skip_ptr+strlen(pre_skip_ptr) != _ptr+strlen(_ptr)) {
+-	parse_err(SYNERR, "unimplemented: preprocessor must not elide subexpression in %s", desc);
++        parse_err(SYNERR, "unimplemented: preprocessor must not elide subexpression in %s", desc);
+       }
+     }
+   }
+@@ -3876,10 +3873,10 @@
+ char *ADLParser::get_paren_expr(const char *description) {
+   if (_curchar != '(')            // Escape if not valid starting position
+     return NULL;
+-  next_char();			  // Skip the required initial paren.
++  next_char();                    // Skip the required initial paren.
+   char *token2 = get_expr(description, ")");
+   if (_curchar == ')')
+-    next_char();		  // Skip required final paren.
++    next_char();                  // Skip required final paren.
+   return token2;
+ }
+ 
+@@ -3903,7 +3900,7 @@
+     c = *end;                     // Grab character to test
+   } while ( ((c >= 'a') && (c <= 'z')) || ((c >= 'A') && (c <= 'Z'))
+             || ((c >= '0') && (c <= '9'))
+-	    || ((c == '_')) || ((c == ':')) || ((c == '#')) );
++            || ((c == '_')) || ((c == ':')) || ((c == '#')) );
+   if (start == end) {             // We popped out on the first try
+     parse_err(SYNERR, "identifier expected at %c\n", c);
+     start = NULL;
+@@ -3932,14 +3929,14 @@
+ }
+ 
+ //------------------------------get_ident_dup----------------------------------
+-// Looks for an identifier in the buffer, and returns a duplicate 
++// Looks for an identifier in the buffer, and returns a duplicate
+ // or NULL if some other token is found instead.
+ char *ADLParser::get_ident_dup(void) {
+   char *ident = get_ident();
+ 
+   // Duplicate an identifier before returning and restore string.
+   if( ident != NULL ) {
+-    ident = strdup(ident);  // Copy the string 
++    ident = strdup(ident);  // Copy the string
+     *_ptr   = _curchar;         // and replace Nil with original character
+   }
+ 
+@@ -3960,7 +3957,7 @@
+       param = buf;
+     }
+     assert(is_literal_constant(param),
+-	   "expr must be recognizable as a constant");
++           "expr must be recognizable as a constant");
+   } else {
+     param = get_ident();
+   }
+@@ -3968,7 +3965,7 @@
+ }
+ 
+ //------------------------------get_rep_var_ident-----------------------------
+-// Do NOT duplicate, 
++// Do NOT duplicate,
+ // Leave nil terminator in buffer
+ // Preserve initial '$'(s) in string
+ char *ADLParser::get_rep_var_ident(void) {
+@@ -3998,9 +3995,9 @@
+ 
+   // Nil terminate the variable name following the '$'
+   char *rep_var_name = get_ident();
+-  assert( rep_var_name != NULL, 
++  assert( rep_var_name != NULL,
+           "Missing identifier after replacement variable indicator '$'");
+-  
++
+   return rep_var;
+ }
+ 
+@@ -4009,7 +4006,7 @@
+ //------------------------------get_rep_var_ident_dup-------------------------
+ // Return the next replacement variable identifier, skipping first '$'
+ // given a pointer into a line of the buffer.
+-// Null terminates string, still inside the file buffer, 
++// Null terminates string, still inside the file buffer,
+ // Returns a pointer to a copy of the string, or NULL on failure
+ char *ADLParser::get_rep_var_ident_dup(void) {
+   if( _curchar != '$' ) return NULL;
+@@ -4036,17 +4033,17 @@
+ 
+   // Nil terminate the variable name following the '$'
+   char *rep_var_name = get_ident();
+-  assert( rep_var_name != NULL, 
++  assert( rep_var_name != NULL,
+           "Missing identifier after replacement variable indicator '$'");
+-  rep_var = strdup(rep_var);  // Copy the string 
++  rep_var = strdup(rep_var);  // Copy the string
+   *_ptr   = _curchar;         // and replace Nil with original character
+-  
++
+   return rep_var;
+ }
+ 
+ 
+ //------------------------------get_unique_ident------------------------------
+-// Looks for an identifier in the buffer, terminates it with a NULL, 
++// Looks for an identifier in the buffer, terminates it with a NULL,
+ // and checks that it is unique
+ char *ADLParser::get_unique_ident(FormDict& dict, const char* nameDescription){
+   char* ident = get_ident();
+@@ -4082,7 +4079,7 @@
+   start = end = _ptr;             // Start points at first character
+   c = *end;                       // Grab character to test
+   while ((c >= '0') && (c <= '9')
+-	 || ((c == '-') && (end == start))) {
++         || ((c == '-') && (end == start))) {
+     end++;                        // Increment end pointer
+     c = *end;                     // Grab character to test
+   }
+@@ -4100,7 +4097,7 @@
+   // Reset _ptr to next char after token
+   _ptr = end;
+ 
+-  return result;                   // integer 
++  return result;                   // integer
+ }
+ 
+ 
+@@ -4163,7 +4160,7 @@
+         parse_err(SYNERR, "undefined operand type %s\n", ident);
+         return;
+       }
+-      
++
+       // Check for valid operand type
+       OpClassForm *opc  = form->is_opclass();
+       OperandForm *oper = form->is_operand();
+@@ -4247,10 +4244,10 @@
+     opForm = form ? form->is_operand() : NULL;
+     if( opForm == NULL ) {
+       if( form && form->is_opclass() ) {
+-	const char* cname = form->is_opclass()->_ident;
+-	parse_err(SYNERR, "operand classes are illegal in effect lists (found %s %s)\n", cname, ident);
++        const char* cname = form->is_opclass()->_ident;
++        parse_err(SYNERR, "operand classes are illegal in effect lists (found %s %s)\n", cname, ident);
+       } else {
+-	parse_err(SYNERR, "undefined operand %s in effect list\n", ident);
++        parse_err(SYNERR, "undefined operand %s in effect list\n", ident);
+       }
+       return;
+     }
+@@ -4321,7 +4318,7 @@
+     while (error_head > _curline && *error_head)  --error_head;
+     if (error_tail)  *error_tail = '\0';
+     fprintf(stderr, "Error Context:  %s>>>%c<<<%s\n",
+-	    error_head, error_char, error_ptr);
++            error_head, error_char, error_ptr);
+     if (error_tail)  *error_tail = '\n';
+     error_ptr[-1] = tem;
+   }
+@@ -4332,8 +4329,8 @@
+ // the begining of a line, or else report an error.
+ void ADLParser::ensure_start_of_line(void) {
+   assert( _ptr >= _curline && _ptr < _curline+strlen(_curline),
+-	  "Must be able to find which line we are in" );
+-  
++          "Must be able to find which line we are in" );
++
+   for (char *s = _curline; s < _ptr; s++) {
+     if (*s > ' ') {
+       parse_err(SYNERR, "'%c' must be at beginning of line\n", _curchar);
+@@ -4382,7 +4379,7 @@
+   if (ident == NULL) {
+     parse_err(SYNERR, "expected preprocessor command, got end of line\n");
+   } else if (!strcmp(ident, "ifdef") ||
+-	     !strcmp(ident, "ifndef")) {
++             !strcmp(ident, "ifndef")) {
+     char* flag = get_ident_no_preproc();
+     ensure_end_of_line();
+     // Test the identifier only if we are already in taken code:
+@@ -4431,21 +4428,21 @@
+       _ptr = _curline; next = _ptr + 1;
+     }
+     else if (_curchar == '#' ||
+-	(_curchar == '/' && (*next == '/' || *next == '*'))) {
++        (_curchar == '/' && (*next == '/' || *next == '*'))) {
+       parse_err(SYNERR, "unimplemented: comment token in a funny place");
+     }
+   }
+   while(_curline != NULL) {                // Check for end of file
+     if (*_ptr == '\n') {                   // keep proper track of new lines
+       if (!do_preproc)  break;             // let caller handle the newline
+-      next_line(); 
++      next_line();
+       _ptr = _curline; next = _ptr + 1;
+     }
+     else if ((*_ptr == '/') && (*next == '/'))      // C++ comment
+       do { _ptr++; next++; } while(*_ptr != '\n');  // So go to end of line
+     else if ((*_ptr == '/') && (*next == '*')) {    // C comment
+       _ptr++; next++;
+-      do { 
++      do {
+         _ptr++; next++;
+         if (*_ptr == '\n') {               // keep proper track of new lines
+           next_line();                     // skip newlines within comments
+@@ -4460,12 +4457,12 @@
+     }
+     else if (do_preproc && *_ptr == '#') {
+       // Note that this calls skipws_common(false) recursively!
+-      bool preproc_handled = handle_preproc_token(); 
++      bool preproc_handled = handle_preproc_token();
+       if (!preproc_handled) {
+-	if (preproc_taken()) {
+-	  return;  // short circuit
+-	}
+-	++_ptr;    // skip the preprocessor character
++        if (preproc_taken()) {
++          return;  // short circuit
++        }
++        ++_ptr;    // skip the preprocessor character
+       }
+       next = _ptr+1;
+     } else if(*_ptr > ' ' && !(do_preproc && !preproc_taken())) {
+@@ -4476,13 +4473,13 @@
+       // skip untaken quoted string
+       int qchar = *_ptr;
+       while (true) {
+-	++_ptr;
+-	if (*_ptr == qchar) { ++_ptr; break; }
+-	if (*_ptr == '\\')  ++_ptr;
+-	if (*_ptr == '\n' || *_ptr == '\0') {
+-	  parse_err(SYNERR, "newline in string");
+-	  break;
+-	}
++        ++_ptr;
++        if (*_ptr == qchar) { ++_ptr; break; }
++        if (*_ptr == '\\')  ++_ptr;
++        if (*_ptr == '\n' || *_ptr == '\0') {
++          parse_err(SYNERR, "newline in string");
++          break;
++        }
+       }
+       next = _ptr + 1;
+     }
+@@ -4503,7 +4500,7 @@
+   // if ( _curchar == '\n' ) {
+   //   next_line();
+   // }
+-} 
++}
+ 
+ //---------------------------next_char_or_line---------------------------------
+ void ADLParser::next_char_or_line() {
+@@ -4514,11 +4511,11 @@
+     _ptr = _curline;
+     _curchar = *_ptr;  // maintain invariant
+   }
+-} 
++}
+ 
+ //---------------------------next_line-----------------------------------------
+ void ADLParser::next_line() {
+-  _curline = _buf.get_line(); _linenum++; 
++  _curline = _buf.get_line(); _linenum++;
+ }
+ 
+ //-------------------------is_literal_constant---------------------------------
+@@ -4540,7 +4537,7 @@
+ //---------------------------is_hex_digit--------------------------------------
+ bool ADLParser::is_hex_digit(char digit) {
+   return ((digit >= '0') && (digit <= '9'))
+-       ||((digit >= 'a') && (digit <= 'f')) 
++       ||((digit >= 'a') && (digit <= 'f'))
+        ||((digit >= 'A') && (digit <= 'F'));
+ }
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/adlc/adlparse.hpp openjdk/hotspot/src/share/vm/adlc/adlparse.hpp
+--- openjdk6/hotspot/src/share/vm/adlc/adlparse.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/adlc/adlparse.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)adlparse.hpp	1.79 07/05/05 17:05:00 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ADLPARSE.HPP - Definitions for Architecture Description Language Parser
+@@ -219,14 +216,14 @@
+   // Return the next replacement variable identifier
+   char *get_rep_var_ident(void);
+   // Skip first '$' and make a duplicate of the string
+-  char *get_rep_var_ident_dup(void);     
++  char *get_rep_var_ident_dup(void);
+   // Return the next token given as a signed integer.
+   int   get_int(void);
+   // Return the next token, a relational operator { ==, !=, <=, >= }
+   char *get_relation_dup(void);
+ 
+-  void  get_oplist(NameList &parameters, FormDict &operands);// Parse type-operand pairs 
+-  void  get_effectlist(FormDict &effects, FormDict &operands); // Parse effect-operand pairs 
++  void  get_oplist(NameList &parameters, FormDict &operands);// Parse type-operand pairs
++  void  get_effectlist(FormDict &effects, FormDict &operands); // Parse effect-operand pairs
+   // Return the contents of a parenthesized expression.
+   // Requires initial '(' and consumes final ')', which is replaced by '\0'.
+   char *get_paren_expr(const char *description);
+@@ -273,5 +270,3 @@
+   static bool is_int_token(const char* token, int& intval);
+   static void trim(char* &token);  // trim leading & trailing spaces
+ };
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/adlc/archDesc.cpp openjdk/hotspot/src/share/vm/adlc/archDesc.cpp
+--- openjdk6/hotspot/src/share/vm/adlc/archDesc.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/adlc/archDesc.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -19,12 +19,9 @@
+ // Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ // CA 95054 USA or visit www.sun.com if you need additional information or
+ // have any questions.
+-//  
++//
+ //
+ 
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)archDesc.cpp	1.273 07/05/05 17:05:01 JVM"
+-#endif
+ 
+ // archDesc.cpp - Internal format for architecture definition
+ #include "adlc.hpp"
+@@ -35,7 +32,7 @@
+ inline char  toUpper(char lower) {
+   return (('a' <= lower && lower <= 'z') ? (lower + ('A'-'a')) : lower);
+ }
+-char *toUpper(const char *str) { 
++char *toUpper(const char *str) {
+   char *upper  = new char[strlen(str)+1];
+   char *result = upper;
+   const char *end    = str + strlen(str);
+@@ -97,7 +94,7 @@
+   const char *n       = _name.iter();
+   const char *c       = _cost.iter();
+   const char *r       = _rule.iter();
+-      
++
+   if (n && c && r) {
+     notDone = true;
+     name = n;
+@@ -135,17 +132,17 @@
+ 
+ 
+ //---------------------------MatchList Methods-------------------------------
+-bool MatchList::search(const char *opc, const char *res, const char *lch, 
+-		       const char *rch, Predicate *pr) {
++bool MatchList::search(const char *opc, const char *res, const char *lch,
++                       const char *rch, Predicate *pr) {
+   bool tmp = false;
+   if ((res == _resultStr) || (res && _resultStr && !strcmp(res, _resultStr))) {
+     if ((lch == _lchild) || (lch && _lchild && !strcmp(lch, _lchild))) {
+       if ((rch == _rchild) || (rch && _rchild && !strcmp(rch, _rchild))) {
+-	char * predStr = get_pred();
+-	char * prStr = pr?pr->_pred:NULL;
+-	if ((prStr == predStr) || (prStr && predStr && !strcmp(prStr, predStr))) {
+-	  return true;
+-	}
++        char * predStr = get_pred();
++        char * prStr = pr?pr->_pred:NULL;
++        if ((prStr == predStr) || (prStr && predStr && !strcmp(prStr, predStr))) {
++          return true;
++        }
+       }
+     }
+   }
+@@ -168,15 +165,15 @@
+ //---------------------------ArchDesc Constructor and Destructor-------------
+ 
+ ArchDesc::ArchDesc()
+-  : _globalNames(cmpstr,hashstr, Form::arena), 
+-    _globalDefs(cmpstr,hashstr, Form::arena), 
+-    _preproc_table(cmpstr,hashstr, Form::arena), 
++  : _globalNames(cmpstr,hashstr, Form::arena),
++    _globalDefs(cmpstr,hashstr, Form::arena),
++    _preproc_table(cmpstr,hashstr, Form::arena),
+     _idealIndex(cmpstr,hashstr, Form::arena),
+     _internalOps(cmpstr,hashstr, Form::arena),
+     _internalMatch(cmpstr,hashstr, Form::arena),
+     _chainRules(cmpstr,hashstr, Form::arena),
+     _cisc_spill_operand(NULL) {
+-    
++
+       // Initialize the opcode to MatchList table with NULLs
+       for( int i=0; i<_last_opcode; ++i ) {
+         _mlistab[i] = NULL;
+@@ -257,7 +254,7 @@
+ void ArchDesc::inspectOperands() {
+ 
+   // Iterate through all operands
+-  _operands.reset(); 
++  _operands.reset();
+   OperandForm *op;
+   for( ; (op = (OperandForm*)_operands.iter()) != NULL;) {
+     // Construct list of top-level operands (components)
+@@ -292,10 +289,10 @@
+ 
+     // Cost for this match
+     const char *costStr     = op->cost();
+-    const char *defaultCost = 
++    const char *defaultCost =
+       ((AttributeForm*)_globalNames[AttributeForm::_op_cost])->_attrdef;
+     const char *cost        =  costStr? costStr : defaultCost;
+-    
++
+     // Find result type for match.
+     const char *result      = op->reduce_result();
+     bool        has_root    = false;
+@@ -309,7 +306,7 @@
+ void ArchDesc::inspectInstructions() {
+ 
+   // Iterate through all instructions
+-  _instructions.reset(); 
++  _instructions.reset();
+   InstructForm *instr;
+   for( ; (instr = (InstructForm*)_instructions.iter()) != NULL; ) {
+     // Construct list of top-level operands (components)
+@@ -327,10 +324,10 @@
+ 
+     // Cost for this match
+     const char *costStr = instr->cost();
+-    const char *defaultCost = 
++    const char *defaultCost =
+       ((AttributeForm*)_globalNames[AttributeForm::_ins_cost])->_attrdef;
+     const char *cost    =  costStr? costStr : defaultCost;
+-    
++
+     // Find result type for match
+     const char *result  = instr->reduce_result();
+ 
+@@ -414,8 +411,8 @@
+ }
+ 
+ //------------------------------add_chain_rule_entry--------------------------
+-void ArchDesc::add_chain_rule_entry(const char *src, const char *cost, 
+-				    const char *result) {
++void ArchDesc::add_chain_rule_entry(const char *src, const char *cost,
++                                    const char *result) {
+   // Look-up the operation in chain rule table
+   ChainList *lst = (ChainList *)_chainRules[src];
+   if (lst == NULL) {
+@@ -433,7 +430,7 @@
+ //------------------------------build_chain_rule-------------------------------
+ void ArchDesc::build_chain_rule(OperandForm *oper) {
+   MatchRule     *rule;
+-  
++
+   // Check for chain rules here
+   // If this is only a chain rule
+   if ((oper->_matrule) && (oper->_matrule->_lChild == NULL) &&
+@@ -477,9 +474,9 @@
+ 
+ //------------------------------buildMatchList---------------------------------
+ // operands and instructions provide the result
+-void ArchDesc::buildMatchList(MatchRule *mrule, const char *resultStr, 
+-                              const char *rootOp, Predicate *pred, 
+-			      const char *cost) {
++void ArchDesc::buildMatchList(MatchRule *mrule, const char *resultStr,
++                              const char *rootOp, Predicate *pred,
++                              const char *cost) {
+   const char *leftstr, *rightstr;
+   MatchNode  *mnode;
+ 
+@@ -500,13 +497,13 @@
+ 
+   // Check that this will be placed appropriately in the DFA
+   if (index >= _last_opcode) {
+-    fprintf(stderr, "Invalid match rule %s <-- ( %s )\n", 
++    fprintf(stderr, "Invalid match rule %s <-- ( %s )\n",
+             resultStr ? resultStr : " ",
+             rootOp    ? rootOp    : " ");
+     assert(index < _last_opcode, "Matching item not in ideal graph\n");
+     return;
+   }
+-  
++
+ 
+   // Walk the MatchRule, generating MatchList entries for each level
+   // of the rule (each nesting of parentheses)
+@@ -530,21 +527,21 @@
+     rightstr = mnode->_internalop ? mnode->_internalop : mnode->_opType;
+   }
+   // Search for an identical matchlist entry already on the list
+-  if ((_mlistab[index] == NULL) || 
+-      (_mlistab[index] && 
++  if ((_mlistab[index] == NULL) ||
++      (_mlistab[index] &&
+        !_mlistab[index]->search(rootOp, resultStr, leftstr, rightstr, pred))) {
+     // Place this match rule at front of list
+-    MatchList *mList = 
+-      new MatchList(_mlistab[index], pred, cost, 
++    MatchList *mList =
++      new MatchList(_mlistab[index], pred, cost,
+                     rootOp, resultStr, leftstr, rightstr);
+     _mlistab[index] = mList;
+   }
+ }
+ 
+ // Recursive call for construction of match lists
+-void ArchDesc::buildMList(MatchNode *node, const char *rootOp, 
+-			  const char *resultOp, Predicate *pred, 
+-			  const char *cost) {
++void ArchDesc::buildMList(MatchNode *node, const char *rootOp,
++                          const char *resultOp, Predicate *pred,
++                          const char *cost) {
+   const char *leftstr, *rightstr;
+   const char *resultop;
+   const char *opcode;
+@@ -596,7 +593,7 @@
+   if ((_mlistab[index] == NULL) || (_mlistab[index] &&
+                                     !_mlistab[index]->search(opcode, resultop, leftstr, rightstr, pred))) {
+     // Place this match rule at front of list
+-    MatchList *mList = 
++    MatchList *mList =
+       new MatchList(_mlistab[index],pred,cost,
+                     opcode, resultop, leftstr, rightstr);
+     _mlistab[index] = mList;
+@@ -663,29 +660,29 @@
+ 
+ bool ArchDesc::verify() {
+ 
+-  if (_register) 
++  if (_register)
+     assert( _register->verify(), "Register declarations failed verification");
+   if (!_quiet_mode)
+     fprintf(stderr,"\n");
+   // fprintf(stderr,"---------------------------- Verify Operands ---------------\n");
+-  // _operands.verify();                                             
+-  // fprintf(stderr,"\n");                                           
++  // _operands.verify();
++  // fprintf(stderr,"\n");
+   // fprintf(stderr,"---------------------------- Verify Operand Classes --------\n");
+-  // _opclass.verify();                                              
+-  // fprintf(stderr,"\n");                                           
++  // _opclass.verify();
++  // fprintf(stderr,"\n");
+   // fprintf(stderr,"---------------------------- Verify Attributes  ------------\n");
+-  // _attributes.verify();                                           
+-  // fprintf(stderr,"\n");                                           
++  // _attributes.verify();
++  // fprintf(stderr,"\n");
+   if (!_quiet_mode)
+     fprintf(stderr,"---------------------------- Verify Instructions ----------------------------\n");
+-  _instructions.verify();                                         
++  _instructions.verify();
+   if (!_quiet_mode)
+-    fprintf(stderr,"\n");                                           
+-  // if ( _encode ) {                                                
++    fprintf(stderr,"\n");
++  // if ( _encode ) {
+   //   fprintf(stderr,"---------------------------- Verify Encodings --------------\n");
+   //   _encode->verify();
+   // }
+-  
++
+   //if (_pipeline) _pipeline->verify();
+ 
+   return true;
+@@ -754,7 +751,7 @@
+   va_list args;
+ 
+   va_start(args, fmt);
+-  _internal_errs += emit_msg(0, INTERNAL_ERR, 0, fmt, args);  
++  _internal_errs += emit_msg(0, INTERNAL_ERR, 0, fmt, args);
+   va_end(args);
+ 
+   _no_output = 1;
+@@ -766,7 +763,7 @@
+   va_list args;
+ 
+   va_start(args, fmt);
+-  _internal_errs += emit_msg(0, SYNERR, lineno, fmt, args);  
++  _internal_errs += emit_msg(0, SYNERR, lineno, fmt, args);
+   va_end(args);
+ 
+   _no_output = 1;
+@@ -774,9 +771,9 @@
+ 
+ //------------------------------emit_msg---------------------------------------
+ // Emit a user message, typically a warning or error
+-int ArchDesc::emit_msg(int quiet, int flag, int line, const char *fmt, 
++int ArchDesc::emit_msg(int quiet, int flag, int line, const char *fmt,
+     va_list args) {
+-  static int  last_lineno = -1; 
++  static int  last_lineno = -1;
+   int         i;
+   const char *pref;
+ 
+@@ -858,13 +855,13 @@
+ // Obtain the name of the RegMask for an InstructForm
+ const char *ArchDesc::reg_mask(InstructForm &inForm) {
+   const char *result = inForm.reduce_result();
+-  assert( result, 
++  assert( result,
+           "Did not find result operand or RegMask for this instruction");
+ 
+   // Instructions producing 'Universe' use RegMask::Empty
+   if( strcmp(result,"Universe")==0 ) {
+     return "RegMask::Empty";
+-  } 
++  }
+ 
+   // Lookup this result operand and get its register class
+   Form *form = (Form*)_globalNames[result];
+@@ -896,7 +893,7 @@
+     reg_class->_stack_or_reg = true;
+   }
+ }
+-    
++
+ 
+ // Return the type signature for the ideal operation
+ const char *ArchDesc::getIdealType(const char *idealOp) {
+@@ -927,7 +924,7 @@
+ 
+ 
+ 
+-OperandForm *ArchDesc::constructOperand(const char *ident, 
++OperandForm *ArchDesc::constructOperand(const char *ident,
+                                         bool  ideal_only) {
+   OperandForm *opForm = new OperandForm(ident, ideal_only);
+   _globalNames.Insert(ident, opForm);
+@@ -947,7 +944,7 @@
+   // Create InstructForm and assign type for each ideal instruction.
+   for ( int j = _last_machine_leaf+1; j < _last_opcode; ++j) {
+     char         *ident    = (char *)NodeClassNames[j];
+-    if(!strcmp(ident, "ConI") || !strcmp(ident, "ConP") || 
++    if(!strcmp(ident, "ConI") || !strcmp(ident, "ConP") ||
+        !strcmp(ident, "ConF") || !strcmp(ident, "ConD") ||
+        !strcmp(ident, "ConL") || !strcmp(ident, "Con" ) ||
+        !strcmp(ident, "Bool") ) {
+@@ -1011,14 +1008,14 @@
+     _globalNames.Insert(ident, eForm);
+   }
+ 
+-  // 
++  //
+   // Build mapping from ideal names to ideal indices
+   int idealIndex = 0;
+   for (idealIndex = 1; idealIndex < _last_machine_leaf; ++idealIndex) {
+     const char *idealName = NodeClassNames[idealIndex];
+     _idealIndex.Insert((void*)idealName, (void*)idealIndex);
+   }
+-  for ( idealIndex = _last_machine_leaf+1; 
++  for ( idealIndex = _last_machine_leaf+1;
+         idealIndex < _last_opcode; ++idealIndex) {
+     const char *idealName = NodeClassNames[idealIndex];
+     _idealIndex.Insert((void*)idealName, (void*)idealIndex);
+@@ -1029,17 +1026,11 @@
+ 
+ //---------------------------addSUNcopyright-------------------------------
+ // output SUN copyright info
+-void ArchDesc::addSunCopyright(FILE *fp) {
++void ArchDesc::addSunCopyright(char* legal, int size, FILE *fp) {
++  fwrite(legal, size, 1, fp);
++  fprintf(fp,"\n");
+   fprintf(fp,"// Machine Generated File.  Do Not Edit!\n");
+   fprintf(fp,"\n");
+-  fprintf(fp,"// Copyright 1997-2006 Sun Microsystems, Inc.  All rights reserved.\n");
+-  fprintf(fp,"// SUN PROPRIETARY/CONFIDENTIAL.  Use is subject to license terms.\n");
+-  fprintf(fp,"// This software is the confidential and proprietary information of Sun\n");
+-  fprintf(fp,"// Microsystems, Inc. (\"Confidential Information\").  You shall not\n");
+-  fprintf(fp,"// disclose such Confidential Information and shall use it only in\n");
+-  fprintf(fp,"// accordance with the terms of the license agreement you entered into\n");
+-  fprintf(fp,"// with Sun.\n");
+-  fprintf(fp,"//\n");
+ }
+ 
+ //---------------------------machineDependentIncludes--------------------------
+@@ -1055,7 +1046,7 @@
+   fprintf(adlfile._fp, "#include \"incls/_precompiled.incl\"\n");
+   fprintf(adlfile._fp, "#include \"incls/_%s.incl\"\n",basename);
+   fprintf(adlfile._fp, "\n");
+-  
++
+ }
+ 
+ 
+@@ -1078,9 +1069,9 @@
+           fprintf(fp, "-D%s=%s\n", flag, def);
+     else  fprintf(fp, "-U%s\n", flag);
+     fprintf(fp, "#%s %s\n",
+-	    def ? "ifndef" : "ifdef", flag);
++            def ? "ifndef" : "ifdef", flag);
+     fprintf(fp, "#  error \"%s %s be defined\"\n",
+-	    flag, def ? "must" : "must not");
++            flag, def ? "must" : "must not");
+     fprintf(fp, "#endif // %s\n", flag);
+   }
+ }
+@@ -1123,7 +1114,7 @@
+          || strcmp(idealName,"CmpF") == 0
+          || strcmp(idealName,"FastLock") == 0
+          || strcmp(idealName,"FastUnlock") == 0
+-         || strcmp(idealName,"Bool") == 0 
++         || strcmp(idealName,"Bool") == 0
+          || strcmp(idealName,"Binary") == 0 ) {
+       // Removed ConI from the must_clone list.  CPUs that cannot use
+       // large constants as immediates manifest the constant as an
+@@ -1131,8 +1122,8 @@
+       // floating up out of loops.
+       must_clone = 1;
+     }
+-    fprintf(fp_cpp, "  %d%s // %s: %d\n", must_clone, 
+-      (idealIndex != (_last_opcode - 1)) ? "," : " // no trailing comma", 
++    fprintf(fp_cpp, "  %d%s // %s: %d\n", must_clone,
++      (idealIndex != (_last_opcode - 1)) ? "," : " // no trailing comma",
+       idealName, idealIndex);
+   }
+   // Finish defining table
+diff -ruN openjdk6/hotspot/src/share/vm/adlc/archDesc.hpp openjdk/hotspot/src/share/vm/adlc/archDesc.hpp
+--- openjdk6/hotspot/src/share/vm/adlc/archDesc.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/adlc/archDesc.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)archDesc.hpp	1.133 07/05/05 17:04:59 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Definitions for Error Flags
+@@ -75,18 +72,18 @@
+   MatchList(MatchList *nxt, Predicate *prd): _next(nxt), _pred(prd), _cost(NULL){
+     _resultStr = _lchild = _rchild = _opcode = NULL; }
+ 
+-  MatchList(MatchList *nxt, Predicate *prd, const char *cost, 
+-	    const char *opcode, const char *resultStr, const char *lchild, 
+-	    const char *rchild)
+-    : _next(nxt), _pred(prd), _cost(cost), _opcode(opcode), 
++  MatchList(MatchList *nxt, Predicate *prd, const char *cost,
++            const char *opcode, const char *resultStr, const char *lchild,
++            const char *rchild)
++    : _next(nxt), _pred(prd), _cost(cost), _opcode(opcode),
+       _resultStr(resultStr), _lchild(lchild), _rchild(rchild) { }
+ 
+   MatchList  *get_next(void)  { return _next; }
+   char       *get_pred(void)  { return (_pred?_pred->_pred:NULL); }
+   Predicate  *get_pred_obj(void)  { return _pred; }
+   const char *get_cost(void) { return _cost == NULL ? "0" :_cost; }
+-  bool        search(const char *opc, const char *res, const char *lch, 
+-		    const char *rch, Predicate *pr);
++  bool        search(const char *opc, const char *res, const char *lch,
++                    const char *rch, Predicate *pr);
+ 
+   void        dump();
+   void        output(FILE *fp);
+@@ -117,17 +114,17 @@
+ 
+   MatchList    *_mlistab[_last_opcode]; // Array of MatchLists
+ 
+-  // The Architecture Description identifies which user-defined operand can be used 
++  // The Architecture Description identifies which user-defined operand can be used
+   // to access [stack_pointer + offset]
+   OperandForm  *_cisc_spill_operand;
+ 
+   // Methods for outputting the DFA
+   void gen_match(FILE *fp, MatchList &mlist, ProductionState &status, Dict &operands_chained_from);
+-  void chain_rule(FILE *fp, const char *indent, const char *ideal, 
+-		  const Expr *icost, const char *irule, 
++  void chain_rule(FILE *fp, const char *indent, const char *ideal,
++                  const Expr *icost, const char *irule,
+                   Dict &operands_chained_from, ProductionState &status);
+   void chain_rule_c(FILE *fp, char *indent, char *ideal, char *irule);  // %%%%% TODO: remove this
+-  void expand_opclass(FILE *fp, const char *indent, const Expr *cost, 
++  void expand_opclass(FILE *fp, const char *indent, const Expr *cost,
+                       const char *result_type, ProductionState &status);
+   Expr *calc_cost(FILE *fp, const char *spaces, MatchList &mList, ProductionState &status);
+   void prune_matchlist(Dict &minimize, MatchList &mlist);
+@@ -150,7 +147,7 @@
+   int   _adlocation_debug;              // Debug Flag to use ad file locations
+   bool  _cisc_spill_debug;              // Debug Flag to see cisc-spill-instructions
+   bool  _short_branch_debug;            // Debug Flag to see short branch instructions
+-  
++
+   // Error/Warning Counts
+   int _syntax_errs;                  // Count of syntax errors
+   int _semantic_errs;                // Count of semantic errors
+@@ -225,8 +222,8 @@
+   // Helper utility that gets MatchList components from inside MatchRule
+   void check_optype(MatchRule *mrule);
+   void build_chain_rule(OperandForm *oper);
+-  void add_chain_rule_entry(const char *src, const char *cost, 
+-			    const char *result);
++  void add_chain_rule_entry(const char *src, const char *cost,
++                            const char *result);
+   const char *getMatchListIndex(MatchRule &mrule);
+   void generateMatchLists();         // Build MatchList array and populate it
+   void inspectOperands();            // Build MatchLists for all operands
+@@ -273,10 +270,10 @@
+   void build_map(OutputMap &map);
+   void buildReduceMaps(FILE *fp_hpp, FILE *fp_cpp);
+   // build flags for signaling that our machine needs this instruction cloned
+-  void buildMustCloneMap(FILE *fp_hpp, FILE *fp_cpp); 
++  void buildMustCloneMap(FILE *fp_hpp, FILE *fp_cpp);
+ 
+   // output SUN copyright info
+-  void addSunCopyright(FILE *fp);
++  void addSunCopyright(char* legal, int size, FILE *fp);
+   // output #include declarations for machine specific files
+   void machineDependentIncludes(ADLFILE &adlfile);
+   // Output C preprocessor code to verify the backend compilation environment.
+@@ -309,7 +306,7 @@
+ 
+   // Methods to construct the MachNode class hierarchy
+   // Return the type signature for the ideal operation
+-  const char *getIdealType(const char *idealOp); 
++  const char *getIdealType(const char *idealOp);
+   // Declare and define the classes derived from MachOper and MachNode
+   void declareClasses(FILE *fp_hpp);
+   void defineClasses(FILE *fp_cpp);
+@@ -317,7 +314,7 @@
+   // Emit an ADLC message
+   void internal_err( const char *fmt, ...);
+   void syntax_err  ( int lineno, const char *fmt, ...);
+-  int  emit_msg(int quiet, int flag, int linenum, const char *fmt, 
++  int  emit_msg(int quiet, int flag, int linenum, const char *fmt,
+        va_list args);
+ 
+   // Generator for has_match_rule methods
+@@ -338,10 +335,10 @@
+ 
+ protected:
+   // build MatchList from MatchRule
+-  void buildMatchList(MatchRule *mrule, const char *resultStr, 
+-		      const char *rootOp, Predicate *pred, const char *cost);
++  void buildMatchList(MatchRule *mrule, const char *resultStr,
++                      const char *rootOp, Predicate *pred, const char *cost);
+ 
+-  void buildMList(MatchNode *node, const char *rootOp, const char *resultOp, 
++  void buildMList(MatchNode *node, const char *rootOp, const char *resultOp,
+                   Predicate *pred, const char *cost);
+ 
+   friend class ADLParser;
+@@ -361,16 +358,16 @@
+   FormDict &_globals;
+   ArchDesc &_AD;
+ public:
+-  OutputMap (FILE *decl_file, FILE *def_file, FormDict &globals, ArchDesc &AD) 
++  OutputMap (FILE *decl_file, FILE *def_file, FormDict &globals, ArchDesc &AD)
+     : _hpp(decl_file), _cpp(def_file), _globals(globals), _AD(AD) {};
+   // Access files used by this routine
+   FILE        *decl_file() { return _hpp; }
+   FILE        *def_file()  { return _cpp; }
+   // Positions in iteration that derived class will be told about
+-  enum position { BEGIN_OPERANDS, 
++  enum position { BEGIN_OPERANDS,
+                   BEGIN_OPCLASSES,
+                   BEGIN_INTERNALS,
+-                  BEGIN_INSTRUCTIONS, 
++                  BEGIN_INSTRUCTIONS,
+                   BEGIN_INST_CHAIN_RULES,
+                   END_INST_CHAIN_RULES,
+                   BEGIN_REMATERIALIZE,
+@@ -390,5 +387,3 @@
+   // Allow derived class to output name and position specific info
+   virtual void record_position(OutputMap::position place, int index) {}
+ };
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/adlc/arena.cpp openjdk/hotspot/src/share/vm/adlc/arena.cpp
+--- openjdk6/hotspot/src/share/vm/adlc/arena.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/adlc/arena.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)arena.cpp	1.16 07/05/05 17:05:00 JVM"
+-#endif
+ /*
+  * Copyright 1998-2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "adlc.hpp"
+@@ -36,8 +33,8 @@
+ }
+ 
+ Chunk::Chunk(size_t length) {
+-  _next = NULL;		// Chain on the linked list
+-  _len  = length;	// Save actual size
++  _next = NULL;         // Chain on the linked list
++  _len  = length;       // Save actual size
+ }
+ 
+ //------------------------------chop-------------------------------------------
+@@ -47,7 +44,7 @@
+     Chunk *tmp = k->_next;
+     // clear out this chunk (to detect allocation bugs)
+     memset(k, 0xBAADBABE, k->_len);
+-    free(k);			// Free chunk (was malloc'd)
++    free(k);                    // Free chunk (was malloc'd)
+     k = tmp;
+   }
+ }
+@@ -61,33 +58,33 @@
+ Arena::Arena( size_t init_size ) {
+   init_size = (init_size+3) & ~3;
+   _first = _chunk = new (init_size) Chunk(init_size);
+-  _hwm = _chunk->bottom();	// Save the cached hwm, max
++  _hwm = _chunk->bottom();      // Save the cached hwm, max
+   _max = _chunk->top();
+   set_size_in_bytes(init_size);
+-} 
++}
+ 
+ Arena::Arena() {
+   _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size);
+-  _hwm = _chunk->bottom();	// Save the cached hwm, max
++  _hwm = _chunk->bottom();      // Save the cached hwm, max
+   _max = _chunk->top();
+   set_size_in_bytes(Chunk::init_size);
+-} 
++}
+ 
+-Arena::Arena( Arena *a ) 
+-: _chunk(a->_chunk), _hwm(a->_hwm), _max(a->_max), _first(a->_first) { 
++Arena::Arena( Arena *a )
++: _chunk(a->_chunk), _hwm(a->_hwm), _max(a->_max), _first(a->_first) {
+   set_size_in_bytes(a->size_in_bytes());
+-} 
++}
+ 
+ //------------------------------used-------------------------------------------
+ // Total of all Chunks in arena
+ size_t Arena::used() const {
+   size_t sum = _chunk->_len - (_max-_hwm); // Size leftover in this Chunk
+   register Chunk *k = _first;
+-  while( k != _chunk) {		// Whilst have Chunks in a row
+-    sum += k->_len;		// Total size of this Chunk
+-    k = k->_next;		// Bump along to next Chunk
++  while( k != _chunk) {         // Whilst have Chunks in a row
++    sum += k->_len;             // Total size of this Chunk
++    k = k->_next;               // Bump along to next Chunk
+   }
+-  return sum;			// Return total consumed space.
++  return sum;                   // Return total consumed space.
+ }
+ 
+ //------------------------------grow-------------------------------------------
+@@ -96,12 +93,12 @@
+   // Get minimal required size.  Either real big, or even bigger for giant objs
+   size_t len = max(x, Chunk::size);
+ 
+-  register Chunk *k = _chunk;	// Get filled-up chunk address
++  register Chunk *k = _chunk;   // Get filled-up chunk address
+   _chunk = new (len) Chunk(len);
+ 
+-  if( k ) k->_next = _chunk;	// Append new chunk to end of linked list
++  if( k ) k->_next = _chunk;    // Append new chunk to end of linked list
+   else _first = _chunk;
+-  _hwm  = _chunk->bottom();	// Save the cached hwm, max
++  _hwm  = _chunk->bottom();     // Save the cached hwm, max
+   _max =  _chunk->top();
+   set_size_in_bytes(size_in_bytes() + len);
+   void* result = _hwm;
+@@ -112,55 +109,55 @@
+ //------------------------------calloc-----------------------------------------
+ // Allocate zeroed storage in Arena
+ void *Arena::Acalloc( size_t items, size_t x ) {
+-  size_t z = items*x;	// Total size needed
+-  void *ptr = Amalloc(z);	// Get space
+-  memset( ptr, 0, z );		// Zap space
+-  return ptr;			// Return space
++  size_t z = items*x;   // Total size needed
++  void *ptr = Amalloc(z);       // Get space
++  memset( ptr, 0, z );          // Zap space
++  return ptr;                   // Return space
+ }
+ 
+ //------------------------------realloc----------------------------------------
+-// Reallocate storage in Arena.  
++// Reallocate storage in Arena.
+ void *Arena::Arealloc( void *old_ptr, size_t old_size, size_t new_size ) {
+-  char *c_old = (char*)old_ptr;	// Handy name
++  char *c_old = (char*)old_ptr; // Handy name
+   // Stupid fast special case
+-  if( new_size <= old_size ) {	// Shrink in-place
+-    if( c_old+old_size == _hwm)	// Attempt to free the excess bytes
+-      _hwm = c_old+new_size;	// Adjust hwm
++  if( new_size <= old_size ) {  // Shrink in-place
++    if( c_old+old_size == _hwm) // Attempt to free the excess bytes
++      _hwm = c_old+new_size;    // Adjust hwm
+     return c_old;
+   }
+ 
+   // See if we can resize in-place
+-  if( (c_old+old_size == _hwm) &&	// Adjusting recent thing
+-      (c_old+new_size <= _max) ) {	// Still fits where it sits
+-    _hwm = c_old+new_size;	// Adjust hwm
+-    return c_old;		// Return old pointer
++  if( (c_old+old_size == _hwm) &&       // Adjusting recent thing
++      (c_old+new_size <= _max) ) {      // Still fits where it sits
++    _hwm = c_old+new_size;      // Adjust hwm
++    return c_old;               // Return old pointer
+   }
+ 
+   // Oops, got to relocate guts
+   void *new_ptr = Amalloc(new_size);
+   memcpy( new_ptr, c_old, old_size );
+-  Afree(c_old,old_size);	// Mostly done to keep stats accurate
++  Afree(c_old,old_size);        // Mostly done to keep stats accurate
+   return new_ptr;
+ }
+ 
+ //------------------------------reset------------------------------------------
+ // Reset this Arena to empty, and return this Arenas guts in a new Arena.
+ Arena *Arena::reset(void) {
+-  Arena *a = new Arena(this);	// New empty arena
+-  _first = _chunk = NULL;	// Normal, new-arena initialization
++  Arena *a = new Arena(this);   // New empty arena
++  _first = _chunk = NULL;       // Normal, new-arena initialization
+   _hwm = _max = NULL;
+-  return a;			// Return Arena with guts
++  return a;                     // Return Arena with guts
+ }
+ 
+ //------------------------------contains---------------------------------------
+ // Determine if pointer belongs to this Arena or not.
+ bool Arena::contains( const void *ptr ) const {
+-  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm ) 
+-    return true;		// Check for in this chunk
++  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
++    return true;                // Check for in this chunk
+   for( Chunk *c = _first; c; c = c->_next )
+     if( (void*)c->bottom() <= ptr && ptr < (void*)c->top())
+-      return true;		// Check for every chunk in Arena
+-  return false;			// Not in any Chunk, so not in Arena
++      return true;              // Check for every chunk in Arena
++  return false;                 // Not in any Chunk, so not in Arena
+ }
+ 
+ //-----------------------------------------------------------------------------
+@@ -173,4 +170,3 @@
+ void CHeapObj::operator delete(void* p){
+  free(p);
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/adlc/arena.hpp openjdk/hotspot/src/share/vm/adlc/arena.hpp
+--- openjdk6/hotspot/src/share/vm/adlc/arena.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/adlc/arena.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)arena.hpp	1.17 07/05/05 17:05:00 JVM"
+-#endif
+ /*
+  * Copyright 1998-2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // All classes in the virtual machine must be subclassed
+@@ -74,15 +71,15 @@
+   void  operator delete(void* p, size_t length);
+   Chunk(size_t length);
+ 
+-  enum { 
++  enum {
+       init_size =  1*1024,      // Size of first chunk
+       size      = 32*1024       // Default size of an Arena chunk (following the first)
+-  };	
+-  Chunk*       _next;	        // Next Chunk in list
+-  size_t       _len;		// Size of this Chunk
++  };
++  Chunk*       _next;           // Next Chunk in list
++  size_t       _len;            // Size of this Chunk
+ 
+-  void chop();			// Chop this chunk
+-  void next_chop();		// Chop next chunk
++  void chop();                  // Chop this chunk
++  void next_chop();             // Chop next chunk
+ 
+   // Boundaries of data area (possibly unused)
+   char* bottom() const { return ((char*) this) + sizeof(Chunk);  }
+@@ -97,10 +94,10 @@
+   friend class ResourceMark;
+   friend class HandleMark;
+   friend class NoHandleMark;
+-  Chunk *_first;		// First chunk
+-  Chunk *_chunk;		// current chunk
+-  char *_hwm, *_max;		// High water mark and max in current chunk
+-  void* grow(size_t x);		// Get a new Chunk of at least size x
++  Chunk *_first;                // First chunk
++  Chunk *_chunk;                // current chunk
++  char *_hwm, *_max;            // High water mark and max in current chunk
++  void* grow(size_t x);         // Get a new Chunk of at least size x
+   size_t _size_in_bytes;          // Size of arena (used for memory usage tracing)
+ public:
+   Arena();
+@@ -110,36 +107,36 @@
+   char* hwm() const             { return _hwm; }
+ 
+   // Fast allocate in the arena.  Common case is: pointer test + increment.
+-  void* Amalloc(size_t x) { 
++  void* Amalloc(size_t x) {
+ #ifdef _LP64
+-    x = (x + (8-1)) & ((unsigned)(-8)); 
++    x = (x + (8-1)) & ((unsigned)(-8));
+ #else
+-    x = (x + (4-1)) & ((unsigned)(-4)); 
++    x = (x + (4-1)) & ((unsigned)(-4));
+ #endif
+     if (_hwm + x > _max) {
+-      return grow(x); 
++      return grow(x);
+     } else {
+-      char *old = _hwm; 
+-      _hwm += x; 
+-      return old; 
++      char *old = _hwm;
++      _hwm += x;
++      return old;
+     }
+   }
+   // Further assume size is padded out to words
+   // Warning:  in LP64, Amalloc_4 is really Amalloc_8
+-  void *Amalloc_4(size_t x) { 
++  void *Amalloc_4(size_t x) {
+     assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
+     if (_hwm + x > _max) {
+       return grow(x);
+     } else {
+-      char *old = _hwm; 
+-      _hwm += x; 
+-      return old; 
++      char *old = _hwm;
++      _hwm += x;
++      return old;
+     }
+   }
+ 
+   // Fast delete in area.  Common case is: NOP (except for storage reclaimed)
+-  void Afree(void *ptr, size_t size) { 
+-    if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr;  
++  void Afree(void *ptr, size_t size) {
++    if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr;
+   }
+ 
+   void *Acalloc( size_t items, size_t x );
+@@ -154,8 +151,7 @@
+   // Total of all chunks in use (not thread-safe)
+   size_t used() const;
+ 
+-  // Total # of bytes used  
++  // Total # of bytes used
+   size_t size_in_bytes() const         {  return _size_in_bytes; }
+   void   set_size_in_bytes(size_t size)  { _size_in_bytes = size;   }
+-}; 
+-
++};
+diff -ruN openjdk6/hotspot/src/share/vm/adlc/dfa.cpp openjdk/hotspot/src/share/vm/adlc/dfa.cpp
+--- openjdk6/hotspot/src/share/vm/adlc/dfa.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/adlc/dfa.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)dfa.cpp	1.83 07/05/05 17:04:59 JVM"
+-#endif
+ /*
+  * Copyright 1997-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // DFA.CPP - Method definitions for outputting the matcher DFA from ADLC
+@@ -98,7 +95,7 @@
+   Expr           *cost_ub(const char *result);
+   void    set_cost_bounds(const char *result, const Expr *cost, bool has_state_check, bool has_cost_check);
+ 
+-  // Return the Production associated with the result, 
++  // Return the Production associated with the result,
+   // or create a new Production and insert it into the dictionary.
+   Production *getProduction(const char *result);
+ 
+@@ -116,9 +113,9 @@
+ // 1)      if (STATE__NOT_YET_VALID(EBXREGI) || _cost[EBXREGI] > c) {
+ // 2)        DFA_PRODUCTION__SET_VALID(EBXREGI, cmovI_memu_rule, c)
+ // 3)      }
+-// 
+-static void cost_check(FILE *fp, const char *spaces, 
+-		       const char *arrayIdx, const Expr *cost, const char *rule, ProductionState &status) {
++//
++static void cost_check(FILE *fp, const char *spaces,
++                       const char *arrayIdx, const Expr *cost, const char *rule, ProductionState &status) {
+   bool state_check               = false;  // true if this production needs to check validity
+   bool cost_check                = false;  // true if this production needs to check cost
+   bool cost_is_above_upper_bound = false;  // true if this production is unnecessary due to high cost
+@@ -148,7 +145,7 @@
+     fprintf(fp, "%sif (STATE__NOT_YET_VALID(%s) || _cost[%s] > %s) {\n",  spaces, arrayIdx, arrayIdx, cost->as_string());
+     state_check = true;
+     cost_check  = true;
+-  } 
++  }
+   else if( validity_check == knownInvalid ) {
+     if( debug_output ) { fprintf(fp, "%s// %s KNOWN_INVALID \n",  spaces, arrayIdx); }
+   }
+@@ -168,10 +165,10 @@
+   // no need to set State vector if our state is knownValid
+   const char *production = (validity_check == knownValid) ? dfa_production : dfa_production_set_valid;
+   fprintf(fp, "%s  %s(%s, %s_rule, %s)", spaces, production, arrayIdx, rule, cost->as_string() );
+-  if( validity_check == knownValid ) { 
+-    if( cost_is_below_lower_bound ) { fprintf(fp, "\t  // overwrites higher cost rule"); } 
+-   } 
+-   fprintf(fp, "\n"); 
++  if( validity_check == knownValid ) {
++    if( cost_is_below_lower_bound ) { fprintf(fp, "\t  // overwrites higher cost rule"); }
++   }
++   fprintf(fp, "\n");
+ 
+   // line 3)
+   if( cost_check || state_check ) {
+@@ -194,11 +191,11 @@
+ // Macro equivalent to: _kids[0]->valid(FOO) && _kids[1]->valid(BAR)
+ //
+ static void child_test(FILE *fp, MatchList &mList) {
+-  if( mList._lchild )		// If left child, check it
++  if( mList._lchild )           // If left child, check it
+     fprintf(fp, "STATE__VALID_CHILD(_kids[0], %s)", ArchDesc::getMachOperEnum(mList._lchild));
+   if( mList._lchild && mList._rchild )      // If both, add the "&&"
+     fprintf(fp, " && " );
+-  if( mList._rchild )		// If right child, check it
++  if( mList._rchild )           // If right child, check it
+     fprintf(fp, "STATE__VALID_CHILD(_kids[1], %s)", ArchDesc::getMachOperEnum(mList._rchild));
+ }
+ 
+@@ -280,8 +277,8 @@
+ 
+ //---------------------------expand_opclass------------------------------------
+ // Chain from one result_type to all other members of its operand class
+-void ArchDesc::expand_opclass(FILE *fp, const char *indent, const Expr *cost, 
+-			      const char *result_type, ProductionState &status) {
++void ArchDesc::expand_opclass(FILE *fp, const char *indent, const Expr *cost,
++                              const char *result_type, ProductionState &status) {
+   const Form *form = _globalNames[result_type];
+   OperandForm *op = form ? form->is_operand() : NULL;
+   if( op && op->_classes.count() > 0 ) {
+@@ -290,7 +287,7 @@
+     op->_classes.reset();
+     const char *oclass;
+     // Expr *cCost = new Expr(cost);
+-    while( (oclass = op->_classes.iter()) != NULL ) 
++    while( (oclass = op->_classes.iter()) != NULL )
+       // Check against other match costs, and update cost & rule vectors
+       cost_check(fp, indent, ArchDesc::getMachOperEnum(oclass), cost, result_type, status);
+   }
+@@ -298,7 +295,7 @@
+ 
+ //---------------------------chain_rule----------------------------------------
+ // Starting at 'operand', check if we know how to automatically generate other results
+-void ArchDesc::chain_rule(FILE *fp, const char *indent, const char *operand, 
++void ArchDesc::chain_rule(FILE *fp, const char *indent, const char *operand,
+      const Expr *icost, const char *irule, Dict &operands_chained_from,  ProductionState &status) {
+ 
+   // Check if we have already generated chains from this starting point
+@@ -335,7 +332,7 @@
+           // printf("   result=%s cost=%s rule=%s\n", result, total_cost, rule);
+           // Check against other match costs, and update cost & rule vectors
+           cost_check(fp, indent, ArchDesc::getMachOperEnum(result), total_cost, rule, status);
+-	  chain_rule(fp, indent, result, total_cost, rule, operands_chained_from, status);
++          chain_rule(fp, indent, result, total_cost, rule, operands_chained_from, status);
+         }
+ 
+         // If this is a member of an operand class, update class cost & rule
+@@ -375,7 +372,7 @@
+   ProductionState status(Form::arena);
+ 
+   // Output the start of the DFA method into the output file
+-  
++
+   fprintf(fp, "\n");
+   fprintf(fp, "//------------------------- Source -----------------------------------------\n");
+   // Do not put random source code into the DFA.
+@@ -389,18 +386,18 @@
+   // #define DFA_PRODUCTION(result, rule, cost)\
+   //   _cost[ (result) ] = cost; _rule[ (result) ] = rule;
+   fprintf(fp, "#define %s(result, rule, cost)\\\n", dfa_production);
+-  fprintf(fp, "  _cost[ (result) ] = cost; _rule[ (result) ] = rule;\n");     
++  fprintf(fp, "  _cost[ (result) ] = cost; _rule[ (result) ] = rule;\n");
+   fprintf(fp, "\n");
+ 
+   // #define DFA_PRODUCTION__SET_VALID(result, rule, cost)\
+   //     DFA_PRODUCTION( (result), (rule), (cost) ); STATE__SET_VALID( (result) );
+   fprintf(fp, "#define %s(result, rule, cost)\\\n", dfa_production_set_valid);
+-  fprintf(fp, "  %s( (result), (rule), (cost) ); STATE__SET_VALID( (result) );\n", dfa_production);     
++  fprintf(fp, "  %s( (result), (rule), (cost) ); STATE__SET_VALID( (result) );\n", dfa_production);
+   fprintf(fp, "\n");
+ 
+   fprintf(fp, "//------------------------- DFA --------------------------------------------\n");
+-  
+-  fprintf(fp, 
++
++  fprintf(fp,
+ "// DFA is a large switch with case statements for each ideal opcode encountered\n"
+ "// in any match rule in the ad file.  Each case has a series of if's to handle\n"
+ "// the match or fail decisions.  The matches test the cost function of that\n"
+@@ -422,7 +419,7 @@
+       gen_dfa_state_body(fp, minimize, status, operands_chained_from, i);
+       // End of routine
+       fprintf(fp, "}\n");
+-    }      
++    }
+   }
+   fprintf(fp, "bool State::DFA");
+   fprintf(fp, "(int opcode, const Node *n) {\n");
+@@ -471,7 +468,7 @@
+   static void check_index(int index) { assert( 0 <= index && index < count, "Invalid index"); }
+ 
+   // Confirm that this is a separate sub-expression.
+-  // Only need to catch common cases like " ... && shared ..." 
++  // Only need to catch common cases like " ... && shared ..."
+   // and avoid hazardous ones like "...->shared"
+   static bool valid_loc(char *pred, char *shared) {
+     // start of predicate is valid
+@@ -503,7 +500,7 @@
+ 
+   static bool        found(int index){ check_index(index); return _found[index]; }
+   static void    set_found(int index, bool val) { check_index(index); _found[index] = val; }
+-  static void  reset_found() { 
++  static void  reset_found() {
+     for( int i = 0; i < count; ++i ) { _found[i] = false; }
+   };
+ 
+@@ -526,8 +523,8 @@
+       }
+     }
+   }
+-  
+-  // If the Predicate contains a common sub-expression, replace the Predicate's 
++
++  // If the Predicate contains a common sub-expression, replace the Predicate's
+   // string with one that uses the variable name.
+   static bool cse_predicate(Predicate* predicate, const char *shared_pred, const char *shared_pred_var) {
+     bool result = false;
+@@ -546,7 +543,7 @@
+         strncpy(shared_pred_loc, shared_pred_var, strlen(shared_pred_var));
+       }
+       // Install new predicate
+-      if( new_pred != pred ) { 
++      if( new_pred != pred ) {
+         predicate->_pred = new_pred;
+         result = true;
+       }
+@@ -573,7 +570,7 @@
+ const char*  dfa_shared_preds::_pred[dfa_shared_preds::count]  = { "n->get_int()", "Compile::current()->select_24_bit_instr()" };
+ 
+ 
+-void ArchDesc::gen_dfa_state_body(FILE* fp, Dict &minimize, ProductionState &status, Dict &operands_chained_from, int i) {  
++void ArchDesc::gen_dfa_state_body(FILE* fp, Dict &minimize, ProductionState &status, Dict &operands_chained_from, int i) {
+   // Start the body of each Op_XXX sub-dfa with a clean state.
+   status.initialize();
+ 
+@@ -604,11 +601,11 @@
+   } while(mList != NULL);
+   // Fill in any chain rules which add instructions
+   // These can generate their own chains as well.
+-  operands_chained_from.Clear();  // 
++  operands_chained_from.Clear();  //
+   if( debug_output1 ) { fprintf(fp, "// top level chain rules for: %s \n", (char *)NodeClassNames[i]); } // %%%%% Explanation
+   const Expr *zeroCost = new Expr("0");
+   chain_rule(fp, "   ", (char *)NodeClassNames[i], zeroCost, "Invalid",
+-	     operands_chained_from, status);      
++             operands_chained_from, status);
+ }
+ 
+ 
+@@ -817,11 +814,11 @@
+ }
+ 
+ // Return # of name-Expr pairs in dict
+-int ExprDict::Size(void) const { 
+-  return _expr.Size(); 
++int ExprDict::Size(void) const {
++  return _expr.Size();
+ }
+ 
+-// define inserts the given key-value pair into the dictionary, 
++// define inserts the given key-value pair into the dictionary,
+ // and records the name in order for later output, ...
+ const Expr  *ExprDict::define(const char *name, Expr *expr) {
+   const Expr *old_expr = (*this)[name];
+@@ -839,7 +836,7 @@
+   return (Expr*)_expr.Insert((void*)name, (void*)expr);
+ }
+ 
+-// Finds the value of a given key; or NULL if not found.  
++// Finds the value of a given key; or NULL if not found.
+ // The dictionary is NOT changed.
+ const Expr  *ExprDict::operator [](const char *name) const {
+   return (Expr*)_expr[name];
+@@ -930,7 +927,7 @@
+   const void *x, *y = NULL;
+   for( ; iter.test(); ++iter) {
+     x = iter._key;
+-    y = iter._value;  
++    y = iter._value;
+     Production *p = (Production*)y;
+     if( p != NULL ) {
+       p->initialize();
+@@ -1022,4 +1019,3 @@
+ void ProductionState::print() {
+   _production.print(print_key, print_production);
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/adlc/dict2.cpp openjdk/hotspot/src/share/vm/adlc/dict2.cpp
+--- openjdk6/hotspot/src/share/vm/adlc/dict2.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/adlc/dict2.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)dict2.cpp	1.19 07/05/05 17:04:59 JVM"
+-#endif
+ /*
+  * Copyright 1998-2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Dictionaries - An Abstract Data Type
+@@ -35,20 +32,20 @@
+ //------------------------------data-----------------------------------------
+ // String hash tables
+ #define MAXID 20
+-static char initflag = 0;	// True after 1st initialization
++static char initflag = 0;       // True after 1st initialization
+ static char shft[MAXID] = {1,2,3,4,5,6,7,1,2,3,4,5,6,7,1,2,3,4,5,6};
+ static short xsum[MAXID + 1];
+ 
+ //------------------------------bucket---------------------------------------
+ class bucket {
+ public:
+-  int          _cnt, _max;	// Size of bucket
+-  const void **_keyvals;	// Array of keys and values
++  int          _cnt, _max;      // Size of bucket
++  const void **_keyvals;        // Array of keys and values
+ };
+ 
+ //------------------------------Dict-----------------------------------------
+ // The dictionary is kept has a hash table.  The hash table is a even power
+-// of two, for nice modulo operations.	Each bucket in the hash table points
++// of two, for nice modulo operations.  Each bucket in the hash table points
+ // to a linear list of key-value pairs; each key & value is just a (void *).
+ // The list starts with a count.  A hash lookup finds the list head, then a
+ // simple linear scan finds the key.  If the table gets too full, it's
+@@ -67,31 +64,31 @@
+   int i;
+ 
+   // Precompute table of null character hashes
+-  if( !initflag ) {		// Not initializated yet?
+-    xsum[0] = (1<<shft[0])+1;	// Initialize
++  if( !initflag ) {             // Not initializated yet?
++    xsum[0] = (1<<shft[0])+1;   // Initialize
+     for( i = 1; i < MAXID + 1; i++) {
+       xsum[i] = (1<<shft[i])+1+xsum[i-1];
+     }
+-    initflag = 1;		// Never again
++    initflag = 1;               // Never again
+   }
+ 
+-  _size = 16;  			// Size is a power of 2
+-  _cnt = 0;			// Dictionary is empty
++  _size = 16;                   // Size is a power of 2
++  _cnt = 0;                     // Dictionary is empty
+   _bin = (bucket*)_arena->Amalloc_4(sizeof(bucket)*_size);
+   memset(_bin,0,sizeof(bucket)*_size);
+ }
+ 
+ //------------------------------~Dict------------------------------------------
+ // Delete an existing dictionary.
+-Dict::~Dict() {  
++Dict::~Dict() {
+ }
+ 
+ //------------------------------Clear----------------------------------------
+ // Zap to empty; ready for re-use
+ void Dict::Clear() {
+-  _cnt = 0;			// Empty contents
++  _cnt = 0;                     // Empty contents
+   for( int i=0; i<_size; i++ )
+-    _bin[i]._cnt = 0;		// Empty buckets, but leave allocated
++    _bin[i]._cnt = 0;           // Empty buckets, but leave allocated
+   // Leave _size & _bin alone, under the assumption that dictionary will
+   // grow to this size again.
+ }
+@@ -104,18 +101,18 @@
+ // lo list depending on the value of the bit.
+ void Dict::doubhash(void) {
+   int oldsize = _size;
+-  _size <<= 1;			// Double in size
++  _size <<= 1;                  // Double in size
+   _bin = (bucket*)_arena->Arealloc( _bin, sizeof(bucket)*oldsize, sizeof(bucket)*_size );
+   memset( &_bin[oldsize], 0, oldsize*sizeof(bucket) );
+   // Rehash things to spread into new table
+   for( int i=0; i < oldsize; i++) { // For complete OLD table do
+-    bucket *b = &_bin[i];	// Handy shortcut for _bin[i]
+-    if( !b->_keyvals ) continue;	// Skip empties fast
++    bucket *b = &_bin[i];       // Handy shortcut for _bin[i]
++    if( !b->_keyvals ) continue;        // Skip empties fast
+ 
+     bucket *nb = &_bin[i+oldsize];  // New bucket shortcut
+-    int j = b->_max;		    // Trim new bucket to nearest power of 2 
++    int j = b->_max;                // Trim new bucket to nearest power of 2
+     while( j > b->_cnt ) j >>= 1;   // above old bucket _cnt
+-    if( !j ) j = 1;		// Handle zero-sized buckets
++    if( !j ) j = 1;             // Handle zero-sized buckets
+     nb->_max = j<<1;
+     // Allocate worst case space for key-value pairs
+     nb->_keyvals = (const void**)_arena->Amalloc_4( sizeof(void *)*nb->_max*2 );
+@@ -124,13 +121,13 @@
+     for( j=0; j<b->_cnt; j++ ) {  // Rehash all keys in this bucket
+       const void *key = b->_keyvals[j+j];
+       if( (_hash( key ) & (_size-1)) != i ) { // Moving to hi bucket?
+-	nb->_keyvals[nbcnt+nbcnt] = key;
+-	nb->_keyvals[nbcnt+nbcnt+1] = b->_keyvals[j+j+1];
+-	nb->_cnt = nbcnt = nbcnt+1;
+-	b->_cnt--;		// Remove key/value from lo bucket
+-	b->_keyvals[j+j  ] = b->_keyvals[b->_cnt+b->_cnt  ];
+-	b->_keyvals[j+j+1] = b->_keyvals[b->_cnt+b->_cnt+1];
+-	j--;			// Hash compacted element also
++        nb->_keyvals[nbcnt+nbcnt] = key;
++        nb->_keyvals[nbcnt+nbcnt+1] = b->_keyvals[j+j+1];
++        nb->_cnt = nbcnt = nbcnt+1;
++        b->_cnt--;              // Remove key/value from lo bucket
++        b->_keyvals[j+j  ] = b->_keyvals[b->_cnt+b->_cnt  ];
++        b->_keyvals[j+j+1] = b->_keyvals[b->_cnt+b->_cnt+1];
++        j--;                    // Hash compacted element also
+       }
+     } // End of for all key-value pairs in bucket
+   } // End of for all buckets
+@@ -153,19 +150,19 @@
+ //------------------------------Dict-----------------------------------------
+ // Deep copy a dictionary.
+ Dict &Dict::operator =( const Dict &d ) {
+-  if( _size < d._size ) {	// If must have more buckets
++  if( _size < d._size ) {       // If must have more buckets
+     _arena = d._arena;
+     _bin = (bucket*)_arena->Arealloc( _bin, sizeof(bucket)*_size, sizeof(bucket)*d._size );
+     memset( &_bin[_size], 0, (d._size-_size)*sizeof(bucket) );
+     _size = d._size;
+   }
+   for( int i=0; i<_size; i++ ) // All buckets are empty
+-    _bin[i]._cnt = 0;		// But leave bucket allocations alone
++    _bin[i]._cnt = 0;           // But leave bucket allocations alone
+   _cnt = d._cnt;
+   *(Hash*)(&_hash) = d._hash;
+   *(CmpKey*)(&_cmp) = d._cmp;
+   for(int k=0; k<_size; k++ ) {
+-    bucket *b = &d._bin[k];	// Shortcut to source bucket
++    bucket *b = &d._bin[k];     // Shortcut to source bucket
+     for( int j=0; j<b->_cnt; j++ )
+       Insert( b->_keyvals[j+j], b->_keyvals[j+j+1] );
+   }
+@@ -173,55 +170,55 @@
+ }
+ 
+ //------------------------------Insert---------------------------------------
+-// Insert or replace a key/value pair in the given dictionary.	If the
++// Insert or replace a key/value pair in the given dictionary.  If the
+ // dictionary is too full, it's size is doubled.  The prior value being
+-// replaced is returned (NULL if this is a 1st insertion of that key).	If
++// replaced is returned (NULL if this is a 1st insertion of that key).  If
+ // an old value is found, it's swapped with the prior key-value pair on the
+ // list.  This moves a commonly searched-for value towards the list head.
+ const void *Dict::Insert(const void *key, const void *val) {
+-  int hash = _hash( key );	// Get hash key
+-  int i = hash & (_size-1);	// Get hash key, corrected for size
+-  bucket *b = &_bin[i];		// Handy shortcut
++  int hash = _hash( key );      // Get hash key
++  int i = hash & (_size-1);     // Get hash key, corrected for size
++  bucket *b = &_bin[i];         // Handy shortcut
+   for( int j=0; j<b->_cnt; j++ )
+     if( !_cmp(key,b->_keyvals[j+j]) ) {
+       const void *prior = b->_keyvals[j+j+1];
+-      b->_keyvals[j+j  ] = key;	// Insert current key-value
++      b->_keyvals[j+j  ] = key; // Insert current key-value
+       b->_keyvals[j+j+1] = val;
+-      return prior;		// Return prior
+-    } 
++      return prior;             // Return prior
++    }
+ 
+-  if( ++_cnt > _size ) {	// Hash table is full
+-    doubhash();			// Grow whole table if too full
+-    i = hash & (_size-1);	// Rehash
+-    b = &_bin[i];		// Handy shortcut
++  if( ++_cnt > _size ) {        // Hash table is full
++    doubhash();                 // Grow whole table if too full
++    i = hash & (_size-1);       // Rehash
++    b = &_bin[i];               // Handy shortcut
+   }
+-  if( b->_cnt == b->_max ) {	// Must grow bucket?
++  if( b->_cnt == b->_max ) {    // Must grow bucket?
+     if( !b->_keyvals ) {
+-      b->_max = 2;		// Initial bucket size
++      b->_max = 2;              // Initial bucket size
+       b->_keyvals = (const void**)_arena->Amalloc_4( sizeof(void *)*b->_max*2 );
+     } else {
+       b->_keyvals = (const void**)_arena->Arealloc( b->_keyvals, sizeof(void *)*b->_max*2, sizeof(void *)*b->_max*4 );
+-      b->_max <<= 1;		// Double bucket
++      b->_max <<= 1;            // Double bucket
+     }
+   }
+   b->_keyvals[b->_cnt+b->_cnt  ] = key;
+   b->_keyvals[b->_cnt+b->_cnt+1] = val;
+   b->_cnt++;
+-  return NULL;			// Nothing found prior
++  return NULL;                  // Nothing found prior
+ }
+ 
+ //------------------------------Delete---------------------------------------
+ // Find & remove a value from dictionary. Return old value.
+ const void *Dict::Delete(void *key) {
+-  int i = _hash( key ) & (_size-1);	// Get hash key, corrected for size
+-  bucket *b = &_bin[i];		// Handy shortcut
++  int i = _hash( key ) & (_size-1);     // Get hash key, corrected for size
++  bucket *b = &_bin[i];         // Handy shortcut
+   for( int j=0; j<b->_cnt; j++ )
+     if( !_cmp(key,b->_keyvals[j+j]) ) {
+       const void *prior = b->_keyvals[j+j+1];
+-      b->_cnt--;		// Remove key/value from lo bucket
++      b->_cnt--;                // Remove key/value from lo bucket
+       b->_keyvals[j+j  ] = b->_keyvals[b->_cnt+b->_cnt  ];
+       b->_keyvals[j+j+1] = b->_keyvals[b->_cnt+b->_cnt+1];
+-      _cnt--;			// One less thing in table
++      _cnt--;                   // One less thing in table
+       return prior;
+     }
+   return NULL;
+@@ -231,10 +228,10 @@
+ // Find a key-value pair in the given dictionary.  If not found, return NULL.
+ // If found, move key-value pair towards head of list.
+ const void *Dict::operator [](const void *key) const {
+-  int i = _hash( key ) & (_size-1);	// Get hash key, corrected for size
+-  bucket *b = &_bin[i];		// Handy shortcut
++  int i = _hash( key ) & (_size-1);     // Get hash key, corrected for size
++  bucket *b = &_bin[i];         // Handy shortcut
+   for( int j=0; j<b->_cnt; j++ )
+-    if( !_cmp(key,b->_keyvals[j+j]) ) 
++    if( !_cmp(key,b->_keyvals[j+j]) )
+       return b->_keyvals[j+j+1];
+   return NULL;
+ }
+@@ -247,13 +244,13 @@
+   if( _cnt != d2._cnt ) return 0;
+   if( _hash != d2._hash ) return 0;
+   if( _cmp != d2._cmp ) return 0;
+-  for( int i=0; i < _size; i++) {	// For complete hash table do
+-    bucket *b = &_bin[i];	// Handy shortcut
++  for( int i=0; i < _size; i++) {       // For complete hash table do
++    bucket *b = &_bin[i];       // Handy shortcut
+     if( b->_cnt != d2._bin[i]._cnt ) return 0;
+     if( memcmp(b->_keyvals, d2._bin[i]._keyvals, b->_cnt*2*sizeof(void*) ) )
+-      return 0;			// Key-value pairs must match
++      return 0;                 // Key-value pairs must match
+   }
+-  return 1;			// All match, is OK
++  return 1;                     // All match, is OK
+ }
+ 
+ 
+@@ -263,8 +260,8 @@
+   print(printvoid, printvoid);
+ }
+ void Dict::print(PrintKeyOrValue print_key, PrintKeyOrValue print_value) {
+-  for( int i=0; i < _size; i++) {	// For complete hash table do
+-    bucket *b = &_bin[i];	// Handy shortcut
++  for( int i=0; i < _size; i++) {       // For complete hash table do
++    bucket *b = &_bin[i];       // Handy shortcut
+     for( int j=0; j<b->_cnt; j++ ) {
+       print_key(  b->_keyvals[j+j  ]);
+       printf(" -> ");
+@@ -275,7 +272,7 @@
+ }
+ 
+ //------------------------------Hashing Functions----------------------------
+-// Convert string to hash key.	This algorithm implements a universal hash
++// Convert string to hash key.  This algorithm implements a universal hash
+ // function with the multipliers frozen (ok, so it's not universal).  The
+ // multipliers (and allowable characters) are all odd, so the resultant sum
+ // is odd - guarenteed not divisible by any power of two, so the hash tables
+@@ -291,8 +288,8 @@
+   register const char *s = (const char *)t;
+ 
+   while( ((c = s[k]) != '\0') && (k < MAXID-1) ) { // Get characters till nul
+-    c = (c<<1)+1;		// Characters are always odd!
+-    sum += c + (c<<shft[k++]);	// Universal hash function
++    c = (c<<1)+1;               // Characters are always odd!
++    sum += c + (c<<shft[k++]);  // Universal hash function
+   }
+   assert( k < (MAXID + 1), "Exceeded maximum name length");
+   return (int)((sum+xsum[k]) >> 1); // Hash key, un-modulo'd table size
+@@ -328,23 +325,23 @@
+ //------------------------------reset------------------------------------------
+ // Create an iterator and initialize the first variables.
+ void DictI::reset( const Dict *dict ) {
+-  _d = dict;			// The dictionary
+-  _i = (int)-1;		// Before the first bin
+-  _j = 0;			// Nothing left in the current bin
+-  ++(*this);			// Step to first real value
++  _d = dict;                    // The dictionary
++  _i = (int)-1;         // Before the first bin
++  _j = 0;                       // Nothing left in the current bin
++  ++(*this);                    // Step to first real value
+ }
+ 
+ //------------------------------next-------------------------------------------
+ // Find the next key-value pair in the dictionary, or return a NULL key and
+ // value.
+ void DictI::operator ++(void) {
+-  if( _j-- ) {			// Still working in current bin?
++  if( _j-- ) {                  // Still working in current bin?
+     _key   = _d->_bin[_i]._keyvals[_j+_j];
+     _value = _d->_bin[_i]._keyvals[_j+_j+1];
+     return;
+   }
+ 
+-  while( ++_i < _d->_size ) {	// Else scan for non-zero bucket
++  while( ++_i < _d->_size ) {   // Else scan for non-zero bucket
+     _j = _d->_bin[_i]._cnt;
+     if( !_j ) continue;
+     _j--;
+@@ -354,5 +351,3 @@
+   }
+   _key = _value = NULL;
+ }
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/adlc/dict2.hpp openjdk/hotspot/src/share/vm/adlc/dict2.hpp
+--- openjdk6/hotspot/src/share/vm/adlc/dict2.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/adlc/dict2.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)dict2.hpp	1.16 07/05/05 17:05:01 JVM"
+-#endif
+ /*
+  * Copyright 1998-2000 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #ifndef _DICT_
+@@ -45,24 +42,24 @@
+ 
+ class Dict { // Dictionary structure
+  private:
+-  class Arena *_arena;		// Where to draw storage from
+-  class bucket *_bin;		// Hash table is array of buckets
+-  int _size;			// Size (# of slots) in hash table
+-  int _cnt;			// Number of key-value pairs in hash table
+-  const Hash _hash;		// Hashing function
+-  const CmpKey _cmp;		// Key comparison function
+-  void doubhash( void );	// Double hash table size
++  class Arena *_arena;          // Where to draw storage from
++  class bucket *_bin;           // Hash table is array of buckets
++  int _size;                    // Size (# of slots) in hash table
++  int _cnt;                     // Number of key-value pairs in hash table
++  const Hash _hash;             // Hashing function
++  const CmpKey _cmp;            // Key comparison function
++  void doubhash( void );        // Double hash table size
+ 
+  public:
+-  friend class DictI;		 // Friendly iterator function
++  friend class DictI;            // Friendly iterator function
+ 
+   // cmp is a key comparision routine.  hash is a routine to hash a key.
+   Dict( CmpKey cmp, Hash hash );
+   Dict( CmpKey cmp, Hash hash, Arena *arena );
+-  void init(); 
++  void init();
+   ~Dict();
+ 
+-  Dict( const Dict & );		// Deep-copy guts
++  Dict( const Dict & );         // Deep-copy guts
+   Dict &operator =( const Dict & );
+ 
+   // Zap to empty; ready for re-use
+@@ -74,9 +71,9 @@
+   // Insert inserts the given key-value pair into the dictionary.  The prior
+   // value of the key is returned; NULL if the key was not previously defined.
+   const void *Insert(const void *key, const void *val); // A new key-value
+-  const void *Delete(void *key);	                // Delete & return old
++  const void *Delete(void *key);                        // Delete & return old
+ 
+-  // Find finds the value of a given key; or NULL if not found.  
++  // Find finds the value of a given key; or NULL if not found.
+   // The dictionary is NOT changed.
+   const void *operator [](const void *key) const;  // Do a lookup
+ 
+@@ -91,7 +88,7 @@
+ };
+ 
+ // Hashing functions
+-int hashstr(const void *s);	   // Nice string hash
++int hashstr(const void *s);        // Nice string hash
+ // Slimey cheap hash function; no guarenteed performance.  Better than the
+ // default for pointers, especially on MS-DOS machines.
+ int hashptr(const void *key);
+@@ -105,7 +102,7 @@
+ 
+ //------------------------------Iteration--------------------------------------
+ // The class of dictionary iterators.  Fails in the presences of modifications
+-// to the dictionary during iteration (including searches).  
++// to the dictionary during iteration (including searches).
+ // Usage:  for( DictI i(dict); i.test(); ++i ) { body = i.key; body = i.value;}
+ class DictI {
+  private:
+@@ -121,5 +118,3 @@
+ };
+ 
+ #endif // _DICT_
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/adlc/filebuff.cpp openjdk/hotspot/src/share/vm/adlc/filebuff.cpp
+--- openjdk6/hotspot/src/share/vm/adlc/filebuff.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/adlc/filebuff.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)filebuff.cpp	1.30 07/05/05 17:05:01 JVM"
+-#endif
+ /*
+  * Copyright 1997-2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // FILEBUFF.CPP - Routines for handling a parser file buffer
+@@ -51,18 +48,18 @@
+     exit(1);                    // Exit on allocation failure
+   }
+   *_bigbuf = '\n';               // Lead with a sentinal newline
+-  _buf = _bigbuf+1;			// Skip sentinal
+-  _bufmax = _buf;         	// Buffer is empty
++  _buf = _bigbuf+1;                     // Skip sentinal
++  _bufmax = _buf;               // Buffer is empty
+   _bufeol = _bigbuf;              // _bufeol points at sentinal
+   _filepos = -1;                 // filepos is in sync with _bufeol
+-  _bufoff = _offset = 0L; 	// Offset at file start
++  _bufoff = _offset = 0L;       // Offset at file start
+ 
+   _bufmax += fread(_buf, 1, _bufferSize-2, _fp->_fp); // Fill buffer & set end value
+   if (_bufmax == _buf) {
+     file_error(SEMERR, 0, "File read error, no input read\n");
+     exit(1);                     // Exit on read error
+   }
+-  *_bufmax = '\n';               // End with a sentinal new-line 
++  *_bufmax = '\n';               // End with a sentinal new-line
+   *(_bufmax+1) = '\0';           // Then end with a sentinal NULL
+ }
+ 
+@@ -82,7 +79,7 @@
+   retval = ++_bufeol;      // return character following end of previous line
+   if (*retval == '\0') return NULL; // Check for EOF sentinal
+   // Search for newline character which must end each line
+-  for(_filepos++; *_bufeol != '\n'; _bufeol++) 
++  for(_filepos++; *_bufeol != '\n'; _bufeol++)
+     _filepos++;                    // keep filepos in sync with _bufeol
+   // _bufeol & filepos point at end of current line, so return pointer to start
+   return retval;
+@@ -91,9 +88,9 @@
+ //------------------------------FileBuffRegion---------------------------------
+ // Create a new region in a FileBuff.
+ FileBuffRegion::FileBuffRegion( FileBuff* bufr, int soln, int ln,
+-				int off, int len)
++                                int off, int len)
+ : _bfr(bufr), _sol(soln), _line(ln), _offset(off), _length(len) {
+-  _next = NULL;			// No chained regions
++  _next = NULL;                 // No chained regions
+ }
+ 
+ //------------------------------~FileBuffRegion--------------------------------
+@@ -105,7 +102,7 @@
+ //------------------------------copy-------------------------------------------
+ // Deep copy a FileBuffRegion
+ FileBuffRegion *FileBuffRegion::copy() {
+-  if( !this ) return NULL;	// The empty buffer region
++  if( !this ) return NULL;      // The empty buffer region
+   FileBuffRegion *br = new FileBuffRegion(_bfr,_sol,_line,_offset,_length);
+   if( _next ) br->_next = _next->copy();
+   return br;
+@@ -117,42 +114,42 @@
+ // Since the buffer regions are sorted by file offset, this is a varient of a
+ // "sorted-merge" running in linear time.
+ FileBuffRegion *FileBuffRegion::merge( FileBuffRegion *br ) {
+-  if( !br ) return this;	// Merging nothing
+-  if( !this ) return br;	// Merging into nothing
++  if( !br ) return this;        // Merging nothing
++  if( !this ) return br;        // Merging into nothing
+ 
+   assert( _bfr == br->_bfr, "" );     // Check for pointer-equivalent buffers
+ 
+-  if( _offset < br->_offset ) {	// "this" starts before "br"
++  if( _offset < br->_offset ) { // "this" starts before "br"
+     if( _offset+_length < br->_offset ) { // "this" ends before "br"
+       if( _next ) _next->merge( br );    // Merge with remainder of list
+-      else _next = br;		       // No more in this list; just append.
+-    } else {			       // Regions overlap.
++      else _next = br;                 // No more in this list; just append.
++    } else {                           // Regions overlap.
+       int l = br->_offset + br->_length - _offset;
+       if( l > _length ) _length = l;     // Pick larger region
+       FileBuffRegion *nr = br->_next;     // Get rest of region
+-      br->_next = NULL;		// Remove indication of rest of region
+-      delete br;		// Delete this region (it's been subsumed).
+-      if( nr ) merge( nr );	// Merge with rest of region
+-    }				// End of if regions overlap or not.
+-  } else {			// "this" starts after "br"
++      br->_next = NULL;         // Remove indication of rest of region
++      delete br;                // Delete this region (it's been subsumed).
++      if( nr ) merge( nr );     // Merge with rest of region
++    }                           // End of if regions overlap or not.
++  } else {                      // "this" starts after "br"
+     if( br->_offset+br->_length < _offset ) {    // "br" ends before "this"
+       FileBuffRegion *nr = new FileBuffRegion(_bfr,_sol,_line,_offset,_length);
+-      nr->_next = _next;		// Structure copy "this" guy to "nr"
+-      *this = *br;		// Structure copy "br" over "this".
+-      br->_next = NULL;		// Remove indication of rest of region
+-      delete br;		// Delete this region (it's been copied)
+-      merge( nr );		// Finish merging
+-    } else {			// Regions overlap.
++      nr->_next = _next;                // Structure copy "this" guy to "nr"
++      *this = *br;              // Structure copy "br" over "this".
++      br->_next = NULL;         // Remove indication of rest of region
++      delete br;                // Delete this region (it's been copied)
++      merge( nr );              // Finish merging
++    } else {                    // Regions overlap.
+       int l = _offset + _length - br->_offset;
+-      if( l > _length ) _length = l;	// Pick larger region
+-      _offset = br->_offset;		// Start with earlier region
+-      _sol = br->_sol;			// Also use earlier line start
+-      _line = br->_line;			// Also use earlier line
+-      FileBuffRegion *nr = br->_next;	// Get rest of region
+-      br->_next = NULL;		// Remove indication of rest of region
+-      delete br;		// Delete this region (it's been subsumed).
+-      if( nr ) merge( nr );	// Merge with rest of region
+-    }				// End of if regions overlap or not.
++      if( l > _length ) _length = l;    // Pick larger region
++      _offset = br->_offset;            // Start with earlier region
++      _sol = br->_sol;                  // Also use earlier line start
++      _line = br->_line;                        // Also use earlier line
++      FileBuffRegion *nr = br->_next;   // Get rest of region
++      br->_next = NULL;         // Remove indication of rest of region
++      delete br;                // Delete this region (it's been subsumed).
++      if( nr ) merge( nr );     // Merge with rest of region
++    }                           // End of if regions overlap or not.
+   }
+   return this;
+ }
+@@ -160,11 +157,11 @@
+ //------------------------------expandtab--------------------------------------
+ static int expandtab( ostream &os, int off, char c, char fill1, char fill2 ) {
+   if( c == '\t' ) {             // Tab?
+-    do os << fill1;		// Expand the tab; Output space
+-    while( (++off) & 7 );	// Expand to tab stop
+-  } else {			// Normal character
+-    os << fill2;		// Display normal character
+-    off++;			// Increment "cursor" offset
++    do os << fill1;             // Expand the tab; Output space
++    while( (++off) & 7 );       // Expand to tab stop
++  } else {                      // Normal character
++    os << fill2;                // Display normal character
++    off++;                      // Increment "cursor" offset
+   }
+   return off;
+ }
+@@ -173,112 +170,112 @@
+ // Print and highlite a region of a line.  Return the amount of highliting left
+ // to do (i.e. highlite length minus length of line).
+ static int printline( ostream& os, const char *fname, int line,
+-			const char *_sol, int skip, int len ) {
++                        const char *_sol, int skip, int len ) {
+ 
+   // Display the entire tab-expanded line
+   os << fname << ":" << line << ": ";
+   const char *t = strchr(_sol,'\n')+1; // End of line
+-  int off = 0;			// Cursor offset for tab expansion
+-  const char *s = _sol;		// Nice string pointer
+-  while( t-s ) {		// Display whole line
+-    char c = *s++;		// Get next character to display
++  int off = 0;                  // Cursor offset for tab expansion
++  const char *s = _sol;         // Nice string pointer
++  while( t-s ) {                // Display whole line
++    char c = *s++;              // Get next character to display
+     off = expandtab(os,off,c,' ',c);
+   }
+ 
+   // Display the tab-expanded skippings before underlining.
+   os << fname << ":" << line << ": ";
+-  off = 0;			// Cursor offset for tab expansion
+-  s = _sol;			// Restart string pointer
++  off = 0;                      // Cursor offset for tab expansion
++  s = _sol;                     // Restart string pointer
+ 
+   // Start underlining.
+-  if( skip != -1 ) {		// The no-start-indicating flag
+-    const char *u = _sol+skip;	// Amount to skip
+-    while( u-s )		// Display skipped part
++  if( skip != -1 ) {            // The no-start-indicating flag
++    const char *u = _sol+skip;  // Amount to skip
++    while( u-s )                // Display skipped part
+       off = expandtab(os,off,*s++,' ',' ');
+     os << '^';                  // Start region
+-    off++;			// Moved cursor
+-    len--;			// 1 less char to do
++    off++;                      // Moved cursor
++    len--;                      // 1 less char to do
+     if( *s++ == '\t' )          // Starting character is a tab?
+       off = expandtab(os,off,'\t','-','^');
+   }
+ 
+   // Long region doesn't end on this line
+-  int llen = (int)(t-s);	// Length of line, minus what's already done
+-  if( len > llen ) {		// Doing entire rest of line?
+-    while( t-s )		// Display rest of line
++  int llen = (int)(t-s);        // Length of line, minus what's already done
++  if( len > llen ) {            // Doing entire rest of line?
++    while( t-s )                // Display rest of line
+       off = expandtab(os,off,*s++,'-','-');
+     os << '\n';                 // EOL
+-    return len-llen;		// Return what's not yet done.
++    return len-llen;            // Return what's not yet done.
+   }
+ 
+   // Region does end on this line.  This code fails subtly if the region ends
+   // in a tab character.
+   int i;
+-  for( i=1; i<len; i++ )	// Underline just what's needed
++  for( i=1; i<len; i++ )        // Underline just what's needed
+     off = expandtab(os,off,*s++,'-','-');
+   if( i == len ) os << '^';     // Mark end of region
+   os << '\n';                   // End of marked line
+-  return 0L;			// All done
++  return 0L;                    // All done
+ }
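+ 
printline() renders a region in two rows: the tab-expanded source line, then a marker row with '^' at the region start and '-' under the rest. In miniature (no tabs, hypothetical file name and region), the marker row can be built like this:

    #include <iostream>
    #include <string>

    // Build the marker row printline() emits under a line: skip columns
    // of blanks, '^' at the region start, '-' for the remaining length.
    static std::string underline(int skip, int len) {
      return std::string(skip, ' ') + "^" +
             std::string(len > 1 ? len - 1 : 0, '-');
    }

    int main() {
      std::cout << "foo.adl:12: FileBuff buff;\n";
      std::cout << "foo.adl:12: " << underline(9, 4) << "\n"; // under "buff"
      return 0;
    }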
+ 
+ //------------------------------print------------------------------------------
+ //std::ostream& operator<< ( std::ostream& os, FileBuffRegion &br ) {
+ ostream& operator<< ( ostream& os, FileBuffRegion &br ) {
+-  if( &br == NULL ) return os;	// The empty buffer region
+-  FileBuffRegion *brp = &br;	// Pointer to region
+-  while( brp ) {		// While have chained regions
+-    brp->print(os);		// Print region
+-    brp = brp->_next;		// Chain to next
++  if( &br == NULL ) return os;  // The empty buffer region
++  FileBuffRegion *brp = &br;    // Pointer to region
++  while( brp ) {                // While have chained regions
++    brp->print(os);             // Print region
++    brp = brp->_next;           // Chain to next
+   }
+-  return os;			// Return final stream
++  return os;                    // Return final stream
+ }
+ 
+ //------------------------------print------------------------------------------
+ // Print the FileBuffRegion to a stream. FileBuffRegions are printed with the
+ // filename and line number to the left, and complete text lines to the right.
+ // Selected portions (portions of a line actually in the FileBuffRegion are
+-// underlined.	Ellipses are used for long multi-line regions.
++// underlined.  Ellipses are used for long multi-line regions.
+ //void FileBuffRegion::print( std::ostream& os ) {
+ void FileBuffRegion::print( ostream& os ) {
+-  if( !this ) return;		// Nothing to print
++  if( !this ) return;           // Nothing to print
+   char *s = _bfr->get_line();
+-  int skip = (int)(_offset - _sol);	// Amount to skip to start of data
++  int skip = (int)(_offset - _sol);     // Amount to skip to start of data
+   int len = printline( os, _bfr->_fp->_name, _line, s, skip, _length );
+ 
+-  if( !len ) return;			// All done; exit
++  if( !len ) return;                    // All done; exit
+ 
+   // Here we require at least 2 lines
+-  int off1 = _length - len + skip;	// Length of line 1
+-  int off2 = off1 + _sol;		// Offset to start of line 2
++  int off1 = _length - len + skip;      // Length of line 1
++  int off2 = off1 + _sol;               // Offset to start of line 2
+   char *s2 = _bfr->get_line();           // Start of line 2
+   char *s3 = strchr( s2, '\n' )+1;      // Start of line 3 (unread)
+-  if( len <= (s3-s2) ) {		// It all fits on the next line
++  if( len <= (s3-s2) ) {                // It all fits on the next line
+     printline( os, _bfr->_fp->_name, _line+1, s2, -1, len ); // Print&underline
+     return;
+   }
+ 
+   // Here we require at least 3 lines
+-  int off3 = off2 + (int)(s3-s2);	// Offset to start of line 3
+-  s3 = _bfr->get_line(); 		// Start of line 3 (read)
++  int off3 = off2 + (int)(s3-s2);       // Offset to start of line 3
++  s3 = _bfr->get_line();                // Start of line 3 (read)
+   const char *s4 = strchr( s3, '\n' )+1;// Start of line 4 (unread)
+-  if( len < (s4-s3) ) { 		// It all fits on the next 2 lines
++  if( len < (s4-s3) ) {                 // It all fits on the next 2 lines
+     s2 = _bfr->get_line();
+     len = printline( os, _bfr->_fp->_name, _line+1, s2, -1, len ); // Line 2
+     s3 = _bfr->get_line();
+-    printline( os, _bfr->_fp->_name, _line+2, s3, -1, len );	 // Line 3
++    printline( os, _bfr->_fp->_name, _line+2, s3, -1, len );     // Line 3
+     return;
+   }
+ 
+   // Here we require at least 4 lines.
+   // Print only the 1st and last line, with ellipses in middle.
+   os << "...\n";                // The ellipses
+-  int cline = _line+1;   	// Skipped 2 lines
+-  do {				// Do until find last line
+-    len -= (int)(s3-s2);	// Remove length of line
+-    cline++;			// Next line
+-    s2 = _bfr->get_line();	// Get next line from end of this line
++  int cline = _line+1;          // Skipped 2 lines
++  do {                          // Do until find last line
++    len -= (int)(s3-s2);        // Remove length of line
++    cline++;                    // Next line
++    s2 = _bfr->get_line();      // Get next line from end of this line
+     s3 = strchr( s2, '\n' ) + 1;// Get end of next line
+-  } while( len > (s3-s2) );	// Repeat until last line
++  } while( len > (s3-s2) );     // Repeat until last line
+   printline( os, _bfr->_fp->_name, cline, s2, -1, len ); // Print & underline
+ }
+ 
+@@ -297,4 +294,3 @@
+   va_end(args);
+   _AD._no_output = 1;
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/adlc/filebuff.hpp openjdk/hotspot/src/share/vm/adlc/filebuff.hpp
+--- openjdk6/hotspot/src/share/vm/adlc/filebuff.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/adlc/filebuff.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)filebuff.hpp	1.27 07/05/05 17:05:01 JVM"
+-#endif
+ /*
+  * Copyright 1997-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // FILEBUFF.HPP - Definitions for parser file buffering routines
+@@ -47,14 +44,14 @@
+ class FileBuff {
+   friend class FileBuffRegion;
+  private:
+-  long  _bufferSize;	        // Size of text holding buffer.
+-  long  _offset;   		// Expected filepointer offset.
+-  long  _bufoff; 	        // Start of buffer file offset
++  long  _bufferSize;            // Size of text holding buffer.
++  long  _offset;                // Expected filepointer offset.
++  long  _bufoff;                // Start of buffer file offset
+ 
+-  char *_buf;    		// The buffer itself.
++  char *_buf;                   // The buffer itself.
+   char *_bigbuf;                // The buffer plus sentinels; actual heap area
+-  char *_bufmax; 		// A pointer to the buffer end sentinal
+-  char *_bufeol; 		// A pointer to the last complete line end
++  char *_bufmax;                // A pointer to the buffer end sentinel
++  char *_bufeol;                // A pointer to the last complete line end
+ 
+   int   _err;                   // Error flag for file seek/read operations
+   long  _filepos;               // Current offset from start of file
+@@ -74,7 +71,7 @@
+   // and increments bufeol and filepos to point at the end of that line.
+   char *get_line(void);
+ 
+-  // This converts a pointer into the buffer to a file offset.	It only works
++  // This converts a pointer into the buffer to a file offset.  It only works
+   // when the pointer is valid (i.e. just obtained from getline()).
+   int getoff(const char *s) { return _bufoff+(int)(s-_buf); }
+ };
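+ 
getoff() is the inverse of get_line(): since the buffer holds a contiguous window of the file starting at file offset _bufoff, any pointer just returned by get_line() maps back to a file offset by pointer subtraction. The same arithmetic in isolation, with made-up values:

    #include <cstdio>

    int main(void) {
      const char *buf = "operand regI ...";  // pretend buffer contents
      long bufoff = 1024;            // buffer starts at file offset 1024
      const char *s = buf + 8;       // a pointer into the current line
      std::printf("file offset = %ld\n", bufoff + (s - buf));  // 1032
      return 0;
    }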
+@@ -84,19 +81,19 @@
+ // of offsets and lengths.  These regions can be merged; overlapping regions
+ // will coalesce.
+ class FileBuffRegion {
+- public:			// Workaround dev-studio friend/private bug
+-  FileBuffRegion *_next;	// Linked list of regions sorted by offset.
++ public:                        // Workaround dev-studio friend/private bug
++  FileBuffRegion *_next;        // Linked list of regions sorted by offset.
+  private:
+-  FileBuff       *_bfr;		// The Buffer of the file
++  FileBuff       *_bfr;         // The Buffer of the file
+   int _offset, _length;         // The file area
+-  int             _sol;		// Start of line where the file area starts
+-  int             _line;	// First line of region
++  int             _sol;         // Start of line where the file area starts
++  int             _line;        // First line of region
+ 
+  public:
+   FileBuffRegion(FileBuff*, int sol, int line, int offset, int len);
+   ~FileBuffRegion();
+ 
+-  FileBuffRegion *copy();	            // Deep copy
++  FileBuffRegion *copy();                   // Deep copy
+   FileBuffRegion *merge(FileBuffRegion*); // Merge 2 regions; delete input
+ 
+ //  void print(std::ostream&);
+diff -ruN openjdk6/hotspot/src/share/vm/adlc/forms.cpp openjdk/hotspot/src/share/vm/adlc/forms.cpp
+--- openjdk6/hotspot/src/share/vm/adlc/forms.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/adlc/forms.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)forms.cpp	1.161 07/05/05 17:04:59 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // FORMS.CPP - Definitions for ADL Parser Generic & Utility Forms Classes
+@@ -32,15 +29,15 @@
+ // allocate arena used by forms
+ Arena  *Form::arena = Form::generate_arena(); //  = Form::generate_arena();
+ Arena *Form::generate_arena() {
+-  return (new Arena); 
++  return (new Arena);
+ }
+ 
+ //------------------------------NameList---------------------------------------
+ // reserved user-defined string
+-const char  *NameList::_signal   = "$$SIGNAL$$";  
++const char  *NameList::_signal   = "$$SIGNAL$$";
+ 
+ // Constructor and Destructor
+-NameList::NameList() : _cur(0), _max(4), _iter(0), _justReset(true) { 
++NameList::NameList() : _cur(0), _max(4), _iter(0), _justReset(true) {
+   _names = (const char**)malloc(_max*sizeof(char*));
+ }
+ NameList::~NameList() {
+@@ -66,7 +63,7 @@
+ int    NameList::count()  const { return _cur; }
+ 
+ void   NameList::reset()   { _iter = 0; _justReset = true;}
+-const char  *NameList::iter()    { 
++const char  *NameList::iter()    {
+   if (_justReset) {_justReset=false; return (_iter < _cur ? _names[_iter] : NULL);}
+   else return (_iter <_cur-1 ? _names[++_iter] : NULL);
+ }
+@@ -118,7 +115,7 @@
+   int         iter       = 0;
+   bool        justReset  = true;
+ 
+-  while( ( name  = (justReset ? 
++  while( ( name  = (justReset ?
+                     (justReset=false, (iter < _cur ? _names[iter] : NULL)) :
+                     (iter < _cur-1 ? _names[++iter] : NULL)) )
+          != NULL ) {
+@@ -311,7 +308,7 @@
+ 
+ //------------------------------FormList---------------------------------------
+ // Destructor
+-FormList::~FormList()  { 
++FormList::~FormList()  {
+   // // This list may not own its elements
+   // Form *cur  = _root;
+   // Form *next = NULL;
+@@ -330,8 +327,8 @@
+ }
+ 
+ // Return # of name-Form pairs in dict
+-int FormDict::Size(void) const { 
+-  return _form.Size(); 
++int FormDict::Size(void) const {
++  return _form.Size();
+ }
+ 
+ // Insert inserts the given key-value pair into the dictionary.  The prior
+@@ -340,7 +337,7 @@
+   return (Form*)_form.Insert((void*)name, (void*)form);
+ }
+ 
+-// Finds the value of a given key; or NULL if not found.  
++// Finds the value of a given key; or NULL if not found.
+ // The dictionary is NOT changed.
+ const Form  *FormDict::operator [](const char *name) const {
+   return (Form*)_form[name];
+@@ -384,6 +381,5 @@
+ }
+ 
+ void SourceForm::output(FILE *fp) {
+-  fprintf(fp,"\n//%s\n%s\n",classname(),(_code?_code:"")); 
+-} 
+-
++  fprintf(fp,"\n//%s\n%s\n",classname(),(_code?_code:""));
++}
+diff -ruN openjdk6/hotspot/src/share/vm/adlc/forms.hpp openjdk/hotspot/src/share/vm/adlc/forms.hpp
+--- openjdk6/hotspot/src/share/vm/adlc/forms.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/adlc/forms.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)forms.hpp	1.150 07/05/05 17:05:00 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // FORMS.HPP - ADL Parser Generic and Utility Forms Classes
+@@ -106,7 +103,7 @@
+   // value of the key is returned; NULL if the key was not previously defined.
+   const Form  *Insert(const char *name, Form *form); // A new key-value
+ 
+-  // Find finds the value of a given key; or NULL if not found.  
++  // Find finds the value of a given key; or NULL if not found.
+   // The dictionary is NOT changed.
+   const Form  *operator [](const char *name) const;  // Do a lookup
+ 
+@@ -144,7 +141,7 @@
+   virtual bool           is_cisc_mem(FormDict &globals) const { return false; }
+ 
+   // Public Methods
+-  Form(int formType=0, int line=0) 
++  Form(int formType=0, int line=0)
+     : _next(NULL), _linenum(line), _ftype(formType) { };
+   ~Form() {};
+ 
+@@ -154,8 +151,8 @@
+   }
+ 
+   // Check constraints after parsing
+-  virtual bool verify()    { return true; }       
+- 
++  virtual bool verify()    { return true; }
++
+   virtual void dump()      { output(stderr); }    // Debug printer
+   // Write info to output files
+   virtual void output(FILE *fp)    { fprintf(fp,"Form Output"); }
+@@ -179,7 +176,7 @@
+   Form::DataType  ideal_to_sReg_type(const char *name) const;
+   // Convert ideal name to a DataType, return DataType::none if not a 'RegX
+   Form::DataType  ideal_to_Reg_type(const char *name) const;
+-  
++
+   // Convert ideal name to a DataType, return DataType::none if not a 'LoadX
+   Form::DataType is_load_from_memory(const char *opType) const;
+   // Convert ideal name to a DataType, return DataType::none if not a 'StoreX
+@@ -258,31 +255,31 @@
+   Form *_cur2;                     // Nested iterator
+   int   _justReset2;
+ 
+-public: 
+-  void addForm(Form * entry) { 
++public:
++  void addForm(Form * entry) {
+     if (_tail==NULL) { _root = _tail = _cur = entry;}
+     else { _tail->_next = entry; _tail = entry;}
+   };
+   Form * current() { return _cur; };
+-  Form * iter()    { if (_justReset) _justReset = 0; 
+-                     else if (_cur)  _cur = _cur->_next; 
++  Form * iter()    { if (_justReset) _justReset = 0;
++                     else if (_cur)  _cur = _cur->_next;
+                      return _cur;};
+   void   reset()   { if (_root) {_cur = _root; _justReset = 1;} };
+ 
+   // Second iterator, state is internal
+   Form * current2(){ return _cur2; };
+-  Form * iter2()   { if (_justReset2) _justReset2 = 0; 
+-                    else if (_cur2)  _cur2 = _cur2->_next; 
++  Form * iter2()   { if (_justReset2) _justReset2 = 0;
++                    else if (_cur2)  _cur2 = _cur2->_next;
+                     return _cur2;};
+   void   reset2()  { if (_root) {_cur2 = _root; _justReset2 = 1;} };
+ 
+-  int  count() { 
++  int  count() {
+     int  count = 0; reset();
+     for( Form *cur; (cur =  iter()) != NULL; ) { ++count; };
+     return count;
+   }
+ 
+-  void dump() { 
++  void dump() {
+     reset();
+     Form *cur;
+     for(; (cur =  iter()) != NULL; ) {
+@@ -290,7 +287,7 @@
+     };
+   }
+ 
+-  bool verify() { 
++  bool verify() {
+     bool verified = true;
+ 
+     reset();
+@@ -302,7 +299,7 @@
+     return verified;
+   }
+ 
+-  void output(FILE* fp) { 
++  void output(FILE* fp) {
+     reset();
+     Form *cur;
+     for( ; (cur =  iter()) != NULL; ) {
+@@ -321,24 +318,24 @@
+ 
+ private:
+   int                _cur;         // Insert next entry here; count of entries
+-  int  	             _max;         // Number of spaces allocated
++  int                _max;         // Number of spaces allocated
+   const char       **_names;       // Array of names
+ 
+ protected:
+   int                _iter;        // position during iteration
+   bool               _justReset;   // Set immediately after reset
+ 
+-                                   
+-public:                            
++
++public:
+   static const char *_signal;      // reserved user-defined string
+   enum               { Not_in_list = -1 };
+ 
+-  void  addName(const char *name);       
++  void  addName(const char *name);
+   void  add_signal();
+   void  clear();                   // Remove all entries
+ 
+-  int   count() const;                   
+-                                   
++  int   count() const;
++
+   void  reset();                   // Reset iteration
+   const char *iter();              // after reset(), first element : else next
+   const char *current();           // return current element in iteration.
+@@ -378,7 +375,7 @@
+   }
+ 
+ };
+-  
++
+ 
+ //------------------------------NameAndList------------------------------------
+ // Storage for a name and an associated list of names
+@@ -408,14 +405,14 @@
+   void  dump();                    // output to stderr
+   void  output(FILE *fp);          // Output list of names to 'fp'
+ };
+-  
++
+ //------------------------------ComponentList---------------------------------
+ // Component lists always have match rule operands first, followed by parameter
+ // operands which do not appear in the match list (in order of declaration).
+ class ComponentList : private NameList {
+ private:
+   int   _matchcnt;                 // Count of match rule operands
+-                                   
++
+ public:
+ 
+   // This is a batch program.  (And I have a destructor bug!)
+@@ -426,7 +423,7 @@
+ 
+   int  count();
+   int  match_count() { return _matchcnt; } // Get count of match rule opers
+-                                   
++
+   Component *iter();               // after reset(), first element : else next
+   Component *match_iter();         // after reset(), first element : else next
+   Component *post_match_iter();    // after reset(), first element : else next
+@@ -452,7 +449,7 @@
+   int        label_position();
+   // Find position for the Method when looked up for output via "format"
+   int        method_position();
+-                                  
++
+   void       dump();               // output to stderr
+   void       output(FILE *fp);     // Output list of names to 'fp'
+ 
+@@ -506,7 +503,7 @@
+     Zero     = 0,
+     Max      = 0x7fffffff
+   };
+-  const char *_external_name;  // if !NULL, then print this instead of _expr 
++  const char *_external_name;  // if !NULL, then print this instead of _expr
+   const char *_expr;
+   int         _min_value;
+   int         _max_value;
+@@ -571,7 +568,7 @@
+   // Return # of key-value pairs in dict
+   int Size(void) const;
+ 
+-  // define inserts the given key-value pair into the dictionary, 
++  // define inserts the given key-value pair into the dictionary,
+   // and records the name in order for later output, ...
+   const Expr  *define(const char *name, Expr *expr);
+ 
+@@ -579,7 +576,7 @@
+   // value of the key is returned; NULL if the key was not previously defined.
+   const Expr  *Insert(const char *name, Expr *expr); // A new key-value
+ 
+-  // Find finds the value of a given key; or NULL if not found.  
++  // Find finds the value of a given key; or NULL if not found.
+   // The dictionary is NOT changed.
+   const Expr  *operator [](const char *name) const;  // Do a lookup
+ 
+@@ -587,6 +584,3 @@
+   void print_asserts(FILE *fp);
+   void dump();
+ };
+-
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/adlc/formsopt.cpp openjdk/hotspot/src/share/vm/adlc/formsopt.cpp
+--- openjdk6/hotspot/src/share/vm/adlc/formsopt.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/adlc/formsopt.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)formsopt.cpp	1.53 07/05/05 17:05:01 JVM"
+-#endif
+ /*
+  * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // FORMS.CPP - Definitions for ADL Parser Forms Classes
+@@ -33,9 +30,9 @@
+ 
+ //------------------------------RegisterForm-----------------------------------
+ // Constructor
+-RegisterForm::RegisterForm() 
+-  : _regDef(cmpstr,hashstr, Form::arena), 
+-    _regClass(cmpstr,hashstr, Form::arena), 
++RegisterForm::RegisterForm()
++  : _regDef(cmpstr,hashstr, Form::arena),
++    _regClass(cmpstr,hashstr, Form::arena),
+     _allocClass(cmpstr,hashstr, Form::arena) {
+ }
+ RegisterForm::~RegisterForm() {
+@@ -145,7 +142,7 @@
+             "ERROR: More than one register has been assigned register-number 0.\n"
+             "Probably because a register has not been entered into an allocation class.\n");
+   }
+-    
++
+   return  valid;
+ }
+ 
+@@ -178,14 +175,14 @@
+     ((AllocClass*)_allocClass[name])->output(fp);
+   }
+   fprintf(fp,"-------------------- end  RegisterForm --------------------\n");
+-} 
++}
+ 
+ //------------------------------RegDef-----------------------------------------
+ // Constructor
+ RegDef::RegDef(char *regname, char *callconv, char *c_conv, char * idealtype, char * encode, char * concrete)
+-  : _regname(regname), _callconv(callconv), _c_conv(c_conv), 
+-    _idealtype(idealtype), 
+-    _register_encode(encode), 
++  : _regname(regname), _callconv(callconv), _c_conv(c_conv),
++    _idealtype(idealtype),
++    _register_encode(encode),
+     _concrete(concrete),
+     _register_num(0) {
+ 
+@@ -234,7 +231,7 @@
+ // Number of registers in class
+ uint RegClass::size() const {
+   return _regDef.Size();
+-} 
++}
+ 
+ const RegDef *RegClass::get_RegDef(const char *rd_name) const {
+   return  (const RegDef*)_regDef[rd_name];
+@@ -330,7 +327,7 @@
+ 
+ //==============================Frame Handling=================================
+ //------------------------------FrameForm--------------------------------------
+-FrameForm::FrameForm() { 
++FrameForm::FrameForm() {
+   _frame_pointer = NULL;
+   _c_frame_pointer = NULL;
+   _alignment = NULL;
+@@ -353,7 +350,7 @@
+ }
+ 
+ void FrameForm::output(FILE *fp) {           // Write info to output files
+-  fprintf(fp,"\nFrame:\n"); 
++  fprintf(fp,"\nFrame:\n");
+ }
+ 
+ //==============================Scheduling=====================================
+@@ -591,8 +588,8 @@
+ 
+ 
+ // Insert info into the match-rule
+-void  PeepMatch::add_instruction(int parent, int position, const char *name, 
+-				 int input) {
++void  PeepMatch::add_instruction(int parent, int position, const char *name,
++                                 int input) {
+   if( position > _max_position ) _max_position = position;
+ 
+   _parent.addName((char *)parent);
+@@ -640,7 +637,7 @@
+ }
+ 
+ //------------------------------PeepConstraint---------------------------------
+-PeepConstraint::PeepConstraint(intptr_t  left_inst,  char *left_op, char *relation, 
++PeepConstraint::PeepConstraint(intptr_t  left_inst,  char *left_op, char *relation,
+                                intptr_t  right_inst, char *right_op)
+   : _left_inst(left_inst), _left_op(left_op), _relation(relation),
+     _right_inst(right_inst), _right_op(right_op), _next(NULL) {}
+@@ -725,4 +722,3 @@
+ void PeepReplace::output(FILE *fp) {      // Write info to output files
+   fprintf(fp,"PeepReplace:\n");
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/adlc/formsopt.hpp openjdk/hotspot/src/share/vm/adlc/formsopt.hpp
+--- openjdk6/hotspot/src/share/vm/adlc/formsopt.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/adlc/formsopt.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)formsopt.hpp	1.54 07/05/05 17:05:01 JVM"
+-#endif
+ /*
+  * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // FORMSOPT.HPP - ADL Parser Target Specific Optimization Forms Classes
+@@ -96,7 +93,7 @@
+   RegisterForm();
+   ~RegisterForm();
+ 
+-  void        addRegDef(char *regName, char *callingConv, char *c_conv, 
++  void        addRegDef(char *regName, char *callingConv, char *c_conv,
+                         char * idealtype, char *encoding, char* concreteName);
+   RegClass   *addRegClass(const char *className);
+   AllocClass *addAllocClass(char *allocName);
+@@ -137,7 +134,7 @@
+ 
+ public:
+   // Public Methods
+-  RegDef(char  *regname, char *callconv, char *c_conv, 
++  RegDef(char  *regname, char *callconv, char *c_conv,
+          char *idealtype, char *encoding, char *concrete);
+   ~RegDef();                       // Destructor
+ 
+@@ -170,7 +167,7 @@
+   uint size() const;            // Number of registers in class
+   int regs_in_word( int wordnum, bool stack_also );
+ 
+-  const RegDef *get_RegDef(const char *regDef_name) const; 
++  const RegDef *get_RegDef(const char *regDef_name) const;
+ 
+   // Returns the lowest numbered register in the mask.
+   const RegDef* find_first_elem();
+@@ -267,8 +264,8 @@
+   bool       _branchHasDelaySlot; // Indicates that branches have delay slot instructions
+   int        _maxInstrsPerBundle; // Indicates the maximum number of instructions for ILP
+   int        _maxBundlesPerCycle; // Indicates the maximum number of bundles for ILP
+-  int        _instrUnitSize;      // The minimum instruction unit size, in bytes 
+-  int        _bundleUnitSize;     // The bundle unit size, in bytes 
++  int        _instrUnitSize;      // The minimum instruction unit size, in bytes
++  int        _bundleUnitSize;     // The bundle unit size, in bytes
+   int        _instrFetchUnitSize; // The size of the I-fetch unit, in bytes [must be power of 2]
+   int        _instrFetchUnits;    // The number of I-fetch units processed per cycle
+ 
+@@ -313,7 +310,7 @@
+   unsigned _more_instrs;          // Additional Instructions
+ 
+   // Public Methods
+-  PipeClassOperandForm(const char *stage, unsigned iswrite, unsigned more_instrs) 
++  PipeClassOperandForm(const char *stage, unsigned iswrite, unsigned more_instrs)
+   : _stage(stage)
+   , _iswrite(iswrite)
+   , _more_instrs(more_instrs)
+@@ -391,13 +388,13 @@
+   void setBranchDelay(bool s)        { _has_branch_delay_slot = s; }
+   void setForceSerialization(bool s) { _force_serialization = s; }
+   void setMayHaveNoCode(bool s)      { _may_have_no_code = s; }
+-  
++
+   int  InstructionCount()   const { return _instruction_count; }
+   bool hasMultipleBundles() const { return _has_multiple_bundles; }
+   bool hasBranchDelay()     const { return _has_branch_delay_slot; }
+   bool forceSerialization() const { return _force_serialization; }
+   bool mayHaveNoCode()      const { return _may_have_no_code; }
+-  
++
+   void dump();                    // Debug printer
+   void output(FILE *fp);          // Write info to output files
+ };
+@@ -485,7 +482,7 @@
+ 
+ public:
+   // Public Methods
+-  PeepConstraint(intptr_t  left_inst,  char *left_op, char *relation, 
++  PeepConstraint(intptr_t  left_inst,  char *left_op, char *relation,
+                  intptr_t  right_inst, char *right_op);
+   ~PeepConstraint();
+ 
+@@ -549,4 +546,3 @@
+   void dump();
+   void output(FILE *fp);
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/adlc/formssel.cpp openjdk/hotspot/src/share/vm/adlc/formssel.cpp
+--- openjdk6/hotspot/src/share/vm/adlc/formssel.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/adlc/formssel.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)formssel.cpp	1.183 07/09/06 15:24:29 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // FORMS.CPP - Definitions for ADL Parser Forms Classes
+@@ -31,10 +28,10 @@
+ //==============================Instructions===================================
+ //------------------------------InstructForm-----------------------------------
+ InstructForm::InstructForm(const char *id, bool ideal_only)
+-  : _ident(id), _ideal_only(ideal_only), 
++  : _ident(id), _ideal_only(ideal_only),
+     _localNames(cmpstr, hashstr, Form::arena),
+     _effects(cmpstr, hashstr, Form::arena) {
+-      _ftype = Form::INS; 
++      _ftype = Form::INS;
+ 
+       _matrule   = NULL;
+       _insencode = NULL;
+@@ -59,10 +56,10 @@
+ }
+ 
+ InstructForm::InstructForm(const char *id, InstructForm *instr, MatchRule *rule)
+-  : _ident(id), _ideal_only(false), 
++  : _ident(id), _ideal_only(false),
+     _localNames(instr->_localNames),
+     _effects(instr->_effects) {
+-      _ftype = Form::INS; 
++      _ftype = Form::INS;
+ 
+       _matrule   = rule;
+       _insencode = instr->_insencode;
+@@ -180,9 +177,9 @@
+     return src_op->_matrule->_opType;
+   }
+   // Operand chain rules do not really have ideal Opcodes
+-  if( _matrule->is_chain_rule(globalNames) ) 
++  if( _matrule->is_chain_rule(globalNames) )
+     return "Node";
+-  return strcmp(_matrule->_opType,"Set") 
++  return strcmp(_matrule->_opType,"Set")
+     ? _matrule->_opType
+     : _matrule->_rChild->_opType;
+ }
+@@ -236,23 +233,58 @@
+   return _matrule ? _matrule->is_ideal_copy() : 0;
+ }
+ 
+-// Return 'true' if this instruction matches an ideal 'CosD' node
++// Return 'true' if this instruction is too complex to rematerialize.
+ int InstructForm::is_expensive() const {
+-  if (_matrule == NULL)  return 0;
+   // We can prove it is cheap if it has an empty encoding.
+   // This helps with platform-specific nops like ThreadLocal and RoundFloat.
++  if (is_empty_encoding())
++    return 0;
++
++  if (is_tls_instruction())
++    return 1;
++
++  if (_matrule == NULL)  return 0;
++
++  return _matrule->is_expensive();
++}
++
++// Has an empty encoding if _size is a constant zero or there
++// are no ins_encode tokens.
++int InstructForm::is_empty_encoding() const {
+   if (_insencode != NULL) {
+     _insencode->reset();
+     if (_insencode->encode_class_iter() == NULL) {
+-      return 0;
++      return 1;
+     }
+   }
+   if (_size != NULL && strcmp(_size, "0") == 0) {
+-    return 0;
++    return 1;
+   }
+-  return _matrule->is_expensive();
++  return 0;
+ }
+ 
++int InstructForm::is_tls_instruction() const {
++  if (_ident != NULL &&
++      ( ! strcmp( _ident,"tlsLoadP") ||
++        ! strncmp(_ident,"tlsLoadP_",9)) ) {
++    return 1;
++  }
++
++  if (_matrule != NULL && _insencode != NULL) {
++    const char* opType = _matrule->_opType;
++    if (strcmp(opType, "Set")==0)
++      opType = _matrule->_rChild->_opType;
++    if (strcmp(opType,"ThreadLocal")==0) {
++      fprintf(stderr, "Warning: ThreadLocal instruction %s should be named 'tlsLoadP_*'\n",
++              (_ident == NULL ? "NULL" : _ident));
++      return 1;
++    }
++  }
++
++  return 0;
++}
++
++
+ // Return 'true' if this instruction matches an ideal 'Copy*' node
+ bool InstructForm::is_ideal_unlock() const {
+   return _matrule ? _matrule->is_ideal_unlock() : false;
+@@ -307,11 +339,11 @@
+ // Return 'true' if this instruction matches an ideal 'Jump' node
+ bool InstructForm::is_ideal_jump() const {
+   if( _matrule == NULL ) return false;
+-  
++
+   return _matrule->is_ideal_jump();
+ }
+ 
+-// Return 'true' if instruction matches ideal 'If' | 'Goto' | 
++// Return 'true' if instruction matches ideal 'If' | 'Goto' |
+ //                    'CountedLoopEnd' | 'Jump'
+ bool InstructForm::is_ideal_branch() const {
+   if( _matrule == NULL ) return false;
+@@ -411,7 +443,7 @@
+     OperandForm *op       = form->is_operand();
+     if( op ) {
+       if( op->constrained_reg_class() != NULL &&
+-	  op->interface_type(globals) == Form::register_interface ) {
++          op->interface_type(globals) == Form::register_interface ) {
+         // Remember the local name for equality test later
+         const char *def_name = comp->_name;
+         // Check if a component has the same name and is a USE
+@@ -434,15 +466,15 @@
+   const char *dummy2 = NULL;
+   return is_chain_of_constant(globals, dummy, dummy2);
+ }
+-Form::DataType InstructForm::is_chain_of_constant(FormDict &globals, 
+-		const char * &opTypeParam) {
++Form::DataType InstructForm::is_chain_of_constant(FormDict &globals,
++                const char * &opTypeParam) {
+   const char *result = NULL;
+ 
+   return is_chain_of_constant(globals, opTypeParam, result);
+ }
+ 
+-Form::DataType InstructForm::is_chain_of_constant(FormDict &globals, 
+-		const char * &opTypeParam, const char * &resultParam) {
++Form::DataType InstructForm::is_chain_of_constant(FormDict &globals,
++                const char * &opTypeParam, const char * &resultParam) {
+   Form::DataType  data_type = Form::none;
+   if ( ! _matrule)  return data_type;
+ 
+@@ -485,22 +517,26 @@
+   bool   rematerialize = false;
+ 
+   Form::DataType data_type = is_chain_of_constant(globals);
+-  if( data_type != Form::none ) 
++  if( data_type != Form::none )
+     rematerialize = true;
+ 
+   // Constants
+-  if( _components.count() == 1 && _components[0]->is(Component::USE_DEF) ) 
++  if( _components.count() == 1 && _components[0]->is(Component::USE_DEF) )
++    rematerialize = true;
++
++  // Pseudo-constants (values easily available to the runtime)
++  if (is_empty_encoding() && is_tls_instruction())
+     rematerialize = true;
+ 
+   // 1-input, 1-output, such as copies or increments.
+-  if( _components.count() == 2 && 
+-      _components[0]->is(Component::DEF) && 
+-      _components[1]->isa(Component::USE) ) 
++  if( _components.count() == 2 &&
++      _components[0]->is(Component::DEF) &&
++      _components[1]->isa(Component::USE) )
+     rematerialize = true;
+ 
+   // Check for an ideal 'Load?' and eliminate rematerialize option
+   if ( is_ideal_load() != Form::none || // Ideal load?  Do not rematerialize
+-       is_ideal_copy() != Form::none ||	// Ideal copy?  Do not rematerialize
++       is_ideal_copy() != Form::none || // Ideal copy?  Do not rematerialize
+        is_expensive()  != Form::none) { // Expensive?   Do not rematerialize
+     rematerialize = false;
+   }
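+ 
This hunk is the other half of the TLS change: an instruction that is both empty-encoding and a TLS load is treated as a pseudo-constant (its value is always available to the runtime), so it becomes rematerializable instead of being spilled. Compressed into a single decision function, under the simplifying assumption that only the predicates visible in the hunk matter:

    #include <cstdio>

    // Sketch of the rematerialize_me() predicates touched above; the real
    // method also handles 1-input/1-output copies and then inspects the
    // operands' register classes before returning.
    static bool rematerialize_sketch(bool chain_of_constant,
                                     bool use_def_constant,
                                     bool empty_tls_load,
                                     bool load_copy_or_expensive) {
      bool r = chain_of_constant     // chains ending in a constant
            || use_def_constant      // single USE_DEF component
            || empty_tls_load;       // new: pseudo-constant TLS base
      if (load_copy_or_expensive)    // loads, copies, expensive: never
        r = false;
      return r;
    }

    int main(void) {
      std::printf("%d\n", rematerialize_sketch(false, false, true, false)); // 1
      return 0;
    }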
+@@ -515,13 +551,13 @@
+       // Avoid the special stack_slots register classes
+       const char *rc_name = opform->constrained_reg_class();
+       if( rc_name ) {
+-	if( strcmp(rc_name,"stack_slots") ) {
+-	  // Check for ideal_type of RegFlags
+-	  const char *type = opform->ideal_type( globals, registers );
+-	  if( !strcmp(type,"RegFlags") )
+-	    rematerialize = true;
+-	} else
+-	  rematerialize = false; // Do not rematerialize things target stk
++        if( strcmp(rc_name,"stack_slots") ) {
++          // Check for ideal_type of RegFlags
++          const char *type = opform->ideal_type( globals, registers );
++          if( !strcmp(type,"RegFlags") )
++            rematerialize = true;
++        } else
++          rematerialize = false; // Do not rematerialize things target stk
+       }
+     }
+   }
+@@ -535,12 +571,12 @@
+   if( is_ideal_load() != Form::none )  return true;
+ 
+   // !!!!! !!!!! !!!!!
+-  // TEMPORARY 
++  // TEMPORARY
+   // if( is_simple_chain_rule(globals) )  return false;
+ 
+   // String-compare uses many memory edges, but writes none
+   if( _matrule && _matrule->_rChild &&
+-      strcmp(_matrule->_rChild->_opType,"StrComp")==0 ) 
++      strcmp(_matrule->_rChild->_opType,"StrComp")==0 )
+     return true;
+ 
+   // Check if instruction has a USE of a memory operand class, but no defs
+@@ -589,7 +625,7 @@
+   int DEF_of_memory  = 0;
+   const char*    last_memory_DEF = NULL; // to test DEF/USE pairing in asserts
+   Component     *unique          = NULL;
+-  Component     *comp 	         = NULL;
++  Component     *comp            = NULL;
+   ComponentList &components      = (ComponentList &)_components;
+ 
+   components.reset();
+@@ -601,19 +637,19 @@
+     if( op->stack_slots_only(globals) )  continue;
+     if( form->interface_type(globals) == Form::memory_interface ) {
+       if( comp->isa(Component::DEF) ) {
+-	last_memory_DEF = comp->_name;
+-	DEF_of_memory++;
+-	unique = comp;
++        last_memory_DEF = comp->_name;
++        DEF_of_memory++;
++        unique = comp;
+       } else if( comp->isa(Component::USE) ) {
+-	if( last_memory_DEF != NULL ) {
+-	  assert(0 == strcmp(last_memory_DEF, comp->_name), "every memory DEF is followed by a USE of the same name");
+-	  last_memory_DEF = NULL;
+-	}
+-	USE_of_memory++;
+-	if (DEF_of_memory == 0)  // defs take precedence
+-	  unique = comp;
++        if( last_memory_DEF != NULL ) {
++          assert(0 == strcmp(last_memory_DEF, comp->_name), "every memory DEF is followed by a USE of the same name");
++          last_memory_DEF = NULL;
++        }
++        USE_of_memory++;
++        if (DEF_of_memory == 0)  // defs take precedence
++          unique = comp;
+       } else {
+-	assert(last_memory_DEF == NULL, "unpaired memory DEF");
++        assert(last_memory_DEF == NULL, "unpaired memory DEF");
+       }
+     }
+   }
+@@ -633,10 +669,10 @@
+     if( DEF_of_memory == 1 ) {
+       assert(unique != NULL, "");
+       if( USE_of_memory == 0 ) {
+-	// unique def, no uses
++        // unique def, no uses
+       } else {
+-	// // unique def, some uses
+-	// // must return bottom unless all uses match def
++        // // unique def, some uses
++        // // must return bottom unless all uses match def
+         // unique = NULL;
+       }
+     } else if( DEF_of_memory > 0 ) {
+@@ -657,7 +693,7 @@
+     } else {
+       int pos = components.operand_position(unique->_name);
+       if( unique->isa(Component::DEF) ) {
+-	pos += 1;                // get corresponding USE from DEF
++        pos += 1;                // get corresponding USE from DEF
+       }
+       assert(pos >= 1, "I was just looking at it!");
+       return pos;
+@@ -687,10 +723,10 @@
+ // This instruction captures the machine-independent bottom_type
+ // Expected use is for pointer vs oop determination for LoadP
+ bool InstructForm::captures_bottom_type() const {
+-  if( _matrule && _matrule->_rChild && 
++  if( _matrule && _matrule->_rChild &&
+        (!strcmp(_matrule->_rChild->_opType,"CastPP")     ||  // new result type
+         !strcmp(_matrule->_rChild->_opType,"CastX2P")    ||  // new result type
+-        !strcmp(_matrule->_rChild->_opType,"CreateEx")   ||  // type of exception 
++        !strcmp(_matrule->_rChild->_opType,"CreateEx")   ||  // type of exception
+         !strcmp(_matrule->_rChild->_opType,"CheckCastPP")) ) return true;
+   else if ( is_ideal_load() == Form::idealP )                return true;
+   else if ( is_ideal_store() != Form::none  )                return true;
+@@ -714,7 +750,7 @@
+   int  num_opnds = _components.num_operands();
+ 
+   // Need special handling for matching some ideal nodes
+-  // i.e. Matching a return node 
++  // i.e. Matching a return node
+   /*
+   if( _matrule ) {
+     if( strcmp(_matrule->_opType,"Return"   )==0 ||
+@@ -765,7 +801,7 @@
+   if( !_matrule ) return 1;     // Skip control for most nodes
+ 
+   // Need special handling for matching some ideal nodes
+-  // i.e. Matching a return node 
++  // i.e. Matching a return node
+   if( strcmp(_matrule->_opType,"Return"    )==0 ||
+       strcmp(_matrule->_opType,"Rethrow"   )==0 ||
+       strcmp(_matrule->_opType,"TailCall"  )==0 ||
+@@ -775,8 +811,8 @@
+     return AdlcVMDeps::Parms;   // Skip the machine-state edges
+ 
+   if( _matrule->_rChild &&
+-	  strcmp(_matrule->_rChild->_opType,"StrComp")==0 ) {
+-	// String compare takes 1 control and 4 memory edges.
++          strcmp(_matrule->_rChild->_opType,"StrComp")==0 ) {
++        // String compare takes 1 control and 4 memory edges.
+     return 5;
+   }
+ 
+@@ -879,7 +915,7 @@
+       }
+       // Component positions are zero based.
+       int  pos  = _components.operand_position(name);
+-      assert( ! (component->isa(Component::DEF) && (pos >= 1)), 
++      assert( ! (component->isa(Component::DEF) && (pos >= 1)),
+               "Component::DEF can only occur in the first position");
+     }
+   }
+@@ -930,16 +966,16 @@
+     while (_matrule->base_operand(position, globals, result, name, opType)) {
+       if ( strcmp(opType,"ConP") == 0 ) {
+ #ifdef SPARC
+-	reloc_entries += 2; // 1 for sethi + 1 for setlo
++        reloc_entries += 2; // 1 for sethi + 1 for setlo
+ #else
+-	++reloc_entries;
++        ++reloc_entries;
+ #endif
+       }
+       ++position;
+     }
+   }
+ 
+-  // Above is only a conservative estimate 
++  // Above is only a conservative estimate
+   // because it did not check contents of operand classes.
+   // !!!!! !!!!!
+   // Add 1 to reloc info for each operand class in the component list.
+@@ -967,7 +1003,7 @@
+   // Check for any component being an immediate float or double.
+   Form::DataType data_type = is_chain_of_constant(globals);
+   if( data_type==idealD || data_type==idealF ) {
+-#ifdef SPARC 
++#ifdef SPARC
+     // sparc required more relocation entries for floating constants
+     // (expires 9/98)
+     reloc_entries += 6;
+@@ -990,7 +1026,7 @@
+   if (comp != NULL && comp->isa(Component::DEF)) {
+     result = comp->_type;
+     // Override this if the rule is a store operation:
+-    if (_matrule && _matrule->_rChild && 
++    if (_matrule && _matrule->_rChild &&
+         is_store_to_memory(_matrule->_rChild->_opType))
+       result = "Universe";
+   }
+@@ -1099,7 +1135,7 @@
+         fprintf(stderr, "   using operand %s %s at index %d\n", reg_type, op_name, cisc_spill_operand);
+       }
+       // Record that a stack-version of the reg_mask is needed
+-      // !!!!! 
++      // !!!!!
+       OperandForm *oper = (OperandForm*)(globals[reg_type]->is_operand());
+       assert( oper != NULL, "cisc-spilling non operand");
+       const char *reg_class_name = oper->constrained_reg_class();
+@@ -1113,7 +1149,7 @@
+   } else {
+     cisc_spill_operand = Not_cisc_spillable;
+   }
+-      
++
+   set_cisc_spill_operand(cisc_spill_operand);
+   return (cisc_spill_operand != Not_cisc_spillable);
+ }
+@@ -1124,7 +1160,7 @@
+   if (_matrule != NULL &&
+       this != short_branch &&   // Don't match myself
+       !is_short_branch() &&     // Don't match another short branch variant
+-      reduce_result() != NULL &&       
++      reduce_result() != NULL &&
+       strcmp(reduce_result(), short_branch->reduce_result()) == 0 &&
+       _matrule->equivalent(AD.globalNames(), short_branch->_matrule)) {
+     // The instructions are equivalent.
+@@ -1139,7 +1175,7 @@
+ 
+ 
+ // --------------------------- FILE *output_routines
+-// 
++//
+ // Generate the format call for the replacement variable
+ void InstructForm::rep_var_format(FILE *fp, const char *rep_var) {
+   // Find replacement variable's type
+@@ -1170,10 +1206,10 @@
+   } else {
+     // Output the format call for this operand
+     fprintf(fp,"opnd_array(%d)->",idx);
+-    if (idx == 0) 
+-      fprintf(fp,"int_format(ra, this); // %s\n", rep_var);
++    if (idx == 0)
++      fprintf(fp,"int_format(ra, this, st); // %s\n", rep_var);
+     else
+-      fprintf(fp,"ext_format(ra, this,idx%d); // %s\n", idx, rep_var );
++      fprintf(fp,"ext_format(ra, this,idx%d, st); // %s\n", idx, rep_var );
+   }
+ }
+ 
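The int_format/ext_format change above tracks the OpenJDK 7 HotSpot API, where the generated format methods take an explicit outputStream (`st`) instead of printing to the global `tty`. Why threading a stream parameter through matters, in a hypothetical stand-alone form:

    #include <iostream>
    #include <sstream>

    // Printing through a passed-in stream lets a caller capture the
    // formatted text; code pinned to a global like tty cannot be captured.
    struct Operand {
      int _c0;
      void int_format(std::ostream &st) const { st << "#" << _c0; }
    };

    int main() {
      Operand op{42};
      std::ostringstream buf;
      op.int_format(buf);              // captured, not forced to stdout
      std::cout << buf.str() << "\n";  // prints "#42"
      return 0;
    }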
+@@ -1197,7 +1233,7 @@
+     const char *name;
+     uint count;
+     bool has_dupl_use = false;
+-    
++
+     _parameters.reset();
+     while( (name = _parameters.iter()) != NULL ) {
+       count = 0;
+@@ -1211,7 +1247,7 @@
+       }
+       // The next code is copied from the method operand_position().
+       for (; (comp = _components.iter()) != NULL; ++position) {
+-        // When the first component is not a DEF, 
++        // When the first component is not a DEF,
+         // leave space for the result operand!
+         if ( position==0 && (! comp->isa(Component::DEF)) ) {
+           ++position;
+@@ -1324,7 +1360,7 @@
+   char buf[1024], *s=buf;
+   Dict names(cmpstr,hashstr,Form::arena);       // Map Names to counts
+ 
+-  MatchNode *mnode = 
++  MatchNode *mnode =
+     strcmp(_matrule->_opType, "Set") ? _matrule : _matrule->_rChild;
+   mnode->count_instr_names(names);
+ 
+@@ -1344,7 +1380,7 @@
+       // Handle many pairs
+       if( first ) first=0;
+       else {                    // All tests must pass, so use '&&'
+-        strcpy(s," && ");  
++        strcpy(s," && ");
+         s += strlen(s);
+       }
+       // Add predicate to working buffer
+@@ -1368,7 +1404,7 @@
+ 
+ //------------------------------EncodeForm-------------------------------------
+ // Constructor
+-EncodeForm::EncodeForm() 
++EncodeForm::EncodeForm()
+   : _encClass(cmpstr,hashstr, Form::arena) {
+ }
+ EncodeForm::~EncodeForm() {
+@@ -1399,14 +1435,14 @@
+   encClass->_code.reset();
+   const char *code = (const char*)encClass->_code.iter();
+   assert( code != NULL, "Found an empty encode class body.");
+-  
++
+   return code;
+ }
+ 
+ // Lookup the function body for an encoding class
+ const char *EncodeForm::encClassPrototype(const char *className) {
+   assert( className != NULL, "Encode class name must be non NULL.");
+-  
++
+   return className;
+ }
+ 
+@@ -1422,9 +1458,9 @@
+     ((EncClass*)_encClass[name])->output(fp);
+   }
+   fprintf(fp,"-------------------- end  EncodeForm --------------------\n");
+-} 
++}
+ //------------------------------EncClass---------------------------------------
+-EncClass::EncClass(const char *name) 
++EncClass::EncClass(const char *name)
+   : _localNames(cmpstr,hashstr, Form::arena), _name(name) {
+ }
+ EncClass::~EncClass() {
+@@ -1482,7 +1518,7 @@
+ 
+ // Write info to output files
+ void EncClass::output(FILE *fp) {
+-  fprintf(fp,"EncClass: %s", (_name ? _name : "")); 
++  fprintf(fp,"EncClass: %s", (_name ? _name : ""));
+ 
+   // Output the parameter list
+   _parameter_type.reset();
+@@ -1514,7 +1550,7 @@
+ }
+ 
+ //------------------------------Opcode-----------------------------------------
+-Opcode::Opcode(char *primary, char *secondary, char *tertiary) 
++Opcode::Opcode(char *primary, char *secondary, char *tertiary)
+   : _primary(primary), _secondary(secondary), _tertiary(tertiary) {
+ }
+ 
+@@ -1590,7 +1626,7 @@
+   return encode;
+ }
+ 
+-// Access the list of encodings 
++// Access the list of encodings
+ void InsEncode::reset() {
+   _encoding.reset();
+   // _parameter.reset();
+@@ -1608,7 +1644,7 @@
+   // Remove '$' if parser placed it there.
+   return ( param != NULL && *param == '$') ? (param+1) : param;
+ }
+-  
++
+ void InsEncode::dump() {
+   output(stderr);
+ }
+@@ -1680,7 +1716,7 @@
+ }
+ 
+ void Effect::output(FILE *fp) {          // Write info to output files
+-  fprintf(fp,"Effect: %s\n", (_name?_name:"")); 
++  fprintf(fp,"Effect: %s\n", (_name?_name:""));
+ }
+ 
+ //------------------------------ExpandRule-------------------------------------
+@@ -1725,10 +1761,10 @@
+     }
+     fprintf(fp,");\n");
+   }
+-} 
++}
+ 
+ //------------------------------RewriteRule------------------------------------
+-RewriteRule::RewriteRule(char* params, char* block) 
++RewriteRule::RewriteRule(char* params, char* block)
+   : _tempParams(params), _tempBlock(block) { };  // Constructor
+ RewriteRule::~RewriteRule() {                 // Destructor
+ }
+@@ -1738,7 +1774,7 @@
+ }
+ 
+ void RewriteRule::output(FILE *fp) {         // Write info to output files
+-  fprintf(fp,"\nRewrite Rule:\n%s\n%s\n", 
++  fprintf(fp,"\nRewrite Rule:\n%s\n%s\n",
+           (_tempParams?_tempParams:""),
+           (_tempBlock?_tempBlock:""));
+ }
+@@ -1829,7 +1865,7 @@
+ //==============================Operands=======================================
+ //------------------------------OperandForm------------------------------------
+ OperandForm::OperandForm(const char* id)
+-  : OpClassForm(id), _ideal_only(false), 
++  : OpClassForm(id), _ideal_only(false),
+     _localNames(cmpstr, hashstr, Form::arena) {
+       _ftype = Form::OPER;
+ 
+@@ -1842,7 +1878,7 @@
+       _format    = NULL;
+ }
+ OperandForm::OperandForm(const char* id, bool ideal_only)
+-  : OpClassForm(id), _ideal_only(ideal_only), 
++  : OpClassForm(id), _ideal_only(ideal_only),
+     _localNames(cmpstr, hashstr, Form::arena) {
+       _ftype = Form::OPER;
+ 
+@@ -1948,7 +1984,7 @@
+ 
+   return edges;
+ }
+-  
++
+ 
+ // Check if this operand is usable for cisc-spilling
+ bool  OperandForm::is_cisc_reg(FormDict &globals) const {
+@@ -1981,7 +2017,7 @@
+     else if( strcmp(_ident,"stackSlotL") == 0 ) data_type = Form::idealL;
+   }
+   assert((data_type == none) || (_matrule == NULL), "No match-rule for stackSlotX");
+-  
++
+   return data_type;
+ }
+ 
+@@ -1995,7 +2031,7 @@
+     const char *rc_name = constrained_reg_class();
+     // !!!!!
+     if (rc_name == NULL) return NULL;
+-    // !!!!! !!!!! 
++    // !!!!! !!!!!
+     // Check constraints on result's register class
+     if( registers ) {
+       RegClass *reg_class  = registers->getRegClass(rc_name);
+@@ -2029,7 +2065,7 @@
+ 
+ 
+ // If there is a single ideal type for this interface field, return it.
+-const char *OperandForm::interface_ideal_type(FormDict &globals, 
++const char *OperandForm::interface_ideal_type(FormDict &globals,
+                                               const char *field) const {
+   const char  *ideal_type = NULL;
+   const char  *value      = NULL;
+@@ -2105,7 +2141,7 @@
+   const char *result   = NULL;
+   const char *name     = NULL;
+   const char *type     = NULL;
+-  // iterate through all base operands 
++  // iterate through all base operands
+   // until we reach the register that corresponds to "leaf"
+   // This function is not looking for an ideal type.  It needs the first
+   // level user type associated with the leaf.
+@@ -2116,7 +2152,7 @@
+       reg_class = oper->constrained_reg_class();
+       if( reg_class ) {
+         reg_class = reg_class;
+-      } else { 
++      } else {
+         // ShouldNotReachHere();
+       }
+     } else {
+@@ -2201,7 +2237,7 @@
+   uint  position = 0;
+   Component *comp;
+   _components.reset();
+-  while( (comp = _components.iter()) != NULL  
++  while( (comp = _components.iter()) != NULL
+          && (strcmp(comp->_name,reg_name) != 0) ) {
+     // Special case for operands that take a single user-defined operand
+     // Skip the initial definition in the component list.
+@@ -2239,7 +2275,7 @@
+ 
+ 
+ // --------------------------- FILE *output_routines
+-// 
++//
+ // Output code for disp_is_oop, if true.
+ void OperandForm::disp_is_oop(FILE *fp, FormDict &globals) {
+   //  Check it is a memory interface with a non-user-constant disp field
+@@ -2270,9 +2306,9 @@
+ }
+ 
+ // Generate code for internal and external format methods
+-// 
++//
+ // internal access to reg# node->_idx
+-// access to subsumed constant _c0, _c1, 
++// access to subsumed constant _c0, _c1,
+ void  OperandForm::int_format(FILE *fp, FormDict &globals, uint index) {
+   Form::DataType dtype;
+   if (_matrule && (_matrule->is_base_register(globals) ||
+@@ -2297,10 +2333,10 @@
+     dump();
+     assert( false,"Internal error:\n  output_internal_operand() attempting to output other than a Register or Constant");
+   }
+-}    
++}
+ 
+ // Similar to "int_format" but for cases where data is external to operand
+-// external access to reg# node->in(idx)->_idx, 
++// external access to reg# node->in(idx)->_idx,
+ void  OperandForm::ext_format(FILE *fp, FormDict &globals, uint index) {
+   Form::DataType dtype;
+   if (_matrule && (_matrule->is_base_register(globals) ||
+@@ -2329,11 +2365,11 @@
+ 
+ void OperandForm::format_constant(FILE *fp, uint const_index, uint const_type) {
+   switch(const_type) {
+-  case Form::idealI: fprintf(fp,"tty->print(\"#%%d\", _c%d);\n", const_index); break;
+-  case Form::idealP: fprintf(fp,"_c%d->dump();\n",               const_index); break;
+-  case Form::idealL: fprintf(fp,"tty->print(\"#%%lld\", _c%d);\n", const_index); break;
+-  case Form::idealF: fprintf(fp,"tty->print(\"#%%f\", _c%d);\n", const_index); break;
+-  case Form::idealD: fprintf(fp,"tty->print(\"#%%f\", _c%d);\n", const_index); break;
++  case Form::idealI: fprintf(fp,"st->print(\"#%%d\", _c%d);\n", const_index); break;
++  case Form::idealP: fprintf(fp,"_c%d->dump_on(st);\n",         const_index); break;
++  case Form::idealL: fprintf(fp,"st->print(\"#%%lld\", _c%d);\n", const_index); break;
++  case Form::idealF: fprintf(fp,"st->print(\"#%%f\", _c%d);\n", const_index); break;
++  case Form::idealD: fprintf(fp,"st->print(\"#%%f\", _c%d);\n", const_index); break;
+   default:
+     assert( false, "ShouldNotReachHere()");
+   }
+@@ -2402,8 +2438,8 @@
+   return false;
+ }
+ 
+-// Output code to access the value of the index'th constant 
+-void OperandForm::access_constant(FILE *fp, FormDict &globals, 
++// Output code to access the value of the index'th constant
++void OperandForm::access_constant(FILE *fp, FormDict &globals,
+                                   uint const_index) {
+   OperandForm *oper = constant_operand(globals, const_index);
+   assert( oper, "Index exceeds number of constants in operand");
+@@ -2454,12 +2490,12 @@
+ 
+ void Constraint::output(FILE *fp) {           // Write info to output files
+   assert((_func != NULL && _arg != NULL),"missing constraint function or arg");
+-  fprintf(fp,"Constraint: %s ( %s )\n", _func, _arg); 
++  fprintf(fp,"Constraint: %s ( %s )\n", _func, _arg);
+ }
+ 
+ //------------------------------Predicate--------------------------------------
+ Predicate::Predicate(char *pr)
+-  : _pred(pr) { 
++  : _pred(pr) {
+ }
+ Predicate::~Predicate() {
+ }
+@@ -2516,7 +2552,7 @@
+ }
+ 
+ //------------------------------RegInterface-----------------------------------
+-RegInterface::RegInterface() : Interface("REG_INTER") { 
++RegInterface::RegInterface() : Interface("REG_INTER") {
+ }
+ RegInterface::~RegInterface() {
+ }
+@@ -2531,7 +2567,7 @@
+ }
+ 
+ //------------------------------ConstInterface---------------------------------
+-ConstInterface::ConstInterface() : Interface("CONST_INTER") { 
++ConstInterface::ConstInterface() : Interface("CONST_INTER") {
+ }
+ ConstInterface::~ConstInterface() {
+ }
+@@ -2546,7 +2582,7 @@
+ }
+ 
+ //------------------------------MemInterface-----------------------------------
+-MemInterface::MemInterface(char *base, char *index, char *scale, char *disp) 
++MemInterface::MemInterface(char *base, char *index, char *scale, char *disp)
+   : Interface("MEMORY_INTER"), _base(base), _index(index), _scale(scale), _disp(disp) {
+ }
+ MemInterface::~MemInterface() {
+@@ -2568,14 +2604,14 @@
+ }
+ 
+ //------------------------------CondInterface----------------------------------
+-CondInterface::CondInterface(char *equal,      char *not_equal, 
+-                             char *less,       char *greater_equal, 
+-                             char *less_equal, char *greater) 
+-  : Interface("COND_INTER"), 
+-    _equal(equal), _not_equal(not_equal), 
++CondInterface::CondInterface(char *equal,      char *not_equal,
++                             char *less,       char *greater_equal,
++                             char *less_equal, char *greater)
++  : Interface("COND_INTER"),
++    _equal(equal), _not_equal(not_equal),
+     _less(less), _greater_equal(greater_equal),
+     _less_equal(less_equal), _greater(greater) {
+-      // 
++      //
+ }
+ CondInterface::~CondInterface() {
+   // not owner of any character arrays
+@@ -2599,7 +2635,7 @@
+ 
+ //------------------------------ConstructRule----------------------------------
+ ConstructRule::ConstructRule(char *cnstr)
+-  : _construct(cnstr) { 
++  : _construct(cnstr) {
+ }
+ ConstructRule::~ConstructRule() {
+ }
+@@ -2660,7 +2696,7 @@
+ //------------------------------Component--------------------------------------
+ Component::Component(const char *name, const char *type, int usedef)
+   : _name(name), _type(type), _usedef(usedef) {
+-    _ftype = Form::COMP; 
++    _ftype = Form::COMP;
+ }
+ Component::~Component() {
+ }
+@@ -2728,8 +2764,8 @@
+   NameList::addName((char *)component);
+   if(mflag) _matchcnt++;
+ }
+-void   ComponentList::insert(const char *name, const char *opType, int usedef, 
+-			     bool mflag) {
++void   ComponentList::insert(const char *name, const char *opType, int usedef,
++                             bool mflag) {
+   Component * component = new Component(name, opType, usedef);
+   insert(component, mflag);
+ }
+@@ -2811,7 +2847,7 @@
+   Component* preceding_non_use = NULL;
+   Component* first_def = NULL;
+   for (reset(); (component = iter()) != NULL; ++position) {
+-    // When the first component is not a DEF, 
++    // When the first component is not a DEF,
+     // leave space for the result operand!
+     if ( position==0 && (! component->isa(Component::DEF)) ) {
+       ++position;
+@@ -2820,18 +2856,18 @@
+     if (strcmp(name, component->_name)==0 && (component->isa(usedef))) {
+       // When the first entry in the component list is a DEF and a USE
+       // Treat them as being separate, a DEF first, then a USE
+-      if( position==0 
++      if( position==0
+           && usedef==Component::USE && component->isa(Component::DEF) ) {
+-	assert(position+1 < num_opnds, "advertised index in bounds");
++        assert(position+1 < num_opnds, "advertised index in bounds");
+         return position+1;
+       } else {
+-	if( preceding_non_use && strcmp(component->_name, preceding_non_use->_name) ) {
+-	  fprintf(stderr, "the name '%s' should not precede the name '%s'\n", preceding_non_use->_name, name);
+-	}
+-	if( position >= num_opnds ) {
+-	  fprintf(stderr, "the name '%s' is too late in its name list\n", name);
+-	}
+-	assert(position < num_opnds, "advertised index in bounds");
++        if( preceding_non_use && strcmp(component->_name, preceding_non_use->_name) ) {
++          fprintf(stderr, "the name '%s' should not precede the name '%s'\n", preceding_non_use->_name, name);
++        }
++        if( position >= num_opnds ) {
++          fprintf(stderr, "the name '%s' is too late in its name list\n", name);
++        }
++        assert(position < num_opnds, "advertised index in bounds");
+         return position;
+       }
+     }
+@@ -2858,12 +2894,12 @@
+   int position = 0;
+   Component *component;
+   for (reset(); (component = iter()) != NULL; ++position) {
+-    // When the first component is not a DEF, 
++    // When the first component is not a DEF,
+     // leave space for the result operand!
+     if ( position==0 && (! component->isa(Component::DEF)) ) {
+       ++position;
+     }
+-    if (strcmp(name, component->_name)==0) { 
++    if (strcmp(name, component->_name)==0) {
+       return position;
+     }
+     if( component->isa(Component::DEF)
+@@ -2888,12 +2924,12 @@
+   int position = 0;
+   reset();
+   for( Component *comp; (comp = iter()) != NULL; ++position) {
+-    // When the first component is not a DEF, 
++    // When the first component is not a DEF,
+     // leave space for the result operand!
+     if ( position==0 && (! comp->isa(Component::DEF)) ) {
+       ++position;
+     }
+-    if (strcmp(comp->_type, "label")==0) { 
++    if (strcmp(comp->_type, "label")==0) {
+       return position;
+     }
+     if( comp->isa(Component::DEF)
+@@ -2911,12 +2947,12 @@
+   int position = 0;
+   reset();
+   for( Component *comp; (comp = iter()) != NULL; ++position) {
+-    // When the first component is not a DEF, 
++    // When the first component is not a DEF,
+     // leave space for the result operand!
+     if ( position==0 && (! comp->isa(Component::DEF)) ) {
+       ++position;
+     }
+-    if (strcmp(comp->_type, "method")==0) { 
++    if (strcmp(comp->_type, "method")==0) {
+       return position;
+     }
+     if( comp->isa(Component::DEF)
+@@ -2945,23 +2981,23 @@
+ MatchNode::MatchNode(ArchDesc &ad, const char *result, const char *mexpr,
+                      const char *opType, MatchNode *lChild, MatchNode *rChild)
+   : _AD(ad), _result(result), _name(mexpr), _opType(opType),
+-    _lChild(lChild), _rChild(rChild), _internalop(0), _numleaves(0), 
++    _lChild(lChild), _rChild(rChild), _internalop(0), _numleaves(0),
+     _commutative_id(0) {
+-  _numleaves = (lChild ? lChild->_numleaves : 0) 
++  _numleaves = (lChild ? lChild->_numleaves : 0)
+                + (rChild ? rChild->_numleaves : 0);
+ }
+ 
+-MatchNode::MatchNode(ArchDesc &ad, MatchNode& mnode) 
++MatchNode::MatchNode(ArchDesc &ad, MatchNode& mnode)
+   : _AD(ad), _result(mnode._result), _name(mnode._name),
+-    _opType(mnode._opType), _lChild(mnode._lChild), _rChild(mnode._rChild), 
+-    _internalop(0), _numleaves(mnode._numleaves), 
++    _opType(mnode._opType), _lChild(mnode._lChild), _rChild(mnode._rChild),
++    _internalop(0), _numleaves(mnode._numleaves),
+     _commutative_id(mnode._commutative_id) {
+ }
+ 
+-MatchNode::MatchNode(ArchDesc &ad, MatchNode& mnode, int clone) 
++MatchNode::MatchNode(ArchDesc &ad, MatchNode& mnode, int clone)
+   : _AD(ad), _result(mnode._result), _name(mnode._name),
+     _opType(mnode._opType),
+-    _internalop(0), _numleaves(mnode._numleaves), 
++    _internalop(0), _numleaves(mnode._numleaves),
+     _commutative_id(mnode._commutative_id) {
+   if (mnode._lChild) {
+     _lChild = new MatchNode(ad, *mnode._lChild, clone);
+@@ -2995,7 +3031,7 @@
+ 
+ // Recursive call collecting info on top-level operands, not transitive.
+ // Implementation does not modify state of internal structures.
+-void MatchNode::append_components(FormDict &locals, ComponentList &components, 
++void MatchNode::append_components(FormDict &locals, ComponentList &components,
+                                   bool deflag) const {
+   int   usedef = deflag ? Component::DEF : Component::USE;
+   FormDict &globals = _AD.globalNames();
+@@ -3006,12 +3042,12 @@
+     // If _opType is not an operation, do not build a component for it #####
+     const Form *f = globals[_opType];
+     if( f != NULL ) {
+-      // Add non-ideals that are operands, operand-classes, 
++      // Add non-ideals that are operands, operand-classes,
+       if( ! f->ideal_only()
+           && (f->is_opclass() || f->is_operand()) ) {
+         components.insert(_name, _opType, usedef, true);
+       }
+-    } 
++    }
+     return;
+   }
+   // Promote results of "Set" to DEF
+@@ -3021,14 +3057,14 @@
+   if (_rChild) _rChild->append_components(locals, components, def_flag);
+ }
+ 
+-// Find the n'th base-operand in the match node, 
++// Find the n'th base-operand in the match node,
+ // recursively investigates match rules of user-defined operands.
+-// 
++//
+ // Implementation does not modify state of internal structures since they
+ // can be shared.
+-bool MatchNode::base_operand(uint &position, FormDict &globals, 
+-			     const char * &result, const char * &name, 
+-			     const char * &opType) const {
++bool MatchNode::base_operand(uint &position, FormDict &globals,
++                             const char * &result, const char * &name,
++                             const char * &opType) const {
+   assert (_name != NULL, "MatchNode::base_operand encountered empty node\n");
+   // Base case
+   if (_lChild==NULL && _rChild==NULL) {
+@@ -3058,7 +3094,7 @@
+         matchNode = (MatchNode*)inForm->_matrule;
+       }
+     }
+-    // if this is user-defined, recurse on match rule 
++    // if this is user-defined, recurse on match rule
+     // User-defined operand and instruction forms have a match-rule.
+     if (matchNode) {
+       return (matchNode->base_operand(position,globals,result,name,opType));
+@@ -3081,7 +3117,7 @@
+       if (_lChild->base_operand(position, globals, result, name, opType))
+         return 1;
+     }
+-    
++
+     if (_rChild) {
+       if (_rChild->base_operand(position, globals, result, name, opType))
+         return 1;
+@@ -3099,7 +3135,7 @@
+   const char *name;
+   const char *opType;
+ 
+-  for (uint position = index; 
++  for (uint position = index;
+        base_operand(position,globals,result,name,opType); position = index) {
+     ++index;
+     if( ideal_to_const_type(opType) )        num_consts++;
+@@ -3117,7 +3153,7 @@
+   const char *name;
+   const char *opType;
+ 
+-  for (uint position = index; 
++  for (uint position = index;
+        base_operand(position,globals,result,name,opType); position = index) {
+     ++index;
+     if( ideal_to_const_type(opType) == type ) num_consts++;
+@@ -3140,15 +3176,15 @@
+   const char      *rightStr    = NULL;
+ 
+   // If we are a "Set", start from the right child.
+-  const MatchNode *const mnode = sets_result() ? 
+-    (const MatchNode *const)this->_rChild : 
++  const MatchNode *const mnode = sets_result() ?
++    (const MatchNode *const)this->_rChild :
+     (const MatchNode *const)this;
+ 
+   // If our right child exists, it is the right reduction
+   if ( mnode->_rChild ) {
+     rightStr = mnode->_rChild->_internalop ? mnode->_rChild->_internalop
+       : mnode->_rChild->_opType;
+-  } 
++  }
+   // Else, May be simple chain rule: (Set dst operand_form), rightStr=NULL;
+   return rightStr;
+ }
+@@ -3158,8 +3194,8 @@
+   const char  *leftStr  = NULL;
+ 
+   // If we are a "Set", start from the right child.
+-  const MatchNode *const mnode = sets_result() ? 
+-    (const MatchNode *const)this->_rChild : 
++  const MatchNode *const mnode = sets_result() ?
++    (const MatchNode *const)this->_rChild :
+     (const MatchNode *const)this;
+ 
+   // If our left child exists, it is the left reduction
+@@ -3224,9 +3260,9 @@
+   // Build string representation of subtree
+   // Operation lchildType rchildType
+   int len = (int)strlen(_opType) + 4;
+-  lstr = (_lChild) ? ((_lChild->_internalop) ? 
++  lstr = (_lChild) ? ((_lChild->_internalop) ?
+                        _lChild->_internalop : _lChild->_opType) : "";
+-  rstr = (_rChild) ? ((_rChild->_internalop) ? 
++  rstr = (_rChild) ? ((_rChild->_internalop) ?
+                        _rChild->_internalop : _rChild->_opType) : "";
+   len += (int)strlen(lstr) + (int)strlen(rstr);
+   subtree = (char *)malloc(len);
+@@ -3253,7 +3289,7 @@
+ void MatchNode::output(FILE *fp) {
+   if (_lChild==0 && _rChild==0) {
+     fprintf(fp," %s",_name);    // operand
+-  } 
++  }
+   else {
+     fprintf(fp," (%s ",_name);  // " (opcodeName "
+     if(_lChild) _lChild->output(fp); //               left operand
+@@ -3267,7 +3303,7 @@
+     "StoreI","StoreL","StoreP","StoreD","StoreF" ,
+     "StoreB","StoreC","Store" ,"StoreFP",
+     "LoadI" ,"LoadL", "LoadP" ,"LoadD" ,"LoadF"  ,
+-    "LoadB" ,"LoadC" ,"LoadS" ,"Load"   , 
++    "LoadB" ,"LoadC" ,"LoadS" ,"Load"   ,
+     "Store4I","Store2I","Store2L","Store2D","Store4F","Store2F","Store16B",
+     "Store8B","Store4B","Store8C","Store4C","Store2C",
+     "Load4I" ,"Load2I" ,"Load2L" ,"Load2D" ,"Load4F" ,"Load2F" ,"Load16B" ,
+@@ -3275,7 +3311,7 @@
+     "LoadRange", "LoadKlass", "LoadL_unaligned", "LoadD_unaligned",
+     "LoadPLocked", "LoadLLocked",
+     "StorePConditional", "StoreLConditional",
+-    "CompareAndSwapI", "CompareAndSwapL", "CompareAndSwapP", 
++    "CompareAndSwapI", "CompareAndSwapL", "CompareAndSwapP",
+     "StoreCM",
+     "ClearArray"
+   };
+@@ -3298,7 +3334,7 @@
+     if( _rChild->needs_ideal_memory_edge(globals) )
+       return 1;
+   }
+-  
++
+   return 0;
+ }
+ 
+@@ -3316,8 +3352,8 @@
+     OperandForm *src_op = globals[src]->is_operand();
+     assert( src_op, "Not operand class of chain rule" );
+     return src_op->_matrule ? src_op->_matrule->needs_base_oop_edge() : 0;
+-  }                             // Else check instruction 
+-    
++  }                             // Else check instruction
++
+   return _matrule ? _matrule->needs_base_oop_edge() : 0;
+ }
+ 
+@@ -3372,7 +3408,7 @@
+   int right_spillable = Maybe_cisc_spillable;
+ 
+   // Check that each has same number of operands at this level
+-  if( (_lChild && !(mRule2->_lChild)) || (_rChild && !(mRule2->_rChild)) ) 
++  if( (_lChild && !(mRule2->_lChild)) || (_rChild && !(mRule2->_rChild)) )
+     return Not_cisc_spillable;
+ 
+   // Base Case: check that the current operands/operations match
+@@ -3388,8 +3424,8 @@
+     const char *name_left  = mRule2->_lChild ? mRule2->_lChild->_opType : NULL;
+     const char *name_right = mRule2->_rChild ? mRule2->_rChild->_opType : NULL;
+     // Detect reg vs (loadX memory)
+-    if( form->is_cisc_reg(globals) 
+-        && form2_inst 
++    if( form->is_cisc_reg(globals)
++        && form2_inst
+         && (is_load_from_memory(mRule2->_opType) != Form::none) // reg vs. (load memory)
+         && (name_left != NULL)       // NOT (load)
+         && (name_right == NULL) ) {  // NOT (load memory foo)
+@@ -3442,11 +3478,11 @@
+ 
+ //---------------------------cisc_spill_match----------------------------------
+ // Recursively check two MatchRules for legal conversion via cisc-spilling
+-// This method handles the root of Match tree, 
++// This method handles the root of Match tree,
+ // general recursive checks done in MatchNode
+-int  MatchRule::cisc_spill_match(FormDict &globals, RegisterForm *registers, 
+-				 MatchRule *mRule2, const char * &operand, 
+-				 const char * &reg_type) {
++int  MatchRule::cisc_spill_match(FormDict &globals, RegisterForm *registers,
++                                 MatchRule *mRule2, const char * &operand,
++                                 const char * &reg_type) {
+   int cisc_spillable  = Maybe_cisc_spillable;
+   int left_spillable  = Maybe_cisc_spillable;
+   int right_spillable = Maybe_cisc_spillable;
+@@ -3500,14 +3536,14 @@
+   }
+ 
+   if (_lChild ) {
+-    if( !_lChild->equivalent(globals, mRule2->_lChild) ) 
++    if( !_lChild->equivalent(globals, mRule2->_lChild) )
+       return false;
+   } else if (mRule2->_lChild) {
+     return false; // I have NULL left child, mRule2 has non-NULL left child.
+   }
+ 
+   if (_rChild ) {
+-    if( !_rChild->equivalent(globals, mRule2->_rChild) ) 
++    if( !_rChild->equivalent(globals, mRule2->_rChild) )
+       return false;
+   } else if (mRule2->_rChild) {
+     return false; // I have NULL right child, mRule2 has non-NULL right child.
+@@ -3578,7 +3614,7 @@
+ // Recursively swap specified commutative operation with subtree operands.
+ void MatchNode::swap_commutative_op(bool atroot, int id) {
+   if( _commutative_id == id ) { // id should be > 0
+-    assert(_lChild && _rChild && (_lChild->_lChild || _rChild->_lChild ), 
++    assert(_lChild && _rChild && (_lChild->_lChild || _rChild->_lChild ),
+             "not swappable operation");
+     MatchNode* tmp = _lChild;
+     _lChild = _rChild;
+@@ -3625,7 +3661,7 @@
+ }
+ 
+ MatchRule::MatchRule(ArchDesc &ad, MatchRule* mRule)
+-  : MatchNode(ad, *mRule, 0), _depth(mRule->_depth), 
++  : MatchNode(ad, *mRule, 0), _depth(mRule->_depth),
+     _construct(mRule->_construct), _numchilds(mRule->_numchilds) {
+     _next = NULL;
+ }
+@@ -3649,7 +3685,7 @@
+ void MatchRule::append_components(FormDict &locals, ComponentList &components) const {
+   assert (_name != NULL, "MatchNode::build_components encountered empty node\n");
+ 
+-  MatchNode::append_components(locals, components, 
++  MatchNode::append_components(locals, components,
+                                false /* not necessarily a def */);
+ }
+ 
+@@ -3659,7 +3695,7 @@
+ // The MatchNode that is called first treats its
+ bool MatchRule::base_operand(uint &position0, FormDict &globals,
+                              const char *&result, const char * &name,
+-			     const char * &opType)const{
++                             const char * &opType)const{
+   uint position = position0;
+ 
+   return (MatchNode::base_operand( position, globals, result, name, opType));
+@@ -3673,12 +3709,12 @@
+   const char  *opType   = NULL;
+   if (!base_operand(position, globals, result, name, opType)) {
+     position = 0;
+-    if( base_operand(position, globals, result, name, opType) && 
+-        (strcmp(opType,"RegI")==0 || 
+-         strcmp(opType,"RegP")==0 || 
+-         strcmp(opType,"RegL")==0 || 
+-         strcmp(opType,"RegF")==0 || 
+-         strcmp(opType,"RegD")==0 || 
++    if( base_operand(position, globals, result, name, opType) &&
++        (strcmp(opType,"RegI")==0 ||
++         strcmp(opType,"RegP")==0 ||
++         strcmp(opType,"RegL")==0 ||
++         strcmp(opType,"RegF")==0 ||
++         strcmp(opType,"RegD")==0 ||
+          strcmp(opType,"Reg" )==0) ) {
+       return 1;
+     }
+@@ -3714,20 +3750,31 @@
+   if (_rChild) {
+     const char *rch = _rChild->_opType;
+     const Form *form = globals[rch];
+-    if ((!strcmp(_opType,"Set") && 
++    if ((!strcmp(_opType,"Set") &&
+          ((form) && form->is_operand()))) {
+       return true;
+     }
+   }
+   return false;
+-}  
++}
+ 
+ int MatchRule::is_ideal_copy() const {
+   if( _rChild ) {
+     const char  *opType = _rChild->_opType;
++    if( strcmp(opType,"CastII")==0 )
++      return 1;
++    // Do not treat *CastPP this way, because it
++    // may transfer a raw pointer to an oop.
++    // If the register allocator were to coalesce this
++    // into a single LRG, the GC maps would be incorrect.
++    //if( strcmp(opType,"CastPP")==0 )
++    //  return 1;
++    //if( strcmp(opType,"CheckCastPP")==0 )
++    //  return 1;
++    //
+     // Do not treat CastX2P or CastP2X this way, because
+     // raw pointers and int types are treated differently
+-    // when saving local & stack info for safepoints in 
++    // when saving local & stack info for safepoints in
+     // Output().
+     //if( strcmp(opType,"CastX2P")==0 )
+     //  return 1;
+@@ -3745,16 +3792,16 @@
+   if( _rChild ) {
+     const char  *opType = _rChild->_opType;
+     if( strcmp(opType,"AtanD")==0 ||
+-        strcmp(opType,"CosD")==0 || 
+-        strcmp(opType,"DivD")==0 || 
+-        strcmp(opType,"DivF")==0 || 
+-        strcmp(opType,"DivI")==0 || 
++        strcmp(opType,"CosD")==0 ||
++        strcmp(opType,"DivD")==0 ||
++        strcmp(opType,"DivF")==0 ||
++        strcmp(opType,"DivI")==0 ||
+         strcmp(opType,"ExpD")==0 ||
+         strcmp(opType,"LogD")==0 ||
+         strcmp(opType,"Log10D")==0 ||
+-        strcmp(opType,"ModD")==0 || 
+-        strcmp(opType,"ModF")==0 || 
+-        strcmp(opType,"ModI")==0 || 
++        strcmp(opType,"ModD")==0 ||
++        strcmp(opType,"ModF")==0 ||
++        strcmp(opType,"ModI")==0 ||
+         strcmp(opType,"PowD")==0 ||
+         strcmp(opType,"SinD")==0 ||
+         strcmp(opType,"SqrtD")==0 ||
+@@ -3773,7 +3820,6 @@
+         strcmp(opType,"ConvL2I")==0 ||
+         strcmp(opType,"RoundDouble")==0 ||
+         strcmp(opType,"RoundFloat")==0 ||
+-        strcmp(opType,"ThreadLocal")==0 ||
+         strcmp(opType,"ReverseBytesI")==0 ||
+         strcmp(opType,"ReverseBytesL")==0 ||
+         strcmp(opType,"Replicate16B")==0 ||
+@@ -3789,7 +3835,7 @@
+         strcmp(opType,"Replicate4F")==0 ||
+         strcmp(opType,"Replicate2F")==0 ||
+         strcmp(opType,"Replicate2D")==0 ||
+-        0 /* 0 to line up columns nicely */ ) 
++        0 /* 0 to line up columns nicely */ )
+       return 1;
+   }
+   return 0;
+@@ -3810,8 +3856,8 @@
+ 
+ bool MatchRule::is_ideal_if() const {
+   if( !_opType ) return false;
+-  return 
+-    !strcmp(_opType,"If"            ) || 
++  return
++    !strcmp(_opType,"If"            ) ||
+     !strcmp(_opType,"CountedLoopEnd");
+ }
+ 
+@@ -3824,11 +3870,11 @@
+ 
+ bool MatchRule::is_ideal_membar() const {
+   if( !_opType ) return false;
+-  return 
+-    !strcmp(_opType,"MemBarAcquire"  ) || 
+-    !strcmp(_opType,"MemBarRelease"  ) || 
+-    !strcmp(_opType,"MemBarVolatile" ) || 
+-    !strcmp(_opType,"MemBarCPUOrder" ) ; 
++  return
++    !strcmp(_opType,"MemBarAcquire"  ) ||
++    !strcmp(_opType,"MemBarRelease"  ) ||
++    !strcmp(_opType,"MemBarVolatile" ) ||
++    !strcmp(_opType,"MemBarCPUOrder" ) ;
+ }
+ 
+ bool MatchRule::is_ideal_loadPC() const {
+@@ -3911,7 +3957,7 @@
+ 
+ //------------------------------Attribute--------------------------------------
+ Attribute::Attribute(char *id, char* val, int type)
+-  : _ident(id), _val(val), _atype(type) { 
++  : _ident(id), _val(val), _atype(type) {
+ }
+ Attribute::~Attribute() {
+ }
+@@ -3921,13 +3967,13 @@
+   int result = 0;
+   if (!_val || !ADLParser::is_int_token(_val, result)) {
+     ad.syntax_err(0, "Attribute %s must have an integer value: %s",
+-		  _ident, _val ? _val : "");
++                  _ident, _val ? _val : "");
+   }
+   return result;
+ }
+ 
+ void Attribute::dump() {
+-  output(stderr); 
++  output(stderr);
+ } // Debug printer
+ 
+ // Write to output files
+@@ -3937,7 +3983,7 @@
+ 
+ //------------------------------FormatRule----------------------------------
+ FormatRule::FormatRule(char *temp)
+-  : _temp(temp) { 
++  : _temp(temp) {
+ }
+ FormatRule::~FormatRule() {
+ }
+@@ -3951,4 +3997,3 @@
+   fprintf(fp,"\nFormat Rule: \n%s", (_temp?_temp:""));
+   fprintf(fp,"\n");
+ }
+-
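[Editorial note between file diffs] A minimal standalone sketch of the copy-classification rule the @@ -3714 hunk above introduces in MatchRule::is_ideal_copy(): CastII is now reported as an ideal copy, while the pointer casts stay excluded. The function name here is hypothetical; only the string comparison and the GC-map rationale come from the patch itself.

    #include <cstring>

    // Sketch: true only for the one cast the patch treats as a
    // register-to-register copy. CastPP/CheckCastPP are deliberately
    // excluded because they may transfer a raw pointer to an oop, and
    // coalescing both into a single live range would make the GC maps
    // incorrect; CastX2P/CastP2X are excluded because raw pointers and
    // ints are saved differently at safepoints (see the patch comments).
    static bool is_copy_like_cast(const char *opType) {
      return strcmp(opType, "CastII") == 0;
    }
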
+diff -ruN openjdk6/hotspot/src/share/vm/adlc/formssel.hpp openjdk/hotspot/src/share/vm/adlc/formssel.hpp
+--- openjdk6/hotspot/src/share/vm/adlc/formssel.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/adlc/formssel.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)formssel.hpp	1.76 07/05/17 15:49:19 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // FORMSSEL.HPP - ADL Parser Instruction Selection Forms Classes
+@@ -95,7 +92,7 @@
+   char          *_size;            // Size of instruction
+   InsEncode     *_insencode;       // Encoding class instruction belongs to
+   Attribute     *_attribs;         // List of Attribute rules
+-  Predicate     *_predicate;  	   // Predicate test for this instruction
++  Predicate     *_predicate;       // Predicate test for this instruction
+   FormDict       _effects;         // Dictionary of effect rules
+   ExpandRule    *_exprule;         // Expand rule for this instruction
+   RewriteRule   *_rewrule;         // Rewrite rule for this instruction
+@@ -137,8 +134,10 @@
+   virtual bool        is_projection(FormDict &globals); // node requires projection
+   virtual bool        is_parm(FormDict &globals); // node matches ideal 'Parm'
+   // ideal opcode enumeration
+-  virtual const char *ideal_Opcode(FormDict &globals)  const; 
++  virtual const char *ideal_Opcode(FormDict &globals)  const;
+   virtual int         is_expensive() const;     // node matches ideal 'CosD'
++  virtual int         is_empty_encoding() const; // _size=0 and/or _insencode empty
++  virtual int         is_tls_instruction() const; // tlsLoadP rule or ideal ThreadLocal
+   virtual int         is_ideal_copy() const;    // node matches ideal 'Copy*'
+   virtual bool        is_ideal_unlock() const;  // node matches ideal 'Unlock'
+   virtual bool        is_ideal_call_leaf() const; // node matches ideal 'CallLeaf'
+@@ -169,7 +168,7 @@
+   // Check if a simple chain rule
+   virtual bool        is_simple_chain_rule(FormDict &globals) const;
+ 
+-  // check for structural rematerialization 
++  // check for structural rematerialization
+   virtual bool        rematerialize(FormDict &globals, RegisterForm *registers);
+ 
+   // loads from memory, so must check for anti-dependence
+@@ -189,7 +188,7 @@
+ 
+   virtual const char *cost();      // Access ins_cost attribute
+   virtual uint        num_opnds(); // Count of num_opnds for MachNode class
+-  virtual uint        num_post_match_opnds(); 
++  virtual uint        num_post_match_opnds();
+   virtual uint        num_consts(FormDict &globals) const;// Constants in match rule
+   // Constants in match rule with specified type
+   virtual uint        num_consts(FormDict &globals, Form::DataType type) const;
+@@ -198,7 +197,7 @@
+   virtual const char *out_reg_class(FormDict &globals);
+ 
+   // number of ideal node inputs to skip
+-  virtual uint        oper_input_base(FormDict &globals); 
++  virtual uint        oper_input_base(FormDict &globals);
+ 
+   // Does this instruction need a base-oop edge?
+   int needs_base_oop_edge(FormDict &globals) const;
+@@ -257,9 +256,9 @@
+   // Seach through operands to determine operands unique positions.
+   void                set_unique_opnds();
+   uint                num_unique_opnds() { return _num_uniq; }
+-  uint                unique_opnds_idx(int idx) { 
++  uint                unique_opnds_idx(int idx) {
+                         if( _uniq_idx != NULL && idx > 0 )
+-                          return _uniq_idx[idx]; 
++                          return _uniq_idx[idx];
+                         else
+                           return idx;
+                       }
+@@ -272,12 +271,12 @@
+   }
+ 
+   // --------------------------- FILE *output_routines
+-  // 
++  //
+   // Generate the format call for the replacement variable
+   void                rep_var_format(FILE *fp, const char *rep_var);
+   // Generate index values needed for determing the operand position
+   void                index_temps   (FILE *fp, FormDict &globals, const char *prefix = "", const char *receiver = "");
+-  // --------------------------- 
++  // ---------------------------
+ 
+   virtual bool verify();           // Check consistency after parsing
+ 
+@@ -319,12 +318,12 @@
+   // There is an entry in _strings, perhaps NULL, that precedes each _rep_vars
+   NameList       _code;            // Strings passed through to tty->print
+   NameList       _rep_vars;        // replacement variables
+-                                   
++
+   NameList       _parameters;      // Locally defined names
+   FormDict       _localNames;      // Table of components & their types
+-                                   
+-public:                            
+-  // Public Data                   
++
++public:
++  // Public Data
+   const char    *_name;            // encoding class name
+ 
+   // Public Methods
+@@ -434,7 +433,7 @@
+ 
+   // --------------------------- Parameters
+   // The following call depends upon position within encode_class_iteration
+-  // 
++  //
+   // Obtain parameter name from zero based index
+   const char   *rep_var_name(InstructForm &inst, uint param_no);
+   // ---------------------------
+@@ -557,7 +556,7 @@
+   FormatRule    *_format;     // Format for assembly generation
+   NameList       _classes;    // List of opclasses which contain this oper
+ 
+-  ComponentList _components;  // 
++  ComponentList _components;  //
+ 
+   // Public Methods
+   OperandForm(const char *id);
+@@ -587,11 +586,11 @@
+ 
+   // node matches ideal 'Bool', grab condition codes from the ideal world
+   virtual bool        is_ideal_bool()  const;
+-  
++
+   // Has an integer constant suitable for spill offsets
+-  bool has_conI(FormDict &globals) const { 
++  bool has_conI(FormDict &globals) const {
+     return (num_consts(globals,idealI) == 1) && !is_ideal_bool(); }
+-  bool has_conL(FormDict &globals) const { 
++  bool has_conL(FormDict &globals) const {
+     return (num_consts(globals,idealL) == 1) && !is_ideal_bool(); }
+ 
+   // Node is user-defined operand for an sRegX
+@@ -600,7 +599,7 @@
+   // Return ideal type, if there is a single ideal type for this operand
+   virtual const char *ideal_type(FormDict &globals, RegisterForm *registers = NULL) const;
+   // If there is a single ideal type for this interface field, return it.
+-  virtual const char *interface_ideal_type(FormDict   &globals, 
++  virtual const char *interface_ideal_type(FormDict   &globals,
+                                            const char *field_name) const;
+ 
+   // Return true if this operand represents a bound register class
+@@ -649,17 +648,17 @@
+ 
+ 
+   // --------------------------- FILE *output_routines
+-  // 
++  //
+   // Output code for disp_is_oop, if true.
+   void                disp_is_oop(FILE *fp, FormDict &globals);
+   // Generate code for internal and external format methods
+   void                int_format(FILE *fp, FormDict &globals, uint index);
+   void                ext_format(FILE *fp, FormDict &globals, uint index);
+   void                format_constant(FILE *fp, uint con_index, uint con_type);
+-  // Output code to access the value of the index'th constant 
++  // Output code to access the value of the index'th constant
+   void                access_constant(FILE *fp, FormDict &globals,
+                                       uint con_index);
+-  // --------------------------- 
++  // ---------------------------
+ 
+ 
+   virtual void dump();             // Debug printer
+@@ -780,7 +779,7 @@
+   const char *_greater_equal;
+   const char *_less_equal;
+   const char *_greater;
+-  
++
+   // Public Methods
+   CondInterface(char *equal, char *not_equal, char *less, char *greater_equal,
+                 char *less_equal, char *greater);
+@@ -869,10 +868,10 @@
+ 
+ public:
+   // Implementation depends upon working bit intersection and union.
+-  enum use_def_enum { 
+-    INVALID = 0x0, 
+-    USE     = 0x1, 
+-    DEF     = 0x2, USE_DEF   = 0x3, 
++  enum use_def_enum {
++    INVALID = 0x0,
++    USE     = 0x1,
++    DEF     = 0x2, USE_DEF   = 0x3,
+     KILL    = 0x4, USE_KILL  = 0x5,
+     SYNTHETIC = 0x8,
+     TEMP = USE | SYNTHETIC
+@@ -897,22 +896,22 @@
+   int         _commutative_id;     // id of commutative operation
+ 
+   // Public Methods
+-  MatchNode(ArchDesc &ad, const char *result = 0, const char *expr = 0, 
+-            const char *opType=0, MatchNode *lChild=NULL, 
+-	    MatchNode *rChild=NULL);
++  MatchNode(ArchDesc &ad, const char *result = 0, const char *expr = 0,
++            const char *opType=0, MatchNode *lChild=NULL,
++            MatchNode *rChild=NULL);
+   MatchNode(ArchDesc &ad, MatchNode& mNode); // Shallow copy constructor;
+   MatchNode(ArchDesc &ad, MatchNode& mNode, int clone); // Construct clone
+   ~MatchNode();
+ 
+-  // return 0 if not found:  
++  // return 0 if not found:
+   // return 1 if found and position is incremented by operand offset in rule
+   bool       find_name(const char *str, int &position) const;
+   bool       find_type(const char *str, int &position) const;
+-  void       append_components(FormDict &locals, ComponentList &components, 
++  void       append_components(FormDict &locals, ComponentList &components,
+                                bool def_flag) const;
+-  bool       base_operand(uint &position, FormDict &globals, 
+-                         const char * &result, const char * &name, 
+-			 const char * &opType) const;
++  bool       base_operand(uint &position, FormDict &globals,
++                         const char * &result, const char * &name,
++                         const char * &opType) const;
+   // recursive count on operands
+   uint       num_consts(FormDict &globals) const;
+   uint       num_const_ptrs(FormDict &globals) const;
+@@ -935,11 +934,11 @@
+   const char *reduce_left (FormDict &globals)  const;
+ 
+   // Recursive version of check in MatchRule
+-  int        cisc_spill_match(FormDict &globals, RegisterForm *registers, 
+-			      MatchNode *mRule2, const char * &operand, 
+-			      const char * &reg_type);
++  int        cisc_spill_match(FormDict &globals, RegisterForm *registers,
++                              MatchNode *mRule2, const char * &operand,
++                              const char * &reg_type);
+   int        cisc_spill_merge(int left_result, int right_result);
+-  
++
+   bool       equivalent(FormDict &globals, MatchNode *mNode2);
+ 
+   void       count_commutative_op(int& count);
+@@ -970,8 +969,8 @@
+   void       append_components(FormDict &locals, ComponentList &components) const;
+   // Recursive call on all operands' match rules in my match rule.
+   bool       base_operand(uint &position, FormDict &globals,
+-                         const char * &result, const char * &name, 
+-			 const char * &opType) const;
++                         const char * &result, const char * &name,
++                         const char * &opType) const;
+ 
+ 
+   bool       is_base_register(FormDict &globals) const;
+@@ -993,14 +992,14 @@
+   Form::DataType is_ideal_load() const;// node matches ideal 'LoadXNode'
+   Form::DataType is_ideal_store() const;// node matches ideal 'StoreXNode'
+ 
+-  // Check if 'mRule2' is a cisc-spill variant of this MatchRule 
+-  int        cisc_spill_match(FormDict &globals, RegisterForm *registers, 
+-			      MatchRule *mRule2, const char * &operand, 
+-			      const char * &reg_type);
++  // Check if 'mRule2' is a cisc-spill variant of this MatchRule
++  int        cisc_spill_match(FormDict &globals, RegisterForm *registers,
++                              MatchRule *mRule2, const char * &operand,
++                              const char * &reg_type);
+ 
+   // Check if 'mRule2' is equivalent to this MatchRule
+   bool       equivalent(FormDict &globals, MatchRule *mRule2);
+-  
++
+   void       swap_commutative_op(const char* instr_ident, int count, int& match_rules_cnt);
+ 
+   void dump();
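[Editorial note between file diffs] The use_def_enum in the formssel.hpp diff above encodes component roles as combinable bits (USE_DEF = USE|DEF, TEMP = USE|SYNTHETIC). Below is a minimal sketch of how such flags compose, under the assumption stated in the enum's own comment that the implementation relies on bit intersection and union; the isa() helper is illustrative, not the ADLC's actual method.

    #include <cstdio>

    enum use_def_enum {
      INVALID = 0x0,
      USE     = 0x1,
      DEF     = 0x2, USE_DEF  = 0x3,
      KILL    = 0x4, USE_KILL = 0x5,
      SYNTHETIC = 0x8,
      TEMP = USE | SYNTHETIC
    };

    // Illustrative membership test by bit intersection.
    static bool isa(int usedef, int mask) {
      return (usedef & mask) == mask;
    }

    int main() {
      printf("%d\n", isa(USE_DEF, USE)); // 1: a USE_DEF component is also a USE
      printf("%d\n", isa(TEMP, DEF));    // 0: TEMP is USE|SYNTHETIC, not a DEF
      return 0;
    }
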
+diff -ruN openjdk6/hotspot/src/share/vm/adlc/main.cpp openjdk/hotspot/src/share/vm/adlc/main.cpp
+--- openjdk6/hotspot/src/share/vm/adlc/main.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/adlc/main.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)main.cpp	1.100 07/05/05 17:05:01 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // MAIN.CPP - Entry point for the Architecture Description Language Compiler
+@@ -31,8 +28,9 @@
+ //------------------------------Prototypes-------------------------------------
+ static void  usage(ArchDesc& AD);          // Print usage message and exit
+ static char *strip_ext(char *fname);       // Strip off name extension
+-static char *base_plus_suffix(const char* base, const char *suffix);// New concatenated string 
+-static char *prefix_plus_base_plus_suffix(const char* prefix, const char* base, const char *suffix);// New concatenated string 
++static char *base_plus_suffix(const char* base, const char *suffix);// New concatenated string
++static char *prefix_plus_base_plus_suffix(const char* prefix, const char* base, const char *suffix);// New concatenated string
++static int get_legal_text(FileBuff &fbuf, char **legal_text); // Get pointer to legal text
+ 
+ ArchDesc* globalAD = NULL;      // global reference to Architecture Description object
+ 
+@@ -50,33 +48,33 @@
+ 
+   // Read command line arguments and file names
+   for( int i = 1; i < argc; i++ ) { // For all arguments
+-    register char *s = argv[i];	// Get option/filename
++    register char *s = argv[i]; // Get option/filename
+ 
+     if( *s++ == '-' ) {         // It's a flag? (not a filename)
+-      if( !*s ) {		// Stand-alone `-' means stdin
+-	//********** INSERT CODE HERE **********	
++      if( !*s ) {               // Stand-alone `-' means stdin
++        //********** INSERT CODE HERE **********
+       } else while (*s != '\0') { // While have flags on option
+-	switch (*s++) { 	// Handle flag
+-	case 'd':               // Debug flag
+-	  AD._dfa_debug += 1;   // Set Debug Flag
+-	  break;
+-	case 'g':               // Debug ad location flag
+-	  AD._adlocation_debug += 1;       // Set Debug ad location Flag
+-	  break;
+-	case 'o':               // No Output Flag
+-	  AD._no_output ^= 1;   // Toggle no_output flag
+-	  break;
+-	case 'q':               // Quiet Mode Flag
+-	  AD._quiet_mode ^= 1;  // Toggle quiet_mode flag
+-	  break;
+-	case 'w':               // Disable Warnings Flag
+-	  AD._disable_warnings ^= 1; // Toggle disable_warnings flag
+-	  break;
+-	case 'T':               // Option to make DFA as many subroutine calls.
+-	  AD._dfa_small += 1;   // Set Mode Flag
+-	  break;
+-	case 'c': {             // Set C++ Output file name
+-	  AD._CPP_file._name = s;
++        switch (*s++) {         // Handle flag
++        case 'd':               // Debug flag
++          AD._dfa_debug += 1;   // Set Debug Flag
++          break;
++        case 'g':               // Debug ad location flag
++          AD._adlocation_debug += 1;       // Set Debug ad location Flag
++          break;
++        case 'o':               // No Output Flag
++          AD._no_output ^= 1;   // Toggle no_output flag
++          break;
++        case 'q':               // Quiet Mode Flag
++          AD._quiet_mode ^= 1;  // Toggle quiet_mode flag
++          break;
++        case 'w':               // Disable Warnings Flag
++          AD._disable_warnings ^= 1; // Toggle disable_warnings flag
++          break;
++        case 'T':               // Option to make DFA as many subroutine calls.
++          AD._dfa_small += 1;   // Set Mode Flag
++          break;
++        case 'c': {             // Set C++ Output file name
++          AD._CPP_file._name = s;
+           const char *base = strip_ext(strdup(s));
+           AD._CPP_CLONE_file._name    = base_plus_suffix(base,"_clone.cpp");
+           AD._CPP_EXPAND_file._name   = base_plus_suffix(base,"_expand.cpp");
+@@ -86,48 +84,48 @@
+           AD._CPP_PEEPHOLE_file._name = base_plus_suffix(base,"_peephole.cpp");
+           AD._CPP_PIPELINE_file._name = base_plus_suffix(base,"_pipeline.cpp");
+           s += strlen(s);
+-	  break;
++          break;
+         }
+-	case 'h':               // Set C++ Output file name
+-	  AD._HPP_file._name = s; s += strlen(s);
+-	  break;
+-	case 'v':               // Set C++ Output file name
+-	  AD._VM_file._name = s; s += strlen(s);
+-	  break;
+-	case 'a':               // Set C++ Output file name
+-	  AD._DFA_file._name = s;
+-	  AD._bug_file._name = s;
+-	  s += strlen(s);
+-	  break;
+-	case '#':               // Special internal debug flag
+-	  AD._adl_debug++;      // Increment internal debug level
+-	  break;
+-	case 's':               // Output which instructions are cisc-spillable
+-	  AD._cisc_spill_debug = true;
+-	  break;
+-	case 'D':               // Flag Definition
+-	  {
+-	    char* flag = s;
+-	    s += strlen(s);
+-	    char* def = strchr(flag, '=');
+-	    if (def == NULL)  def = (char*)"1";
+-	    else              *def++ = '\0';
+-	    AD.set_preproc_def(flag, def);
+-	  }
+-	  break;
+-	case 'U':               // Flag Un-Definition
+-	  {
+-	    char* flag = s;
+-	    s += strlen(s);
+-	    AD.set_preproc_def(flag, NULL);
+-	  }
+-	  break;
+-	default:		// Unknown option
+-	  usage(AD);            // So print usage and exit
+-	}			// End of switch on options...
+-      } 			// End of while have options...
++        case 'h':               // Set C++ Output file name
++          AD._HPP_file._name = s; s += strlen(s);
++          break;
++        case 'v':               // Set C++ Output file name
++          AD._VM_file._name = s; s += strlen(s);
++          break;
++        case 'a':               // Set C++ Output file name
++          AD._DFA_file._name = s;
++          AD._bug_file._name = s;
++          s += strlen(s);
++          break;
++        case '#':               // Special internal debug flag
++          AD._adl_debug++;      // Increment internal debug level
++          break;
++        case 's':               // Output which instructions are cisc-spillable
++          AD._cisc_spill_debug = true;
++          break;
++        case 'D':               // Flag Definition
++          {
++            char* flag = s;
++            s += strlen(s);
++            char* def = strchr(flag, '=');
++            if (def == NULL)  def = (char*)"1";
++            else              *def++ = '\0';
++            AD.set_preproc_def(flag, def);
++          }
++          break;
++        case 'U':               // Flag Un-Definition
++          {
++            char* flag = s;
++            s += strlen(s);
++            AD.set_preproc_def(flag, NULL);
++          }
++          break;
++        default:                // Unknown option
++          usage(AD);            // So print usage and exit
++        }                       // End of switch on options...
++      }                         // End of while have options...
+ 
+-    } else {			// Not an option; must be a filename
++    } else {                    // Not an option; must be a filename
+       AD._ADL_file._name = argv[i]; // Set the input filename
+ 
+       // // Files for storage, based on input file name
+@@ -148,12 +146,12 @@
+       delete temp;
+       temp = base_plus_suffix("adGlobals_",base);
+       AD._VM_file._name = base_plus_suffix(temp,".hpp");
+-      delete temp;      
++      delete temp;
+       temp = base_plus_suffix("bugs_",base);
+       AD._bug_file._name = base_plus_suffix(temp,".out");
+       delete temp;
+-    }				// End of files vs options...
+-  }				// End of while have command line arguments
++    }                           // End of files vs options...
++  }                             // End of while have command line arguments
+ 
+   // Open files used to store the matcher and its components
+   if (AD.open_files() == 0) return 1; // Open all input/output files
+@@ -161,6 +159,11 @@
+   // Build the File Buffer, Parse the input, & Generate Code
+   FileBuff  ADL_Buf(&AD._ADL_file, AD); // Create a file buffer for input file
+ 
++  // Get pointer to legal text at the beginning of AD file.
++  // It will be used in generated ad files.
++  char* legal_text;
++  int legal_sz = get_legal_text(ADL_Buf, &legal_text);
++
+   ADL_Parse = new ADLParser(ADL_Buf, AD); // Create a parser to parse the buffer
+   ADL_Parse->parse();           // Parse buffer & build description lists
+ 
+@@ -173,21 +176,23 @@
+   // Verify that the results of the parse are consistent
+   AD.verify();
+ 
+-  // Prepare to generate the result files: 
++  // Prepare to generate the result files:
+   AD.generateMatchLists();
+   AD.identify_unique_operands();
+   AD.identify_cisc_spill_instructions();
+   AD.identify_short_branches();
+   // Make sure every file starts with a copyright:
+-  AD.addSunCopyright(AD._HPP_file._fp);           // .hpp
+-  AD.addSunCopyright(AD._CPP_file._fp);           // .cpp
+-  AD.addSunCopyright(AD._CPP_CLONE_file._fp);     // .cpp
+-  AD.addSunCopyright(AD._CPP_EXPAND_file._fp);    // .cpp
+-  AD.addSunCopyright(AD._CPP_FORMAT_file._fp);    // .cpp
+-  AD.addSunCopyright(AD._CPP_GEN_file._fp);       // .cpp
+-  AD.addSunCopyright(AD._CPP_MISC_file._fp);      // .cpp
+-  AD.addSunCopyright(AD._CPP_PEEPHOLE_file._fp);  // .cpp
+-  AD.addSunCopyright(AD._CPP_PIPELINE_file._fp);  // .cpp
++  AD.addSunCopyright(legal_text, legal_sz, AD._HPP_file._fp);           // .hpp
++  AD.addSunCopyright(legal_text, legal_sz, AD._CPP_file._fp);           // .cpp
++  AD.addSunCopyright(legal_text, legal_sz, AD._CPP_CLONE_file._fp);     // .cpp
++  AD.addSunCopyright(legal_text, legal_sz, AD._CPP_EXPAND_file._fp);    // .cpp
++  AD.addSunCopyright(legal_text, legal_sz, AD._CPP_FORMAT_file._fp);    // .cpp
++  AD.addSunCopyright(legal_text, legal_sz, AD._CPP_GEN_file._fp);       // .cpp
++  AD.addSunCopyright(legal_text, legal_sz, AD._CPP_MISC_file._fp);      // .cpp
++  AD.addSunCopyright(legal_text, legal_sz, AD._CPP_PEEPHOLE_file._fp);  // .cpp
++  AD.addSunCopyright(legal_text, legal_sz, AD._CPP_PIPELINE_file._fp);  // .cpp
++  AD.addSunCopyright(legal_text, legal_sz, AD._VM_file._fp);            // .hpp
++  AD.addSunCopyright(legal_text, legal_sz, AD._DFA_file._fp);           // .cpp
+   // Make sure each .cpp file starts with include lines:
+   // files declaring and defining generators for Mach* Objects (hpp,cpp)
+   AD.machineDependentIncludes(AD._CPP_file);      // .cpp
+@@ -198,7 +203,7 @@
+   AD.machineDependentIncludes(AD._CPP_MISC_file);      // .cpp
+   AD.machineDependentIncludes(AD._CPP_PEEPHOLE_file);  // .cpp
+   AD.machineDependentIncludes(AD._CPP_PIPELINE_file);  // .cpp
+-  // Generate the result files: 
++  // Generate the result files:
+   // enumerations, class definitions, object generators, and the DFA
+   // file containing enumeration of machine operands & instructions (hpp)
+   AD.addPreHeaderBlocks(AD._HPP_file._fp);        // .hpp
+@@ -239,7 +244,6 @@
+   AD.addPreprocessorChecks(AD._CPP_PIPELINE_file._fp);  // .cpp
+ 
+   // define the finite automata that selects lowest cost production
+-  AD.addSunCopyright(AD._DFA_file._fp);           // .cpp
+   AD.machineDependentIncludes(AD._DFA_file);      // .cpp
+   AD.buildDFA(AD._DFA_file._fp);
+ 
+@@ -268,7 +272,7 @@
+   printf("Usage: adl [-doqw] [-Dflag[=def]] [-Uflag] [-cFILENAME] [-hFILENAME] [-aDFAFILE] ADLFILE\n");
+   printf(" d  produce DFA debugging info\n");
+   printf(" o  no output produced, syntax and semantic checking only\n");
+-  printf(" q  quiet mode, supresses all non-essential messages\n"); 
++  printf(" q  quiet mode, supresses all non-essential messages\n");
+   printf(" w  suppress warning messages\n");
+   printf(" c  specify CPP file name (default: %s)\n", AD._CPP_file._name);
+   printf(" h  specify HPP file name (default: %s)\n", AD._HPP_file._name);
+@@ -345,15 +349,15 @@
+     }
+     else {
+       if (_ADL_file._name) printf("%s --> ", _ADL_file._name);
+-      printf("%s, %s, %s, %s, %s, %s, %s, %s, %s", 
+-             _CPP_file._name, 
+-             _CPP_CLONE_file._name, 
+-             _CPP_EXPAND_file._name, 
+-             _CPP_FORMAT_file._name, 
+-             _CPP_GEN_file._name, 
+-             _CPP_MISC_file._name, 
+-             _CPP_PEEPHOLE_file._name, 
+-             _CPP_PIPELINE_file._name, 
++      printf("%s, %s, %s, %s, %s, %s, %s, %s, %s",
++             _CPP_file._name,
++             _CPP_CLONE_file._name,
++             _CPP_EXPAND_file._name,
++             _CPP_FORMAT_file._name,
++             _CPP_GEN_file._name,
++             _CPP_MISC_file._name,
++             _CPP_PEEPHOLE_file._name,
++             _CPP_PIPELINE_file._name,
+              _HPP_file._name, _DFA_file._name);
+     }
+     printf("\n");
+@@ -364,11 +368,11 @@
+ static char *strip_ext(char *fname)
+ {
+   char *ep;
+-   
++
+   if (fname) {
+     ep = fname + strlen(fname) - 1; // start at last character and look for '.'
+     while (ep >= fname && *ep != '.') --ep;
+-    if (*ep == '.')	*ep = '\0'; // truncate string at '.' 
++    if (*ep == '.')     *ep = '\0'; // truncate string at '.'
+   }
+   return fname;
+ }
+@@ -378,19 +382,19 @@
+ {
+   char *ep;
+   char *sp;
+-   
++
+   if (fname) {
+     for (sp = fname; *sp; sp++)
+       if (*sp == '/')  fname = sp+1;
+     ep = fname;                    // start at first character and look for '.'
+     while (ep <= (fname + strlen(fname) - 1) && *ep != '.') ep++;
+-    if (*ep == '.')	*ep = '\0'; // truncate string at '.' 
++    if (*ep == '.')     *ep = '\0'; // truncate string at '.'
+   }
+   return fname;
+ }
+ 
+ //------------------------------base_plus_suffix-------------------------------
+-// New concatenated string 
++// New concatenated string
+ static char *base_plus_suffix(const char* base, const char *suffix)
+ {
+   int len = (int)strlen(base) + (int)strlen(suffix) + 1;
+@@ -402,7 +406,7 @@
+ 
+ 
+ //------------------------------prefix_plus_base_plus_suffix-------------------
+-// New concatenated string 
++// New concatenated string
+ static char *prefix_plus_base_plus_suffix(const char* prefix, const char* base, const char *suffix)
+ {
+   int len = (int)strlen(prefix) + (int)strlen(base) + (int)strlen(suffix) + 1;
+@@ -412,6 +416,24 @@
+   return fname;
+ }
+ 
++//------------------------------get_legal_text---------------------------------
++// Get pointer to legal text at the beginning of AD file.
++// This code assumes that a legal text starts at the beginning of .ad files,
++// is commented by "//" at each line and ends with empty line.
++//
++int get_legal_text(FileBuff &fbuf, char **legal_text)
++{
++  char* legal_start = fbuf.get_line();
++  assert(legal_start[0] == '/' && legal_start[1] == '/', "Incorrect header of AD file");
++  char* legal_end = fbuf.get_line();
++  assert(strncmp(legal_end, "// Copyright", 12) == 0, "Incorrect header of AD file");
++  while(legal_end[0] == '/' && legal_end[1] == '/') {
++    legal_end = fbuf.get_line();
++  }
++  *legal_text = legal_start;
++  return (legal_end - legal_start);
++}
++
+ // VS2005 has its own definition, identical to this one.
+ #if !defined(_WIN32) || defined(_WIN64) || _MSC_VER < 1400
+ void *operator new( size_t size, int, const char *, int ) {
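[Editorial note between file diffs] The get_legal_text() routine added in the main.cpp diff above pulls the license header out of the .ad file by reading "//" comment lines until the first non-comment line, so the generated ad files can reproduce it. A standalone sketch of the same scan over an in-memory string; extract_legal_header() is a hypothetical stand-in for the FileBuff-based original.

    #include <sstream>
    #include <string>

    // Collect the leading run of "//" lines; the patch assumes the
    // legal text starts at the top of the .ad file, is commented with
    // "//" on every line, and ends at the first non-comment line.
    static std::string extract_legal_header(const std::string &ad_text) {
      std::istringstream in(ad_text);
      std::string line, header;
      while (std::getline(in, line)) {
        if (line.compare(0, 2, "//") != 0) break;
        header += line + '\n';
      }
      return header;
    }
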
+diff -ruN openjdk6/hotspot/src/share/vm/adlc/output_c.cpp openjdk/hotspot/src/share/vm/adlc/output_c.cpp
+--- openjdk6/hotspot/src/share/vm/adlc/output_c.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/adlc/output_c.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)output_c.cpp	1.184 07/05/17 15:49:23 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // output_c.cpp - Class CPP file output routines for architecture definition
+@@ -69,7 +66,7 @@
+     for( reg_def = registers->iter_RegDefs(); reg_def != NULL; reg_def = next ) {
+       next = registers->iter_RegDefs();
+       const char *comma = (next != NULL) ? "," : " // no trailing comma";
+-      fprintf(fp,"  \"%s\"%s\n", 
++      fprintf(fp,"  \"%s\"%s\n",
+                  reg_def->_regname, comma );
+     }
+ 
+@@ -113,13 +110,13 @@
+       const char *comma = (next != NULL) ? "," : " // no trailing comma";
+       int encval;
+       if (!ADLParser::is_int_token(register_encode, encval)) {
+-	fprintf(fp,"  %s%s  // %s\n", 
+-		register_encode, comma, reg_def->_regname );
++        fprintf(fp,"  %s%s  // %s\n",
++                register_encode, comma, reg_def->_regname );
+       } else {
+-	// Output known constants in hex char format (backward compatibility).
++        // Output known constants in hex char format (backward compatibility).
+         assert(encval < 256, "Exceeded supported width for register encoding");
+-	fprintf(fp,"  (unsigned char)'\\x%X'%s  // %s\n", 
+-		encval,          comma, reg_def->_regname );
++        fprintf(fp,"  (unsigned char)'\\x%X'%s  // %s\n",
++                encval,          comma, reg_def->_regname );
+       }
+     }
+     // Finish defining enumeration
+@@ -136,7 +133,7 @@
+     fprintf(fp,"// Enumeration of register class names\n");
+     fprintf(fp, "enum machRegisterClass {\n");
+     registers->_rclasses.reset();
+-    for( const char *class_name = NULL; 
++    for( const char *class_name = NULL;
+          (class_name = registers->_rclasses.iter()) != NULL; ) {
+       fprintf(fp,"  %s,\n", toUpper( class_name ));
+     }
+@@ -159,7 +156,7 @@
+     fprintf(fp_hpp,"\n");
+     fprintf(fp_hpp,"// Register masks, one for each register class.\n");
+     _register->_rclasses.reset();
+-    for( rc_name = NULL; 
++    for( rc_name = NULL;
+          (rc_name = _register->_rclasses.iter()) != NULL; ) {
+       const char *prefix    = "";
+       RegClass   *reg_class = _register->getRegClass(rc_name);
+@@ -167,7 +164,7 @@
+ 
+       int len = RegisterForm::RegMask_Size();
+       fprintf(fp_hpp, "extern const RegMask %s%s_mask;\n", prefix, toUpper( rc_name ) );
+-      
++
+       if( reg_class->_stack_or_reg ) {
+         fprintf(fp_hpp, "extern const RegMask %sSTACK_OR_%s_mask;\n", prefix, toUpper( rc_name ) );
+       }
+@@ -185,7 +182,7 @@
+     fprintf(fp_cpp,"\n");
+     fprintf(fp_cpp,"// Register masks, one for each register class.\n");
+     _register->_rclasses.reset();
+-    for( rc_name = NULL; 
++    for( rc_name = NULL;
+          (rc_name = _register->_rclasses.iter()) != NULL; ) {
+       const char *prefix    = "";
+       RegClass   *reg_class = _register->getRegClass(rc_name);
+@@ -198,11 +195,11 @@
+           fprintf(fp_cpp," 0x%x,",reg_class->regs_in_word(i,false));
+         fprintf(fp_cpp," 0x%x );\n",reg_class->regs_in_word(i,false));
+       }
+-      
++
+       if( reg_class->_stack_or_reg ) {
+-	int i;
++        int i;
+         fprintf(fp_cpp, "const RegMask %sSTACK_OR_%s_mask(", prefix, toUpper( rc_name ) );
+-        for( i = 0; i < len-1; i++ ) 
++        for( i = 0; i < len-1; i++ )
+           fprintf(fp_cpp," 0x%x,",reg_class->regs_in_word(i,true));
+         fprintf(fp_cpp," 0x%x );\n",reg_class->regs_in_word(i,true));
+       }
+@@ -229,7 +226,7 @@
+ 
+   while ( (paramname = pipeclass->_parameters.iter()) != NULL ) {
+     const PipeClassOperandForm *pipeopnd =
+-	(const PipeClassOperandForm *)pipeclass->_localUsage[paramname];
++        (const PipeClassOperandForm *)pipeclass->_localUsage[paramname];
+ 
+     if (pipeopnd)
+       templen += 10 + (int)strlen(pipeopnd->_stage);
+@@ -257,7 +254,7 @@
+ 
+   while ( (paramname = pipeclass->_parameters.iter()) != NULL ) {
+     const PipeClassOperandForm *pipeopnd =
+-	(const PipeClassOperandForm *)pipeclass->_localUsage[paramname];
++        (const PipeClassOperandForm *)pipeclass->_localUsage[paramname];
+     templen += sprintf(&operand_stages[templen], "  stage_%s%c\n",
+       pipeopnd ? pipeopnd->_stage : "undefined",
+       (++i < paramcount ? ',' : ' ') );
+@@ -299,9 +296,9 @@
+     int used_mask = pipeline->_resdict[piperesource->_resource]->is_resource()->mask();
+     for (i = 0; i < pipeline->_rescount; i++)
+       if ((1 << i) & used_mask) {
+-	int stage = pipeline->_stages.index(piperesource->_stage);
+-	if (res_stages[i] < stage+1)
+-	  res_stages[i] = stage+1;
++        int stage = pipeline->_stages.index(piperesource->_stage);
++        if (res_stages[i] < stage+1)
++          res_stages[i] = stage+1;
+       }
+   }
+ 
+@@ -472,7 +469,7 @@
+   static const char * pipeline_use_cycle_mask = "Pipeline_Use_Cycle_Mask";
+   static const char * pipeline_use_element    = "Pipeline_Use_Element";
+ 
+-  templen = 1 + 
++  templen = 1 +
+     (int)(strlen(pipeline_use_cycle_mask) + (int)strlen(pipeline_use_element) +
+      (cyclemasksize * 12) + masklen + (cycledigit * 2) + 30) * element_count;
+ 
+@@ -529,9 +526,9 @@
+ 
+     mask -= (((uint)1) << lower_position) - 1;
+     res_mask[upper_idx] |= mask;
+- 
++
+     for (j = cyclemasksize-1; j >= 0; j--) {
+-      formatlen = 
++      formatlen =
+         sprintf(&resource_mask[templen], "0x%08x%s", res_mask[j], j > 0 ? ", " : "");
+       templen += formatlen;
+     }
+@@ -562,7 +559,7 @@
+     char * args = new char [9 + 2*masklen + maskdigit];
+ 
+     sprintf(args, "0x%0*x, 0x%0*x, %*d",
+-      masklen, resources_used, 
++      masklen, resources_used,
+       masklen, resources_used_exclusively,
+       maskdigit, element_count);
+ 
+@@ -757,7 +754,7 @@
+   if (!_pipeline)
+     /* Do Nothing */;
+ 
+-  else if (_pipeline->_maxcycleused <= 
++  else if (_pipeline->_maxcycleused <=
+ #ifdef SPARC
+     64
+ #else
+@@ -1051,14 +1048,14 @@
+   intptr_t   input         = 0;
+   fprintf(fp, "      // Check instruction sub-tree\n");
+   pmatch->reset();
+-  for( pmatch->next_instruction( parent, inst_position, inst_name, input ); 
++  for( pmatch->next_instruction( parent, inst_position, inst_name, input );
+        inst_name != NULL;
+        pmatch->next_instruction( parent, inst_position, inst_name, input ) ) {
+     // If this is not a placeholder
+     if( ! pmatch->is_placeholder() ) {
+       // Define temporaries 'inst#', based on parent and parent's input index
+       if( parent != -1 ) {                // root was initialized
+-        fprintf(fp, "  inst%ld = inst%ld->in(%ld);\n", 
++        fprintf(fp, "  inst%ld = inst%ld->in(%ld);\n",
+                 inst_position, parent, input);
+       }
+ 
+@@ -1092,7 +1089,7 @@
+   intptr_t   input         = 0;
+   fprintf(fp, "  // Check instruction sub-tree\n");
+   pmatch->reset();
+-  for( pmatch->next_instruction( parent, inst_position, inst_name, input ); 
++  for( pmatch->next_instruction( parent, inst_position, inst_name, input );
+        inst_name != NULL;
+        pmatch->next_instruction( parent, inst_position, inst_name, input ) ) {
+     // If this is not a placeholder
+@@ -1130,7 +1127,7 @@
+   intptr_t   input         = 0;
+   fprintf(fp, "      // Build map to register info\n");
+   pmatch->reset();
+-  for( pmatch->next_instruction( parent, inst_position, inst_name, input ); 
++  for( pmatch->next_instruction( parent, inst_position, inst_name, input );
+        inst_name != NULL;
+        pmatch->next_instruction( parent, inst_position, inst_name, input ) ) {
+     // If this is not a placeholder
+@@ -1220,15 +1217,15 @@
+         // assert( false, "should be a register" );
+       }
+ 
+-      // 
++      //
+       // Check for equivalence
+-      // 
++      //
+       // fprintf(fp, "phase->eqv( ");
+       // fprintf(fp, "inst%d->in(%d+%d) /* %s */, inst%d->in(%d+%d) /* %s */",
+       //         left_index,  left_op_base,  left_op_index,  left_op,
+       //         right_index, right_op_base, right_op_index, right_op );
+       // fprintf(fp, ")");
+-      // 
++      //
+       switch( left_interface_type ) {
+       case Form::register_interface: {
+         // Check that they are allocated to the same register
+@@ -1275,7 +1272,7 @@
+         // Compare 'base', 'index', 'scale', and 'disp'
+         // base
+         fprintf(fp, "( \n");
+-        fprintf(fp, "  (inst%d->_opnds[%d]->base(ra_,inst%d,inst%d_idx%d)  /* %d.%s$$base */", 
++        fprintf(fp, "  (inst%d->_opnds[%d]->base(ra_,inst%d,inst%d_idx%d)  /* %d.%s$$base */",
+           left_index, left_op_index, left_index, left_index, left_op_index, left_index, left_op );
+         fprintf(fp, " == ");
+         fprintf(fp, "/* %d.%s$$base */ inst%d->_opnds[%d]->base(ra_,inst%d,inst%d_idx%d)) &&\n",
+@@ -1311,7 +1308,7 @@
+         break;
+       }
+       }
+- 
++
+       // Advance to next constraint
+       pconstraint = pconstraint->next();
+       first_constraint = false;
+@@ -1331,12 +1328,12 @@
+ //     }
+ //   }
+ //   assert( op_index != NameList::Not_in_list, "Did not find operand in instruction");
+-// 
++//
+ //   ComponentList components_right = instr->_components;
+ //   char *right_comp_type = components_right.at(op_index)->_type;
+ //   OpClassForm *right_opclass = globals[right_comp_type]->is_opclass();
+ //   Form::InterfaceType  right_interface_type = right_opclass->interface_type(globals);
+-// 
++//
+ //   return;
+ // }
+ 
+@@ -1376,13 +1373,13 @@
+       if( opnds_index == 0 ) {
+         // Initial setup of new instruction
+         fprintf(fp, "        // ----- Initial setup -----\n");
+-        // 
++        //
+         // Add control edge for this node
+         fprintf(fp, "        root->add_req(_in[0]);                // control edge\n");
+         // Add unmatched edges from root of match tree
+         int op_base = root_form->oper_input_base(globals);
+         for( int unmatched_edge = 1; unmatched_edge < op_base; ++unmatched_edge ) {
+-          fprintf(fp, "        root->add_req(inst%ld->in(%d));        // unmatched ideal edge\n", 
++          fprintf(fp, "        root->add_req(inst%ld->in(%d));        // unmatched ideal edge\n",
+                                           inst_num, unmatched_edge);
+         }
+         // If new instruction captures bottom type
+@@ -1400,7 +1397,7 @@
+         if( (op_form == NULL) || (op_form->is_base_constant(globals) == Form::none) ) {
+           // Do not have ideal edges for constants after matching
+           fprintf(fp, "        for( unsigned x%d = inst%ld_idx%d; x%d < inst%ld_idx%d; x%d++ )\n",
+-                  inst_op_num, inst_num, inst_op_num, 
++                  inst_op_num, inst_num, inst_op_num,
+                   inst_op_num, inst_num, inst_op_num+1, inst_op_num );
+           fprintf(fp, "          root->add_req( inst%ld->in(x%d) );\n",
+                   inst_num, inst_op_num );
+@@ -1416,7 +1413,7 @@
+     // Replacing subtree with empty-tree
+     assert( false, "ShouldNotReachHere();");
+   }
+-  
++
+   // Return the new sub-tree
+   fprintf(fp, "        deleted = %d;\n", max_position+1 /*zero to one based*/);
+   fprintf(fp, "        return root;  // return new root;\n");
+@@ -1432,11 +1429,11 @@
+ 
+   // Identify the maximum instruction position,
+   // generate temporaries that hold current instruction
+-  // 
++  //
+   //   MachNode  *inst0 = NULL;
+   //   ...
+   //   MachNode  *instMAX = NULL;
+-  // 
++  //
+   int max_position = 0;
+   Peephole *peep;
+   for( peep = node->peepholes(); peep != NULL; peep = peep->next() ) {
+@@ -1463,7 +1460,7 @@
+     PeepReplace    *preplace    = peep->replacement();
+ 
+     // Root of this peephole is the current MachNode
+-    assert( true, // %%name?%% strcmp( node->_ident, pmatch->name(0) ) == 0, 
++    assert( true, // %%name?%% strcmp( node->_ident, pmatch->name(0) ) == 0,
+             "root of PeepMatch does not match instruction");
+ 
+     // Make each peephole rule individually selectable
+@@ -1481,7 +1478,7 @@
+ 
+     // Construct the new sub-tree
+     generate_peepreplace( fp, _globalNames, pmatch, pconstraint, preplace, max_position );
+-    
++
+     // End of scope for this peephole's constraints
+     fprintf(fp, "    }\n");
+     // Closing brace '}' to make each peephole rule individually selectable
+@@ -1510,8 +1507,8 @@
+     const Form   *frm      = NULL;
+     InstructForm *new_inst = NULL;
+     OperandForm  *new_oper = NULL;
+-    unsigned      numo     = node->num_opnds() + 
+-				node->_exprule->_newopers.count();
++    unsigned      numo     = node->num_opnds() +
++                                node->_exprule->_newopers.count();
+ 
+     // If necessary, generate any operands created in expand rule
+     if (node->_exprule->_newopers.count()) {
+@@ -1526,7 +1523,7 @@
+                   cnt, new_oper->_ident);
+         }
+         else {
+-          fprintf(fp,"  MachOper *op%d = new (C) %sOper(%s);\n", 
++          fprintf(fp,"  MachOper *op%d = new (C) %sOper(%s);\n",
+                   cnt, new_oper->_ident, tmp);
+         }
+       }
+@@ -1581,16 +1578,16 @@
+       new_inst = form->is_instruction();
+       assert( new_inst, "'new_id' must be an instruction name");
+       if( node->is_ideal_if() && new_inst->is_ideal_if() ) {
+-	fprintf(fp, "  ((MachIfNode*)n%d)->_prob = _prob;\n",cnt);
+-	fprintf(fp, "  ((MachIfNode*)n%d)->_fcnt = _fcnt;\n",cnt);
++        fprintf(fp, "  ((MachIfNode*)n%d)->_prob = _prob;\n",cnt);
++        fprintf(fp, "  ((MachIfNode*)n%d)->_fcnt = _fcnt;\n",cnt);
+       }
+ 
+       if( node->is_ideal_fastlock() && new_inst->is_ideal_fastlock() ) {
+-	fprintf(fp, "  ((MachFastLockNode*)n%d)->_counters = _counters;\n",cnt);
++        fprintf(fp, "  ((MachFastLockNode*)n%d)->_counters = _counters;\n",cnt);
+       }
+ 
+       const char *resultOper = new_inst->reduce_result();
+-      fprintf(fp,"  n%d->set_opnd_array(0, state->MachOperGenerator( %s, C ));\n", 
++      fprintf(fp,"  n%d->set_opnd_array(0, state->MachOperGenerator( %s, C ));\n",
+               cnt, machOperEnum(resultOper));
+ 
+       // get the formal operand NameList
+@@ -1601,7 +1598,7 @@
+       int memory_operand = new_inst->memory_operand(_globalNames);
+       if( memory_operand != InstructForm::NO_MEMORY_OPERAND ) {
+         int node_mem_op = node->memory_operand(_globalNames);
+-        assert( node_mem_op != InstructForm::NO_MEMORY_OPERAND, 
++        assert( node_mem_op != InstructForm::NO_MEMORY_OPERAND,
+                 "expand rule member needs memory but top-level inst doesn't have any" );
+         // Copy memory edge
+         fprintf(fp,"  n%d->add_req(_in[1]);\t// Add memory edge\n", cnt);
+@@ -1619,7 +1616,7 @@
+           // If there is no use of the created operand, just skip it
+           if (new_pos != -1) {
+             //Copy the operand from the original made above
+-            fprintf(fp,"  n%d->set_opnd_array(%d, op%d->clone(C)); // %s\n", 
++            fprintf(fp,"  n%d->set_opnd_array(%d, op%d->clone(C)); // %s\n",
+                     cnt, new_pos, exp_pos-node->num_opnds(), opid);
+             // Check for who defines this operand & add edge if needed
+             fprintf(fp,"  if(tmp%d != NULL)\n", exp_pos);
+@@ -1631,11 +1628,11 @@
+           // ins = (InstructForm *) _globalNames[new_id];
+           exp_pos = node->operand_position_format(opid);
+           assert(exp_pos != -1, "Bad expand rule");
+-          
++
+           new_pos = new_inst->operand_position(parameter,Component::USE);
+           if (new_pos != -1) {
+             // Copy the operand from the ExpandNode to the new node
+-            fprintf(fp,"  n%d->set_opnd_array(%d, opnd_array(%d)->clone(C)); // %s\n", 
++            fprintf(fp,"  n%d->set_opnd_array(%d, opnd_array(%d)->clone(C)); // %s\n",
+                     cnt, new_pos, exp_pos, opid);
+             // For each operand add appropriate input edges by looking at tmp's
+             fprintf(fp,"  if(tmp%d == this) {\n", exp_pos);
+@@ -1659,12 +1656,12 @@
+           fprintf(fp,"  tmp%d = n%d;\n", exp_pos, cnt);
+         }
+         else if( new_def_pos != -1 ) {
+-          // Instruction defines a value but user did not declare it 
++          // Instruction defines a value but user did not declare it
+           // in the 'effect' clause
+           fprintf(fp,"  tmp%d = n%d;\n", exp_pos, cnt);
+         }
+       } // done iterating over a new instruction's operands
+-      
++
+       // Invoke Expand() for the newly created instruction.
+       fprintf(fp,"  result = n%d->Expand( state, proj_list );\n", cnt);
+       assert( !new_inst->expands(), "Do not have complete support for recursive expansion");
+@@ -1706,7 +1703,7 @@
+         int j = node->unique_opnds_idx(i);
+         // unique_opnds_idx(i) is unique if unique_opnds_idx(j) is not unique.
+         if( j != node->unique_opnds_idx(j) ) {
+-          fprintf(fp,"  set_opnd_array(%d, opnd_array(%d)->clone(C)); // %s\n", 
++          fprintf(fp,"  set_opnd_array(%d, opnd_array(%d)->clone(C)); // %s\n",
+                   new_num_opnds, i, comp->_name);
+           // delete not unique edges here
+           fprintf(fp,"  for(unsigned i = 0; i < num%d; i++) {\n", i);
+@@ -1726,7 +1723,7 @@
+   }
+ 
+ 
+-  // Generate projections for instruction's additional DEFs and KILLs 
++  // Generate projections for instruction's additional DEFs and KILLs
+   if( ! node->expands() && (node->needs_projections() || node->has_temps())) {
+     // Get string representing the MachNode that projections point at
+     const char *machNode = "this";
+@@ -1756,11 +1753,11 @@
+           declared_def = true;
+         }
+         if (op && op->_interface && op->_interface->is_RegInterface()) {
+-          fprintf(fp,"  def = new (C) MachTempNode(state->MachOperGenerator( %s, C ));\n", 
++          fprintf(fp,"  def = new (C) MachTempNode(state->MachOperGenerator( %s, C ));\n",
+                   machOperEnum(op->_ident));
+           fprintf(fp,"  add_req(def);\n");
+           int idx  = node->operand_position_format(comp->_name);
+-          fprintf(fp,"  set_opnd_array(%d, state->MachOperGenerator( %s, C ));\n", 
++          fprintf(fp,"  set_opnd_array(%d, state->MachOperGenerator( %s, C ));\n",
+                   idx, machOperEnum(op->_ident));
+         } else {
+           assert(false, "can't have temps which aren't registers");
+@@ -1809,15 +1806,15 @@
+ //
+ // (1) void  ___Node::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+ // (2)   // ...  encoding defined by user
+-// (3)   
++// (3)
+ // (4) }
+-// 
++//
+ 
+ class DefineEmitState {
+ private:
+-  enum reloc_format { RELOC_NONE        = -1, 
+-                      RELOC_IMMEDIATE   =  0, 
+-                      RELOC_DISP        =  1, 
++  enum reloc_format { RELOC_NONE        = -1,
++                      RELOC_IMMEDIATE   =  0,
++                      RELOC_DISP        =  1,
+                       RELOC_CALL_DISP   =  2 };
+   enum literal_status{ LITERAL_NOT_SEEN  = 0,
+                        LITERAL_SEEN      = 1,
+@@ -1843,9 +1840,9 @@
+   bool          _may_reloc;
+   bool          _must_reloc;
+   reloc_format  _reloc_form;
+-  const char *  _reloc_type;                              
++  const char *  _reloc_type;
+   bool          _processing_noninput;
+-               
++
+   NameList      _strings_to_emit;
+ 
+   // Stable state, set by constructor
+@@ -1854,10 +1851,10 @@
+   EncClass     &_encoding;
+   InsEncode    &_ins_encode;
+   InstructForm &_inst;
+-  
++
+ public:
+-  DefineEmitState(FILE *fp, ArchDesc &AD, EncClass &encoding, 
+-                  InsEncode &ins_encode, InstructForm &inst) 
++  DefineEmitState(FILE *fp, ArchDesc &AD, EncClass &encoding,
++                  InsEncode &ins_encode, InstructForm &inst)
+     : _AD(AD), _fp(fp), _encoding(encoding), _ins_encode(ins_encode), _inst(inst) {
+       clear();
+   }
+@@ -1895,7 +1892,7 @@
+       // check_rep_var( rep_var );
+       if ( Opcode::as_opcode_type(rep_var) != Opcode::NOT_AN_OPCODE ) {
+         // No state needed.
+-        assert( _opclass == NULL, 
++        assert( _opclass == NULL,
+                 "'primary', 'secondary' and 'tertiary' don't follow operand.");
+       } else {
+         // Lookup its position in parameter list
+@@ -1924,25 +1921,25 @@
+ 
+         if ( idx != -1 ) {
+           // This is a local in the instruction
+-  	  // Update local state info.
+-  	  _opclass        = opc;
+-  	  _operand_idx    = idx;
+-  	  _local_name     = rep_var;
+-  	  _operand_name   = inst_rep_var;
+-
+-  	  // !!!!!
+-  	  // Do not support consecutive operands.
+-  	  assert( _operand == NULL, "Unimplemented()");
+-  	  _operand = opc->is_operand();
++          // Update local state info.
++          _opclass        = opc;
++          _operand_idx    = idx;
++          _local_name     = rep_var;
++          _operand_name   = inst_rep_var;
++
++          // !!!!!
++          // Do not support consecutive operands.
++          assert( _operand == NULL, "Unimplemented()");
++          _operand = opc->is_operand();
+         }
+         else if( ADLParser::is_literal_constant(inst_rep_var) ) {
+           // Instruction provided a constant expression
+-	  // Check later that encoding specifies $$$constant to resolve as constant
++          // Check later that encoding specifies $$$constant to resolve as constant
+           _constant_status   = LITERAL_SEEN;
+         }
+         else if( Opcode::as_opcode_type(inst_rep_var) != Opcode::NOT_AN_OPCODE ) {
+           // Instruction provided an opcode: "primary", "secondary", "tertiary"
+-	  // Check later that encoding specifies $$$constant to resolve as constant
++          // Check later that encoding specifies $$$constant to resolve as constant
+           _constant_status   = LITERAL_SEEN;
+         }
+         else if((_AD.get_registers() != NULL ) && (_AD.get_registers()->getRegDef(inst_rep_var) != NULL)) {
+@@ -1957,11 +1954,11 @@
+         }
+       } // done checking which operand this is.
+     } else {
+-      // 
++      //
+       // A subfield variable, '$$' prefix
+       // Check for fields that may require relocation information.
+       // Then check that literal register parameters are accessed with 'reg' or 'constant'
+-      // 
++      //
+       if ( strcmp(rep_var,"$disp") == 0 ) {
+         _doing_disp = true;
+         assert( _opclass, "Must use operand or operand class before '$disp'");
+@@ -1972,7 +1969,7 @@
+           _reloc_type   = AdlcVMDeps::oop_reloc_type();
+         } else {
+           // Do precise check on operand: is it a ConP or not
+-          // 
++          //
+           // Check interface for value of displacement
+           assert( ( _operand->_interface != NULL ),
+                   "$disp can only follow memory interface operand");
+@@ -1984,7 +1981,7 @@
+           if( disp != NULL && (*disp == '$') ) {
+             // MemInterface::disp contains a replacement variable,
+             // Check if this matches a ConP
+-            // 
++            //
+             // Lookup replacement variable, in operand's component list
+             const char *rep_var_name = disp + 1; // Skip '$'
+             const Component *comp = _operand->_components.search(rep_var_name);
+@@ -2007,7 +2004,7 @@
+                 _reloc_form   = RELOC_DISP;
+                 _reloc_type   = AdlcVMDeps::oop_reloc_type();
+               }
+-            } 
++            }
+ 
+             else if( _operand->is_user_name_for_sReg() != Form::none ) {
+               // The only non-constant allowed access to disp is an operand sRegX in a stackSlotX
+@@ -2111,7 +2108,7 @@
+     const char *rep_var;
+     _strings_to_emit.reset();
+     while ( (rep_var = _strings_to_emit.iter()) != NULL ) {
+-      
++
+       if ( (*rep_var) == '$' ) {
+         // A subfield variable, '$$' prefix
+         emit_field( rep_var );
+@@ -2131,15 +2128,15 @@
+   void gen_emit_x_reloc(const char *d32_lo_hi ) {
+     fprintf(_fp,"emit_%s_reloc(cbuf, ", d32_lo_hi );
+     emit_replacement();             fprintf(_fp,", ");
+-    emit_reloc_type( _reloc_type ); fprintf(_fp,", ");    
++    emit_reloc_type( _reloc_type ); fprintf(_fp,", ");
+     fprintf(_fp, "%d", _reloc_form);fprintf(_fp, ");");
+   }
+-    
+-  
++
++
+   void emit() {
+-    // 
++    //
+     //   "emit_d32_reloc(" or "emit_hi_reloc" or "emit_lo_reloc"
+-    // 
++    //
+     // Emit the function name when generating an emit function
+     if ( _doing_emit_d32 || _doing_emit_hi || _doing_emit_lo ) {
+       const char *d32_hi_lo = _doing_emit_d32 ? "d32" : (_doing_emit_hi ? "hi" : "lo");
+@@ -2148,8 +2145,8 @@
+       if ( ! _may_reloc ) {
+         // Definitely don't need relocation information
+         fprintf( _fp, "emit_%s(cbuf, ", d32_hi_lo );
+-        emit_replacement(); fprintf(_fp, ")"); 
+-      } 
++        emit_replacement(); fprintf(_fp, ")");
++      }
+       else if ( _must_reloc ) {
+         // Must emit relocation information
+         gen_emit_x_reloc( d32_hi_lo );
+@@ -2162,9 +2159,9 @@
+                 && !(_doing_disp && _doing_constant),
+                 "Must be emitting either a displacement or a constant");
+         fprintf(_fp,"\n");
+-        fprintf(_fp,"if ( opnd_array(%d)->%s_is_oop() ) {\n", 
++        fprintf(_fp,"if ( opnd_array(%d)->%s_is_oop() ) {\n",
+                 _operand_idx, disp_constant);
+-        fprintf(_fp,"  ");  
++        fprintf(_fp,"  ");
+         gen_emit_x_reloc( d32_hi_lo ); fprintf(_fp,"\n");
+         fprintf(_fp,"} else {\n");
+         fprintf(_fp,"  emit_%s(cbuf, ", d32_hi_lo);
+@@ -2173,13 +2170,13 @@
+     }
+     else if ( _doing_emit_d16 ) {
+       // Relocation of 16-bit values is not supported
+-      fprintf(_fp,"emit_d16(cbuf, "); 
++      fprintf(_fp,"emit_d16(cbuf, ");
+       emit_replacement(); fprintf(_fp, ")");
+       // No relocation done for 16-bit values
+     }
+     else if ( _doing_emit8 ) {
+       // Relocation of 8-bit values is not supported
+-      fprintf(_fp,"emit_d8(cbuf, "); 
++      fprintf(_fp,"emit_d8(cbuf, ");
+       emit_replacement(); fprintf(_fp, ")");
+       // No relocation done for 8-bit values
+     }
+@@ -2199,19 +2196,19 @@
+   const char* reg_conversion(const char* rep_var) {
+     if (strcmp(rep_var,"$Register") == 0)      return "as_Register";
+     if (strcmp(rep_var,"$FloatRegister") == 0) return "as_FloatRegister";
+-#if defined(IA32)
++#if defined(IA32) || defined(AMD64)
+     if (strcmp(rep_var,"$XMMRegister") == 0)   return "as_XMMRegister";
+ #endif
+     return NULL;
+   }
+-  
++
+   void emit_field(const char *rep_var) {
+     const char* reg_convert = reg_conversion(rep_var);
+ 
+     // A subfield variable, '$$subfield'
+     if ( strcmp(rep_var, "$reg") == 0 || reg_convert != NULL) {
+       // $reg form or the $Register MacroAssembler type conversions
+-      assert( _operand_idx != -1, 
++      assert( _operand_idx != -1,
+               "Must use this subfield after operand");
+       if( _reg_status == LITERAL_NOT_SEEN ) {
+         if (_processing_noninput) {
+@@ -2235,13 +2232,13 @@
+       }
+     }
+     else if ( strcmp(rep_var,"$base") == 0 ) {
+-      assert( _operand_idx != -1, 
++      assert( _operand_idx != -1,
+               "Must use this subfield after operand");
+       assert( ! _may_reloc, "UnImplemented()");
+       fprintf(_fp,"->base(ra_,this,idx%d)", _operand_idx);
+     }
+     else if ( strcmp(rep_var,"$index") == 0 ) {
+-      assert( _operand_idx != -1, 
++      assert( _operand_idx != -1,
+               "Must use this subfield after operand");
+       assert( ! _may_reloc, "UnImplemented()");
+       fprintf(_fp,"->index(ra_,this,idx%d)", _operand_idx);
+@@ -2360,7 +2357,7 @@
+ 
+ 
+ void ArchDesc::defineSize(FILE *fp, InstructForm &inst) {
+-  
++
+   //(1)
+   // Output instruction's emit prototype
+   fprintf(fp,"uint  %sNode::size(PhaseRegAlloc *ra_) const {\n",
+@@ -2382,7 +2379,7 @@
+   fprintf(fp,"void  %sNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {\n",
+           inst._ident);
+ 
+-  // If user did not define an encode section, 
++  // If user did not define an encode section,
+   // provide stub that does not generate any machine code.
+   if( (_encode == NULL) || (ins_encode == NULL) ) {
+     fprintf(fp, "  // User did not define an encode section.\n");
+@@ -2398,7 +2395,7 @@
+ 
+   // Output each operand's offset into the array of registers.
+   inst.index_temps( fp, _globalNames );
+-  
++
+   // Output this instruction's encodings
+   const char *ec_name;
+   bool        user_defined = false;
+@@ -2425,7 +2422,7 @@
+     DefineEmitState  pending(fp, *this, *encoding, *ins_encode, inst );
+     encoding->_code.reset();
+     encoding->_rep_vars.reset();
+-    // Process list of user-defined strings, 
++    // Process list of user-defined strings,
+     // and occurrences of replacement variables.
+     // Replacement Vars are pushed into a list and then output
+     while ( (ec_code = encoding->_code.iter()) != NULL ) {
+@@ -2452,7 +2449,7 @@
+   if ( user_defined == false ) {
+     fprintf(fp, "  // User did not define which encode class to use.\n");
+   }
+-  
++
+   // (3) and (4)
+   fprintf(fp,"}\n");
+ }
+@@ -2466,7 +2463,7 @@
+   uint num_edges = oper.num_edges(globals);
+   if( num_edges != 0 ) {
+     // Method header
+-    fprintf(fp, "const RegMask *%sOper::in_RegMask(int index) const {\n", 
++    fprintf(fp, "const RegMask *%sOper::in_RegMask(int index) const {\n",
+             oper._ident);
+ 
+     // Assert that the index is in range.
+@@ -2500,11 +2497,11 @@
+       for (uint index = 0; index < num_edges; index++) {
+         const char *reg_class = oper.in_reg_class(index, globals);
+         assert(reg_class != NULL, "did not find register mask");
+-	if( !strcmp(reg_class, "stack_slots") ) {
+-	  fprintf(fp, "  case %d: return &(Compile::current()->FIRST_STACK_mask());\n", index);
+-	} else {
+-	  fprintf(fp, "  case %d: return &%s_mask;\n", index, toUpper(reg_class));
+-	}
++        if( !strcmp(reg_class, "stack_slots") ) {
++          fprintf(fp, "  case %d: return &(Compile::current()->FIRST_STACK_mask());\n", index);
++        } else {
++          fprintf(fp, "  case %d: return &%s_mask;\n", index, toUpper(reg_class));
++        }
+       }
+       fprintf(fp,"  }\n");
+       fprintf(fp,"  ShouldNotReachHere();\n");
+@@ -2517,11 +2514,11 @@
+ }
+ 
+ // generate code to create a clone for a class derived from MachOper
+-// 
++//
+ // (0)  MachOper  *MachOperXOper::clone(Compile* C) const {
+ // (1)    return new (C) MachXOper( _ccode, _c0, _c1, ..., _cn);
+ // (2)  }
+-// 
++//
+ static void defineClone(FILE *fp, FormDict &globalNames, OperandForm &oper) {
+   fprintf(fp,"MachOper  *%sOper::clone(Compile* C) const {\n", oper._ident);
+   // Check for constants that need to be copied over
+@@ -2555,7 +2552,7 @@
+ }
+ 
+ 
+-// Helper functions for bug 4796752, abstracted with minimal modification 
++// Helper functions for bug 4796752, abstracted with minimal modification
+ // from define_oper_interface()
+ OperandForm *rep_var_to_operand(const char *encoding, OperandForm &oper, FormDict &globals) {
+   OperandForm *op = NULL;
+@@ -2606,7 +2603,7 @@
+ 
+   OperandForm *op = rep_var_to_operand(encoding, oper, globals);
+   if( op != NULL ) {
+-    // Check that this is a register 
++    // Check that this is a register
+     if ( (op->_matrule && op->_matrule->is_base_register(globals)) ) {
+       // Register
+       const char* ideal  = op->ideal_type(globals);
+@@ -2693,7 +2690,7 @@
+     } else {
+       assert( false, "Attempting to emit a non-register or non-constant");
+     }
+-  } 
++  }
+   else if( *encoding == '0' && *(encoding+1) == 'x' ) {
+     // Hex value
+     fprintf(fp,"return %s;", encoding);
+@@ -2707,16 +2704,16 @@
+     MemInterface *mem_interface = oper._interface->is_MemInterface();
+     const char *base = mem_interface->_base;
+     const char *disp = mem_interface->_disp;
+-    if( emit_position && (strcmp(name,"base") == 0) 
+-        && base != NULL && is_regI(base, oper, globals) 
++    if( emit_position && (strcmp(name,"base") == 0)
++        && base != NULL && is_regI(base, oper, globals)
+         && disp != NULL && is_conP(disp, oper, globals) ) {
+-      // Found a memory access using a constant pointer for a displacement 
++      // Found a memory access using a constant pointer for a displacement
+       // and a base register containing an integer offset.
+-      // In this case the base and disp are reversed with respect to what 
++      // In this case the base and disp are reversed with respect to what
+       // is expected by MachNode::get_base_and_disp() and MachNode::adr_type().
+       // Provide a non-NULL return for disp_as_type() that will allow adr_type()
+       // to correctly compute the access type for alias analysis.
+-      // 
++      //
+       // See BugId 4796752, operand indOffset32X in i486.ad
+       int idx = rep_var_to_constant_index(disp, oper, globals);
+       fprintf(fp,"  virtual const TypePtr *disp_as_type() const { return _c%d; }\n", idx);
+@@ -2758,7 +2755,7 @@
+ }
+ 
+ //------------------------------defineClasses----------------------------------
+-// Define members of MachNode and MachOper classes based on 
++// Define members of MachNode and MachOper classes based on
+ // operand and instruction lists
+ void ArchDesc::defineClasses(FILE *fp) {
+ 
+@@ -2781,7 +2778,7 @@
+   fprintf(fp,"\n");
+   fprintf(fp,"//------------------Define classes derived from MachOper---------------------\n");
+   // Iterate through all operands
+-  _operands.reset(); 
++  _operands.reset();
+   OperandForm *oper;
+   for( ; (oper = (OperandForm*)_operands.iter()) != NULL; ) {
+     // Ensure this is a machine-world instruction
+@@ -2854,15 +2851,15 @@
+ 
+   bool used = false;
+   // Output the definitions for expand rules & peephole rules
+-  _instructions.reset(); 
++  _instructions.reset();
+   for( ; (instr = (InstructForm*)_instructions.iter()) != NULL; ) {
+     // Ensure this is a machine-world instruction
+     if ( instr->ideal_only() ) continue;
+     // If there are multiple defs/kills, or an explicit expand rule, build rule
+     if( instr->expands() || instr->needs_projections() ||
+         instr->has_temps() ||
+-        instr->_matrule != NULL && 
+-        instr->num_opnds() != instr->num_unique_opnds() ) 
++        instr->_matrule != NULL &&
++        instr->num_opnds() != instr->num_unique_opnds() )
+       defineExpand(_CPP_EXPAND_file._fp, instr);
+     // If there is an explicit peephole rule, build it
+     if ( instr->peepholes() )
+@@ -2879,7 +2876,7 @@
+   define_fill_new_machnode(used, fp);
+ 
+   // Output the definitions for labels
+-  _instructions.reset(); 
++  _instructions.reset();
+   while( (instr = (InstructForm*)_instructions.iter()) != NULL ) {
+     // Ensure this is a machine-world instruction
+     if ( instr->ideal_only() ) continue;
+@@ -2898,7 +2895,7 @@
+   }
+ 
+   // Output the definitions for methods
+-  _instructions.reset(); 
++  _instructions.reset();
+   while( (instr = (InstructForm*)_instructions.iter()) != NULL ) {
+     // Ensure this is a machine-world instruction
+     if ( instr->ideal_only() ) continue;
+@@ -2916,7 +2913,7 @@
+   }
+ 
+   // Define this instruction's number of relocation entries, base is '0'
+-  _instructions.reset(); 
++  _instructions.reset();
+   while( (instr = (InstructForm*)_instructions.iter()) != NULL ) {
+     // Output the definition for number of relocation entries
+     uint reloc_size = instr->reloc(_globalNames);
+@@ -2930,13 +2927,13 @@
+   fprintf(fp,"\n");
+ 
+   // Output the definitions for code generation
+-  // 
++  //
+   // address  ___Node::emit(address ptr, PhaseRegAlloc *ra_) const {
+   //   // ...  encoding defined by user
+   //   return ptr;
+   // }
+   //
+-  _instructions.reset(); 
++  _instructions.reset();
+   for( ; (instr = (InstructForm*)_instructions.iter()) != NULL; ) {
+     // Ensure this is a machine-world instruction
+     if ( instr->ideal_only() ) continue;
+@@ -2950,7 +2947,7 @@
+   }
+ 
+   // Output the definitions for alias analysis
+-  _instructions.reset(); 
++  _instructions.reset();
+   for( ; (instr = (InstructForm*)_instructions.iter()) != NULL; ) {
+     // Ensure this is a machine-world instruction
+     if ( instr->ideal_only() ) continue;
+@@ -2964,10 +2961,10 @@
+ 
+     if ( memory_operand != InstructForm::NO_MEMORY_OPERAND ) {
+       if( memory_operand == InstructForm::MANY_MEMORY_OPERANDS ) {
+-	fprintf(fp,"const TypePtr *%sNode::adr_type() const { return TypePtr::BOTTOM; }\n", instr->_ident);
+-	fprintf(fp,"const MachOper* %sNode::memory_operand() const { return (MachOper*)-1; }\n", instr->_ident);
++        fprintf(fp,"const TypePtr *%sNode::adr_type() const { return TypePtr::BOTTOM; }\n", instr->_ident);
++        fprintf(fp,"const MachOper* %sNode::memory_operand() const { return (MachOper*)-1; }\n", instr->_ident);
+       } else {
+-	fprintf(fp,"const MachOper* %sNode::memory_operand() const { return _opnds[%d]; }\n", instr->_ident, memory_operand);
++        fprintf(fp,"const MachOper* %sNode::memory_operand() const { return _opnds[%d]; }\n", instr->_ident, memory_operand);
+   }
+     }
+   }
+@@ -3027,7 +3024,7 @@
+ // Information needed to generate the ReduceOp mapping for the DFA
+ class OutputReduceOp : public OutputMap {
+ public:
+-  OutputReduceOp(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD) 
++  OutputReduceOp(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD)
+     : OutputMap(hpp, cpp, globals, AD) {};
+ 
+   void declaration() { fprintf(_hpp, "extern const int   reduceOp[];\n"); }
+@@ -3035,7 +3032,7 @@
+   void closing()     { fprintf(_cpp, "  0 // no trailing comma\n");
+                        OutputMap::closing();
+   }
+-  void map(OpClassForm &opc)  { 
++  void map(OpClassForm &opc)  {
+     const char *reduce = opc._ident;
+     if( reduce )  fprintf(_cpp, "  %s_rule", reduce);
+     else          fprintf(_cpp, "  0");
+@@ -3053,7 +3050,7 @@
+     if( reduce )  fprintf(_cpp, "  %s_rule", reduce);
+     else          fprintf(_cpp, "  0");
+   }
+-  void map(char         *reduce) { 
++  void map(char         *reduce) {
+     if( reduce )  fprintf(_cpp, "  %s_rule", reduce);
+     else          fprintf(_cpp, "  0");
+   }
+@@ -3062,7 +3059,7 @@
+ // Information needed to generate the LeftOp mapping for the DFA
+ class OutputLeftOp : public OutputMap {
+ public:
+-  OutputLeftOp(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD) 
++  OutputLeftOp(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD)
+     : OutputMap(hpp, cpp, globals, AD) {};
+ 
+   void declaration() { fprintf(_hpp, "extern const int   leftOp[];\n"); }
+@@ -3092,7 +3089,7 @@
+ // Information needed to generate the RightOp mapping for the DFA
+ class OutputRightOp : public OutputMap {
+ public:
+-  OutputRightOp(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD) 
++  OutputRightOp(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD)
+     : OutputMap(hpp, cpp, globals, AD) {};
+ 
+   void declaration() { fprintf(_hpp, "extern const int   rightOp[];\n"); }
+@@ -3122,7 +3119,7 @@
+ // Information needed to generate the Rule names for the DFA
+ class OutputRuleName : public OutputMap {
+ public:
+-  OutputRuleName(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD) 
++  OutputRuleName(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD)
+     : OutputMap(hpp, cpp, globals, AD) {};
+ 
+   void declaration() { fprintf(_hpp, "extern const char *ruleName[];\n"); }
+@@ -3140,7 +3137,7 @@
+ // Information needed to generate the swallowed mapping for the DFA
+ class OutputSwallowed : public OutputMap {
+ public:
+-  OutputSwallowed(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD) 
++  OutputSwallowed(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD)
+     : OutputMap(hpp, cpp, globals, AD) {};
+ 
+   void declaration() { fprintf(_hpp, "extern const bool  swallowed[];\n"); }
+@@ -3161,7 +3158,7 @@
+ // Information needed to generate the decision array for instruction chain rule
+ class OutputInstChainRule : public OutputMap {
+ public:
+-  OutputInstChainRule(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD) 
++  OutputInstChainRule(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD)
+     : OutputMap(hpp, cpp, globals, AD) {};
+ 
+   void declaration() { fprintf(_hpp, "extern const bool  instruction_chain_rule[];\n"); }
+@@ -3194,10 +3191,10 @@
+   map.declaration();
+   fprintf(fp_cpp,"\n");
+   map.definition();
+-  
++
+   // Output the mapping for operands
+   map.record_position(OutputMap::BEGIN_OPERANDS, idx );
+-  _operands.reset(); 
++  _operands.reset();
+   for(; (op = (OperandForm*)_operands.iter()) != NULL; ) {
+     // Ensure this is a machine-world instruction
+     if ( op->ideal_only() )  continue;
+@@ -3210,7 +3207,7 @@
+ 
+   // Place all user-defined operand classes into the mapping
+   map.record_position(OutputMap::BEGIN_OPCLASSES, idx );
+-  _opclass.reset(); 
++  _opclass.reset();
+   for(; (opc = (OpClassForm*)_opclass.iter()) != NULL; ) {
+     map.map(*opc);    fprintf(fp_cpp, ", // %d\n", idx);
+     ++idx;
+@@ -3219,7 +3216,7 @@
+ 
+   // Place all internally defined operands into the mapping
+   map.record_position(OutputMap::BEGIN_INTERNALS, idx );
+-  _internalOpNames.reset(); 
++  _internalOpNames.reset();
+   char *name = NULL;
+   for(; (name = (char *)_internalOpNames.iter()) != NULL; ) {
+     map.map(name);    fprintf(fp_cpp, ", // %d\n", idx);
+@@ -3233,24 +3230,24 @@
+     // Output all simple instruction chain rules first
+     map.record_position(OutputMap::BEGIN_INST_CHAIN_RULES, idx );
+     {
+-      _instructions.reset(); 
++      _instructions.reset();
+       for(; (inst = (InstructForm*)_instructions.iter()) != NULL; ) {
+         // Ensure this is a machine-world instruction
+         if ( inst->ideal_only() )  continue;
+         if ( ! inst->is_simple_chain_rule(_globalNames) ) continue;
+         if ( inst->rematerialize(_globalNames, get_registers()) ) continue;
+-        
++
+         map.map(*inst);      fprintf(fp_cpp, ", // %d\n", idx);
+         ++idx;
+       };
+       map.record_position(OutputMap::BEGIN_REMATERIALIZE, idx );
+-      _instructions.reset(); 
++      _instructions.reset();
+       for(; (inst = (InstructForm*)_instructions.iter()) != NULL; ) {
+         // Ensure this is a machine-world instruction
+         if ( inst->ideal_only() )  continue;
+         if ( ! inst->is_simple_chain_rule(_globalNames) ) continue;
+         if ( ! inst->rematerialize(_globalNames, get_registers()) ) continue;
+-        
++
+         map.map(*inst);      fprintf(fp_cpp, ", // %d\n", idx);
+         ++idx;
+       };
+@@ -3258,24 +3255,24 @@
+     }
+     // Output all instructions that are NOT simple chain rules
+     {
+-      _instructions.reset(); 
++      _instructions.reset();
+       for(; (inst = (InstructForm*)_instructions.iter()) != NULL; ) {
+         // Ensure this is a machine-world instruction
+         if ( inst->ideal_only() )  continue;
+         if ( inst->is_simple_chain_rule(_globalNames) ) continue;
+         if ( ! inst->rematerialize(_globalNames, get_registers()) ) continue;
+-        
++
+         map.map(*inst);      fprintf(fp_cpp, ", // %d\n", idx);
+         ++idx;
+       };
+       map.record_position(OutputMap::END_REMATERIALIZE, idx );
+-      _instructions.reset(); 
++      _instructions.reset();
+       for(; (inst = (InstructForm*)_instructions.iter()) != NULL; ) {
+         // Ensure this is a machine-world instruction
+         if ( inst->ideal_only() )  continue;
+         if ( inst->is_simple_chain_rule(_globalNames) ) continue;
+         if ( inst->rematerialize(_globalNames, get_registers()) ) continue;
+-        
++
+         map.map(*inst);      fprintf(fp_cpp, ", // %d\n", idx);
+         ++idx;
+       };
+@@ -3323,7 +3320,7 @@
+ //---------------------------addHeaderBlocks-----------------------------
+ void ArchDesc::addHeaderBlocks(FILE *fp_hpp) {
+   if (_header.count() > 0)
+-    _header.output(fp_hpp); 
++    _header.output(fp_hpp);
+ }
+ //-------------------------addPreHeaderBlocks----------------------------
+ void ArchDesc::addPreHeaderBlocks(FILE *fp_hpp) {
+@@ -3331,7 +3328,7 @@
+   globalDefs().print_defines(fp_hpp);
+ 
+   if (_pre_header.count() > 0)
+-    _pre_header.output(fp_hpp); 
++    _pre_header.output(fp_hpp);
+ }
+ 
+ //---------------------------buildReduceMaps-----------------------------
+@@ -3415,7 +3412,7 @@
+ static void path_to_constant(FILE *fp, FormDict &globals,
+                              MatchNode *mnode, uint idx) {
+   if ( ! mnode) return;
+-  
++
+   unsigned    position = 0;
+   const char *result   = NULL;
+   const char *name     = NULL;
+@@ -3447,14 +3444,14 @@
+     return;
+   }
+ 
+-  // If constant is in left child, build path and recurse 
++  // If constant is in left child, build path and recurse
+   uint lConsts = (mnode->_lChild) ? (mnode->_lChild->num_consts(globals) ) : 0;
+   uint rConsts = (mnode->_rChild) ? (mnode->_rChild->num_consts(globals) ) : 0;
+   if ( (mnode->_lChild) && (lConsts > idx) ) {
+     fprintf(fp, "_kids[0]->");
+     path_to_constant(fp, globals, mnode->_lChild, idx);
+     return;
+-  } 
++  }
+   // If constant is in right child, build path and recurse
+   if ( (mnode->_rChild) && (rConsts > (idx - lConsts) ) ) {
+     idx = idx - lConsts;
+@@ -3466,7 +3463,7 @@
+ }
+ 
+ // Generate code that is executed when generating a specific Machine Operand
+-static void genMachOperCase(FILE *fp, FormDict &globalNames, ArchDesc &AD, 
++static void genMachOperCase(FILE *fp, FormDict &globalNames, ArchDesc &AD,
+                             OperandForm &op) {
+   const char *opName         = op._ident;
+   const char *opEnumName     = AD.machOperEnum(opName);
+@@ -3476,7 +3473,7 @@
+   fprintf(fp, "  case %s:", opEnumName);
+   fprintf(fp, "\n    return new (C) %sOper(", opName);
+   // Access parameters for constructor from the stat object
+-  // 
++  //
+   // Build access to condition code value
+   if ( (num_consts > 0) ) {
+     uint i = 0;
+@@ -3497,9 +3494,9 @@
+   // Build switch to invoke 'new' for a specific MachOper
+   fprintf(fp_cpp, "\n");
+   fprintf(fp_cpp, "\n");
+-  fprintf(fp_cpp, 
++  fprintf(fp_cpp,
+           "//------------------------- MachOper Generator ---------------\n");
+-  fprintf(fp_cpp, 
++  fprintf(fp_cpp,
+           "// A switch statement on the dense-packed user-defined type system\n"
+           "// that invokes 'new' on the corresponding class constructor.\n");
+   fprintf(fp_cpp, "\n");
+@@ -3516,7 +3513,7 @@
+   for( ; (op =  (OperandForm*)_operands.iter()) != NULL; ) {
+     // Ensure this is a machine-world instruction
+     if ( op->ideal_only() )  continue;
+-    
++
+     genMachOperCase(fp_cpp, _globalNames, *this, *op);
+   };
+ 
+@@ -3550,31 +3547,31 @@
+ // Build a new MachNode, for MachNodeGenerator or cisc-spilling
+ void ArchDesc::buildMachNode(FILE *fp_cpp, InstructForm *inst, const char *indent) {
+   const char *opType  = NULL;
+-  const char *opClass = inst->_ident; 
++  const char *opClass = inst->_ident;
+ 
+   // Create the MachNode object
+   fprintf(fp_cpp, "%s %sNode *node = new (C) %sNode();\n",indent, opClass,opClass);
+ 
+   if ( (inst->num_post_match_opnds() != 0) ) {
+     // Instruction that contains operands which are not in match rule.
+-    // 
++    //
+     // Check if the first post-match component may be an interesting def
+     bool           dont_care = false;
+     ComponentList &comp_list = inst->_components;
+     Component     *comp      = NULL;
+     comp_list.reset();
+     if ( comp_list.match_iter() != NULL )    dont_care = true;
+-    
++
+     // Insert operands that are not in match-rule.
+     // Only insert a DEF if the do_care flag is set
+     comp_list.reset();
+     while ( comp = comp_list.post_match_iter() ) {
+       // Check if we don't care about DEFs or KILLs that are not USEs
+       if ( dont_care && (! comp->isa(Component::USE)) ) {
+-	continue;
+-      } 
++        continue;
++      }
+       dont_care = true;
+-      // For each operand not in the match rule, call MachOperGenerator 
++      // For each operand not in the match rule, call MachOperGenerator
+       // with the enum for the opcode that needs to be built
+       // and the node just built, the parent of the operand.
+       ComponentList clist = inst->_components;
+@@ -3584,15 +3581,15 @@
+       fprintf(fp_cpp, "%s node->set_opnd_array(%d, ", indent, index);
+       fprintf(fp_cpp, "MachOperGenerator(%s, C));\n", opcode);
+       }
+-  } 
++  }
+   else if ( inst->is_chain_of_constant(_globalNames, opType) ) {
+     // An instruction that chains from a constant!
+     // In this case, we need to subsume the constant into the node
+     // at operand position, oper_input_base().
+-    // 
++    //
+     // Fill in the constant
+-    fprintf(fp_cpp, "%s node->_opnd_array[%d] = ", indent, 
+-	    inst->oper_input_base(_globalNames));
++    fprintf(fp_cpp, "%s node->_opnd_array[%d] = ", indent,
++            inst->oper_input_base(_globalNames));
+     // #####
+     // Check for multiple constants and then fill them in.
+     // Just like MachOperGenerator
+@@ -3606,14 +3603,14 @@
+       uint i = 0;
+       path_to_constant(fp_cpp, _globalNames, op->_matrule, i);
+       for ( i = 1; i < num_consts; ++i ) {
+-	fprintf(fp_cpp, ", ");
+-	path_to_constant(fp_cpp, _globalNames, op->_matrule, i);
++        fprintf(fp_cpp, ", ");
++        path_to_constant(fp_cpp, _globalNames, op->_matrule, i);
+       }
+     }
+     fprintf(fp_cpp, " );\n");
+     // #####
+   }
+-  
++
+   // Fill in the bottom_type where requested
+   if ( inst->captures_bottom_type() ) {
+     fprintf(fp_cpp, "%s node->_bottom_type = _leaf->bottom_type();\n", indent);
+@@ -3625,7 +3622,7 @@
+   if( inst->is_ideal_fastlock() ) {
+     fprintf(fp_cpp, "%s node->_counters = _leaf->as_FastLock()->counters();\n", indent);
+   }
+-  
++
+ }
+ 
+ //---------------------------declare_cisc_version------------------------------
+@@ -3741,9 +3738,9 @@
+   // Build switch to invoke 'new' for a specific MachNode
+   fprintf(fp_cpp, "\n");
+   fprintf(fp_cpp, "\n");
+-  fprintf(fp_cpp, 
++  fprintf(fp_cpp,
+           "//------------------------- MachNode Generator ---------------\n");
+-  fprintf(fp_cpp, 
++  fprintf(fp_cpp,
+           "// A switch statement on the dense-packed user-defined type system\n"
+           "// that invokes 'new' on the corresponding class constructor.\n");
+   fprintf(fp_cpp, "\n");
+@@ -3751,9 +3748,9 @@
+   fprintf(fp_cpp, "(int opcode, Compile* C)");
+   fprintf(fp_cpp, "{\n");
+   fprintf(fp_cpp, "  switch(opcode) {\n");
+- 
++
+   // Provide constructor for all user-defined instructions
+-  _instructions.reset(); 
++  _instructions.reset();
+   int  opIndex = operandFormCount();
+   InstructForm *inst;
+   for( ; (inst = (InstructForm*)_instructions.iter()) != NULL; ) {
+@@ -3761,13 +3758,13 @@
+     if ( inst->_matrule == NULL ) continue;
+ 
+     int         opcode  = opIndex++;
+-    const char *opClass = inst->_ident; 
++    const char *opClass = inst->_ident;
+     char       *opType  = NULL;
+ 
+     // Generate the case statement for this instruction
+     fprintf(fp_cpp, "  case %s_rule:", opClass);
+ 
+-    // Start local scope 
++    // Start local scope
+     fprintf(fp_cpp, "  {\n");
+     // Generate code to construct the new MachNode
+     buildMachNode(fp_cpp, inst, "     ");
+@@ -3931,7 +3928,7 @@
+   if( _frame->_cisc_spilling_operand_name != NULL ) {
+     const Form *form = _globalNames[_frame->_cisc_spilling_operand_name];
+     OperandForm *oper = form ? form->is_operand() : NULL;
+-    // Verify the user's suggestion 
++    // Verify the user's suggestion
+     if( oper != NULL ) {
+       // Ensure that match field is defined.
+       if ( oper->_matrule != NULL )  {
+@@ -3959,7 +3956,7 @@
+ 
+   if( cisc_spill_operand() != NULL ) {
+     // N^2 comparison of instructions looking for a cisc-spilling version
+-    _instructions.reset(); 
++    _instructions.reset();
+     InstructForm *instr;
+     for( ; (instr = (InstructForm*)_instructions.iter()) != NULL; ) {
+       // Ensure that match field is defined.
+@@ -4000,7 +3997,7 @@
+ void ArchDesc::build_cisc_spill_instructions(FILE *fp_hpp, FILE *fp_cpp) {
+   // Output the table for cisc spilling
+   fprintf(fp_cpp, "//  The following instructions can cisc-spill\n");
+-  _instructions.reset(); 
++  _instructions.reset();
+   InstructForm *inst = NULL;
+   for(; (inst = (InstructForm*)_instructions.iter()) != NULL; ) {
+     // Ensure this is a machine-world instruction
+@@ -4020,7 +4017,7 @@
+ void ArchDesc::identify_short_branches() {
+   // Walk over all instructions, checking to see if they match a short
+   // branching alternate.
+-  _instructions.reset(); 
++  _instructions.reset();
+   InstructForm *instr;
+   while( (instr = (InstructForm*)_instructions.iter()) != NULL ) {
+     // The instruction must have a match rule.
+@@ -4041,7 +4038,7 @@
+ // Identify unique operands.
+ void ArchDesc::identify_unique_operands() {
+   // Walk over all instructions.
+-  _instructions.reset(); 
++  _instructions.reset();
+   InstructForm *instr;
+   while( (instr = (InstructForm*)_instructions.iter()) != NULL ) {
+     // Ensure this is a machine-world instruction
+diff -ruN openjdk6/hotspot/src/share/vm/adlc/output_h.cpp openjdk/hotspot/src/share/vm/adlc/output_h.cpp
+--- openjdk6/hotspot/src/share/vm/adlc/output_h.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/adlc/output_h.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)output_h.cpp	1.178 07/05/05 17:05:03 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // output_h.cpp - Class HPP file output routines for architecture definition
+@@ -44,8 +41,8 @@
+ // // Enumerate machine registers starting after reserved regs.
+ // // in the order of occurrence in the register block.
+ // enum MachRegisterNumbers {
+-//   EAX_num = 0, 
+-//   ...    
++//   EAX_num = 0,
++//   ...
+ //   _last_Mach_Reg
+ // }
+ void ArchDesc::buildMachRegisterNumbers(FILE *fp_hpp) {
+@@ -109,8 +106,8 @@
+ // // Enumerate machine registers starting after reserved regs.
+ // // in the order of occurrence in the alloc_class(es).
+ // enum MachRegisterEncodes {
+-//   EAX_enc = 0x00, 
+-//   ...    
++//   EAX_enc = 0x00,
++//   ...
+ // }
+ void ArchDesc::buildMachRegisterEncodes(FILE *fp_hpp) {
+   if (_register) {
+@@ -156,7 +153,7 @@
+ // Declare an array containing the machine register encoding values
+ static void declareRegEncodes(FILE *fp, RegisterForm *registers) {
+   if (registers) {
+-    // // // 
++    // // //
+     // fprintf(fp,"\n");
+     // fprintf(fp,"// An array containing the machine register encode values\n");
+     // fprintf(fp,"extern const char  regEncode[];\n");
+@@ -191,7 +188,7 @@
+ static void declareConstStorage(FILE *fp, FormDict &globals, OperandForm *oper) {
+   int i = 0;
+   Component *comp;
+-  
++
+   if (oper->num_consts(globals) == 0) return;
+   // Iterate over the component list looking for constants
+   oper->_components.reset();
+@@ -256,11 +253,11 @@
+ 
+ // Declare constructor.
+ // Parameters start with condition code, then all other constants
+-// 
++//
+ // (0) public:
+ // (1)  MachXOper(int32 ccode, int32 c0, int32 c1, ..., int32 cn)
+ // (2)     : _ccode(ccode), _c0(c0), _c1(c1), ..., _cn(cn) { }
+-// 
++//
+ static void defineConstructor(FILE *fp, const char *name, uint num_consts,
+                               ComponentList &lst, bool is_ideal_bool,
+                               Form::DataType constant_type, FormDict &globals) {
+@@ -279,16 +276,16 @@
+   if ((comp = lst.iter()) == NULL) {
+     assert(num_consts == 1, "Bad component list detected.\n");
+     switch( constant_type ) {
+-    case Form::idealI : { 
++    case Form::idealI : {
+       fprintf(fp,is_ideal_bool ? "BoolTest::mask c%d" : "int32 c%d", i);
+-      break;        
++      break;
+     }
+     case Form::idealP : { fprintf(fp,"const TypePtr *c%d", i); break; }
+     case Form::idealL : { fprintf(fp,"jlong c%d", i);   break;        }
+     case Form::idealF : { fprintf(fp,"jfloat c%d", i);  break;        }
+     case Form::idealD : { fprintf(fp,"jdouble c%d", i); break;        }
+     default:
+-      assert(!is_ideal_bool, "Non-constant operand lacks component list."); 
++      assert(!is_ideal_bool, "Non-constant operand lacks component list.");
+       break;
+     }
+   } // end if NULL
+@@ -296,34 +293,34 @@
+     lst.reset();
+     while((comp = lst.iter()) != NULL) {
+       if (!strcmp(comp->base_type(globals), "ConI")) {
+-	if (i > 0) fprintf(fp,", ");
+-	fprintf(fp,"int32 c%d", i);
+-	i++;
++        if (i > 0) fprintf(fp,", ");
++        fprintf(fp,"int32 c%d", i);
++        i++;
+       }
+       else if (!strcmp(comp->base_type(globals), "ConP")) {
+-	if (i > 0) fprintf(fp,", ");
+-	fprintf(fp,"const TypePtr *c%d", i);
+-	i++;
++        if (i > 0) fprintf(fp,", ");
++        fprintf(fp,"const TypePtr *c%d", i);
++        i++;
+       }
+       else if (!strcmp(comp->base_type(globals), "ConL")) {
+-	if (i > 0) fprintf(fp,", ");
+-	fprintf(fp,"jlong c%d", i);
+-	i++;
++        if (i > 0) fprintf(fp,", ");
++        fprintf(fp,"jlong c%d", i);
++        i++;
+       }
+       else if (!strcmp(comp->base_type(globals), "ConF")) {
+-	if (i > 0) fprintf(fp,", ");
+-	fprintf(fp,"jfloat c%d", i);
+-	i++;
++        if (i > 0) fprintf(fp,", ");
++        fprintf(fp,"jfloat c%d", i);
++        i++;
+       }
+       else if (!strcmp(comp->base_type(globals), "ConD")) {
+-	if (i > 0) fprintf(fp,", ");
+-	fprintf(fp,"jdouble c%d", i);
+-	i++;
++        if (i > 0) fprintf(fp,", ");
++        fprintf(fp,"jdouble c%d", i);
++        i++;
+       }
+       else if (!strcmp(comp->base_type(globals), "Bool")) {
+-	if (i > 0) fprintf(fp,", ");
+-	fprintf(fp,"BoolTest::mask c%d", i);
+-	i++;
++        if (i > 0) fprintf(fp,", ");
++        fprintf(fp,"BoolTest::mask c%d", i);
++        i++;
+       }
+     }
+   }
+@@ -345,34 +342,34 @@
+ 
+ // Generate the format rule for condition codes
+ static void defineCCodeDump(FILE *fp, int i) {
+-  fprintf(fp, "         if( _c%d == BoolTest::eq ) tty->print(\"eq\");\n",i);
+-  fprintf(fp, "    else if( _c%d == BoolTest::ne ) tty->print(\"ne\");\n",i);
+-  fprintf(fp, "    else if( _c%d == BoolTest::le ) tty->print(\"le\");\n",i);
+-  fprintf(fp, "    else if( _c%d == BoolTest::ge ) tty->print(\"ge\");\n",i);
+-  fprintf(fp, "    else if( _c%d == BoolTest::lt ) tty->print(\"lt\");\n",i);
+-  fprintf(fp, "    else if( _c%d == BoolTest::gt ) tty->print(\"gt\");\n",i);
++  fprintf(fp, "         if( _c%d == BoolTest::eq ) st->print(\"eq\");\n",i);
++  fprintf(fp, "    else if( _c%d == BoolTest::ne ) st->print(\"ne\");\n",i);
++  fprintf(fp, "    else if( _c%d == BoolTest::le ) st->print(\"le\");\n",i);
++  fprintf(fp, "    else if( _c%d == BoolTest::ge ) st->print(\"ge\");\n",i);
++  fprintf(fp, "    else if( _c%d == BoolTest::lt ) st->print(\"lt\");\n",i);
++  fprintf(fp, "    else if( _c%d == BoolTest::gt ) st->print(\"gt\");\n",i);
+ }
+ 
+ // Output code that dumps constant values, increment "i" if type is constant
+ static uint dump_spec_constant(FILE *fp, const char *ideal_type, uint i) {
+   if (!strcmp(ideal_type, "ConI")) {
+-    fprintf(fp,"   tty->print(\"#%%d\", _c%d);\n", i);
++    fprintf(fp,"   st->print(\"#%%d\", _c%d);\n", i);
+     ++i;
+   }
+   else if (!strcmp(ideal_type, "ConP")) {
+-    fprintf(fp,"    _c%d->dump();\n", i);
++    fprintf(fp,"    _c%d->dump_on(st);\n", i);
+     ++i;
+   }
+   else if (!strcmp(ideal_type, "ConL")) {
+-    fprintf(fp,"    tty->print(\"#\" INT64_FORMAT, _c%d);\n", i);
++    fprintf(fp,"    st->print(\"#\" INT64_FORMAT, _c%d);\n", i);
+     ++i;
+   }
+   else if (!strcmp(ideal_type, "ConF")) {
+-    fprintf(fp,"    tty->print(\"#%%f\", _c%d);\n", i);
++    fprintf(fp,"    st->print(\"#%%f\", _c%d);\n", i);
+     ++i;
+   }
+   else if (!strcmp(ideal_type, "ConD")) {
+-    fprintf(fp,"    tty->print(\"#%%f\", _c%d);\n", i);
++    fprintf(fp,"    st->print(\"#%%f\", _c%d);\n", i);
+     ++i;
+   }
+   else if (!strcmp(ideal_type, "Bool")) {
+@@ -386,10 +383,10 @@
+ // Generate the format rule for an operand
+ void gen_oper_format(FILE *fp, FormDict &globals, OperandForm &oper, bool for_c_file = false) {
+   if (!for_c_file) {
+-    // invoked after output #ifndef PRODUCT to ad_<arch>.hpp 
++    // invoked after output #ifndef PRODUCT to ad_<arch>.hpp
+     // compile the bodies separately, to cut down on recompilations
+-    fprintf(fp,"  virtual void           int_format(PhaseRegAlloc *ra, const MachNode *node) const;\n");
+-    fprintf(fp,"  virtual void           ext_format(PhaseRegAlloc *ra, const MachNode *node, int idx) const;\n");
++    fprintf(fp,"  virtual void           int_format(PhaseRegAlloc *ra, const MachNode *node, outputStream *st) const;\n");
++    fprintf(fp,"  virtual void           ext_format(PhaseRegAlloc *ra, const MachNode *node, int idx, outputStream *st) const;\n");
+     return;
+   }
+ 
+@@ -398,7 +395,7 @@
+ 
+   // Generate internal format function, used when stored locally
+   fprintf(fp, "\n#ifndef PRODUCT\n");
+-  fprintf(fp,"void %sOper::int_format(PhaseRegAlloc *ra, const MachNode *node) const {\n", oper._ident);
++  fprintf(fp,"void %sOper::int_format(PhaseRegAlloc *ra, const MachNode *node, outputStream *st) const {\n", oper._ident);
+   // Generate the user-defined portion of the format
+   if (oper._format) {
+     if ( oper._format->_strings.count() != 0 ) {
+@@ -414,8 +411,8 @@
+         // Check if this is a standard string or a replacement variable
+         if ( string != NameList::_signal ) {
+           // Normal string
+-          // Pass through to tty->print
+-          fprintf(fp,"tty->print(\"%s\");\n", string);
++          // Pass through to st->print
++          fprintf(fp,"st->print(\"%s\");\n", string);
+         } else {
+           // Replacement variable
+           const char *rep_var = oper._format->_rep_vars.iter();
+@@ -425,7 +422,7 @@
+           // Get index if register or constant
+           if ( op->_matrule && op->_matrule->is_base_register(globals) ) {
+             idx  = oper.register_position( globals, rep_var);
+-          } 
++          }
+           else if (op->_matrule && op->_matrule->is_base_constant(globals)) {
+             idx  = oper.constant_position( globals, rep_var);
+           } else {
+@@ -436,7 +433,7 @@
+           if ( op != NULL )   op->int_format(fp, globals, idx);
+ 
+           if ( idx == -1 ) {
+-            fprintf(stderr, 
++            fprintf(stderr,
+                     "Using a name, %s, that isn't in match rule\n", rep_var);
+             assert( strcmp(op->_ident,"label")==0, "Unimplemented");
+           }
+@@ -450,7 +447,7 @@
+   } else { // oper._format == NULL
+     // Provide a few special case formats where the AD writer cannot.
+     if ( strcmp(oper._ident,"Universe")==0 ) {
+-      fprintf(fp, "  tty->print(\"$$univ\");\n");
++      fprintf(fp, "  st->print(\"$$univ\");\n");
+     }
+     // labelOper::int_format is defined in ad_<...>.cpp
+   }
+@@ -461,7 +458,7 @@
+   fprintf(fp,"}\n");
+ 
+   // Generate external format function, when data is stored externally
+-  fprintf(fp,"void %sOper::ext_format(PhaseRegAlloc *ra, const MachNode *node, int idx) const {\n", oper._ident);
++  fprintf(fp,"void %sOper::ext_format(PhaseRegAlloc *ra, const MachNode *node, int idx, outputStream *st) const {\n", oper._ident);
+   // Generate the user-defined portion of the format
+   if (oper._format) {
+     if ( oper._format->_strings.count() != 0 ) {
+@@ -481,18 +478,18 @@
+         // Check if this is a standard string or a replacement variable
+         if ( string != NameList::_signal ) {
+           // Normal string
+-          // Pass through to tty->print
+-          fprintf(fp,"tty->print(\"%s\");\n", string);
++          // Pass through to st->print
++          fprintf(fp,"st->print(\"%s\");\n", string);
+         } else {
+           // Replacement variable
+           const char *rep_var = oper._format->_rep_vars.iter();
+-	  // Check that it is a local name, and an operand
++          // Check that it is a local name, and an operand
+           OperandForm *op      = oper._localNames[rep_var]->is_operand();
+           assert( op, "replacement variable was not found in local names");
+           // Get index if register or constant
+           if ( op->_matrule && op->_matrule->is_base_register(globals) ) {
+             idx  = oper.register_position( globals, rep_var);
+-          } 
++          }
+           else if (op->_matrule && op->_matrule->is_base_constant(globals)) {
+             idx  = oper.constant_position( globals, rep_var);
+           } else {
+@@ -504,13 +501,13 @@
+           // Lookup the index position of the replacement variable
+           idx      = oper._components.operand_position_format(rep_var);
+           if ( idx == -1 ) {
+-            fprintf(stderr, 
++            fprintf(stderr,
+                     "Using a name, %s, that isn't in match rule\n", rep_var);
+             assert( strcmp(op->_ident,"label")==0, "Unimplemented");
+           }
+         } // Done with a replacement variable
+       } // Done with all format strings
+-      
++
+     } else {
+       // Default formats for base operands (RegI, RegP, ConI, ConP, ...)
+       oper.ext_format(fp, globals, 0);
+@@ -518,7 +515,7 @@
+   } else { // oper._format == NULL
+     // Provide a few special case formats where the AD writer cannot.
+     if ( strcmp(oper._ident,"Universe")==0 ) {
+-      fprintf(fp, "  tty->print(\"$$univ\");\n");
++      fprintf(fp, "  st->print(\"$$univ\");\n");
+     }
+     // labelOper::ext_format is defined in ad_<...>.cpp
+   }
+@@ -535,14 +532,14 @@
+ void gen_inst_format(FILE *fp, FormDict &globals, InstructForm &inst, bool for_c_file = false) {
+   if (!for_c_file) {
+     // compile the bodies separately, to cut down on recompilations
+-    // #ifndef PRODUCT region generated by caller 
+-    fprintf(fp,"  virtual void           format(PhaseRegAlloc *ra) const;\n");
++    // #ifndef PRODUCT region generated by caller
++    fprintf(fp,"  virtual void           format(PhaseRegAlloc *ra, outputStream *st) const;\n");
+     return;
+   }
+ 
+   // Define the format function
+   fprintf(fp, "#ifndef PRODUCT\n");
+-  fprintf(fp, "void %sNode::format(PhaseRegAlloc *ra) const {\n", inst._ident);
++  fprintf(fp, "void %sNode::format(PhaseRegAlloc *ra, outputStream *st) const {\n", inst._ident);
+ 
+   // Generate the user-defined portion of the format
+   if( inst._format ) {
+@@ -559,9 +556,9 @@
+       fprintf(fp,"    ");
+       // Check if this is a standard string or a replacement variable
+       if( string != NameList::_signal )  // Normal string.  Pass through.
+-        fprintf(fp,"tty->print(\"%s\");\n", string);
+-      else			// Replacement variable
+-	inst.rep_var_format( fp, inst._format->_rep_vars.iter() );
++        fprintf(fp,"st->print(\"%s\");\n", string);
++      else                      // Replacement variable
++        inst.rep_var_format( fp, inst._format->_rep_vars.iter() );
+     } // Done with all format strings
+   } // Done generating the user-defined portion of the format
+ 
+@@ -573,8 +570,8 @@
+       fprintf(fp,"    _method->print_short_name();\n");
+       break;
+     case Form::JAVA_STATIC:
+-      fprintf(fp,"    if( _method ) _method->print_short_name(); else tty->print(\" wrapper for: %%s\", _name);\n");
+-      fprintf(fp,"    if( !_method ) dump_trap_args();\n");
++      fprintf(fp,"    if( _method ) _method->print_short_name(st); else st->print(\" wrapper for: %%s\", _name);\n");
++      fprintf(fp,"    if( !_method ) dump_trap_args(st);\n");
+       break;
+     case Form::JAVA_COMPILED:
+     case Form::JAVA_INTERP:
+@@ -582,38 +579,38 @@
+     case Form::JAVA_RUNTIME:
+     case Form::JAVA_LEAF:
+     case Form::JAVA_NATIVE:
+-      fprintf(fp,"    tty->print(\" %%s\", _name);");
++      fprintf(fp,"    st->print(\" %%s\", _name);");
+       break;
+-    default: 
++    default:
+       assert(0,"ShouldNotReacHere");
+     }
+-    fprintf(fp,  "    tty->print_cr(\"\");\n" );
+-    fprintf(fp,  "    if (_jvms) _jvms->format(ra, this); else tty->print_cr(\"        No JVM State Info\");\n" );
+-    fprintf(fp,  "    tty->print(\"        # \");\n" );
+-    fprintf(fp,  "    if( _jvms ) _oop_map->print();\n");
++    fprintf(fp,  "    st->print_cr(\"\");\n" );
++    fprintf(fp,  "    if (_jvms) _jvms->format(ra, this, st); else st->print_cr(\"        No JVM State Info\");\n" );
++    fprintf(fp,  "    st->print(\"        # \");\n" );
++    fprintf(fp,  "    if( _jvms ) _oop_map->print_on(st);\n");
+   }
+   else if(inst.is_ideal_safepoint()) {
+-    fprintf(fp,  "    tty->print(\"\");\n" );
+-    fprintf(fp,  "    if (_jvms) _jvms->format(ra, this); else tty->print_cr(\"        No JVM State Info\");\n" );
+-    fprintf(fp,  "    tty->print(\"        # \");\n" );
+-    fprintf(fp,  "    if( _jvms ) _oop_map->print();\n");
++    fprintf(fp,  "    st->print(\"\");\n" );
++    fprintf(fp,  "    if (_jvms) _jvms->format(ra, this, st); else st->print_cr(\"        No JVM State Info\");\n" );
++    fprintf(fp,  "    st->print(\"        # \");\n" );
++    fprintf(fp,  "    if( _jvms ) _oop_map->print_on(st);\n");
+   }
+   else if( inst.is_ideal_if() ) {
+-    fprintf(fp,  "    tty->print(\"  P=%%f C=%%f\",_prob,_fcnt);\n" );
++    fprintf(fp,  "    st->print(\"  P=%%f C=%%f\",_prob,_fcnt);\n" );
+   }
+   else if( inst.is_ideal_mem() ) {
+     // Print out the field name if available to improve readability
+     fprintf(fp,  "    if (ra->C->alias_type(adr_type())->field() != NULL) {\n");
+-    fprintf(fp,  "      tty->print(\" ! Field \");\n");
++    fprintf(fp,  "      st->print(\" ! Field \");\n");
+     fprintf(fp,  "      if( ra->C->alias_type(adr_type())->is_volatile() )\n");
+-    fprintf(fp,  "        tty->print(\" Volatile\");\n");
+-    fprintf(fp,  "      ra->C->alias_type(adr_type())->field()->holder()->name()->print_symbol_on(tty);\n");
+-    fprintf(fp,  "      tty->print(\".\");\n");
+-    fprintf(fp,  "      ra->C->alias_type(adr_type())->field()->name()->print_symbol_on(tty);\n");
++    fprintf(fp,  "        st->print(\" Volatile\");\n");
++    fprintf(fp,  "      ra->C->alias_type(adr_type())->field()->holder()->name()->print_symbol_on(st);\n");
++    fprintf(fp,  "      st->print(\".\");\n");
++    fprintf(fp,  "      ra->C->alias_type(adr_type())->field()->name()->print_symbol_on(st);\n");
+     fprintf(fp,  "    } else\n");
+     // Make sure 'Volatile' gets printed out
+     fprintf(fp,  "    if( ra->C->alias_type(adr_type())->is_volatile() )\n");
+-    fprintf(fp,  "      tty->print(\" Volatile!\");\n");
++    fprintf(fp,  "      st->print(\" Volatile!\");\n");
+   }
+ 
+   // Complete the definition of the format function
+@@ -635,7 +632,7 @@
+   fprintf(fp_hpp, "// Pipeline_Use_Cycle_Mask Class\n");
+   fprintf(fp_hpp, "class Pipeline_Use_Cycle_Mask {\n");
+ 
+-  if (_pipeline->_maxcycleused <= 
++  if (_pipeline->_maxcycleused <=
+ #ifdef SPARC
+     64
+ #else
+@@ -1069,7 +1066,7 @@
+   fprintf(fp,"\n");
+   fprintf(fp,"//----------------------------Declare classes derived from MachOper----------\n");
+   // Iterate through all operands
+-  _operands.reset(); 
++  _operands.reset();
+   OperandForm *oper;
+   for( ; (oper = (OperandForm*)_operands.iter()) != NULL;) {
+     // Ensure this is a machine-world instruction
+@@ -1094,7 +1091,7 @@
+         in_RegMask(fp);
+       }
+     }
+-    
++
+     // Support storing constants inside the MachOper
+     declareConstStorage(fp,_globalNames,oper);
+ 
+@@ -1122,10 +1119,10 @@
+ 
+     // Declare constructor.
+     // Parameters start with condition code, then all other constants
+-    // 
++    //
+     // (1)  MachXOper(int32 ccode, int32 c0, int32 c1, ..., int32 cn)
+     // (2)     : _ccode(ccode), _c0(c0), _c1(c1), ..., _cn(cn) { }
+-    // 
++    //
+     Form::DataType constant_type = oper->simple_type(_globalNames);
+     defineConstructor(fp, oper->_ident, oper->num_consts(_globalNames),
+                       oper->_components, oper->is_ideal_bool(),
+@@ -1153,9 +1150,9 @@
+             machOperEnum(oper->_ident));
+ 
+     // virtual function to look up ideal return type of machine instruction
+-    // 
++    //
+     // (1)  virtual const Type    *type() const { return .....; }
+-    // 
++    //
+     if ((oper->_matrule) && (oper->_matrule->_lChild == NULL) &&
+         (oper->_matrule->_rChild == NULL)) {
+       unsigned int position = 0;
+@@ -1197,9 +1194,9 @@
+     }
+ 
+ 
+-    // 
++    //
+     // virtual functions for defining the encoding interface.
+-    // 
++    //
+     // Access the linearized ideal register mask,
+     // map to physical register encoding
+     if ( oper->_matrule && oper->_matrule->is_base_register(_globalNames) ) {
+@@ -1335,7 +1332,7 @@
+         assert( false, "ShouldNotReachHere();");
+       }
+     }
+-    
++
+     fprintf(fp,"\n");
+     // // Currently all XXXOper::hash() methods are identical (990820)
+     // declare_hash(fp);
+@@ -1352,16 +1349,16 @@
+     // Machine independent print functionality for debugging
+     // IF we have constants, create a dump_spec function for the derived class
+     //
+-    // (1)  virtual void           dump_spec() const { 
+-    // (2)    tty->print("#%d", _c#);        // Constant != ConP
+-    //  OR    _c#->dump();                   // Type ConP
++    // (1)  virtual void           dump_spec() const {
++    // (2)    st->print("#%d", _c#);        // Constant != ConP
++    //  OR    _c#->dump_on(st);             // Type ConP
+     //  ...
+     // (3)  }
+     uint num_consts = oper->num_consts(_globalNames);
+     if( num_consts > 0 ) {
+       // line (1)
+-      fprintf(fp, "  virtual void           dump_spec() const {\n");
+-      // generate format string for tty->print
++      fprintf(fp, "  virtual void           dump_spec(outputStream *st) const {\n");
++      // generate format string for st->print
+       // Iterate over the component list & spit out the right thing
+       uint i = 0;
+       const char *type = oper->ideal_type(_globalNames);
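
The generator idiom at work in this file: adlc writes the C++ for each operand and instruction class with fprintf, so a retargeting change like the one above (tty becomes the st parameter) is made once in the generator and shows up in every generated dump_spec(). A toy sketch of that idiom, with assumed names; outputStream only appears inside the emitted text:

#include <cstdio>

// Emit a dump_spec() body for an operand with 'num_consts' constants,
// mirroring the comment in the hunk above. Purely illustrative.
void gen_dump_spec(FILE* fp, const char* ident, unsigned num_consts) {
  fprintf(fp, "// dump_spec generated for operand %s\n", ident);
  fprintf(fp, "  virtual void dump_spec(outputStream *st) const {\n");
  for (unsigned i = 0; i < num_consts; i++)
    fprintf(fp, "    st->print(\"#%%d \", _c%u);\n", i);  // %%d survives into output
  fprintf(fp, "  }\n");
}

int main() {
  gen_dump_spec(stdout, "immI", 1);   // prints the generated method
  return 0;
}
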
+@@ -1404,7 +1401,7 @@
+   // Generate Machine Classes for each instruction defined in AD file
+   fprintf(fp,"\n");
+   fprintf(fp,"//----------------------------Declare classes derived from MachNode----------\n");
+-  _instructions.reset(); 
++  _instructions.reset();
+   InstructForm *instr;
+   for( ; (instr = (InstructForm*)_instructions.iter()) != NULL; ) {
+     // Ensure this is a machine-world instruction
+@@ -1412,7 +1409,7 @@
+ 
+     // Build class definition for this instruction
+     fprintf(fp,"\n");
+-    fprintf(fp,"class %sNode : public %s { \n", 
++    fprintf(fp,"class %sNode : public %s { \n",
+             instr->_ident, instr->mach_base_class() );
+     fprintf(fp,"private:\n");
+     fprintf(fp,"  MachOper *_opnd_array[%d];\n", instr->num_opnds() );
+@@ -1441,7 +1438,7 @@
+     if( label_position != -1 ) {
+       // Set the label, stored in labelOper::_branch_label
+       fprintf(fp,"  virtual void           label_set( Label& label, uint block_num );\n");
+-    } 
++    }
+ 
+     // If this instruction contains a methodOper
+     // Declare Node::methods that set operand method's contents
+@@ -1449,7 +1446,7 @@
+     if( method_position != -1 ) {
+       // Set the address method, stored in methodOper::_method
+       fprintf(fp,"  virtual void           method_set( intptr_t method );\n");
+-    } 
++    }
+ 
+     // virtual functions for attributes
+     //
+@@ -1469,8 +1466,8 @@
+       attr = (Attribute *)attr->_next;
+     }
+ 
+-    // virtual functions for encode and format 
+-    // 
++    // virtual functions for encode and format
++    //
+     // Output the opcode function and the encode function here using the
+     // encoding class information in the _insencode slot.
+     if ( instr->_insencode ) {
+@@ -1485,7 +1482,7 @@
+     // Return the top-level ideal opcode.
+     // Use MachNode::ideal_Opcode() for nodes based on MachNode class
+     // if the ideal_Opcode == Op_Node.
+-    if ( strcmp("Node", instr->ideal_Opcode(_globalNames)) != 0 || 
++    if ( strcmp("Node", instr->ideal_Opcode(_globalNames)) != 0 ||
+          strcmp("MachNode", instr->mach_base_class()) != 0 ) {
+       fprintf(fp,"  virtual int            ideal_Opcode() const { return Op_%s; }\n",
+             instr->ideal_Opcode(_globalNames) );
+@@ -1550,7 +1547,7 @@
+     // Starting point for inputs matcher wants.
+     // Use MachNode::oper_input_base() for nodes based on MachNode class
+     // if the base == 1.
+-    if ( instr->oper_input_base(_globalNames) != 1 || 
++    if ( instr->oper_input_base(_globalNames) != 1 ||
+          strcmp("MachNode", instr->mach_base_class()) != 0 ) {
+       fprintf(fp,"  virtual uint           oper_input_base() const { return %d; }\n",
+             instr->oper_input_base(_globalNames));
+@@ -1643,7 +1640,7 @@
+       }
+     }
+ 
+-    // Check if machine instructions that USE memory, but do not DEF memory, 
++    // Check if machine instructions that USE memory, but do not DEF memory,
+     // depend upon a node that defines memory in machine-independent graph.
+     if ( instr->needs_anti_dependence_check(_globalNames) ) {
+       if ( node_flags_set ) {
+@@ -1672,11 +1669,11 @@
+     // Virtual methods which are only generated to override base class
+     if( instr->expands() || instr->needs_projections() ||
+         instr->has_temps() ||
+-        instr->_matrule != NULL && 
++        instr->_matrule != NULL &&
+         instr->num_opnds() != instr->num_unique_opnds() ) {
+       fprintf(fp,"  virtual MachNode      *Expand(State *state, Node_List &proj_list);\n");
+     }
+-  
++
+     if (instr->is_pinned(_globalNames)) {
+       fprintf(fp,"  virtual bool           pinned() const { return ");
+       if (instr->is_parm(_globalNames)) {
+@@ -1700,7 +1697,7 @@
+     // Declare short branch methods, if applicable
+     instr->declare_short_branch_methods(fp);
+ 
+-    // Instructions containing a constant that will be entered into the 
++    // Instructions containing a constant that will be entered into the
+     // float/double table redefine the base virtual function
+ #ifdef SPARC
+     // Sparc doubles entries in the constant table require more space for
+@@ -1716,7 +1713,7 @@
+       fprintf(fp,   " return %d;", table_entries);
+       fprintf(fp, " }\n");
+     }
+-    
++
+ 
+     // See if there is an "ins_pipe" declaration for this instruction
+     if (instr->_ins_pipe) {
+@@ -1739,11 +1736,11 @@
+     // instruct foo() %{ ... bottom_type(TypeRawPtr::BOTTOM); ... %}
+     if( data_type != Form::none ) {
+       // A constant's bottom_type returns a Type containing its constant value
+-      
++
+       // !!!!!
+       // Convert all ints, floats, ... to machine-independent TypeXs
+       // as is done for pointers
+-      // 
++      //
+       // Construct appropriate constant type containing the constant value.
+       fprintf(fp,"  virtual const class Type *bottom_type() const{\n");
+       switch( data_type ) {
+@@ -1767,8 +1764,8 @@
+         break;
+       }
+       fprintf(fp,"  };\n");
+-    } 
+-/*    else if ( instr->_matrule && instr->_matrule->_rChild && 
++    }
++/*    else if ( instr->_matrule && instr->_matrule->_rChild &&
+         (  strcmp("ConvF2I",instr->_matrule->_rChild->_opType)==0
+         || strcmp("ConvD2I",instr->_matrule->_rChild->_opType)==0 ) ) {
+       // !!!!! !!!!!
+@@ -1778,14 +1775,14 @@
+       fprintf(fp,   " return  TypeInt::INT;");
+       fprintf(fp, " };\n");
+     }*/
+-    else if( instr->is_ideal_copy() && 
++    else if( instr->is_ideal_copy() &&
+               !strcmp(instr->_matrule->_lChild->_opType,"stackSlotP") ) {
+       // !!!!!
+       // Special hack for ideal Copy of pointer.  Bottom type is oop or not depending on input.
+       fprintf(fp,"  const Type            *bottom_type() const { return in(1)->bottom_type(); } // Copy?\n");
+     }
+     else if( instr->is_ideal_loadPC() ) {
+-      // LoadPCNode provides the return address of a call to native code. 
++      // LoadPCNode provides the return address of a call to native code.
+       // Define its bottom type to be TypeRawPtr::BOTTOM instead of TypePtr::BOTTOM
+       // since it is a pointer to an internal VM location and must have a zero offset.
+       // Allocation detects derived pointers, in part, by their non-zero offsets.
+@@ -1804,8 +1801,8 @@
+       // Special special hack to see if the Cmp? has been incorporated in the conditional move
+       MatchNode *rl = instr->_matrule->_rChild->_lChild;
+       if( rl && !strcmp(rl->_opType, "Binary") ) {
+-          MatchNode *rlr = rl->_rChild; 
+-	  if (rlr && strncmp(rlr->_opType, "Cmp", 3) == 0)
++          MatchNode *rlr = rl->_rChild;
++          if (rlr && strncmp(rlr->_opType, "Cmp", 3) == 0)
+             offset = 2;
+       }
+       // Special hack for ideal CMoveP; ideal type depends on inputs
+@@ -1813,14 +1810,12 @@
+         offset, offset+1, offset+1);
+     }
+     else if( instr->needs_base_oop_edge(_globalNames) ) {
+-      // Special hack for ideal AddP.  Bottom type is an oop IFF it has a 
++      // Special hack for ideal AddP.  Bottom type is an oop IFF it has a
+       // legal base-pointer input.  Otherwise it is NOT an oop.
+       fprintf(fp,"  const Type *bottom_type() const { return AddPNode::mach_bottom_type(this); } // AddP\n");
+     }
+-    else if ( instr->_ident && ( ! strcmp(instr->_ident,"tlsLoadP")
+-				 || ! strncmp(instr->_ident,"tlsLoadP_",9)) ) {
+-      // !!!!! 
+-      // Special hack for tlsLoadP 
++    else if (instr->is_tls_instruction()) {
++      // Special hack for tlsLoadP
+       fprintf(fp,"  const Type            *bottom_type() const { return TypeRawPtr::BOTTOM; } // tlsLoadP\n");
+     }
+     else if ( instr->is_ideal_if() ) {
+@@ -1832,7 +1827,7 @@
+ 
+     // Check where 'ideal_type' must be customized
+     /*
+-    if ( instr->_matrule && instr->_matrule->_rChild && 
++    if ( instr->_matrule && instr->_matrule->_rChild &&
+         (  strcmp("ConvF2I",instr->_matrule->_rChild->_opType)==0
+         || strcmp("ConvD2I",instr->_matrule->_rChild->_opType)==0 ) ) {
+       fprintf(fp,"  virtual uint           ideal_reg() const { return Compile::current()->matcher()->base2reg[Type::Int]; }\n");
+@@ -1846,7 +1841,7 @@
+     }
+     if ( memory_operand != InstructForm::NO_MEMORY_OPERAND ) {
+       if( memory_operand == InstructForm::MANY_MEMORY_OPERANDS ) {
+-	fprintf(fp,"  virtual const TypePtr *adr_type() const;\n");
++        fprintf(fp,"  virtual const TypePtr *adr_type() const;\n");
+       }
+       fprintf(fp,"  virtual const MachOper *memory_operand() const;\n");
+     }
+@@ -1951,7 +1946,7 @@
+ // Information needed to generate the ReduceOp mapping for the DFA
+ class OutputMachOperands : public OutputMap {
+ public:
+-  OutputMachOperands(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD) 
++  OutputMachOperands(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD)
+     : OutputMap(hpp, cpp, globals, AD) {};
+ 
+   void declaration() { }
+@@ -1986,21 +1981,21 @@
+   int end_rematerialize;
+   int end_instructions;
+ public:
+-  OutputMachOpcodes(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD) 
+-    : OutputMap(hpp, cpp, globals, AD), 
++  OutputMachOpcodes(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD)
++    : OutputMap(hpp, cpp, globals, AD),
+       begin_inst_chain_rule(-1), end_inst_chain_rule(-1), end_instructions(-1)
+   {};
+ 
+   void declaration() { }
+   void definition()  { fprintf(_cpp, "enum MachOpcodes {\n"); }
+-  void closing()     { 
+-    if( begin_inst_chain_rule != -1 ) 
++  void closing()     {
++    if( begin_inst_chain_rule != -1 )
+       fprintf(_cpp, "  _BEGIN_INST_CHAIN_RULE = %d,\n", begin_inst_chain_rule);
+-    if( end_inst_chain_rule   != -1 ) 
++    if( end_inst_chain_rule   != -1 )
+       fprintf(_cpp, "  _END_INST_CHAIN_RULE  = %d,\n", end_inst_chain_rule);
+-    if( begin_rematerialize   != -1 ) 
++    if( begin_rematerialize   != -1 )
+       fprintf(_cpp, "  _BEGIN_REMATERIALIZE   = %d,\n", begin_rematerialize);
+-    if( end_rematerialize     != -1 ) 
++    if( end_rematerialize     != -1 )
+       fprintf(_cpp, "  _END_REMATERIALIZE    = %d,\n", end_rematerialize);
+     // always execute since do_instructions() is true, and avoids trailing comma
+     fprintf(_cpp, "  _last_Mach_Node  = %d \n",  end_instructions);
+@@ -2014,19 +2009,19 @@
+ 
+   void record_position(OutputMap::position place, int idx ) {
+     switch(place) {
+-    case OutputMap::BEGIN_INST_CHAIN_RULES : 
++    case OutputMap::BEGIN_INST_CHAIN_RULES :
+       begin_inst_chain_rule = idx;
+       break;
+-    case OutputMap::END_INST_CHAIN_RULES : 
++    case OutputMap::END_INST_CHAIN_RULES :
+       end_inst_chain_rule   = idx;
+       break;
+-    case OutputMap::BEGIN_REMATERIALIZE : 
++    case OutputMap::BEGIN_REMATERIALIZE :
+       begin_rematerialize   = idx;
+       break;
+-    case OutputMap::END_REMATERIALIZE : 
++    case OutputMap::END_REMATERIALIZE :
+       end_rematerialize     = idx;
+       break;
+-    case OutputMap::END_INSTRUCTIONS : 
++    case OutputMap::END_INSTRUCTIONS :
+       end_instructions      = idx;
+       break;
+     default:
+@@ -2050,7 +2045,7 @@
+   int stagelen = (int)strlen("undefined");
+   int stagenum = 0;
+ 
+-  if (_pipeline) {		// Find max enum string length
++  if (_pipeline) {              // Find max enum string length
+     const char *stage;
+     for ( _pipeline->_stages.reset(); (stage = _pipeline->_stages.iter()) != NULL; ) {
+       int len = (int)strlen(stage);
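
Taken together, the output_h.cpp hunks above thread an explicit outputStream* through the generated format/int_format/ext_format/dump_spec methods instead of printing to the global tty. A minimal standalone sketch of that pattern, using stand-in classes rather than HotSpot's real outputStream hierarchy:

#include <cstdarg>
#include <cstdio>

class outputStream {              // stand-in for HotSpot's outputStream
  FILE* _f;
 public:
  explicit outputStream(FILE* f) : _f(f) {}
  void print(const char* fmt, ...) {
    va_list ap;
    va_start(ap, fmt);
    vfprintf(_f, fmt, ap);
    va_end(ap);
  }
};

outputStream tty_stream(stdout);  // the old implicit global target

struct ConIOper {                 // toy operand with one constant
  int _c0;
  // After the change: the destination stream is a parameter.
  void dump_spec(outputStream* st) const { st->print("#%d\n", _c0); }
};

int main() {
  ConIOper op; op._c0 = 42;
  op.dump_spec(&tty_stream);      // same behavior as printing to tty...
  outputStream err_stream(stderr);
  op.dump_spec(&err_stream);      // ...but now redirectable by the caller
  return 0;
}
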
+diff -ruN openjdk6/hotspot/src/share/vm/asm/assembler.cpp openjdk/hotspot/src/share/vm/asm/assembler.cpp
+--- openjdk6/hotspot/src/share/vm/asm/assembler.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/asm/assembler.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)assembler.cpp	1.41 07/05/05 17:05:03 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/asm/assembler.hpp openjdk/hotspot/src/share/vm/asm/assembler.hpp
+--- openjdk6/hotspot/src/share/vm/asm/assembler.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/asm/assembler.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)assembler.hpp	1.52 07/05/05 17:05:03 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This file contains platform-independant assembler declarations.
+@@ -40,18 +37,18 @@
+  * method.  A Label may be referenced by an instruction before it's bound
+  * (i.e., 'forward referenced').  'bind' stores the current code offset
+  * in the Label object.
+- * 
++ *
+  * If an instruction references a bound Label, the offset field(s) within
+  * the instruction are immediately filled in based on the Label's code
+  * offset.  If an instruction references an unbound label, that
+  * instruction is put on a list of instructions that must be patched
+  * (i.e., 'resolved') when the Label is bound.
+- * 
++ *
+  * 'bind' will call the platform-specific 'patch_instruction' method to
+  * fill in the offset field(s) for each unresolved instruction (if there
+  * are any).  'patch_instruction' lives in one of the
+  * cpu/<arch>/vm/assembler_<arch>* files.
+- * 
++ *
+  * Instead of using a linked list of unresolved instructions, a Label has
+  * an array of unresolved instruction code offsets.  _patch_index
+  * contains the total number of forward references.  If the Label's array
+@@ -64,7 +61,7 @@
+  */
+ class Label VALUE_OBJ_CLASS_SPEC {
+  private:
+-  enum { PatchCacheSize = 4 }; 
++  enum { PatchCacheSize = 4 };
+ 
+   // _loc encodes both the binding state (via its sign)
+   // and the binding locator (via its value) of a label.
+@@ -118,7 +115,7 @@
+   bool is_unused() const   { return _loc == -1 && _patch_index == 0; }
+ 
+   /**
+-   * Adds a reference to an unresolved displacement instruction to 
++   * Adds a reference to an unresolved displacement instruction to
+    * this unbound label
+    *
+    * @param cb         the code buffer being patched
+@@ -145,7 +142,7 @@
+ 
+ 
+ // The Abstract Assembler: Pure assembler doing NO optimizations on the
+-// instruction level; i.e., what you write is what you get. 
++// instruction level; i.e., what you write is what you get.
+ // The Assembler is generating code into a CodeBuffer.
+ class AbstractAssembler : public ResourceObj  {
+   friend class Label;
+@@ -163,7 +160,7 @@
+   // This routine is called with a label is used for an address.
+   // Labels and displacements truck in offsets, but target must return a PC.
+   address target(Label& L);            // return _code_section->target(L)
+-   
++
+   bool is8bit(int x) const             { return -0x80 <= x && x < 0x80; }
+   bool isByte(int x) const             { return 0 <= x && x < 0x100; }
+   bool isShiftCount(int x) const       { return 0 <= x && x < 32; }
+@@ -209,11 +206,11 @@
+   void flush();
+ 
+   // Accessors
+-  CodeBuffer*	code() const;          // _code_section->outer()
+-  CodeSection*	code_section() const   { return _code_section; }
++  CodeBuffer*   code() const;          // _code_section->outer()
++  CodeSection*  code_section() const   { return _code_section; }
+   int           sect() const;          // return _code_section->index()
+   address       pc() const             { return _code_pos; }
+-  int		offset() const         { return _code_pos - _code_begin; }
++  int           offset() const         { return _code_pos - _code_begin; }
+   int           locator() const;       // CodeBuffer::locator(offset(), sect())
+   OopRecorder*  oop_recorder() const   { return _oop_recorder; }
+   void      set_oop_recorder(OopRecorder* r) { _oop_recorder = r; }
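
The Label comment block above describes binding and forward-reference patching. A simplified sketch of that scheme, with assumed names and a plain int vector standing in for the code buffer; the real assembler spills past PatchCacheSize entries into an arena and patches machine instructions via the platform patch_instruction:

#include <cassert>
#include <vector>

class Label {
  int _loc;                       // bound code offset, or -1 if unbound
  enum { PatchCacheSize = 4 };
  int _patches[PatchCacheSize];   // offsets of forward-referencing sites
  int _patch_index;
 public:
  Label() : _loc(-1), _patch_index(0) {}
  bool is_bound() const { return _loc >= 0; }

  // Record a site that needs the target offset filled in later.
  void add_patch_at(int site) {
    assert(!is_bound() && _patch_index < PatchCacheSize);
    _patches[_patch_index++] = site;
  }

  // Bind to 'target' and resolve every recorded forward reference.
  void bind_to(int target, std::vector<int>& code) {
    _loc = target;
    for (int i = 0; i < _patch_index; i++)
      code[_patches[i]] = target - _patches[i];  // store relative offset
    _patch_index = 0;
  }
};

int main() {
  std::vector<int> code(8, 0);
  Label done;
  done.add_patch_at(2);           // branch at offset 2, target unknown yet
  done.bind_to(6, code);          // binding patches the recorded site
  assert(code[2] == 4);           // displacement from site 2 to target 6
  return 0;
}
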
+diff -ruN openjdk6/hotspot/src/share/vm/asm/assembler.inline.hpp openjdk/hotspot/src/share/vm/asm/assembler.inline.hpp
+--- openjdk6/hotspot/src/share/vm/asm/assembler.inline.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/asm/assembler.inline.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)assembler.inline.hpp	1.27 07/05/05 17:05:03 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ inline void AbstractAssembler::sync() {
+diff -ruN openjdk6/hotspot/src/share/vm/asm/codeBuffer.cpp openjdk/hotspot/src/share/vm/asm/codeBuffer.cpp
+--- openjdk6/hotspot/src/share/vm/asm/codeBuffer.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/asm/codeBuffer.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)codeBuffer.cpp	1.100 07/05/05 17:05:03 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -309,7 +306,7 @@
+            rtype == relocInfo::runtime_call_type ||
+            rtype == relocInfo::internal_word_type||
+            rtype == relocInfo::section_word_type ||
+-           rtype == relocInfo::external_word_type, 
++           rtype == relocInfo::external_word_type,
+            "code needs relocation information");
+     // leave behind an indication that we attempted a relocation
+     DEBUG_ONLY(_locs_start = _locs_limit = (relocInfo*)badAddress);
+@@ -501,7 +498,7 @@
+ #ifndef PRODUCT
+   tty->print_cr("Dangling address " PTR_FORMAT " in:", addr);
+   ((CodeBuffer*)this)->print();
+-#endif  
++#endif
+   ShouldNotReachHere();
+   return -1;
+ }
+@@ -649,7 +646,7 @@
+     Copy::disjoint_words((HeapWord*)cs->start(),
+                          (HeapWord*)dest_cs->start(),
+                          wsize / HeapWordSize);
+-    
++
+     if (dest->blob() == NULL) {
+       // Destination is a final resting place, not just another buffer.
+       // Normalize uninitialized bytes in the final padding.
+@@ -789,7 +786,7 @@
+ 
+   // Move all the code and relocations to the new blob:
+   relocate_code_to(&cb);
+-    
++
+   // Copy the temporary code buffer into the current code buffer.
+   // Basically, do {*this = cb}, except for some control information.
+   this->take_over_code_from(&cb);
+@@ -816,7 +813,7 @@
+   // Must already have disposed of the old blob somehow.
+   assert(blob() == NULL, "must be empty");
+ #ifdef ASSERT
+-  
++
+ #endif
+   // Take the new blob away from cb.
+   set_blob(cb->blob());
+diff -ruN openjdk6/hotspot/src/share/vm/asm/codeBuffer.hpp openjdk/hotspot/src/share/vm/asm/codeBuffer.hpp
+--- openjdk6/hotspot/src/share/vm/asm/codeBuffer.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/asm/codeBuffer.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)codeBuffer.hpp	1.63 07/05/17 15:49:26 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class  CodeComments;
+@@ -47,7 +44,7 @@
+   // always dangerous and suspect.
+ 
+   enum { frame_never_safe = -1 };
+-     
++
+ private:
+   int _values[max_Entries];
+ 
+@@ -167,9 +164,9 @@
+   bool allocates2(address pc) const { return pc >= _start && pc <= _limit; }
+ 
+   void    set_end(address pc)       { assert(allocates2(pc),""); _end = pc; }
+-  void    set_mark(address pc)      { assert(contains2(pc),"not in codeBuffer"); 
++  void    set_mark(address pc)      { assert(contains2(pc),"not in codeBuffer");
+                                       _mark = pc; }
+-  void    set_mark_off(int offset)  { assert(contains2(offset+_start),"not in codeBuffer"); 
++  void    set_mark_off(int offset)  { assert(contains2(offset+_start),"not in codeBuffer");
+                                       _mark = offset + _start; }
+   void    set_mark()                { _mark = _end; }
+   void    clear_mark()              { _mark = NULL; }
+@@ -282,7 +279,7 @@
+   typedef int csize_t;  // code size type; would be size_t except for history
+   enum {
+     // Here is the list of all possible sections, in order of ascending address.
+-    SECT_INSTS,		      // Executable instructions.
++    SECT_INSTS,               // Executable instructions.
+     SECT_STUBS,               // Outbound trampolines for supporting call sites.
+     SECT_CONSTS,              // Non-instruction data:  Floats, jump tables, etc.
+     SECT_LIMIT, SECT_NONE = -1
+@@ -302,7 +299,7 @@
+ 
+   CodeBuffer*  _before_expand;  // dead buffer, from before the last expansion
+ 
+-  BufferBlob*  _blob;		// optional buffer in CodeCache for generated code
++  BufferBlob*  _blob;           // optional buffer in CodeCache for generated code
+   address      _total_start;    // first address of combined memory buffer
+   csize_t      _total_size;     // size in bytes of combined memory buffer
+ 
+@@ -311,7 +308,7 @@
+   OopRecorder  _default_oop_recorder;  // override with initialize_oop_recorder
+   Arena*       _overflow_arena;
+ 
+-  address      _decode_begin;	// start address for decode
++  address      _decode_begin;   // start address for decode
+   address      decode_begin();
+ 
+   void initialize_misc(const char * name) {
+@@ -448,13 +445,13 @@
+   bool    is_pure() const               { return code_size() == total_code_size(); }
+ 
+   // size in bytes of output so far in the insts sections
+-  csize_t code_size() const             { return _insts.size(); } 
++  csize_t code_size() const             { return _insts.size(); }
+ 
+   // same as code_size(), except that it asserts there is no non-code here
+   csize_t pure_code_size() const        { assert(is_pure(), "no non-code");
+                                           return code_size(); }
+   // capacity in bytes of the insts sections
+-  csize_t code_capacity() const         { return _insts.capacity(); } 
++  csize_t code_capacity() const         { return _insts.capacity(); }
+ 
+   // number of bytes remaining in the insts section
+   csize_t code_remaining() const        { return _insts.remaining(); }
+@@ -523,9 +520,9 @@
+  public:
+   // Printing / Decoding
+   // decodes from decode_begin() to code_end() and sets decode_begin to end
+-  void    decode();		
+-  void    decode_all();		// decodes all the code
+-  void    skip_decode();	// sets decode_begin to code_end();
++  void    decode();
++  void    decode_all();         // decodes all the code
++  void    skip_decode();        // sets decode_begin to code_end();
+   void    print();
+ #endif
+ 
+@@ -543,4 +540,3 @@
+   if (remaining() < amount) { _outer->expand(this, amount); return true; }
+   return false;
+ }
+-
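
The inline function at the end of codeBuffer.hpp shows the grow-on-demand check an emitter performs before writing. A rough standalone equivalent with stand-in types; the real CodeBuffer reallocates whole blobs and fixes up relocations rather than resizing a vector:

#include <cassert>
#include <cstddef>
#include <cstring>
#include <vector>

struct CodeSection {
  std::vector<unsigned char> buf;
  size_t end;
  CodeSection() : buf(16), end(0) {}
  size_t remaining() const { return buf.size() - end; }

  // Grow only if 'amount' bytes would not fit; report whether we expanded.
  bool maybe_expand_to_ensure_remaining(size_t amount) {
    if (remaining() < amount) {
      buf.resize(buf.size() + amount + 64);
      return true;
    }
    return false;
  }

  void emit(const void* p, size_t n) {
    maybe_expand_to_ensure_remaining(n);
    memcpy(&buf[end], p, n);
    end += n;
  }
};

int main() {
  CodeSection cs;
  unsigned char nops[100];
  memset(nops, 0x90, sizeof(nops));
  cs.emit(nops, sizeof(nops));    // 100 bytes into a 16-byte section: expands
  assert(cs.end == 100);
  return 0;
}
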
+diff -ruN openjdk6/hotspot/src/share/vm/asm/register.cpp openjdk/hotspot/src/share/vm/asm/register.cpp
+--- openjdk6/hotspot/src/share/vm/asm/register.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/asm/register.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)register.cpp	1.11 07/05/05 17:05:04 JVM"
+-#endif
+ /*
+  * Copyright 2000 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/asm/register.hpp openjdk/hotspot/src/share/vm/asm/register.hpp
+--- openjdk6/hotspot/src/share/vm/asm/register.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/asm/register.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)register.hpp	1.13 07/05/05 17:05:03 JVM"
+-#endif
+ /*
+  * Copyright 2000-2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Use AbstractRegister as shortcut
+@@ -64,18 +61,18 @@
+ // invocation is terminated with a ;.
+ //
+ // CONSTANT_REGISTER_DECLARATION(Register, G0, 0);
+-// 
++//
+ // extern const Register G0 ;
+-// enum { G0_RegisterEnumValue = 0 } ; 
+-// 
++// enum { G0_RegisterEnumValue = 0 } ;
++//
+ // REGISTER_DECLARATION(Register, Gmethod, G5);
+-// 
++//
+ // extern const Register Gmethod ;
+-// enum { Gmethod_RegisterEnumValue = G5_RegisterEnumValue } ; 
+-// 
++// enum { Gmethod_RegisterEnumValue = G5_RegisterEnumValue } ;
++//
+ // REGISTER_DEFINITION(Register, G0);
+-// 
+-// const Register G0 = ( ( Register ) G0_RegisterEnumValue ) ; 
++//
++// const Register G0 = ( ( Register ) G0_RegisterEnumValue ) ;
+ //
+ 
+ #define AS_REGISTER(type,name)         ((type)name##_##type##EnumValue)
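
The register.hpp comment spells out what the declaration macros expand to. A compilable sketch of the same pattern, with Register reduced to an int typedef for illustration (the real type is a pointer-sized value class):

typedef int Register;   // stand-in for HotSpot's Register value class

#define CONSTANT_REGISTER_DECLARATION(type, name, value) \
  extern const type name;                                \
  enum { name##_##type##EnumValue = (value) }

#define REGISTER_DEFINITION(type, name) \
  const type name = ((type)name##_##type##EnumValue)

#define AS_REGISTER(type, name) ((type)name##_##type##EnumValue)

// Expands exactly as the comment above documents:
//   extern const Register G0; enum { G0_RegisterEnumValue = (0) };
CONSTANT_REGISTER_DECLARATION(Register, G0, 0);
REGISTER_DEFINITION(Register, G0);

int main() { return AS_REGISTER(Register, G0); }  // returns 0
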
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp openjdk/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)c1_Canonicalizer.cpp	1.57 07/05/05 17:05:05 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -82,7 +79,7 @@
+             case Bytecodes::_iadd: set_constant(a + b); return;
+             case Bytecodes::_isub: set_constant(a - b); return;
+             case Bytecodes::_imul: set_constant(a * b); return;
+-            case Bytecodes::_idiv: 
++            case Bytecodes::_idiv:
+               if (b != 0) {
+                 if (a == min_jint && b == -1) {
+                   set_constant(min_jint);
+@@ -362,7 +359,7 @@
+           set_constant(1);
+         break;
+       }
+-        
++
+       case floatTag: {
+         float vx = x->x()->type()->as_FloatConstant()->value();
+         float vy = x->y()->type()->as_FloatConstant()->value();
+@@ -376,7 +373,7 @@
+           set_constant(1);
+         break;
+       }
+-        
++
+       case doubleTag: {
+         double vx = x->x()->type()->as_DoubleConstant()->value();
+         double vy = x->y()->type()->as_DoubleConstant()->value();
+@@ -666,7 +663,7 @@
+       BlockBegin* no_inst_sux = x->sux_for(is_true(0, x->cond(), rc)); // successor for instanceof == 0
+       if (is_inst_sux == no_inst_sux && inst->is_loaded()) {
+         // both successors identical and klass is loaded => simplify to: Goto
+-        set_canonical(new Goto(is_inst_sux, x->state_before(), x->is_safepoint()));       
++        set_canonical(new Goto(is_inst_sux, x->state_before(), x->is_safepoint()));
+       } else {
+         // successors differ => simplify to: IfInstanceOf
+         set_canonical(new IfInstanceOf(inst->klass(), inst->obj(), true, inst->bci(), is_inst_sux, no_inst_sux));
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_Canonicalizer.hpp openjdk/hotspot/src/share/vm/c1/c1_Canonicalizer.hpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_Canonicalizer.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_Canonicalizer.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c1_Canonicalizer.hpp	1.29 07/05/05 17:05:05 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class Canonicalizer: InstructionVisitor {
+@@ -97,4 +94,3 @@
+   virtual void do_ProfileCall    (ProfileCall*     x);
+   virtual void do_ProfileCounter (ProfileCounter*  x);
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_CFGPrinter.cpp openjdk/hotspot/src/share/vm/c1/c1_CFGPrinter.cpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_CFGPrinter.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_CFGPrinter.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)c1_CFGPrinter.cpp	1.8 07/05/05 17:05:04 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -103,7 +100,7 @@
+ 
+ 
+ 
+-CFGPrinterOutput::CFGPrinterOutput() 
++CFGPrinterOutput::CFGPrinterOutput()
+  : _output(new(ResourceObj::C_HEAP) fileStream("output.cfg"))
+ {
+ }
+@@ -120,7 +117,7 @@
+   output()->dec();
+ }
+ 
+-void CFGPrinterOutput::print(const char* format, ...) { 
++void CFGPrinterOutput::print(const char* format, ...) {
+   output()->indent();
+ 
+   va_list ap;
+@@ -270,7 +267,7 @@
+ 
+ void CFGPrinterOutput::print_block(BlockBegin* block) {
+   print_begin("block");
+-  
++
+   print("name \"B%d\"", block->block_id());
+ 
+   print("from_bci %d", block->bci());
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_CFGPrinter.hpp openjdk/hotspot/src/share/vm/c1/c1_CFGPrinter.hpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_CFGPrinter.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_CFGPrinter.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)c1_CFGPrinter.hpp	1.7 07/05/05 17:05:05 JVM"
+-#endif
+ /*
+  * Copyright 2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #ifndef PRODUCT
+@@ -47,4 +44,3 @@
+ };
+ 
+ #endif
+-
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_CodeStubs.hpp openjdk/hotspot/src/share/vm/c1/c1_CodeStubs.hpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_CodeStubs.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_CodeStubs.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c1_CodeStubs.hpp	1.86 07/05/05 17:05:06 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class CodeEmitInfo;
+@@ -110,7 +107,7 @@
+  private:
+   Bytecodes::Code _bytecode;
+   LIR_Opr         _input;
+-  LIR_Opr         _result; 
++  LIR_Opr         _result;
+ 
+   static float float_zero;
+   static double double_zero;
+@@ -485,4 +482,3 @@
+   virtual void print_name(outputStream* out) const { out->print("ArrayCopyStub"); }
+ #endif // PRODUCT
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_Compilation.cpp openjdk/hotspot/src/share/vm/c1/c1_Compilation.cpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_Compilation.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_Compilation.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)c1_Compilation.cpp	1.159 07/05/17 15:49:28 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -175,7 +172,7 @@
+ 
+ void Compilation::emit_lir() {
+   CHECK_BAILOUT();
+-  
++
+   LIRGenerator gen(this, method());
+   {
+     PhaseTraceTime timeit(_t_lirGeneration);
+@@ -189,7 +186,7 @@
+ 
+     LinearScan* allocator = new LinearScan(hir(), &gen, frame_map());
+     set_allocator(allocator);
+-    // Assign physical registers to LIR operands using a linear scan algorithm.    
++    // Assign physical registers to LIR operands using a linear scan algorithm.
+     allocator->do_linear_scan();
+     CHECK_BAILOUT();
+ 
+@@ -290,13 +287,16 @@
+ }
+ 
+ void Compilation::install_code(int frame_size) {
++  // frame_size is in 32-bit words so adjust it to intptr_t words
++  assert(frame_size == frame_map()->framesize(), "must match");
++  assert(in_bytes(frame_map()->framesize_in_bytes()) % sizeof(intptr_t) == 0, "must be at least pointer aligned");
+   _env->register_method(
+     method(),
+     osr_bci(),
+     &_offsets,
+     in_bytes(_frame_map->sp_offset_for_orig_pc()),
+     code(),
+-    frame_size,
++    in_bytes(frame_map()->framesize_in_bytes()) / sizeof(intptr_t),
+     debug_info_recorder()->_oopmaps,
+     exception_handler_table(),
+     implicit_exception_table(),
+@@ -397,7 +397,7 @@
+         scope_depths->append(handler->scope_count());
+     }
+       pcos->append(handler->entry_pco());
+-      
++
+       // stop processing once we hit a catch any
+       if (handler->is_catch_all()) {
+         assert(i == handlers->length() - 1, "catch all must be last handler");
+@@ -435,7 +435,7 @@
+   assert(_arena == NULL, "shouldn't only one instance of Compilation in existence at a time");
+   _arena = Thread::current()->resource_area();
+   _compilation = this;
+-  _needs_debug_information = JvmtiExport::can_examine_or_deopt_anywhere() || 
++  _needs_debug_information = JvmtiExport::can_examine_or_deopt_anywhere() ||
+                                JavaMonitorsInStackTrace || AlwaysEmitDebugInfo || DeoptimizeALot;
+   _exception_info_list = new ExceptionInfoList();
+   _implicit_exception_table.set_size(0);
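
The install_code() change above converts the frame size from C1's 32-bit words to pointer-sized words before registering the method, asserting pointer alignment along the way; the two differ on 64-bit targets. A sketch of that unit conversion, with an illustrative framesize_in_bytes rather than the real FrameMap API:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Frame size in bytes, given a size in 32-bit words (assumed helper).
int framesize_in_bytes(int framesize_in_words32) {
  return framesize_in_words32 * 4;
}

int main() {
  int frame_words32 = 8;                          // 32 bytes of frame
  int bytes = framesize_in_bytes(frame_words32);
  assert(bytes % (int)sizeof(intptr_t) == 0);     // must be pointer aligned
  size_t ptr_words = bytes / sizeof(intptr_t);    // 8 on ILP32, 4 on LP64
  assert(ptr_words * sizeof(intptr_t) == (size_t)bytes);
  return 0;
}
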
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_Compilation.hpp openjdk/hotspot/src/share/vm/c1/c1_Compilation.hpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_Compilation.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_Compilation.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c1_Compilation.hpp	1.88 07/05/17 15:49:31 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class BlockBegin;
+@@ -58,7 +55,7 @@
+   friend class CompilationResourceObj;
+  private:
+ 
+-  static Arena* _arena; 
++  static Arena* _arena;
+   static Arena* arena() { return _arena; }
+ 
+   static Compilation* _compilation;
+@@ -101,7 +98,7 @@
+ 
+   ExceptionInfoList* exception_info_list() const { return _exception_info_list; }
+   ExceptionHandlerTable* exception_handler_table() { return &_exception_handler_table; }
+-  
++
+   LinearScan* allocator()                          { return _allocator;      }
+   void        set_allocator(LinearScan* allocator) { _allocator = allocator; }
+ 
+@@ -179,7 +176,7 @@
+ };
+ 
+ 
+-// Macro definitions for unified bailout-support 
++// Macro definitions for unified bailout-support
+ // The methods bailout() and bailed_out() are present in all classes
+ // that might bailout, but forward all calls to Compilation
+ #define BAILOUT(msg)               { bailout(msg); return;              }
+@@ -193,7 +190,7 @@
+  private:
+   Compilation* _compilation;
+   Instruction*  _previous;
+-  
++
+  public:
+   InstructionMark(Compilation* compilation, Instruction* instr) {
+     _compilation = compilation;
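
The BAILOUT/CHECK_BAILOUT macros documented in the c1_Compilation.hpp hunk above give every phase an early-exit path once compilation has failed. A self-contained sketch of the idiom; the real bailout()/bailed_out() forward to the Compilation object rather than a global:

#include <cstdio>

static const char* _bailout_msg = 0;

void bailout(const char* msg) { if (_bailout_msg == 0) _bailout_msg = msg; }
bool bailed_out() { return _bailout_msg != 0; }

#define BAILOUT(msg)    { bailout(msg); return; }
#define CHECK_BAILOUT() { if (bailed_out()) return; }

void risky_phase(bool too_big) {
  if (too_big) BAILOUT("method too large");
  // ... normal work ...
}

void compile(bool too_big) {
  risky_phase(too_big);
  CHECK_BAILOUT();                // stop the pipeline once a phase gave up
  printf("emitted code\n");
}

int main() {
  compile(true);                  // bails out, emits nothing
  if (bailed_out()) printf("bailed out: %s\n", _bailout_msg);
  return 0;
}
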
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_Compiler.cpp openjdk/hotspot/src/share/vm/c1/c1_Compiler.cpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_Compiler.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_Compiler.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)c1_Compiler.cpp	1.105 07/05/05 17:05:06 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -68,7 +65,7 @@
+     _compiling = true;
+   }
+ #endif // TIERED
+-  { 
++  {
+     // We are nested here because we need for the destructor
+     // of Compilation to occur before we release the any
+     // competing compiler thread
+@@ -89,5 +86,3 @@
+ void Compiler::print_timers() {
+   Compilation::print_timers();
+ }
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_Compiler.hpp openjdk/hotspot/src/share/vm/c1/c1_Compiler.hpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_Compiler.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_Compiler.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c1_Compiler.hpp	1.49 07/05/05 17:05:06 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // There is one instance of the Compiler per CompilerThread.
+@@ -53,7 +50,7 @@
+ 
+   // Missing feature tests
+   virtual bool supports_native()                 { return true; }
+-  virtual bool supports_osr   ()                 { return true; } 
++  virtual bool supports_osr   ()                 { return true; }
+ 
+   // Customization
+   virtual bool needs_adapters         ()         { return false; }
+@@ -68,4 +65,3 @@
+   // Print compilation timers and statistics
+   virtual void print_timers();
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_Defs.cpp openjdk/hotspot/src/share/vm/c1/c1_Defs.cpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_Defs.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_Defs.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)c1_Defs.cpp	1.10 07/05/05 17:05:06 JVM"
+-#endif
+ /*
+  * Copyright 2001 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,9 +19,8 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+ #include "incls/_c1_Defs.cpp.incl"
+-
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_Defs.hpp openjdk/hotspot/src/share/vm/c1/c1_Defs.hpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_Defs.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_Defs.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c1_Defs.hpp	1.22 07/05/05 17:05:05 JVM"
+-#endif
+ /*
+  * Copyright 2000-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // set frame size and return address offset to these values in blobs
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_FpuStackSim.hpp openjdk/hotspot/src/share/vm/c1/c1_FpuStackSim.hpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_FpuStackSim.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_FpuStackSim.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c1_FpuStackSim.hpp	1.7 07/05/05 17:05:06 JVM"
+-#endif
+ /*
+  * Copyright 2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Provides location for forward declaration of this class, which is
+@@ -30,4 +27,3 @@
+ class FpuStackSim;
+ 
+ # include "incls/_c1_FpuStackSim_pd.hpp.incl"  // platform dependent declarations
+-
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_FrameMap.cpp openjdk/hotspot/src/share/vm/c1/c1_FrameMap.cpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_FrameMap.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_FrameMap.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)c1_FrameMap.cpp	1.37 07/05/05 17:05:06 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -83,7 +80,8 @@
+     args->append(opr);
+     if (opr->is_address()) {
+       LIR_Address* addr = opr->as_address_ptr();
+-      out_preserve = MAX2(out_preserve, addr->disp() / 4);
++      assert(addr->disp() == (int)addr->disp(), "out of range value");
++      out_preserve = MAX2(out_preserve, (intptr_t)addr->disp() / 4);
+     }
+     i += type2size[t];
+   }
+@@ -128,10 +126,13 @@
+     // C calls are always outgoing
+     bool outgoing = true;
+     LIR_Opr opr = map_to_opr(t, regs + i, outgoing);
++    // they might be of different types if for instance floating point
++    // values are passed in cpu registers, but the sizes must match.
++    assert(type2size[opr->type()] == type2size[t], "type mismatch");
+     args->append(opr);
+     if (opr->is_address()) {
+       LIR_Address* addr = opr->as_address_ptr();
+-      out_preserve = MAX2(out_preserve, addr->disp() / 4);
++      out_preserve = MAX2(out_preserve, (intptr_t)addr->disp() / 4);
+     }
+     i += type2size[t];
+   }
+@@ -172,7 +173,7 @@
+     LIR_Opr opr = _incoming_arguments->at(i);
+     if (opr->is_address()) {
+       LIR_Address* address = opr->as_address_ptr();
+-      _argument_locations->at_put(java_index, address->disp());
++      _argument_locations->at_put(java_index, address->disp() - STACK_BIAS);
+       _incoming_arguments->args()->at_put(i, LIR_OprFact::stack(java_index, as_BasicType(as_ValueType(address->type()))));
+     }
+     java_index += type2size[opr->type()];
+@@ -187,7 +188,7 @@
+   _num_spills = nof_slots;
+   assert(_framesize == -1, "should only be calculated once");
+   _framesize =  round_to(in_bytes(sp_offset_for_monitor_base(0)) +
+-                         _num_monitors * sizeof(BasicObjectLock) + 
++                         _num_monitors * sizeof(BasicObjectLock) +
+                          sizeof(intptr_t) +                        // offset of deopt orig pc
+                          frame_pad_in_bytes,
+                          StackAlignmentInBytes) / 4;
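
The framesize computation above sums the monitor area, the deopt orig-pc slot, and the frame padding, rounds the byte total up to the platform stack alignment, and converts to 32-bit words with the final /4. A sketch with assumed constants; StackAlignmentInBytes and the BasicObjectLock size are platform-defined in the real VM:

#include <cassert>
#include <cstddef>
#include <cstdint>

const size_t StackAlignmentInBytes = 16;          // example platform value
const size_t sizeof_BasicObjectLock = 2 * sizeof(void*);

size_t round_to(size_t x, size_t align) {         // align: power of two
  return (x + align - 1) & ~(align - 1);
}

size_t framesize_in_words32(size_t monitor_base_bytes, int num_monitors,
                            size_t frame_pad_bytes) {
  size_t bytes = monitor_base_bytes
               + num_monitors * sizeof_BasicObjectLock
               + sizeof(intptr_t)                 // deopt orig pc slot
               + frame_pad_bytes;
  return round_to(bytes, StackAlignmentInBytes) / 4;
}

int main() {
  size_t words = framesize_in_words32(16, 2, 0);
  assert((words * 4) % StackAlignmentInBytes == 0);  // frame stays aligned
  return 0;
}
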
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_FrameMap.hpp openjdk/hotspot/src/share/vm/c1/c1_FrameMap.hpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_FrameMap.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_FrameMap.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c1_FrameMap.hpp	1.59 07/05/05 17:05:07 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class ciMethod;
+@@ -49,7 +46,7 @@
+ //  +----------+---+----------+-------+------------------------+-----+
+ //  |arguments | x | monitors | spill | reserved argument area | ABI |
+ //  +----------+---+----------+-------+------------------------+-----+
+-//  
++//
+ //  x =  ABI area (SPARC) or  return adress and link (i486)
+ //  ABI  = ABI area (SPARC) or nothing (i486)
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_globals.cpp openjdk/hotspot/src/share/vm/c1/c1_globals.cpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_globals.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_globals.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)c1_globals.cpp	1.11 07/05/05 17:05:09 JVM"
+-#endif
+ /*
+  * Copyright 2000-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_globals.hpp openjdk/hotspot/src/share/vm/c1/c1_globals.hpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_globals.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_globals.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c1_globals.hpp	1.104 07/05/05 17:05:10 JVM"
+-#endif
+ /*
+  * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+@@ -333,4 +330,3 @@
+ // #include "incls/_c1_globals_pd.hpp.incl"
+ 
+ C1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_NOTPRODUCT_FLAG)
+-
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp openjdk/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)c1_GraphBuilder.cpp	1.255 07/05/17 15:49:34 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -82,14 +79,14 @@
+ 
+ // Implementation of BlockListBuilder
+ 
+-BlockListBuilder::BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci) 
++BlockListBuilder::BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci)
+  : _compilation(compilation)
+  , _scope(scope)
+  , _blocks(16)
+  , _bci2block(new BlockList(scope->method()->code_size(), NULL))
+  , _next_block_number(0)
+- , _active(NULL, 0)         // size not known yet
+- , _visited(NULL, 0)        // size not known yet
++ , _active()         // size not known yet
++ , _visited()        // size not known yet
+  , _next_loop_index(0)
+  , _loop_map() // size not known yet
+ {
+@@ -141,7 +138,7 @@
+   if (block == NULL) {
+     block = new BlockBegin(cur_bci);
+     block->init_stores_to_locals(method()->max_locals());
+-    _bci2block->at_put(cur_bci, block);    
++    _bci2block->at_put(cur_bci, block);
+     _blocks.append(block);
+ 
+     assert(predecessor == NULL || predecessor->bci() < cur_bci, "targets for backward branches must already exist");
+@@ -173,7 +170,7 @@
+   // Draws edges from a block to its exception handlers
+   XHandlers* list = xhandlers();
+   const int n = list->length();
+- 
++
+   for (int i = 0; i < n; i++) {
+     XHandler* h = list->handler_at(i);
+ 
+@@ -215,7 +212,7 @@
+   // during bytecode iteration. This would require the creation of a new block at the
+   // branch target and a modification of the successor lists.
+   BitMap bci_block_start = method()->bci_block_start();
+-  
++
+   ciBytecodeStream s(method());
+   while (s.next() != ciBytecodeStream::EOBC()) {
+     int cur_bci = s.cur_bci();
+@@ -311,7 +308,7 @@
+         current = NULL;
+         break;
+ 
+-      case Bytecodes::_tableswitch: { 
++      case Bytecodes::_tableswitch: {
+         // set block for each case
+         Bytecode_tableswitch *switch_ = Bytecode_tableswitch_at(s.cur_bcp());
+         int l = switch_->length();
+@@ -323,10 +320,10 @@
+         break;
+       }
+ 
+-      case Bytecodes::_lookupswitch: { 
++      case Bytecodes::_lookupswitch: {
+         // set block for each case
+         Bytecode_lookupswitch *switch_ = Bytecode_lookupswitch_at(s.cur_bcp());
+-        int l = switch_->number_of_pairs();        
++        int l = switch_->number_of_pairs();
+         for (int i = 0; i < l; i++) {
+           make_block_at(cur_bci + switch_->pair_at(i)->offset(), current);
+         }
+@@ -342,7 +339,7 @@
+ void BlockListBuilder::mark_loops() {
+   ResourceMark rm;
+ 
+-  _active = BitMap(BlockBegin::number_of_blocks());         _active.clear(); 
++  _active = BitMap(BlockBegin::number_of_blocks());         _active.clear();
+   _visited = BitMap(BlockBegin::number_of_blocks());        _visited.clear();
+   _loop_map = intArray(BlockBegin::number_of_blocks(), 0);
+   _next_loop_index = 0;
+@@ -413,9 +410,9 @@
+ 
+   if (block->is_set(BlockBegin::parser_loop_header_flag)) {
+     int header_loop_state = _loop_map.at(block_id);
+-    assert(is_power_of_2(header_loop_state), "exactly one bit must be set");
++    assert(is_power_of_2((unsigned)header_loop_state), "exactly one bit must be set");
+ 
+-    // If the highest bit is set (i.e. when integer value is negative), the method 
++    // If the highest bit is set (i.e. when integer value is negative), the method
+     // has 32 or more loops. This bit is never cleared because it is used for multiple loops
+     if (header_loop_state >= 0) {
+       clear_bits(loop_state, header_loop_state);
+@@ -730,7 +727,7 @@
+       // BlockBegin::try_merge returns false when the flag is set, this leads
+       // to a compilation bailout
+       if (block->is_set(BlockBegin::was_visited_flag))  new_block->set(BlockBegin::was_visited_flag);
+-      
++
+       bci2block()->at_put(bci, new_block);
+       block = new_block;
+     }
+@@ -897,19 +894,19 @@
+       case T_FLOAT  : t = new FloatConstant   (con.as_float  ()); break;
+       case T_DOUBLE : t = new DoubleConstant  (con.as_double ()); break;
+       case T_ARRAY  : t = new ArrayConstant   (con.as_object ()->as_array   ()); break;
+-      case T_OBJECT : 
++      case T_OBJECT :
+        {
+         ciObject* obj = con.as_object();
+         if (obj->is_klass()) {
+           ciKlass* klass = obj->as_klass();
+           if (!klass->is_loaded() || PatchALot) {
+             patch_state = state()->copy();
+-            t = new ObjectConstant(obj); 
++            t = new ObjectConstant(obj);
+           } else {
+-            t = new InstanceConstant(klass->java_mirror()); 
++            t = new InstanceConstant(klass->java_mirror());
+           }
+         } else {
+-          t = new InstanceConstant(obj->as_instance()); 
++          t = new InstanceConstant(obj->as_instance());
+         }
+         break;
+        }
+@@ -1233,7 +1230,7 @@
+ 
+ void GraphBuilder::ret(int local_index) {
+   if (!parsing_jsr()) BAILOUT("ret encountered while not parsing subroutine");
+-  
++
+   if (local_index != scope_data()->jsr_return_address_local()) {
+     BAILOUT("can not handle complicated jsr/ret constructs");
+   }
+@@ -1396,7 +1393,7 @@
+     // the continuation point.
+     append_with_bci(goto_callee, scope_data()->continuation()->bci());
+     incr_num_returns();
+-    
++
+     return;
+   }
+ 
+@@ -1423,7 +1420,7 @@
+   BasicType field_type = field->type()->basic_type();
+   ValueType* type = as_ValueType(field_type);
+   // call will_link again to determine if the field is valid.
+-  const bool is_loaded = holder->is_loaded() && 
++  const bool is_loaded = holder->is_loaded() &&
+                          field->will_link(method()->holder(), code);
+   const bool is_initialized = is_loaded && holder->is_initialized();
+ 
+@@ -1464,7 +1461,7 @@
+             constant =  new Constant(as_ValueType(field_val));
+           }
+           break;
+-          
++
+         default:
+           constant = new Constant(as_ValueType(field_val));
+         }
+@@ -1517,10 +1514,10 @@
+ }
+ 
+ 
+-Dependencies* GraphBuilder::dependency_recorder() const { 
++Dependencies* GraphBuilder::dependency_recorder() const {
+   assert(DeoptC1, "need debug information");
+   compilation()->set_needs_debug_information(true);
+-  return compilation()->dependency_recorder(); 
++  return compilation()->dependency_recorder();
+ }
+ 
+ 
+@@ -1596,7 +1593,7 @@
+       actual_recv = receiver_klass;
+     }
+ 
+-    if ((code == Bytecodes::_invokevirtual && callee_holder->is_initialized()) || 
++    if ((code == Bytecodes::_invokevirtual && callee_holder->is_initialized()) ||
+         (code == Bytecodes::_invokeinterface && callee_holder->is_initialized() && !actual_recv->is_interface())) {
+       // Use CHA on the receiver to select a more precise method.
+       cha_monomorphic_target = target->find_monomorphic_target(calling_klass, callee_holder, actual_recv);
+@@ -1640,7 +1637,7 @@
+   }
+ 
+   if (cha_monomorphic_target != NULL) {
+-    if (!(target->is_final_method())) {         
++    if (!(target->is_final_method())) {
+       // If we inlined because CHA revealed only a single target method,
+       // then we are dependent on that target method not getting overridden
+       // by dynamic class loading.  Be sure to test the "static" receiver
+@@ -1652,7 +1649,7 @@
+   }
+   // check if we could do inlining
+   if (!PatchALot && Inline && klass->is_loaded() &&
+-      (klass->is_initialized() || klass->is_interface() && target->holder()->is_initialized()) 
++      (klass->is_initialized() || klass->is_interface() && target->holder()->is_initialized())
+       && target->will_link(klass, callee_holder, code)) {
+     // callee is known => check if we have static binding
+     assert(target->is_loaded(), "callee must be known");
+@@ -1715,7 +1712,7 @@
+     vtable_index = target->resolve_vtable_index(calling_klass, callee_holder);
+   }
+ #endif
+-  
++
+   if (recv != NULL &&
+       (code == Bytecodes::_invokespecial ||
+        !is_loaded || target->is_final() ||
+@@ -2026,7 +2023,7 @@
+ 
+         // xhandler start with an empty expression stack
+         s->truncate_stack(cur_scope_data->caller_stack_size());
+-        
++
+         // Note: Usually this join must work. However, very
+         // complicated jsr-ret structures where we don't ret from
+         // the subroutine can cause the objects on the monitor
+@@ -2137,7 +2134,7 @@
+     Value subst = NULL;
+     int opd_count = phi->operand_count();
+     for (int i = 0; i < opd_count; i++) {
+-      Value opd = phi->operand_at(i); 
++      Value opd = phi->operand_at(i);
+       assert(opd != NULL, "Operand must exist!");
+ 
+       if (opd->type()->is_illegal()) {
+@@ -2145,8 +2142,8 @@
+         phi->make_illegal();
+         phi->clear(Phi::visited);
+         return phi;
+-      } 
+-      
++      }
++
+       Value new_opd = simplify(opd);
+       assert(new_opd != NULL, "Simplified operand must exist!");
+ 
+@@ -2162,15 +2159,15 @@
+       }
+     }
+ 
+-    // sucessfully simplified phi function 
++    // sucessfully simplified phi function
+     assert(subst != NULL, "illegal phi function");
+     _has_substitutions = true;
+     phi->clear(Phi::visited);
+     phi->set_subst(subst);
+ 
+ #ifndef PRODUCT
+-    if (PrintPhiFunctions) { 
+-      tty->print_cr("simplified phi function %c%d to %c%d (Block B%d)", phi->type()->tchar(), phi->id(), subst->type()->tchar(), subst->id(), phi->block()->block_id()); 
++    if (PrintPhiFunctions) {
++      tty->print_cr("simplified phi function %c%d to %c%d (Block B%d)", phi->type()->tchar(), phi->id(), subst->type()->tchar(), subst->id(), phi->block()->block_id());
+     }
+ #endif
+ 
+@@ -2747,13 +2744,13 @@
+ 
+ void GraphBuilder::setup_osr_entry_block() {
+   assert(compilation()->is_osr_compile(), "only for osrs");
+-  
++
+   int osr_bci = compilation()->osr_bci();
+   ciBytecodeStream s(method());
+   s.reset_to_bci(osr_bci);
+   s.next();
+   scope_data()->set_stream(&s);
+-  
++
+   // create a new block to be the osr setup code
+   _osr_entry = new BlockBegin(osr_bci);
+   _osr_entry->set(BlockBegin::osr_entry_flag);
+@@ -2771,8 +2768,8 @@
+   Value e = append(new OsrEntry());
+   e->set_needs_null_check(false);
+ 
+-  // OSR buffer is 
+-  // 
++  // OSR buffer is
++  //
+   // locals[nlocals-1..0]
+   // monitors[number_of_locks-1..0]
+   //
+@@ -2852,7 +2849,7 @@
+   if (method()->is_synchronized()) {
+     state->lock(scope(), NULL);
+   }
+-  
++
+   return state;
+ }
+ 
+@@ -2889,7 +2886,7 @@
+     sync_handler->set(BlockBegin::exception_entry_flag);
+     sync_handler->set(BlockBegin::is_on_work_list_flag);
+     sync_handler->set(BlockBegin::default_exception_handler_flag);
+-    
++
+     ciExceptionHandler* desc = new ciExceptionHandler(method()->holder(), 0, method()->code_size(), -1, 0);
+     XHandler* h = new XHandler(desc);
+     h->set_entry_block(sync_handler);
+@@ -2922,7 +2919,7 @@
+       s.reset_to_bci(0);
+       scope_data()->set_stream(&s);
+       s.next();
+-      
++
+       // setup the initial block state
+       _block = start_block;
+       _state = start_block->state()->copy();
+@@ -2933,7 +2930,7 @@
+       bool result = try_inline_intrinsics(scope->method());
+       if (!result) BAILOUT("failed to inline intrinsic");
+       method_return(dpop());
+-      
++
+       // connect the begin and end blocks and we're all done.
+       BlockEnd* end = last()->as_BlockEnd();
+       block()->set_end(end);
+@@ -2952,10 +2949,10 @@
+     if (method()->is_synchronized()) {
+       lock = method()->is_static() ? new Constant(new InstanceConstant(method()->holder()->java_mirror())) :
+                                      _initial_state->local_at(0);
+-      
++
+       sync_handler->state()->unlock();
+       sync_handler->state()->lock(scope, lock);
+-      
++
+     }
+     fill_sync_handler(lock, sync_handler, true);
+   }
+@@ -2985,7 +2982,7 @@
+   // return a new ValueStack representing just the current lock stack
+   // (for debug info at safepoints in exception throwing or handling)
+   ValueStack* new_stack = state()->copy_locks();
+-  return new_stack; 
++  return new_stack;
+ }
+ 
+ 
+@@ -3014,13 +3011,13 @@
+     // intrinsics can be native or not
+     return true;
+   } else if (callee->is_native()) {
+-    // non-intrinsic natives cannot be inlined 
++    // non-intrinsic natives cannot be inlined
+     INLINE_BAILOUT("non-intrinsic native")
+   } else if (callee->is_abstract()) {
+     INLINE_BAILOUT("abstract")
+   } else {
+     return try_inline_full(callee, holder_known);
+-  }  
++  }
+ }
+ 
+ 
+@@ -3102,9 +3099,9 @@
+     case vmIntrinsics::_putInt    : return append_unsafe_put_obj(callee, T_INT,     false);
+     case vmIntrinsics::_putLong   : return append_unsafe_put_obj(callee, T_LONG,    false);
+     case vmIntrinsics::_putFloat  : return append_unsafe_put_obj(callee, T_FLOAT,   false);
+-    case vmIntrinsics::_putDouble : return append_unsafe_put_obj(callee, T_DOUBLE,  false); 
++    case vmIntrinsics::_putDouble : return append_unsafe_put_obj(callee, T_DOUBLE,  false);
+ 
+-    case vmIntrinsics::_getObjectVolatile : return append_unsafe_get_obj(callee, T_OBJECT,  true); 
++    case vmIntrinsics::_getObjectVolatile : return append_unsafe_get_obj(callee, T_OBJECT,  true);
+     case vmIntrinsics::_getBooleanVolatile: return append_unsafe_get_obj(callee, T_BOOLEAN, true);
+     case vmIntrinsics::_getByteVolatile   : return append_unsafe_get_obj(callee, T_BYTE,    true);
+     case vmIntrinsics::_getShortVolatile  : return append_unsafe_get_obj(callee, T_SHORT,   true);
+@@ -3153,13 +3150,13 @@
+     case vmIntrinsics::_putOrderedInt    : return append_unsafe_put_obj(callee, T_INT,     true);
+     case vmIntrinsics::_putOrderedLong   : return append_unsafe_put_obj(callee, T_LONG,    true);
+ 
+-    case vmIntrinsics::_compareAndSwapLong: 
++    case vmIntrinsics::_compareAndSwapLong:
+       if (!VM_Version::supports_cx8()) return false;
+       // fall through
+-    case vmIntrinsics::_compareAndSwapInt: 
+-    case vmIntrinsics::_compareAndSwapObject: 
++    case vmIntrinsics::_compareAndSwapInt:
++    case vmIntrinsics::_compareAndSwapObject:
+       append_unsafe_CAS(callee);
+-      return true; 
++      return true;
+ 
+     default                       : return false; // do not inline
+   }
+@@ -3238,7 +3235,7 @@
+ 
+   // If we bailed out during parsing, return immediately (this is bad news)
+   CHECK_BAILOUT_(false);
+-  
++
+   // Detect whether the continuation can actually be reached. If not,
+   // it has not had state set by the join() operations in
+   // iterate_bytecodes_for_block()/ret() and we should not touch the
+@@ -3322,7 +3319,7 @@
+ 
+     // exit the monitor in the context of the synchronized method
+     monitorexit(lock, SynchronizationEntryBCI);
+-    
++
+     // exit the context of the synchronized method
+     if (!default_handler) {
+       pop_scope();
+@@ -3361,7 +3358,7 @@
+ 
+   // Proper inlining of methods with jsrs requires a little more work.
+   if (callee->has_jsrs()                 ) INLINE_BAILOUT("jsrs not handled properly by inliner yet");
+- 
++
+   // now perform tests that are based on flag settings
+   if (inline_level() > MaxInlineLevel                         ) INLINE_BAILOUT("too-deep inlining");
+   if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("too-deep recursive inlining");
+@@ -3472,7 +3469,7 @@
+       store_local(callee_state, arg, arg->type()->base(), par_no);
+     }
+   }
+-  
++
+   // Remove args from stack.
+   // Note that we preserve locals state in case we can use it later
+   // (see use of pop_scope() below)
+@@ -3622,7 +3619,7 @@
+ 
+   callee_scope->set_caller_state(state());
+   set_state(state()->push_scope(callee_scope));
+-  
++
+   ScopeData* data = new ScopeData(scope_data());
+   data->set_scope(callee_scope);
+   data->set_bci2block(blb.bci2block());
+@@ -3666,12 +3663,14 @@
+   _scope_data = scope_data()->parent();
+ }
+ 
+-
+ bool GraphBuilder::append_unsafe_get_obj(ciMethod* callee, BasicType t, bool is_volatile) {
+   if (InlineUnsafeOps) {
+     Values* args = state()->pop_arguments(callee->arg_size());
+     null_check(args->at(0));
+-    Instruction* offset = append(new Convert(Bytecodes::_l2i, args->at(2), as_ValueType(T_INT)));
++    Instruction* offset = args->at(2);
++#ifndef _LP64
++    offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
++#endif
+     Instruction* op = append(new UnsafeGetObject(t, args->at(1), offset, is_volatile));
+     push(op->type(), op);
+     compilation()->set_has_unsafe_access(true);
+@@ -3684,7 +3683,10 @@
+   if (InlineUnsafeOps) {
+     Values* args = state()->pop_arguments(callee->arg_size());
+     null_check(args->at(0));
+-    Instruction* offset = append(new Convert(Bytecodes::_l2i, args->at(2), as_ValueType(T_INT)));
++    Instruction* offset = args->at(2);
++#ifndef _LP64
++    offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
++#endif
+     Instruction* op = append(new UnsafePutObject(t, args->at(1), offset, args->at(3), is_volatile));
+     compilation()->set_has_unsafe_access(true);
+     kill_all();
+@@ -3725,7 +3727,10 @@
+     } else {
+       null_check(args->at(0));
+     }
+-    Instruction* offset = append(new Convert(Bytecodes::_l2i, args->at(obj_arg_index + 1), as_ValueType(T_INT)));
++    Instruction* offset = args->at(obj_arg_index + 1);
++#ifndef _LP64
++    offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
++#endif
+     Instruction* op = is_store ? append(new UnsafePrefetchWrite(args->at(obj_arg_index), offset))
+                                : append(new UnsafePrefetchRead (args->at(obj_arg_index), offset));
+     compilation()->set_has_unsafe_access(true);
+@@ -3742,7 +3747,7 @@
+   // Pop off some args to speically handle, then push back
+   Value newval = args->pop();
+   Value cmpval = args->pop();
+-  Value long_offset = args->pop();
++  Value offset = args->pop();
+   Value src = args->pop();
+   Value unsafe_obj = args->pop();
+ 
+@@ -3750,7 +3755,9 @@
+   // generation, but must be null checked
+   null_check(unsafe_obj);
+ 
+-  Instruction* offset = append(new Convert(Bytecodes::_l2i, long_offset, as_ValueType(T_INT)));
++#ifndef _LP64
++  offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
++#endif
+ 
+   args->push(src);
+   args->push(offset);
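+All four Unsafe hunks above make the same change: sun.misc.Unsafe passes
+its offset argument as a Java long, and the old code unconditionally
+narrowed it with an l2i conversion, which is only correct for a 32-bit VM.
+The port keeps the long operand as-is on LP64 and narrows only under
+#ifndef _LP64. A freestanding sketch of the effect (plain C++ stand-ins,
+not the C1 IR):
+
+    #include <stdint.h>
+
+    int main() {
+      int64_t java_offset = 24;             // Unsafe's long offset operand
+    #ifndef _LP64
+      int32_t machine_offset = (int32_t)java_offset;  // the l2i conversion
+    #else
+      int64_t machine_offset = java_offset; // already pointer-width, no-op
+    #endif
+      return machine_offset == 24 ? 0 : 1;
+    }
+
+This is presumably also why the InstructionPrinter hunk further down starts
+printing the offset as a separate operand.
+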
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp openjdk/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c1_GraphBuilder.hpp	1.75 07/05/17 15:49:37 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class MemoryBuffer;
+@@ -62,7 +59,7 @@
+     BlockBegin*  _continuation;
+ 
+     // Without return value of inlined method on stack
+-    ValueStack*  _continuation_state; 
++    ValueStack*  _continuation_state;
+ 
+     // Was this ScopeData created only for the parsing and inlining of
+     // a jsr?
+@@ -391,4 +388,3 @@
+ 
+   BlockBegin* start() const                      { return _start; }
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_Instruction.cpp openjdk/hotspot/src/share/vm/c1/c1_Instruction.cpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_Instruction.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_Instruction.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)c1_Instruction.cpp	1.90 07/05/05 17:05:05 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -196,22 +193,22 @@
+ }
+ 
+ 
+-void ArithmeticOp::other_values_do(void f(Value*)) { 
++void ArithmeticOp::other_values_do(void f(Value*)) {
+   if (lock_stack() != NULL) lock_stack()->values_do(f);
+ }
+ 
+-void NullCheck::other_values_do(void f(Value*)) { 
++void NullCheck::other_values_do(void f(Value*)) {
+   lock_stack()->values_do(f);
+ }
+ 
+-void AccessArray::other_values_do(void f(Value*)) { 
++void AccessArray::other_values_do(void f(Value*)) {
+   if (lock_stack() != NULL) lock_stack()->values_do(f);
+ }
+ 
+ 
+ // Implementation of AccessField
+ 
+-void AccessField::other_values_do(void f(Value*)) { 
++void AccessField::other_values_do(void f(Value*)) {
+   if (state_before() != NULL) state_before()->values_do(f);
+   if (lock_stack() != NULL) lock_stack()->values_do(f);
+ }
+@@ -235,7 +232,7 @@
+     case Bytecodes::_imul: // fall through
+     case Bytecodes::_lmul: // fall through
+     case Bytecodes::_fmul: // fall through
+-    case Bytecodes::_dmul: return true; 
++    case Bytecodes::_dmul: return true;
+   }
+   return false;
+ }
+@@ -273,7 +270,7 @@
+ 
+ // Implementation of CompareOp
+ 
+-void CompareOp::other_values_do(void f(Value*)) { 
++void CompareOp::other_values_do(void f(Value*)) {
+   if (state_before() != NULL) state_before()->values_do(f);
+ }
+ 
+@@ -305,7 +302,7 @@
+ }
+ 
+ 
+-void StateSplit::state_values_do(void f(Value*)) { 
++void StateSplit::state_values_do(void f(Value*)) {
+   if (state() != NULL) state()->values_do(f);
+ }
+ 
+@@ -492,7 +489,7 @@
+ }
+ 
+ 
+-void Constant::other_values_do(void f(Value*)) { 
++void Constant::other_values_do(void f(Value*)) {
+   if (state() != NULL) state()->values_do(f);
+ }
+ 
+@@ -528,7 +525,7 @@
+   if (old_end != NULL) {
+     // disconnect from the old end
+     old_end->set_begin(NULL);
+-    
++
+     // disconnect this block from it's current successors
+     for (i = 0; i < _successors.length(); i++) {
+       _successors.at(i)->remove_predecessor(this);
+@@ -658,11 +655,11 @@
+   return new_sux;
+ }
+ 
+-  
++
+ void BlockBegin::remove_successor(BlockBegin* pred) {
+   int idx;
+   while ((idx = _successors.index_of(pred)) >= 0) {
+-    _successors.remove_at(idx);    
++    _successors.remove_at(idx);
+   }
+ }
+ 
+@@ -675,7 +672,7 @@
+ void BlockBegin::remove_predecessor(BlockBegin* pred) {
+   int idx;
+   while ((idx = _predecessors.index_of(pred)) >= 0) {
+-    _predecessors.remove_at(idx);    
++    _predecessors.remove_at(idx);
+   }
+ }
+ 
+@@ -687,7 +684,7 @@
+ }
+ 
+ int BlockBegin::add_exception_state(ValueStack* state) {
+-  assert(is_set(exception_entry_flag), "only for xhandlers"); 
++  assert(is_set(exception_entry_flag), "only for xhandlers");
+   if (_exception_states == NULL) {
+     _exception_states = new ValueStackStack(4);
+   }
+@@ -735,7 +732,7 @@
+ }
+ 
+ 
+-#ifndef PRODUCT               
++#ifndef PRODUCT
+   #define TRACE_PHI(code) if (PrintPhiFunctions) { code; }
+ #else
+   #define TRACE_PHI(coce)
+@@ -791,11 +788,11 @@
+           TRACE_PHI(tty->print_cr("creating phi-function %c%d for local %d", new_state->local_at(index)->type()->tchar(), new_state->local_at(index)->id(), index));
+         }
+       }
+-    } 
++    }
+ 
+     // initialize state of block
+     set_state(new_state);
+-    
++
+   } else if (existing_state->is_same_across_scopes(new_state)) {
+     TRACE_PHI(tty->print_cr("exisiting state found"));
+ 
+@@ -965,7 +962,7 @@
+ }
+ 
+ 
+-void BlockEnd::other_values_do(void f(Value*)) {  
++void BlockEnd::other_values_do(void f(Value*)) {
+   if (state_before() != NULL) state_before()->values_do(f);
+ }
+ 
+@@ -976,7 +973,7 @@
+ // predecessor. Special handling is needed for xhanlder entries because there
+ // the state of arbitrary instructions are needed.
+ 
+-Value Phi::operand_at(int i) const { 
++Value Phi::operand_at(int i) const {
+   ValueStack* state;
+   if (_block->is_set(BlockBegin::exception_entry_flag)) {
+     state = _block->exception_state_at(i);
+@@ -986,14 +983,14 @@
+   assert(state != NULL, "");
+ 
+   if (is_local()) {
+-    return state->local_at(local_index()); 
++    return state->local_at(local_index());
+   } else {
+     return state->stack_at(stack_index());
+   }
+ }
+ 
+ 
+-int Phi::operand_count() const { 
++int Phi::operand_count() const {
+   if (_block->is_set(BlockBegin::exception_entry_flag)) {
+     return _block->number_of_exception_states();
+   } else {
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_Instruction.hpp openjdk/hotspot/src/share/vm/c1/c1_Instruction.hpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_Instruction.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_Instruction.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c1_Instruction.hpp	1.195 07/05/05 17:05:07 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Predefined classes
+@@ -283,13 +280,13 @@
+ 
+ #ifdef ASSERT
+   HiWord*      _hi_word;
+-#endif  
++#endif
+ 
+   friend class UseCountComputer;
+ 
+  protected:
+   void set_bci(int bci)                          { assert(bci == SynchronizationEntryBCI || bci >= 0, "illegal bci"); _bci = bci; }
+-  void set_type(ValueType* type) { 
++  void set_type(ValueType* type) {
+     assert(type != NULL, "type must exist");
+     _type = type;
+   }
+@@ -573,7 +570,7 @@
+   int         _index;    // to value on operand stack (index < 0) or to local
+  public:
+   // creation
+-  Phi(ValueType* type, BlockBegin* b, int index) 
++  Phi(ValueType* type, BlockBegin* b, int index)
+   : Instruction(type->base())
+   , _pf_flags(0)
+   , _block(b)
+@@ -1490,7 +1487,7 @@
+                                                  // first instruction in this block
+   Label      _label;                             // the label associated with this block
+   LIR_List*  _lir;                               // the low level intermediate representation for this block
+- 
++
+   BitMap      _live_in;                          // set of live LIR_Opr registers at entry to this block
+   BitMap      _live_out;                         // set of live LIR_Opr registers at exit from this block
+   BitMap      _live_gen;                         // set of registers used before any redefinition in this block
+@@ -1528,16 +1525,16 @@
+   , _exception_handler_pco(-1)
+   , _lir(NULL)
+   , _loop_index(-1)
+-  , _live_in(NULL, 0)
+-  , _live_out(NULL, 0)
+-  , _live_gen(NULL, 0)
+-  , _live_kill(NULL, 0)
+-  , _fpu_register_usage(NULL, 0)
++  , _live_in()
++  , _live_out()
++  , _live_gen()
++  , _live_kill()
++  , _fpu_register_usage()
+   , _fpu_stack_state(NULL)
+   , _first_lir_instruction_id(-1)
+   , _last_lir_instruction_id(-1)
+   , _total_preds(0)
+-  , _stores_to_locals(NULL, 0)
++  , _stores_to_locals()
+   {
+     set_bci(bci);
+   }
+@@ -1596,11 +1593,11 @@
+   BlockBegin* sux_at(int i) const;
+   void add_successor(BlockBegin* sux);
+   void remove_successor(BlockBegin* pred);
+-  bool is_successor(BlockBegin* sux) const       { return _successors.contains(sux); }    
++  bool is_successor(BlockBegin* sux) const       { return _successors.contains(sux); }
+ 
+   void add_predecessor(BlockBegin* pred);
+   void remove_predecessor(BlockBegin* pred);
+-  bool is_predecessor(BlockBegin* pred) const    { return _predecessors.contains(pred); }    
++  bool is_predecessor(BlockBegin* pred) const    { return _predecessors.contains(pred); }
+   int number_of_preds() const                    { return _predecessors.length(); }
+   BlockBegin* pred_at(int i) const               { return _predecessors[i]; }
+ 
+@@ -1610,7 +1607,7 @@
+   int  number_of_exception_handlers() const      { return _exception_handlers.length(); }
+   BlockBegin* exception_handler_at(int i) const  { return _exception_handlers.at(i); }
+ 
+-  // states of the instructions that have an edge to this exception handler 
++  // states of the instructions that have an edge to this exception handler
+   int number_of_exception_states()               { assert(is_set(exception_entry_flag), "only for xhandlers"); return _exception_states == NULL ? 0 : _exception_states->length(); }
+   ValueStack* exception_state_at(int idx) const  { assert(is_set(exception_entry_flag), "only for xhandlers"); return _exception_states->at(idx); }
+   int add_exception_state(ValueStack* state);
+@@ -1690,7 +1687,7 @@
+   // accessors
+   ValueStack* state_before() const               { return _state_before; }
+   bool is_safepoint() const                      { return check_flag(IsSafepointFlag); }
+-  BlockBegin* begin() const                      { return _begin; } 
++  BlockBegin* begin() const                      { return _begin; }
+ 
+   // manipulation
+   void set_begin(BlockBegin* begin);
+@@ -1962,7 +1959,11 @@
+ LEAF(OsrEntry, Instruction)
+  public:
+   // creation
+-  OsrEntry() : Instruction(intType, false) { pin(); }
++#ifdef _LP64
++  OsrEntry() : Instruction(longType, false) { pin(); }
++#else
++  OsrEntry() : Instruction(intType,  false) { pin(); }
++#endif
+ 
+   // generic
+   virtual void input_values_do(void f(Value*))   { }
+@@ -1973,7 +1974,7 @@
+ LEAF(ExceptionObject, Instruction)
+  public:
+   // creation
+-  ExceptionObject() : Instruction(objectType, false) { 
++  ExceptionObject() : Instruction(objectType, false) {
+     pin();
+   }
+ 
+@@ -2266,8 +2267,8 @@
+ 
+ class BlockPair: public CompilationResourceObj {
+  private:
+-  BlockBegin* _from; 
+-  BlockBegin* _to;   
++  BlockBegin* _from;
++  BlockBegin* _to;
+  public:
+   BlockPair(BlockBegin* from, BlockBegin* to): _from(from), _to(to) {}
+   BlockBegin* from() const { return _from; }
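+Besides whitespace, two things change above. The BitMap members of
+BlockBegin switch to default construction, presumably matching the BitMap
+constructors in the HotSpot 7 sources this patch targets. And OsrEntry is
+retyped for 64 bits: its result is effectively the raw address of the
+interpreter's OSR state buffer, and C1's value types have no raw pointer
+kind, so the port picks whichever integer type matches the platform word
+(longType on LP64, intType otherwise). The same idea as a standalone
+sketch, with an illustrative typedef:
+
+    #include <stdint.h>
+
+    #ifdef _LP64
+    typedef int64_t osr_buffer_addr_t;  // plays the role of C1's longType
+    #else
+    typedef int32_t osr_buffer_addr_t;  // plays the role of C1's intType
+    #endif
+
+    int main() {
+      char buf[16];
+      osr_buffer_addr_t addr = (osr_buffer_addr_t)(intptr_t)&buf[0];
+      return addr != 0 ? 0 : 1;  // a pointer always fits the chosen type
+    }
+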
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp openjdk/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)c1_InstructionPrinter.cpp	1.125 07/05/17 15:49:39 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -147,7 +144,7 @@
+     if (!klass->is_loaded()) {
+       output()->print("<unloaded> ");
+     }
+-    output()->print("class "); 
++    output()->print("class ");
+     print_klass(klass);
+   } else {
+     output()->print("???");
+@@ -269,6 +266,8 @@
+ void InstructionPrinter::print_unsafe_object_op(UnsafeObjectOp* op, const char* name) {
+   print_unsafe_op(op, name);
+   print_value(op->object());
++  output()->print(", ");
++  print_value(op->offset());
+ }
+ 
+ 
+@@ -849,4 +848,3 @@
+ 
+ 
+ #endif // PRODUCT
+-
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_InstructionPrinter.hpp openjdk/hotspot/src/share/vm/c1/c1_InstructionPrinter.hpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_InstructionPrinter.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_InstructionPrinter.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c1_InstructionPrinter.hpp	1.65 07/05/05 17:05:07 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #ifndef PRODUCT
+@@ -43,7 +40,7 @@
+ 
+  public:
+   InstructionPrinter(bool print_phis = true, outputStream* output = tty)
+-    : _print_phis(print_phis) 
++    : _print_phis(print_phis)
+     , _output(output)
+   {}
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_IR.cpp openjdk/hotspot/src/share/vm/c1/c1_IR.cpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_IR.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_IR.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)c1_IR.cpp	1.160 07/05/05 17:05:04 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -46,7 +43,7 @@
+ // deep copy of all XHandler contained in list
+ XHandlers::XHandlers(XHandlers* other) :
+   _list(other->length())
+-{ 
++{
+   for (int i = 0; i < other->length(); i++) {
+     _list.append(new XHandler(other->handler_at(i)));
+   }
+@@ -164,7 +161,7 @@
+   if (osr_bci == -1) {
+     _requires_phi_function.clear();
+   } else {
+-  	// selective creation of phi functions is not possibel in osr-methods
++        // selective creation of phi functions is not possibel in osr-methods
+     _requires_phi_function.set_range(0, method->max_locals());
+   }
+ 
+@@ -329,7 +326,7 @@
+ class CriticalEdgeFinder: public BlockClosure {
+   BlockPairList blocks;
+   IR*       _ir;
+-  
++
+  public:
+   CriticalEdgeFinder(IR* ir): _ir(ir) {}
+   void block_do(BlockBegin* bb) {
+@@ -459,7 +456,7 @@
+     if (TraceLinearScanLevel >= level) {       \
+       code;                                    \
+     }
+-#else 
++#else
+   #define TRACE_LINEAR_SCAN(level, code)
+ #endif
+ 
+@@ -531,7 +528,7 @@
+ };
+ 
+ 
+-ComputeLinearScanOrder::ComputeLinearScanOrder(BlockBegin* start_block) : 
++ComputeLinearScanOrder::ComputeLinearScanOrder(BlockBegin* start_block) :
+   _max_block_id(BlockBegin::number_of_blocks()),
+   _num_blocks(0),
+   _num_loops(0),
+@@ -611,7 +608,7 @@
+   clear_active(cur);
+ 
+   // Each loop has a unique number.
+-  // When multiple loops are nested, assign_loop_depth assumes that the 
++  // When multiple loops are nested, assign_loop_depth assumes that the
+   // innermost loop has the lowest number. This is guaranteed by setting
+   // the loop number after the recursive calls for the successors above
+   // have returned.
+@@ -630,7 +627,7 @@
+ void ComputeLinearScanOrder::mark_loops() {
+   TRACE_LINEAR_SCAN(3, tty->print_cr("----- marking loops"));
+ 
+-  _loop_map = BitMap2D(_num_loops, _max_block_id); 
++  _loop_map = BitMap2D(_num_loops, _max_block_id);
+   _loop_map.clear();
+ 
+   for (int i = _loop_end_blocks.length() - 1; i >= 0; i--) {
+@@ -714,10 +711,10 @@
+           loop_depth++;
+           min_loop_idx = i;
+         }
+-      } 
++      }
+       cur->set_loop_depth(loop_depth);
+       cur->set_loop_index(min_loop_idx);
+-  
++
+       // append all unvisited successors to work list
+       for (i = cur->number_of_sux() - 1; i >= 0; i--) {
+         _work_list.append(cur->sux_at(i));
+@@ -725,7 +722,7 @@
+       for (i = cur->number_of_exception_handlers() - 1; i >= 0; i--) {
+         _work_list.append(cur->exception_handler_at(i));
+       }
+-    } 
++    }
+   } while (!_work_list.is_empty());
+ }
+ 
+@@ -734,10 +731,10 @@
+   assert(a != NULL && b != NULL, "must have input blocks");
+ 
+   _dominator_blocks.clear();
+-  while (a != NULL) { 
+-    _dominator_blocks.set_bit(a->block_id()); 
++  while (a != NULL) {
++    _dominator_blocks.set_bit(a->block_id());
+     assert(a->dominator() != NULL || a == _linear_scan_order->at(0), "dominator must be initialized");
+-    a = a->dominator(); 
++    a = a->dominator();
+   }
+   while (b != NULL && !_dominator_blocks.at(b->block_id())) {
+     assert(b->dominator() != NULL || b == _linear_scan_order->at(0), "dominator must be initialized");
+@@ -795,7 +792,7 @@
+ 
+   // exceptions handlers are added as late as possible
+   INC_WEIGHT_IF(!cur->is_set(BlockBegin::exception_entry_flag));
+-  
++
+   // guarantee that weight is > 0
+   weight |= 1;
+ 
+@@ -808,7 +805,7 @@
+ 
+ bool ComputeLinearScanOrder::ready_for_processing(BlockBegin* cur) {
+   // Discount the edge just traveled.
+-  // When the number drops to zero, all forward branches were processed 
++  // When the number drops to zero, all forward branches were processed
+   if (dec_forward_branches(cur) != 0) {
+     return false;
+   }
+@@ -904,7 +901,7 @@
+     BlockBegin* cur = _work_list.pop();
+ 
+     if (cur == sux_of_osr_entry) {
+-      // the osr entry block is ignored in normal processing, it is never added to the 
++      // the osr entry block is ignored in normal processing, it is never added to the
+       // work list. Instead, it is added as late as possible manually here.
+       append_block(osr_entry);
+       compute_dominator(cur, osr_entry);
+@@ -962,7 +959,7 @@
+   TRACE_LINEAR_SCAN(3, tty->print_cr("----- computing dominators (iterative computation reqired: %d)", _iterative_dominators));
+ 
+   // iterative computation of dominators is only required for methods with non-natural loops
+-  // and OSR-methods. For all other methods, the dominators computed when generating the 
++  // and OSR-methods. For all other methods, the dominators computed when generating the
+   // linear scan block order are correct.
+   if (_iterative_dominators) {
+     do {
+@@ -981,7 +978,7 @@
+     tty->print_cr("----- loop information:");
+     for (int block_idx = 0; block_idx < _linear_scan_order->length(); block_idx++) {
+       BlockBegin* cur = _linear_scan_order->at(block_idx);
+-  
++
+       tty->print("%4d: B%2d: ", cur->linear_scan_number(), cur->block_id());
+       for (int loop_idx = 0; loop_idx < _num_loops; loop_idx++) {
+         tty->print ("%d ", is_block_in_loop(loop_idx, cur));
+@@ -1056,7 +1053,7 @@
+     int j;
+     for (j = cur->number_of_sux() - 1; j >= 0; j--) {
+       BlockBegin* sux = cur->sux_at(j);
+-      
++
+       assert(sux->linear_scan_number() >= 0 && sux->linear_scan_number() == _linear_scan_order->index_of(sux), "incorrect linear_scan_number");
+       if (!cur->is_set(BlockBegin::linear_scan_loop_end_flag)) {
+         assert(cur->linear_scan_number() < sux->linear_scan_number(), "invalid order");
+@@ -1242,7 +1239,7 @@
+       assert(preds->length() == block->number_of_preds(), "should be the same");
+     }
+   }
+-  
++
+   virtual void block_do(BlockBegin* block) {
+     _blocks->append(block);
+     BlockEnd* be = block->end();
+@@ -1251,7 +1248,7 @@
+     for (i = 0; i < n; i++) {
+       BlockBegin* sux = be->sux_at(i);
+       assert(!sux->is_set(BlockBegin::exception_entry_flag), "must not be xhandler");
+-      
++
+       BlockList* preds = _predecessors->at_grow(sux->block_id(), NULL);
+       if (preds == NULL) {
+         preds = new BlockList();
+@@ -1320,7 +1317,7 @@
+ 
+ #ifdef ASSERT
+   if (block->state()) block->state()->values_do(check_substitute);
+-  block->block_values_do(check_substitute); 
++  block->block_values_do(check_substitute);
+   if (block->end() && block->end()->state()) block->end()->state()->values_do(check_substitute);
+ #endif
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_IR.hpp openjdk/hotspot/src/share/vm/c1/c1_IR.hpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_IR.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_IR.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c1_IR.hpp	1.100 07/05/05 17:05:04 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // An XHandler is a C1 internal description for an exception handler
+@@ -43,7 +40,7 @@
+ 
+  public:
+   // creation
+-  XHandler(ciExceptionHandler* desc) 
++  XHandler(ciExceptionHandler* desc)
+     : _desc(desc)
+     , _entry_block(NULL)
+     , _entry_code(NULL)
+@@ -55,7 +52,7 @@
+ #endif
+   { }
+ 
+-  XHandler(XHandler* other) 
++  XHandler(XHandler* other)
+     : _desc(other->_desc)
+     , _entry_block(other->_entry_block)
+     , _entry_code(other->_entry_code)
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_LinearScan.cpp openjdk/hotspot/src/share/vm/c1/c1_LinearScan.cpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_LinearScan.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_LinearScan.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c1_LinearScan.cpp	1.12 07/05/05 17:05:10 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -561,7 +558,7 @@
+     if (block->is_set(BlockBegin::exception_entry_flag)) {
+       // Phi functions at the begin of an exception handler are
+       // implicitly defined (= killed) at the beginning of the block.
+-      for_each_phi_fun(block, phi, 
++      for_each_phi_fun(block, phi,
+         live_kill.set_bit(phi->operand()->vreg_number())
+       );
+     }
+@@ -628,7 +625,7 @@
+       for (k = 0; k < n; k++) {
+         CodeEmitInfo* info = visitor.info_at(k);
+         ValueStack* stack = info->stack();
+-        for_each_state_value(stack, value, 
++        for_each_state_value(stack, value,
+           set_live_gen_kill(value, op, live_gen, live_kill)
+         );
+       }
+@@ -732,7 +729,7 @@
+ 
+   // Perform a backward dataflow analysis to compute live_out and live_in for each block.
+   // The loop is executed until a fixpoint is reached (no changes in an iteration)
+-  // Exception handlers must be processed because not all live values are 
++  // Exception handlers must be processed because not all live values are
+   // present in the state array, e.g. because of global value numbering
+   do {
+     change_occurred = false;
+@@ -839,7 +836,7 @@
+       }
+     }
+ 
+-#endif    
++#endif
+     // when this fails, virtual registers are used before they are defined.
+     assert(false, "live_in set of first block must be empty");
+     // bailout of if this occurs in product mode.
+@@ -963,8 +960,8 @@
+ 
+   change_spill_definition_pos(interval, def_pos);
+   if (use_kind == noUse && interval->spill_state() <= startInMemory) {
+-  	// detection of method-parameters and roundfp-results
+-  	// TODO: move this directly to position where use-kind is computed
++        // detection of method-parameters and roundfp-results
++        // TODO: move this directly to position where use-kind is computed
+     interval->set_spill_state(startInMemory);
+   }
+ }
+@@ -1058,7 +1055,7 @@
+         // input operand must have a register instead of output operand (leads to better register allocation)
+         return mustHaveRegister;
+       }
+-      
++
+       // The input operand is not forced to a register (moves from stack to register are allowed),
+       // but it is faster if the input operand is in a register
+       return shouldHaveRegister;
+@@ -1375,7 +1372,7 @@
+       for (k = 0; k < n; k++) {
+         CodeEmitInfo* info = visitor.info_at(k);
+         ValueStack* stack = info->stack();
+-        for_each_state_value(stack, value, 
++        for_each_state_value(stack, value,
+           add_use(value, block_from, op_id + 1, noUse);
+         );
+       }
+@@ -1832,7 +1829,7 @@
+   }
+ 
+   // the live_in bits are not set for phi functions of the xhandler entry, so iterate them separately
+-  for_each_phi_fun(block, phi, 
++  for_each_phi_fun(block, phi,
+     resolve_exception_entry(block, phi->operand()->vreg_number(), move_resolver)
+   );
+ 
+@@ -1850,7 +1847,7 @@
+     return;
+   }
+ 
+-  // the computation of to_interval is equal to resolve_collect_mappings, 
++  // the computation of to_interval is equal to resolve_collect_mappings,
+   // but from_interval is more complicated because of phi functions
+   BlockBegin* to_block = handler->entry_block();
+   Interval* to_interval = interval_at_block_begin(to_block, reg_num);
+@@ -1918,7 +1915,7 @@
+     entry_code->jump(handler->entry_block());
+     handler->set_entry_code(entry_code);
+   }
+-}   
++}
+ 
+ 
+ void LinearScan::resolve_exception_handlers() {
+@@ -2337,19 +2334,19 @@
+ 
+       // caller-save registers must not be included into oop-maps at calls
+       assert(!is_call_site || assigned_reg >= nof_regs || !is_caller_save(assigned_reg), "interval is in a caller-save register at a call -> register will be overwritten");
+-      
++
+       VMReg name = vm_reg_for_interval(interval);
+       map->set_oop(name);
+-      
++
+       // Spill optimization: when the stack value is guaranteed to be always correct,
+       // then it must be added to the oop map even if the interval is currently in a register
+-      if (interval->always_in_memory() && 
++      if (interval->always_in_memory() &&
+           op->id() > interval->spill_definition_pos() &&
+           interval->assigned_reg() != interval->canonical_spill_slot()) {
+         assert(interval->spill_definition_pos() > 0, "position not set correctly");
+         assert(interval->canonical_spill_slot() >= LinearScan::nof_regs, "no spill slot assigned");
+         assert(interval->assigned_reg() < LinearScan::nof_regs, "interval is on stack, so stack slot is registered twice");
+-        
++
+         map->set_oop(frame_map()->slot_regname(interval->canonical_spill_slot() - LinearScan::nof_regs));
+       }
+     }
+@@ -2547,8 +2544,8 @@
+   } else {
+     // double-size operands
+ 
+-    LocationValue* first;
+-    LocationValue* second;
++    ScopeValue* first;
++    ScopeValue* second;
+ 
+     if (opr->is_double_stack()) {
+       Location loc1, loc2;
+@@ -2559,6 +2556,11 @@
+       second = new LocationValue(loc2);
+ 
+     } else if (opr->is_double_cpu()) {
++#ifdef _LP64
++      VMReg rname_first = opr->as_register_lo()->as_VMReg();
++      first = new LocationValue(Location::new_reg_loc(Location::lng, rname_first));
++      second = &_int_0_scope_value;
++#else
+       VMReg rname_first = opr->as_register_lo()->as_VMReg();
+       VMReg rname_second = opr->as_register_hi()->as_VMReg();
+ 
+@@ -2571,6 +2573,7 @@
+ 
+       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
+       second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
++#endif
+ 
+ #ifdef IA32
+     } else if (opr->is_double_xmm()) {
+@@ -2644,7 +2647,7 @@
+     assert(con != NULL || opr->is_virtual(), "asumption: non-Constant instructions have only virtual operands");
+ 
+     if (con != NULL && !con->is_pinned() && !opr->is_constant()) {
+-      // Unpinned constants may have a virtual operand for a part of the lifetime 
++      // Unpinned constants may have a virtual operand for a part of the lifetime
+       // or may be illegal when it was optimized away,
+       // so always use a constant operand
+       opr = LIR_OprFact::value_type(con->type());
+@@ -2862,15 +2865,21 @@
+     op->verify();
+ #endif
+ 
++#ifndef _LP64
+     // remove useless moves
+     if (op->code() == lir_move) {
+       assert(op->as_Op1() != NULL, "move must be LIR_Op1");
+       LIR_Op1* move = (LIR_Op1*)op;
+-      if (!move->result_opr()->is_pointer() && !move->in_opr()->is_pointer() && move->in_opr()->is_equivalent(move->result_opr()->with_type_of(move->in_opr()))) {
++      LIR_Opr src = move->in_opr();
++      LIR_Opr dst = move->result_opr();
++      if (dst == src ||
++          !dst->is_pointer() && !src->is_pointer() &&
++          src->is_same_register(dst)) {
+         instructions->at_put(j, NULL);
+         has_dead = true;
+       }
+     }
++#endif
+   }
+ 
+   if (has_dead) {
+@@ -2929,7 +2938,7 @@
+     resolve_exception_handlers();
+   }
+   // fill in number of spill slots into frame_map
+-  propagate_spill_slots(); 
++  propagate_spill_slots();
+   CHECK_BAILOUT();
+ 
+   NOT_PRODUCT(print_intervals("After Register Allocation"));
+@@ -3227,7 +3236,7 @@
+   void process_operations(LIR_List* ops, IntervalList* input_state);
+ 
+  public:
+-  RegisterVerifier(LinearScan* allocator) 
++  RegisterVerifier(LinearScan* allocator)
+     : _allocator(allocator)
+     , _work_list(16)
+     , _saved_states(BlockBegin::number_of_blocks(), NULL)
+@@ -3252,7 +3261,7 @@
+     LIR_Opr opr = args->at(n);
+     if (opr->is_register()) {
+       Interval* interval = interval_at(reg_num(opr));
+-      
++
+       if (interval->assigned_reg() < state_size()) {
+         input_state->at_put(interval->assigned_reg(), interval);
+       }
+@@ -3335,7 +3344,7 @@
+           saved_state->at_put(i, NULL);
+ 
+           TRACE_LINEAR_SCAN(4, tty->print_cr("process_successor B%d: invalidating slot %d", block->block_id(), i));
+-        } 
++        }
+       }
+     }
+ 
+@@ -4373,7 +4382,7 @@
+   // print ranges
+   Range* cur = _first;
+   while (cur != Range::end()) {
+-    cur->print(out); 
++    cur->print(out);
+     cur = cur->next();
+     assert(cur != NULL, "range list not closed with range sentinel");
+   }
+@@ -4398,7 +4407,7 @@
+ 
+ // **** Implementation of IntervalWalker ****************************
+ 
+-IntervalWalker::IntervalWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first) 
++IntervalWalker::IntervalWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first)
+  : _compilation(allocator->compilation())
+  , _allocator(allocator)
+ {
+@@ -5480,7 +5489,7 @@
+   return in->is_virtual() && res->is_virtual() && in->vreg_number() == from->reg_num() && res->vreg_number() == to->reg_num();
+ }
+ 
+-// optimization (especially for phi functions of nested loops): 
++// optimization (especially for phi functions of nested loops):
+ // assign same spill slot to non-intersecting intervals
+ void LinearScanWalker::combine_spilled_intervals(Interval* cur) {
+   if (cur->is_split_child()) {
+@@ -5876,14 +5885,14 @@
+   int max_end = MIN2(header_idx + ShortLoopSize, code->length());
+   while (i < max_end && code->at(i)->loop_depth() >= header_block->loop_depth()) {
+     i++;
+-  } 
++  }
+ 
+   if (i == code->length() || code->at(i)->loop_depth() < header_block->loop_depth()) {
+     int end_idx = i - 1;
+     BlockBegin* end_block = code->at(end_idx);
+ 
+     if (end_block->number_of_sux() == 1 && end_block->sux_at(0) == header_block) {
+-      // short loop from header_idx to end_idx found -> reorder blocks such that 
++      // short loop from header_idx to end_idx found -> reorder blocks such that
+       // the header_block is the last block instead of the first block of the loop
+       TRACE_LINEAR_SCAN(1, tty->print_cr("Reordering short loop: length %d, header B%d, end B%d",
+                                          end_idx - header_idx + 1,
+@@ -6077,7 +6086,7 @@
+       //       -> this may lead to unnecesary return instructions in the final code
+ 
+       assert(cur_last_op->info() == NULL, "return instructions do not have debug information");
+-      assert(block->number_of_sux() == 0 || 
++      assert(block->number_of_sux() == 0 ||
+              (return_converted.at(block->block_id()) && block->number_of_sux() == 1),
+              "blocks that end with return must not have successors");
+ 
+@@ -6187,6 +6196,7 @@
+     case counter_move_reg_reg:    return "register->register";
+     case counter_move_reg_stack:  return "register->stack";
+     case counter_move_stack_reg:  return "stack->register";
++    case counter_move_stack_stack:return "stack->stack";
+     case counter_move_reg_mem:    return "register->memory";
+     case counter_move_mem_reg:    return "memory->register";
+     case counter_move_const_any:  return "constant->any";
+@@ -6333,8 +6343,11 @@
+               ShouldNotReachHere();
+             }
+           } else if (in->is_stack()) {
+-            assert(res->is_register(), "must be");
+-            inc_counter(counter_move_stack_reg);
++            if (res->is_register()) {
++              inc_counter(counter_move_stack_reg);
++            } else {
++              inc_counter(counter_move_stack_stack);
++            }
+           } else if (in->is_address()) {
+             assert(res->is_register(), "must be");
+             inc_counter(counter_move_mem_reg);
+@@ -6379,7 +6392,6 @@
+         case lir_logic_or:
+         case lir_logic_xor:
+         case lir_shl:
+-        case lir_shlx:
+         case lir_shr:
+         case lir_ushr:            inc_counter(counter_alu); break;
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_LinearScan.hpp openjdk/hotspot/src/share/vm/c1/c1_LinearScan.hpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_LinearScan.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_LinearScan.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c1_LinearScan.hpp	1.12 07/05/05 17:05:09 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class DebugInfoCache;
+@@ -84,7 +81,7 @@
+ 
+ enum IntervalSpillState {
+   noDefinitionFound,  // starting state of calculation: no definition found yet
+-  oneDefinitionFound, // one definition has already been found. 
++  oneDefinitionFound, // one definition has already been found.
+                       // Note: two consecutive definitions are treated as one (e.g. consecutive move and add because of two-operand LIR form)
+                       // the position of this definition is stored in _definition_pos
+   oneMoveInserted,    // one spill move has already been inserted.
+@@ -103,7 +100,7 @@
+ 
+ 
+ class LinearScan : public CompilationResourceObj {
+-  // declare classes used by LinearScan as friends because they 
++  // declare classes used by LinearScan as friends because they
+   // need a wide variety of functions declared here
+   //
+   // Only the small interface to the rest of the compiler is public
+@@ -223,8 +220,8 @@
+   static bool is_in_fpu_register(const Interval* i);
+   static bool is_oop_interval(const Interval* i);
+ 
+-  
+-  // General helper functions 
++
++  // General helper functions
+   int         allocate_spill_slot(bool double_word);
+   void        assign_spill_slot(Interval* it);
+   void        propagate_spill_slots();
+@@ -239,7 +236,7 @@
+   static bool requires_adjacent_regs(BasicType type);
+   static bool is_caller_save(int assigned_reg);
+ 
+-  // spill move optimization: eliminate moves from register to stack if 
++  // spill move optimization: eliminate moves from register to stack if
+   // stack slot is known to be correct
+   void        change_spill_definition_pos(Interval* interval, int def_pos);
+   void        change_spill_state(Interval* interval, int spill_pos);
+@@ -341,7 +338,7 @@
+ 
+   // methods used for debug information computation
+   void init_compute_debug_info();
+-  
++
+   MonitorValue*  location_for_monitor_index(int monitor_index);
+   LocationValue* location_for_name(int name, Location::Type loc_type);
+ 
+@@ -388,7 +385,7 @@
+   // accessors used by Compilation
+   int         max_spills()  const { return _max_spills; }
+   int         num_calls() const   { assert(_num_calls >= 0, "not set"); return _num_calls; }
+-  
++
+   // entry functions for printing
+ #ifndef PRODUCT
+   static void print_statistics();
+@@ -417,12 +414,12 @@
+   IntervalList     _mapping_to;
+   bool             _multiple_reads_allowed;
+   int              _register_blocked[LinearScan::nof_regs];
+- 
++
+   int  register_blocked(int reg)                    { assert(reg >= 0 && reg < LinearScan::nof_regs, "out of bounds"); return _register_blocked[reg]; }
+   void set_register_blocked(int reg, int direction) { assert(reg >= 0 && reg < LinearScan::nof_regs, "out of bounds"); assert(direction == 1 || direction == -1, "out of bounds"); _register_blocked[reg] += direction; }
+ 
+-  void block_registers(Interval* it);   
+-  void unblock_registers(Interval* it); 
++  void block_registers(Interval* it);
++  void unblock_registers(Interval* it);
+   bool save_to_process_move(Interval* from, Interval* to);
+ 
+   void create_insertion_buffer(LIR_List* list);
+@@ -554,7 +551,7 @@
+   int              assigned_regHi() const        { return _assigned_regHi; }
+   void             assign_reg(int reg)           { _assigned_reg = reg; _assigned_regHi = LinearScan::any_reg; }
+   void             assign_reg(int reg,int regHi) { _assigned_reg = reg; _assigned_regHi = regHi; }
+-  
++
+   Interval*        register_hint(bool search_split_child = true) const; // calculation needed
+   void             set_register_hint(Interval* i) { _register_hint = i; }
+ 
+@@ -569,7 +566,7 @@
+   Interval*        split_child_before_op_id(int op_id);
+   bool             split_child_covers(int op_id, LIR_OpVisitState::OprMode mode);
+   DEBUG_ONLY(void  check_split_children();)
+-   
++
+   // information stored in split parent, but available for all children
+   int              canonical_spill_slot() const            { return split_parent()->_canonical_spill_slot; }
+   void             set_canonical_spill_slot(int slot)      { assert(split_parent()->_canonical_spill_slot == -1, "overwriting existing value"); split_parent()->_canonical_spill_slot = slot; }
+@@ -709,7 +706,7 @@
+   int              _block_pos[LinearScan::nof_regs];
+   IntervalList*    _spill_intervals[LinearScan::nof_regs];
+ 
+-  MoveResolver     _move_resolver;   // for ordering spill moves 
++  MoveResolver     _move_resolver;   // for ordering spill moves
+ 
+   // accessors mapped to same functions in class LinearScan
+   int         block_count() const      { return allocator()->block_count(); }
+@@ -773,8 +770,8 @@
+ 
+ 
+ /*
+-When a block has more than one predecessor, and all predecessors end with 
+-the same sequence of move-instructions, than this moves can be placed once 
++When a block has more than one predecessor, and all predecessors end with
++the same sequence of move-instructions, then these moves can be placed once
+ at the beginning of the block instead of multiple times in the predecessors.
+ 
+ Similarly, when a block has more than one successor, then equal sequences of
+@@ -791,7 +788,7 @@
+ */
+ class EdgeMoveOptimizer : public StackObj {
+  private:
+-  // the class maintains a list with all lir-instruction-list of the 
++  // the class maintains a list with all lir-instruction-list of the
+   // successors (predecessors) and the current index into the lir-lists
+   LIR_OpListStack _edge_instructions;
+   intStack        _edge_instructions_idx;
+@@ -890,6 +887,7 @@
+     counter_move_reg_reg,
+     counter_move_reg_stack,
+     counter_move_stack_reg,
++    counter_move_stack_stack,
+     counter_move_reg_mem,
+     counter_move_mem_reg,
+     counter_move_const_any,
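
The /* ... */ comment a few hunks up describes the edge-move optimization: when every predecessor of a block ends with the same sequence of moves, those moves can be emitted once at the start of the common successor. A toy model of that idea, using std::string as a stand-in for a LIR instruction (illustrative only; the real EdgeMoveOptimizer works on LIR_OpList instances and must also respect each block's closing branch):

    #include <cassert>
    #include <string>
    #include <vector>

    // Identical trailing "instructions" are removed from every
    // predecessor and collected once, in original order, for the
    // start of the successor block.
    std::vector<std::string> hoist_common_tail(
        std::vector<std::vector<std::string> >& preds) {
      std::vector<std::string> hoisted;
      while (!preds.empty() && !preds[0].empty()) {
        const std::string candidate = preds[0].back();
        bool all_match = true;
        for (size_t i = 1; i < preds.size(); i++) {
          if (preds[i].empty() || preds[i].back() != candidate) {
            all_match = false;
            break;
          }
        }
        if (!all_match) break;
        for (size_t i = 0; i < preds.size(); i++) preds[i].pop_back();
        hoisted.insert(hoisted.begin(), candidate);  // keep original order
      }
      return hoisted;
    }

    int main() {
      std::vector<std::vector<std::string> > preds(2);
      preds[0] = {"r1 := r5", "r2 := r6"};
      preds[1] = {"r3 := r4", "r1 := r5", "r2 := r6"};
      assert(hoist_common_tail(preds).size() == 2);
      return 0;
    }
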
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp openjdk/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)c1_LIRAssembler.cpp	1.133 07/05/05 17:05:08 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -32,7 +29,7 @@
+ void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
+   // we must have enough patching space so that call can be inserted
+   while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
+-    _masm->nop(); 
++    _masm->nop();
+   }
+   patch->install(_masm, patch_code, obj, info);
+   append_patching_stub(patch);
+@@ -74,7 +71,7 @@
+ //---------------------------------------------------------------
+ 
+ 
+-LIR_Assembler::LIR_Assembler(Compilation* c): 
++LIR_Assembler::LIR_Assembler(Compilation* c):
+    _compilation(c)
+  , _masm(c->masm())
+  , _frame_map(c->frame_map())
+@@ -130,7 +127,7 @@
+ }
+ 
+ 
+-void LIR_Assembler::emit_slow_case_stubs() { 
++void LIR_Assembler::emit_slow_case_stubs() {
+   emit_stubs(_slow_case_stubs);
+ }
+ 
+@@ -199,7 +196,7 @@
+ 
+ void LIR_Assembler::emit_block(BlockBegin* block) {
+   if (block->is_set(BlockBegin::backward_branch_target_flag)) {
+-    align_backward_branch_target(); 
++    align_backward_branch_target();
+   }
+ 
+   // if this block is the start of an exception handler, record the
+@@ -218,7 +215,7 @@
+ #endif /* PRODUCT */
+ 
+   assert(block->lir() != NULL, "must have LIR");
+-  IA32_ONLY(assert(_masm->esp_offset() == 0, "frame size should be fixed"));
++  IA32_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
+ 
+ #ifndef PRODUCT
+   if (CommentedAssembly) {
+@@ -230,7 +227,7 @@
+ 
+   emit_lir_list(block->lir());
+ 
+-  IA32_ONLY(assert(_masm->esp_offset() == 0, "frame size should be fixed"));
++  IA32_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
+ }
+ 
+ 
+@@ -423,10 +420,10 @@
+   emit_static_call_stub();
+ 
+   switch (op->code()) {
+-  case lir_static_call:  
++  case lir_static_call:
+     call(op->addr(), relocInfo::static_call_type, op->info());
+     break;
+-  case lir_optvirtual_call: 
++  case lir_optvirtual_call:
+     call(op->addr(), relocInfo::opt_virtual_call_type, op->info());
+     break;
+   case lir_icvirtual_call:
+@@ -459,7 +456,7 @@
+ 
+ void LIR_Assembler::emit_op1(LIR_Op1* op) {
+   switch (op->code()) {
+-    case lir_move:   
++    case lir_move:
+       if (op->move_kind() == lir_move_volatile) {
+         assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
+         volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
+@@ -484,9 +481,9 @@
+     }
+ 
+     case lir_return:
+-      return_op(op->in_opr()); 
++      return_op(op->in_opr());
+       break;
+-    
++
+     case lir_safepoint:
+       if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
+         _masm->nop();
+@@ -520,11 +517,11 @@
+     case lir_neg:
+       negate(op->in_opr(), op->result_opr());
+       break;
+-    
++
+     case lir_leal:
+       leal(op->in_opr(), op->result_opr());
+       break;
+-    
++
+     case lir_null_check:
+       if (GenerateCompilerNullChecks) {
+         add_debug_info_for_null_check_here(op->info());
+@@ -620,7 +617,7 @@
+       get_thread(op->result_opr());
+       break;
+ 
+-    default: 
++    default:
+       ShouldNotReachHere();
+       break;
+   }
+@@ -637,7 +634,7 @@
+       }
+       comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
+       break;
+-    
++
+     case lir_cmp_l2i:
+     case lir_cmp_fd2i:
+     case lir_ucmp_fd2i:
+@@ -649,7 +646,6 @@
+       break;
+ 
+     case lir_shl:
+-    case lir_shlx:
+     case lir_shr:
+     case lir_ushr:
+       if (op->in_opr2()->is_constant()) {
+@@ -675,7 +671,7 @@
+         op->info(),
+         op->fpu_pop_count() == 1);
+       break;
+-    
++
+     case lir_abs:
+     case lir_sqrt:
+     case lir_sin:
+@@ -760,7 +756,7 @@
+     }
+ 
+   } else if (src->is_address()) {
+-    mem2reg(src, dest, type, patch_code, info, unaligned); 
++    mem2reg(src, dest, type, patch_code, info, unaligned);
+ 
+   } else {
+     ShouldNotReachHere();
+@@ -787,7 +783,7 @@
+           _masm->verify_oop(r->as_Register());
+ #endif
+         } else {
+-          _masm->verify_stack_oop(r->reg2stack() * wordSize);
++          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
+         }
+       }
+       s.next();
+@@ -796,6 +792,3 @@
+   }
+ #endif
+ }
+-
+-
+-
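
Two 64-bit fixes ride along with the whitespace cleanup in the c1_LIRAssembler.cpp hunks above: the IA32 frame-size assertions move from esp_offset() to rsp_offset(), and verify_stack_oop() now scales the slot index by VMRegImpl::stack_slot_size instead of wordSize. The distinction matters because a VMReg stack slot stays 4 bytes while the machine word grows to 8 on LP64, so the old multiplication lands on the wrong offset. A quick standalone illustration (the constants are local stand-ins for the HotSpot values):

    #include <cstdio>

    const int word_size = 8;  // bytes per machine word on LP64
    const int slot_size = 4;  // bytes per VMReg stack slot (fixed)

    int main() {
      int slot = 3;  // hypothetical reg2stack() result
      std::printf("wordSize-based offset:  %d bytes\n", slot * word_size);  // 24
      std::printf("slot-size-based offset: %d bytes\n", slot * slot_size);  // 12
      return 0;
    }
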
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp openjdk/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c1_LIRAssembler.hpp	1.116 07/05/05 17:05:08 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class Compilation;
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_LIR.cpp openjdk/hotspot/src/share/vm/c1/c1_LIR.cpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_LIR.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_LIR.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)c1_LIR.cpp	1.118 07/05/05 17:05:04 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,22 +19,22 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_c1_LIR.cpp.incl"
+ 
+-Register LIR_OprDesc::as_register() const { 
+-  return FrameMap::cpu_rnr2reg(cpu_regnr()); 
++Register LIR_OprDesc::as_register() const {
++  return FrameMap::cpu_rnr2reg(cpu_regnr());
+ }
+ 
+-Register LIR_OprDesc::as_register_lo() const { 
+-  return FrameMap::cpu_rnr2reg(cpu_regnrLo()); 
++Register LIR_OprDesc::as_register_lo() const {
++  return FrameMap::cpu_rnr2reg(cpu_regnrLo());
+ }
+ 
+-Register LIR_OprDesc::as_register_hi() const { 
+-  return FrameMap::cpu_rnr2reg(cpu_regnrHi()); 
++Register LIR_OprDesc::as_register_hi() const {
++  return FrameMap::cpu_rnr2reg(cpu_regnrHi());
+ }
+ 
+ #ifdef IA32
+@@ -126,10 +123,17 @@
+   assert(scale() == times_1, "Scaled addressing mode not available on SPARC and should not be used");
+   assert(disp() == 0 || index()->is_illegal(), "can't have both");
+ #endif
++#ifdef _LP64
++  assert(base()->is_cpu_register(), "wrong base operand");
++  assert(index()->is_illegal() || index()->is_double_cpu(), "wrong index operand");
++  assert(base()->type() == T_OBJECT || base()->type() == T_LONG,
++         "wrong type for addresses");
++#else
+   assert(base()->is_single_cpu(), "wrong base operand");
+   assert(index()->is_illegal() || index()->is_single_cpu(), "wrong index operand");
+   assert(base()->type() == T_OBJECT || base()->type() == T_INT,
+          "wrong type for addresses");
++#endif
+ }
+ #endif
+ 
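
The hunk above extends LIR_Address::verify() for 64-bit builds: the base must be a CPU register holding a T_OBJECT or T_LONG value, and an index, if present, must itself be pointer-width (double_cpu). A compilable sketch of that LP64 branch; the enum and struct are stand-ins, not the HotSpot LIR_Opr class:

    #include <cassert>

    enum Type { T_INT, T_LONG, T_OBJECT };

    struct Opr {
      bool is_cpu_register;
      bool is_illegal;
      bool is_double_cpu;
      Type type;
    };

    // On 64-bit targets a base must be a full CPU register holding an
    // object or a long, and any index must itself be pointer-width.
    void verify_address_lp64(const Opr& base, const Opr& index) {
      assert(base.is_cpu_register, "wrong base operand");
      assert(index.is_illegal || index.is_double_cpu, "wrong index operand");
      assert(base.type == T_OBJECT || base.type == T_LONG,
             "wrong type for addresses");
    }

    int main() {
      Opr base = {true, false, false, T_LONG};
      Opr none = {false, true, false, T_INT};   // illegal (absent) index
      verify_address_lp64(base, none);
      return 0;
    }
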
+@@ -162,7 +166,7 @@
+ }
+ 
+ #ifndef PRODUCT
+-void LIR_OprDesc::validate_type() const { 
++void LIR_OprDesc::validate_type() const {
+ 
+ #ifdef ASSERT
+   if (!is_pointer() && !is_illegal()) {
+@@ -183,7 +187,6 @@
+     case T_INT:
+     case T_OBJECT:
+     case T_ARRAY:
+-    case T_ADDRESS:
+       assert((kind_field() == cpu_register || kind_field() == stack_value) && size_field() == single_size, "must match");
+       break;
+ 
+@@ -216,6 +219,15 @@
+ 
+ void LIR_Op2::verify() const {
+ #ifdef ASSERT
++  switch (code()) {
++    case lir_cmove:
++      break;
++
++    default:
++      assert(!result_opr()->is_register() || !result_opr()->is_oop_register(),
++             "can't produce oops from arith");
++  }
++
+   if (TwoOperandLIRForm) {
+     switch (code()) {
+     case lir_add:
+@@ -229,7 +241,6 @@
+     case lir_logic_or:
+     case lir_logic_xor:
+     case lir_shl:
+-    case lir_shlx:
+     case lir_shr:
+       assert(in_opr1() == result_opr(), "opr1 and result must match");
+       assert(in_opr1()->is_valid() && in_opr2()->is_valid(), "must be valid");
+@@ -279,16 +290,16 @@
+ }
+ 
+ void LIR_OpBranch::change_block(BlockBegin* b) {
+-  assert(_block != NULL, "must have old block"); 
++  assert(_block != NULL, "must have old block");
+   assert(_block->label() == label(), "must be equal");
+-  
+-  _block = b; 
++
++  _block = b;
+   _label = b->label();
+ }
+ 
+ void LIR_OpBranch::change_ublock(BlockBegin* b) {
+-  assert(_ublock != NULL, "must have old block"); 
+-  _ublock = b; 
++  assert(_ublock != NULL, "must have old block");
++  _ublock = b;
+ }
+ 
+ void LIR_OpBranch::negate_cond() {
+@@ -416,7 +427,7 @@
+     case lir_membar:                   // result and info always invalid
+     case lir_membar_acquire:           // result and info always invalid
+     case lir_membar_release:           // result and info always invalid
+-    { 
++    {
+       assert(op->as_Op0() != NULL, "must be");
+       assert(op->_info == NULL, "info not used by this instruction");
+       assert(op->_result->is_illegal(), "not used");
+@@ -425,7 +436,7 @@
+ 
+     case lir_nop:                      // may have info, result always invalid
+     case lir_std_entry:                // may have result, info always invalid
+-    case lir_osr_entry:                // may have result, info always invalid 
++    case lir_osr_entry:                // may have result, info always invalid
+     case lir_get_thread:               // may have result, info always invalid
+     {
+       assert(op->as_Op0() != NULL, "must be");
+@@ -437,7 +448,7 @@
+ 
+ // LIR_OpLabel
+     case lir_label:                    // result and info always invalid
+-    { 
++    {
+       assert(op->as_OpLabel() != NULL, "must be");
+       assert(op->_info == NULL, "info not used by this instruction");
+       assert(op->_result->is_illegal(), "not used");
+@@ -499,7 +510,7 @@
+ // LIR_OpBranch;
+     case lir_branch:                   // may have info, input and result register always invalid
+     case lir_cond_float_branch:        // may have info, input and result register always invalid
+-    { 
++    {
+       assert(op->as_OpBranch() != NULL, "must be");
+       LIR_OpBranch* opBranch = (LIR_OpBranch*)op;
+ 
+@@ -512,7 +523,7 @@
+ 
+ 
+ // LIR_OpAllocObj
+-    case lir_alloc_object: 
++    case lir_alloc_object:
+     {
+       assert(op->as_OpAllocObj() != NULL, "must be");
+       LIR_OpAllocObj* opAllocObj = (LIR_OpAllocObj*)op;
+@@ -561,7 +572,6 @@
+     case lir_logic_or:
+     case lir_logic_xor:
+     case lir_shl:
+-    case lir_shlx:
+     case lir_shr:
+     case lir_ushr:
+     {
+@@ -577,7 +587,7 @@
+       break;
+     }
+ 
+-    // special handling for cmove: right input operand must not be equal 
++    // special handling for cmove: right input operand must not be equal
+     // to the result operand, otherwise the backend fails
+     case lir_cmove:
+     {
+@@ -623,7 +633,7 @@
+       LIR_Op2* op2 = (LIR_Op2*)op;
+ 
+       if (op2->_info)                     do_info(op2->_info);
+-      if (op2->_opr1->is_valid())         do_temp(op2->_opr1); 
++      if (op2->_opr1->is_valid())         do_temp(op2->_opr1);
+       if (op2->_opr2->is_valid())         do_input(op2->_opr2); // exception object is input parameter
+       assert(op2->_result->is_illegal(), "no result");
+ 
+@@ -726,11 +736,11 @@
+       LIR_OpArrayCopy* opArrayCopy = (LIR_OpArrayCopy*)op;
+ 
+       assert(opArrayCopy->_result->is_illegal(), "unused");
+-      assert(opArrayCopy->_src->is_valid(), "used");          do_input(opArrayCopy->_src);     do_temp(opArrayCopy->_src);    
++      assert(opArrayCopy->_src->is_valid(), "used");          do_input(opArrayCopy->_src);     do_temp(opArrayCopy->_src);
+       assert(opArrayCopy->_src_pos->is_valid(), "used");      do_input(opArrayCopy->_src_pos); do_temp(opArrayCopy->_src_pos);
+-      assert(opArrayCopy->_dst->is_valid(), "used");          do_input(opArrayCopy->_dst);     do_temp(opArrayCopy->_dst);    
++      assert(opArrayCopy->_dst->is_valid(), "used");          do_input(opArrayCopy->_dst);     do_temp(opArrayCopy->_dst);
+       assert(opArrayCopy->_dst_pos->is_valid(), "used");      do_input(opArrayCopy->_dst_pos); do_temp(opArrayCopy->_dst_pos);
+-      assert(opArrayCopy->_length->is_valid(), "used");       do_input(opArrayCopy->_length);  do_temp(opArrayCopy->_length); 
++      assert(opArrayCopy->_length->is_valid(), "used");       do_input(opArrayCopy->_length);  do_temp(opArrayCopy->_length);
+       assert(opArrayCopy->_tmp->is_valid(), "used");          do_temp(opArrayCopy->_tmp);
+       if (opArrayCopy->_info)                     do_info(opArrayCopy->_info);
+ 
+@@ -749,7 +759,7 @@
+ 
+       if (opLock->_info)                          do_info(opLock->_info);
+ 
+-      // TODO: check if these operands really have to be temp 
++      // TODO: check if these operands really have to be temp
+       // (or if input is sufficient). This may have influence on the oop map!
+       assert(opLock->_lock->is_valid(), "used");  do_temp(opLock->_lock);
+       assert(opLock->_hdr->is_valid(),  "used");  do_temp(opLock->_hdr);
+@@ -884,11 +894,11 @@
+ bool LIR_OpVisitState::no_operands(LIR_Op* op) {
+   visit(op);
+ 
+-  return opr_count(inputMode) == 0 && 
+-         opr_count(outputMode) == 0 && 
+-         opr_count(tempMode) == 0 && 
+-         info_count() == 0 && 
+-         !has_call() && 
++  return opr_count(inputMode) == 0 &&
++         opr_count(outputMode) == 0 &&
++         opr_count(tempMode) == 0 &&
++         info_count() == 0 &&
++         !has_call() &&
+          !has_slow_case();
+ }
+ #endif
+@@ -905,7 +915,7 @@
+ }
+ 
+ void LIR_OpLabel::emit_code(LIR_Assembler* masm) {
+-  masm->emit_opLabel(this); 
++  masm->emit_opLabel(this);
+ }
+ 
+ void LIR_OpArrayCopy::emit_code(LIR_Assembler* masm) {
+@@ -914,43 +924,43 @@
+ }
+ 
+ void LIR_Op0::emit_code(LIR_Assembler* masm) {
+-  masm->emit_op0(this); 
++  masm->emit_op0(this);
+ }
+ 
+ void LIR_Op1::emit_code(LIR_Assembler* masm) {
+-  masm->emit_op1(this); 
++  masm->emit_op1(this);
+ }
+ 
+ void LIR_OpAllocObj::emit_code(LIR_Assembler* masm) {
+-  masm->emit_alloc_obj(this); 
++  masm->emit_alloc_obj(this);
+   masm->emit_code_stub(stub());
+ }
+ 
+ void LIR_OpBranch::emit_code(LIR_Assembler* masm) {
+-  masm->emit_opBranch(this); 
++  masm->emit_opBranch(this);
+   if (stub()) {
+     masm->emit_code_stub(stub());
+   }
+ }
+ 
+ void LIR_OpConvert::emit_code(LIR_Assembler* masm) {
+-  masm->emit_opConvert(this); 
++  masm->emit_opConvert(this);
+   if (stub() != NULL) {
+     masm->emit_code_stub(stub());
+   }
+ }
+ 
+ void LIR_Op2::emit_code(LIR_Assembler* masm) {
+-  masm->emit_op2(this); 
++  masm->emit_op2(this);
+ }
+ 
+ void LIR_OpAllocArray::emit_code(LIR_Assembler* masm) {
+-  masm->emit_alloc_array(this); 
++  masm->emit_alloc_array(this);
+   masm->emit_code_stub(stub());
+ }
+ 
+ void LIR_OpTypeCheck::emit_code(LIR_Assembler* masm) {
+-  masm->emit_opTypeCheck(this); 
++  masm->emit_opTypeCheck(this);
+   if (stub()) {
+     masm->emit_code_stub(stub());
+   }
+@@ -961,7 +971,7 @@
+ }
+ 
+ void LIR_Op3::emit_code(LIR_Assembler* masm) {
+-  masm->emit_op3(this); 
++  masm->emit_op3(this);
+ }
+ 
+ void LIR_OpLock::emit_code(LIR_Assembler* masm) {
+@@ -983,7 +993,7 @@
+ 
+ 
+ // LIR_List
+-LIR_List::LIR_List(Compilation* compilation, BlockBegin* block) 
++LIR_List::LIR_List(Compilation* compilation, BlockBegin* block)
+   : _operations(8)
+   , _compilation(compilation)
+ #ifndef PRODUCT
+@@ -1014,7 +1024,7 @@
+ void LIR_List::append(LIR_InsertionBuffer* buffer) {
+   assert(this == buffer->lir_list(), "wrong lir list");
+   const int n = _operations.length();
+-  
++
+   if (buffer->number_of_ops() > 0) {
+     // increase size of instructions list
+     _operations.at_grow(n + buffer->number_of_ops() - 1, NULL);
+@@ -1047,32 +1057,32 @@
+ 
+ void LIR_List::load(LIR_Address* addr, LIR_Opr src, CodeEmitInfo* info, LIR_PatchCode patch_code) {
+   append(new LIR_Op1(
+-            lir_move, 
++            lir_move,
+             LIR_OprFact::address(addr),
+             src,
+             addr->type(),
+-            patch_code, 
++            patch_code,
+             info));
+ }
+ 
+ 
+ void LIR_List::volatile_load_mem_reg(LIR_Address* address, LIR_Opr dst, CodeEmitInfo* info, LIR_PatchCode patch_code) {
+   append(new LIR_Op1(
+-            lir_move, 
++            lir_move,
+             LIR_OprFact::address(address),
+             dst,
+             address->type(),
+-            patch_code, 
++            patch_code,
+             info, lir_move_volatile));
+ }
+ 
+ void LIR_List::volatile_load_unsafe_reg(LIR_Opr base, LIR_Opr offset, LIR_Opr dst, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code) {
+   append(new LIR_Op1(
+-            lir_move, 
+-            LIR_OprFact::address(new LIR_Address(base, offset, type)), 
++            lir_move,
++            LIR_OprFact::address(new LIR_Address(base, offset, type)),
+             dst,
+             type,
+-            patch_code, 
++            patch_code,
+             info, lir_move_volatile));
+ }
+ 
+@@ -1086,33 +1096,33 @@
+ 
+ void LIR_List::store_mem_int(jint v, LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code) {
+   append(new LIR_Op1(
+-            lir_move, 
++            lir_move,
+             LIR_OprFact::intConst(v),
+             LIR_OprFact::address(new LIR_Address(base, offset_in_bytes, type)),
+             type,
+-            patch_code, 
++            patch_code,
+             info));
+ }
+ 
+ 
+ void LIR_List::store_mem_oop(jobject o, LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code) {
+   append(new LIR_Op1(
+-            lir_move, 
++            lir_move,
+             LIR_OprFact::oopConst(o),
+             LIR_OprFact::address(new LIR_Address(base, offset_in_bytes, type)),
+             type,
+-            patch_code, 
++            patch_code,
+             info));
+ }
+ 
+ 
+ void LIR_List::store(LIR_Opr src, LIR_Address* addr, CodeEmitInfo* info, LIR_PatchCode patch_code) {
+   append(new LIR_Op1(
+-            lir_move, 
++            lir_move,
+             src,
+             LIR_OprFact::address(addr),
+             addr->type(),
+-            patch_code, 
++            patch_code,
+             info));
+ }
+ 
+@@ -1183,19 +1193,19 @@
+ }
+ 
+ 
+-void LIR_List::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) { 
++void LIR_List::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
+   append(new LIR_Op2(
+                     lir_cmp,
+                     condition,
+-                    LIR_OprFact::address(new LIR_Address(base, disp, T_INT)), 
+-                    LIR_OprFact::intConst(c), 
+-                    info)); 
++                    LIR_OprFact::address(new LIR_Address(base, disp, T_INT)),
++                    LIR_OprFact::intConst(c),
++                    info));
+ }
+ 
+ 
+-void LIR_List::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Address* addr, CodeEmitInfo* info) { 
++void LIR_List::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Address* addr, CodeEmitInfo* info) {
+   append(new LIR_Op2(
+-                    lir_cmp, 
++                    lir_cmp,
+                     condition,
+                     reg,
+                     LIR_OprFact::address(addr),
+@@ -1232,18 +1242,18 @@
+ 
+ void LIR_List::shift_left(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp) {
+  append(new LIR_Op2(
+-                    lir_shl, 
+-                    value, 
+-                    count, 
++                    lir_shl,
++                    value,
++                    count,
+                     dst,
+                     tmp));
+ }
+ 
+ void LIR_List::shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp) {
+  append(new LIR_Op2(
+-                    lir_shr, 
+-                    value, 
+-                    count, 
++                    lir_shr,
++                    value,
++                    count,
+                     dst,
+                     tmp));
+ }
+@@ -1251,15 +1261,15 @@
+ 
+ void LIR_List::unsigned_shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp) {
+  append(new LIR_Op2(
+-                    lir_ushr, 
+-                    value, 
+-                    count, 
++                    lir_ushr,
++                    value,
++                    count,
+                     dst,
+                     tmp));
+ }
+ 
+ void LIR_List::fcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst, bool is_unordered_less) {
+-  append(new LIR_Op2(is_unordered_less ? lir_ucmp_fd2i : lir_cmp_fd2i,  
++  append(new LIR_Op2(is_unordered_less ? lir_ucmp_fd2i : lir_cmp_fd2i,
+                      left,
+                      right,
+                      dst));
+@@ -1267,7 +1277,7 @@
+ 
+ void LIR_List::lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info) {
+   append(new LIR_OpLock(
+-                    lir_lock, 
++                    lir_lock,
+                     hdr,
+                     obj,
+                     lock,
+@@ -1278,7 +1288,7 @@
+ 
+ void LIR_List::unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, CodeStub* stub) {
+   append(new LIR_OpLock(
+-                    lir_unlock, 
++                    lir_unlock,
+                     hdr,
+                     obj,
+                     lock,
+@@ -1419,7 +1429,7 @@
+     case times_8: out->print(" * 8"); break;
+     }
+   }
+-  out->print(" Disp: %d", _disp); 
++  out->print(" Disp: %d", _disp);
+ }
+ 
+ // debug output of block header without InstructionPrinter
+@@ -1434,7 +1444,7 @@
+   if (x->is_set(BlockBegin::osr_entry_flag))               tty->print("osr ");
+   if (x->is_set(BlockBegin::exception_entry_flag))         tty->print("ex ");
+   if (x->is_set(BlockBegin::subroutine_entry_flag))        tty->print("jsr ");
+-  if (x->is_set(BlockBegin::backward_branch_target_flag))  tty->print("bb "); 
++  if (x->is_set(BlockBegin::backward_branch_target_flag))  tty->print("bb ");
+   if (x->is_set(BlockBegin::linear_scan_loop_header_flag)) tty->print("lh ");
+   if (x->is_set(BlockBegin::linear_scan_loop_end_flag))    tty->print("le ");
+ 
+@@ -1567,7 +1577,6 @@
+      case lir_logic_or:              s = "logic_or";      break;
+      case lir_logic_xor:             s = "logic_xor";     break;
+      case lir_shl:                   s = "shift_left";    break;
+-     case lir_shlx:                  s = "shift_left_long";break;
+      case lir_shr:                   s = "shift_right";   break;
+      case lir_ushr:                  s = "ushift_right";  break;
+      case lir_alloc_array:           s = "alloc_array";   break;
+@@ -1642,7 +1651,7 @@
+ 
+ // LIR_Op0
+ void LIR_Op0::print_instr(outputStream* out) const {
+-  result_opr()->print(out); 
++  result_opr()->print(out);
+ }
+ 
+ // LIR_Op1
+@@ -1710,14 +1719,14 @@
+ 
+ void LIR_Op::print_condition(outputStream* out, LIR_Condition cond) {
+   switch(cond) {
+-    case lir_cond_equal:           out->print("[EQ]");      break; 
++    case lir_cond_equal:           out->print("[EQ]");      break;
+     case lir_cond_notEqual:        out->print("[NE]");      break;
+-    case lir_cond_less:            out->print("[LT]");      break; 
+-    case lir_cond_lessEqual:       out->print("[LE]");      break; 
+-    case lir_cond_greaterEqual:    out->print("[GE]");      break; 
+-    case lir_cond_greater:         out->print("[GT]");      break; 
+-    case lir_cond_belowEqual:      out->print("[BE]");      break; 
+-    case lir_cond_aboveEqual:      out->print("[AE]");      break; 
++    case lir_cond_less:            out->print("[LT]");      break;
++    case lir_cond_lessEqual:       out->print("[LE]");      break;
++    case lir_cond_greaterEqual:    out->print("[GE]");      break;
++    case lir_cond_greater:         out->print("[GT]");      break;
++    case lir_cond_belowEqual:      out->print("[BE]");      break;
++    case lir_cond_aboveEqual:      out->print("[AE]");      break;
+     case lir_cond_always:          out->print("[AL]");      break;
+     default:                       out->print("[%d]",cond); break;
+   }
+@@ -1732,21 +1741,21 @@
+ 
+ void LIR_OpConvert::print_bytecode(outputStream* out, Bytecodes::Code code) {
+   switch(code) {
+-    case Bytecodes::_d2f: out->print("[d2f] "); break; 
+-    case Bytecodes::_d2i: out->print("[d2i] "); break; 
++    case Bytecodes::_d2f: out->print("[d2f] "); break;
++    case Bytecodes::_d2i: out->print("[d2i] "); break;
+     case Bytecodes::_d2l: out->print("[d2l] "); break;
+-    case Bytecodes::_f2d: out->print("[f2d] "); break; 
+-    case Bytecodes::_f2i: out->print("[f2i] "); break; 
++    case Bytecodes::_f2d: out->print("[f2d] "); break;
++    case Bytecodes::_f2i: out->print("[f2i] "); break;
+     case Bytecodes::_f2l: out->print("[f2l] "); break;
+     case Bytecodes::_i2b: out->print("[i2b] "); break;
+     case Bytecodes::_i2c: out->print("[i2c] "); break;
+     case Bytecodes::_i2d: out->print("[i2d] "); break;
+-    case Bytecodes::_i2f: out->print("[i2f] "); break; 
+-    case Bytecodes::_i2l: out->print("[i2l] "); break; 
+-    case Bytecodes::_i2s: out->print("[i2s] "); break; 
+-    case Bytecodes::_l2i: out->print("[l2i] "); break; 
+-    case Bytecodes::_l2f: out->print("[l2f] "); break; 
+-    case Bytecodes::_l2d: out->print("[l2d] "); break; 
++    case Bytecodes::_i2f: out->print("[i2f] "); break;
++    case Bytecodes::_i2l: out->print("[i2l] "); break;
++    case Bytecodes::_i2s: out->print("[i2s] "); break;
++    case Bytecodes::_l2i: out->print("[l2i] "); break;
++    case Bytecodes::_l2f: out->print("[l2f] "); break;
++    case Bytecodes::_l2d: out->print("[l2d] "); break;
+     default:
+       out->print("[?%d]",code);
+     break;
+@@ -1779,7 +1788,7 @@
+   in_opr1()->print(out);    out->print(" ");
+   in_opr2()->print(out);    out->print(" ");
+   if (tmp_opr()->is_valid()) { tmp_opr()->print(out);    out->print(" "); }
+-  result_opr()->print(out); 
++  result_opr()->print(out);
+ }
+ 
+ void LIR_OpAllocArray::print_instr(outputStream* out) const {
+@@ -1855,7 +1864,7 @@
+ 
+ void LIR_InsertionBuffer::append(int index, LIR_Op* op) {
+   assert(_index_and_count.length() % 2 == 0, "must have a count for each index");
+-  
++
+   int i = number_of_insertion_points() - 1;
+   if (i < 0 || index_at(i) < index) {
+     append_new(index, 1);
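
Earlier in this file's diff, LIR_Op2::verify() gains a rule that only lir_cmove may leave an oop in its result register, since arithmetic and shifts cannot produce a valid oop. A standalone restatement of that check; the enum and struct are illustrative stand-ins rather than the HotSpot classes:

    #include <cassert>

    enum Code { LIR_ADD, LIR_SHL, LIR_CMOVE };

    struct Result {
      bool is_register;
      bool is_oop_register;
    };

    // Only a conditional move may legitimately select an oop into its
    // result register; arithmetic and shifts cannot create oops.
    void verify_op2(Code code, const Result& res) {
      if (code != LIR_CMOVE) {
        assert(!res.is_register || !res.is_oop_register,
               "can't produce oops from arith");
      }
    }

    int main() {
      verify_op2(LIR_ADD,   Result{true, false});  // fine: integer result
      verify_op2(LIR_CMOVE, Result{true, true});   // fine: cmove may move oops
      return 0;
    }
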
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp openjdk/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)c1_LIRGenerator.cpp	1.22 07/05/17 15:49:41 JVM"
+-#endif
+ /*
+  * Copyright 2005-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -60,7 +57,7 @@
+ //  r2 := r3  becomes  r1 := r2
+ //  r1 := r2           r2 := r3
+ 
+-PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs) 
++PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs)
+  : _gen(gen)
+  , _state(gen->resolver_state())
+  , _temp(LIR_OprFact::illegalOpr)
+@@ -105,7 +102,7 @@
+       move(dest, dest->destination_at(i));
+     }
+   } else if (!dest->start_node()) {
+-    // cylce in graph detected 
++    // cylce in graph detected
+     assert(_loop == NULL, "only one loop valid!");
+     _loop = dest;
+     move_to_temp(src->operand());
+@@ -174,7 +171,7 @@
+ void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
+   assert(dest->is_virtual(), "");
+   // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
+-  assert(src->is_valid(), ""); 
++  assert(src->is_valid(), "");
+   assert(dest->is_valid(), "");
+   ResolveNode* source = source_node(src);
+   source->append(destination_node(dest));
+@@ -184,7 +181,7 @@
+ //--------------------------------------------------------------
+ // LIRItem
+ 
+-void LIRItem::set_result(LIR_Opr opr) {  
++void LIRItem::set_result(LIR_Opr opr) {
+   assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
+   value()->set_operand(opr);
+ 
+@@ -458,7 +455,7 @@
+ 
+ 
+ void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
+-                                    CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) { 
++                                    CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
+   CodeStub* stub = new RangeCheckStub(range_check_info, index);
+   if (index->is_constant()) {
+     cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
+@@ -746,7 +743,7 @@
+   int flags = LIR_OpArrayCopy::all_flags;
+   if (expected_type != NULL) {
+     // try to skip null checks
+-    if (src->as_NewArray() != NULL) 
++    if (src->as_NewArray() != NULL)
+       flags &= ~LIR_OpArrayCopy::src_null_check;
+     if (dst->as_NewArray() != NULL)
+       flags &= ~LIR_OpArrayCopy::dst_null_check;
+@@ -810,9 +807,10 @@
+ 
+ 
+ LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
++  assert(type2size[t] == type2size[value->type()], "size mismatch");
+   if (!value->is_register()) {
+     // force into a register
+-    LIR_Opr r = new_register(t);
++    LIR_Opr r = new_register(value->type());
+     __ move(value, r);
+     value = r;
+   }
+@@ -887,7 +885,7 @@
+              "these can be produced lazily");
+       operand = operand_for_instruction(cur_val);
+     }
+-    resolver->move(operand, operand_for_instruction(phi));    
++    resolver->move(operand, operand_for_instruction(phi));
+   }
+ }
+ 
+@@ -991,39 +989,11 @@
+ }
+ 
+ 
+-void LIRGenerator::write_barrier(LIR_Opr addr) {
+-  if (addr->is_address()) {
+-    LIR_Address* address = (LIR_Address*)addr;
+-    LIR_Opr ptr = new_register(T_OBJECT);
+-    if (!address->index()->is_valid() && address->disp() == 0) {
+-      __ move(address->base(), ptr);
+-    } else {
+-      __ leal(addr, ptr);
+-    }
+-    addr = ptr;
+-  }
+-  assert(addr->is_register(), "must be a register at this point");
+-
+-  LIR_Opr tmp = new_register(T_OBJECT);
+-  if (TwoOperandLIRForm) {
+-    __ move(addr, tmp);
+-    __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
+-  } else {
+-    __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
+-  }
+-  if (can_inline_as_constant(card_table_base())) {
+-    __ move(LIR_OprFact::intConst(0), new LIR_Address(tmp, card_table_base()->as_jint(), T_BYTE));
+-  } else {
+-    __ move(LIR_OprFact::intConst(0), new LIR_Address(tmp, load_constant(card_table_base()), T_BYTE));
+-  }
+-}
+-
+-
+ void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
+   assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
+   assert(block()->next() == x, "ExceptionObject must be first instruction of block");
+ 
+-  // no moves are created for phi functions at the begin of exception 
++  // no moves are created for phi functions at the beginning of exception
+   // handlers, so assign operands manually here
+   for_each_phi_fun(block(), phi,
+                    operand_for_instruction(phi));
+@@ -1267,6 +1237,58 @@
+   return result;
+ }
+ 
++// Various barriers
++
++void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
++  switch (Universe::heap()->barrier_set()->kind()) {
++    case BarrierSet::CardTableModRef:
++    case BarrierSet::CardTableExtension:
++      CardTableModRef_post_barrier(addr,  new_val);
++      break;
++    case BarrierSet::ModRef:
++    case BarrierSet::Other:
++      // No post barriers
++      break;
++    default      :
++      ShouldNotReachHere();
++    }
++}
++
++void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
++
++  BarrierSet* bs = Universe::heap()->barrier_set();
++  assert(sizeof(*((CardTableModRefBS*)bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
++  LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)bs)->byte_map_base);
++  if (addr->is_address()) {
++    LIR_Address* address = addr->as_address_ptr();
++    LIR_Opr ptr = new_register(T_OBJECT);
++    if (!address->index()->is_valid() && address->disp() == 0) {
++      __ move(address->base(), ptr);
++    } else {
++      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
++      __ leal(addr, ptr);
++    }
++    addr = ptr;
++  }
++  assert(addr->is_register(), "must be a register at this point");
++
++  LIR_Opr tmp = new_pointer_register();
++  if (TwoOperandLIRForm) {
++    __ move(addr, tmp);
++    __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
++  } else {
++    __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
++  }
++  if (can_inline_as_constant(card_table_base)) {
++    __ move(LIR_OprFact::intConst(0),
++              new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
++  } else {
++    __ move(LIR_OprFact::intConst(0),
++              new LIR_Address(tmp, load_constant(card_table_base),
++                              T_BYTE));
++  }
++}
++
+ 
+ //------------------------field access--------------------------------------
+ 
+@@ -1277,9 +1299,9 @@
+ // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
+ // memory barrier (i.e., it's not sufficient that the interpreter does not
+ // reorder volatile references, the hardware also must not reorder them).
+-// 
++//
+ // According to the new Java Memory Model (JMM):
+-// (1) All volatiles are serialized wrt to each other.  
++// (1) All volatiles are serialized wrt to each other.
+ // ALSO reads & writes act as aquire & release, so:
+ // (2) A read cannot let unrelated NON-volatile memory refs that happen after
+ // the read float up to before the read.  It's OK for non-volatile memory refs
+@@ -1302,6 +1324,7 @@
+   bool needs_patching = x->needs_patching();
+   bool is_volatile = x->field()->is_volatile();
+   BasicType field_type = x->field_type();
++  bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);
+ 
+   CodeEmitInfo* info = NULL;
+   if (needs_patching) {
+@@ -1327,9 +1350,9 @@
+     // load item if field not initialized
+     // load item if field not constant
+     // because of code patching we cannot inline constants
+-    if (field_type == T_BYTE || field_type == T_BOOLEAN) { 
++    if (field_type == T_BYTE || field_type == T_BOOLEAN) {
+       value.load_byte_item();
+-    } else  {  
++    } else  {
+       value.load_item();
+     }
+   } else {
+@@ -1343,8 +1366,8 @@
+                   x->is_static() ?  "static" : "field", x->bci());
+   }
+ 
+-  if (x->needs_null_check() && 
+-      (needs_patching || 
++  if (x->needs_null_check() &&
++      (needs_patching ||
+        MacroAssembler::needs_explicit_null_check(x->offset()))) {
+     // emit an explicit null check because the offset is too large
+     __ null_check(object.result(), new CodeEmitInfo(info));
+@@ -1374,8 +1397,8 @@
+     __ store(value.result(), address, info, patch_code);
+   }
+ 
+-  if (field_type == T_ARRAY || field_type == T_OBJECT) {
+-    write_barrier(object.result());
++  if (is_oop) {
++    post_barrier(object.result(), value.result());
+   }
+ 
+   if (is_volatile && os::is_MP()) {
+@@ -1411,8 +1434,8 @@
+                   x->is_static() ?  "static" : "field", x->bci());
+   }
+ 
+-  if (x->needs_null_check() && 
+-      (needs_patching || 
++  if (x->needs_null_check() &&
++      (needs_patching ||
+        MacroAssembler::needs_explicit_null_check(x->offset()))) {
+     // emit an explicit null check because the offset is too large
+     __ null_check(object.result(), new CodeEmitInfo(info));
+@@ -1538,7 +1561,7 @@
+ 
+   if (GenerateRangeChecks && needs_range_check) {
+     if (use_length) {
+-      // TODO: use a (modified) version of array_range_check that does not require a 
++      // TODO: use a (modified) version of array_range_check that does not require a
+       //       constant length to be loaded to a register
+       __ cmp(lir_cond_belowEqual, length.result(), index.result());
+       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
+@@ -1595,9 +1618,9 @@
+     }
+   }
+ 
+-  // do null check before moving exception oop into fixed register 
+-  // to avoid a fixed interval with an oop during the null check.  
+-  // Use a copy of the CodeEmitInfo because debug information is 
++  // do null check before moving exception oop into fixed register
++  // to avoid a fixed interval with an oop during the null check.
++  // Use a copy of the CodeEmitInfo because debug information is
+   // different for null_check and throw.
+   if (GenerateCompilerNullChecks &&
+       (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
+@@ -1631,7 +1654,7 @@
+ 
+   // move exception oop into fixed register
+   __ move(exception_opr, exceptionOopOpr());
+-    
++
+   if (unwind) {
+     __ unwind_exception(LIR_OprFact::illegalOpr, exceptionOopOpr(), info);
+   } else {
+@@ -1677,16 +1700,18 @@
+   assert(!x->has_index() || idx.value() == x->index(), "should match");
+ 
+   LIR_Opr base_op = base.result();
++#ifndef _LP64
+   if (x->base()->type()->tag() == longTag) {
+     base_op = new_register(T_INT);
+     __ convert(Bytecodes::_l2i, base.result(), base_op);
+   } else {
+     assert(x->base()->type()->tag() == intTag, "must be");
+   }
++#endif
+ 
+   BasicType dst_type = x->basic_type();
+   LIR_Opr index_op = idx.result();
+-  
++
+   LIR_Address* addr;
+   if (index_op->is_constant()) {
+     assert(log2_scale == 0, "must not have a scale");
+@@ -1725,7 +1750,7 @@
+   LIRItem base(x->base(), this);
+   LIRItem value(x->value(), this);
+   LIRItem idx(this);
+-  
++
+   base.load_item();
+   if (x->has_index()) {
+     idx.set_instruction(x->index());
+@@ -1740,8 +1765,15 @@
+ 
+   set_no_result(x);
+ 
+-  LIR_Opr intBase = new_register(T_INT);
+-  __ convert(Bytecodes::_l2i, base.result(), intBase);
++  LIR_Opr base_op = base.result();
++#ifndef _LP64
++  if (x->base()->type()->tag() == longTag) {
++    base_op = new_register(T_INT);
++    __ convert(Bytecodes::_l2i, base.result(), base_op);
++  } else {
++    assert(x->base()->type()->tag() == intTag, "must be");
++  }
++#endif
+ 
+   LIR_Opr index_op = idx.result();
+   if (log2_scale != 0) {
+@@ -1751,7 +1783,7 @@
+     __ shift_left(index_op, log2_scale, index_op);
+   }
+ 
+-  LIR_Address* addr = new LIR_Address(intBase, index_op, x->basic_type());
++  LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
+   __ move(value.result(), addr);
+ }
+ 
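
The do_UnsafeGetRaw and do_UnsafePutRaw hunks above make the l2i narrowing of a long Unsafe base conditional on #ifndef _LP64: a 32-bit target keeps only the low word, while a 64-bit target uses the long value as the pointer directly. The same decision sketched outside LIR for clarity (plain C++, illustrative only):

    #include <cstdint>
    #include <cstdio>

    // A raw base from sun.misc.Unsafe arrives as a Java long; on ILP32
    // only the low 32 bits are addressable, so the generator narrows
    // with l2i, while on LP64 the long is already pointer-sized.
    uintptr_t base_for_target(int64_t raw_base, bool lp64) {
      if (lp64) {
        return (uintptr_t)raw_base;           // LP64: use the long directly
      }
      return (uintptr_t)(uint32_t)raw_base;   // ILP32: keep the low 32 bits
    }

    int main() {
      const int64_t base = 0x1122334455667788LL;
      std::printf("lp64:  %llx\n", (unsigned long long)base_for_target(base, true));
      std::printf("ilp32: %llx\n", (unsigned long long)base_for_target(base, false));
      return 0;
    }
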
+@@ -1783,7 +1815,7 @@
+     data.load_byte_item();
+   } else {
+     data.load_item();
+-  }  
++  }
+   off.load_item();
+ 
+   set_no_result(x);
+@@ -2061,7 +2093,7 @@
+ 
+       CodeEmitInfo* info = new CodeEmitInfo(SynchronizationEntryBCI, scope()->start()->state(), NULL);
+       CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
+-      
++
+       // receiver is guaranteed non-NULL so don't need CodeEmitInfo
+       __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
+     }
+@@ -2182,7 +2214,7 @@
+ 
+   // emit invoke code
+   bool optimized = x->target_is_loaded() && x->target_is_final();
+-  assert(receiver->is_illegal() || receiver->is_equivalent(LIR_Assembler::receiverOpr()), "must match");
++  assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
+ 
+   switch (x->code()) {
+     case Bytecodes::_invokestatic:
+@@ -2321,14 +2353,14 @@
+   // java.nio.Buffer.checkIndex
+   case vmIntrinsics::_checkIndex:     do_NIOCheckIndex(x); break;
+ 
+-  case vmIntrinsics::_compareAndSwapObject: 
+-    do_CompareAndSwap(x, objectType); 
++  case vmIntrinsics::_compareAndSwapObject:
++    do_CompareAndSwap(x, objectType);
+     break;
+-  case vmIntrinsics::_compareAndSwapInt: 
+-    do_CompareAndSwap(x, intType); 
++  case vmIntrinsics::_compareAndSwapInt:
++    do_CompareAndSwap(x, intType);
+     break;
+-  case vmIntrinsics::_compareAndSwapLong: 
+-    do_CompareAndSwap(x, longType); 
++  case vmIntrinsics::_compareAndSwapLong:
++    do_CompareAndSwap(x, longType);
+     break;
+ 
+     // sun.misc.AtomicLongCSImpl.attemptUpdate
+@@ -2359,7 +2391,7 @@
+ void LIRGenerator::do_ProfileCounter(ProfileCounter* x) {
+   LIRItem mdo(x->mdo(), this);
+   mdo.load_item();
+-  
++
+   increment_counter(new LIR_Address(mdo.result(), x->offset(), T_INT), x->increment());
+ }
+ 
+@@ -2398,7 +2430,7 @@
+     result = new_register(result_type);
+     phys_reg = result_register_for(result_type);
+   }
+-  
++
+   // move the arguments into the correct location
+   CallingConvention* cc = frame_map()->c_calling_convention(signature);
+   assert(cc->length() == args->length(), "argument mismatch");
+@@ -2421,7 +2453,7 @@
+       }
+     }
+   }
+-      
++
+   if (info) {
+     __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
+   } else {
+@@ -2443,7 +2475,7 @@
+     result = new_register(result_type);
+     phys_reg = result_register_for(result_type);
+   }
+-  
++
+   // move the arguments into the correct location
+   CallingConvention* cc = frame_map()->c_calling_convention(signature);
+ 
+@@ -2463,7 +2495,7 @@
+       }
+     }
+   }
+-  
++
+   if (info) {
+     __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
+   } else {
+@@ -2500,5 +2532,3 @@
+   }
+ #endif
+ }
+-
+-
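
The largest functional change in this file replaces the unconditional write_barrier() with a post_barrier() entry point that dispatches on the BarrierSet kind, with CardTableModRef_post_barrier() doing the actual card dirtying. A self-contained model of the card math; card_shift and the table here are local stand-ins for CardTableModRefBS::card_shift and byte_map_base, and the real code folds the heap base into byte_map_base rather than subtracting it explicitly:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    const int card_shift = 9;             // 512-byte cards, as in HotSpot
    static uint8_t card_table[1 << 12];   // toy table for a 2 MB heap

    // Dirty the card covering the field that was just stored to.
    void post_barrier(uintptr_t heap_base, uintptr_t field_addr) {
      size_t card = (field_addr - heap_base) >> card_shift;
      card_table[card] = 0;               // 0 == dirty, matching intConst(0)
    }

    int main() {
      std::memset(card_table, 0xff, sizeof(card_table));  // all clean
      uintptr_t heap = 0x40000000;
      post_barrier(heap, heap + 3 * 512 + 40);            // store into card 3
      std::printf("card 3: %s\n", card_table[3] == 0 ? "dirty" : "clean");
      return 0;
    }
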
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp openjdk/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c1_LIRGenerator.hpp	1.13 07/05/05 17:05:07 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The classes responsible for code emission and register allocation
+@@ -70,9 +67,9 @@
+   bool       _assigned;      // Value assigned to this Node?
+   bool       _visited;       // Node already visited?
+   bool       _start_node;    // Start node already visited?
+-  
++
+  public:
+-  ResolveNode(LIR_Opr operand) 
++  ResolveNode(LIR_Opr operand)
+     : _operand(operand)
+     , _assigned(false)
+     , _visited(false)
+@@ -81,7 +78,7 @@
+   // accessors
+   LIR_Opr operand() const           { return _operand; }
+   int no_of_destinations() const    { return _destinations.length(); }
+-  ResolveNode* destination_at(int i)     { return _destinations[i]; } 
++  ResolveNode* destination_at(int i)     { return _destinations[i]; }
+   bool assigned() const             { return _assigned; }
+   bool visited() const              { return _visited; }
+   bool start_node() const           { return _start_node; }
+@@ -170,7 +167,7 @@
+ #endif
+   LIR_List* lir() const {
+     return _lir;
+-  }  
++  }
+ 
+   // a simple cache of constants used within a block
+   GrowableArray<LIR_Const*>       _constants;
+@@ -252,6 +249,19 @@
+   LIR_Opr call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info);
+   LIR_Opr call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info);
+ 
++  // GC Barriers
++
++  // generic interface
++
++  void post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
++
++  // specific implementations
++
++  // post barriers
++
++  void CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
++
++
+   static LIR_Opr result_register_for(ValueType* type, bool callee = false);
+ 
+   ciObject* get_jobject_constant(Value value);
+@@ -325,8 +335,6 @@
+   }
+   LIR_Address* emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr, BasicType type, bool needs_card_mark);
+ 
+-  void write_barrier(LIR_Opr addr);
+-
+   // machine preferences and characteristics
+   bool can_inline_as_constant(Value i) const;
+   bool can_inline_as_constant(LIR_Const* c) const;
+@@ -360,6 +368,15 @@
+   LIR_Opr new_register(Value value)              { return new_register(as_BasicType(value->type())); }
+   LIR_Opr new_register(ValueType* type)          { return new_register(as_BasicType(type)); }
+ 
++  // returns a register suitable for doing pointer math
++  LIR_Opr new_pointer_register() {
++#ifdef _LP64
++    return new_register(T_LONG);
++#else
++    return new_register(T_INT);
++#endif
++  }
++
+   static LIR_Condition lir_cond(If::Condition cond) {
+     LIR_Condition l;
+     switch (cond) {
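
The header hunks above declare the barrier interface and add new_pointer_register(), which the card-table barrier uses for its shifted temporary: pointer arithmetic must run in a register as wide as a native pointer, T_LONG on LP64 and T_INT otherwise. A trivial restatement with stand-in tokens (not HotSpot's BasicType values):

    #include <cstdio>

    enum BasicType { T_INT = 4, T_LONG = 8 };  // illustrative widths in bytes

    // Mirrors the width choice made by new_pointer_register() above.
    BasicType pointer_register_type() {
    #ifdef _LP64
      return T_LONG;   // 64-bit targets: pointer math needs a long register
    #else
      return T_INT;    // 32-bit targets: an int register is pointer-sized
    #endif
    }

    int main() {
      std::printf("pointer register width: %d bytes\n",
                  (int)pointer_register_type());
      return 0;
    }
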
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_LIR.hpp openjdk/hotspot/src/share/vm/c1/c1_LIR.hpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_LIR.hpp	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_LIR.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c1_LIR.hpp	1.133 07/05/05 17:05:05 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class BlockBegin;
+@@ -95,7 +92,16 @@
+   LIR_Const(jfloat f)                            { _value.set_type(T_FLOAT);   _value.set_jfloat(f); }
+   LIR_Const(jdouble d)                           { _value.set_type(T_DOUBLE);  _value.set_jdouble(d); }
+   LIR_Const(jobject o)                           { _value.set_type(T_OBJECT);  _value.set_jobject(o); }
+-                                       
++  LIR_Const(void* p) {
++#ifdef _LP64
++    assert(sizeof(jlong) >= sizeof(p), "too small");
++    _value.set_type(T_LONG);    _value.set_jlong((jlong)p);
++#else
++    assert(sizeof(jint) >= sizeof(p), "too small");
++    _value.set_type(T_INT);     _value.set_jint((jint)p);
++#endif
++  }
++
+   virtual BasicType type()       const { return _value.get_type(); }
+   virtual LIR_Const* as_constant()     { return this; }
+ 
+@@ -107,6 +113,13 @@
+   jint      as_jint_lo() const         { type_check(T_LONG  ); return low(_value.get_jlong()); }
+   jint      as_jint_hi() const         { type_check(T_LONG  ); return high(_value.get_jlong()); }
+ 
++#ifdef _LP64
++  address   as_pointer() const         { type_check(T_LONG  ); return (address)_value.get_jlong(); }
++#else
++  address   as_pointer() const         { type_check(T_INT   ); return (address)_value.get_jint(); }
++#endif
++
++
+   jint      as_jint_bits() const       { type_check(T_FLOAT, T_INT); return _value.get_jint(); }
+   jint      as_jint_lo_bits() const    {
+     if (type() == T_DOUBLE) {
+@@ -131,18 +144,18 @@
+     jfloat ok = 0.0f;
+     return jint_cast(f) == jint_cast(ok);
+   }
+-  
++
+   bool is_one_float() {
+     jfloat f = as_jfloat();
+     return !g_isnan(f) && g_isfinite(f) && f == 1.0;
+   }
+-  
++
+   bool is_zero_double() {
+     jdouble d = as_jdouble();
+     jdouble ok = 0.0;
+     return jlong_cast(d) == jlong_cast(ok);
+   }
+-  
++
+   bool is_one_double() {
+     jdouble d = as_jdouble();
+     return !g_isnan(d) && g_isfinite(d) && d == 1.0;
+@@ -154,7 +167,7 @@
+ //
+ // The class LIR_OprDesc represents a LIR instruction operand;
+ // it can be a register (ALU/FPU), stack location or a constant;
+-// Constants and addresses are represented as resource area allocated 
++// Constants and addresses are represented as resource area allocated
+ // structures (see above).
+ // Registers and stack locations are inlined into the this pointer
+ // (see value function).
+@@ -167,7 +180,7 @@
+   // [max...........|7 6 5 4|3 2 1 0]
+   //                             ^
+   //                    is_pointer bit
+-  // 
++  //
+   // lowest bit cleared, means it is a structure pointer
+   // we need  4 bits to represent types
+ 
+@@ -244,8 +257,6 @@
+   OprKind kind_field() const                     { return (OprKind)(value() & kind_mask); }
+   OprSize size_field() const                     { return (OprSize)(value() & size_mask); }
+ 
+-  intptr_t value_without_type() const            { return is_illegal() ? value() : value() & no_type_mask; }
+-
+   static char type_char(BasicType t);
+ 
+  public:
+@@ -258,12 +269,12 @@
+ 
+   enum OprType {
+       unknown_type  = 0 << type_shift    // means: not set (catch uninitialized types)
+-    , int_type      = 1 << type_shift    
+-    , long_type     = 2 << type_shift    
+-    , object_type   = 3 << type_shift    
+-    , pointer_type  = 4 << type_shift    
+-    , float_type    = 5 << type_shift    
+-    , double_type   = 6 << type_shift    
++    , int_type      = 1 << type_shift
++    , long_type     = 2 << type_shift
++    , object_type   = 3 << type_shift
++    , pointer_type  = 4 << type_shift
++    , float_type    = 5 << type_shift
++    , double_type   = 6 << type_shift
+   };
+   friend OprType as_OprType(BasicType t);
+   friend BasicType as_BasicType(OprType t);
+@@ -286,22 +297,15 @@
+       case T_INT:
+       case T_OBJECT:
+       case T_ARRAY:
+-      case T_ADDRESS:
+         return single_size;
+         break;
+-        
++
+       default:
+         ShouldNotReachHere();
+       }
+   }
+ 
+ 
+-  // returns a new LIR_Opr with the OprType of from and all other information the current LIR_Opr
+-  LIR_Opr with_type_of(LIR_Opr from) {
+-    assert(!is_pointer() && !from->is_pointer(), "only simple LIR_Oprs");
+-    return (LIR_Opr)(from->type_field() | value_without_type());
+-  }
+-
+   void validate_type() const PRODUCT_RETURN;
+ 
+   BasicType type() const {
+@@ -316,23 +320,17 @@
+ 
+   char type_char() const                         { return type_char((is_pointer()) ? pointer()->type() : type()); }
+ 
+-  bool is_same(LIR_Opr opr) const                { return this == opr; }
+-  // checks whether types are same or one type is unknown
+-  bool is_type_compatible(LIR_Opr opr) const     {
+-    return type_field() == unknown_type ||
+-      opr->type_field() == unknown_type ||
+-      type_field() == opr->type_field();
+-  }
+-
+-  // is_type_compatible and equal
+-  bool is_equivalent(LIR_Opr opr) const {
+-    if (!is_pointer()) {
+-      if (is_type_compatible(opr) && data() == opr->data() && kind_field() == opr->kind_field() && is_xmm_register() == opr->is_xmm_register()) {
+-        return true;
+-      }
+-    }
+-    // could also do checks for other pointers types
+-    return is_same(opr);
++  bool is_equal(LIR_Opr opr) const         { return this == opr; }
++  // checks whether the types are the same
++  bool is_same_type(LIR_Opr opr) const     {
++    assert(type_field() != unknown_type &&
++           opr->type_field() != unknown_type, "shouldn't see unknown_type");
++    return type_field() == opr->type_field();
++  }
++  bool is_same_register(LIR_Opr opr) {
++    return (is_register() && opr->is_register() &&
++            kind_field() == opr->kind_field() &&
++            (value() & no_type_mask) == (opr->value() & no_type_mask));
+   }
+ 
+   bool is_pointer() const      { return check_value_mask(pointer_mask, pointer_value); }
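
The hunk above tightens operand comparison: the old type-tolerant is_equivalent() gives way to is_equal() (identity), is_same_type(), and is_same_register(), which masks off only the type bits before comparing the packed operand words. A standalone sketch of that mask-compare idiom (the bit layout below is assumed for illustration, not LIR_OprDesc's real encoding):

    #include <cassert>
    #include <cstdint>

    typedef uintptr_t OprWord;   // a packed operand word, as in LIR_OprDesc

    const OprWord type_mask    = OprWord(0xF) << 3;  // assumed type-bit position
    const OprWord no_type_mask = ~type_mask;

    // Same register iff everything except the type bits matches.
    static bool same_register(OprWord a, OprWord b) {
      return (a & no_type_mask) == (b & no_type_mask);
    }

    int main() {
      OprWord r5_int  = (OprWord(5) << 7) | (OprWord(1) << 3);  // reg 5, int type
      OprWord r5_long = (OprWord(5) << 7) | (OprWord(2) << 3);  // reg 5, long type
      OprWord r6_int  = (OprWord(6) << 7) | (OprWord(1) << 3);  // different register
      assert( same_register(r5_int, r5_long));
      assert(!same_register(r5_int, r6_int));
      return 0;
    }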
+@@ -349,11 +347,11 @@
+   bool is_oop() const;
+ 
+   // semantic for fpu- and xmm-registers:
+-  // * is_float and is_double return true for xmm_registers 
++  // * is_float and is_double return true for xmm_registers
+   //   (so is_single_fpu and is_single_xmm are true)
+   // * So you must always check for is_???_xmm prior to is_???_fpu to
+   //   distinguish between fpu- and xmm-registers
+-  
++
+   bool is_stack() const        { validate_type(); return check_value_mask(kind_mask,                stack_value);                 }
+   bool is_single_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | single_size);  }
+   bool is_double_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | double_size);  }
+@@ -409,6 +407,16 @@
+   Register as_register_lo() const;
+   Register as_register_hi() const;
+ 
++  Register as_pointer_register() {
++#ifdef _LP64
++    if (is_double_cpu()) {
++      assert(as_register_lo() == as_register_hi(), "should be a single register");
++      return as_register_lo();
++    }
++#endif
++    return as_register();
++  }
++
+ #ifdef IA32
+   XMMRegister as_xmm_float_reg() const;
+   XMMRegister as_xmm_double_reg() const;
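
as_pointer_register(), added above, leans on an LP64 convention: a T_LONG ("double cpu") operand records the same physical register in both halves, so the assert checks lo == hi and either half serves as the pointer register. A stripped-down sketch under that assumption (Register and LongOpr are stand-ins):

    #include <cassert>

    typedef int Register;                 // stand-in for HotSpot's register handle

    struct LongOpr { Register lo, hi; };  // a two-half T_LONG operand

    static Register pointer_register(const LongOpr& opr) {
    #ifdef _LP64
      // LP64 convention: both halves name the same physical register.
      assert(opr.lo == opr.hi && "should be a single register");
    #endif
      return opr.lo;
    }

    int main() {
      LongOpr rax_like = { 3, 3 };        // same register recorded twice
      assert(pointer_register(rax_like) == 3);
      return 0;
    }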
+@@ -481,14 +489,14 @@
+   BasicType _type;
+ 
+  public:
+-  LIR_Address(LIR_Opr base, LIR_Opr index, BasicType type): 
++  LIR_Address(LIR_Opr base, LIR_Opr index, BasicType type):
+        _base(base)
+      , _index(index)
+      , _scale(times_1)
+      , _type(type)
+      , _disp(0) { verify(); }
+ 
+-  LIR_Address(LIR_Opr base, int disp, BasicType type): 
++  LIR_Address(LIR_Opr base, int disp, BasicType type):
+        _base(base)
+      , _index(LIR_OprDesc::illegalOpr())
+      , _scale(times_1)
+@@ -496,7 +504,7 @@
+      , _disp(disp) { verify(); }
+ 
+ #ifdef IA32
+-  LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, int disp, BasicType type): 
++  LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, int disp, BasicType type):
+        _base(base)
+      , _index(index)
+      , _scale(scale)
+@@ -530,7 +538,7 @@
+   static LIR_Opr single_cpu(int reg)            { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift) |                                     LIR_OprDesc::int_type    | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
+   static LIR_Opr single_cpu_oop(int reg)        { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift) |                                     LIR_OprDesc::object_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
+   static LIR_Opr double_cpu(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::long_type   | LIR_OprDesc::cpu_register | LIR_OprDesc::double_size); }
+-  
++
+   static LIR_Opr single_fpu(int reg)            { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift) |                                     LIR_OprDesc::float_type  | LIR_OprDesc::fpu_register | LIR_OprDesc::single_size); }
+ 
+ #ifdef SPARC
+@@ -542,7 +550,7 @@
+   static LIR_Opr double_xmm(int reg)            { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift) | (reg  << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size | LIR_OprDesc::is_xmm_mask); }
+ #endif
+ 
+-  
++
+   static LIR_Opr virtual_register(int index, BasicType type) {
+     LIR_Opr res;
+     switch (type) {
+@@ -569,7 +577,7 @@
+                                LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
+     assert(res == old_res, "old and new method not equal");
+ #endif
+-  
++
+     return res;
+   }
+ 
+@@ -606,6 +614,8 @@
+   static LIR_Opr doubleConst(jdouble d)          { return (LIR_Opr)(new LIR_Const(d)); }
+   static LIR_Opr oopConst(jobject o)             { return (LIR_Opr)(new LIR_Const(o)); }
+   static LIR_Opr address(LIR_Address* a)         { return (LIR_Opr)a; }
++  static LIR_Opr intptrConst(void* p)            { return (LIR_Opr)(new LIR_Const(p)); }
++  static LIR_Opr intptrConst(intptr_t v)         { return (LIR_Opr)(new LIR_Const((void*)v)); }
+   static LIR_Opr illegal()                       { return (LIR_Opr)-1; }
+ 
+   static LIR_Opr value_type(ValueType* type);
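
The two intptrConst() factories above are the user-facing entry points for the pointer constants introduced earlier: one takes a raw void*, the other an intptr_t that is simply funneled through the pointer form, so both yield the same pointer-width constant. A trivial sketch of that overload-forwarding shape (names are stand-ins for the LIR_OprFact methods):

    #include <cassert>
    #include <cstdint>

    // Both overloads collapse onto one representation, as in LIR_OprFact.
    static void* intptr_const(void* p)    { return p; }
    static void* intptr_const(intptr_t v) { return intptr_const((void*)v); }

    int main() {
      int x = 0;
      assert(intptr_const((intptr_t)&x) == intptr_const((void*)&x));
      return 0;
    }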
+@@ -617,7 +627,7 @@
+ //                   LIR Instructions
+ //-------------------------------------------------------------------------------
+ //
+-// Note: 
++// Note:
+ //  - every instruction has a result operand
+ //  - every instruction has a CodeEmitInfo operand (can be revisited later)
+ //  - every instruction has a LIR_OpCode operand
+@@ -713,7 +723,6 @@
+       , lir_logic_or
+       , lir_logic_xor
+       , lir_shl
+-      , lir_shlx
+       , lir_shr
+       , lir_ushr
+       , lir_alloc_array
+@@ -771,7 +780,7 @@
+ };
+ 
+ 
+-enum LIR_PatchCode { 
++enum LIR_PatchCode {
+   lir_patch_none,
+   lir_patch_low,
+   lir_patch_high,
+@@ -814,7 +823,7 @@
+   static bool is_in_range(LIR_Code test, LIR_Code start, LIR_Code end)  { return start < test && test < end; }
+ 
+  public:
+-  LIR_Op() 
++  LIR_Op()
+     : _result(LIR_OprFact::illegalOpr)
+     , _code(lir_none)
+     , _flags(0)
+@@ -829,7 +838,7 @@
+ 
+   LIR_Op(LIR_Code code, LIR_Opr result, CodeEmitInfo* info)
+     : _result(result)
+-    , _code(code) 
++    , _code(code)
+     , _flags(0)
+     , _info(info)
+ #ifdef ASSERT
+@@ -1048,7 +1057,7 @@
+   LIR_Opr         _opr;   // input operand
+   BasicType       _type;  // Operand types
+   LIR_PatchCode   _patch; // only required with patching (NEEDS_CLEANUP: do we want a special instruction for patching?)
+-  
++
+   static void print_patch_code(outputStream* out, LIR_PatchCode code);
+ 
+   void set_kind(LIR_MoveKind kind) {
+@@ -1092,7 +1101,7 @@
+   virtual const char * name() const PRODUCT_RETURN0;
+ 
+   void set_in_opr(LIR_Opr opr) { _opr = opr; }
+-  
++
+   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
+   virtual void verify() const;
+ };
+@@ -1130,7 +1139,7 @@
+   BlockBegin*   _block;  // if this is a branch to a block, this is the block
+   BlockBegin*   _ublock; // if this is a float-branch, this is the unordered block
+   CodeStub*     _stub;   // if this is a branch to a stub, this is the stub
+-  
++
+  public:
+   LIR_OpBranch(LIR_Condition cond, Label* lbl)
+     : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL)
+@@ -1365,7 +1374,7 @@
+   BasicType type()  const                        { return _type; }
+   LIR_Opr tmp_opr() const                        { return _tmp; }
+   LIR_Condition condition() const  {
+-    assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); return _condition; 
++    assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); return _condition;
+   }
+ 
+   void set_fpu_stack_size(int size)              { _fpu_stack_size = size; }
+@@ -1436,7 +1445,7 @@
+   LIR_Opr in_opr1() const                        { return _opr1; }
+   LIR_Opr in_opr2() const                        { return _opr2; }
+   LIR_Opr in_opr3() const                        { return _opr3; }
+-  
++
+   virtual void emit_code(LIR_Assembler* masm);
+   virtual LIR_Op3* as_Op3() { return this; }
+   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
+@@ -1463,7 +1472,7 @@
+   LIR_Opr _scratch;
+   CodeStub* _stub;
+  public:
+-  LIR_OpLock(LIR_Code code, LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info) 
++  LIR_OpLock(LIR_Code code, LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info)
+     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
+     , _hdr(hdr)
+     , _obj(obj)
+@@ -1534,7 +1543,6 @@
+   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
+ };
+ 
+-
+ // LIR_OpProfileCall
+ class LIR_OpProfileCall : public LIR_Op {
+  friend class LIR_OpVisitState;
+@@ -1573,10 +1581,9 @@
+ 
+ class LIR_InsertionBuffer;
+ 
+-
+ //--------------------------------LIR_List---------------------------------------------------
+ // Maintains a list of LIR instructions (one instance of LIR_List per basic block)
+-// The LIR instructions are appended by the LIR_List class itself; 
++// The LIR instructions are appended by the LIR_List class itself;
+ //
+ // Notes:
+ // - all offsets are (should be) in bytes
+@@ -1621,7 +1628,7 @@
+ #ifdef ASSERT
+   void set_file_and_line(const char * file, int line);
+ #endif
+-  
++
+   //---------- accessors ---------------
+   LIR_OpList* instructions_list()                { return &_operations; }
+   int         length() const                     { return _operations.length(); }
+@@ -1640,7 +1647,7 @@
+   void print_instructions() PRODUCT_RETURN;
+ 
+ 
+-  //---------- instructions ------------- 
++  //---------- instructions -------------
+   void call_opt_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
+                         address dest, LIR_OprList* arguments,
+                         CodeEmitInfo* info) {
+@@ -1658,7 +1665,7 @@
+                     intptr_t vtable_offset, LIR_OprList* arguments, CodeEmitInfo* info) {
+     append(new LIR_OpJavaCall(lir_virtual_call, method, receiver, result, vtable_offset, arguments, info));
+   }
+-  
++
+   void get_thread(LIR_Opr result)                { append(new LIR_Op0(lir_get_thread, result)); }
+   void word_align()                              { append(new LIR_Op0(lir_word_align)); }
+   void membar()                                  { append(new LIR_Op0(lir_membar)); }
+@@ -1692,9 +1699,9 @@
+   void oop2reg_patch(jobject o, LIR_Opr reg, CodeEmitInfo* info);
+ 
+   void return_op(LIR_Opr result)                 { append(new LIR_Op1(lir_return, result)); }
+- 
++
+   void safepoint(LIR_Opr tmp, CodeEmitInfo* info)  { append(new LIR_Op1(lir_safepoint, tmp, info)); }
+- 
++
+   void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, ConversionStub* stub = NULL/*, bool is_32bit = false*/) { append(new LIR_OpConvert(code, left, dst, stub)); }
+ 
+   void logical_and (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_and,  left, right, dst)); }
+@@ -1737,7 +1744,7 @@
+   void sin (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_sin , from, tmp1, to, tmp2)); }
+   void cos (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_cos , from, tmp1, to, tmp2)); }
+   void tan (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_tan , from, tmp1, to, tmp2)); }
+- 
++
+   void add (LIR_Opr left, LIR_Opr right, LIR_Opr res)      { append(new LIR_Op2(lir_add, left, right, res)); }
+   void sub (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL) { append(new LIR_Op2(lir_sub, left, right, res, info)); }
+   void mul (LIR_Opr left, LIR_Opr right, LIR_Opr res) { append(new LIR_Op2(lir_mul, left, right, res)); }
+@@ -1840,11 +1847,11 @@
+   // list of insertion points. index and count are stored alternately:
+   // _index_and_count[i * 2]:     the index into lir list where "count" ops should be inserted
+   // _index_and_count[i * 2 + 1]: the number of ops to be inserted at index
+-  intStack    _index_and_count; 
++  intStack    _index_and_count;
+ 
+   // the LIR_Ops to be inserted
+   LIR_OpList  _ops;
+-  
++
+   void append_new(int index, int count)  { _index_and_count.append(index); _index_and_count.append(count); }
+   void set_index_at(int i, int value)    { _index_and_count.at_put((i << 1),     value); }
+   void set_count_at(int i, int value)    { _index_and_count.at_put((i << 1) + 1, value); }
+@@ -1982,15 +1989,15 @@
+     assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
+     return *_oprs_new[mode][index];
+   }
+-                                           
++
+   void set_opr_at(OprMode mode, int index, LIR_Opr opr) const {
+     assert(mode >= 0 && mode < numModes, "bad mode");
+     assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
+     *_oprs_new[mode][index] = opr;
+   }
+ 
+-  int info_count() const { 
+-    return _info_len; 
++  int info_count() const {
++    return _info_len;
+   }
+ 
+   CodeEmitInfo* info_at(int index) const {
+@@ -2025,4 +2032,3 @@
+ 
+ 
+ inline LIR_Opr LIR_OprDesc::illegalOpr()   { return LIR_OprFact::illegalOpr; };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_MacroAssembler.hpp openjdk/hotspot/src/share/vm/c1/c1_MacroAssembler.hpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_MacroAssembler.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_MacroAssembler.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c1_MacroAssembler.hpp	1.23 07/05/05 17:05:08 JVM"
+-#endif
+ /*
+  * Copyright 2000-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class CodeEmitInfo;
+@@ -83,5 +80,3 @@
+   int call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2);
+   int call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2, Register arg3);
+ };
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_Optimizer.cpp openjdk/hotspot/src/share/vm/c1/c1_Optimizer.cpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_Optimizer.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_Optimizer.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)c1_Optimizer.cpp	1.71 07/05/05 17:05:09 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -59,7 +56,7 @@
+     for (int i = 0; i < e; i++) {
+       BlockBegin* xhandler = sux->exception_handler_at(i);
+       block->add_exception_handler(xhandler);
+-      
++
+       assert(xhandler->is_predecessor(sux), "missing predecessor");
+       if (sux->number_of_preds() == 0) {
+         // sux is disconnected from graph so disconnect from exception handlers
+@@ -120,7 +117,7 @@
+     Value sux_phi = sux_state->stack_at(if_->state()->stack_size());
+     if (sux_phi == NULL || sux_phi->as_Phi() == NULL || sux_phi->as_Phi()->block() != sux) return;
+     if (sux_phi->type()->size() != sux_state->stack_size() - if_->state()->stack_size()) return;
+-  
++
+     // get the values that were pushed in the true- and false-branch
+     Value t_value = t_goto->state()->stack_at(if_->state()->stack_size());
+     Value f_value = f_goto->state()->stack_at(if_->state()->stack_size());
+@@ -502,7 +499,7 @@
+     _visitable_instructions = new ValueSet();
+     _visitor.set_eliminator(this);
+   }
+-  
++
+   Optimizer*  opt()                               { return _opt; }
+   IR*         ir ()                               { return opt()->ir(); }
+ 
+@@ -1027,7 +1024,7 @@
+                   ir()->method()->name()->as_utf8(),
+                   ir()->method()->signature()->as_symbol()->as_utf8());
+   }
+-  
++
+   // Apply to graph
+   nce.iterate(ir()->start());
+ 
+@@ -1063,7 +1060,7 @@
+     }
+   }
+ 
+- 
++
+   if (PrintNullCheckElimination) {
+     tty->print_cr("Done with null check elimination for method %s::%s%s",
+                   ir()->method()->holder()->name()->as_utf8(),
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_Optimizer.hpp openjdk/hotspot/src/share/vm/c1/c1_Optimizer.hpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_Optimizer.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_Optimizer.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c1_Optimizer.hpp	1.16 07/05/05 17:05:09 JVM"
+-#endif
+ /*
+  * Copyright 1999-2001 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,13 +19,13 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class Optimizer VALUE_OBJ_CLASS_SPEC {
+  private:
+   IR* _ir;
+-  
++
+  public:
+   Optimizer(IR* ir);
+   IR* ir() const                                 { return _ir; }
+@@ -38,4 +35,3 @@
+   void eliminate_blocks();
+   void eliminate_null_checks();
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_Runtime1.cpp openjdk/hotspot/src/share/vm/c1/c1_Runtime1.cpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_Runtime1.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_Runtime1.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)c1_Runtime1.cpp	1.244 07/06/01 13:28:38 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -143,7 +140,7 @@
+                                         locs_buffer_size / sizeof(relocInfo));
+   code->initialize_consts_size(desired_max_constant_size());
+   // Call stubs + deopt/exception handler
+-  code->initialize_stubs_size((call_stub_estimate * LIR_Assembler::call_stub_size) + 
++  code->initialize_stubs_size((call_stub_estimate * LIR_Assembler::call_stub_size) +
+                               LIR_Assembler::exception_handler_size +
+                               LIR_Assembler::deopt_handler_size);
+ }
+@@ -193,12 +190,12 @@
+   // make sure all code is in code buffer
+   sasm->flush();
+   // create blob - distinguish a few special cases
+-  CodeBlob* blob = RuntimeStub::new_runtime_stub(name_for(id), 
+-						 &code,
+-						 CodeOffsets::frame_never_safe,
+-						 sasm->frame_size(),
+-						 oop_maps,
+-						 sasm->must_gc_arguments());
++  CodeBlob* blob = RuntimeStub::new_runtime_stub(name_for(id),
++                                                 &code,
++                                                 CodeOffsets::frame_never_safe,
++                                                 sasm->frame_size(),
++                                                 oop_maps,
++                                                 sasm->must_gc_arguments());
+   // install blob
+   assert(blob != NULL, "blob must exist");
+   _blobs[id] = blob;
+@@ -278,7 +275,7 @@
+   FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
+   FUNCTION_CASE(entry, trace_block_entry);
+ 
+-#undef FUNCTION_CASE    
++#undef FUNCTION_CASE
+ 
+   return "<unknown function>";
+ }
+@@ -464,7 +461,7 @@
+ 
+     RegisterMap reg_map(thread);
+     frame stub_frame = thread->last_frame();
+-    frame caller_frame = stub_frame.sender(&reg_map); 
++    frame caller_frame = stub_frame.sender(&reg_map);
+ 
+     // We don't really want to deoptimize the nmethod itself since we
+     // can actually continue in the exception handler ourselves but I
+@@ -701,7 +698,7 @@
+ // patch_stub: call Runtime1::patch_code (through a runtime stub)
+ //             jmp patch_site
+ //
+-// 
++//
+ // A normal patch is done by rewriting the patch body, usually a move,
+ // and then copying it into place over top of the jmp instruction
+ // being careful to flush caches and doing it in an MP-safe way.  The
+@@ -1024,7 +1021,7 @@
+ //
+ int Runtime1::move_klass_patching(JavaThread* thread) {
+ //
+-// NOTE: we are still in Java 
++// NOTE: we are still in Java
+ //
+   Thread* THREAD = thread;
+   debug_only(NoHandleMark nhm;)
+@@ -1053,7 +1050,7 @@
+ 
+ int Runtime1::access_field_patching(JavaThread* thread) {
+ //
+-// NOTE: we are still in Java 
++// NOTE: we are still in Java
+ //
+   Thread* THREAD = thread;
+   debug_only(NoHandleMark nhm;)
+@@ -1115,7 +1112,7 @@
+     // Note that we use the non-virtual inlineable variant of write_ref_array.
+     BarrierSet* bs = Universe::heap()->barrier_set();
+     assert(bs->has_write_ref_array_opt(),
+-	   "Barrier set must have ref array opt");
++           "Barrier set must have ref array opt");
+     if (src == dst) {
+       // same object, no check
+       Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_Runtime1.hpp openjdk/hotspot/src/share/vm/c1/c1_Runtime1.hpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_Runtime1.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_Runtime1.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c1_Runtime1.hpp	1.140 07/05/17 15:49:48 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class StubAssembler;
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_ValueMap.cpp openjdk/hotspot/src/share/vm/c1/c1_ValueMap.cpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_ValueMap.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_ValueMap.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)c1_ValueMap.cpp	1.29 07/05/05 17:05:08 JVM"
+-#endif
+ /*
+  * Copyright 1999-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -44,7 +41,7 @@
+ #endif
+ 
+ 
+-ValueMap::ValueMap() 
++ValueMap::ValueMap()
+   : _nesting(0)
+   , _entries(ValueMapInitialSize, NULL)
+   , _killed_values()
+@@ -54,7 +51,7 @@
+ }
+ 
+ 
+-ValueMap::ValueMap(ValueMap* old) 
++ValueMap::ValueMap(ValueMap* old)
+   : _nesting(old->_nesting + 1)
+   , _entries(old->_entries.length())
+   , _killed_values()
+@@ -118,10 +115,10 @@
+ 
+         if (!is_killed(f) && f->is_equal(x)) {
+           NOT_PRODUCT(_number_of_hits++);
+-          TRACE_VALUE_NUMBERING(tty->print_cr("Value Numbering: %s %c%d equal to %c%d  (size %d, entries %d, nesting-diff %d)", x->name(), x->type()->tchar(), x->id(), f->type()->tchar(), f->id(), size(), entry_count(), nesting() - entry->nesting())); 
++          TRACE_VALUE_NUMBERING(tty->print_cr("Value Numbering: %s %c%d equal to %c%d  (size %d, entries %d, nesting-diff %d)", x->name(), x->type()->tchar(), x->id(), f->type()->tchar(), f->id(), size(), entry_count(), nesting() - entry->nesting()));
+ 
+           if (entry->nesting() != nesting() && f->as_Constant() == NULL) {
+-            // non-constant values of of another block must be pinned, 
++            // non-constant values of another block must be pinned,
+             // otherwise it is possible that they are not evaluated
+             f->pin(Instruction::PinGlobalValueNumbering);
+           }
+@@ -140,7 +137,7 @@
+     _entries.at_put(idx, new ValueMapEntry(hash, x, nesting(), entry_at(idx)));
+     _entry_count++;
+ 
+-    TRACE_VALUE_NUMBERING(tty->print_cr("Value Numbering: insert %s %c%d  (size %d, entries %d, nesting %d)", x->name(), x->type()->tchar(), x->id(), size(), entry_count(), nesting())); 
++    TRACE_VALUE_NUMBERING(tty->print_cr("Value Numbering: insert %s %c%d  (size %d, entries %d, nesting %d)", x->name(), x->type()->tchar(), x->id(), size(), entry_count(), nesting()));
+   }
+ 
+   return x;
+@@ -284,7 +281,7 @@
+   void      kill_array(ValueType* type)          { current_map()->kill_array(type); };
+ 
+  public:
+-  ShortLoopOptimizer(GlobalValueNumbering* gvn) 
++  ShortLoopOptimizer(GlobalValueNumbering* gvn)
+     : _gvn(gvn)
+     , _loop_blocks(ValueMapMaxLoopSize)
+     , _too_complicated_loop(false)
+@@ -340,7 +337,7 @@
+ }
+ 
+ 
+-GlobalValueNumbering::GlobalValueNumbering(IR* ir) 
++GlobalValueNumbering::GlobalValueNumbering(IR* ir)
+   : _current_map(NULL)
+   , _value_maps(ir->linear_scan_order()->length(), NULL)
+ {
+@@ -376,7 +373,7 @@
+     if (num_preds == 1) {
+       assert(dominator == block->pred_at(0), "dominator must be equal to predecessor");
+       // nothing to do here
+-    
++
+     } else if (block->is_set(BlockBegin::linear_scan_loop_header_flag)) {
+       // block has incoming backward branches -> try to optimize short loops
+       if (!short_loop_optimizer.process(block)) {
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_ValueMap.hpp openjdk/hotspot/src/share/vm/c1/c1_ValueMap.hpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_ValueMap.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_ValueMap.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c1_ValueMap.hpp	1.22 07/05/05 17:05:07 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class ValueMapEntry: public CompilationResourceObj {
+@@ -33,7 +30,7 @@
+   ValueMapEntry* _next;
+ 
+  public:
+-  ValueMapEntry(intx hash, Value value, int nesting, ValueMapEntry* next) 
++  ValueMapEntry(intx hash, Value value, int nesting, ValueMapEntry* next)
+     : _hash(hash)
+     , _value(value)
+     , _nesting(nesting)
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_ValueSet.cpp openjdk/hotspot/src/share/vm/c1/c1_ValueSet.cpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_ValueSet.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_ValueSet.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)c1_ValueSet.cpp	1.12 07/05/05 17:05:09 JVM"
+-#endif
+ /*
+  * Copyright 2001-2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_ValueSet.hpp openjdk/hotspot/src/share/vm/c1/c1_ValueSet.hpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_ValueSet.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_ValueSet.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c1_ValueSet.hpp	1.15 07/05/05 17:05:09 JVM"
+-#endif
+ /*
+  * Copyright 2001-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A ValueSet is a simple abstraction on top of a BitMap representing
+@@ -36,7 +33,7 @@
+ 
+  public:
+   ValueSet();
+-  
++
+   ValueSet* copy();
+   bool contains(Value x);
+   void put     (Value x);
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_ValueStack.cpp openjdk/hotspot/src/share/vm/c1/c1_ValueStack.cpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_ValueStack.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_ValueStack.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)c1_ValueStack.cpp	1.65 07/05/05 17:05:09 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -286,4 +283,3 @@
+   Unimplemented();
+ }
+ #endif // PRODUCT
+-
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_ValueStack.hpp openjdk/hotspot/src/share/vm/c1/c1_ValueStack.hpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_ValueStack.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_ValueStack.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c1_ValueStack.hpp	1.51 07/05/05 17:05:10 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,13 +19,13 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class ValueStack: public CompilationResourceObj {
+  private:
+   IRScope* _scope;                               // the enclosing scope
+-  bool     _lock_stack;                          // indicates that this ValueStack is for an exception site    
++  bool     _lock_stack;                          // indicates that this ValueStack is for an exception site
+   Values   _locals;                              // the locals
+   Values   _stack;                               // the expression stack
+   Values   _locks;                               // the monitor stack (holding the locked values)
+@@ -224,7 +221,7 @@
+   // SSA form IR support
+   void setup_phi_for_stack(BlockBegin* b, int index);
+   void setup_phi_for_local(BlockBegin* b, int index);
+-  
++
+   // debugging
+   void print()  PRODUCT_RETURN;
+   void verify() PRODUCT_RETURN;
+@@ -269,15 +266,15 @@
+   for (index = 0;                                                                              \
+        index < temp_var && (value = state->local_at(index), true);                             \
+        index += (value == NULL || value->type()->is_illegal() ? 1 : value->type()->size()))    \
+-    if (value != NULL)                
++    if (value != NULL)
+ 
+ 
+ #define for_each_stack_value(state, index, value)                                              \
+   int temp_var = state->stack_size();                                                          \
+   for (index = 0;                                                                              \
+        index < temp_var && (value = state->stack_at(index), true);                             \
+-       index += value->type()->size())                                   
+-        
++       index += value->type()->size())
++
+ 
+ #define for_each_lock_value(state, index, value)                                               \
+   int temp_var = state->locks_size();                                                          \
+@@ -312,10 +309,10 @@
+       v_code;                                                                                  \
+     }                                                                                          \
+   }                                                                                            \
+-}            
++}
+ 
+ 
+-// Macro definition for simple iteration of all phif functions of a block, i.e all 
++// Macro definition for simple iteration of all phi functions of a block, i.e. all
+ // phi functions of the ValueStack where the block matches.
+ // Use the following code pattern to iterate all phi functions of a block:
+ //
+@@ -345,4 +342,4 @@
+       }                                                                                        \
+     }                                                                                          \
+   }                                                                                            \
+-}            
++}
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_ValueType.cpp openjdk/hotspot/src/share/vm/c1/c1_ValueType.cpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_ValueType.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_ValueType.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)c1_ValueType.cpp	1.22 07/05/05 17:05:10 JVM"
+-#endif
+ /*
+  * Copyright 1999-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -65,7 +62,7 @@
+   classType    = new ClassType();
+   addressType  = new AddressType();
+   illegalType  = new IllegalType();
+-  
++
+   // constants
+   intZero     = new IntConstant(0);
+   intOne      = new IntConstant(1);
+diff -ruN openjdk6/hotspot/src/share/vm/c1/c1_ValueType.hpp openjdk/hotspot/src/share/vm/c1/c1_ValueType.hpp
+--- openjdk6/hotspot/src/share/vm/c1/c1_ValueType.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/c1/c1_ValueType.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c1_ValueType.hpp	1.35 07/05/05 17:05:10 JVM"
+-#endif
+ /*
+  * Copyright 1999-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // type hierarchy
+@@ -101,7 +98,7 @@
+ 
+   // accessors
+   virtual ValueType* base() const                = 0; // the 'canonical' type (e.g., intType for an IntConstant)
+-  ValueTag tag() const { return _tag; }          // the 'canonical' tag  (useful for type matching) 
++  ValueTag tag() const { return _tag; }          // the 'canonical' tag  (useful for type matching)
+   int size() const {                             // the size of an object of the type in words
+     assert(_size > -1, "shouldn't be asking for size");
+     return _size;
+diff -ruN openjdk6/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp openjdk/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp
+--- openjdk6/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)bcEscapeAnalyzer.cpp	1.7 07/05/17 15:49:50 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ 
+@@ -175,7 +172,7 @@
+   ciInstanceKlass* calling_klass = method()->holder();
+   ciInstanceKlass* callee_holder = ciEnv::get_instance_klass_for_declared_method_holder(holder);
+   ciInstanceKlass* actual_recv = callee_holder;
+-  
++
+   // compute size of arguments
+   int arg_size = target->arg_size();
+   if (!target->is_loaded() && code == Bytecodes::_invokestatic) {
+@@ -220,7 +217,7 @@
+   if (inline_target != NULL && !is_recursive_call(inline_target)) {
+     // analyze callee
+     BCEscapeAnalyzer analyzer(inline_target, this);
+-    
++
+     // adjust escape state of actual parameters
+     bool must_record_dependencies = false;
+     for (i = arg_size - 1; i >= 0; i--) {
+@@ -247,7 +244,7 @@
+     }
+   } else {
+     TRACE_BCEA(1, tty->print_cr("[EA] virtual method %s is not monomorphic.",
+-				target->name()->as_utf8()));
++                                target->name()->as_utf8()));
+     // conservatively mark all actual parameters as escaping globally
+     for (i = 0; i < arg_size; i++) {
+       ArgumentMap arg = state.raw_pop();
+@@ -1145,22 +1142,22 @@
+     if (BCEATraceLevel >= 1) {
+       tty->print("Skipping method because: ");
+       if (method()->is_abstract())
+-	tty->print_cr("method is abstract.");
++        tty->print_cr("method is abstract.");
+       else if (method()->is_native())
+-	tty->print_cr("method is native.");
++        tty->print_cr("method is native.");
+       else if (!method()->holder()->is_initialized())
+-	tty->print_cr("class of method is not initialized.");
++        tty->print_cr("class of method is not initialized.");
+       else if (_level > MaxBCEAEstimateLevel)
+-	tty->print_cr("level (%d) exceeds MaxBCEAEstimateLevel (%d).",
+-		      _level, MaxBCEAEstimateLevel);
++        tty->print_cr("level (%d) exceeds MaxBCEAEstimateLevel (%d).",
++                      _level, MaxBCEAEstimateLevel);
+       else if (method()->code_size() > MaxBCEAEstimateSize)
+-	tty->print_cr("code size (%d) exceeds MaxBCEAEstimateSize.",
+-		      method()->code_size(), MaxBCEAEstimateSize);
+-      else 
+-	ShouldNotReachHere();
++        tty->print_cr("code size (%d) exceeds MaxBCEAEstimateSize.",
++                      method()->code_size(), MaxBCEAEstimateSize);
++      else
++        ShouldNotReachHere();
+     }
+     clear_escape_info();
+-    
++
+     return;
+   }
+ 
+@@ -1299,13 +1296,13 @@
+     bool printit = _method->should_print_assembly();
+     if (methodData()->has_escape_info()) {
+       TRACE_BCEA(2, tty->print_cr("[EA] Reading previous results for %s.%s",
+-				  method->holder()->name()->as_utf8(),
+-				  method->name()->as_utf8()));
++                                  method->holder()->name()->as_utf8(),
++                                  method->name()->as_utf8()));
+       read_escape_info();
+     } else {
+       TRACE_BCEA(2, tty->print_cr("[EA] computing results for %s.%s",
+-				  method->holder()->name()->as_utf8(),
+-				  method->name()->as_utf8()));
++                                  method->holder()->name()->as_utf8(),
++                                  method->name()->as_utf8()));
+ 
+       compute_escape_info();
+       methodData()->update_escape_info();
+@@ -1322,4 +1319,3 @@
+     deps->assert_unique_concrete_method(k, m);
+   }
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/ci/bcEscapeAnalyzer.hpp openjdk/hotspot/src/share/vm/ci/bcEscapeAnalyzer.hpp
+--- openjdk6/hotspot/src/share/vm/ci/bcEscapeAnalyzer.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/bcEscapeAnalyzer.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)bcEscapeAnalyzer.hpp	1.6 07/05/05 17:05:11 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ define_array(ciObjectArray, ciObject*);
+@@ -38,7 +35,7 @@
+ class BCEscapeAnalyzer : public ResourceObj {
+  private:
+   bool              _conservative; // If true, return maximally
+-				   // conservative results.
++                                   // conservative results.
+   ciMethod*         _method;
+   ciMethodData*     _methodData;
+   int               _arg_size;
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciArray.cpp openjdk/hotspot/src/share/vm/ci/ciArray.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciArray.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciArray.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciArray.cpp	1.12 07/05/05 17:05:11 JVM"
+-#endif
+ /*
+  * Copyright 1999-2000 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -37,7 +34,7 @@
+ // ciArray::print_impl
+ //
+ // Implementation of the print method.
+-void ciArray::print_impl() {
+-  tty->print(" length=%d type=", length());
+-  klass()->print();
++void ciArray::print_impl(outputStream* st) {
++  st->print(" length=%d type=", length());
++  klass()->print(st);
+ }
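
The ciArray change above is part of a broader refactor in this patch: print_impl() now takes an explicit outputStream* instead of writing to the global tty, so diagnostics can be routed to any stream. A standalone sketch of the idiom (this outputStream is a local stand-in for HotSpot's class):

    #include <cstdarg>
    #include <cstdio>

    struct outputStream {              // stand-in with just what this needs
      std::FILE* f;
      void print(const char* fmt, ...) {
        va_list ap;
        va_start(ap, fmt);
        std::vfprintf(f, fmt, ap);
        va_end(ap);
      }
    };

    // Was: tty->print(" length=%d type=", length) against a global stream.
    static void print_array_impl(outputStream* st, int length) {
      st->print(" length=%d type=", length);
    }

    int main() {
      outputStream out = { stdout };
      print_array_impl(&out, 42);      // prints " length=42 type=" to stdout
      std::fputc('\n', stdout);
      return 0;
    }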
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciArray.hpp openjdk/hotspot/src/share/vm/ci/ciArray.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciArray.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciArray.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciArray.hpp	1.12 07/05/05 17:05:11 JVM"
+-#endif
+ /*
+  * Copyright 1999-2001 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciArray
+@@ -44,7 +41,7 @@
+ 
+   const char* type_string() { return "ciArray"; }
+ 
+-  void print_impl();
++  void print_impl(outputStream* st);
+ 
+ public:
+   int length() { return _length; }
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciArrayKlass.cpp openjdk/hotspot/src/share/vm/ci/ciArrayKlass.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciArrayKlass.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciArrayKlass.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciArrayKlass.cpp	1.14 07/05/05 17:05:12 JVM"
+-#endif
+ /*
+  * Copyright 1999-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciArrayKlass.hpp openjdk/hotspot/src/share/vm/ci/ciArrayKlass.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciArrayKlass.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciArrayKlass.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciArrayKlass.hpp	1.15 07/05/05 17:05:12 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciArrayKlass
+@@ -48,7 +45,7 @@
+   ciType* element_type();       // JLS calls this the "component type"
+   ciType* base_element_type();  // JLS calls this the "element type"
+   bool is_leaf_type();          // No subtypes of this array type.
+-  
++
+   ciInstance* component_mirror() {
+     // This is a real field in arrayKlass, but we derive it from element_type.
+     return element_type()->java_mirror();
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciArrayKlassKlass.hpp openjdk/hotspot/src/share/vm/ci/ciArrayKlassKlass.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciArrayKlassKlass.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciArrayKlassKlass.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciArrayKlassKlass.hpp	1.12 07/05/05 17:05:12 JVM"
+-#endif
+ /*
+  * Copyright 1999-2001 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciArrayKlassKlass
+@@ -31,13 +28,13 @@
+ // whose Klass part in a arrayKlassKlass.
+ class ciArrayKlassKlass : public ciKlassKlass {
+ protected:
+-  ciArrayKlassKlass(KlassHandle h_k, ciSymbol* name) 
++  ciArrayKlassKlass(KlassHandle h_k, ciSymbol* name)
+     : ciKlassKlass(h_k, name) {}
+ 
+   arrayKlassKlass* get_arrayKlassKlass() {
+     return (arrayKlassKlass*)get_Klass();
+   }
+-  
++
+   const char* type_string() { return "ciArrayKlassKlass"; }
+ 
+ public:
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciCallProfile.hpp openjdk/hotspot/src/share/vm/ci/ciCallProfile.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciCallProfile.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciCallProfile.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciCallProfile.hpp	1.17 07/05/05 17:05:12 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciCallProfile
+@@ -59,11 +56,11 @@
+   int       morphism()          { return _morphism; }
+ 
+   int       count()             { return _count; }
+-  int       receiver_count(int i)  { 
++  int       receiver_count(int i)  {
+     assert(i < _limit, "out of Call Profile MorphismLimit");
+     return _receiver_count[i];
+   }
+-  float     receiver_prob(int i)  { 
++  float     receiver_prob(int i)  {
+     assert(i < _limit, "out of Call Profile MorphismLimit");
+     return (float)_receiver_count[i]/(float)_count;
+   }
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciClassList.hpp openjdk/hotspot/src/share/vm/ci/ciClassList.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciClassList.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciClassList.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciClassList.hpp	1.21 07/05/05 17:05:11 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class ciEnv;
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciConstant.cpp openjdk/hotspot/src/share/vm/ci/ciConstant.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciConstant.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciConstant.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciConstant.cpp	1.13 07/05/05 17:05:12 JVM"
+-#endif
+ /*
+  * Copyright 1999-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -36,7 +33,7 @@
+ // ciConstant::print
+ void ciConstant::print() {
+   tty->print("<ciConstant type=%s value=",
+-	     basictype_to_str(basic_type()));
++             basictype_to_str(basic_type()));
+   switch (basic_type()) {
+   case T_BOOLEAN:
+     tty->print("%s", bool_to_str(_value._int == 0));
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciConstant.hpp openjdk/hotspot/src/share/vm/ci/ciConstant.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciConstant.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciConstant.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciConstant.hpp	1.17 07/05/05 17:05:12 JVM"
+-#endif
+ /*
+  * Copyright 1999-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciConstant
+@@ -44,7 +41,7 @@
+   } _value;
+ 
+   // Implementation of the print method.
+-  void print_impl();
++  void print_impl(outputStream* st);
+ 
+ public:
+ 
+@@ -56,16 +53,16 @@
+            "using the wrong ciConstant constructor");
+     _type = type; _value._int = value;
+   }
+-  ciConstant(jlong value) { 
++  ciConstant(jlong value) {
+     _type = T_LONG; _value._long = value;
+   }
+-  ciConstant(jfloat value) { 
++  ciConstant(jfloat value) {
+     _type = T_FLOAT; _value._float = value;
+   }
+-  ciConstant(jdouble value) { 
++  ciConstant(jdouble value) {
+     _type = T_DOUBLE; _value._double = value;
+   }
+-  ciConstant(BasicType type, ciObject* p) { 
++  ciConstant(BasicType type, ciObject* p) {
+     _type = type; _value._object = p;
+   }
+ 
+@@ -113,4 +110,3 @@
+   // Debugging output
+   void print();
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciConstantPoolCache.cpp openjdk/hotspot/src/share/vm/ci/ciConstantPoolCache.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciConstantPoolCache.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciConstantPoolCache.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciConstantPoolCache.cpp	1.12 07/05/05 17:05:12 JVM"
+-#endif
+ /*
+  * Copyright 1999 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciConstantPoolCache.hpp openjdk/hotspot/src/share/vm/ci/ciConstantPoolCache.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciConstantPoolCache.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciConstantPoolCache.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciConstantPoolCache.hpp	1.11 07/05/05 17:05:11 JVM"
+-#endif
+ /*
+  * Copyright 1999-2000 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciConstantPoolCache
+@@ -42,10 +39,9 @@
+ 
+   // Get the element associated with some index.
+   void* get(int index);
+-	
++
+   // Associate an element with an index.
+   void insert(int index, void* element);
+ 
+   void print();
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciEnv.cpp openjdk/hotspot/src/share/vm/ci/ciEnv.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciEnv.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciEnv.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciEnv.cpp	1.128 07/05/17 15:49:53 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -40,7 +37,7 @@
+ ciInstanceKlassKlass*  ciEnv::_instance_klass_klass_instance;
+ ciTypeArrayKlassKlass* ciEnv::_type_array_klass_klass_instance;
+ ciObjArrayKlassKlass*  ciEnv::_obj_array_klass_klass_instance;
+- 
++
+ ciInstanceKlass* ciEnv::_ArrayStoreException;
+ ciInstanceKlass* ciEnv::_Class;
+ ciInstanceKlass* ciEnv::_ClassCastException;
+@@ -87,7 +84,7 @@
+   assert(task == NULL || thread->task() == task, "sanity");
+   _task = task;
+   _log = NULL;
+-  
++
+   // Temporary buffer for creating symbols and such.
+   _name_buffer = NULL;
+   _name_buffer_len = 0;
+@@ -140,7 +137,7 @@
+   _num_inlined_bytecodes = 0;
+   _task = NULL;
+   _log = NULL;
+-  
++
+   // Temporary buffer for creating symbols and such.
+   _name_buffer = NULL;
+   _name_buffer_len = 0;
+@@ -270,7 +267,7 @@
+ // Note: the logic of this method should mirror the logic of
+ // constantPoolOopDesc::verify_constant_pool_resolve.
+ bool ciEnv::check_klass_accessibility(ciKlass* accessing_klass,
+-				      klassOop resolved_klass) {
++                                      klassOop resolved_klass) {
+   if (accessing_klass == NULL || !accessing_klass->is_loaded()) {
+     return true;
+   }
+@@ -287,8 +284,8 @@
+   }
+   if (resolved_klass->klass_part()->oop_is_instance()) {
+     return Reflection::verify_class_access(accessing_klass->get_klassOop(),
+-					   resolved_klass,
+-					   true);
++                                           resolved_klass,
++                                           true);
+   }
+   return true;
+ }
+@@ -360,8 +357,8 @@
+       (sym->byte_at(1) == '[' || sym->byte_at(1) == 'L')) {
+     // We have an unloaded array.
+     // Build it on the fly if the element class exists.
+-    symbolOop elem_sym = oopFactory::new_symbol(sym->as_utf8()+1, 
+-                                                sym->utf8_length()-1, 
++    symbolOop elem_sym = oopFactory::new_symbol(sym->as_utf8()+1,
++                                                sym->utf8_length()-1,
+                                                 KILL_COMPILE_ON_FATAL_(fail_type));
+     // Get element ciKlass recursively.
+     ciKlass* elem_klass =
+@@ -409,17 +406,17 @@
+       // We have to lock the cpool to keep the oop from being resolved
+       // while we are accessing it.
+       ObjectLocker ol(cpool, THREAD);
+-      
++
+       constantTag tag = cpool->tag_at(index);
+       if (tag.is_klass()) {
+-	// The klass has been inserted into the constant pool
+-	// very recently.
+-	klass = KlassHandle(THREAD, cpool->resolved_klass_at(index));
++        // The klass has been inserted into the constant pool
++        // very recently.
++        klass = KlassHandle(THREAD, cpool->resolved_klass_at(index));
+       } else if (tag.is_symbol()) {
+-	klass_name = symbolHandle(THREAD, cpool->symbol_at(index));
++        klass_name = symbolHandle(THREAD, cpool->symbol_at(index));
+       } else {
+-	assert(cpool->tag_at(index).is_unresolved_klass(), "wrong tag");
+-	klass_name = symbolHandle(THREAD, cpool->unresolved_klass_at(index));
++        assert(cpool->tag_at(index).is_unresolved_klass(), "wrong tag");
++        klass_name = symbolHandle(THREAD, cpool->unresolved_klass_at(index));
+       }
+     }
+   }
+@@ -472,7 +469,7 @@
+ //
+ // Implementation of get_constant_by_index().
+ ciConstant ciEnv::get_constant_by_index_impl(ciInstanceKlass* accessor,
+-					     int index) {
++                                             int index) {
+   EXCEPTION_CONTEXT;
+   instanceKlass* ik_accessor = accessor->get_instanceKlass();
+   assert(ik_accessor->is_linked(), "must be linked before accessing constant pool");
+@@ -505,7 +502,7 @@
+       record_out_of_memory_failure();
+       return ciConstant();
+     }
+-    assert (klass->is_instance_klass() || klass->is_array_klass(), 
++    assert (klass->is_instance_klass() || klass->is_array_klass(),
+             "must be an instance or array klass ");
+     return ciConstant(T_OBJECT, klass);
+   } else {
+@@ -545,7 +542,7 @@
+ //
+ // Implementation note: this query is currently in no way cached.
+ ciConstant ciEnv::get_constant_by_index(ciInstanceKlass* accessor,
+-					int index) {
++                                        int index) {
+   GUARDED_VM_ENTRY(return get_constant_by_index_impl(accessor, index); )
+ }
+ 
+@@ -556,7 +553,7 @@
+ //
+ // Implementation note: this query is currently in no way cached.
+ bool ciEnv::is_unresolved_string(ciInstanceKlass* accessor,
+-					int index) const {
++                                        int index) const {
+   GUARDED_VM_ENTRY(return is_unresolved_string_impl(accessor->get_instanceKlass(), index); )
+ }
+ 
+@@ -567,7 +564,7 @@
+ //
+ // Implementation note: this query is currently in no way cached.
+ bool ciEnv::is_unresolved_klass(ciInstanceKlass* accessor,
+-					int index) const {
++                                        int index) const {
+   GUARDED_VM_ENTRY(return is_unresolved_klass_impl(accessor->get_instanceKlass(), index); )
+ }
+ 
+@@ -579,7 +576,7 @@
+ // Implementation note: the results of field lookups are cached
+ // in the accessor klass.
+ ciField* ciEnv::get_field_by_index_impl(ciInstanceKlass* accessor,
+-					int index) {
++                                        int index) {
+   ciConstantPoolCache* cache = accessor->field_cache();
+   if (cache == NULL) {
+     ciField* field = new (arena()) ciField(accessor, index);
+@@ -599,7 +596,7 @@
+ //
+ // Get a field by index from a klass's constant pool.
+ ciField* ciEnv::get_field_by_index(ciInstanceKlass* accessor,
+-				   int index) {
++                                   int index) {
+   GUARDED_VM_ENTRY(return get_field_by_index_impl(accessor, index);)
+ }
+ 
+@@ -609,10 +606,10 @@
+ // Perform an appropriate method lookup based on accessor, holder,
+ // name, signature, and bytecode.
+ methodOop ciEnv::lookup_method(instanceKlass*  accessor,
+-			       instanceKlass*  holder,
+-			       symbolOop       name,
+-			       symbolOop       sig,
+-			       Bytecodes::Code bc) {
++                               instanceKlass*  holder,
++                               symbolOop       name,
++                               symbolOop       sig,
++                               Bytecodes::Code bc) {
+   EXCEPTION_CONTEXT;
+   KlassHandle h_accessor(THREAD, accessor);
+   KlassHandle h_holder(THREAD, holder);
+@@ -622,22 +619,22 @@
+   methodHandle dest_method;
+   switch (bc) {
+   case Bytecodes::_invokestatic:
+-    dest_method = 
+-      LinkResolver::resolve_static_call_or_null(h_holder, h_name, h_sig, h_accessor); 
++    dest_method =
++      LinkResolver::resolve_static_call_or_null(h_holder, h_name, h_sig, h_accessor);
+     break;
+   case Bytecodes::_invokespecial:
+-    dest_method = 
+-      LinkResolver::resolve_special_call_or_null(h_holder, h_name, h_sig, h_accessor); 
++    dest_method =
++      LinkResolver::resolve_special_call_or_null(h_holder, h_name, h_sig, h_accessor);
+     break;
+-  case Bytecodes::_invokeinterface: 
++  case Bytecodes::_invokeinterface:
+     dest_method =
+       LinkResolver::linktime_resolve_interface_method_or_null(h_holder, h_name, h_sig,
+-							      h_accessor, true);
++                                                              h_accessor, true);
+     break;
+   case Bytecodes::_invokevirtual:
+-    dest_method = 
++    dest_method =
+       LinkResolver::linktime_resolve_virtual_method_or_null(h_holder, h_name, h_sig,
+-							    h_accessor, true);
++                                                            h_accessor, true);
+     break;
+   default: ShouldNotReachHere();
+   }
+@@ -651,7 +648,7 @@
+ ciMethod* ciEnv::get_method_by_index_impl(ciInstanceKlass* accessor,
+                                      int index, Bytecodes::Code bc) {
+   // Get the method's declared holder.
+-                       
++
+   assert(accessor->get_instanceKlass()->is_linked(), "must be linked before accessing constant pool");
+   constantPoolHandle cpool = accessor->get_instanceKlass()->constants();
+   int holder_index = cpool->klass_ref_index_at(index);
+@@ -704,7 +701,7 @@
+   }
+   return NULL;
+ }
+-  
++
+ 
+ 
+ 
+@@ -724,7 +721,7 @@
+       _name_buffer_len = req_len;
+     } else {
+       _name_buffer =
+-	(char*)arena()->Arealloc(_name_buffer, _name_buffer_len, req_len);
++        (char*)arena()->Arealloc(_name_buffer, _name_buffer_len, req_len);
+       _name_buffer_len = req_len;
+     }
+   }
+@@ -735,7 +732,7 @@
+ // ciEnv::is_in_vm
+ bool ciEnv::is_in_vm() {
+   return JavaThread::current()->thread_state() == _thread_in_vm;
+-} 
++}
+ 
+ bool ciEnv::system_dictionary_modification_counter_changed() {
+   return _system_dictionary_modification_counter != SystemDictionary::number_of_modifications();
+@@ -786,19 +783,19 @@
+ // ------------------------------------------------------------------
+ // ciEnv::register_method
+ void ciEnv::register_method(ciMethod* target,
+-			    int entry_bci,
++                            int entry_bci,
+                             CodeOffsets* offsets,
+-			    int orig_pc_offset,
+-			    CodeBuffer* code_buffer,
+-			    int frame_words,
+-			    OopMapSet* oop_map_set,
+-			    ExceptionHandlerTable* handler_table,
+-			    ImplicitExceptionTable* inc_table,
++                            int orig_pc_offset,
++                            CodeBuffer* code_buffer,
++                            int frame_words,
++                            OopMapSet* oop_map_set,
++                            ExceptionHandlerTable* handler_table,
++                            ImplicitExceptionTable* inc_table,
+                             AbstractCompiler* compiler,
+                             int comp_level,
+                             bool has_debug_info,
+                             bool has_unsafe_access) {
+-  VM_ENTRY_MARK;    
++  VM_ENTRY_MARK;
+   nmethod* nm = NULL;
+   {
+     // To prevent compile queue updates.
+@@ -828,7 +825,7 @@
+         mdo->inc_decompile_count();
+       }
+ 
+-      // All buffers in the CodeBuffer are allocated in the CodeCache. 
++      // All buffers in the CodeBuffer are allocated in the CodeCache.
+       // If the code buffer is created on each compile attempt
+       // as in C2, then it must be freed.
+       code_buffer->free_blob();
+@@ -843,8 +840,8 @@
+                                entry_bci,
+                                offsets,
+                                orig_pc_offset,
+-                               debug_info(), dependencies(), code_buffer, 
+-                               frame_words, oop_map_set, 
++                               debug_info(), dependencies(), code_buffer,
++                               frame_words, oop_map_set,
+                                handler_table, inc_table,
+                                compiler, comp_level);
+ 
+@@ -860,7 +857,7 @@
+       NativeJump::patch_verified_entry(nm->entry_point(), nm->verified_entry_point(),
+                   SharedRuntime::get_handle_wrong_method_stub());
+     }
+-    
++
+     if (nm == NULL) {
+       // The CodeCache is full.  Print out warning and disable compilation.
+       record_failure("code cache is full");
+@@ -874,7 +871,7 @@
+           vm_direct_exit(CompileTheWorld ? 0 : 1);
+         }
+ #endif
+-        UseCompiler               = false;    
++        UseCompiler               = false;
+         AlwaysCompileLoopMethods  = false;
+       }
+     } else {
+@@ -902,7 +899,7 @@
+           ResourceMark rm;
+           char *method_name = method->name_and_sig_as_C_string();
+           ttyLocker ttyl;
+-          tty->print_cr("Installing method (%d) %s ", 
++          tty->print_cr("Installing method (%d) %s ",
+                         comp_level,
+                         method_name);
+         }
+@@ -913,7 +910,7 @@
+           ResourceMark rm;
+           char *method_name = method->name_and_sig_as_C_string();
+           ttyLocker ttyl;
+-          tty->print_cr("Installing osr method (%d) %s @ %d", 
++          tty->print_cr("Installing osr method (%d) %s @ %d",
+                         comp_level,
+                         method_name,
+                         entry_bci);
+@@ -979,7 +976,7 @@
+ // ------------------------------------------------------------------
+ // ciEnv::record_method_not_compilable()
+ void ciEnv::record_method_not_compilable(const char* reason, bool all_tiers) {
+-  int new_compilable = 
++  int new_compilable =
+     all_tiers ? MethodCompilable_never : MethodCompilable_not_at_tier ;
+ 
+   // Only note transitions to a worse state
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciEnv.hpp openjdk/hotspot/src/share/vm/ci/ciEnv.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciEnv.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciEnv.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciEnv.hpp	1.70 07/05/05 17:05:11 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class CompileTask;
+@@ -127,24 +124,24 @@
+                                      int klass_index,
+                                      bool& is_accessible);
+   ciConstant get_constant_by_index_impl(ciInstanceKlass* loading_klass,
+-					int constant_index);
++                                        int constant_index);
+   bool       is_unresolved_string_impl (instanceKlass* loading_klass,
+-					int constant_index) const;
++                                        int constant_index) const;
+   bool       is_unresolved_klass_impl (instanceKlass* loading_klass,
+-					int constant_index) const;
++                                        int constant_index) const;
+   ciField*   get_field_by_index_impl(ciInstanceKlass* loading_klass,
+-				     int field_index);
++                                     int field_index);
+   ciMethod*  get_method_by_index_impl(ciInstanceKlass* loading_klass,
+-				      int method_index, Bytecodes::Code bc);
++                                      int method_index, Bytecodes::Code bc);
+ 
+   // Helper methods
+   bool       check_klass_accessibility(ciKlass* accessing_klass,
+-				      klassOop resolved_klassOop);
++                                      klassOop resolved_klassOop);
+   methodOop  lookup_method(instanceKlass*  accessor,
+-			   instanceKlass*  holder,
+-			   symbolOop       name,
+-			   symbolOop       sig,
+-			   Bytecodes::Code bc);
++                           instanceKlass*  holder,
++                           symbolOop       name,
++                           symbolOop       sig,
++                           Bytecodes::Code bc);
+ 
+   // Get a ciObject from the object factory.  Ensures uniqueness
+   // of ciObjects.
+@@ -360,5 +357,3 @@
+   void record_method_not_compilable(const char* reason, bool all_tiers = true);
+   void record_out_of_memory_failure();
+ };
+-
+-
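
A pattern repeated throughout the ciEnv hunks above: each public query (get_constant_by_index, get_field_by_index, and so on) is a thin wrapper that enters the VM through a guard macro and delegates to a private *_impl method that does the real work. The sketch below captures only that wrapper shape; the RAII guard is a hypothetical stand-in for HotSpot's VM_ENTRY_MARK machinery, which it greatly simplifies.

    #include <iostream>

    // Hypothetical RAII guard in place of HotSpot's thread-state transition;
    // the destructor restores state even when the guarded code returns early.
    struct VMEntryGuard {
      VMEntryGuard()  { std::cout << "enter VM\n"; }
      ~VMEntryGuard() { std::cout << "leave VM\n"; }
    };
    #define GUARDED_VM_ENTRY(stmt) { VMEntryGuard guard; stmt }

    class Env {
      int get_constant_by_index_impl(int index) { return index * 2; }  // the real work
    public:
      // Public wrapper: establish the guard, then delegate, as in ciEnv.
      int get_constant_by_index(int index) {
        GUARDED_VM_ENTRY(return get_constant_by_index_impl(index);)
      }
    };

    int main() { Env e; return e.get_constant_by_index(3) == 6 ? 0 : 1; }
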
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciExceptionHandler.cpp openjdk/hotspot/src/share/vm/ci/ciExceptionHandler.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciExceptionHandler.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciExceptionHandler.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciExceptionHandler.cpp	1.12 07/05/05 17:05:13 JVM"
+-#endif
+ /*
+  * Copyright 1999-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -34,7 +31,7 @@
+ 
+ // ------------------------------------------------------------------
+ // ciExceptionHandler::catch_klass
+-// 
++//
+ // Get the exception klass that this handler catches.
+ ciInstanceKlass* ciExceptionHandler::catch_klass() {
+   assert(!is_catch_all(), "bad index");
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciExceptionHandler.hpp openjdk/hotspot/src/share/vm/ci/ciExceptionHandler.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciExceptionHandler.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciExceptionHandler.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciExceptionHandler.hpp	1.12 07/05/05 17:05:13 JVM"
+-#endif
+ /*
+  * Copyright 1999-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciExceptionHandler
+@@ -76,4 +73,3 @@
+ 
+   void      print();
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciField.cpp openjdk/hotspot/src/share/vm/ci/ciField.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciField.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciField.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciField.cpp	1.33 07/05/05 17:05:13 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -123,7 +120,7 @@
+ 
+   // Perform the field lookup.
+   fieldDescriptor field_desc;
+-  klassOop canonical_holder = 
++  klassOop canonical_holder =
+     loaded_decl_holder->find_field(name(), signature(), &field_desc);
+   if (canonical_holder == NULL) {
+     // Field lookup failed.  Will be detected by will_link.
+@@ -143,12 +140,9 @@
+   _cp_index = -1;
+ 
+   // Get the field's name, signature, and type.
+-  symbolOop name = fd->name();
+-  symbolOop signature = fd->signature();
+-
+   ciEnv* env = CURRENT_ENV;
+-  _name = env->get_object(name)->as_symbol();
+-  _signature = env->get_object(signature)->as_symbol();
++  _name = env->get_object(fd->name())->as_symbol();
++  _signature = env->get_object(fd->signature())->as_symbol();
+ 
+   BasicType field_type = fd->field_type();
+ 
+@@ -172,7 +166,7 @@
+   _flags = ciFlags(fd->access_flags());
+   _offset = fd->offset();
+   _holder = CURRENT_ENV->get_object(fd->field_holder())->as_instance_klass();
+-  
++
+   // Check to see if the field is constant.
+   if (_holder->is_initialized() &&
+       this->is_final() && this->is_static()) {
+@@ -184,13 +178,13 @@
+     //    generated code.  For the time being we need to consider the
+     //    field to be not constant.
+     // 2. The field is a *special* static&final field whose value
+-    //    may change.  The three examples are java.lang.System.in, 
++    //    may change.  The three examples are java.lang.System.in,
+     //    java.lang.System.out, and java.lang.System.err.
+ 
+     klassOop k = _holder->get_klassOop();
+     assert( SystemDictionary::system_klass() != NULL, "Check once per vm");
+     if( k == SystemDictionary::system_klass() ) {
+-      // Check offsets for case 2: System.in, System.out, or System.err 
++      // Check offsets for case 2: System.in, System.out, or System.err
+       if( _offset == java_lang_System::in_offset_in_bytes()  ||
+           _offset == java_lang_System::out_offset_in_bytes() ||
+           _offset == java_lang_System::err_offset_in_bytes() ) {
+@@ -201,13 +195,13 @@
+ 
+     _is_constant = true;
+     switch(type()->basic_type()) {
+-    case T_BYTE: 
++    case T_BYTE:
+       _constant_value = ciConstant(type()->basic_type(), k->byte_field(_offset));
+       break;
+-    case T_CHAR: 
++    case T_CHAR:
+       _constant_value = ciConstant(type()->basic_type(), k->char_field(_offset));
+       break;
+-    case T_SHORT: 
++    case T_SHORT:
+       _constant_value = ciConstant(type()->basic_type(), k->short_field(_offset));
+       break;
+     case T_BOOLEAN:
+@@ -228,21 +222,21 @@
+     case T_OBJECT:
+     case T_ARRAY:
+       {
+-	oop o = k->obj_field(_offset);
++        oop o = k->obj_field(_offset);
+ 
+-	// A field will be "constant" if it is known always to be
+-	// a non-null reference to an instance of a particular class,
+-	// or to a particular array.  This can happen even if the instance
+-	// or array is not perm.  In such a case, an "unloaded" ciArray
+-	// or ciInstance is created.  The compiler may be able to use
+-	// information about the object's class (which is exact) or length.
+-
+-	if (o == NULL) {
+-	  _constant_value = ciConstant(type()->basic_type(), ciNullObject::make());
+-	} else {
+-	  _constant_value = ciConstant(type()->basic_type(), CURRENT_ENV->get_object(o));
+-	  assert(_constant_value.as_object() == CURRENT_ENV->get_object(o), "check interning");
+-	}
++        // A field will be "constant" if it is known always to be
++        // a non-null reference to an instance of a particular class,
++        // or to a particular array.  This can happen even if the instance
++        // or array is not perm.  In such a case, an "unloaded" ciArray
++        // or ciInstance is created.  The compiler may be able to use
++        // information about the object's class (which is exact) or length.
++
++        if (o == NULL) {
++          _constant_value = ciConstant(type()->basic_type(), ciNullObject::make());
++        } else {
++          _constant_value = ciConstant(type()->basic_type(), CURRENT_ENV->get_object(o));
++          assert(_constant_value.as_object() == CURRENT_ENV->get_object(o), "check interning");
++        }
+       }
+     }
+   } else {
+@@ -285,7 +279,7 @@
+ // Can a specific access to this field be made without causing
+ // link errors?
+ bool ciField::will_link(ciInstanceKlass* accessing_klass,
+-			Bytecodes::Code bc) {
++                        Bytecodes::Code bc) {
+   VM_ENTRY_MARK;
+   if (_offset == -1) {
+     // at creation we couldn't link to our holder so we need to
+@@ -303,7 +297,7 @@
+                          accessing_klass->get_instanceKlass()->constants());
+   LinkResolver::resolve_field(result, c_pool, _cp_index,
+                               Bytecodes::java_code(bc),
+-			      true, false, KILL_COMPILE_ON_FATAL_(false));
++                              true, false, KILL_COMPILE_ON_FATAL_(false));
+ 
+   // update the hit-cache, unless there is a problem with memory scoping:
+   if (accessing_klass->is_shared() || !is_shared())
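
Most of the ciField.cpp diff is whitespace, plus one small simplification (the name and signature temporaries are inlined into the get_object calls), but the context documents a rule that matters to the compiler: a static final field of an initialized holder is folded as a constant unless it is one of the three special mutable finals, java.lang.System.in, out, and err, which are recognized by their field offsets. A sketch of that screening test, with invented offsets and helper names:

    // Hypothetical offsets standing in for java_lang_System::{in,out,err}_offset_in_bytes().
    const int IN_OFFSET = 80, OUT_OFFSET = 84, ERR_OFFSET = 88;

    // A static final field is foldable unless it is one of the special
    // System streams, which can be rebound via System.setIn/setOut/setErr.
    bool is_foldable(bool holder_initialized, bool is_static, bool is_final,
                     bool holder_is_system, int offset) {
      if (!holder_initialized || !is_static || !is_final) return false;
      if (holder_is_system &&
          (offset == IN_OFFSET || offset == OUT_OFFSET || offset == ERR_OFFSET))
        return false;
      return true;
    }

    int main() {
      bool ok = is_foldable(true, true, true, false, 0)            // ordinary static final
             && !is_foldable(true, true, true, true, OUT_OFFSET);  // System.out stays mutable
      return ok ? 0 : 1;
    }
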
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciField.hpp openjdk/hotspot/src/share/vm/ci/ciField.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciField.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciField.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciField.hpp	1.21 07/05/05 17:05:12 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciField
+@@ -34,6 +31,7 @@
+   CI_PACKAGE_ACCESS
+   friend class ciEnv;
+   friend class ciInstanceKlass;
++  friend class NonStaticFieldFiller;
+ 
+ private:
+   ciFlags          _flags;
+@@ -59,7 +57,7 @@
+   void initialize_from(fieldDescriptor* fd);
+ 
+   // The implementation of the print method.
+-  void print_impl();
++  void print_impl(outputStream* st);
+ 
+ public:
+   ciFlags flags() { return _flags; }
+@@ -84,7 +82,7 @@
+   //
+   //   A java compiler is permitted to compile the access to
+   //   field f as:
+-  //   
++  //
+   //     getfield B.f
+   //
+   //   In that case the declared holder of f would be B and
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciFlags.cpp openjdk/hotspot/src/share/vm/ci/ciFlags.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciFlags.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciFlags.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciFlags.cpp	1.11 07/05/05 17:05:13 JVM"
+-#endif
+ /*
+  * Copyright 1999 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -34,69 +31,69 @@
+ 
+ // ------------------------------------------------------------------
+ // ciFlags::print_klass_flags
+-void ciFlags::print_klass_flags() {
++void ciFlags::print_klass_flags(outputStream* st) {
+   if (is_public()) {
+-    tty->print("public");
++    st->print("public");
+   } else {
+-    tty->print("DEFAULT_ACCESS");
++    st->print("DEFAULT_ACCESS");
+   }
+ 
+   if (is_final()) {
+-    tty->print(",final");
++    st->print(",final");
+   }
+   if (is_super()) {
+-    tty->print(",super");
++    st->print(",super");
+   }
+   if (is_interface()) {
+-    tty->print(",interface");
++    st->print(",interface");
+   }
+   if (is_abstract()) {
+-    tty->print(",abstract");
++    st->print(",abstract");
+   }
+ }
+ 
+ // ------------------------------------------------------------------
+ // ciFlags::print_member_flags
+-void ciFlags::print_member_flags() {
++void ciFlags::print_member_flags(outputStream* st) {
+   if (is_public()) {
+-    tty->print("public");
++    st->print("public");
+   } else if (is_private()) {
+-    tty->print("private");
++    st->print("private");
+   } else if (is_protected()) {
+-    tty->print("protected");
++    st->print("protected");
+   } else {
+-    tty->print("DEFAULT_ACCESS");
++    st->print("DEFAULT_ACCESS");
+   }
+ 
+   if (is_static()) {
+-    tty->print(",static");
++    st->print(",static");
+   }
+   if (is_final()) {
+-    tty->print(",final");
++    st->print(",final");
+   }
+   if (is_synchronized()) {
+-    tty->print(",synchronized");
++    st->print(",synchronized");
+   }
+   if (is_volatile()) {
+-    tty->print(",volatile");
++    st->print(",volatile");
+   }
+   if (is_transient()) {
+-    tty->print(",transient");
++    st->print(",transient");
+   }
+   if (is_native()) {
+-    tty->print(",native");
++    st->print(",native");
+   }
+   if (is_abstract()) {
+-    tty->print(",abstract");
++    st->print(",abstract");
+   }
+   if (is_strict()) {
+-    tty->print(",strict");
++    st->print(",strict");
+   }
+-    
++
+ }
+ 
+ // ------------------------------------------------------------------
+ // ciFlags::print
+-void ciFlags::print() {
+-  tty->print(" flags=%x", _flags);
++void ciFlags::print(outputStream* st) {
++  st->print(" flags=%x", _flags);
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciFlags.hpp openjdk/hotspot/src/share/vm/ci/ciFlags.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciFlags.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciFlags.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciFlags.hpp	1.14 07/05/05 17:05:13 JVM"
+-#endif
+ /*
+  * Copyright 1999-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciFlags
+@@ -53,12 +50,12 @@
+   bool is_native      () const         { return (_flags & JVM_ACC_NATIVE      ) != 0; }
+   bool is_interface   () const         { return (_flags & JVM_ACC_INTERFACE   ) != 0; }
+   bool is_abstract    () const         { return (_flags & JVM_ACC_ABSTRACT    ) != 0; }
+-  bool is_strict      () const         { return (_flags & JVM_ACC_STRICT      ) != 0; }  
+-  
++  bool is_strict      () const         { return (_flags & JVM_ACC_STRICT      ) != 0; }
++
+   // Conversion
+   jint   as_int()                      { return _flags; }
+ 
+-  void print_klass_flags();
+-  void print_member_flags();
+-  void print();
++  void print_klass_flags(outputStream* st = tty);
++  void print_member_flags(outputStream* st = tty);
++  void print(outputStream* st = tty);
+ };
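
The ciFlags diff is the template for most of the printing changes in this patch: print routines that wrote straight to the global tty stream now take the destination as an outputStream* parameter, with tty as the default so existing call sites compile unchanged. A minimal sketch of the same refactoring, with std::ostream standing in for HotSpot's outputStream:

    #include <iostream>
    #include <sstream>

    // After the refactoring: the destination is a parameter whose default
    // preserves the old behavior (HotSpot defaults to tty; std::cout stands in).
    void print_member_flags(std::ostream& st = std::cout) {
      st << "public,static,final";
    }

    int main() {
      print_member_flags();        // legacy call sites still hit the default stream
      std::ostringstream buf;
      print_member_flags(buf);     // new call sites can capture or redirect output
      std::cout << "\ncaptured: " << buf.str() << "\n";
    }

Defaulting the parameter, rather than keeping a separate no-argument overload, is what lets the headers change without touching every existing caller.
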
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciInstance.cpp openjdk/hotspot/src/share/vm/ci/ciInstance.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciInstance.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciInstance.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciInstance.cpp	1.16 07/05/05 17:05:13 JVM"
+-#endif
+ /*
+  * Copyright 1999-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -69,13 +66,13 @@
+   int offset = field->offset();
+ 
+   switch(field_btype) {
+-  case T_BYTE: 
++  case T_BYTE:
+     return ciConstant(field_btype, obj->byte_field(offset));
+     break;
+-  case T_CHAR: 
++  case T_CHAR:
+     return ciConstant(field_btype, obj->char_field(offset));
+     break;
+-  case T_SHORT: 
++  case T_SHORT:
+     return ciConstant(field_btype, obj->short_field(offset));
+     break;
+   case T_BOOLEAN:
+@@ -131,7 +128,7 @@
+ // ciInstance::print_impl
+ //
+ // Implementation of the print method.
+-void ciInstance::print_impl() {
+-  tty->print(" type=");
+-  klass()->print();
++void ciInstance::print_impl(outputStream* st) {
++  st->print(" type=");
++  klass()->print(st);
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciInstance.hpp openjdk/hotspot/src/share/vm/ci/ciInstance.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciInstance.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciInstance.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciInstance.hpp	1.14 07/05/05 17:05:13 JVM"
+-#endif
+ /*
+  * Copyright 1999-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciInstance
+@@ -44,7 +41,7 @@
+ 
+   const char* type_string() { return "ciInstance"; }
+ 
+-  void print_impl();
++  void print_impl(outputStream* st);
+ 
+ public:
+   // If this object is a java mirror, return the corresponding type.
+@@ -62,4 +59,3 @@
+   // Constant value of a field at the specified offset.
+   ciConstant field_value_by_offset(int field_offset);
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciInstanceKlass.cpp openjdk/hotspot/src/share/vm/ci/ciInstanceKlass.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciInstanceKlass.cpp	1.44 07/05/17 15:49:55 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -69,7 +66,7 @@
+     _protection_domain = JNIHandles::make_global(h_protection_domain);
+     _is_shared = true;
+   }
+-  
++
+   // Lazy fields get filled in only upon request.
+   _super  = NULL;
+   _java_mirror = NULL;
+@@ -206,7 +203,7 @@
+     // All header offsets belong properly to java/lang/Object.
+     return CURRENT_ENV->Object_klass();
+   }
+-  
++
+   ciInstanceKlass* self = this;
+   for (;;) {
+     assert(self->is_loaded(), "must be loaded to have size");
+@@ -238,27 +235,27 @@
+ // ciInstanceKlass::print_impl
+ //
+ // Implementation of the print method.
+-void ciInstanceKlass::print_impl() {
+-  ciKlass::print_impl();
+-  GUARDED_VM_ENTRY(tty->print(" loader=0x%x", (address)loader());)
++void ciInstanceKlass::print_impl(outputStream* st) {
++  ciKlass::print_impl(st);
++  GUARDED_VM_ENTRY(st->print(" loader=0x%x", (address)loader());)
+   if (is_loaded()) {
+-    tty->print(" loaded=true initialized=%s finalized=%s subklass=%s size=%d flags=",
+-               bool_to_str(is_initialized()),
+-               bool_to_str(has_finalizer()),
+-               bool_to_str(has_subklass()),
+-               layout_helper());
++    st->print(" loaded=true initialized=%s finalized=%s subklass=%s size=%d flags=",
++              bool_to_str(is_initialized()),
++              bool_to_str(has_finalizer()),
++              bool_to_str(has_subklass()),
++              layout_helper());
+ 
+     _flags.print_klass_flags();
+ 
+     if (_super) {
+-      tty->print(" super=");
++      st->print(" super=");
+       _super->print_name();
+     }
+     if (_java_mirror) {
+-      tty->print(" mirror=PRESENT");
++      st->print(" mirror=PRESENT");
+     }
+   } else {
+-    tty->print(" loaded=false");
++    st->print(" loaded=false");
+   }
+ }
+ 
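
The print_impl hunk above also shows how the new stream parameter is threaded through the class hierarchy: ciInstanceKlass::print_impl forwards st to ciKlass::print_impl before appending its own fields (note that the patched body still calls print_klass_flags() and print_name() with no argument, so those two pieces fall back to the tty default rather than st). The forwarding shape, with invented names:

    #include <iostream>

    struct Base {
      virtual ~Base() {}
      virtual void print_impl(std::ostream& st) { st << "name=Base"; }
    };

    struct Derived : Base {
      void print_impl(std::ostream& st) override {
        Base::print_impl(st);   // parent prints its part into the same stream
        st << " loaded=true";   // then the subclass appends its own fields
      }
    };

    int main() {
      Derived d;
      d.print_impl(std::cout);
      std::cout << "\n";
      return 0;
    }
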
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciInstanceKlass.hpp openjdk/hotspot/src/share/vm/ci/ciInstanceKlass.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciInstanceKlass.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciInstanceKlass.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciInstanceKlass.hpp	1.35 07/05/05 17:05:13 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciInstanceKlass
+@@ -49,7 +46,7 @@
+   bool                   _has_subklass;
+   ciFlags                _flags;
+   jint                   _nonstatic_field_size;
+-  
++
+   // Lazy fields get filled in only upon request.
+   ciInstanceKlass*       _super;
+   ciInstance*            _java_mirror;
+@@ -77,7 +74,7 @@
+ 
+   const char* type_string() { return "ciInstanceKlass"; }
+ 
+-  void print_impl();
++  void print_impl(outputStream* st);
+ 
+   ciConstantPoolCache* field_cache();
+ 
+@@ -190,4 +187,3 @@
+   bool is_instance_klass() { return true; }
+   bool is_java_klass()     { return true; }
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciInstanceKlassKlass.cpp openjdk/hotspot/src/share/vm/ci/ciInstanceKlassKlass.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciInstanceKlassKlass.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciInstanceKlassKlass.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciInstanceKlassKlass.cpp	1.10 07/05/05 17:05:14 JVM"
+-#endif
+ /*
+  * Copyright 1999 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciInstanceKlassKlass.hpp openjdk/hotspot/src/share/vm/ci/ciInstanceKlassKlass.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciInstanceKlassKlass.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciInstanceKlassKlass.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciInstanceKlassKlass.hpp	1.12 07/05/05 17:05:14 JVM"
+-#endif
+ /*
+  * Copyright 1999-2001 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciInstanceKlassKlass
+@@ -41,7 +38,7 @@
+   instanceKlassKlass* get_instanceKlassKlass() {
+     return (instanceKlassKlass*)get_Klass();
+   }
+-  
++
+   const char* type_string() { return "ciInstanceKlassKlass"; }
+ 
+ public:
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciKlass.cpp openjdk/hotspot/src/share/vm/ci/ciKlass.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciKlass.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciKlass.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciKlass.cpp	1.30 07/05/05 17:05:11 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -190,7 +187,7 @@
+ ciKlass* ciKlass::find_klass(ciSymbol* klass_name) {
+   assert(is_loaded(), "cannot find_klass through an unloaded klass");
+   return CURRENT_ENV->get_klass_by_name(this,
+-					klass_name, false);
++                                        klass_name, false);
+ }
+ 
+ // ------------------------------------------------------------------
+@@ -224,9 +221,9 @@
+ // ciKlass::print_impl
+ //
+ // Implementation of the print method
+-void ciKlass::print_impl() {
+-  tty->print(" name=");
+-  print_name();
++void ciKlass::print_impl(outputStream* st) {
++  st->print(" name=");
++  print_name_on(st);
+ }
+ 
+ // ------------------------------------------------------------------
+@@ -236,4 +233,3 @@
+ void ciKlass::print_name_on(outputStream* st) {
+   name()->print_symbol_on(st);
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciKlass.hpp openjdk/hotspot/src/share/vm/ci/ciKlass.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciKlass.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciKlass.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciKlass.hpp	1.26 07/05/05 17:05:11 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciKlass
+@@ -49,7 +46,7 @@
+   ciKlass(KlassHandle k_h, ciSymbol* name);
+   ciKlass(ciSymbol* name, ciKlass* klass);
+ 
+-  klassOop get_klassOop() const { 
++  klassOop get_klassOop() const {
+     klassOop k = (klassOop)get_oop();
+     assert(k != NULL, "illegal use of unloaded klass");
+     return k;
+@@ -66,7 +63,7 @@
+ 
+   const char* type_string() { return "ciKlass"; }
+ 
+-  void print_impl();
++  void print_impl(outputStream* st);
+ 
+ public:
+   ciKlass(KlassHandle k_h);
+@@ -119,8 +116,4 @@
+   bool is_klass() { return true; }
+ 
+   void print_name_on(outputStream* st);
+-  void print_name() {
+-    print_name_on(tty);
+-  }
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciKlassKlass.cpp openjdk/hotspot/src/share/vm/ci/ciKlassKlass.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciKlassKlass.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciKlassKlass.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciKlassKlass.cpp	1.10 07/05/05 17:05:14 JVM"
+-#endif
+ /*
+  * Copyright 1999 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciKlassKlass.hpp openjdk/hotspot/src/share/vm/ci/ciKlassKlass.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciKlassKlass.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciKlassKlass.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciKlassKlass.hpp	1.13 07/05/05 17:05:14 JVM"
+-#endif
+ /*
+  * Copyright 1999-2001 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciKlassKlass
+@@ -42,7 +39,7 @@
+     : ciKlass(h_k, name) {}
+ 
+   klassKlass* get_klassKlass() { return (klassKlass*)get_Klass(); }
+-  
++
+   const char* type_string() { return "ciKlassKlass"; }
+ 
+ public:
+@@ -52,4 +49,3 @@
+   // Return the distinguished ciKlassKlass instance.
+   static ciKlassKlass* make();
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciMethodBlocks.cpp openjdk/hotspot/src/share/vm/ci/ciMethodBlocks.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciMethodBlocks.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciMethodBlocks.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciMethodBlocks.cpp	1.5 07/05/05 17:05:13 JVM"
+-#endif
+ /*
+  * Copyright 2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -357,7 +354,7 @@
+   }
+ 
+   if (Verbose || WizardMode) {
+-    method()->print_codes(start_bci(), limit_bci());
++    method()->print_codes_on(start_bci(), limit_bci(), st);
+   }
+ }
+ #endif
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciMethodBlocks.hpp openjdk/hotspot/src/share/vm/ci/ciMethodBlocks.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciMethodBlocks.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciMethodBlocks.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciMethodBlocks.hpp	1.5 07/05/05 17:05:14 JVM"
+-#endif
+ /*
+  * Copyright 2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciMethod.cpp openjdk/hotspot/src/share/vm/ci/ciMethod.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciMethod.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciMethod.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciMethod.cpp	1.104 07/05/05 17:05:15 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -46,7 +43,7 @@
+ 
+   // Easy to compute, so fill them in now.
+   _max_stack          = h_m()->max_stack();
+-  _max_locals         = h_m()->max_locals();  
++  _max_locals         = h_m()->max_locals();
+   _code_size          = h_m()->code_size();
+   _intrinsic_id       = h_m()->intrinsic_id();
+   _handler_count      = h_m()->exception_table()->length() / 4;
+@@ -64,7 +61,7 @@
+ #endif // COMPILER2
+ 
+   if (JvmtiExport::can_hotswap_or_post_breakpoint() && _is_compilable) {
+-    // 6328518 check hotswap conditions under the right lock.  
++    // 6328518 check hotswap conditions under the right lock.
+     MutexLocker locker(Compile_lock);
+     if (Dependencies::check_evol_method(h_m()) != NULL) {
+       _is_compilable = false;
+@@ -255,7 +252,7 @@
+   check_is_loaded();
+   VM_ENTRY_MARK;
+   methodHandle mh(THREAD, get_methodOop());
+-  return AbstractInterpreter::entry_for_method(mh);
++  return Interpreter::entry_for_method(mh);
+ }
+ 
+ 
+@@ -408,7 +405,6 @@
+     if (data != NULL && data->is_CounterData()) {
+       // Every profiled call site has a counter.
+       int count = data->as_CounterData()->count();
+-      result._count = count;
+ 
+       if (!data->is_ReceiverTypeData()) {
+         result._receiver_count[0] = 0;  // that's a definite zero
+@@ -421,8 +417,6 @@
+           ciKlass* receiver = call->receiver(i);
+           if (receiver == NULL)  continue;
+           morphism += 1;
+-          // we don't support array klasses this way
+-          if (!receiver->is_instance_klass()) continue;
+           int rcount = call->receiver_count(i);
+           if (rcount == 0) rcount = 1; // Should be valid value
+           receivers_count_total += rcount;
+@@ -436,15 +430,25 @@
+         // not only in the case of a polymorphic call but also in the case
+         // when a method data snapshot is taken after the site count was updated
+         // but before receivers counters were updated.
+-        if (morphism == result._limit) { 
++        if (morphism == result._limit) {
+            // There were no array klasses and morphism <= MorphismLimit.
+-           if (morphism <  ciCallProfile::MorphismLimit || 
+-               morphism == ciCallProfile::MorphismLimit && 
++           if (morphism <  ciCallProfile::MorphismLimit ||
++               morphism == ciCallProfile::MorphismLimit &&
+                (receivers_count_total+1) >= count) {
+              result._morphism = morphism;
+            }
+         }
++        // Make the count consistent if this is a call profile. If count is
++        // zero or less, presume that this is a typecheck profile and
++        // do nothing.  Otherwise, increase count to be the sum of all
++        // receiver's counts.
++        if (count > 0) {
++          if (count < receivers_count_total) {
++            count = receivers_count_total;
++          }
++        }
+       }
++      result._count = count;
+     }
+   }
+   return result;
+@@ -454,7 +458,7 @@
+ // Add new receiver and sort data by receiver's profile count.
+ void ciCallProfile::add_receiver(ciKlass* receiver, int receiver_count) {
+   // Add new receiver and sort data by receiver's counts when we have space
+-  // for it otherwise replace the less called receiver (less called receiver 
++  // for it otherwise replace the less called receiver (less called receiver
+   // is placed to the last array element which is not used).
+   // First array's element contains most called receiver.
+   int i = _limit;
+@@ -526,60 +530,6 @@
+   }
+ 
+ #ifndef PRODUCT
+-  if (VerifyDependencies || TraceDependencies) {
+-    // The CHA module is going to be honorably retired.
+-    // This is the only place where it is used.
+-    // For a while, test equivalence between old and new methods.
+-    KlassHandle caller_klass (THREAD, caller->get_klassOop());
+-    KlassHandle callee_klass (THREAD, callee_holder->get_klassOop());
+-    KlassHandle h_recv       (THREAD, actual_recv->get_klassOop());
+-    symbolHandle h_name      (THREAD, name()->get_symbolOop());
+-    symbolHandle h_signature (THREAD, signature()->get_symbolOop());
+-    methodHandle cha_target;
+-    CHAResult* result =
+-      CHA::analyze_call(caller_klass, callee_klass, h_recv, h_name, h_signature);
+-    if (TraceTypeProfile && Verbose) {
+-      result->print();
+-    }
+-    if (result->is_monomorphic()) {
+-      cha_target = result->monomorphic_target();
+-    }
+-
+-    if (target() != NULL && cha_target() == NULL) {
+-      ResourceMark rm;
+-      ttyLocker ttyl;
+-      if (xtty != NULL) {
+-        xtty->begin_elem("missed_by_CHA");
+-        xtty->method(target());
+-        xtty->klass(actual_recv->get_klassOop());
+-        xtty->end_elem("");
+-      }
+-      if (TraceDependencies) {
+-        tty->print_cr("found unique method missed by CHA:");
+-        tty->print_cr("  context = %s", instanceKlass::cast(actual_recv->get_klassOop())->external_name());
+-        tty->print("  method  = ");
+-        target->print_short_name(tty);
+-        tty->cr();
+-      }
+-    }
+-    if (cha_target() != NULL && target() == NULL) {
+-      ResourceMark rm;
+-      ttyLocker ttyl;
+-      if (xtty != NULL) {
+-        xtty->begin_elem("missed_by_deps_found_by_CHA");
+-        xtty->method(cha_target());
+-        xtty->klass(actual_recv->get_klassOop());
+-        xtty->end_elem("");
+-      }
+-      if (TraceDependencies) {
+-        tty->print_cr("missed unique method found by CHA:");
+-        tty->print_cr("  context = %s", instanceKlass::cast(actual_recv->get_klassOop())->external_name());
+-        tty->print("  method  = ");
+-        cha_target->print_short_name(tty);
+-        tty->cr();
+-      }
+-    }
+-  }
+   if (TraceDependencies && target() != NULL && target() != root_m->get_methodOop()) {
+     tty->print("found a non-root unique target method");
+     tty->print_cr("  context = %s", instanceKlass::cast(actual_recv->get_klassOop())->external_name());
+@@ -612,7 +562,7 @@
+ // ------------------------------------------------------------------
+ // ciMethod::resolve_invoke
+ //
+-// Given a known receiver klass, find the target for the call.  
++// Given a known receiver klass, find the target for the call.
+ // Return NULL if the call has no target or the target is abstract.
+ ciMethod* ciMethod::resolve_invoke(ciKlass* caller, ciKlass* exact_receiver) {
+    check_is_loaded();
+@@ -627,7 +577,9 @@
+    methodHandle m;
+    // Only do exact lookup if receiver klass has been linked.  Otherwise,
+    // the vtable has not been setup, and the LinkResolver will fail.
+-   if (instanceKlass::cast(h_recv())->is_linked() && !exact_receiver->is_interface()) {
++   if (h_recv->oop_is_javaArray()
++        ||
++       instanceKlass::cast(h_recv())->is_linked() && !exact_receiver->is_interface()) {
+      if (holder()->is_interface()) {
+        m = LinkResolver::resolve_interface_call_or_null(h_recv, h_resolved, h_name, h_signature, caller_klass);
+      } else {
+@@ -657,7 +609,7 @@
+ // ------------------------------------------------------------------
+ // ciMethod::resolve_vtable_index
+ //
+-// Given a known receiver klass, find the vtable index for the call.  
++// Given a known receiver klass, find the vtable index for the call.
+ // Return methodOopDesc::invalid_vtable_index if the vtable_index is unknown.
+ int ciMethod::resolve_vtable_index(ciKlass* caller, ciKlass* receiver) {
+    check_is_loaded();
+@@ -760,26 +712,23 @@
+   if (_method_data != NULL) {
+     return _method_data;
+   }
+-  if (ProfileInterpreter || Tier1UpdateMethodData) {
+-    VM_ENTRY_MARK;
+-    ciEnv* env = CURRENT_ENV;
+-    Thread* my_thread = JavaThread::current();
+-    methodHandle h_m(my_thread, get_methodOop());
++  VM_ENTRY_MARK;
++  ciEnv* env = CURRENT_ENV;
++  Thread* my_thread = JavaThread::current();
++  methodHandle h_m(my_thread, get_methodOop());
+ 
+-    if (Tier1UpdateMethodData && is_tier1_compile(env->comp_level())) {
+-      build_method_data(h_m);
+-    }
++  if (Tier1UpdateMethodData && is_tier1_compile(env->comp_level())) {
++    build_method_data(h_m);
++  }
+ 
+-    if (h_m()->method_data() != NULL) {
+-      _method_data = CURRENT_ENV->get_object(h_m()->method_data())->as_method_data();
+-      _method_data->load_data();
+-    } else {
+-      _method_data = CURRENT_ENV->get_empty_methodData();
+-    }
+-    return _method_data;
++  if (h_m()->method_data() != NULL) {
++    _method_data = CURRENT_ENV->get_object(h_m()->method_data())->as_method_data();
++    _method_data->load_data();
++  } else {
++    _method_data = CURRENT_ENV->get_empty_methodData();
+   }
+-                     
+-  return NULL;
++  return _method_data;
++
+ }
+ 
+ 
+@@ -949,7 +898,7 @@
+ bool ciMethod::is_not_reached(int bci) {
+   check_is_loaded();
+   VM_ENTRY_MARK;
+-  return AbstractInterpreter::is_not_reached(
++  return Interpreter::is_not_reached(
+                methodHandle(THREAD, get_methodOop()), bci);
+ }
+ 
+@@ -998,7 +947,7 @@
+       CLEAR_PENDING_EXCEPTION;
+       return false;
+     } else {
+-      return (spec_method->is_static() == is_static);    
++      return (spec_method->is_static() == is_static);
+     }
+   }
+   return false;
+@@ -1008,9 +957,9 @@
+ // ciMethod::print_codes
+ //
+ // Print the bytecodes for this method.
+-void ciMethod::print_codes() {
++void ciMethod::print_codes_on(outputStream* st) {
+   check_is_loaded();
+-  GUARDED_VM_ENTRY(get_methodOop()->print_codes();)
++  GUARDED_VM_ENTRY(get_methodOop()->print_codes_on(st);)
+ }
+ 
+ 
+@@ -1049,9 +998,9 @@
+ // ciMethod::print_codes
+ //
+ // Print a range of the bytecodes for this method.
+-void ciMethod::print_codes(int from, int to) {
++void ciMethod::print_codes_on(int from, int to, outputStream* st) {
+   check_is_loaded();
+-  GUARDED_VM_ENTRY(get_methodOop()->print_codes(from, to);)
++  GUARDED_VM_ENTRY(get_methodOop()->print_codes_on(from, to, st);)
+ }
+ 
+ // ------------------------------------------------------------------
+@@ -1076,20 +1025,18 @@
+ // ciMethod::print_impl
+ //
+ // Implementation of the print method.
+-void ciMethod::print_impl() {
+-  ciObject::print_impl();
+-  tty->print(" name=");
+-  name()->print_symbol();
+-  tty->print(" holder=");
+-  holder()->print_name();
+-  tty->print(" signature=");
+-  signature()->print_signature();
++void ciMethod::print_impl(outputStream* st) {
++  ciObject::print_impl(st);
++  st->print(" name=");
++  name()->print_symbol_on(st);
++  st->print(" holder=");
++  holder()->print_name_on(st);
++  st->print(" signature=");
++  signature()->as_symbol()->print_symbol_on(st);
+   if (is_loaded()) {
+-    tty->print(" loaded=true flags=");
+-    flags().print_member_flags();
++    st->print(" loaded=true flags=");
++    flags().print_member_flags(st);
+   } else {
+-    tty->print(" loaded=false");
++    st->print(" loaded=false");
+   }
+ }
+-
+-
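
Beyond the whitespace and the print_codes_on renames, the ciMethod.cpp diff carries several behavioral changes: resolve_invoke now tolerates array receivers, method_data() always returns a (possibly empty) methodData object instead of NULL when the interpreter is not profiling, the CHA cross-check block is dropped, and call_profile_at_bci defers setting result._count until the receiver rows have been scanned, so a racy snapshot cannot leave the site count below the per-receiver totals. That last repair reduces to a small clamp, sketched here with illustrative numbers:

    #include <cassert>

    // Mirror of the count reconciliation the patch adds: a positive site
    // count is raised to at least the sum of the receiver-row counts; a
    // count of zero or less is presumed to be a typecheck profile and is
    // left alone, per the comment in the hunk above.
    int reconcile_count(int count, const int* receiver_counts, int rows) {
      int total = 0;
      for (int i = 0; i < rows; i++) total += receiver_counts[i];
      if (count > 0 && count < total) count = total;
      return count;
    }

    int main() {
      int rows[] = { 40, 70 };
      assert(reconcile_count(100, rows, 2) == 110);  // stale site count raised
      assert(reconcile_count(120, rows, 2) == 120);  // already consistent
      assert(reconcile_count(0,   rows, 2) == 0);    // typecheck profile untouched
      return 0;
    }
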
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciMethodData.cpp openjdk/hotspot/src/share/vm/ci/ciMethodData.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciMethodData.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciMethodData.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciMethodData.cpp	1.28 07/05/05 17:05:15 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -68,11 +65,11 @@
+   methodDataOop mdo = get_methodDataOop();
+   if (mdo == NULL) return;
+ 
+-  // To do: don't copy the data if it is not "ripe" -- require a minimum # 
++  // To do: don't copy the data if it is not "ripe" -- require a minimum #
+   // of invocations.
+ 
+   // Snapshot the data -- actually, take an approximate snapshot of
+-  // the data.  Any concurrently executing threads may be changing the 
++  // the data.  Any concurrently executing threads may be changing the
+   // data as we copy it.
+   int skip_header = oopDesc::header_size();
+   Copy::disjoint_words((HeapWord*) mdo              + skip_header,
+@@ -86,7 +83,7 @@
+   _data = (intptr_t *) arena->Amalloc(total_size);
+   Copy::disjoint_words((HeapWord*) mdo->data_base(), (HeapWord*) _data, total_size / HeapWordSize);
+ 
+-  // Traverse the profile data, translating any oops into their 
++  // Traverse the profile data, translating any oops into their
+   // ci equivalents.
+   ResourceMark rm;
+   ciProfileData* ci_data = first_data();
+@@ -123,7 +120,7 @@
+     return NULL;
+   }
+   DataLayout* data_layout = data_layout_at(data_index);
+-  
++
+   switch (data_layout->tag()) {
+   case DataLayout::no_tag:
+   default:
+@@ -280,7 +277,7 @@
+ ByteSize ciMethodData::offset_of_slot(ciProfileData* data, ByteSize slot_offset_in_data) {
+   // Get offset within methodDataOop of the data array
+   ByteSize data_offset = methodDataOopDesc::data_offset();
+-  
++
+   // Get cell offset of the ProfileData within data array
+   int cell_offset = dp_to_di(data->dp());
+ 
+@@ -291,13 +288,13 @@
+ }
+ 
+ // Implementation of the print method.
+-void ciMethodData::print_impl() {
+-  ciObject::print_impl();
++void ciMethodData::print_impl(outputStream* st) {
++  ciObject::print_impl(st);
+ }
+ 
+ #ifndef PRODUCT
+ void ciMethodData::print() {
+-  print_data_on(tty); 
++  print_data_on(tty);
+ }
+ 
+ void ciMethodData::print_data_on(outputStream* st) {
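
The load_data() hunk above copies the whole profile area out of the methodDataOop with one bulk word copy, explicitly tolerating concurrent counter updates: the result is an approximate snapshot, which is acceptable for profile data. A rough standalone analogue, with invented types and sizes (MethodDataObj, the two-word header) standing in for the real oop layout:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Invented stand-in for a methodDataOop: a small header followed by
    // profile cells that mutator threads may be bumping concurrently.
    struct MethodDataObj {
        intptr_t header[2];      // object header (size is illustrative)
        intptr_t counters[8];    // profile cells
    };

    int main() {
        MethodDataObj mdo = {{0, 0}, {1, 2, 3, 4, 5, 6, 7, 8}};
        intptr_t snapshot[8];
        // Analogue of Copy::disjoint_words: one bulk copy starting past the
        // header, no per-cell locking -- an approximate snapshot by design.
        std::memcpy(snapshot,
                    reinterpret_cast<char*>(&mdo) + sizeof(mdo.header),
                    sizeof(snapshot));
        std::printf("first profile cell = %ld\n", (long)snapshot[0]);
    }
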
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciMethodData.hpp openjdk/hotspot/src/share/vm/ci/ciMethodData.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciMethodData.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciMethodData.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciMethodData.hpp	1.27 07/05/05 17:05:14 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class ciBitData;
+@@ -57,8 +54,8 @@
+ 
+   void set_receiver(uint row, ciKlass* recv) {
+     assert((uint)row < row_limit(), "oob");
+-    set_intptr_at(receiver0_offset + row * receiver_type_row_cell_count, 
+-		  (intptr_t) recv);
++    set_intptr_at(receiver0_offset + row * receiver_type_row_cell_count,
++                  (intptr_t) recv);
+   }
+ 
+   ciKlass* receiver(uint row) {
+@@ -179,7 +176,7 @@
+ 
+   const char* type_string()                      { return "ciMethodData"; }
+ 
+-  void print_impl();
++  void print_impl(outputStream* st);
+ 
+   DataLayout* data_layout_at(int data_index) {
+     assert(data_index % sizeof(intptr_t) == 0, "unaligned");
+@@ -192,9 +189,9 @@
+ 
+   // hint accessors
+   int      hint_di() const  { return _hint_di; }
+-  void set_hint_di(int di)  { 
++  void set_hint_di(int di)  {
+     assert(!out_of_bounds(di), "hint_di out of bounds");
+-    _hint_di = di; 
++    _hint_di = di;
+   }
+   ciProfileData* data_before(int bci) {
+     // avoid SEGV on this edge case
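
The set_receiver hunk above indexes the receiver-type table as receiver0_offset + row * receiver_type_row_cell_count, i.e. fixed-width rows of cells addressed row-major. A toy version of that addressing; the constants (two cells per row, a two-cell header, three rows) are assumptions for illustration, not HotSpot's actual layout:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Illustrative layout constants -- not HotSpot's actual values.
    const unsigned receiver0_offset = 2;  // cells before the first row
    const unsigned cells_per_row    = 2;  // one {receiver, count} pair per row

    struct TypeProfile {
        intptr_t cells[receiver0_offset + 3 * cells_per_row];  // 3 rows
        unsigned row_limit() const { return 3; }
        void set_receiver(unsigned row, intptr_t klass) {
            assert(row < row_limit() && "oob");
            cells[receiver0_offset + row * cells_per_row] = klass;
        }
        intptr_t receiver(unsigned row) const {
            return cells[receiver0_offset + row * cells_per_row];
        }
    };

    int main() {
        TypeProfile p = {};
        p.set_receiver(1, 0xABCD);
        std::printf("row 1 receiver = %#lx\n", (unsigned long)p.receiver(1));
    }
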
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciMethod.hpp openjdk/hotspot/src/share/vm/ci/ciMethod.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciMethod.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciMethod.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciMethod.hpp	1.63 07/06/08 15:21:44 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class ciMethodBlocks;
+@@ -89,7 +86,7 @@
+ 
+   const char* type_string()                      { return "ciMethod"; }
+ 
+-  void print_impl();
++  void print_impl(outputStream* st);
+ 
+   void load_code();
+ 
+@@ -110,7 +107,7 @@
+   ciSymbol* name() const                         { return _name; }
+   ciInstanceKlass* holder() const                { return _holder; }
+   ciMethodData* method_data();
+-  
++
+   // Signature information.
+   ciSignature* signature() const                 { return _signature; }
+   ciType*      return_type() const               { return _signature->return_type(); }
+@@ -172,7 +169,7 @@
+   ciTypeFlow*   get_osr_flow_analysis(int osr_bci);  // alternate entry point
+   ciCallProfile call_profile_at_bci(int bci);
+   int           interpreter_call_site_count(int bci);
+-  
++
+   // Given a certain calling environment, find the monomorphic target
+   // for the call.  Return NULL if the call is not monomorphic in
+   // its calling environment.
+@@ -180,7 +177,7 @@
+                                     ciInstanceKlass* callee_holder,
+                                     ciInstanceKlass* actual_receiver);
+ 
+-  // Given a known receiver klass, find the target for the call.  
++  // Given a known receiver klass, find the target for the call.
+   // Return NULL if the call has no target or is abstract.
+   ciMethod* resolve_invoke(ciKlass* caller, ciKlass* exact_receiver);
+ 
+@@ -188,9 +185,9 @@
+   int resolve_vtable_index(ciKlass* caller, ciKlass* receiver);
+ 
+   // Compilation directives
+-  bool will_link(ciKlass* accessing_klass, 
+-		 ciKlass* declared_method_holder,
+-		 Bytecodes::Code bc);
++  bool will_link(ciKlass* accessing_klass,
++                 ciKlass* declared_method_holder,
++                 Bytecodes::Code bc);
+   bool should_exclude();
+   bool should_inline();
+   bool should_not_inline();
+@@ -237,11 +234,13 @@
+   bool can_be_statically_bound() const           { return _can_be_statically_bound; }
+ 
+   // Print the bytecodes of this method.
+-  void print_codes();
+-  void print_codes(int from, int to);
++  void print_codes_on(outputStream* st);
++  void print_codes() {
++    print_codes_on(tty);
++  }
++  void print_codes_on(int from, int to, outputStream* st);
+ 
+   // Print the name of this method in various incarnations.
+   void print_name(outputStream* st = tty);
+   void print_short_name(outputStream* st = tty);
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciMethodKlass.cpp openjdk/hotspot/src/share/vm/ci/ciMethodKlass.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciMethodKlass.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciMethodKlass.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciMethodKlass.cpp	1.10 07/05/05 17:05:15 JVM"
+-#endif
+ /*
+  * Copyright 1999 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciMethodKlass.hpp openjdk/hotspot/src/share/vm/ci/ciMethodKlass.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciMethodKlass.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciMethodKlass.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciMethodKlass.hpp	1.13 07/05/05 17:05:15 JVM"
+-#endif
+ /*
+  * Copyright 1999-2001 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciMethodKlass
+@@ -39,7 +36,7 @@
+   }
+ 
+   methodKlass* get_methodKlass() { return (methodKlass*)get_Klass(); }
+-  
++
+   const char* type_string() { return "ciMethodKlass"; }
+ 
+ public:
+@@ -49,4 +46,3 @@
+   // Return the distinguished ciMethodKlass instance.
+   static ciMethodKlass* make();
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciNullObject.cpp openjdk/hotspot/src/share/vm/ci/ciNullObject.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciNullObject.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciNullObject.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciNullObject.cpp	1.11 07/05/05 17:05:15 JVM"
+-#endif
+ /*
+  * Copyright 1999 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -37,9 +34,9 @@
+ // ciNullObject::print_impl
+ //
+ // Implementation of the print method.
+-void ciNullObject::print_impl() {
+-  ciObject::print_impl();
+-  tty->print(" unique");
++void ciNullObject::print_impl(outputStream* st) {
++  ciObject::print_impl(st);
++  st->print(" unique");
+ }
+ 
+ // ------------------------------------------------------------------
+@@ -49,4 +46,3 @@
+ ciNullObject* ciNullObject::make() {
+   return CURRENT_ENV->_null_object_instance->as_null_object();
+ }
+-
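
ciNullObject::make() above returns one distinguished instance, so the null constant is a single shared object and pointer comparison is identity. HotSpot hangs the real instance off CURRENT_ENV; a function-local static is the simplest standalone stand-in, with an invented name:

    #include <cstdio>

    // Invented name; HotSpot stores the real instance on the compiler env.
    struct NullConstant {
        static NullConstant* make() {
            static NullConstant the_instance;   // one canonical object
            return &the_instance;
        }
    private:
        NullConstant() {}
    };

    int main() {
        // Identity comparison works because there is only one instance.
        std::printf("%s\n",
                    NullConstant::make() == NullConstant::make()
                        ? "same instance" : "bug");
    }
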
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciNullObject.hpp openjdk/hotspot/src/share/vm/ci/ciNullObject.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciNullObject.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciNullObject.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciNullObject.hpp	1.12 07/05/05 17:05:14 JVM"
+-#endif
+ /*
+  * Copyright 1999-2000 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciNullObject
+@@ -36,7 +33,7 @@
+ 
+   const char* type_string() { return "ciNullObject"; }
+ 
+-  void print_impl();
++  void print_impl(outputStream* st);
+ 
+ public:
+   // Is this ciObject a Java Language Object?  That is,
+@@ -50,5 +47,3 @@
+   // Get the distinguished instance of this klass.
+   static ciNullObject* make();
+ };
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciObjArray.hpp openjdk/hotspot/src/share/vm/ci/ciObjArray.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciObjArray.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciObjArray.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciObjArray.hpp	1.12 07/05/05 17:05:15 JVM"
+-#endif
+ /*
+  * Copyright 1999-2001 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciObjArray
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciObjArrayKlass.cpp openjdk/hotspot/src/share/vm/ci/ciObjArrayKlass.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciObjArrayKlass.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciObjArrayKlass.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciObjArrayKlass.cpp	1.23 07/05/05 17:05:15 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciObjArrayKlass.hpp openjdk/hotspot/src/share/vm/ci/ciObjArrayKlass.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciObjArrayKlass.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciObjArrayKlass.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciObjArrayKlass.hpp	1.14 07/05/05 17:05:16 JVM"
+-#endif
+ /*
+  * Copyright 1999-2001 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciObjArrayKlass
+@@ -72,4 +69,3 @@
+ 
+   static ciObjArrayKlass* make(ciKlass* element_klass);
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciObjArrayKlassKlass.cpp openjdk/hotspot/src/share/vm/ci/ciObjArrayKlassKlass.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciObjArrayKlassKlass.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciObjArrayKlassKlass.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciObjArrayKlassKlass.cpp	1.10 07/05/05 17:05:15 JVM"
+-#endif
+ /*
+  * Copyright 1999 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciObjArrayKlassKlass.hpp openjdk/hotspot/src/share/vm/ci/ciObjArrayKlassKlass.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciObjArrayKlassKlass.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciObjArrayKlassKlass.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciObjArrayKlassKlass.hpp	1.12 07/05/05 17:05:16 JVM"
+-#endif
+ /*
+  * Copyright 1999-2001 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciObjArrayKlassKlass
+@@ -41,7 +38,7 @@
+   objArrayKlassKlass* get_objArrayKlassKlass() {
+     return (objArrayKlassKlass*)get_Klass();
+   }
+-  
++
+   const char* type_string() { return "ciObjArrayKlassKlass"; }
+ 
+ public:
+@@ -51,4 +48,3 @@
+   // Return the distinguished ciObjArrayKlassKlass instance.
+   static ciObjArrayKlassKlass* make();
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciObject.cpp openjdk/hotspot/src/share/vm/ci/ciObject.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciObject.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciObject.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciObject.cpp	1.28 07/05/17 15:49:59 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -180,7 +177,7 @@
+   assert(has_encoding(), "oop must be NULL or perm");
+   return handle();
+ }
+-  
++
+ // ------------------------------------------------------------------
+ // ciObject::has_encoding
+ bool ciObject::has_encoding() {
+@@ -195,10 +192,10 @@
+ //
+ // Implementation note: dispatch to the virtual print_impl behavior
+ // for this ciObject.
+-void ciObject::print() {
+-  tty->print("<%s", type_string());
+-  GUARDED_VM_ENTRY(print_impl();)
+-  tty->print(" ident=%d %s address=0x%x>", ident(),
++void ciObject::print(outputStream* st) {
++  st->print("<%s", type_string());
++  GUARDED_VM_ENTRY(print_impl(st);)
++  st->print(" ident=%d %s address=0x%x>", ident(),
+         is_perm() ? "PERM" : "",
+         (address)this);
+ }
+@@ -207,13 +204,12 @@
+ // ciObject::print_oop
+ //
+ // Print debugging output about the oop this ciObject represents.
+-void ciObject::print_oop() {
++void ciObject::print_oop(outputStream* st) {
+   if (is_null_object()) {
+-    tty->print_cr("NULL");
++    st->print_cr("NULL");
+   } else if (!is_loaded()) {
+-    tty->print_cr("UNLOADED");
++    st->print_cr("UNLOADED");
+   } else {
+-    GUARDED_VM_ENTRY(get_oop()->print();)
++    GUARDED_VM_ENTRY(get_oop()->print_on(st);)
+   }
+ }
+-
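
The ciObject::print hunk above is a template-method shape: the non-virtual print(st) emits the common "<type ... ident=...>" framing and delegates the type-specific middle to the virtual print_impl(st) hook that the other hunks in this patch re-sign. A compact sketch with invented names (Obj, NullObj):

    #include <cstdio>

    // Invented names; the structure mirrors the hunk above.
    struct Obj {
        int ident;
        virtual const char* type_string() const { return "Obj"; }
        virtual void print_impl(FILE* st) const {}   // type-specific hook
        void print(FILE* st = stdout) const {        // common framing
            std::fprintf(st, "<%s", type_string());
            print_impl(st);
            std::fprintf(st, " ident=%d address=%p>\n", ident,
                         (const void*)this);
        }
        virtual ~Obj() {}
    };

    struct NullObj : Obj {
        const char* type_string() const override { return "NullObj"; }
        void print_impl(FILE* st) const override { std::fputs(" unique", st); }
    };

    int main() {
        NullObj n;
        n.ident = 1;
        n.print();   // <NullObj unique ident=1 address=0x...>
    }
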
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciObjectFactory.cpp openjdk/hotspot/src/share/vm/ci/ciObjectFactory.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciObjectFactory.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciObjectFactory.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciObjectFactory.cpp	1.39 07/05/17 15:50:05 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -99,7 +96,7 @@
+ void ciObjectFactory::init_shared_objects() {
+ 
+   _next_ident = 1;  // start numbering CI objects at 1
+-  
++
+   {
+     // Create the shared symbols, but not in _shared_ci_objects.
+     int i;
+@@ -170,7 +167,7 @@
+       ->as_instance_klass();
+   ciEnv::_String =
+     get(SystemDictionary::string_klass())
+-      ->as_instance_klass(); 
++      ->as_instance_klass();
+ 
+   for (int len = -1; len != _ci_objects->length(); ) {
+     len = _ci_objects->length();
+@@ -250,7 +247,7 @@
+     if (key->is_symbol()) {
+       vmSymbols::SID sid = vmSymbols::find_sid((symbolOop)key);
+       if (sid != vmSymbols::NO_SID) {
+-	// do not pollute the main cache with it
++        // do not pollute the main cache with it
+         return vm_symbol_at(sid);
+       }
+     }
+@@ -455,7 +452,7 @@
+ //------------------------------------------------------------------
+ // ciObjectFactory::get_empty_methodData
+ //
+-// Get the ciMethodData representing the methodData for a method with 
++// Get the ciMethodData representing the methodData for a method with
+ // none.
+ ciMethodData* ciObjectFactory::get_empty_methodData() {
+   ciMethodData* new_methodData = new (arena()) ciMethodData();
+@@ -475,7 +472,7 @@
+       return entry;
+     }
+   }
+-  
++
+   ciReturnAddress* new_ret_addr = new (arena()) ciReturnAddress(bci);
+   init_ident_of(new_ret_addr);
+   _return_addresses->append(new_ret_addr);
+@@ -521,7 +518,7 @@
+ // Verify that the binary seach found the given key.
+ bool ciObjectFactory::is_found_at(int index, oop key, GrowableArray<ciObject*>* objects) {
+   return (index < objects->length() &&
+-	  objects->at(index)->get_oop() == key);
++          objects->at(index)->get_oop() == key);
+ }
+ 
+ 
+@@ -648,4 +645,3 @@
+              _ci_objects->length(), _unloaded_methods->length(),
+              _unloaded_klasses->length());
+ }
+-
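
The get() hunk above short-circuits well-known symbols through the shared vmSymbols table ("do not pollute the main cache with it") before touching the per-compilation cache. A sketch of that two-level lookup; the table contents and id scheme are invented:

    #include <cstdio>
    #include <string>
    #include <unordered_map>

    // Invented shared table, standing in for the prebuilt vmSymbols set.
    static const std::unordered_map<std::string, int> shared_symbols = {
        {"java/lang/Object", 1}, {"java/lang/String", 2},
    };

    std::unordered_map<std::string, int> per_compile_cache;  // the main cache
    int next_id = 100;

    int lookup(const std::string& key) {
        // Well-known symbols resolve through the shared table and are
        // never inserted into the per-compilation cache.
        auto shared = shared_symbols.find(key);
        if (shared != shared_symbols.end()) return shared->second;
        auto ins = per_compile_cache.insert({key, next_id});
        if (ins.second) ++next_id;        // first sighting: intern it
        return ins.first->second;
    }

    int main() {
        std::printf("%d %d %d\n", lookup("java/lang/Object"),
                    lookup("Foo"), lookup("Foo"));   // prints: 1 100 100
    }
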
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciObjectFactory.hpp openjdk/hotspot/src/share/vm/ci/ciObjectFactory.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciObjectFactory.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciObjectFactory.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciObjectFactory.hpp	1.20 07/05/17 15:50:07 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciObjectFactory
+@@ -102,7 +99,7 @@
+                               bool create_if_not_found);
+ 
+ 
+-  // Get the ciMethodData representing the methodData for a method 
++  // Get the ciMethodData representing the methodData for a method
+   // with none.
+   ciMethodData* get_empty_methodData();
+ 
+@@ -111,5 +108,3 @@
+   void print_contents();
+   void print();
+ };
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciObject.hpp openjdk/hotspot/src/share/vm/ci/ciObject.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciObject.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciObject.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciObject.hpp	1.24 07/05/05 17:05:16 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciObject
+@@ -71,13 +68,12 @@
+     return JNIHandles::resolve_non_null(_handle);
+   }
+ 
+-  bool is_perm() { return (_ident & PERM_FLAG) != 0; }
+   void set_perm() {
+     _ident |=  PERM_FLAG;
+   }
+ 
+   // Virtual behavior of the print() method.
+-  virtual void print_impl() {}
++  virtual void print_impl(outputStream* st) {}
+ 
+   virtual const char* type_string() { return "ciObject"; }
+ 
+@@ -101,6 +97,11 @@
+   // See ciEnv::make_perm_array
+   bool has_encoding();
+ 
++  // Is this object guaranteed to be in the permanent part of the heap?
++  // If so, CollectedHeap::can_elide_permanent_oop_store_barriers is relevant.
++  // If the answer is false, no guarantees are made.
++  bool is_perm() { return (_ident & PERM_FLAG) != 0; }
++
+   // The address which the compiler should embed into the
+   // generated code to represent this oop.  This address
+   // is not the true address of the oop -- it will get patched
+@@ -252,9 +253,8 @@
+   }
+ 
+   // Print debugging output about this ciObject.
+-  void print();
++  void print(outputStream* st = tty);
+ 
+   // Print debugging output about the oop this ciObject represents.
+-  void print_oop();
++  void print_oop(outputStream* st = tty);
+ };
+-
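
The ciObject.hpp hunk moves is_perm() next to its new documentation; the underlying representation packs a PERM_FLAG bit into the same _ident word as the object's small integer id. A sketch of that bit-packing, with illustrative constants (the real flag width and shift may differ):

    #include <cstdint>
    #include <cstdio>

    // Illustrative constants -- not HotSpot's exact layout.
    const uint32_t PERM_FLAG   = 1u;  // low bit: object known permanent
    const uint32_t IDENT_SHIFT = 1;   // id lives in the bits above the flag

    struct Handle {
        uint32_t ident_word;
        explicit Handle(uint32_t id) : ident_word(id << IDENT_SHIFT) {}
        bool is_perm() const   { return (ident_word & PERM_FLAG) != 0; }
        void set_perm()        { ident_word |= PERM_FLAG; }
        uint32_t ident() const { return ident_word >> IDENT_SHIFT; }
    };

    int main() {
        Handle h(42);
        h.set_perm();
        std::printf("ident=%u perm=%d\n", h.ident(), (int)h.is_perm());
    }
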
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciSignature.cpp openjdk/hotspot/src/share/vm/ci/ciSignature.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciSignature.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciSignature.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciSignature.cpp	1.21 07/05/05 17:05:16 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -56,7 +53,7 @@
+     } else {
+       symbolOop name = ss.as_symbol(THREAD);
+       if (HAS_PENDING_EXCEPTION) {
+-        type = ss.is_array() ? (ciType*)ciEnv::unloaded_ciobjarrayklass() 
++        type = ss.is_array() ? (ciType*)ciEnv::unloaded_ciobjarrayklass()
+           : (ciType*)ciEnv::unloaded_ciinstance_klass();
+         env->record_out_of_memory_failure();
+         CLEAR_PENDING_EXCEPTION;
+@@ -111,4 +108,3 @@
+   _accessing_klass->print();
+   tty->print(" address=0x%x>", (address)this);
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciSignature.hpp openjdk/hotspot/src/share/vm/ci/ciSignature.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciSignature.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciSignature.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciSignature.hpp	1.16 07/05/05 17:05:14 JVM"
+-#endif
+ /*
+  * Copyright 1999-2000 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciSignature
+@@ -57,6 +54,3 @@
+   void print_signature();
+   void print();
+ };
+-
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciStreams.cpp openjdk/hotspot/src/share/vm/ci/ciStreams.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciStreams.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciStreams.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciStreams.cpp	1.30 07/05/05 17:05:16 JVM"
+-#endif
+ /*
+  * Copyright 1999-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -90,12 +87,12 @@
+ Bytecodes::Code ciBytecodeStream::wide()
+ {
+   // Get following bytecode; do not return wide
+-  Bytecodes::Code bc = (Bytecodes::Code)_pc[1];	
+-  _pc += 2;			// Skip both bytecodes
+-  _pc += 2;			// Skip index always
+-  if( bc == Bytecodes::_iinc ) 
+-    _pc += 2;			// Skip optional constant
+-  _was_wide = _pc;		// Flag last wide bytecode found
++  Bytecodes::Code bc = (Bytecodes::Code)_pc[1];
++  _pc += 2;                     // Skip both bytecodes
++  _pc += 2;                     // Skip index always
++  if( bc == Bytecodes::_iinc )
++    _pc += 2;                   // Skip optional constant
++  _was_wide = _pc;              // Flag last wide bytecode found
+   return bc;
+ }
+ 
+@@ -104,27 +101,27 @@
+ //
+ // Special handling for switch ops
+ Bytecodes::Code ciBytecodeStream::table( Bytecodes::Code bc ) {
+-  switch( bc ) {		// Check for special bytecode handling
+-    
++  switch( bc ) {                // Check for special bytecode handling
++
+   case Bytecodes::_lookupswitch:
+-    _pc++;			// Skip wide bytecode
+-    _pc += (_start-_pc)&3;	// Word align
+-    _table_base = (jint*)_pc;	// Capture for later usage
+-				// table_base[0] is default far_dest
++    _pc++;                      // Skip wide bytecode
++    _pc += (_start-_pc)&3;      // Word align
++    _table_base = (jint*)_pc;   // Capture for later usage
++                                // table_base[0] is default far_dest
+     // Table has 2 lead elements (default, length), then pairs of u4 values.
+     // So load table length, and compute address at end of table
+     _pc = (address)&_table_base[2+ 2*Bytes::get_Java_u4((address)&_table_base[1])];
+     break;
+ 
+-  case Bytecodes::_tableswitch: { 
+-    _pc++;			// Skip wide bytecode
+-    _pc += (_start-_pc)&3;	// Word align
+-    _table_base = (jint*)_pc;	// Capture for later usage
+-				// table_base[0] is default far_dest
++  case Bytecodes::_tableswitch: {
++    _pc++;                      // Skip wide bytecode
++    _pc += (_start-_pc)&3;      // Word align
++    _table_base = (jint*)_pc;   // Capture for later usage
++                                // table_base[0] is default far_dest
+     int lo = Bytes::get_Java_u4((address)&_table_base[1]);// Low bound
+     int hi = Bytes::get_Java_u4((address)&_table_base[2]);// High bound
+-    int len = hi - lo + 1;	// Dense table size
+-    _pc = (address)&_table_base[3+len];	// Skip past table
++    int len = hi - lo + 1;      // Dense table size
++    _pc = (address)&_table_base[3+len]; // Skip past table
+     break;
+   }
+ 
+@@ -137,8 +134,8 @@
+ // ------------------------------------------------------------------
+ // ciBytecodeStream::reset_to_bci
+ void ciBytecodeStream::reset_to_bci( int bci ) {
+-  _bc_start=_was_wide=0; 
+-  _pc = _start+bci; 
++  _bc_start=_was_wide=0;
++  _pc = _start+bci;
+ }
+ 
+ // ------------------------------------------------------------------
+@@ -162,7 +159,7 @@
+ // ------------------------------------------------------------------
+ // ciBytecodeStream::get_klass_index
+ //
+-// If this bytecodes references a klass, return the index of the 
++// If this bytecode references a klass, return the index of the
+ // referenced klass.
+ int ciBytecodeStream::get_klass_index() const {
+   switch(cur_bc()) {
+@@ -314,7 +311,7 @@
+   default:
+     ShouldNotReachHere();
+     return 0;
+-  }  
++  }
+ }
+ 
+ // ------------------------------------------------------------------
+@@ -368,5 +365,3 @@
+   int name_and_type_index = cpool->name_and_type_ref_index_at(method_index);
+   return cpool->signature_ref_index_at(name_and_type_index);
+ }
+-
+-
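
The reindented table() hunks above skip the switch opcode's alignment padding with _pc += (_start - _pc) & 3: tableswitch/lookupswitch operands must begin at a 4-byte-aligned offset from the method start, and that mask computes (-offset) mod 4. A tiny check of the arithmetic:

    #include <cstdio>

    // Number of pad bytes after a tableswitch/lookupswitch opcode at the
    // given bytecode offset: (-offset) mod 4, which the stream computes
    // with pointers as (_start - _pc) & 3.
    int pad_to_word(int offset_from_start) {
        return (-offset_from_start) & 3;
    }

    int main() {
        for (int off = 1; off <= 8; ++off)
            std::printf("offset %d -> %d pad byte(s)\n",
                        off, pad_to_word(off));
    }
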
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciStreams.hpp openjdk/hotspot/src/share/vm/ci/ciStreams.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciStreams.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciStreams.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciStreams.hpp	1.36 07/05/05 17:05:14 JVM"
+-#endif
+ /*
+  * Copyright 1999-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciBytecodeStream
+@@ -35,7 +32,7 @@
+ class ciBytecodeStream : StackObj {
+ private:
+  // Handling for the weird bytecodes
+-  Bytecodes::Code wide();	// Handle wide bytecode
++  Bytecodes::Code wide();       // Handle wide bytecode
+   Bytecodes::Code table(Bytecodes::Code); // Handle complicated inline table
+ 
+   static Bytecodes::Code check_java(Bytecodes::Code c) {
+@@ -45,17 +42,17 @@
+ 
+   ciMethod* _method;           // the method
+   ciInstanceKlass* _holder;
+-  address _bc_start;	       	// Start of current bytecode for table
+-  address _was_wide;		// Address past last wide bytecode
+-  jint* _table_base;		// Aligned start of last table or switch 
+-
+-  address _start;		   // Start of bytecodes
+-  address _end;			   // Past end of bytecodes
+-  address _pc;			   // Current PC
++  address _bc_start;            // Start of current bytecode for table
++  address _was_wide;            // Address past last wide bytecode
++  jint* _table_base;            // Aligned start of last table or switch
++
++  address _start;                  // Start of bytecodes
++  address _end;                    // Past end of bytecodes
++  address _pc;                     // Current PC
+   Bytecodes::Code _bc;             // Current bytecode
+ 
+-  void reset( address base, unsigned int size ) { 
+-    _bc_start =_was_wide = 0; 
++  void reset( address base, unsigned int size ) {
++    _bc_start =_was_wide = 0;
+     _start = _pc = base; _end = base + size; }
+ 
+ public:
+@@ -93,20 +90,20 @@
+   void set_max_bci( int max ) {
+     _end = _start + max;
+   }
+- 
++
+   address cur_bcp()             { return _bc_start; }  // Returns bcp to current instruction
+   int next_bci() const          { return _pc -_start; }
+   int cur_bci() const           { return _bc_start - _start; }
+ 
+   Bytecodes::Code cur_bc() const{ return check_java(_bc); }
+   Bytecodes::Code next_bc()     { return Bytecodes::java_code((Bytecodes::Code)* _pc); }
+-  
++
+   // Return current ByteCode and increment PC to next bytecode, skipping all
+   // intermediate constants.  Returns EOBC at end.
+   // Expected usage:
+   //     while( (bc = iter.next()) != EOBC() ) { ... }
+   Bytecodes::Code next() {
+-    _bc_start = _pc;		            // Capture start of bc
++    _bc_start = _pc;                        // Capture start of bc
+     if( _pc >= _end ) return EOBC();        // End-Of-Bytecodes
+ 
+     // Fetch Java bytecode
+@@ -115,11 +112,11 @@
+     int csize = Bytecodes::length_for(_bc); // Expected size
+ 
+     if( _bc == Bytecodes::_wide ) {
+-      _bc=wide();	                    // Handle wide bytecode
++      _bc=wide();                           // Handle wide bytecode
+     } else if( csize == 0 ) {
+       _bc=table(_bc);                       // Handle inline tables
+     } else {
+-      _pc += csize;		            // Bump PC past bytecode
++      _pc += csize;                         // Bump PC past bytecode
+     }
+     return check_java(_bc);
+   }
+@@ -129,18 +126,18 @@
+   // Get a byte index following this bytecode.
+   // If prefixed with a wide bytecode, get a wide index.
+   int get_index() const {
+-    return (_pc == _was_wide)	// was widened?
+-      ? Bytes::get_Java_u2(_bc_start+2)	// yes, return wide index
+-      : _bc_start[1];		// no, return narrow index
++    return (_pc == _was_wide)   // was widened?
++      ? Bytes::get_Java_u2(_bc_start+2) // yes, return wide index
++      : _bc_start[1];           // no, return narrow index
+   }
+ 
+   // Set a byte index following this bytecode.
+   // If prefixed with a wide bytecode, get a wide index.
+   void put_index(int idx) {
+-      if (_pc == _was_wide)	// was widened?
+-         Bytes::put_Java_u2(_bc_start+2,idx);	// yes, set wide index
++      if (_pc == _was_wide)     // was widened?
++         Bytes::put_Java_u2(_bc_start+2,idx);   // yes, set wide index
+       else
+-         _bc_start[1]=idx;		// no, set narrow index
++         _bc_start[1]=idx;              // no, set narrow index
+   }
+ 
+   // Get 2-byte index (getfield/putstatic/etc)
+@@ -187,8 +184,8 @@
+     return _bc_start-_start + (int)Bytes::get_Java_u4(_pc-4);
+   }
+ 
+-  // For a lookup or switch table, return target destination 
+-  int get_int_table( int index ) const { 
++  // For a lookup or switch table, return target destination
++  int get_int_table( int index ) const {
+     return Bytes::get_Java_u4((address)&_table_base[index]); }
+ 
+   // For tableswitch - get length of offset part
+@@ -323,7 +320,7 @@
+ 
+   // Count the number of handlers this stream will produce from now on.
+   // Include the current handler, and the final rethrow handler.
+-  // The remaining count will be zero iff is_done() is true, 
++  // The remaining count will be zero iff is_done() is true.
+   int count_remaining();
+ 
+   bool is_done() {
+@@ -370,7 +367,3 @@
+     return _method->_exception_handlers[_pos];
+   }
+ };
+-
+-
+-
+-
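
get_index() above decides between a narrow and a widened operand by comparing the current pc against _was_wide, the address recorded just past the last wide-prefixed bytecode. A standalone illustration with a hand-laid-out wide iload (0xC4 and 0x15 are the real JVM opcodes; the variable names are invented):

    #include <cstdint>
    #include <cstdio>

    int main() {
        //              wide   iload  index_hi index_lo
        uint8_t code[] = {0xC4, 0x15,  0x01,    0x02};
        const uint8_t* bc_start = code;       // start of current instruction
        const uint8_t* pc       = code + 4;   // advanced past the wide form
        const uint8_t* was_wide = pc;         // set when wide() was consumed

        // The _pc == _was_wide comparison from get_index():
        int index = (pc == was_wide)
            ? (bc_start[2] << 8) | bc_start[3]   // widened: u2 operand
            : bc_start[1];                       // normal: u1 operand
        std::printf("local index = %d\n", index);  // 0x0102 = 258
    }
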
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciSymbol.cpp openjdk/hotspot/src/share/vm/ci/ciSymbol.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciSymbol.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciSymbol.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciSymbol.cpp	1.22 07/05/05 17:05:16 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -72,9 +69,9 @@
+ // ciSymbol::print_impl
+ //
+ // Implementation of the print method
+-void ciSymbol::print_impl() {
+-  tty->print(" value=");
+-  print_symbol();
++void ciSymbol::print_impl(outputStream* st) {
++  st->print(" value=");
++  print_symbol_on(st);
+ }
+ 
+ // ------------------------------------------------------------------
+@@ -109,4 +106,3 @@
+ ciSymbol* ciSymbol::make(const char* s) {
+   GUARDED_VM_ENTRY(return make_impl(s);)
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciSymbol.hpp openjdk/hotspot/src/share/vm/ci/ciSymbol.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciSymbol.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciSymbol.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciSymbol.hpp	1.15 07/05/17 15:50:09 JVM"
+-#endif
+ /*
+  * Copyright 1999-2001 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciSymbol
+@@ -44,8 +41,8 @@
+   symbolOop get_symbolOop() { return (symbolOop)get_oop(); }
+ 
+   const char* type_string() { return "ciSymbol"; }
+-  
+-  void print_impl();
++
++  void print_impl(outputStream* st);
+ 
+   int         byte_at(int i);
+   jbyte*      base();
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciSymbolKlass.cpp openjdk/hotspot/src/share/vm/ci/ciSymbolKlass.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciSymbolKlass.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciSymbolKlass.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciSymbolKlass.cpp	1.10 07/05/05 17:05:16 JVM"
+-#endif
+ /*
+  * Copyright 1999 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciSymbolKlass.hpp openjdk/hotspot/src/share/vm/ci/ciSymbolKlass.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciSymbolKlass.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciSymbolKlass.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciSymbolKlass.hpp	1.13 07/05/05 17:05:17 JVM"
+-#endif
+ /*
+  * Copyright 1999-2001 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciSymbolKlass
+@@ -43,7 +40,7 @@
+   }
+ 
+   symbolKlass* get_symbolKlass() { return (symbolKlass*)get_Klass(); }
+-  
++
+   const char* type_string() { return "ciSymbolKlass"; }
+ 
+ public:
+@@ -53,4 +50,3 @@
+   // Return the distinguished ciSymbolKlass instance.
+   static ciSymbolKlass* make();
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciTypeArray.cpp openjdk/hotspot/src/share/vm/ci/ciTypeArray.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciTypeArray.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciTypeArray.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciTypeArray.cpp	1.6 07/05/05 17:05:17 JVM"
+-#endif
+ /*
+  * Copyright 2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciTypeArray.hpp openjdk/hotspot/src/share/vm/ci/ciTypeArray.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciTypeArray.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciTypeArray.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciTypeArray.hpp	1.13 07/05/05 17:05:17 JVM"
+-#endif
+ /*
+  * Copyright 1999-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciTypeArray
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciTypeArrayKlass.cpp openjdk/hotspot/src/share/vm/ci/ciTypeArrayKlass.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciTypeArrayKlass.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciTypeArrayKlass.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciTypeArrayKlass.cpp	1.14 07/05/05 17:05:17 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -56,4 +53,3 @@
+ ciTypeArrayKlass* ciTypeArrayKlass::make(BasicType t) {
+   GUARDED_VM_ENTRY(return make_impl(t);)
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciTypeArrayKlass.hpp openjdk/hotspot/src/share/vm/ci/ciTypeArrayKlass.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciTypeArrayKlass.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciTypeArrayKlass.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciTypeArrayKlass.hpp	1.13 07/05/05 17:05:17 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciTypeArrayKlass
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciTypeArrayKlassKlass.cpp openjdk/hotspot/src/share/vm/ci/ciTypeArrayKlassKlass.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciTypeArrayKlassKlass.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciTypeArrayKlassKlass.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciTypeArrayKlassKlass.cpp	1.10 07/05/05 17:05:17 JVM"
+-#endif
+ /*
+  * Copyright 1999 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciTypeArrayKlassKlass.hpp openjdk/hotspot/src/share/vm/ci/ciTypeArrayKlassKlass.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciTypeArrayKlassKlass.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciTypeArrayKlassKlass.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciTypeArrayKlassKlass.hpp	1.12 07/05/05 17:05:18 JVM"
+-#endif
+ /*
+  * Copyright 1999-2001 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciTypeArrayKlassKlass
+@@ -42,7 +39,7 @@
+   typeArrayKlassKlass* get_typeArrayKlassKlass() {
+     return (typeArrayKlassKlass*)get_Klass();
+   }
+-  
++
+   const char* type_string() { return "ciTypeArrayKlassKlass"; }
+ 
+ public:
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciType.cpp openjdk/hotspot/src/share/vm/ci/ciType.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciType.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciType.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciType.cpp	1.16 07/05/05 17:05:16 JVM"
+-#endif
+ /*
+  * Copyright 2000-2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -67,17 +64,17 @@
+ // ciType::print_impl
+ //
+ // Implementation of the print method.
+-void ciType::print_impl() {
+-  tty->print(" type=");
+-  print_name();
++void ciType::print_impl(outputStream* st) {
++  st->print(" type=");
++  print_name_on(st);
+ }
+ 
+ // ------------------------------------------------------------------
+ // ciType::print_name
+ //
+ // Print the name of this type
+-void ciType::print_name() {
+-  tty->print(type2name(basic_type()));
++void ciType::print_name_on(outputStream* st) {
++  st->print(type2name(basic_type()));
+ }
+ 
+ 
+@@ -87,7 +84,7 @@
+ //
+ ciInstance* ciType::java_mirror() {
+   VM_ENTRY_MARK;
+-  return CURRENT_THREAD_ENV->get_object(SystemDictionary::java_mirror(basic_type()))->as_instance();
++  return CURRENT_THREAD_ENV->get_object(Universe::java_mirror(basic_type()))->as_instance();
+ }
+ 
+ // ------------------------------------------------------------------
+@@ -136,8 +133,8 @@
+ // ciReturnAddress::print_impl
+ //
+ // Implementation of the print method.
+-void ciReturnAddress::print_impl() {
+-  tty->print(" bci=%d", _bci);
++void ciReturnAddress::print_impl(outputStream* st) {
++  st->print(" bci=%d", _bci);
+ }
+ 
+ // ------------------------------------------------------------------
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciTypeFlow.cpp openjdk/hotspot/src/share/vm/ci/ciTypeFlow.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciTypeFlow.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciTypeFlow.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciTypeFlow.cpp	1.46 07/05/05 17:05:17 JVM"
+-#endif
+ /*
+  * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -83,7 +80,7 @@
+   // the sets to be compatible.
+   int size1 = size();
+   int size2 = other->size();
+-  
++
+   // Special case.  If nothing is on the jsr stack, then there can
+   // be no ret.
+   if (size2 == 0) {
+@@ -95,8 +92,8 @@
+       JsrRecord* record1 = record_at(i);
+       JsrRecord* record2 = other->record_at(i);
+       if (record1->entry_address() != record2->entry_address() ||
+-	  record1->return_address() != record2->return_address()) {
+-	return false;
++          record1->return_address() != record2->return_address()) {
++        return false;
+       }
+     }
+     return true;
+@@ -118,11 +115,11 @@
+       pos2++;
+     } else {
+       if (record1->return_address() == record2->return_address()) {
+-	pos1++;
+-	pos2++;
++        pos1++;
++        pos2++;
+       } else {
+-	// These two JsrSets are incompatible.
+-	return false;
++        // These two JsrSets are incompatible.
++        return false;
+       }
+     }
+   }
+@@ -150,7 +147,7 @@
+     } else if (entry < current->entry_address()) {
+       break;
+     }
+-  }   
++  }
+ 
+   // Insert the record into the list.
+   JsrRecord* swap = record;
+@@ -175,7 +172,7 @@
+       // We have found the proper entry.  Remove it from the
+       // JsrSet and exit.
+       for (int j = i+1; j < len ; j++) {
+-	_set->at_put(j-1, _set->at(j));
++        _set->at_put(j-1, _set->at(j));
+       }
+       _set->trunc_to(len-1);
+       assert(size() == len-1, "must be smaller");
+@@ -191,8 +188,8 @@
+ // Apply the effect of a control-flow bytecode on the JsrSet.  The
+ // only bytecodes that modify the JsrSet are jsr and ret.
+ void ciTypeFlow::JsrSet::apply_control(ciTypeFlow* analyzer,
+-				       ciBytecodeStream* str,
+-				       ciTypeFlow::StateVector* state) {
++                                       ciBytecodeStream* str,
++                                       ciTypeFlow::StateVector* state) {
+   Bytecodes::Code code = str->cur_bc();
+   if (code == Bytecodes::_jsr) {
+     JsrRecord* record =
+@@ -266,14 +263,14 @@
+     // is T.  null_type meet null_type is null_type.
+     if (t1->equals(null_type())) {
+       if (!t2->is_primitive_type() || t2->equals(null_type())) {
+-	return t2;
++        return t2;
+       }
+     } else if (t2->equals(null_type())) {
+       if (!t1->is_primitive_type()) {
+-	return t1;
++        return t1;
+       }
+     }
+-	  
++
+     // At least one of the two types is a non-top primitive type.
+     // The other type is not equal to it.  Fall to bottom.
+     return bottom_type();
+@@ -298,22 +295,22 @@
+       // And when typeArray meets different typeArray, we again get Object.
+       // But when objArray meets objArray, we look carefully at element types.
+       if (k1->is_obj_array_klass() && k2->is_obj_array_klass()) {
+-	// Meet the element types, then construct the corresponding array type.
+-	ciKlass* elem1 = k1->as_obj_array_klass()->element_klass();
+-	ciKlass* elem2 = k2->as_obj_array_klass()->element_klass();
+-	ciKlass* elem  = type_meet_internal(elem1, elem2, analyzer)->as_klass();
+-	// Do an easy shortcut if one type is a super of the other.
+-	if (elem == elem1) {
+-	  assert(k1 == ciObjArrayKlass::make(elem), "shortcut is OK");
+-	  return k1;
+-	} else if (elem == elem2) {
+-	  assert(k2 == ciObjArrayKlass::make(elem), "shortcut is OK");
+-	  return k2;
+-	} else {
+-	  return ciObjArrayKlass::make(elem);
+-	}
++        // Meet the element types, then construct the corresponding array type.
++        ciKlass* elem1 = k1->as_obj_array_klass()->element_klass();
++        ciKlass* elem2 = k2->as_obj_array_klass()->element_klass();
++        ciKlass* elem  = type_meet_internal(elem1, elem2, analyzer)->as_klass();
++        // Do an easy shortcut if one type is a super of the other.
++        if (elem == elem1) {
++          assert(k1 == ciObjArrayKlass::make(elem), "shortcut is OK");
++          return k1;
++        } else if (elem == elem2) {
++          assert(k2 == ciObjArrayKlass::make(elem), "shortcut is OK");
++          return k2;
++        } else {
++          return ciObjArrayKlass::make(elem);
++        }
+       } else {
+-	return object_klass;
++        return object_klass;
+       }
+     } else {
+       // Must be two plain old instance klasses.
+@@ -323,7 +320,7 @@
+     }
+   }
+ }
+-      
++
+ 
+ // ------------------------------------------------------------------
+ // ciTypeFlow::StateVector::StateVector
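
The type_meet_internal hunks reindented above compute the lattice meet of two types: object arrays meet element-wise and recursively, while plain instance klasses meet at a common ancestor. A toy model of that rule over a name-keyed hierarchy; the hierarchy and all names are invented, and it assumes every class reaches "Object", the top of this toy lattice:

    #include <cstdio>
    #include <map>
    #include <string>

    // Invented hierarchy (child -> parent).
    static const std::map<std::string, std::string> parent = {
        {"String", "Object"}, {"Integer", "Number"}, {"Number", "Object"},
    };

    bool is_ancestor(const std::string& anc, std::string k) {
        while (k != anc) {
            std::map<std::string, std::string>::const_iterator it =
                parent.find(k);
            if (it == parent.end()) return false;
            k = it->second;
        }
        return true;
    }

    std::string meet(const std::string& a, const std::string& b) {
        bool a_arr = a.size() > 2 && a.compare(a.size() - 2, 2, "[]") == 0;
        bool b_arr = b.size() > 2 && b.compare(b.size() - 2, 2, "[]") == 0;
        if (a_arr && b_arr)   // arrays meet element-wise, recursively
            return meet(a.substr(0, a.size() - 2),
                        b.substr(0, b.size() - 2)) + "[]";
        std::string k = a;    // otherwise walk up to a common ancestor
        while (!is_ancestor(k, b))
            k = parent.count(k) ? parent.at(k) : std::string("Object");
        return k;
    }

    int main() {
        std::printf("%s\n", meet("String[]", "Integer[]").c_str());  // Object[]
    }
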
+@@ -367,7 +364,7 @@
+     if (non_osr_start != start_bci()) {
+       // must flow forward from it
+       if (CITraceTypeFlow) {
+-	tty->print_cr(">> Interpreting pre-OSR block %d:", non_osr_start);
++        tty->print_cr(">> Interpreting pre-OSR block %d:", non_osr_start);
+       }
+       Block* block = block_at(non_osr_start, jsrs);
+       assert(block->limit() == start_bci(), "must flow forward to start");
+@@ -459,8 +456,8 @@
+     if (!t1->equals(t2)) {
+       ciType* new_type = type_meet(t1, t2);
+       if (!t1->equals(new_type)) {
+-	set_type_at(c, new_type);
+-	different = true;
++        set_type_at(c, new_type);
++        different = true;
+       }
+     }
+   }
+@@ -474,7 +471,7 @@
+ // one.  The incoming state is coming via an exception.  Returns true
+ // if any modification takes place.
+ bool ciTypeFlow::StateVector::meet_exception(ciInstanceKlass* exc,
+-				     const ciTypeFlow::StateVector* incoming) {
++                                     const ciTypeFlow::StateVector* incoming) {
+   if (monitor_count() == -1) {
+     set_monitor_count(incoming->monitor_count());
+   }
+@@ -496,12 +493,12 @@
+     if (!t1->equals(t2)) {
+       ciType* new_type = type_meet(t1, t2);
+       if (!t1->equals(new_type)) {
+-	set_type_at(c, new_type);
+-	different = true;
++        set_type_at(c, new_type);
++        different = true;
+       }
+     }
+   }
+-  
++
+   // Handle stack separately.  When an exception occurs, the
+   // only stack entry is the exception instance.
+   ciType* tos_type = type_at_tos();
+@@ -512,7 +509,7 @@
+       different = true;
+     }
+   }
+-  
++
+   return different;
+ }
+ 
+@@ -625,13 +622,13 @@
+     } else {
+       push_translate(field_type);
+     }
+-  }    
++  }
+ }
+ 
+ // ------------------------------------------------------------------
+ // ciTypeFlow::StateVector::do_invoke
+ void ciTypeFlow::StateVector::do_invoke(ciBytecodeStream* str,
+-					bool has_receiver) {
++                                        bool has_receiver) {
+   bool will_link;
+   ciMethod* method = str->get_method(will_link);
+   if (!will_link) {
+@@ -650,8 +647,8 @@
+       // Do I want to check this type?
+       // assert(stack_type->is_subtype_of(type), "bad type for field value");
+       if (type->is_two_word()) {
+-	ciType* stack_type2 = type_at(stack(stack_base + i++));
+-	assert(stack_type2->equals(half_type(type)), "must be 2nd half");
++        ciType* stack_type2 = type_at(stack(stack_base + i++));
++        assert(stack_type2->equals(half_type(type)), "must be 2nd half");
+       }
+     }
+     assert(arg_size == i, "must match");
+@@ -666,11 +663,11 @@
+     ciType* return_type = sigstr.type();
+     if (!return_type->is_void()) {
+       if (!return_type->is_loaded()) {
+-        // As in do_getstatic(), generally speaking, we need the return type to 
++        // As in do_getstatic(), generally speaking, we need the return type to
+         // be loaded if we are to do anything interesting with its value.
+         // We used to do this:  trap(str, str->get_method_signature_index());
+         //
+-        // We do not trap here since execution can get past this invoke if 
++        // We do not trap here since execution can get past this invoke if
+         // the return value is null.  As long as the value is null, the class
+         // does not need to be loaded!  The compiler must assume that
+         // the value of the unloaded class reference is null; if the code
+@@ -679,7 +676,7 @@
+         // See do_getstatic() for similar explanation, as well as bug 4684993.
+         do_null_assert(return_type->as_klass());
+       } else {
+-	push_translate(return_type);
++        push_translate(return_type);
+       }
+     }
+   }
+@@ -779,14 +776,14 @@
+       assert(type2->is_two_word(), "must be 2nd half");
+       assert(type == half_type(type2), "must be 2nd half");
+     }
+-  }    
++  }
+ }
+ 
+ // ------------------------------------------------------------------
+ // ciTypeFlow::StateVector::do_ret
+ void ciTypeFlow::StateVector::do_ret(ciBytecodeStream* str) {
+   Cell index = local(str->get_index());
+-  
++
+   ciType* address = type_at(index);
+   assert(address->is_return_address(), "bad return address");
+   set_type_at(index, bottom_type());
+@@ -833,7 +830,7 @@
+     // class later.
+     push_null();
+   }
+-} 
++}
+ 
+ 
+ // ------------------------------------------------------------------
+@@ -846,7 +843,7 @@
+ 
+   if (CITraceTypeFlow) {
+     tty->print_cr(">> Interpreting bytecode %d:%s", str->cur_bci(),
+-		  Bytecodes::name(str->cur_bc()));
++                  Bytecodes::name(str->cur_bc()));
+   }
+ 
+   switch(str->cur_bc()) {
+@@ -1185,7 +1182,7 @@
+ 
+   case Bytecodes::_getfield:  do_getfield(str);                      break;
+   case Bytecodes::_getstatic: do_getstatic(str);                     break;
+-    
++
+   case Bytecodes::_goto:
+   case Bytecodes::_goto_w:
+   case Bytecodes::_nop:
+@@ -1377,7 +1374,7 @@
+   case Bytecodes::_lload_1: load_local_long(1);                     break;
+   case Bytecodes::_lload_2: load_local_long(2);                     break;
+   case Bytecodes::_lload_3: load_local_long(3);                     break;
+-    
++
+   case Bytecodes::_lneg:
+     {
+       pop_long();
+@@ -1409,7 +1406,7 @@
+   case Bytecodes::_new:      do_new(str);                           break;
+ 
+   case Bytecodes::_newarray: do_newarray(str);                      break;
+-    
++
+   case Bytecodes::_pop:
+     {
+       pop();
+@@ -1424,7 +1421,7 @@
+ 
+   case Bytecodes::_putfield:       do_putfield(str);                 break;
+   case Bytecodes::_putstatic:      do_putstatic(str);                break;
+-  
++
+   case Bytecodes::_ret: do_ret(str);                                 break;
+ 
+   case Bytecodes::_swap:
+@@ -1507,7 +1504,7 @@
+     }
+   }
+ }
+-#endif 
++#endif
+ 
+ // ciTypeFlow::Block
+ //
+@@ -1516,8 +1513,8 @@
+ // ------------------------------------------------------------------
+ // ciTypeFlow::Block::Block
+ ciTypeFlow::Block::Block(ciTypeFlow* outer,
+-			 ciBlock *ciblk,
+-			 ciTypeFlow::JsrSet* jsrs) {
++                         ciBlock *ciblk,
++                         ciTypeFlow::JsrSet* jsrs) {
+   _ciblock = ciblk;
+   _exceptions = NULL;
+   _exc_klasses = NULL;
+@@ -1564,13 +1561,13 @@
+   // the initial iteration of the loop.  (We know we are simulating
+   // the initial iteration right now, since we have never calculated
+   // successors before for this block.)
+-  
++
+   if (branch_bci <= start()
+       && (target->limit() - target->start()) <= CICloneLoopTestLimit
+       && target->private_copy_count() == 0) {
+     // Setting the private_copy bit ensures that the target block cannot be
+     // reached by any other paths, such as fall-in from the loop body.
+-    // The private copy will be accessible only on successor lists 
++    // The private copy will be accessible only on successor lists
+     // created up to this point.
+     target->set_private_copy(true);
+     if (CITraceTypeFlow) {
+@@ -1578,8 +1575,8 @@
+       print_value_on(tty);
+       tty->cr();
+     }
+-    // If the target is the current block, then later on a new copy of the 
+-    // target block will be created when its bytecodes are reached by 
++    // If the target is the current block, then later on a new copy of the
++    // target block will be created when its bytecodes are reached by
+     // an alternate path. (This is the case for loops with the loop
+     // head at the bci-wise bottom of the loop, as with pre-1.4.2 javac.)
+     //
+@@ -1601,15 +1598,15 @@
+ // Get the successors for this Block.
+ GrowableArray<ciTypeFlow::Block*>*
+ ciTypeFlow::Block::successors(ciBytecodeStream* str,
+-			      ciTypeFlow::StateVector* state,
+-			      ciTypeFlow::JsrSet* jsrs) {
++                              ciTypeFlow::StateVector* state,
++                              ciTypeFlow::JsrSet* jsrs) {
+   if (_successors == NULL) {
+     if (CITraceTypeFlow) {
+       tty->print(">> Computing successors for block ");
+       print_value_on(tty);
+       tty->cr();
+     }
+-    
++
+     ciTypeFlow* analyzer = outer();
+     Arena* arena = analyzer->arena();
+     Block* block = NULL;
+@@ -1617,13 +1614,13 @@
+                          (control() != ciBlock::fall_through_bci || limit() < analyzer->code_size());
+     if (!has_successor) {
+       _successors =
+-	new (arena) GrowableArray<Block*>(arena, 1, 0, NULL);
++        new (arena) GrowableArray<Block*>(arena, 1, 0, NULL);
+       // No successors
+     } else if (control() == ciBlock::fall_through_bci) {
+       assert(str->cur_bci() == limit(), "bad block end");
+       // This block simply falls through to the next.
+       _successors =
+-	new (arena) GrowableArray<Block*>(arena, 1, 0, NULL);
++        new (arena) GrowableArray<Block*>(arena, 1, 0, NULL);
+ 
+       Block* block = analyzer->block_at(limit(), _jsrs);
+       assert(_successors->length() == FALL_THROUGH, "");
+@@ -1645,115 +1642,115 @@
+       case Bytecodes::_if_icmpgt:    case Bytecodes::_if_icmple:
+       case Bytecodes::_if_acmpeq:    case Bytecodes::_if_acmpne:
+       case Bytecodes::_ifnull:       case Bytecodes::_ifnonnull:
+-	// Our successors are the branch target and the next bci.
+-	branch_bci = str->get_dest();
+-	clone_loop_head(analyzer, branch_bci, this, jsrs);
+-	_successors =
+-	  new (arena) GrowableArray<Block*>(arena, 2, 0, NULL);
+-	assert(_successors->length() == IF_NOT_TAKEN, "");
+-	_successors->append(analyzer->block_at(next_bci, jsrs));
+-	assert(_successors->length() == IF_TAKEN, "");
+-	_successors->append(analyzer->block_at(branch_bci, jsrs));
+-	break;
+-	
++        // Our successors are the branch target and the next bci.
++        branch_bci = str->get_dest();
++        clone_loop_head(analyzer, branch_bci, this, jsrs);
++        _successors =
++          new (arena) GrowableArray<Block*>(arena, 2, 0, NULL);
++        assert(_successors->length() == IF_NOT_TAKEN, "");
++        _successors->append(analyzer->block_at(next_bci, jsrs));
++        assert(_successors->length() == IF_TAKEN, "");
++        _successors->append(analyzer->block_at(branch_bci, jsrs));
++        break;
++
+       case Bytecodes::_goto:
+-	branch_bci = str->get_dest();
+-	_successors =
+-	  new (arena) GrowableArray<Block*>(arena, 1, 0, NULL);
+-	assert(_successors->length() == GOTO_TARGET, "");
+-	target = analyzer->block_at(branch_bci, jsrs);
+-	// If the target block has not been visited yet, and looks like
+-	// a two-way branch, attempt to clone it if it is a loop head.
+-	if (target->_successors != NULL
+-	    && target->_successors->length() == (IF_TAKEN + 1)) {
+-	  target = clone_loop_head(analyzer, branch_bci, target, jsrs);
+-	}
+-	_successors->append(target);
+-	break;
++        branch_bci = str->get_dest();
++        _successors =
++          new (arena) GrowableArray<Block*>(arena, 1, 0, NULL);
++        assert(_successors->length() == GOTO_TARGET, "");
++        target = analyzer->block_at(branch_bci, jsrs);
++        // If the target block has not been visited yet, and looks like
++        // a two-way branch, attempt to clone it if it is a loop head.
++        if (target->_successors != NULL
++            && target->_successors->length() == (IF_TAKEN + 1)) {
++          target = clone_loop_head(analyzer, branch_bci, target, jsrs);
++        }
++        _successors->append(target);
++        break;
+ 
+       case Bytecodes::_jsr:
+-	branch_bci = str->get_dest();
+-	_successors =
+-	  new (arena) GrowableArray<Block*>(arena, 1, 0, NULL);
+-	assert(_successors->length() == GOTO_TARGET, "");
+-	_successors->append(analyzer->block_at(branch_bci, jsrs));
+-	break;
++        branch_bci = str->get_dest();
++        _successors =
++          new (arena) GrowableArray<Block*>(arena, 1, 0, NULL);
++        assert(_successors->length() == GOTO_TARGET, "");
++        _successors->append(analyzer->block_at(branch_bci, jsrs));
++        break;
+ 
+-      case Bytecodes::_goto_w:         
++      case Bytecodes::_goto_w:
+       case Bytecodes::_jsr_w:
+-	_successors =
+-	  new (arena) GrowableArray<Block*>(arena, 1, 0, NULL);
+-	assert(_successors->length() == GOTO_TARGET, "");
+-	_successors->append(analyzer->block_at(str->get_far_dest(), jsrs));
+-	break;
++        _successors =
++          new (arena) GrowableArray<Block*>(arena, 1, 0, NULL);
++        assert(_successors->length() == GOTO_TARGET, "");
++        _successors->append(analyzer->block_at(str->get_far_dest(), jsrs));
++        break;
+ 
+       case Bytecodes::_tableswitch:  {
+-	Bytecode_tableswitch *tableswitch =
+-	  Bytecode_tableswitch_at(str->cur_bcp());
+-	
+-	int len = tableswitch->length();        
+-	_successors =
+-	  new (arena) GrowableArray<Block*>(arena, len+1, 0, NULL);
+-	int bci = current_bci + tableswitch->default_offset();
+-	Block* block = analyzer->block_at(bci, jsrs);
+-	assert(_successors->length() == SWITCH_DEFAULT, "");
+-	_successors->append(block);
+-	while (--len >= 0) {
+-	  int bci = current_bci + tableswitch->dest_offset_at(len);
+-	  block = analyzer->block_at(bci, jsrs);
+-	  assert(_successors->length() >= SWITCH_CASES, "");
+-	  _successors->append_if_missing(block);
+-	}
+-	break; 
++        Bytecode_tableswitch *tableswitch =
++          Bytecode_tableswitch_at(str->cur_bcp());
++
++        int len = tableswitch->length();
++        _successors =
++          new (arena) GrowableArray<Block*>(arena, len+1, 0, NULL);
++        int bci = current_bci + tableswitch->default_offset();
++        Block* block = analyzer->block_at(bci, jsrs);
++        assert(_successors->length() == SWITCH_DEFAULT, "");
++        _successors->append(block);
++        while (--len >= 0) {
++          int bci = current_bci + tableswitch->dest_offset_at(len);
++          block = analyzer->block_at(bci, jsrs);
++          assert(_successors->length() >= SWITCH_CASES, "");
++          _successors->append_if_missing(block);
++        }
++        break;
+       }
+-      
++
+       case Bytecodes::_lookupswitch: {
+-	Bytecode_lookupswitch *lookupswitch =
+-	  Bytecode_lookupswitch_at(str->cur_bcp());
+-          
+-	int npairs = lookupswitch->number_of_pairs(); 
+-	_successors =
+-	  new (arena) GrowableArray<Block*>(arena, npairs+1, 0, NULL);
+-	int bci = current_bci + lookupswitch->default_offset();
+-	Block* block = analyzer->block_at(bci, jsrs);
+-	assert(_successors->length() == SWITCH_DEFAULT, "");
+-	_successors->append(block);
+-	while(--npairs >= 0) {
+-	  LookupswitchPair *pair = lookupswitch->pair_at(npairs);
+-	  int bci = current_bci + pair->offset();
+-	  Block* block = analyzer->block_at(bci, jsrs);
+-	  assert(_successors->length() >= SWITCH_CASES, "");
+-	  _successors->append_if_missing(block);
+-	}
+-	break; 
++        Bytecode_lookupswitch *lookupswitch =
++          Bytecode_lookupswitch_at(str->cur_bcp());
++
++        int npairs = lookupswitch->number_of_pairs();
++        _successors =
++          new (arena) GrowableArray<Block*>(arena, npairs+1, 0, NULL);
++        int bci = current_bci + lookupswitch->default_offset();
++        Block* block = analyzer->block_at(bci, jsrs);
++        assert(_successors->length() == SWITCH_DEFAULT, "");
++        _successors->append(block);
++        while(--npairs >= 0) {
++          LookupswitchPair *pair = lookupswitch->pair_at(npairs);
++          int bci = current_bci + pair->offset();
++          Block* block = analyzer->block_at(bci, jsrs);
++          assert(_successors->length() >= SWITCH_CASES, "");
++          _successors->append_if_missing(block);
++        }
++        break;
+       }
+ 
+       case Bytecodes::_athrow:     case Bytecodes::_ireturn:
+       case Bytecodes::_lreturn:    case Bytecodes::_freturn:
+       case Bytecodes::_dreturn:    case Bytecodes::_areturn:
+       case Bytecodes::_return:
+-	_successors =
+-	  new (arena) GrowableArray<Block*>(arena, 1, 0, NULL);
+-	// No successors
+-	break;
++        _successors =
++          new (arena) GrowableArray<Block*>(arena, 1, 0, NULL);
++        // No successors
++        break;
+ 
+       case Bytecodes::_ret: {
+-	_successors =
+-	  new (arena) GrowableArray<Block*>(arena, 1, 0, NULL);
++        _successors =
++          new (arena) GrowableArray<Block*>(arena, 1, 0, NULL);
+ 
+-	Cell local = state->local(str->get_index());
+-	ciType* return_address = state->type_at(local);
+-	assert(return_address->is_return_address(), "verify: wrong type");
+-	int bci = return_address->as_return_address()->bci();
+-	assert(_successors->length() == GOTO_TARGET, "");
+-	_successors->append(analyzer->block_at(bci, jsrs));
+-	break;
+-      }
+-
+-      case Bytecodes::_wide:           
+-      default:                 
+-	ShouldNotReachHere();
+-	break;
++        Cell local = state->local(str->get_index());
++        ciType* return_address = state->type_at(local);
++        assert(return_address->is_return_address(), "verify: wrong type");
++        int bci = return_address->as_return_address()->bci();
++        assert(_successors->length() == GOTO_TARGET, "");
++        _successors->append(analyzer->block_at(bci, jsrs));
++        break;
++      }
++
++      case Bytecodes::_wide:
++      default:
++        ShouldNotReachHere();
++        break;
+       }
+     }
+   }
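[Editor's note] The reindented switch cases above compute a block's successor list: the default target occupies the SWITCH_DEFAULT slot, then each case target is appended only if not already present (append_if_missing). A small stand-alone sketch of that shape; Tableswitch and the vector here are toy stand-ins for Bytecode_tableswitch and GrowableArray.

#include <algorithm>
#include <cstdio>
#include <vector>

struct Tableswitch {
  int default_offset;
  std::vector<int> dest_offsets;  // one relative offset per case
};

static std::vector<int> switch_successors(int current_bci, const Tableswitch& ts) {
  std::vector<int> succ;
  succ.push_back(current_bci + ts.default_offset);  // the SWITCH_DEFAULT slot
  for (int off : ts.dest_offsets) {
    int bci = current_bci + off;
    if (std::find(succ.begin(), succ.end(), bci) == succ.end())
      succ.push_back(bci);  // append_if_missing: shared targets appear once
  }
  return succ;
}

int main() {
  Tableswitch ts{20, {8, 12, 8}};
  for (int bci : switch_successors(100, ts))
    std::printf("%d ", bci);  // prints: 120 108 112
  std::printf("\n");
  return 0;
}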
+@@ -1772,10 +1769,10 @@
+     print_value_on(tty);
+     tty->cr();
+   }
+-  
++
+   ciTypeFlow* analyzer = outer();
+   Arena* arena = analyzer->arena();
+-  
++
+   // Any bci in the block will do.
+   ciExceptionHandlerStream str(analyzer->method(), start());
+ 
+@@ -1783,7 +1780,7 @@
+   int exc_count = str.count();
+   _exceptions = new (arena) GrowableArray<Block*>(arena, exc_count, 0, NULL);
+   _exc_klasses = new (arena) GrowableArray<ciInstanceKlass*>(arena, exc_count,
+-							     0, NULL);
++                                                             0, NULL);
+ 
+   for ( ; !str.is_done(); str.next()) {
+     ciExceptionHandler* handler = str.handler();
+@@ -1825,24 +1822,24 @@
+     } else {
+ #if 0
+       if (size1 > 0) {
+-	int r1 = _jsrs->record_at(0)->return_address();
+-	int r2 = _jsrs->record_at(0)->return_address();
+-	if (r1 < r2) {
+-	  return true;
+-	} else if (r2 < r1) {
+-	  return false;
+-	} else {
+-	  int e1 = _jsrs->record_at(0)->return_address();
+-	  int e2 = _jsrs->record_at(0)->return_address();
+-	  if (e1 < e2) {
+-	    return true;
+-	  } else if (e2 < e1) {
+-	    return false;
+-	  }
+-	}
++        int r1 = _jsrs->record_at(0)->return_address();
++        int r2 = _jsrs->record_at(0)->return_address();
++        if (r1 < r2) {
++          return true;
++        } else if (r2 < r1) {
++          return false;
++        } else {
++          int e1 = _jsrs->record_at(0)->return_address();
++          int e2 = _jsrs->record_at(0)->return_address();
++          if (e1 < e2) {
++            return true;
++          } else if (e2 < e1) {
++            return false;
++          }
++        }
+       }
+ #endif
+-      return (start() <= other->start()); 
++      return (start() <= other->start());
+     }
+   }
+ }
+@@ -1869,7 +1866,7 @@
+ // ciTypeFlow::Block::print_on
+ void ciTypeFlow::Block::print_on(outputStream* st) const {
+   if ((Verbose || WizardMode)) {
+-    outer()->method()->print_codes(start(), limit());
++    outer()->method()->print_codes_on(start(), limit(), st);
+   }
+   st->print_cr("  ====================================================  ");
+   st->print ("  ");
+@@ -1982,7 +1979,7 @@
+     Block *temp = _work_list;
+     while (!block->is_simpler_than(temp->next())) {
+       if (CITraceTypeFlow) {
+-	tty->print(".");
++        tty->print(".");
+       }
+       temp = temp->next();
+     }
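[Editor's note] The loop above keeps the analyzer's work list sorted by is_simpler_than so simpler blocks are flowed first. A runnable toy of the same insertion scheme follows; Block and its ordering predicate are simplified stand-ins for ciTypeFlow::Block, whose real predicate weighs more than the start bci.

#include <cstdio>

struct Block {
  int start;    // bci at which the block begins
  Block* next;
  bool is_simpler_than(const Block* other) const {
    return other == nullptr || start <= other->start;
  }
};

// Insert 'block' so the list stays sorted, mirroring add_to_work_list's walk.
static Block* add_to_work_list(Block* work_list, Block* block) {
  if (block->is_simpler_than(work_list)) {
    block->next = work_list;  // new head
    return block;
  }
  Block* temp = work_list;
  while (!block->is_simpler_than(temp->next))
    temp = temp->next;
  block->next = temp->next;
  temp->next = block;
  return work_list;
}

int main() {
  Block a{10, nullptr}, b{4, nullptr}, c{7, nullptr};
  Block* wl = nullptr;
  wl = add_to_work_list(wl, &a);
  wl = add_to_work_list(wl, &b);
  wl = add_to_work_list(wl, &c);
  for (Block* p = wl; p != nullptr; p = p->next)
    std::printf("%d ", p->start);  // prints: 4 7 10
  std::printf("\n");
  return 0;
}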
+@@ -2022,46 +2019,46 @@
+       tty->print_cr(">> No such block.");
+     }
+   }
+-  
++
+   return block;
+ }
+- 
++
+ // ------------------------------------------------------------------
+ // ciTypeFlow::make_jsr_record
+ //
+ // Make a JsrRecord for a given (entry, return) pair, if such a record
+ // does not already exist.
+ ciTypeFlow::JsrRecord* ciTypeFlow::make_jsr_record(int entry_address,
+-						   int return_address) {
++                                                   int return_address) {
+   if (_jsr_records == NULL) {
+     _jsr_records = new (arena()) GrowableArray<JsrRecord*>(arena(),
+-							   _jsr_count,
+-							   0,
+-							   NULL);
++                                                           _jsr_count,
++                                                           0,
++                                                           NULL);
+   }
+   JsrRecord* record = NULL;
+   int len = _jsr_records->length();
+   for (int i = 0; i < len; i++) {
+     JsrRecord* record = _jsr_records->at(i);
+     if (record->entry_address() == entry_address &&
+-	record->return_address() == return_address) {
++        record->return_address() == return_address) {
+       return record;
+     }
+   }
+-  
++
+   record = new (arena()) JsrRecord(entry_address, return_address);
+   _jsr_records->append(record);
+   return record;
+ }
+- 
++
+ // ------------------------------------------------------------------
+ // ciTypeFlow::flow_exceptions
+ //
+ // Merge the current state into all exceptional successors at the
+ // current point in the code.
+ void ciTypeFlow::flow_exceptions(GrowableArray<ciTypeFlow::Block*>* exceptions,
+-				 GrowableArray<ciInstanceKlass*>* exc_klasses,
+-				 ciTypeFlow::StateVector* state) {
++                                 GrowableArray<ciInstanceKlass*>* exc_klasses,
++                                 ciTypeFlow::StateVector* state) {
+   int len = exceptions->length();
+   assert(exc_klasses->length() == len, "must have same length");
+   for (int i = 0; i < len; i++) {
+@@ -2077,10 +2074,10 @@
+     if (block->meet_exception(exception_klass, state)) {
+       // Block was modified.  Add it to the work list.
+       if (!block->is_on_work_list()) {
+-	add_to_work_list(block);
++        add_to_work_list(block);
+       }
+     }
+-  }    
++  }
+ }
+ 
+ // ------------------------------------------------------------------
+@@ -2089,14 +2086,14 @@
+ // Merge the current state into all successors at the current point
+ // in the code.
+ void ciTypeFlow::flow_successors(GrowableArray<ciTypeFlow::Block*>* successors,
+-				 ciTypeFlow::StateVector* state) {
++                                 ciTypeFlow::StateVector* state) {
+   int len = successors->length();
+   for (int i = 0; i < len; i++) {
+     Block* block = successors->at(i);
+     if (block->meet(state)) {
+       // Block was modified.  Add it to the work list.
+       if (!block->is_on_work_list()) {
+-	add_to_work_list(block);
++        add_to_work_list(block);
+       }
+     }
+   }
+@@ -2144,15 +2141,15 @@
+ // vector of a basic block.  Push the changed state to succeeding
+ // basic blocks.
+ void ciTypeFlow::flow_block(ciTypeFlow::Block* block,
+-			    ciTypeFlow::StateVector* state,
+-			    ciTypeFlow::JsrSet* jsrs) {
++                            ciTypeFlow::StateVector* state,
++                            ciTypeFlow::JsrSet* jsrs) {
+   if (CITraceTypeFlow) {
+     tty->print("\n>> ANALYZING BLOCK : ");
+     tty->cr();
+     block->print_on(tty);
+   }
+   assert(block->has_pre_order(), "pre-order is assigned before 1st flow");
+-  
++
+   int start = block->start();
+   int limit = block->limit();
+   int control = block->control();
+@@ -2171,7 +2168,7 @@
+   str.reset_to_bci(start);
+   Bytecodes::Code code;
+   while ((code = str.next()) != ciBytecodeStream::EOBC() &&
+-	 str.cur_bci() < limit) {
++         str.cur_bci() < limit) {
+     // Check for exceptional control flow from this point.
+     if (has_exceptions && can_trap(str)) {
+       flow_exceptions(exceptions, exc_klasses, state);
+@@ -2186,10 +2183,10 @@
+ 
+       // We have encountered a trap.  Record it in this block.
+       block->set_trap(state->trap_bci(), state->trap_index());
+-      
++
+       if (CITraceTypeFlow) {
+-	tty->print_cr(">> Found trap");
+-	block->print_on(tty);
++        tty->print_cr(">> Found trap");
++        block->print_on(tty);
+       }
+ 
+       // Record (no) successors.
+@@ -2296,12 +2293,12 @@
+       int curidx = ciblk->index();
+       int block_count = (_idx_to_blocklist[curidx] == NULL) ? 0 : _idx_to_blocklist[curidx]->length();
+       for (int i = 0; i < block_count; i++) {
+-	Block* block = _idx_to_blocklist[curidx]->at(i);
+-	if (!block->has_pre_order())  continue;
+-	int po = block->pre_order();
+-	assert(_block_map[po] == NULL, "unique ref to block");
+-	assert(0 <= po && po < pre_order_limit, "");
+-	_block_map[po] = block;
++        Block* block = _idx_to_blocklist[curidx]->at(i);
++        if (!block->has_pre_order())  continue;
++        int po = block->pre_order();
++        assert(_block_map[po] == NULL, "unique ref to block");
++        assert(0 <= po && po < pre_order_limit, "");
++        _block_map[po] = block;
+       }
+     }
+   }
+@@ -2312,16 +2309,16 @@
+     for (int e = 0; e <= 1; e++) {
+       GrowableArray<Block*>* l = e? block->exceptions(): block->successors();
+       for (int i = 0; i < l->length(); i++) {
+-	Block* s = l->at(i);
+-	if (!s->has_pre_order()) {
+-	  if (CITraceTypeFlow) {
+-	    tty->print("Removing dead %s successor of #%d: ", (e? "exceptional":  "normal"), block->pre_order());
+-	    s->print_value_on(tty);
+-	    tty->cr();
+-	  }
+-	  l->remove(s);
+-	  --i;
+-	}
++        Block* s = l->at(i);
++        if (!s->has_pre_order()) {
++          if (CITraceTypeFlow) {
++            tty->print("Removing dead %s successor of #%d: ", (e? "exceptional":  "normal"), block->pre_order());
++            s->print_value_on(tty);
++            tty->cr();
++          }
++          l->remove(s);
++          --i;
++        }
+       }
+     }
+   }
+@@ -2350,7 +2347,7 @@
+     for (int i = 0; i < len; i++) {
+       Block* block = blocks->at(i);
+       if (!block->is_private_copy() && block->is_compatible_with(jsrs)) {
+-	return block;
++        return block;
+       }
+     }
+   }
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciTypeFlow.hpp openjdk/hotspot/src/share/vm/ci/ciTypeFlow.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciTypeFlow.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciTypeFlow.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciTypeFlow.hpp	1.24 07/05/05 17:05:17 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ 
+@@ -110,7 +107,7 @@
+ 
+   public:
+     JsrSet(Arena* arena, int default_len = 4);
+-    
++
+     // Copy this JsrSet.
+     void copy_into(JsrSet* jsrs);
+ 
+@@ -119,8 +116,8 @@
+ 
+     // Apply the effect of a single bytecode to the JsrSet.
+     void apply_control(ciTypeFlow* analyzer,
+-		       ciBytecodeStream* str,
+-		       StateVector* state);
++                       ciBytecodeStream* str,
++                       StateVector* state);
+ 
+     // What is the cardinality of this set?
+     int size() const { return _set->length(); }
+@@ -229,7 +226,7 @@
+     void      pop() {
+       debug_only(set_type_at_tos(bottom_type()));
+       _stack_size--;
+-    } 
++    }
+     ciType*   pop_value() {
+       ciType* t = type_at_tos();
+       pop();
+@@ -301,7 +298,7 @@
+     }
+     void      pop_array() {
+       assert(type_at_tos() == null_type() ||
+-	     type_at_tos()->is_array_klass(), "must be array type");
++             type_at_tos()->is_array_klass(), "must be array type");
+       pop();
+     }
+     // pop_objArray and pop_typeArray narrow the tos to ciObjArrayKlass
+@@ -323,7 +320,7 @@
+       push(null_type());
+     }
+     void      do_null_assert(ciKlass* unloaded_klass);
+-    
++
+     // Helper convenience routines.
+     void do_aaload(ciBytecodeStream* str);
+     void do_checkcast(ciBytecodeStream* str);
+@@ -340,7 +337,7 @@
+     void do_ret(ciBytecodeStream* str);
+ 
+     void overwrite_local_double_long(int index) {
+-      // Invalidate the previous local if it contains first half of 
++      // Invalidate the previous local if it contains first half of
+       // a double or long value since its second half is being overwritten.
+       int prev_index = index - 1;
+       if (prev_index >= 0 &&
+@@ -358,7 +355,7 @@
+     void store_local_object(int index) {
+       ciType* type = pop_value();
+       assert(is_reference(type) || type->is_return_address(),
+-	     "must be reference type or return address");
++             "must be reference type or return address");
+       overwrite_local_double_long(index);
+       set_type_at(local(index), type);
+     }
+@@ -520,24 +517,24 @@
+ 
+     // Get the successors for this Block.
+     GrowableArray<Block*>* successors(ciBytecodeStream* str,
+-				      StateVector* state,
+-				      JsrSet* jsrs);
++                                      StateVector* state,
++                                      JsrSet* jsrs);
+     GrowableArray<Block*>* successors() {
+       assert(_successors != NULL, "must be filled in");
+       return _successors;
+     }
+ 
+-    // Helper function for "successors" when making private copies of 
++    // Helper function for "successors" when making private copies of
+     // loop heads for C2.
+     Block * clone_loop_head(ciTypeFlow* analyzer,
+                             int branch_bci,
+                             Block* target,
+                             JsrSet* jsrs);
+-    
++
+     // Get the exceptional successors for this Block.
+     GrowableArray<Block*>* exceptions() {
+       if (_exceptions == NULL) {
+-	compute_exceptions();
++        compute_exceptions();
+       }
+       return _exceptions;
+     }
+@@ -546,7 +543,7 @@
+     // exceptional successors for this Block.
+     GrowableArray<ciInstanceKlass*>* exc_klasses() {
+       if (_exc_klasses == NULL) {
+-	compute_exceptions();
++        compute_exceptions();
+       }
+       return _exc_klasses;
+     }
+@@ -687,20 +684,20 @@
+   // Merge the current state into all exceptional successors at the
+   // current point in the code.
+   void flow_exceptions(GrowableArray<Block*>* exceptions,
+-		       GrowableArray<ciInstanceKlass*>* exc_klasses,
+-		       StateVector* state);
++                       GrowableArray<ciInstanceKlass*>* exc_klasses,
++                       StateVector* state);
+ 
+   // Merge the current state into all successors at the current point
+   // in the code.
+   void flow_successors(GrowableArray<Block*>* successors,
+-		       StateVector* state);
++                       StateVector* state);
+ 
+   // Interpret the effects of the bytecodes on the incoming state
+   // vector of a basic block.  Push the changed state to succeeding
+   // basic blocks.
+   void flow_block(Block* block,
+-		  StateVector* scratch_state,
+-		  JsrSet* scratch_jsrs);
++                  StateVector* scratch_state,
++                  JsrSet* scratch_jsrs);
+ 
+   // Perform the type flow analysis, creating and cloning Blocks as
+   // necessary.
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciType.hpp openjdk/hotspot/src/share/vm/ci/ciType.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciType.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciType.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciType.hpp	1.14 07/05/05 17:05:16 JVM"
+-#endif
+ /*
+  * Copyright 2000-2001 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ciType
+@@ -43,7 +40,7 @@
+ 
+   const char* type_string() { return "ciType"; }
+ 
+-  void print_impl();
++  void print_impl(outputStream* st);
+ 
+   // Distinguished instances of primitive ciTypes..
+   static ciType* _basic_types[T_CONFLICT+1];
+@@ -76,7 +73,10 @@
+   bool is_type()                            { return true; }
+   bool is_classless() const                 { return is_primitive_type(); }
+ 
+-  virtual void print_name();
++  virtual void print_name_on(outputStream* st);
++  void print_name() {
++    print_name_on(tty);
++  }
+ 
+   static ciType* make(BasicType t);
+ };
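[Editor's note] This hunk belongs to the patch's broader printing refactor: virtual printers gain an explicit outputStream* parameter, and the old no-argument entry points become thin wrappers that forward to tty. A compilable illustration of the pattern; the outputStream/tty types below are stand-ins, not HotSpot's real classes.

#include <cstdio>

struct outputStream {
  virtual void print(const char* s) { std::fputs(s, stdout); }
  virtual ~outputStream() {}
};
static outputStream default_stream;
static outputStream* tty = &default_stream;  // stand-in for the global tty

struct ciType {
  // The virtual printer now takes the stream explicitly...
  virtual void print_name_on(outputStream* st) { st->print("ciType\n"); }
  // ...and the old no-argument form survives as a convenience wrapper.
  void print_name() { print_name_on(tty); }
  virtual ~ciType() {}
};

int main() {
  ciType t;
  t.print_name();           // goes to the default stream (tty)
  outputStream other;
  t.print_name_on(&other);  // or to any caller-supplied stream
  return 0;
}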
+@@ -94,10 +94,10 @@
+   int _bci;
+ 
+   ciReturnAddress(int bci);
+-  
++
+   const char* type_string() { return "ciReturnAddress"; }
+ 
+-  void print_impl();
++  void print_impl(outputStream* st);
+ 
+ public:
+   bool is_return_address()  { return true; }
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciUtilities.cpp openjdk/hotspot/src/share/vm/ci/ciUtilities.cpp
+--- openjdk6/hotspot/src/share/vm/ci/ciUtilities.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciUtilities.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ciUtilities.cpp	1.11 07/05/05 17:05:18 JVM"
+-#endif
+ /*
+  * Copyright 1999-2000 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/ci/ciUtilities.hpp openjdk/hotspot/src/share/vm/ci/ciUtilities.hpp
+--- openjdk6/hotspot/src/share/vm/ci/ciUtilities.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/ciUtilities.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ciUtilities.hpp	1.20 07/05/05 17:05:18 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,10 +19,10 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+-// The following routines and definitions are used internally in the 
++// The following routines and definitions are used internally in the
+ // compiler interface.
+ 
+ 
+@@ -107,4 +104,3 @@
+ 
+ const char* basictype_to_str(BasicType t);
+ const char  basictype_to_char(BasicType t);
+-
+diff -ruN openjdk6/hotspot/src/share/vm/ci/compilerInterface.hpp openjdk/hotspot/src/share/vm/ci/compilerInterface.hpp
+--- openjdk6/hotspot/src/share/vm/ci/compilerInterface.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/ci/compilerInterface.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)compilerInterface.hpp	1.11 07/05/05 17:05:18 JVM"
+-#endif
+ /*
+  * Copyright 1999 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This is a dummy file used for including the complete
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/classFileError.cpp openjdk/hotspot/src/share/vm/classfile/classFileError.cpp
+--- openjdk6/hotspot/src/share/vm/classfile/classFileError.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/classFileError.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)classFileError.cpp	1.12 07/05/05 17:06:44 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/classFileParser.cpp openjdk/hotspot/src/share/vm/classfile/classFileParser.cpp
+--- openjdk6/hotspot/src/share/vm/classfile/classFileParser.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/classFileParser.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)classFileParser.cpp	1.279 07/05/25 15:14:21 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -47,7 +44,7 @@
+ 
+ // Used for backward compatibility reasons:
+ // - to check for javac bug fixes that happened after 1.5
+-#define JAVA_6_VERSION                    50 
++#define JAVA_6_VERSION                    50
+ 
+ 
+ void ClassFileParser::parse_constant_pool_entries(constantPoolHandle cp, int length, TRAPS) {
+@@ -69,7 +66,7 @@
+   int indices[SymbolTable::symbol_alloc_batch_size];
+   unsigned int hashValues[SymbolTable::symbol_alloc_batch_size];
+   int names_count = 0;
+-    
++
+   // parsing  Index 0 is unused
+   for (int index = 1; index < length; index++) {
+     // Each of the following case guarantees one more byte in the stream
+@@ -131,8 +128,8 @@
+         break;
+       case JVM_CONSTANT_Long :
+         // A mangled type might cause you to overrun allocated memory
+-        guarantee_property(index+1 < length, 
+-                           "Invalid constant pool entry %u in class file %s", 
++        guarantee_property(index+1 < length,
++                           "Invalid constant pool entry %u in class file %s",
+                            index, CHECK);
+         {
+           cfs->guarantee_more(9, CHECK);  // bytes, tag/access_flags
+@@ -143,8 +140,8 @@
+         break;
+       case JVM_CONSTANT_Double :
+         // A mangled type might cause you to overrun allocated memory
+-        guarantee_property(index+1 < length, 
+-                           "Invalid constant pool entry %u in class file %s", 
++        guarantee_property(index+1 < length,
++                           "Invalid constant pool entry %u in class file %s",
+                            index, CHECK);
+         {
+           cfs->guarantee_more(9, CHECK);  // bytes, tag/access_flags
+@@ -219,12 +216,12 @@
+   cfs->guarantee_more(3, CHECK_(nullHandle)); // length, first cp tag
+   u2 length = cfs->get_u2_fast();
+   guarantee_property(
+-    length >= 1, "Illegal constant pool size %u in class file %s", 
++    length >= 1, "Illegal constant pool size %u in class file %s",
+     length, CHECK_(nullHandle));
+   constantPoolOop constant_pool =
+                       oopFactory::new_constantPool(length, CHECK_(nullHandle));
+   constantPoolHandle cp (THREAD, constant_pool);
+-  
++
+   cp->set_partially_loaded();    // Enables heap verify to work on partial constantPoolOops
+ 
+   // parsing constant pool entries
+@@ -247,13 +244,13 @@
+         int klass_ref_index = cp->klass_ref_index_at(index);
+         int name_and_type_ref_index = cp->name_and_type_ref_index_at(index);
+         check_property(valid_cp_range(klass_ref_index, length) &&
+-                       cp->tag_at(klass_ref_index).is_klass_reference(), 
+-                       "Invalid constant pool index %u in class file %s", 
+-                       klass_ref_index, 
++                       cp->tag_at(klass_ref_index).is_klass_reference(),
++                       "Invalid constant pool index %u in class file %s",
++                       klass_ref_index,
+                        CHECK_(nullHandle));
+         check_property(valid_cp_range(name_and_type_ref_index, length) &&
+-                       cp->tag_at(name_and_type_ref_index).is_name_and_type(), 
+-                       "Invalid constant pool index %u in class file %s", 
++                       cp->tag_at(name_and_type_ref_index).is_name_and_type(),
++                       "Invalid constant pool index %u in class file %s",
+                        name_and_type_ref_index,
+                        CHECK_(nullHandle));
+         break;
+@@ -269,8 +266,8 @@
+       case JVM_CONSTANT_Double :
+         index++;
+         check_property(
+-          (index < length && cp->tag_at(index).is_invalid()), 
+-          "Improper constant pool long/double index %u in class file %s", 
++          (index < length && cp->tag_at(index).is_invalid()),
++          "Improper constant pool long/double index %u in class file %s",
+           index, CHECK_(nullHandle));
+         break;
+       case JVM_CONSTANT_NameAndType : {
+@@ -278,20 +275,20 @@
+         int name_ref_index = cp->name_ref_index_at(index);
+         int signature_ref_index = cp->signature_ref_index_at(index);
+         check_property(
+-          valid_cp_range(name_ref_index, length) && 
+-            cp->tag_at(name_ref_index).is_utf8(), 
+-          "Invalid constant pool index %u in class file %s", 
++          valid_cp_range(name_ref_index, length) &&
++            cp->tag_at(name_ref_index).is_utf8(),
++          "Invalid constant pool index %u in class file %s",
+           name_ref_index, CHECK_(nullHandle));
+         check_property(
+-          valid_cp_range(signature_ref_index, length) && 
+-            cp->tag_at(signature_ref_index).is_utf8(), 
+-          "Invalid constant pool index %u in class file %s", 
++          valid_cp_range(signature_ref_index, length) &&
++            cp->tag_at(signature_ref_index).is_utf8(),
++          "Invalid constant pool index %u in class file %s",
+           signature_ref_index, CHECK_(nullHandle));
+         break;
+       }
+       case JVM_CONSTANT_Utf8 :
+         break;
+-      case JVM_CONSTANT_UnresolvedClass :	  // fall-through
++      case JVM_CONSTANT_UnresolvedClass :         // fall-through
+       case JVM_CONSTANT_UnresolvedClassInError:
+         ShouldNotReachHere();     // Only JVM_CONSTANT_ClassIndex should be present
+         break;
+@@ -299,9 +296,9 @@
+         {
+           int class_index = cp->klass_index_at(index);
+           check_property(
+-            valid_cp_range(class_index, length) && 
+-              cp->tag_at(class_index).is_utf8(), 
+-            "Invalid constant pool index %u in class file %s", 
++            valid_cp_range(class_index, length) &&
++              cp->tag_at(class_index).is_utf8(),
++            "Invalid constant pool index %u in class file %s",
+             class_index, CHECK_(nullHandle));
+           cp->unresolved_klass_at_put(index, cp->symbol_at(class_index));
+         }
+@@ -313,9 +310,9 @@
+         {
+           int string_index = cp->string_index_at(index);
+           check_property(
+-            valid_cp_range(string_index, length) && 
+-              cp->tag_at(string_index).is_utf8(), 
+-            "Invalid constant pool index %u in class file %s", 
++            valid_cp_range(string_index, length) &&
++              cp->tag_at(string_index).is_utf8(),
++            "Invalid constant pool index %u in class file %s",
+             string_index, CHECK_(nullHandle));
+           symbolOop sym = cp->symbol_at(string_index);
+           cp->unresolved_string_at_put(index, sym);
+@@ -346,9 +343,9 @@
+       case JVM_CONSTANT_InterfaceMethodref: {
+         int name_and_type_ref_index = cp->name_and_type_ref_index_at(index);
+         // already verified to be utf8
+-        int name_ref_index = cp->name_ref_index_at(name_and_type_ref_index);  
++        int name_ref_index = cp->name_ref_index_at(name_and_type_ref_index);
+         // already verified to be utf8
+-        int signature_ref_index = cp->signature_ref_index_at(name_and_type_ref_index); 
++        int signature_ref_index = cp->signature_ref_index_at(name_and_type_ref_index);
+         symbolHandle name(THREAD, cp->symbol_at(name_ref_index));
+         symbolHandle signature(THREAD, cp->symbol_at(signature_ref_index));
+         if (tag == JVM_CONSTANT_Fieldref) {
+@@ -365,17 +362,17 @@
+             if (name->byte_at(0) == '<') {
+               if (name() != vmSymbols::object_initializer_name()) {
+                 classfile_parse_error(
+-                  "Bad method name at constant pool index %u in class file %s", 
++                  "Bad method name at constant pool index %u in class file %s",
+                   name_ref_index, CHECK_(nullHandle));
+               }
+             }
+           }
+         }
+         break;
+-      }                                                  
++      }
+     }  // end of switch
+   }  // end of for
+-  
++
+   return cp;
+ }
+ 
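[Editor's note] The reflowed checks in the hunks above all follow one idiom: verify that a cross-reference lands in the constant pool's 1-based range and carries the expected tag before using it, raising ClassFormatError otherwise. A toy version of the bounds-check half; check_property here just prints and exits, whereas the real macro propagates an exception through the CHECK mechanism.

#include <cstdio>
#include <cstdlib>

// Constant pool indices are 1-based and entry 0 is unused, so a valid
// cross-reference satisfies 0 < index < length.
static bool valid_cp_range(int index, int length) {
  return index > 0 && index < length;
}

static void check_property(bool cond, const char* msg, int index) {
  if (!cond) {
    std::fprintf(stderr, "ClassFormatError: %s (index %d)\n", msg, index);
    std::exit(1);
  }
}

int main() {
  const int cp_length = 10;
  check_property(valid_cp_range(3, cp_length),
                 "Invalid constant pool index", 3);   // passes
  check_property(valid_cp_range(10, cp_length),
                 "Invalid constant pool index", 10);  // fails: index == length
  return 0;
}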
+@@ -425,7 +422,7 @@
+   entry = new NameSigHash();
+   entry->_name = name;
+   entry->_sig = sig;
+- 
++
+   // Insert into hash table
+   entry->_next = table[index];
+   table[index] = entry;
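[Editor's note] put() chains the new entry at the head of a fixed-size bucket array; the parser uses this table to reject duplicate (name, signature) pairs among a class's fields and methods. A self-contained toy follows; the hash function and the combined lookup-plus-insert are illustrative simplifications of the real NameSigHash code.

#include <cstdio>
#include <cstring>

struct NameSigHash {
  const char* _name;
  const char* _sig;
  NameSigHash* _next;
};

static const int TABLE_SIZE = 31;
static NameSigHash* table[TABLE_SIZE];  // zero-initialized bucket heads

static unsigned hash_name_sig(const char* name, const char* sig) {
  unsigned h = 0;
  for (const char* p = name; *p; ++p) h = h * 31 + (unsigned char)*p;
  for (const char* p = sig;  *p; ++p) h = h * 31 + (unsigned char)*p;
  return h % TABLE_SIZE;
}

// Returns false if an identical (name, signature) pair is already present.
static bool put(const char* name, const char* sig) {
  unsigned index = hash_name_sig(name, sig);
  for (NameSigHash* e = table[index]; e != nullptr; e = e->_next) {
    if (std::strcmp(e->_name, name) == 0 && std::strcmp(e->_sig, sig) == 0)
      return false;  // duplicate field or method
  }
  NameSigHash* entry = new NameSigHash{name, sig, table[index]};
  table[index] = entry;  // chain at the head of the bucket
  return true;
}

int main() {
  std::printf("%d\n", put("count", "I"));  // 1: inserted
  std::printf("%d\n", put("count", "I"));  // 0: duplicate rejected
  return 0;
}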
+@@ -436,11 +433,11 @@
+ 
+ objArrayHandle ClassFileParser::parse_interfaces(constantPoolHandle cp,
+                                                  int length,
+-                                                 Handle class_loader, 
++                                                 Handle class_loader,
+                                                  Handle protection_domain,
+                                                  PerfTraceTime* vmtimer,
+                                                  symbolHandle class_name,
+-                                                 TRAPS) {  
++                                                 TRAPS) {
+   ClassFileStream* cfs = stream();
+   assert(length > 0, "only called for length>0");
+   objArrayHandle nullHandle;
+@@ -451,21 +448,21 @@
+   for (index = 0; index < length; index++) {
+     u2 interface_index = cfs->get_u2(CHECK_(nullHandle));
+     check_property(
+-      valid_cp_range(interface_index, cp->length()) && 
+-        cp->tag_at(interface_index).is_unresolved_klass(), 
+-      "Interface name has bad constant pool index %u in class file %s", 
++      valid_cp_range(interface_index, cp->length()) &&
++        cp->tag_at(interface_index).is_unresolved_klass(),
++      "Interface name has bad constant pool index %u in class file %s",
+       interface_index, CHECK_(nullHandle));
+     symbolHandle unresolved_klass (THREAD, cp->klass_name_at(interface_index));
+ 
+     // Don't need to check legal name because it's checked when parsing constant pool.
+     // But need to make sure it's not an array type.
+-    guarantee_property(unresolved_klass->byte_at(0) != JVM_SIGNATURE_ARRAY, 
++    guarantee_property(unresolved_klass->byte_at(0) != JVM_SIGNATURE_ARRAY,
+                        "Bad interface name in class file %s", CHECK_(nullHandle));
+ 
+     vmtimer->suspend();  // do not count recursive loading twice
+     // Call resolve_super so class circularity is checked
+     klassOop k = SystemDictionary::resolve_super_or_fail(class_name,
+-                  unresolved_klass, class_loader, protection_domain, 
++                  unresolved_klass, class_loader, protection_domain,
+                   false, CHECK_(nullHandle));
+     KlassHandle interf (THREAD, k);
+     vmtimer->resume();
+@@ -510,10 +507,10 @@
+ void ClassFileParser::verify_constantvalue(int constantvalue_index, int signature_index, constantPoolHandle cp, TRAPS) {
+   // Make sure the constant pool entry is of a type appropriate to this field
+   guarantee_property(
+-    (constantvalue_index > 0 && 
+-      constantvalue_index < cp->length()), 
+-    "Bad initial value index %u in ConstantValue attribute in class file %s", 
+-    constantvalue_index, CHECK); 
++    (constantvalue_index > 0 &&
++      constantvalue_index < cp->length()),
++    "Bad initial value index %u in ConstantValue attribute in class file %s",
++    constantvalue_index, CHECK);
+   constantTag value_type = cp->tag_at(constantvalue_index);
+   switch ( cp->basic_type_for_signature_at(signature_index) ) {
+     case T_LONG:
+@@ -528,14 +525,14 @@
+     case T_BYTE: case T_CHAR: case T_SHORT: case T_BOOLEAN: case T_INT:
+       guarantee_property(value_type.is_int(), "Inconsistent constant value type in class file %s", CHECK);
+       break;
+-    case T_OBJECT: 
+-      guarantee_property((cp->symbol_at(signature_index)->equals("Ljava/lang/String;", 18) 
++    case T_OBJECT:
++      guarantee_property((cp->symbol_at(signature_index)->equals("Ljava/lang/String;", 18)
+                          && (value_type.is_string() || value_type.is_unresolved_string())),
+                          "Bad string initial value in class file %s", CHECK);
+       break;
+     default:
+       classfile_parse_error(
+-        "Unable to set initial value %u in class file %s", 
++        "Unable to set initial value %u in class file %s",
+         constantvalue_index, CHECK);
+   }
+ }
+@@ -564,42 +561,42 @@
+     u2 attribute_name_index = cfs->get_u2_fast();
+     u4 attribute_length = cfs->get_u4_fast();
+     check_property(valid_cp_range(attribute_name_index, cp->length()) &&
+-                   cp->tag_at(attribute_name_index).is_utf8(), 
+-                   "Invalid field attribute index %u in class file %s", 
++                   cp->tag_at(attribute_name_index).is_utf8(),
++                   "Invalid field attribute index %u in class file %s",
+                    attribute_name_index,
+                    CHECK);
+     symbolOop attribute_name = cp->symbol_at(attribute_name_index);
+-    if (is_static && attribute_name == vmSymbols::tag_constant_value()) { 
+-      // ignore if non-static   
++    if (is_static && attribute_name == vmSymbols::tag_constant_value()) {
++      // ignore if non-static
+       if (constantvalue_index != 0) {
+         classfile_parse_error("Duplicate ConstantValue attribute in class file %s", CHECK);
+       }
+       check_property(
+-        attribute_length == 2, 
+-        "Invalid ConstantValue field attribute length %u in class file %s", 
++        attribute_length == 2,
++        "Invalid ConstantValue field attribute length %u in class file %s",
+         attribute_length, CHECK);
+       constantvalue_index = cfs->get_u2(CHECK);
+-      if (_need_verify) { 
+-        verify_constantvalue(constantvalue_index, signature_index, cp, CHECK); 
++      if (_need_verify) {
++        verify_constantvalue(constantvalue_index, signature_index, cp, CHECK);
+       }
+     } else if (attribute_name == vmSymbols::tag_synthetic()) {
+       if (attribute_length != 0) {
+         classfile_parse_error(
+-          "Invalid Synthetic field attribute length %u in class file %s", 
++          "Invalid Synthetic field attribute length %u in class file %s",
+           attribute_length, CHECK);
+       }
+       is_synthetic = true;
+     } else if (attribute_name == vmSymbols::tag_deprecated()) { // 4276120
+       if (attribute_length != 0) {
+         classfile_parse_error(
+-          "Invalid Deprecated field attribute length %u in class file %s", 
++          "Invalid Deprecated field attribute length %u in class file %s",
+           attribute_length, CHECK);
+       }
+     } else if (_major_version >= JAVA_1_5_VERSION) {
+       if (attribute_name == vmSymbols::tag_signature()) {
+         if (attribute_length != 2) {
+           classfile_parse_error(
+-            "Wrong size %u for field's Signature attribute in class file %s", 
++            "Wrong size %u for field's Signature attribute in class file %s",
+             attribute_length, CHECK);
+         }
+         generic_signature_index = cfs->get_u2(CHECK);
+@@ -617,7 +614,7 @@
+         cfs->skip_u1(attribute_length, CHECK);  // Skip unknown attributes
+       }
+     } else {
+-      cfs->skip_u1(attribute_length, CHECK);  // Skip unknown attributes			
++      cfs->skip_u1(attribute_length, CHECK);  // Skip unknown attributes
+     }
+   }
+ 
+@@ -631,18 +628,18 @@
+                                             CHECK);
+   return;
+ }
+-  
++
+ 
+ // Field allocation types. Used for computing field offsets.
+ 
+ enum FieldAllocationType {
+-  STATIC_OOP,		// Oops
+-  STATIC_BYTE,		// Boolean, Byte, char
+-  STATIC_SHORT,		// shorts
+-  STATIC_WORD,		// ints
+-  STATIC_DOUBLE,	// long or double
++  STATIC_OOP,           // Oops
++  STATIC_BYTE,          // Boolean, Byte, char
++  STATIC_SHORT,         // shorts
++  STATIC_WORD,          // ints
++  STATIC_DOUBLE,        // long or double
+   STATIC_ALIGNED_DOUBLE,// aligned long or double
+-  NONSTATIC_OOP,	 
++  NONSTATIC_OOP,
+   NONSTATIC_BYTE,
+   NONSTATIC_SHORT,
+   NONSTATIC_WORD,
+@@ -664,7 +661,7 @@
+   int nonstatic_double_count;
+ };
+ 
+-typeArrayHandle ClassFileParser::parse_fields(constantPoolHandle cp, bool is_interface, 
++typeArrayHandle ClassFileParser::parse_fields(constantPoolHandle cp, bool is_interface,
+                                               struct FieldAllocationCount *fac,
+                                               objArrayHandle* fields_annotations, TRAPS) {
+   ClassFileStream* cfs = stream();
+@@ -674,7 +671,7 @@
+   // Tuples of shorts [access, name index, sig index, initial value index, byte offset, generic signature index]
+   typeArrayOop new_fields = oopFactory::new_permanent_shortArray(length*instanceKlass::next_offset, CHECK_(nullHandle));
+   typeArrayHandle fields(THREAD, new_fields);
+- 
++
+   int index = 0;
+   typeArrayHandle field_annotations;
+   for (int n = 0; n < length; n++) {
+@@ -688,17 +685,17 @@
+     u2 name_index = cfs->get_u2_fast();
+     int cp_size = cp->length();
+     check_property(
+-      valid_cp_range(name_index, cp_size) && cp->tag_at(name_index).is_utf8(), 
+-      "Invalid constant pool index %u for field name in class file %s", 
++      valid_cp_range(name_index, cp_size) && cp->tag_at(name_index).is_utf8(),
++      "Invalid constant pool index %u for field name in class file %s",
+       name_index, CHECK_(nullHandle));
+     symbolHandle name(THREAD, cp->symbol_at(name_index));
+     verify_legal_field_name(name, CHECK_(nullHandle));
+ 
+     u2 signature_index = cfs->get_u2_fast();
+     check_property(
+-      valid_cp_range(signature_index, cp_size) && 
+-        cp->tag_at(signature_index).is_utf8(), 
+-      "Invalid constant pool index %u for field signature in class file %s", 
++      valid_cp_range(signature_index, cp_size) &&
++        cp->tag_at(signature_index).is_utf8(),
++      "Invalid constant pool index %u for field signature in class file %s",
+       signature_index, CHECK_(nullHandle));
+     symbolHandle sig(THREAD, cp->symbol_at(signature_index));
+     verify_legal_field_signature(name, sig, CHECK_(nullHandle));
+@@ -725,11 +722,11 @@
+         access_flags.set_is_synthetic();
+       }
+     }
+-    
++
+     fields->short_at_put(index++, access_flags.as_short());
+     fields->short_at_put(index++, name_index);
+     fields->short_at_put(index++, signature_index);
+-    fields->short_at_put(index++, constantvalue_index);	
++    fields->short_at_put(index++, constantvalue_index);
+ 
+     // Remember how many oops we encountered and compute allocation type
+     BasicType type = cp->basic_type_for_signature_at(signature_index);
+@@ -750,8 +747,8 @@
+           }
+           fac->static_double_count++;
+           break;
+-        case  T_CHAR:     
+-        case  T_SHORT: 
++        case  T_CHAR:
++        case  T_SHORT:
+           fac->static_short_count++;
+           atype = STATIC_SHORT;
+           break;
+@@ -760,14 +757,14 @@
+           fac->static_word_count++;
+           atype = STATIC_WORD;
+           break;
+-        case  T_ARRAY: 
++        case  T_ARRAY:
+         case  T_OBJECT:
+           fac->static_oop_count++;
+           atype = STATIC_OOP;
+           break;
+-        case  T_ADDRESS: 
++        case  T_ADDRESS:
+         case  T_VOID:
+-        default: 
++        default:
+           assert(0, "bad field type");
+       }
+     } else {
+@@ -786,8 +783,8 @@
+           }
+           fac->nonstatic_double_count++;
+           break;
+-        case  T_CHAR:     
+-        case  T_SHORT: 
++        case  T_CHAR:
++        case  T_SHORT:
+           fac->nonstatic_short_count++;
+           atype = NONSTATIC_SHORT;
+           break;
+@@ -796,14 +793,14 @@
+           fac->nonstatic_word_count++;
+           atype = NONSTATIC_WORD;
+           break;
+-        case  T_ARRAY: 
++        case  T_ARRAY:
+         case  T_OBJECT:
+           fac->nonstatic_oop_count++;
+           atype = NONSTATIC_OOP;
+           break;
+-        case  T_ADDRESS: 
++        case  T_ADDRESS:
+         case  T_VOID:
+-        default: 
++        default:
+           assert(0, "bad field type");
+       }
+     }
+@@ -853,9 +850,9 @@
+ }
+ 
+ 
+-typeArrayHandle ClassFileParser::parse_exception_table(u4 code_length, 
+-                                                       u4 exception_table_length, 
+-                                                       constantPoolHandle cp, 
++typeArrayHandle ClassFileParser::parse_exception_table(u4 code_length,
++                                                       u4 exception_table_length,
++                                                       constantPoolHandle cp,
+                                                        TRAPS) {
+   ClassFileStream* cfs = stream();
+   typeArrayHandle nullHandle;
+@@ -863,7 +860,7 @@
+   // 4-tuples of ints [start_pc, end_pc, handler_pc, catch_type index]
+   typeArrayOop eh = oopFactory::new_permanent_intArray(exception_table_length*4, CHECK_(nullHandle));
+   typeArrayHandle exception_handlers = typeArrayHandle(THREAD, eh);
+-  
++
+   int index = 0;
+   cfs->guarantee_more(8 * exception_table_length, CHECK_(nullHandle)); // start_pc, end_pc, handler_pc, catch_type_index
+   for (unsigned int i = 0; i < exception_table_length; i++) {
+@@ -873,63 +870,57 @@
+     u2 catch_type_index = cfs->get_u2_fast();
+     // Will check legal target after parsing code array in verifier.
+     if (_need_verify) {
+-      guarantee_property((start_pc < end_pc) && (end_pc <= code_length), 
+-                         "Illegal exception table range in class file %s", CHECK_(nullHandle)); 
+-      guarantee_property(handler_pc < code_length, 
+-                         "Illegal exception table handler in class file %s", CHECK_(nullHandle)); 
++      guarantee_property((start_pc < end_pc) && (end_pc <= code_length),
++                         "Illegal exception table range in class file %s", CHECK_(nullHandle));
++      guarantee_property(handler_pc < code_length,
++                         "Illegal exception table handler in class file %s", CHECK_(nullHandle));
+       if (catch_type_index != 0) {
+-        guarantee_property(valid_cp_range(catch_type_index, cp->length()) && 
+-                          (cp->tag_at(catch_type_index).is_klass() || 
++        guarantee_property(valid_cp_range(catch_type_index, cp->length()) &&
++                          (cp->tag_at(catch_type_index).is_klass() ||
+                            cp->tag_at(catch_type_index).is_unresolved_klass()),
+                            "Catch type in exception table has bad constant type in class file %s", CHECK_(nullHandle));
+       }
+-    }	      
+-    exception_handlers->int_at_put(index++, start_pc); 
+-    exception_handlers->int_at_put(index++, end_pc);  
+-    exception_handlers->int_at_put(index++, handler_pc);  
+-    exception_handlers->int_at_put(index++, catch_type_index);  
++    }
++    exception_handlers->int_at_put(index++, start_pc);
++    exception_handlers->int_at_put(index++, end_pc);
++    exception_handlers->int_at_put(index++, handler_pc);
++    exception_handlers->int_at_put(index++, catch_type_index);
+   }
+   return exception_handlers;
+ }
+ 
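[Editor's note] parse_exception_table flattens each handler into four consecutive ints, [start_pc, end_pc, handler_pc, catch_type_index], and under _need_verify applies the range checks visible above. A compact stand-alone version of that layout and check, with a plain vector standing in for the permanent intArray:

#include <cstdint>
#include <cstdio>
#include <vector>

static bool handler_ok(uint32_t code_length, uint16_t start_pc,
                       uint16_t end_pc, uint16_t handler_pc) {
  return start_pc < end_pc && end_pc <= code_length && handler_pc < code_length;
}

int main() {
  const uint32_t code_length = 100;
  std::vector<int> exception_handlers;  // entry i occupies indices i*4..i*4+3

  uint16_t start_pc = 0, end_pc = 40, handler_pc = 50, catch_type_index = 7;
  if (handler_ok(code_length, start_pc, end_pc, handler_pc)) {
    exception_handlers.push_back(start_pc);
    exception_handlers.push_back(end_pc);
    exception_handlers.push_back(handler_pc);
    exception_handlers.push_back(catch_type_index);
  }
  std::printf("entries=%zu\n", exception_handlers.size() / 4);
  return 0;
}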
+-u_char* ClassFileParser::parse_linenumber_table(u4 code_attribute_length, 
+-                                                u4 code_length,
+-                                                int* compressed_linenumber_table_size, 
+-                                                TRAPS) {
++void ClassFileParser::parse_linenumber_table(
++    u4 code_attribute_length, u4 code_length,
++    CompressedLineNumberWriteStream** write_stream, TRAPS) {
+   ClassFileStream* cfs = stream();
+-  cfs->guarantee_more(2, CHECK_NULL);  // linenumber_table_length
+-  unsigned int linenumber_table_length = cfs->get_u2_fast();
++  unsigned int num_entries = cfs->get_u2(CHECK);
++
++  // Each entry is a u2 start_pc, and a u2 line_number
++  unsigned int length_in_bytes = num_entries * (sizeof(u2) + sizeof(u2));
+ 
+   // Verify line number attribute and table length
+-  if (_need_verify) {
+-    guarantee_property(code_attribute_length ==
+-                       (sizeof(u2) /* linenumber table length */ +
+-                        linenumber_table_length*(sizeof(u2) /* start_pc */ +
+-                        sizeof(u2) /* line_number */)),
+-                       "LineNumberTable attribute has wrong length in class file %s", CHECK_NULL);
+-  }          
+-  
+-  u_char* compressed_linenumber_table = NULL;
+-  if (linenumber_table_length > 0) {
+-    // initial_size large enough
+-    int initial_size = linenumber_table_length * sizeof(u2) * 2;
+-    CompressedLineNumberWriteStream c_stream =
+-      (initial_size <= fixed_buffer_size) ? 
+-      CompressedLineNumberWriteStream(_fixed_buffer, fixed_buffer_size) :
+-      CompressedLineNumberWriteStream(initial_size);
+-    cfs->guarantee_more(4 * linenumber_table_length, CHECK_NULL);  // bci, line
+-    while (linenumber_table_length-- > 0) {
+-      u2 bci  = cfs->get_u2_fast(); // start_pc
+-      u2 line = cfs->get_u2_fast(); // line_number
+-      guarantee_property(bci < code_length,
+-                         "Invalid pc in LineNumberTable in class file %s", CHECK_NULL);
+-      c_stream.write_pair(bci, line);
+-    }
+-    c_stream.write_terminator();
+-    *compressed_linenumber_table_size = c_stream.position();
+-    compressed_linenumber_table = c_stream.buffer();
++  check_property(
++    code_attribute_length == sizeof(u2) + length_in_bytes,
++    "LineNumberTable attribute has wrong length in class file %s", CHECK);
++
++  cfs->guarantee_more(length_in_bytes, CHECK);
++
++  if ((*write_stream) == NULL) {
++    if (length_in_bytes > fixed_buffer_size) {
++      (*write_stream) = new CompressedLineNumberWriteStream(length_in_bytes);
++    } else {
++      (*write_stream) = new CompressedLineNumberWriteStream(
++        linenumbertable_buffer, fixed_buffer_size);
++    }
++  }
++
++  while (num_entries-- > 0) {
++    u2 bci  = cfs->get_u2_fast(); // start_pc
++    u2 line = cfs->get_u2_fast(); // line_number
++    guarantee_property(bci < code_length,
++        "Invalid pc in LineNumberTable in class file %s", CHECK);
++    (*write_stream)->write_pair(bci, line);
+   }
+-  return compressed_linenumber_table;
+ }
+ 
+ 
+@@ -990,7 +981,7 @@
+    */
+   while (entry != NULL) {
+     if (elem->start_bci           == entry->_elem->start_bci
+-     && elem->length              == entry->_elem->length 
++     && elem->length              == entry->_elem->length
+      && elem->name_cp_index       == entry->_elem->name_cp_index
+      && elem->slot                == entry->_elem->slot
+     ) {
+@@ -1017,7 +1008,7 @@
+     return false;
+   }
+   entry->_elem = elem;
+- 
++
+   // Insert into hash table
+   entry->_next = table[index];
+   table[index] = entry;
+@@ -1054,7 +1045,7 @@
+   }
+   u2* localvariable_table_start = cfs->get_u2_buffer();
+   assert(localvariable_table_start != NULL, "null local variable table");
+-  if (!_need_verify) { 
++  if (!_need_verify) {
+     cfs->skip_u2_fast(size);
+   } else {
+     cfs->guarantee_more(size * 2, CHECK_NULL);
+@@ -1069,17 +1060,17 @@
+ 
+       if (start_pc >= code_length) {
+         classfile_parse_error(
+-          "Invalid start_pc %u in %s in class file %s", 
++          "Invalid start_pc %u in %s in class file %s",
+           start_pc, tbl_name, CHECK_NULL);
+       }
+       if (end_pc > code_length) {
+         classfile_parse_error(
+-          "Invalid length %u in %s in class file %s", 
++          "Invalid length %u in %s in class file %s",
+           length, tbl_name, CHECK_NULL);
+       }
+       int cp_size = cp->length();
+       guarantee_property(
+-        valid_cp_range(name_index, cp_size) && 
++        valid_cp_range(name_index, cp_size) &&
+           cp->tag_at(name_index).is_utf8(),
+         "Name index %u in %s has bad constant type in class file %s",
+         name_index, tbl_name, CHECK_NULL);
+@@ -1097,7 +1088,7 @@
+         verify_legal_field_signature(name, sig, CHECK_NULL);
+ 
+         // 4894874: check special cases for double and long local variables
+-        if (sig() == vmSymbols::type_signature(T_DOUBLE) || 
++        if (sig() == vmSymbols::type_signature(T_DOUBLE) ||
+             sig() == vmSymbols::type_signature(T_LONG)) {
+           extra_slot = 1;
+         }
+@@ -1116,41 +1107,41 @@
+   ClassFileStream* cfs = stream();
+   u2 index = 0; // index in the array with long/double occupying two slots
+   u4 i1 = *u1_index;
+-  u4 i2 = *u2_index + 1;  
++  u4 i2 = *u2_index + 1;
+   for(int i = 0; i < array_length; i++) {
+     u1 tag = u1_array[i1++] = cfs->get_u1(CHECK);
+     index++;
+     if (tag == ITEM_Long || tag == ITEM_Double) {
+-      index++; 
++      index++;
+     } else if (tag == ITEM_Object) {
+       u2 class_index = u2_array[i2++] = cfs->get_u2(CHECK);
+       guarantee_property(valid_cp_range(class_index, cp->length()) &&
+-                         cp->tag_at(class_index).is_unresolved_klass(), 
+-                         "Bad class index %u in StackMap in class file %s", 
++                         cp->tag_at(class_index).is_unresolved_klass(),
++                         "Bad class index %u in StackMap in class file %s",
+                          class_index, CHECK);
+     } else if (tag == ITEM_Uninitialized) {
+       u2 offset = u2_array[i2++] = cfs->get_u2(CHECK);
+       guarantee_property(
+-        offset < code_length, 
+-        "Bad uninitialized type offset %u in StackMap in class file %s", 
++        offset < code_length,
++        "Bad uninitialized type offset %u in StackMap in class file %s",
+         offset, CHECK);
+     } else {
+       guarantee_property(
+         tag <= (u1)ITEM_Uninitialized,
+-        "Unknown variable type %u in StackMap in class file %s", 
++        "Unknown variable type %u in StackMap in class file %s",
+         tag, CHECK);
+     }
+   }
+-  u2_array[*u2_index] = index; 
++  u2_array[*u2_index] = index;
+   *u1_index = i1;
+   *u2_index = i2;
+ }
+ 
+ typeArrayOop ClassFileParser::parse_stackmap_table(
+     u4 code_attribute_length, TRAPS) {
+-  if (code_attribute_length == 0) 
++  if (code_attribute_length == 0)
+     return NULL;
+-  
++
+   ClassFileStream* cfs = stream();
+   u1* stackmap_table_start = cfs->get_u1_buffer();
+   assert(stackmap_table_start != NULL, "null stackmap table");
+@@ -1162,16 +1153,16 @@
+     return NULL;
+   }
+ 
+-  typeArrayOop stackmap_data = 
++  typeArrayOop stackmap_data =
+     oopFactory::new_permanent_byteArray(code_attribute_length, CHECK_NULL);
+ 
+   stackmap_data->set_length(code_attribute_length);
+-  memcpy((void*)stackmap_data->byte_at_addr(0), 
++  memcpy((void*)stackmap_data->byte_at_addr(0),
+          (void*)stackmap_table_start, code_attribute_length);
+   return stackmap_data;
+ }
+ 
+-u2* ClassFileParser::parse_checked_exceptions(u2* checked_exceptions_length, 
++u2* ClassFileParser::parse_checked_exceptions(u2* checked_exceptions_length,
+                                               u4 method_attribute_length,
+                                               constantPoolHandle cp, TRAPS) {
+   ClassFileStream* cfs = stream();
+@@ -1180,7 +1171,7 @@
+   unsigned int size = (*checked_exceptions_length) * sizeof(CheckedExceptionElement) / sizeof(u2);
+   u2* checked_exceptions_start = cfs->get_u2_buffer();
+   assert(checked_exceptions_start != NULL, "null checked exceptions");
+-  if (!_need_verify) { 
++  if (!_need_verify) {
+     cfs->skip_u2_fast(size);
+   } else {
+     // Verify each value in the checked exception table
+@@ -1191,8 +1182,8 @@
+       checked_exception = cfs->get_u2_fast();
+       check_property(
+         valid_cp_range(checked_exception, cp->length()) &&
+-        cp->tag_at(checked_exception).is_klass_reference(), 
+-        "Exception name has bad type at constant pool %u in class file %s", 
++        cp->tag_at(checked_exception).is_klass_reference(),
++        "Exception name has bad type at constant pool %u in class file %s",
+         checked_exception, CHECK_NULL);
+     }
+   }
+@@ -1235,22 +1226,22 @@
+   u2 name_index = cfs->get_u2_fast();
+   int cp_size = cp->length();
+   check_property(
+-    valid_cp_range(name_index, cp_size) && 
+-      cp->tag_at(name_index).is_utf8(), 
+-    "Illegal constant pool index %u for method name in class file %s", 
++    valid_cp_range(name_index, cp_size) &&
++      cp->tag_at(name_index).is_utf8(),
++    "Illegal constant pool index %u for method name in class file %s",
+     name_index, CHECK_(nullHandle));
+   symbolHandle name(THREAD, cp->symbol_at(name_index));
+-  verify_legal_method_name(name, CHECK_(nullHandle));  
++  verify_legal_method_name(name, CHECK_(nullHandle));
+ 
+   u2 signature_index = cfs->get_u2_fast();
+   guarantee_property(
+     valid_cp_range(signature_index, cp_size) &&
+-      cp->tag_at(signature_index).is_utf8(), 
+-    "Illegal constant pool index %u for method signature in class file %s", 
++      cp->tag_at(signature_index).is_utf8(),
++    "Illegal constant pool index %u for method signature in class file %s",
+     signature_index, CHECK_(nullHandle));
+   symbolHandle signature(THREAD, cp->symbol_at(signature_index));
+ 
+-  AccessFlags access_flags;  
++  AccessFlags access_flags;
+   if (name == vmSymbols::class_initializer_name()) {
+     // We ignore the access flags for a class initializer. (JVM Spec. p. 116)
+     flags = JVM_ACC_STATIC;
+@@ -1260,15 +1251,15 @@
+ 
+   int args_size = -1;  // only used when _need_verify is true
+   if (_need_verify) {
+-    args_size = ((flags & JVM_ACC_STATIC) ? 0 : 1) + 
++    args_size = ((flags & JVM_ACC_STATIC) ? 0 : 1) +
+                  verify_legal_method_signature(name, signature, CHECK_(nullHandle));
+     if (args_size > MAX_ARGS_SIZE) {
+       classfile_parse_error("Too many arguments in method signature in class file %s", CHECK_(nullHandle));
+     }
+   }
+-        
++
+   access_flags.set_flags(flags & JVM_RECOGNIZED_METHOD_MODIFIERS);
+-  
++
+   // Default values for code and exceptions attribute elements
+   u2 max_stack = 0;
+   u2 max_locals = 0;
+@@ -1278,8 +1269,8 @@
+   typeArrayHandle exception_handlers(THREAD, Universe::the_empty_int_array());
+   u2 checked_exceptions_length = 0;
+   u2* checked_exceptions_start = NULL;
+-  int compressed_linenumber_table_size = 0;
+-  u_char* compressed_linenumber_table = NULL;
++  CompressedLineNumberWriteStream* linenumber_table = NULL;
++  int linenumber_table_length = 0;
+   int total_lvt_length = 0;
+   u2 lvt_cnt = 0;
+   u2 lvtt_cnt = 0;
+@@ -1309,22 +1300,22 @@
+ 
+   // Parse code and exceptions attribute
+   u2 method_attributes_count = cfs->get_u2_fast();
+-  while (method_attributes_count--) {   
++  while (method_attributes_count--) {
+     cfs->guarantee_more(6, CHECK_(nullHandle));  // method_attribute_name_index, method_attribute_length
+     u2 method_attribute_name_index = cfs->get_u2_fast();
+     u4 method_attribute_length = cfs->get_u4_fast();
+     check_property(
+       valid_cp_range(method_attribute_name_index, cp_size) &&
+-        cp->tag_at(method_attribute_name_index).is_utf8(), 
+-      "Invalid method attribute name index %u in class file %s", 
++        cp->tag_at(method_attribute_name_index).is_utf8(),
++      "Invalid method attribute name index %u in class file %s",
+       method_attribute_name_index, CHECK_(nullHandle));
+ 
+     symbolOop method_attribute_name = cp->symbol_at(method_attribute_name_index);
+     if (method_attribute_name == vmSymbols::tag_code()) {
+       // Parse Code attribute
+       if (_need_verify) {
+-        guarantee_property(!access_flags.is_native() && !access_flags.is_abstract(), 
+-                        "Code attribute in native or abstract methods in class file %s", 
++        guarantee_property(!access_flags.is_native() && !access_flags.is_abstract(),
++                        "Code attribute in native or abstract methods in class file %s",
+                          CHECK_(nullHandle));
+       }
+       if (parsed_code_attribute) {
+@@ -1345,10 +1336,10 @@
+         code_length = cfs->get_u4_fast();
+       }
+       if (_need_verify) {
+-        guarantee_property(args_size <= max_locals, 
++        guarantee_property(args_size <= max_locals,
+                            "Arguments can't fit into locals in class file %s", CHECK_(nullHandle));
+-        guarantee_property(code_length > 0 && code_length <= MAX_CODE_SIZE, 
+-                           "Invalid method Code length %u in class file %s", 
++        guarantee_property(code_length > 0 && code_length <= MAX_CODE_SIZE,
++                           "Invalid method Code length %u in class file %s",
+                            code_length, CHECK_(nullHandle));
+       }
+       // Code pointer
+@@ -1361,17 +1352,17 @@
+       cfs->guarantee_more(2, CHECK_(nullHandle));  // exception_table_length
+       exception_table_length = cfs->get_u2_fast();
+       if (exception_table_length > 0) {
+-        exception_handlers = 
++        exception_handlers =
+               parse_exception_table(code_length, exception_table_length, cp, CHECK_(nullHandle));
+       }
+ 
+       // Parse additional attributes in code attribute
+       cfs->guarantee_more(2, CHECK_(nullHandle));  // code_attributes_count
+       u2 code_attributes_count = cfs->get_u2_fast();
+-      unsigned int calculated_attribute_length = sizeof(max_stack) + 
+-                                                 sizeof(max_locals) + 
++      unsigned int calculated_attribute_length = sizeof(max_stack) +
++                                                 sizeof(max_locals) +
+                                                  sizeof(code_length) +
+-                                                 code_length + 
++                                                 code_length +
+                                                  sizeof(exception_table_length) +
+                                                  sizeof(code_attributes_count) +
+                                                  exception_table_length*(sizeof(u2) /* start_pc */+
+@@ -1383,23 +1374,21 @@
+         cfs->guarantee_more(6, CHECK_(nullHandle));  // code_attribute_name_index, code_attribute_length
+         u2 code_attribute_name_index = cfs->get_u2_fast();
+         u4 code_attribute_length = cfs->get_u4_fast();
+-        calculated_attribute_length += code_attribute_length + 
++        calculated_attribute_length += code_attribute_length +
+                                        sizeof(code_attribute_name_index) +
+                                        sizeof(code_attribute_length);
+         check_property(valid_cp_range(code_attribute_name_index, cp_size) &&
+-                       cp->tag_at(code_attribute_name_index).is_utf8(), 
+-                       "Invalid code attribute name index %u in class file %s", 
++                       cp->tag_at(code_attribute_name_index).is_utf8(),
++                       "Invalid code attribute name index %u in class file %s",
+                        code_attribute_name_index,
+                        CHECK_(nullHandle));
+-        if (LoadLineNumberTables && 
++        if (LoadLineNumberTables &&
+             cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_line_number_table()) {
+           // Parse and compress line number table
+-          compressed_linenumber_table = parse_linenumber_table(code_attribute_length, 
+-                                                               code_length,
+-                                                               &compressed_linenumber_table_size, 
+-                                                               CHECK_(nullHandle));
+-                                         
+-        } else if (LoadLocalVariableTables && 
++          parse_linenumber_table(code_attribute_length, code_length,
++            &linenumber_table, CHECK_(nullHandle));
++
++        } else if (LoadLocalVariableTables &&
+                    cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_local_variable_table()) {
+           // Parse local variable table
+           if (!lvt_allocated) {
+@@ -1424,11 +1413,11 @@
+                                       code_attribute_length,
+                                       cp,
+                                       &localvariable_table_length[lvt_cnt],
+-                                      false,	// is not LVTT
++                                      false,    // is not LVTT
+                                       CHECK_(nullHandle));
+           total_lvt_length += localvariable_table_length[lvt_cnt];
+           lvt_cnt++;
+-        } else if (LoadLocalVariableTypeTables && 
++        } else if (LoadLocalVariableTypeTables &&
+                    _major_version >= JAVA_1_5_VERSION &&
+                    cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_local_variable_type_table()) {
+           if (!lvt_allocated) {
+@@ -1454,7 +1443,7 @@
+                                       code_attribute_length,
+                                       cp,
+                                       &localvariable_type_table_length[lvtt_cnt],
+-                                      true,	// is LVTT
++                                      true,     // is LVTT
+                                       CHECK_(nullHandle));
+           lvtt_cnt++;
+         } else if (UseSplitVerifier &&
+@@ -1464,7 +1453,7 @@
+           if (parsed_stackmap_attribute) {
+             classfile_parse_error("Multiple StackMapTable attributes in class file %s", CHECK_(nullHandle));
+           }
+-          typeArrayOop sm = 
++          typeArrayOop sm =
+             parse_stackmap_table(code_attribute_length, CHECK_(nullHandle));
+           stackmap_data = typeArrayHandle(THREAD, sm);
+           parsed_stackmap_attribute = true;
+@@ -1485,13 +1474,13 @@
+       }
+       parsed_checked_exceptions_attribute = true;
+       checked_exceptions_start =
+-            parse_checked_exceptions(&checked_exceptions_length, 
+-                                     method_attribute_length, 
++            parse_checked_exceptions(&checked_exceptions_length,
++                                     method_attribute_length,
+                                      cp, CHECK_(nullHandle));
+     } else if (method_attribute_name == vmSymbols::tag_synthetic()) {
+       if (method_attribute_length != 0) {
+         classfile_parse_error(
+-          "Invalid Synthetic method attribute length %u in class file %s", 
++          "Invalid Synthetic method attribute length %u in class file %s",
+           method_attribute_length, CHECK_(nullHandle));
+       }
+       // Should we check that there hasn't already been a synthetic attribute?
+@@ -1499,14 +1488,14 @@
+     } else if (method_attribute_name == vmSymbols::tag_deprecated()) { // 4276120
+       if (method_attribute_length != 0) {
+         classfile_parse_error(
+-          "Invalid Deprecated method attribute length %u in class file %s", 
++          "Invalid Deprecated method attribute length %u in class file %s",
+           method_attribute_length, CHECK_(nullHandle));
+       }
+     } else if (_major_version >= JAVA_1_5_VERSION) {
+       if (method_attribute_name == vmSymbols::tag_signature()) {
+         if (method_attribute_length != 2) {
+           classfile_parse_error(
+-            "Invalid Signature attribute length %u in class file %s", 
++            "Invalid Signature attribute length %u in class file %s",
+             method_attribute_length, CHECK_(nullHandle));
+         }
+         cfs->guarantee_more(2, CHECK_(nullHandle));  // generic_signature_index
+@@ -1543,8 +1532,14 @@
+     } else {
+       // Skip unknown attributes
+       cfs->skip_u1(method_attribute_length, CHECK_(nullHandle));
+-    }      
++    }
+   }
++
++  if (linenumber_table != NULL) {
++    linenumber_table->write_terminator();
++    linenumber_table_length = linenumber_table->position();
++  }
++
+   // Make sure there's at least one Code attribute in non-native/non-abstract method
+   if (_need_verify) {
+     guarantee_property(access_flags.is_native() || access_flags.is_abstract() || parsed_code_attribute,
+@@ -1552,11 +1547,9 @@
+   }
+ 
+   // All sizing information for a methodOop is finally available, now create it
+-  methodOop m_oop  = oopFactory::new_method(code_length, access_flags,
+-                               compressed_linenumber_table_size, 
+-                               total_lvt_length, 
+-                               checked_exceptions_length, 
+-                               CHECK_(nullHandle));
++  methodOop m_oop  = oopFactory::new_method(
++    code_length, access_flags, linenumber_table_length,
++    total_lvt_length, checked_exceptions_length, CHECK_(nullHandle));
+   methodHandle m (THREAD, m_oop);
+ 
+   ClassLoadingService::add_class_method_size(m_oop->size()*HeapWordSize);
+@@ -1574,7 +1567,7 @@
+ 
+   if (args_size >= 0) {
+     m->set_size_of_parameters(args_size);
+-  } else { 
++  } else {
+     m->compute_size_of_parameters(THREAD);
+   }
+ #ifdef ASSERT
+@@ -1591,10 +1584,10 @@
+ 
+   /**
+    * The exception_table field is the flag used to indicate
+-   * that the methodOop and it's associated constMethodOop are partially 
+-   * initialized and thus are exempt from pre/post GC verification.  Once 
+-   * the field is set, the oops are considered fully initialized so make 
+-   * sure that the oops can pass verification when this field is set. 
++   * that the methodOop and it's associated constMethodOop are partially
++   * initialized and thus are exempt from pre/post GC verification.  Once
++   * the field is set, the oops are considered fully initialized so make
++   * sure that the oops can pass verification when this field is set.
+    */
+   m->set_exception_table(exception_handlers());
+ 
+@@ -1602,10 +1595,13 @@
+   if (code_length > 0) {
+     memcpy(m->code_base(), code_start, code_length);
+   }
++
+   // Copy line number table
+-  if (compressed_linenumber_table_size > 0) {
+-    memcpy(m->compressed_linenumber_table(), compressed_linenumber_table, compressed_linenumber_table_size);
++  if (linenumber_table != NULL) {
++    memcpy(m->compressed_linenumber_table(),
++           linenumber_table->buffer(), linenumber_table_length);
+   }
++
+   // Copy checked exceptions
+   if (checked_exceptions_length > 0) {
+     int size = checked_exceptions_length * sizeof(CheckedExceptionElement) / sizeof(u2);
+@@ -1625,7 +1621,7 @@
+    *     Each LVTT entry has to match some LVT entry.
+    *   - HotSpot internal LVT keeps natural ordering of class file LVT entries.
+    */
+-  if (total_lvt_length > 0) {  
++  if (total_lvt_length > 0) {
+     int tbl_no, idx;
+ 
+     promoted_flags->set_has_localvariable_table();
+@@ -1642,8 +1638,8 @@
+       for (idx = 0; idx < localvariable_table_length[tbl_no]; idx++, lvt++) {
+         copy_lvt_element(&cf_lvt[idx], lvt);
+         // If no duplicates, add LVT elem in hashtable lvt_Hash.
+-        if (LVT_put_after_lookup(lvt, lvt_Hash) == false 
+-          && _need_verify 
++        if (LVT_put_after_lookup(lvt, lvt_Hash) == false
++          && _need_verify
+           && _major_version >= JAVA_1_5_VERSION ) {
+           clear_hashtable(lvt_Hash);
+           classfile_parse_error("Duplicated LocalVariableTable attribute "
+@@ -1720,12 +1716,12 @@
+   return m;
+ }
+ 
+-  
++
+ // The promoted_flags parameter is used to pass relevant access_flags
+ // from the methods back up to the containing klass. These flag values
+ // are added to klass's access_flags.
+ 
+-objArrayHandle ClassFileParser::parse_methods(constantPoolHandle cp, bool is_interface, 
++objArrayHandle ClassFileParser::parse_methods(constantPoolHandle cp, bool is_interface,
+                                               AccessFlags* promoted_flags,
+                                               bool* has_final_method,
+                                               objArrayOop* methods_annotations_oop,
+@@ -1749,7 +1745,7 @@
+     objArrayHandle methods_parameter_annotations;
+     objArrayHandle methods_default_annotations;
+     for (int index = 0; index < length; index++) {
+-      methodHandle method = parse_method(cp, is_interface, 
++      methodHandle method = parse_method(cp, is_interface,
+                                          promoted_flags,
+                                          &method_annotations,
+                                          &method_parameter_annotations,
+@@ -1758,7 +1754,7 @@
+       if (method->is_final()) {
+         *has_final_method = true;
+       }
+-      methods->obj_at_put(index, method());  
++      methods->obj_at_put(index, method());
+       if (method_annotations.not_null()) {
+         if (methods_annotations.is_null()) {
+           objArrayOop md = oopFactory::new_system_objArray(length, CHECK_(nullHandle));
+@@ -1821,7 +1817,7 @@
+                                               TRAPS) {
+   typeArrayHandle nullHandle;
+   int length = methods()->length();
+-  // If JVMTI original method ordering is enabled we have to 
++  // If JVMTI original method ordering is enabled we have to
+   // remember the original class file ordering.
+   // We temporarily use the vtable_index field in the methodOop to store the
+   // class file index, so we can read in after calling qsort.
+@@ -1863,16 +1859,16 @@
+   u2 sourcefile_index = cfs->get_u2_fast();
+   check_property(
+     valid_cp_range(sourcefile_index, cp->length()) &&
+-      cp->tag_at(sourcefile_index).is_utf8(), 
+-    "Invalid SourceFile attribute at constant pool index %u in class file %s", 
+-    sourcefile_index, CHECK);  
++      cp->tag_at(sourcefile_index).is_utf8(),
++    "Invalid SourceFile attribute at constant pool index %u in class file %s",
++    sourcefile_index, CHECK);
+   k->set_source_file_name(cp->symbol_at(sourcefile_index));
+ }
+ 
+ 
+ 
+-void ClassFileParser::parse_classfile_source_debug_extension_attribute(constantPoolHandle cp, 
+-                                                                       instanceKlassHandle k, 
++void ClassFileParser::parse_classfile_source_debug_extension_attribute(constantPoolHandle cp,
++                                                                       instanceKlassHandle k,
+                                                                        int length, TRAPS) {
+   ClassFileStream* cfs = stream();
+   u1* sde_buffer = cfs->get_u1_buffer();
+@@ -1882,7 +1878,7 @@
+   if (JvmtiExport::can_get_source_debug_extension()) {
+     // Optimistically assume that only 1 byte UTF format is used
+     // (common case)
+-    symbolOop sde_symbol = oopFactory::new_symbol((char*)sde_buffer, 
++    symbolOop sde_symbol = oopFactory::new_symbol((char*)sde_buffer,
+                                                   length, CHECK);
+     k->set_source_debug_extension(sde_symbol);
+   }
+@@ -1895,13 +1891,13 @@
+ #define RECOGNIZED_INNER_CLASS_MODIFIERS (JVM_RECOGNIZED_CLASS_MODIFIERS | JVM_ACC_PRIVATE | JVM_ACC_PROTECTED | JVM_ACC_STATIC)
+ 
+ // Return number of classes in the inner classes attribute table
+-u2 ClassFileParser::parse_classfile_inner_classes_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS) {  
++u2 ClassFileParser::parse_classfile_inner_classes_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS) {
+   ClassFileStream* cfs = stream();
+   cfs->guarantee_more(2, CHECK_0);  // length
+   u2 length = cfs->get_u2_fast();
+ 
+   // 4-tuples of shorts [inner_class_info_index, outer_class_info_index, inner_name_index, inner_class_access_flags]
+-  typeArrayOop ic = oopFactory::new_permanent_shortArray(length*4, CHECK_0);  
++  typeArrayOop ic = oopFactory::new_permanent_shortArray(length*4, CHECK_0);
+   typeArrayHandle inner_classes(THREAD, ic);
+   int index = 0;
+   int cp_size = cp->length();
+@@ -1910,28 +1906,28 @@
+     // Inner class index
+     u2 inner_class_info_index = cfs->get_u2_fast();
+     check_property(
+-      inner_class_info_index == 0 || 
+-        (valid_cp_range(inner_class_info_index, cp_size) && 
+-        cp->tag_at(inner_class_info_index).is_klass_reference()), 
+-      "inner_class_info_index %u has bad constant type in class file %s", 
++      inner_class_info_index == 0 ||
++        (valid_cp_range(inner_class_info_index, cp_size) &&
++        cp->tag_at(inner_class_info_index).is_klass_reference()),
++      "inner_class_info_index %u has bad constant type in class file %s",
+       inner_class_info_index, CHECK_0);
+     // Outer class index
+     u2 outer_class_info_index = cfs->get_u2_fast();
+     check_property(
+-      outer_class_info_index == 0 || 
++      outer_class_info_index == 0 ||
+         (valid_cp_range(outer_class_info_index, cp_size) &&
+-        cp->tag_at(outer_class_info_index).is_klass_reference()), 
+-      "outer_class_info_index %u has bad constant type in class file %s", 
++        cp->tag_at(outer_class_info_index).is_klass_reference()),
++      "outer_class_info_index %u has bad constant type in class file %s",
+       outer_class_info_index, CHECK_0);
+     // Inner class name
+     u2 inner_name_index = cfs->get_u2_fast();
+     check_property(
+       inner_name_index == 0 || (valid_cp_range(inner_name_index, cp_size) &&
+-        cp->tag_at(inner_name_index).is_utf8()), 
+-      "inner_name_index %u has bad constant type in class file %s", 
+-      inner_name_index, CHECK_0);    
++        cp->tag_at(inner_name_index).is_utf8()),
++      "inner_name_index %u has bad constant type in class file %s",
++      inner_name_index, CHECK_0);
+     if (_need_verify) {
+-      guarantee_property(inner_class_info_index != outer_class_info_index, 
++      guarantee_property(inner_class_info_index != outer_class_info_index,
+                          "Class is both outer and inner class in class file %s", CHECK_0);
+     }
+     // Access flags
+@@ -1946,7 +1942,7 @@
+ 
+     inner_classes->short_at_put(index++, inner_class_info_index);
+     inner_classes->short_at_put(index++, outer_class_info_index);
+-    inner_classes->short_at_put(index++, inner_name_index);	
++    inner_classes->short_at_put(index++, inner_name_index);
+     inner_classes->short_at_put(index++, inner_access_flags.as_short());
+   }
+ 
+@@ -1961,12 +1957,12 @@
+                             "Duplicate entry in InnerClasses in class file %s",
+                             CHECK_0);
+       }
+-    }  
+-  }  
++    }
++  }
+ 
+-  // Update instanceKlass with inner class info.  
++  // Update instanceKlass with inner class info.
+   k->set_inner_classes(inner_classes());
+-  return length;  
++  return length;
+ }
+ 
+ void ClassFileParser::parse_classfile_synthetic_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS) {
+@@ -1978,9 +1974,9 @@
+   u2 signature_index = cfs->get_u2(CHECK);
+   check_property(
+     valid_cp_range(signature_index, cp->length()) &&
+-      cp->tag_at(signature_index).is_utf8(), 
+-    "Invalid constant pool index %u in Signature attribute in class file %s", 
+-    signature_index, CHECK);    
++      cp->tag_at(signature_index).is_utf8(),
++    "Invalid constant pool index %u in Signature attribute in class file %s",
++    signature_index, CHECK);
+   k->set_generic_signature(cp->symbol_at(signature_index));
+ }
+ 
+@@ -1998,14 +1994,14 @@
+   u1* runtime_invisible_annotations = NULL;
+   int runtime_invisible_annotations_length = 0;
+   // Iterate over attributes
+-  while (attributes_count--) {    
++  while (attributes_count--) {
+     cfs->guarantee_more(6, CHECK);  // attribute_name_index, attribute_length
+     u2 attribute_name_index = cfs->get_u2_fast();
+     u4 attribute_length = cfs->get_u4_fast();
+     check_property(
+       valid_cp_range(attribute_name_index, cp->length()) &&
+-        cp->tag_at(attribute_name_index).is_utf8(), 
+-      "Attribute name has bad constant pool index %u in class file %s", 
++        cp->tag_at(attribute_name_index).is_utf8(),
++      "Attribute name has bad constant pool index %u in class file %s",
+       attribute_name_index, CHECK);
+     symbolOop tag = cp->symbol_at(attribute_name_index);
+     if (tag == vmSymbols::tag_source_file()) {
+@@ -2039,7 +2035,7 @@
+       // Shouldn't we check that the synthetic flags wasn't already set? - not required in spec
+       if (attribute_length != 0) {
+         classfile_parse_error(
+-          "Invalid Synthetic classfile attribute length %u in class file %s", 
++          "Invalid Synthetic classfile attribute length %u in class file %s",
+           attribute_length, CHECK);
+       }
+       parse_classfile_synthetic_attribute(cp, k, CHECK);
+@@ -2047,14 +2043,14 @@
+       // Check for Deprecatd tag - 4276120
+       if (attribute_length != 0) {
+         classfile_parse_error(
+-          "Invalid Deprecated classfile attribute length %u in class file %s", 
++          "Invalid Deprecated classfile attribute length %u in class file %s",
+           attribute_length, CHECK);
+       }
+     } else if (_major_version >= JAVA_1_5_VERSION) {
+       if (tag == vmSymbols::tag_signature()) {
+         if (attribute_length != 2) {
+           classfile_parse_error(
+-            "Wrong Signature attribute length %u in class file %s", 
++            "Wrong Signature attribute length %u in class file %s",
+             attribute_length, CHECK);
+         }
+         parse_classfile_signature_attribute(cp, k, CHECK);
+@@ -2089,7 +2085,7 @@
+             (!cp->is_within_bounds(method_index) ||
+              !cp->tag_at(method_index).is_name_and_type())) {
+           classfile_parse_error("Invalid or out-of-bounds method index in EnclosingMethod attribute in class file %s", CHECK);
+-        }           
++        }
+         k->set_enclosing_method_indices(class_index, method_index);
+       } else {
+         // Unknown attribute
+@@ -2138,16 +2134,16 @@
+     switch (t) {
+       case T_BYTE:
+         h_k()->byte_field_put(fd->offset(), fd->int_initial_value());
+-	      break;
++              break;
+       case T_BOOLEAN:
+         h_k()->bool_field_put(fd->offset(), fd->int_initial_value());
+-	      break;
++              break;
+       case T_CHAR:
+         h_k()->char_field_put(fd->offset(), fd->int_initial_value());
+-	      break;
++              break;
+       case T_SHORT:
+         h_k()->short_field_put(fd->offset(), fd->int_initial_value());
+-	      break;
++              break;
+       case T_INT:
+         h_k()->int_field_put(fd->offset(), fd->int_initial_value());
+         break;
+@@ -2162,16 +2158,16 @@
+         break;
+       case T_OBJECT:
+         {
+-          #ifdef ASSERT      
++          #ifdef ASSERT
+           symbolOop sym = oopFactory::new_symbol("Ljava/lang/String;", CHECK);
+-          assert(fd->signature() == sym, "just checking");      
++          assert(fd->signature() == sym, "just checking");
+           #endif
+           oop string = fd->string_initial_value(CHECK);
+           h_k()->obj_field_put(fd->offset(), string);
+         }
+         break;
+       default:
+-        THROW_MSG(vmSymbols::java_lang_ClassFormatError(), 
++        THROW_MSG(vmSymbols::java_lang_ClassFormatError(),
+                   "Illegal ConstantValue attribute in class file");
+     }
+   }
+@@ -2198,7 +2194,7 @@
+   // present.
+ 
+   //
+-  // Increment fac.nonstatic_oop_count so that the start of the 
++  // Increment fac.nonstatic_oop_count so that the start of the
+   // next type of non-static oops leaves room for the fake oop.
+   // Do not increment next_nonstatic_oop_offset so that the
+   // fake oop is place after the java.lang.ref.Reference oop
+@@ -2214,9 +2210,9 @@
+   int extra = java_lang_ref_Reference::number_of_fake_oop_fields;
+   const int n = (*fields_ptr)()->length();
+   for (int i = 0; i < n; i += instanceKlass::next_offset ) {
+-    int name_index = 
++    int name_index =
+     (*fields_ptr)()->ushort_at(i + instanceKlass::name_index_offset);
+-    int sig_index  = 
++    int sig_index  =
+       (*fields_ptr)()->ushort_at(i + instanceKlass::signature_index_offset);
+     symbolOop f_name = cp->symbol_at(name_index);
+     symbolOop f_sig  = cp->symbol_at(sig_index);
+@@ -2224,7 +2220,7 @@
+       // Save the index for reference signature for later use.
+       // The fake discovered field does not entries in the
+       // constant pool so the index for its signature cannot
+-      // be extracted from the constant pool.  It will need 
++      // be extracted from the constant pool.  It will need
+       // later, however.  It's signature is vmSymbols::reference_signature()
+       // so same an index for that signature.
+       reference_sig_index = sig_index;
+@@ -2234,18 +2230,18 @@
+     if (f_name == vmSymbols::reference_discovered_name() &&
+       f_sig == vmSymbols::reference_signature()) {
+       // The values below are fake but will force extra
+-      // non-static oop fields and a corresponding non-static 
++      // non-static oop fields and a corresponding non-static
+       // oop map block to be allocated.
+       extra = 0;
+       break;
+     }
+   }
+-  if (extra != 0) { 
++  if (extra != 0) {
+     fac_ptr->nonstatic_oop_count += extra;
+     // Add the additional entry to "fields" so that the klass
+     // contains the "discoverd" field and the field will be initialized
+     // in instances of the object.
+-    int fields_with_fix_length = (*fields_ptr)()->length() + 
++    int fields_with_fix_length = (*fields_ptr)()->length() +
+       instanceKlass::next_offset;
+     typeArrayOop ff = oopFactory::new_permanent_shortArray(
+                                                 fields_with_fix_length, CHECK);
+@@ -2258,7 +2254,7 @@
+ 
+     // Add the fake field at the end.
+     int i = (*fields_ptr)->length();
+-    // There is no name index for the fake "discovered" field nor 
++    // There is no name index for the fake "discovered" field nor
+     // signature but a signature is needed so that the field will
+     // be properly initialized.  Use one found for
+     // one of the other reference fields. Be sure the index for the
+@@ -2271,12 +2267,12 @@
+ 
+     int j;
+     for (j = 0; j < instanceKlass::next_offset; j++) {
+-      fields_with_fix->ushort_at_put(i + j, 
+-	(*fields_ptr)->ushort_at(reference_index +j));
++      fields_with_fix->ushort_at_put(i + j,
++        (*fields_ptr)->ushort_at(reference_index +j));
+     }
+     // Clear the public access flag and set the private access flag.
+     short flags;
+-    flags = 
++    flags =
+       fields_with_fix->ushort_at(i + instanceKlass::access_flags_offset);
+     assert(!(flags & JVM_RECOGNIZED_FIELD_MODIFIERS), "Unexpected access flags set");
+     flags = flags & (~JVM_ACC_PUBLIC);
+@@ -2285,10 +2281,10 @@
+     access_flags.set_flags(flags);
+     assert(!access_flags.is_public(), "Failed to clear public flag");
+     assert(access_flags.is_private(), "Failed to set private flag");
+-    fields_with_fix->ushort_at_put(i + instanceKlass::access_flags_offset, 
++    fields_with_fix->ushort_at_put(i + instanceKlass::access_flags_offset,
+       flags);
+ 
+-    assert(fields_with_fix->ushort_at(i + instanceKlass::name_index_offset) 
++    assert(fields_with_fix->ushort_at(i + instanceKlass::name_index_offset)
+       == reference_name_index, "The fake reference name is incorrect");
+     assert(fields_with_fix->ushort_at(i + instanceKlass::signature_index_offset)
+       == reference_sig_index, "The fake reference signature is incorrect");
+@@ -2305,7 +2301,7 @@
+ }
+ 
+ 
+-void ClassFileParser::java_lang_Class_fix_pre(objArrayHandle* methods_ptr, 
++void ClassFileParser::java_lang_Class_fix_pre(objArrayHandle* methods_ptr,
+   FieldAllocationCount *fac_ptr, TRAPS) {
+   // Add fake fields for java.lang.Class instances
+   //
+@@ -2323,7 +2319,7 @@
+   // versions because when the offsets are computed at bootstrap
+   // time we don't know yet which version of the JDK we're running in.
+ 
+-  // The values below are fake but will force two non-static oop fields and 
++  // The values below are fake but will force two non-static oop fields and
+   // a corresponding non-static oop map block to be allocated.
+   const int extra = java_lang_Class::number_of_fake_oop_fields;
+   fac_ptr->nonstatic_oop_count += extra;
+@@ -2333,16 +2329,16 @@
+ void ClassFileParser::java_lang_Class_fix_post(int* next_nonstatic_oop_offset_ptr) {
+   // Cause the extra fake fields in java.lang.Class to show up before
+   // the Java fields for layout compatibility between 1.3 and 1.4
+-  // Incrementing next_nonstatic_oop_offset here advances the 
++  // Incrementing next_nonstatic_oop_offset here advances the
+   // location where the real java fields are placed.
+   const int extra = java_lang_Class::number_of_fake_oop_fields;
+   (*next_nonstatic_oop_offset_ptr) += (extra * wordSize);
+ }
+ 
+ 
+-instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name, 
+-                                                    Handle class_loader, 
+-                                                    Handle protection_domain, 
++instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
++                                                    Handle class_loader,
++                                                    Handle protection_domain,
+                                                     symbolHandle& parsed_name,
+                                                     TRAPS) {
+   // So that JVMTI can cache class file in the state before retransformable agents
+@@ -2360,15 +2356,15 @@
+     unsigned char* ptr = cfs->buffer();
+     unsigned char* end_ptr = cfs->buffer() + cfs->length();
+ 
+-    JvmtiExport::post_class_file_load_hook(name, class_loader, protection_domain, 
++    JvmtiExport::post_class_file_load_hook(name, class_loader, protection_domain,
+                                            &ptr, &end_ptr,
+-                                           &cached_class_file_bytes, 
++                                           &cached_class_file_bytes,
+                                            &cached_class_file_length);
+ 
+     if (ptr != cfs->buffer()) {
+       // JVMTI agent has modified class file data.
+       // Set new class file stream using JVMTI agent modified
+-      // class file data.       
++      // class file data.
+       cfs = new ClassFileStream(ptr, end_ptr - ptr, cfs->source());
+       set_stream(cfs);
+     }
+@@ -2379,7 +2375,7 @@
+ 
+   // Figure out whether we can skip format checking (matching classic VM behavior)
+   _need_verify = Verifier::should_verify_for(class_loader());
+-  
++
+   // Set the verify flag in stream
+   cfs->set_verify(_need_verify);
+ 
+@@ -2389,31 +2385,31 @@
+   cfs->guarantee_more(8, CHECK_(nullHandle));  // magic, major, minor
+   // Magic value
+   u4 magic = cfs->get_u4_fast();
+-  guarantee_property(magic == JAVA_CLASSFILE_MAGIC, 
+-                     "Incompatible magic value %u in class file %s", 
++  guarantee_property(magic == JAVA_CLASSFILE_MAGIC,
++                     "Incompatible magic value %u in class file %s",
+                      magic, CHECK_(nullHandle));
+ 
+-  // Version numbers  
++  // Version numbers
+   u2 minor_version = cfs->get_u2_fast();
+   u2 major_version = cfs->get_u2_fast();
+ 
+   // Check version numbers - we check this even with verifier off
+   if (!is_supported_version(major_version, minor_version)) {
+     if (name.is_null()) {
+-      Exceptions::fthrow( 
++      Exceptions::fthrow(
+         THREAD_AND_LOCATION,
+-        vmSymbolHandles::java_lang_UnsupportedClassVersionError(), 
++        vmSymbolHandles::java_lang_UnsupportedClassVersionError(),
+         "Unsupported major.minor version %u.%u",
+-        major_version, 
++        major_version,
+         minor_version);
+     } else {
+       ResourceMark rm(THREAD);
+-      Exceptions::fthrow( 
++      Exceptions::fthrow(
+         THREAD_AND_LOCATION,
+-        vmSymbolHandles::java_lang_UnsupportedClassVersionError(), 
++        vmSymbolHandles::java_lang_UnsupportedClassVersionError(),
+         "%s : Unsupported major.minor version %u.%u",
+         name->as_C_string(),
+-        major_version, 
++        major_version,
+         minor_version);
+     }
+     return nullHandle;
+@@ -2449,8 +2445,8 @@
+   u2 this_class_index = cfs->get_u2_fast();
+   check_property(
+     valid_cp_range(this_class_index, cp_size) &&
+-      cp->tag_at(this_class_index).is_unresolved_klass(), 
+-    "Invalid this class index %u in constant pool in class file %s", 
++      cp->tag_at(this_class_index).is_unresolved_klass(),
++    "Invalid this class index %u in constant pool in class file %s",
+     this_class_index, CHECK_(nullHandle));
+ 
+   symbolHandle class_name (THREAD, cp->unresolved_klass_at(this_class_index));
+@@ -2467,11 +2463,11 @@
+   // It has been checked when constant pool is parsed.
+   // However, make sure it is not an array type.
+   if (_need_verify) {
+-    guarantee_property(class_name->byte_at(0) != JVM_SIGNATURE_ARRAY, 
+-                       "Bad class name in class file %s", 
++    guarantee_property(class_name->byte_at(0) != JVM_SIGNATURE_ARRAY,
++                       "Bad class name in class file %s",
+                        CHECK_(nullHandle));
+   }
+-  
++
+   klassOop preserve_this_klass;   // for storing result across HandleMark
+ 
+   // release all handles when parsing is done
+@@ -2482,9 +2478,9 @@
+       ResourceMark rm(THREAD);
+       Exceptions::fthrow(
+         THREAD_AND_LOCATION,
+-        vmSymbolHandles::java_lang_NoClassDefFoundError(), 
+-        "%s (wrong name: %s)", 
+-        name->as_C_string(), 
++        vmSymbolHandles::java_lang_NoClassDefFoundError(),
++        "%s (wrong name: %s)",
++        name->as_C_string(),
+         class_name->as_C_string()
+       );
+       return nullHandle;
+@@ -2499,19 +2495,19 @@
+     u2 super_class_index = cfs->get_u2_fast();
+     if (super_class_index == 0) {
+       check_property(class_name() == vmSymbols::java_lang_Object(),
+-                     "Invalid superclass index %u in class file %s", 
++                     "Invalid superclass index %u in class file %s",
+                      super_class_index,
+                      CHECK_(nullHandle));
+     } else {
+       check_property(valid_cp_range(super_class_index, cp_size) &&
+-                     cp->tag_at(super_class_index).is_unresolved_klass(), 
+-                     "Invalid superclass index %u in class file %s", 
++                     cp->tag_at(super_class_index).is_unresolved_klass(),
++                     "Invalid superclass index %u in class file %s",
+                      super_class_index,
+                      CHECK_(nullHandle));
+       // The class name should be legal because it is checked when parsing constant pool.
+       // However, make sure it is not an array type.
+       if (_need_verify) {
+-        guarantee_property(cp->unresolved_klass_at(super_class_index)->byte_at(0) != JVM_SIGNATURE_ARRAY, 
++        guarantee_property(cp->unresolved_klass_at(super_class_index)->byte_at(0) != JVM_SIGNATURE_ARRAY,
+                           "Bad superclass name in class file %s", CHECK_(nullHandle));
+       }
+     }
+@@ -2538,7 +2534,7 @@
+     objArrayOop methods_annotations_oop = NULL;
+     objArrayOop methods_parameter_annotations_oop = NULL;
+     objArrayOop methods_default_annotations_oop = NULL;
+-    objArrayHandle methods = parse_methods(cp, access_flags.is_interface(), 
++    objArrayHandle methods = parse_methods(cp, access_flags.is_interface(),
+                                            &promoted_flags,
+                                            &has_final_method,
+                                            &methods_annotations_oop,
+@@ -2561,9 +2557,9 @@
+                            CHECK_(nullHandle));
+       }
+       klassOop k = SystemDictionary::resolve_super_or_fail(class_name,
+-                                                           sk, 
+-                                                           class_loader, 
+-                                                           protection_domain, 
++                                                           sk,
++                                                           class_loader,
++                                                           protection_domain,
+                                                            true,
+                                                            CHECK_(nullHandle));
+       KlassHandle kh (THREAD, k);
+@@ -2599,22 +2595,22 @@
+     access_flags.add_promoted_flags(promoted_flags.as_int());
+ 
+     // Size of Java vtable (in words)
+-    int vtable_size = 0;    
++    int vtable_size = 0;
+     int itable_size = 0;
+     int num_miranda_methods = 0;
+ 
+-    klassVtable::compute_vtable_size_and_num_mirandas(vtable_size, 
+-                                                      num_miranda_methods, 
++    klassVtable::compute_vtable_size_and_num_mirandas(vtable_size,
++                                                      num_miranda_methods,
+                                                       super_klass(),
+                                                       methods(),
+                                                       access_flags,
+                                                       class_loader(),
+-                                                      class_name(), 
+-                                                      local_interfaces());  
+-       
++                                                      class_name(),
++                                                      local_interfaces());
++
+     // Size of Java itable (in words)
+-    itable_size = access_flags.is_interface() ? 0 : klassItable::compute_itable_size(transitive_interfaces);  
+-    
++    itable_size = access_flags.is_interface() ? 0 : klassItable::compute_itable_size(transitive_interfaces);
++
+     // Field size and offset computation
+     int nonstatic_field_size = super_klass() == NULL ? 0 : super_klass->nonstatic_field_size();
+ #ifndef PRODUCT
+@@ -2638,29 +2634,29 @@
+     int next_nonstatic_field_offset;
+ 
+     // Calculate the starting byte offsets
+-    next_static_oop_offset      = (instanceKlass::header_size() + 
+-		 		  align_object_offset(vtable_size) + 
+-				  align_object_offset(itable_size)) * wordSize;
+-    next_static_double_offset   = next_static_oop_offset + 
+-			 	  (fac.static_oop_count * oopSize);
+-    if ( fac.static_double_count && 
+-	 (Universe::field_type_should_be_aligned(T_DOUBLE) || 
+- 	  Universe::field_type_should_be_aligned(T_LONG)) ) {
++    next_static_oop_offset      = (instanceKlass::header_size() +
++                                  align_object_offset(vtable_size) +
++                                  align_object_offset(itable_size)) * wordSize;
++    next_static_double_offset   = next_static_oop_offset +
++                                  (fac.static_oop_count * oopSize);
++    if ( fac.static_double_count &&
++         (Universe::field_type_should_be_aligned(T_DOUBLE) ||
++          Universe::field_type_should_be_aligned(T_LONG)) ) {
+       next_static_double_offset = align_size_up(next_static_double_offset, BytesPerLong);
+     }
+ 
+-    next_static_word_offset     = next_static_double_offset + 
+-				  (fac.static_double_count * BytesPerLong);
+-    next_static_short_offset    = next_static_word_offset + 
+-				  (fac.static_word_count * BytesPerInt);
+-    next_static_byte_offset     = next_static_short_offset + 
+-				  (fac.static_short_count * BytesPerShort);
++    next_static_word_offset     = next_static_double_offset +
++                                  (fac.static_double_count * BytesPerLong);
++    next_static_short_offset    = next_static_word_offset +
++                                  (fac.static_word_count * BytesPerInt);
++    next_static_byte_offset     = next_static_short_offset +
++                                  (fac.static_short_count * BytesPerShort);
+     next_static_type_offset     = align_size_up((next_static_byte_offset +
+-			          fac.static_byte_count ), wordSize );
+-    static_field_size 	        = (next_static_type_offset - 
+-			          next_static_oop_offset) / wordSize;
+-    first_nonstatic_field_offset = (instanceOopDesc::header_size() + 
+-				    nonstatic_field_size) * wordSize;
++                                  fac.static_byte_count ), wordSize );
++    static_field_size           = (next_static_type_offset -
++                                  next_static_oop_offset) / wordSize;
++    first_nonstatic_field_offset = (instanceOopDesc::header_size() +
++                                    nonstatic_field_size) * wordSize;
+     next_nonstatic_field_offset = first_nonstatic_field_offset;
+ 
+     // Add fake fields for java.lang.Class instances (also see below)
+@@ -2668,9 +2664,9 @@
+       java_lang_Class_fix_pre(&methods, &fac, CHECK_(nullHandle));
+     }
+ 
+-    // Add a fake "discovered" field if it is not present 
++    // Add a fake "discovered" field if it is not present
+     // for compatibility with earlier jdk's.
+-    if (class_name() == vmSymbols::java_lang_ref_Reference() 
++    if (class_name() == vmSymbols::java_lang_ref_Reference()
+       && class_loader.is_null()) {
+       java_lang_ref_Reference_fix_pre(&fields, cp, &fac, CHECK_(nullHandle));
+     }
+@@ -2709,20 +2705,20 @@
+ 
+ #ifndef PRODUCT
+     if( PrintCompactFieldsSavings ) {
+-      next_nonstatic_double_offset = next_nonstatic_field_offset + 
++      next_nonstatic_double_offset = next_nonstatic_field_offset +
+                                      (nonstatic_oop_count * oopSize);
+       if ( nonstatic_double_count > 0 ) {
+-        next_nonstatic_double_offset = align_size_up(next_nonstatic_double_offset, BytesPerLong); 
++        next_nonstatic_double_offset = align_size_up(next_nonstatic_double_offset, BytesPerLong);
+       }
+-      next_nonstatic_word_offset  = next_nonstatic_double_offset + 
++      next_nonstatic_word_offset  = next_nonstatic_double_offset +
+                                     (nonstatic_double_count * BytesPerLong);
+-      next_nonstatic_short_offset = next_nonstatic_word_offset + 
++      next_nonstatic_short_offset = next_nonstatic_word_offset +
+                                     (nonstatic_word_count * BytesPerInt);
+-      next_nonstatic_byte_offset  = next_nonstatic_short_offset + 
++      next_nonstatic_byte_offset  = next_nonstatic_short_offset +
+                                     (nonstatic_short_count * BytesPerShort);
+       next_nonstatic_type_offset  = align_size_up((next_nonstatic_byte_offset +
+                                     nonstatic_byte_count ), wordSize );
+-      orig_nonstatic_field_size   = nonstatic_field_size + 
++      orig_nonstatic_field_size   = nonstatic_field_size +
+         ((next_nonstatic_type_offset - first_nonstatic_field_offset)/wordSize);
+     }
+ #endif
+@@ -2752,8 +2748,8 @@
+     if( allocation_style == 0 ) {
+       // Fields order: oops, longs/doubles, ints, shorts/chars, bytes
+       next_nonstatic_oop_offset    = next_nonstatic_field_offset;
+-      next_nonstatic_double_offset = next_nonstatic_oop_offset + 
+-			 	     (nonstatic_oop_count * oopSize);
++      next_nonstatic_double_offset = next_nonstatic_oop_offset +
++                                     (nonstatic_oop_count * oopSize);
+     } else if( allocation_style == 1 ) {
+       // Fields order: longs/doubles, ints, shorts/chars, bytes, oops
+       next_nonstatic_double_offset = next_nonstatic_field_offset;
+@@ -2799,7 +2795,7 @@
+         }
+         // Allocate oop field in the gap if there are no other fields for that.
+         nonstatic_oop_space_offset = offset;
+-        if( length >= oopSize && nonstatic_oop_count > 0 &&  
++        if( length >= oopSize && nonstatic_oop_count > 0 &&
+             allocation_style != 0 ) { // when oop fields not first
+           nonstatic_oop_count      -= 1;
+           nonstatic_oop_space_count = 1; // Only one will fit
+@@ -2809,17 +2805,17 @@
+       }
+     }
+ 
+-    next_nonstatic_word_offset  = next_nonstatic_double_offset + 
++    next_nonstatic_word_offset  = next_nonstatic_double_offset +
+                                   (nonstatic_double_count * BytesPerLong);
+-    next_nonstatic_short_offset = next_nonstatic_word_offset + 
++    next_nonstatic_short_offset = next_nonstatic_word_offset +
+                                   (nonstatic_word_count * BytesPerInt);
+-    next_nonstatic_byte_offset  = next_nonstatic_short_offset + 
++    next_nonstatic_byte_offset  = next_nonstatic_short_offset +
+                                   (nonstatic_short_count * BytesPerShort);
+ 
+     int notaligned_offset;
+     if( allocation_style == 0 ) {
+       notaligned_offset = next_nonstatic_byte_offset + nonstatic_byte_count;
+-    } else { // allocation_style == 1 
++    } else { // allocation_style == 1
+       next_nonstatic_oop_offset = next_nonstatic_byte_offset + nonstatic_byte_count;
+       if( nonstatic_oop_count > 0 ) {
+         notaligned_offset = next_nonstatic_oop_offset;
+@@ -2871,7 +2867,7 @@
+           }
+           // Update oop maps
+           if( nonstatic_oop_map_count > 0 &&
+-              nonstatic_oop_offsets[nonstatic_oop_map_count - 1] == 
++              nonstatic_oop_offsets[nonstatic_oop_map_count - 1] ==
+               (u2)(real_offset - nonstatic_oop_length[nonstatic_oop_map_count - 1] * oopSize) ) {
+             // Extend current oop map
+             nonstatic_oop_length[nonstatic_oop_map_count - 1] += 1;
+@@ -2924,7 +2920,7 @@
+           ShouldNotReachHere();
+       }
+       fields->short_at_put(i+4, extract_low_short_from_int(real_offset) );
+-      fields->short_at_put(i+5, extract_high_short_from_int(real_offset) ); 
++      fields->short_at_put(i+5, extract_high_short_from_int(real_offset) );
+     }
+ 
+     // Size of instances
+@@ -2945,16 +2941,16 @@
+       rt = super_klass->reference_type();
+     }
+ 
+-    // We can now create the basic klassOop for this klass    
++    // We can now create the basic klassOop for this klass
+     klassOop ik = oopFactory::new_instanceKlass(
+-                                    vtable_size, itable_size, 
+-                                    static_field_size, nonstatic_oop_map_size, 
++                                    vtable_size, itable_size,
++                                    static_field_size, nonstatic_oop_map_size,
+                                     rt, CHECK_(nullHandle));
+-    instanceKlassHandle this_klass (THREAD, ik); 
++    instanceKlassHandle this_klass (THREAD, ik);
+ 
+-    assert(this_klass->static_field_size() == static_field_size && 
++    assert(this_klass->static_field_size() == static_field_size &&
+            this_klass->nonstatic_oop_map_size() == nonstatic_oop_map_size, "sanity check");
+-    
++
+     // Fill in information already parsed
+     this_klass->set_access_flags(access_flags);
+     jint lh = Klass::instance_layout_helper(instance_size, false);
+@@ -2962,10 +2958,10 @@
+     assert(this_klass->oop_is_instance(), "layout is correct");
+     assert(this_klass->size_helper() == instance_size, "correct size_helper");
+     // Not yet: supers are done below to support the new subtype-checking fields
+-    //this_klass->set_super(super_klass());  
+-    this_klass->set_class_loader(class_loader());    
++    //this_klass->set_super(super_klass());
++    this_klass->set_class_loader(class_loader());
+     this_klass->set_nonstatic_field_size(nonstatic_field_size);
+-    this_klass->set_static_oop_field_size(fac.static_oop_count);       
++    this_klass->set_static_oop_field_size(fac.static_oop_count);
+     cp->set_pool_holder(this_klass());
+     this_klass->set_constants(cp());
+     this_klass->set_local_interfaces(local_interfaces());
+@@ -2988,16 +2984,16 @@
+ 
+     if (cached_class_file_bytes != NULL) {
+       // JVMTI: we have an instanceKlass now, tell it about the cached bytes
+-      this_klass->set_cached_class_file(cached_class_file_bytes, 
++      this_klass->set_cached_class_file(cached_class_file_bytes,
+                                         cached_class_file_length);
+     }
+-      
++
+     // Miranda methods
+-    if ((num_miranda_methods > 0) || 
+-	// if this class introduced new miranda methods or
+-	(super_klass.not_null() && (super_klass->has_miranda_methods()))
+-	// super class exists and this class inherited miranda methods
+-	) {
++    if ((num_miranda_methods > 0) ||
++        // if this class introduced new miranda methods or
++        (super_klass.not_null() && (super_klass->has_miranda_methods()))
++        // super class exists and this class inherited miranda methods
++        ) {
+       this_klass->set_has_miranda_methods(); // then set a flag
+     }
+ 
+@@ -3012,7 +3008,7 @@
+ 
+     // VerifyOops believes that once this has been set, the object is completely loaded.
+     // Compute transitive closure of interfaces this class implements
+-    this_klass->set_transitive_interfaces(transitive_interfaces());    
++    this_klass->set_transitive_interfaces(transitive_interfaces());
+ 
+     // Fill in information needed to compute superclasses.
+     this_klass->initialize_supers(super_klass(), CHECK_(nullHandle));
+@@ -3043,9 +3039,9 @@
+       check_illegal_static_method(this_klass, CHECK_(nullHandle));
+     }
+ 
+-    ClassLoadingService::notify_class_loaded(instanceKlass::cast(this_klass()), 
++    ClassLoadingService::notify_class_loaded(instanceKlass::cast(this_klass()),
+                                              false /* not shared class */);
+-	  
++
+     if (TraceClassLoading) {
+       // print in a single call to reduce interleaving of output
+       if (cfs->source() != NULL) {
+@@ -3078,7 +3074,7 @@
+       if (!local_interfaces.is_null()) {
+         int length = local_interfaces->length();
+         for (int i = 0; i < length; i++) {
+-          klassOop k = klassOop(local_interfaces->obj_at(i)); 
++          klassOop k = klassOop(local_interfaces->obj_at(i));
+           instanceKlass* to_class = instanceKlass::cast(k);
+           const char * to = to_class->external_name();
+           tty->print("RESOLVE %s %s\n", from, to);
+@@ -3089,19 +3085,19 @@
+ #ifndef PRODUCT
+     if( PrintCompactFieldsSavings ) {
+       if( nonstatic_field_size < orig_nonstatic_field_size ) {
+-        tty->print("[Saved %d of %3d words in %s]\n", 
++        tty->print("[Saved %d of %3d words in %s]\n",
+                  orig_nonstatic_field_size - nonstatic_field_size,
+                  orig_nonstatic_field_size, this_klass->external_name());
+       } else if( nonstatic_field_size > orig_nonstatic_field_size ) {
+-        tty->print("[Wasted %d over %3d words in %s]\n", 
++        tty->print("[Wasted %d over %3d words in %s]\n",
+                  nonstatic_field_size - orig_nonstatic_field_size,
+                  orig_nonstatic_field_size, this_klass->external_name());
+       }
+     }
+ #endif
+ 
+-    // preserve result across HandleMark  
+-    preserve_this_klass = this_klass();    
++    // preserve result across HandleMark
++    preserve_this_klass = this_klass();
+   }
+ 
+   // Create new handle outside HandleMark
+@@ -3125,7 +3121,7 @@
+ 
+       int next_offset = last_map->offset() + (last_map->length() * oopSize);
+       if (next_offset == first_nonstatic_oop_offset) {
+-        // There is no gap bettwen superklass's last oop field and first 
++        // There is no gap between superklass's last oop field and first
+         // local oop field, merge maps.
+         nonstatic_oop_map_count -= 1;
+       } else {
+@@ -3139,8 +3135,8 @@
+ }
+ 
+ 
+-void ClassFileParser::fill_oop_maps(instanceKlassHandle k, 
+-                        int nonstatic_oop_map_count, 
++void ClassFileParser::fill_oop_maps(instanceKlassHandle k,
++                        int nonstatic_oop_map_count,
+                         u2* nonstatic_oop_offsets, u2* nonstatic_oop_length) {
+   OopMapBlock* this_oop_map = k->start_of_nonstatic_oop_maps();
+   OopMapBlock* last_oop_map = this_oop_map + k->nonstatic_oop_map_size();
+@@ -3155,9 +3151,9 @@
+   }
+   if (nonstatic_oop_map_count > 0) {
+     if (this_oop_map + nonstatic_oop_map_count > last_oop_map) {
+-      // Calculated in compute_oop_map_size() number of oop maps is less then 
+-      // collected oop maps since there is no gap between superklass's last oop 
+-      // field and first local oop field. Extend the last oop map copied 
++      // The number of oop maps calculated in compute_oop_map_size() is less than
++      // the number of collected oop maps since there is no gap between superklass's
++      // last oop field and first local oop field. Extend the last oop map copied
+       // from the superklass instead of creating new one.
+       nonstatic_oop_map_count--;
+       nonstatic_oop_offsets++;
+@@ -3273,7 +3269,7 @@
+     super_size = super->transitive_interfaces()->length();
+     max_transitive_size += super_size;
+   }
+-  // Add local interfaces' super interfaces  
++  // Add local interfaces' super interfaces
+   int local_size = local_ifs->length();
+   for (int i = 0; i < local_size; i++) {
+     klassOop l = klassOop(local_ifs->obj_at(i));
+@@ -3300,7 +3296,7 @@
+     // Copy down from superclass
+     if (super.not_null()) {
+       append_interfaces(result, index, super->transitive_interfaces());
+-    }    
++    }
+     // Copy down from local interfaces' superinterfaces
+     for (int i = 0; i < local_ifs->length(); i++) {
+       klassOop l = klassOop(local_ifs->obj_at(i));
+@@ -3321,7 +3317,7 @@
+       result = objArrayHandle(THREAD, new_result);
+     }
+   }
+-  return result;  
++  return result;
+ }
+ 
+ 
+@@ -3330,7 +3326,7 @@
+   if ((super != NULL) &&
+       (!Reflection::verify_class_access(this_klass->as_klassOop(), super, false))) {
+     ResourceMark rm(THREAD);
+-    Exceptions::fthrow(  
++    Exceptions::fthrow(
+       THREAD_AND_LOCATION,
+       vmSymbolHandles::java_lang_IllegalAccessError(),
+       "class %s cannot access its superclass %s",
+@@ -3346,11 +3342,11 @@
+   objArrayHandle local_interfaces (THREAD, this_klass->local_interfaces());
+   int lng = local_interfaces->length();
+   for (int i = lng - 1; i >= 0; i--) {
+-    klassOop k = klassOop(local_interfaces->obj_at(i)); 
++    klassOop k = klassOop(local_interfaces->obj_at(i));
+     assert (k != NULL && Klass::cast(k)->is_interface(), "invalid interface");
+     if (!Reflection::verify_class_access(this_klass->as_klassOop(), k, false)) {
+       ResourceMark rm(THREAD);
+-      Exceptions::fthrow(  
++      Exceptions::fthrow(
+         THREAD_AND_LOCATION,
+         vmSymbolHandles::java_lang_IllegalAccessError(),
+         "class %s cannot access its superinterface %s",
+@@ -3366,7 +3362,7 @@
+ void ClassFileParser::check_final_method_override(instanceKlassHandle this_klass, TRAPS) {
+   objArrayHandle methods (THREAD, this_klass->methods());
+   int num_methods = methods->length();
+-  
++
+   // go thru each method and check if it overrides a final method
+   for (int index = 0; index < num_methods; index++) {
+     methodOop m = (methodOop)methods->obj_at(index);
+@@ -3375,7 +3371,7 @@
+     if ((!m->is_private()) &&
+         (!m->is_static()) &&
+         (m->name() != vmSymbols::object_initializer_name())) {
+-	
++
+       symbolOop name = m->name();
+       symbolOop signature = m->signature();
+       klassOop k = this_klass->super();
+@@ -3384,21 +3380,21 @@
+         // skip supers that don't have final methods.
+         if (k->klass_part()->has_final_method()) {
+           // lookup a matching method in the super class hierarchy
+-          super_m = instanceKlass::cast(k)->lookup_method(name, signature); 
++          super_m = instanceKlass::cast(k)->lookup_method(name, signature);
+           if (super_m == NULL) {
+             break; // didn't find any match; get out
+           }
+-  
++
+           if (super_m->is_final() &&
+               // matching method in super is final
+-              (Reflection::verify_field_access(this_klass->as_klassOop(), 
++              (Reflection::verify_field_access(this_klass->as_klassOop(),
+                                                super_m->method_holder(),
+                                                super_m->method_holder(),
+                                                super_m->access_flags(), false))
+             // this class can access super final method and therefore override
+             ) {
+             ResourceMark rm(THREAD);
+-            Exceptions::fthrow(  
++            Exceptions::fthrow(
+               THREAD_AND_LOCATION,
+               vmSymbolHandles::java_lang_VerifyError(),
+               "class %s overrides final method %s.%s",
+@@ -3432,7 +3428,7 @@
+     // if m is static and not the init method, throw a verify error
+     if ((m->is_static()) && (m->name() != vmSymbols::class_initializer_name())) {
+       ResourceMark rm(THREAD);
+-      Exceptions::fthrow(  
++      Exceptions::fthrow(
+         THREAD_AND_LOCATION,
+         vmSymbolHandles::java_lang_VerifyError(),
+         "Illegal static method %s in interface %s",
+@@ -3444,7 +3440,7 @@
+   }
+ }
+ 
+-// utility methods for format checking 
++// utility methods for format checking
+ 
+ void ClassFileParser::verify_legal_class_modifiers(jint flags, TRAPS) {
+   if (!_need_verify) { return; }
+@@ -3483,9 +3479,9 @@
+ }
+ 
+ bool ClassFileParser::is_supported_version(u2 major, u2 minor) {
+-  return (major >= JAVA_MIN_SUPPORTED_VERSION) && 
+-         (major <= JAVA_MAX_SUPPORTED_VERSION) && 
+-         ((major != JAVA_MAX_SUPPORTED_VERSION) || 
++  return (major >= JAVA_MIN_SUPPORTED_VERSION) &&
++         (major <= JAVA_MAX_SUPPORTED_VERSION) &&
++         ((major != JAVA_MAX_SUPPORTED_VERSION) ||
+           (minor <= JAVA_MAX_SUPPORTED_MINOR_VERSION));
+ }
+ 
+@@ -3506,8 +3502,8 @@
+   bool is_illegal = false;
+ 
+   if (is_interface) {
+-    if (!is_public || !is_static || !is_final || is_private || 
+-        is_protected || is_volatile || is_transient || 
++    if (!is_public || !is_static || !is_final || is_private ||
++        is_protected || is_volatile || is_transient ||
+         (major_gte_15 && is_enum)) {
+       is_illegal = true;
+     }
+@@ -3547,19 +3543,19 @@
+   bool is_illegal = false;
+ 
+   if (is_interface) {
+-    if (!is_abstract || !is_public || is_static || is_final || 
++    if (!is_abstract || !is_public || is_static || is_final ||
+         is_native || (major_gte_15 && (is_synchronized || is_strict))) {
+       is_illegal = true;
+     }
+   } else { // not interface
+     if (is_initializer) {
+-      if (is_static || is_final || is_synchronized || is_native || 
++      if (is_static || is_final || is_synchronized || is_native ||
+           is_abstract || (major_gte_15 && is_bridge)) {
+         is_illegal = true;
+       }
+     } else { // not initializer
+       if (is_abstract) {
+-        if ((is_final || is_native || is_private || is_static || 
++        if ((is_final || is_native || is_private || is_static ||
+             (major_gte_15 && (is_synchronized || is_strict)))) {
+           is_illegal = true;
+         }
+@@ -3575,7 +3571,7 @@
+     Exceptions::fthrow(
+       THREAD_AND_LOCATION,
+       vmSymbolHandles::java_lang_ClassFormatError(),
+-      "Method %s in class %s has illegal modifiers: 0x%X", 
++      "Method %s in class %s has illegal modifiers: 0x%X",
+       name->as_C_string(), _class_name->as_C_string(), flags);
+     return;
+   }
+@@ -3612,7 +3608,7 @@
+         c = UTF8::get_supplementary_character(&buffer[i]);
+         i += 5;
+         continue;
+-      } 
++      }
+     }
+     switch (buffer[i] >> 4) {
+       default: break;
+@@ -3627,7 +3623,7 @@
+             // for classes with major > 47, c must a null or a character in its shortest form
+             break;
+           }
+-        } 
++        }
+         classfile_parse_error("Illegal UTF8 string in constant pool in class file %s", CHECK);
+       case 0xE:  // 1110xxxx 10xxxxxx 10xxxxxx
+         c = (buffer[i] & 0xF) << 12;
+@@ -3668,9 +3664,9 @@
+       // Class names may be drawn from the entire Unicode character set.
+       // Identifiers between '/' must be unqualified names.
+       // The utf8 string has been verified when parsing cpool entries.
+-      legal = verify_unqualified_name(bytes, length, LegalClass);  
++      legal = verify_unqualified_name(bytes, length, LegalClass);
+     }
+-  } 
++  }
+   if (!legal) {
+     ResourceMark rm(THREAD);
+     Exceptions::fthrow(
+@@ -3694,7 +3690,7 @@
+ 
+   if (length > 0) {
+     if (_major_version < JAVA_1_5_VERSION) {
+-      if (bytes[0] != '<') { 
++      if (bytes[0] != '<') {
+         char* p = skip_over_field_name(bytes, false, length);
+         legal = (p != NULL) && ((p - bytes) == (int)length);
+       }
+@@ -3768,7 +3764,7 @@
+     Exceptions::fthrow(
+       THREAD_AND_LOCATION,
+       vmSymbolHandles::java_lang_ClassFormatError(),
+-      "Field \"%s\" in class %s has illegal signature \"%s\"", 
++      "Field \"%s\" in class %s has illegal signature \"%s\"",
+       name->as_C_string(), _class_name->as_C_string(), bytes
+     );
+     return;
+@@ -3826,7 +3822,7 @@
+   Exceptions::fthrow(
+     THREAD_AND_LOCATION,
+     vmSymbolHandles::java_lang_ClassFormatError(),
+-    "Method \"%s\" in class %s has illegal signature \"%s\"", 
++    "Method \"%s\" in class %s has illegal signature \"%s\"",
+     name->as_C_string(),  _class_name->as_C_string(), p
+   );
+   return 0;
+@@ -3863,17 +3859,17 @@
+ }
+ 
+ 
+-// Take pointer to a string. Skip over the longest part of the string that could 
++// Take pointer to a string. Skip over the longest part of the string that could
+ // be taken as a fieldname. Allow '/' if slash_ok is true.
+-// Return a pointer to just past the fieldname. 
+-// Return NULL if no fieldname at all was found, or in the case of slash_ok 
+-// being true, we saw consecutive slashes (meaning we were looking for a 
++// Return a pointer to just past the fieldname.
++// Return NULL if no fieldname at all was found, or in the case of slash_ok
++// being true, we saw consecutive slashes (meaning we were looking for a
+ // qualified path but found something that was badly-formed).
+ char* ClassFileParser::skip_over_field_name(char* name, bool slash_ok, unsigned int length) {
+   char* p;
+-  jchar ch;                     
+-  jboolean last_is_slash = false;            
+-  jboolean not_first_ch = false; 
++  jchar ch;
++  jboolean last_is_slash = false;
++  jboolean not_first_ch = false;
+ 
+   for (p = name; p != name + length; not_first_ch = true) {
+     char* old_p = p;
+@@ -3914,29 +3910,29 @@
+       // public static boolean isJavaIdentifierStart(char ch);
+       JavaCalls::call_static(&result,
+                              klass,
+-                             vmSymbolHandles::isJavaIdentifierStart_name(), 
++                             vmSymbolHandles::isJavaIdentifierStart_name(),
+                              vmSymbolHandles::int_bool_signature(),
+                              &args,
+                              THREAD);
+-         
+-      if (HAS_PENDING_EXCEPTION) {      
++
++      if (HAS_PENDING_EXCEPTION) {
+         CLEAR_PENDING_EXCEPTION;
+         return 0;
+       }
+       if (result.get_jboolean()) {
+         continue;
+       }
+-        
++
+       if (not_first_ch) {
+         // public static boolean isJavaIdentifierPart(char ch);
+         JavaCalls::call_static(&result,
+                                klass,
+-                               vmSymbolHandles::isJavaIdentifierPart_name(), 
++                               vmSymbolHandles::isJavaIdentifierPart_name(),
+                                vmSymbolHandles::int_bool_signature(),
+                                &args,
+                                THREAD);
+-     
+-        if (HAS_PENDING_EXCEPTION) {    
++
++        if (HAS_PENDING_EXCEPTION) {
+           CLEAR_PENDING_EXCEPTION;
+           return 0;
+         }
+@@ -3954,10 +3950,10 @@
+ 
+ // Take pointer to a string. Skip over the longest part of the string that could
+ // be taken as a field signature. Allow "void" if void_ok.
+-// Return a pointer to just past the signature. 
++// Return a pointer to just past the signature.
+ // Return NULL if no legal signature is found.
+-char* ClassFileParser::skip_over_field_signature(char* signature, 
+-                                                 bool void_ok, 
++char* ClassFileParser::skip_over_field_signature(char* signature,
++                                                 bool void_ok,
+                                                  unsigned int length,
+                                                  TRAPS) {
+   unsigned int array_dim = 0;
+@@ -3977,25 +3973,25 @@
+         if (_major_version < JAVA_1_5_VERSION) {
+           // Skip over the class name if one is there
+           char* p = skip_over_field_name(signature + 1, true, --length);
+-        
++
+           // The next character better be a semicolon
+           if (p && (p - signature) > 1 && p[0] == ';') {
+             return p + 1;
+           }
+         } else {
+           // 4900761: For class version > 48, any unicode is allowed in class name.
+-          length--; 
+-          signature++; 
++          length--;
++          signature++;
+           while (length > 0 && signature[0] != ';') {
+             if (signature[0] == '.') {
+               classfile_parse_error("Class name contains illegal character '.' in descriptor in class file %s", CHECK_0);
+             }
+-            length--; 
+-            signature++; 
+-          }            
++            length--;
++            signature++;
++          }
+           if (signature[0] == ';') { return signature + 1; }
+         }
+-            
++
+         return NULL;
+       }
+       case JVM_SIGNATURE_ARRAY:
+@@ -4016,4 +4012,3 @@
+   }
+   return NULL;
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/classFileParser.hpp openjdk/hotspot/src/share/vm/classfile/classFileParser.hpp
+--- openjdk6/hotspot/src/share/vm/classfile/classFileParser.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/classFileParser.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)classFileParser.hpp	1.84 07/05/05 17:06:45 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Parser for for .class files
+@@ -32,7 +29,7 @@
+ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
+  private:
+   bool _need_verify;
+-  bool _relax_verify;  
++  bool _relax_verify;
+   u2   _major_version;
+   u2   _minor_version;
+   symbolHandle _class_name;
+@@ -42,7 +39,7 @@
+   bool _has_vanilla_constructor;
+ 
+   enum { fixed_buffer_size = 128 };
+-  u_char _fixed_buffer[fixed_buffer_size];
++  u_char linenumbertable_buffer[fixed_buffer_size];
+ 
+   ClassFileStream* _stream;              // Actual input stream
+ 
+@@ -60,7 +57,7 @@
+   // Interface parsing
+   objArrayHandle parse_interfaces(constantPoolHandle cp,
+                                   int length,
+-                                  Handle class_loader, 
++                                  Handle class_loader,
+                                   Handle protection_domain,
+                                   PerfTraceTime* vmtimer,
+                                   symbolHandle class_name,
+@@ -68,23 +65,23 @@
+ 
+   // Field parsing
+   void parse_field_attributes(constantPoolHandle cp, u2 attributes_count,
+-                              bool is_static, u2 signature_index, 
++                              bool is_static, u2 signature_index,
+                               u2* constantvalue_index_addr,
+-                              bool* is_synthetic_addr, 
++                              bool* is_synthetic_addr,
+                               u2* generic_signature_index_addr,
+                               typeArrayHandle* field_annotations, TRAPS);
+-  typeArrayHandle parse_fields(constantPoolHandle cp, bool is_interface, 
++  typeArrayHandle parse_fields(constantPoolHandle cp, bool is_interface,
+                                struct FieldAllocationCount *fac,
+                                objArrayHandle* fields_annotations, TRAPS);
+ 
+   // Method parsing
+-  methodHandle parse_method(constantPoolHandle cp, bool is_interface, 
++  methodHandle parse_method(constantPoolHandle cp, bool is_interface,
+                             AccessFlags* promoted_flags,
+                             typeArrayHandle* method_annotations,
+                             typeArrayHandle* method_parameter_annotations,
+                             typeArrayHandle* method_default_annotations,
+                             TRAPS);
+-  objArrayHandle parse_methods (constantPoolHandle cp, bool is_interface, 
++  objArrayHandle parse_methods (constantPoolHandle cp, bool is_interface,
+                                 AccessFlags* promoted_flags,
+                                 bool* has_final_method,
+                                 objArrayOop* methods_annotations_oop,
+@@ -96,10 +93,11 @@
+                                 objArrayHandle methods_parameter_annotations,
+                                 objArrayHandle methods_default_annotations,
+                                 TRAPS);
+-  typeArrayHandle parse_exception_table(u4 code_length, u4 exception_table_length, 
++  typeArrayHandle parse_exception_table(u4 code_length, u4 exception_table_length,
+                                         constantPoolHandle cp, TRAPS);
+-  u_char* parse_linenumber_table(u4 code_attribute_length, u4 code_length,
+-                                 int* compressed_linenumber_table_size, TRAPS);
++  void parse_linenumber_table(
++      u4 code_attribute_length, u4 code_length,
++      CompressedLineNumberWriteStream** write_stream, TRAPS);
+   u2* parse_localvariable_table(u4 code_length, u2 max_locals, u4 code_attribute_length,
+                                 constantPoolHandle cp, u2* localvariable_table_length,
+                                 bool isLVTT, TRAPS);
+@@ -111,14 +109,14 @@
+ 
+   // Classfile attribute parsing
+   void parse_classfile_sourcefile_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
+-  void parse_classfile_source_debug_extension_attribute(constantPoolHandle cp, 
++  void parse_classfile_source_debug_extension_attribute(constantPoolHandle cp,
+                                                 instanceKlassHandle k, int length, TRAPS);
+-  u2   parse_classfile_inner_classes_attribute(constantPoolHandle cp, 
++  u2   parse_classfile_inner_classes_attribute(constantPoolHandle cp,
+                                                instanceKlassHandle k, TRAPS);
+   void parse_classfile_attributes(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
+   void parse_classfile_synthetic_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
+   void parse_classfile_signature_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
+-  
++
+   // Annotations handling
+   typeArrayHandle assemble_annotations(u1* runtime_visible_annotations,
+                                        int runtime_visible_annotations_length,
+@@ -126,18 +124,18 @@
+                                        int runtime_invisible_annotations_length, TRAPS);
+ 
+   // Final setup
+-  int  compute_oop_map_size(instanceKlassHandle super, int nonstatic_oop_count, 
++  int  compute_oop_map_size(instanceKlassHandle super, int nonstatic_oop_count,
+                             int first_nonstatic_oop_offset);
+-  void fill_oop_maps(instanceKlassHandle k, int nonstatic_oop_map_count, 
++  void fill_oop_maps(instanceKlassHandle k, int nonstatic_oop_map_count,
+                      u2* nonstatic_oop_offsets, u2* nonstatic_oop_length);
+   void set_precomputed_flags(instanceKlassHandle k);
+-  objArrayHandle compute_transitive_interfaces(instanceKlassHandle super, 
++  objArrayHandle compute_transitive_interfaces(instanceKlassHandle super,
+                                                objArrayHandle local_ifs, TRAPS);
+ 
+   // Special handling for certain classes.
+   // Add the "discovered" field to java.lang.ref.Reference if
+   // it does not exist.
+-  void java_lang_ref_Reference_fix_pre(typeArrayHandle* fields_ptr, 
++  void java_lang_ref_Reference_fix_pre(typeArrayHandle* fields_ptr,
+     constantPoolHandle cp, FieldAllocationCount *fac_ptr, TRAPS);
+   // Adjust the field allocation counts for java.lang.Class to add
+   // fake fields.
+@@ -169,6 +167,15 @@
+       assert_property(property, msg, CHECK);
+     }
+   }
++
++  inline void check_property(bool property, const char* msg, TRAPS) {
++    if (_need_verify) {
++      guarantee_property(property, msg, CHECK);
++    } else {
++      assert_property(property, msg, CHECK);
++    }
++  }
++
+   inline void guarantee_property(bool b, const char* msg, int index, TRAPS) {
+     if (!b) { classfile_parse_error(msg, index, CHECK); }
+   }
+@@ -201,15 +208,15 @@
+   ClassFileParser(ClassFileStream* st) { set_stream(st); }
+ 
+   // Parse .class file and return new klassOop. The klassOop is not hooked up
+-  // to the system dictionary or any other structures, so a .class file can 
+-  // be loaded several times if desired. 
++  // to the system dictionary or any other structures, so a .class file can
++  // be loaded several times if desired.
+   // The system dictionary hookup is done by the caller.
+   //
+   // "parsed_name" is updated by this method, and is the name found
+   // while parsing the stream.
+-  instanceKlassHandle parseClassFile(symbolHandle name, 
+-                                     Handle class_loader, 
+-                                     Handle protection_domain, 
++  instanceKlassHandle parseClassFile(symbolHandle name,
++                                     Handle class_loader,
++                                     Handle protection_domain,
+                                      symbolHandle& parsed_name,
+                                      TRAPS);
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/classFileStream.cpp openjdk/hotspot/src/share/vm/classfile/classFileStream.cpp
+--- openjdk6/hotspot/src/share/vm/classfile/classFileStream.cpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/classFileStream.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)classFileStream.cpp	1.41 07/05/31 14:29:24 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -85,13 +82,13 @@
+ void ClassFileStream::skip_u1(int length, TRAPS) {
+   if (_need_verify) {
+     guarantee_more(length, CHECK);
+-  } 
++  }
+   _current += length;
+ }
+ 
+ void ClassFileStream::skip_u2(int length, TRAPS) {
+   if (_need_verify) {
+     guarantee_more(length * 2, CHECK);
+-  } 
++  }
+   _current += length * 2;
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/classFileStream.hpp openjdk/hotspot/src/share/vm/classfile/classFileStream.hpp
+--- openjdk6/hotspot/src/share/vm/classfile/classFileStream.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/classFileStream.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)classFileStream.hpp	1.33 07/05/31 14:29:25 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Input stream for reading .class file
+@@ -94,8 +91,8 @@
+     return res;
+   }
+ 
+-  // Get direct pointer into stream at current position. 
+-  // Returns NULL if length elements are not remaining. The caller is 
++  // Get direct pointer into stream at current position.
++  // Returns NULL if length elements are not remaining. The caller is
+   // responsible for calling skip below if buffer contents is used.
+   u1* get_u1_buffer() {
+     return _current;
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/classLoader.cpp openjdk/hotspot/src/share/vm/classfile/classLoader.cpp
+--- openjdk6/hotspot/src/share/vm/classfile/classLoader.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/classLoader.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)classLoader.cpp	1.186 07/05/05 17:06:44 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -209,7 +206,7 @@
+ void ClassPathZipEntry::contents_do(void f(const char* name, void* context), void* context) {
+   JavaThread* thread = JavaThread::current();
+   HandleMark  handle_mark(thread);
+-  ThreadToNativeFromVM ttn(thread);  
++  ThreadToNativeFromVM ttn(thread);
+   for (int n = 0; ; n++) {
+     jzentry * ze = ((*GetNextEntry)(_zip, n));
+     if (ze == NULL) break;
+@@ -259,7 +256,7 @@
+   return true;
+ }
+ 
+-static void print_meta_index(LazyClassPathEntry* entry, 
++static void print_meta_index(LazyClassPathEntry* entry,
+                              GrowableArray<char*>& meta_packages) {
+   tty->print("[Meta index for %s=", entry->name());
+   for (int i = 0; i < meta_packages.length(); i++) {
+@@ -292,7 +289,7 @@
+         case '%':
+         {
+           if ((line_no == 1) && (strcmp(package_name, known_version) != 0)) {
+-            if (TraceClassLoading && Verbose) {  
++            if (TraceClassLoading && Verbose) {
+               tty->print("[Unsupported meta index version]");
+             }
+             fclose(file);
+@@ -312,13 +309,13 @@
+           // Hand off current packages to current lazy entry (if any)
+           if ((cur_entry != NULL) &&
+               (boot_class_path_packages.length() > 0)) {
+-            if (TraceClassLoading && Verbose) {  
++            if (TraceClassLoading && Verbose) {
+               print_meta_index(cur_entry, boot_class_path_packages);
+             }
+             MetaIndex* index = new MetaIndex(boot_class_path_packages.adr_at(0),
+                                              boot_class_path_packages.length());
+             cur_entry->set_meta_index(index);
+-          }         
++          }
+           cur_entry = NULL;
+           boot_class_path_packages.clear();
+ 
+@@ -331,7 +328,7 @@
+               break;
+             }
+           }
+-   
++
+           // If the first character is '@', it indicates the following jar
+           // file is a resource only jar file in which case, we should skip
+           // reading the subsequent entries since the resource loading is
+@@ -345,7 +342,7 @@
+           } else {
+             skipCurrentJar = false;
+           }
+-  
++
+           break;
+         }
+ 
+@@ -361,13 +358,13 @@
+     // Hand off current packages to current lazy entry (if any)
+     if ((cur_entry != NULL) &&
+         (boot_class_path_packages.length() > 0)) {
+-      if (TraceClassLoading && Verbose) {  
++      if (TraceClassLoading && Verbose) {
+         print_meta_index(cur_entry, boot_class_path_packages);
+       }
+       MetaIndex* index = new MetaIndex(boot_class_path_packages.adr_at(0),
+                                        boot_class_path_packages.length());
+       cur_entry->set_meta_index(index);
+-    }          
++    }
+     fclose(file);
+   }
+ }
+@@ -375,7 +372,7 @@
+ void ClassLoader::setup_bootstrap_search_path() {
+   assert(_first_entry == NULL, "should not setup bootstrap class search path twice");
+   char* sys_class_path = os::strdup(Arguments::get_sysclasspath());
+-  if (TraceClassLoading && Verbose) {  
++  if (TraceClassLoading && Verbose) {
+     tty->print_cr("[Bootstrap loader class path=%s]", sys_class_path);
+   }
+ 
+@@ -390,7 +387,7 @@
+     char* path = NEW_C_HEAP_ARRAY(char, end-start+1);
+     strncpy(path, &sys_class_path[start], end-start);
+     path[end-start] = '\0';
+-    update_class_path_entry_list(path);
++    update_class_path_entry_list(path, false);
+     FREE_C_HEAP_ARRAY(char, path);
+     while (sys_class_path[end] == os::path_separator()[0]) {
+       end++;
+@@ -411,7 +408,7 @@
+     if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
+       // This matches the classic VM
+       EXCEPTION_MARK;
+-      THROW_MSG(vmSymbols::java_io_IOException(), "Bad pathname");          
++      THROW_MSG(vmSymbols::java_io_IOException(), "Bad pathname");
+     }
+     char* error_msg = NULL;
+     jzfile* zip;
+@@ -426,7 +423,7 @@
+       if (TraceClassLoading) {
+         tty->print_cr("[Opened %s]", path);
+       }
+-    } else { 
++    } else {
+       ResourceMark rm(thread);
+       char *msg;
+       if (error_msg == NULL) {
+@@ -438,15 +435,15 @@
+         jio_snprintf(msg, len - 1, "error in opening JAR file <%s> %s", error_msg, path);
+       }
+       EXCEPTION_MARK;
+-      THROW_MSG(vmSymbols::java_lang_ClassNotFoundException(), msg);          
+-    } 
++      THROW_MSG(vmSymbols::java_lang_ClassNotFoundException(), msg);
++    }
+   } else {
+     // Directory
+     *new_entry = new ClassPathDirEntry(path);
+     if (TraceClassLoading) {
+       tty->print_cr("[Path %s]", path);
+     }
+-  }      
++  }
+ }
+ 
+ 
+@@ -456,25 +453,25 @@
+   // check for a regular file
+   struct stat st;
+   if (os::stat(path, &st) == 0) {
+-    if ((st.st_mode & S_IFREG) == S_IFREG) {	        
++    if ((st.st_mode & S_IFREG) == S_IFREG) {
+       char orig_path[JVM_MAXPATHLEN];
+       char canonical_path[JVM_MAXPATHLEN];
+-      
++
+       strcpy(orig_path, path);
+       if (get_canonical_path(orig_path, canonical_path, JVM_MAXPATHLEN)) {
+         char* error_msg = NULL;
+-	jzfile* zip;
+-	{
+-	  // enable call to C land
+-	  JavaThread* thread = JavaThread::current();
+-	  ThreadToNativeFromVM ttn(thread);
+-	  HandleMark hm(thread);
+-	  zip = (*ZipOpen)(canonical_path, &error_msg);
+-	}
+-	if (zip != NULL && error_msg == NULL) {
+-	  // create using canonical path
++        jzfile* zip;
++        {
++          // enable call to C land
++          JavaThread* thread = JavaThread::current();
++          ThreadToNativeFromVM ttn(thread);
++          HandleMark hm(thread);
++          zip = (*ZipOpen)(canonical_path, &error_msg);
++        }
++        if (zip != NULL && error_msg == NULL) {
++          // create using canonical path
+           return new ClassPathZipEntry(zip, canonical_path);
+-	}
++        }
+       }
+     }
+   }
+@@ -486,7 +483,7 @@
+   ClassPathEntry* e = _first_entry;
+   while (e != NULL) {
+     // assume zip entries have been canonicalized
+-    if (strcmp(entry->name(), e->name()) == 0) {   
++    if (strcmp(entry->name(), e->name()) == 0) {
+       return true;
+     }
+     e = e->next();
+@@ -505,17 +502,33 @@
+   }
+ }
+ 
+-void ClassLoader::update_class_path_entry_list(const char *path) {
++void ClassLoader::update_class_path_entry_list(const char *path,
++                                               bool check_for_duplicates) {
+   struct stat st;
+   if (os::stat((char *)path, &st) == 0) {
+     // File or directory found
+     ClassPathEntry* new_entry = NULL;
+     create_class_path_entry((char *)path, st, &new_entry, LazyBootClassLoader);
+-    // Add new entry to linked list 
+-    add_to_list(new_entry);
++    // The kernel VM adds entries dynamically to the end of the classloader
++    // path and doesn't reorder the bootclasspath, which would break
++    // java.lang.Package (see PackageInfo).
++    // Add new entry to linked list
++    if (!check_for_duplicates || !contains_entry(new_entry)) {
++      add_to_list(new_entry);
++    }
+   }
+ }
+ 
++void ClassLoader::print_bootclasspath() {
++  ClassPathEntry* e = _first_entry;
++  tty->print("[bootclasspath= ");
++  while (e != NULL) {
++    tty->print("%s ;", e->name());
++    e = e->next();
++  }
++  tty->print_cr("]");
++}
++
+ void ClassLoader::load_zip_library() {
+   assert(ZipOpen == NULL, "should not load zip library twice");
+   // First make sure native library is loaded
+@@ -541,7 +554,7 @@
+     vm_exit_during_initialization("Corrupted ZIP library", path);
+   }
+ 
+-  // Lookup canonicalize entry in libjava.dll  
++  // Lookup canonicalize entry in libjava.dll
+   void *javalib_handle = os::native_java_library();
+   CanonicalizeEntry = CAST_TO_FN_PTR(canonicalize_fn_t, hpi::dll_lookup(javalib_handle, "Canonicalize"));
+   // This lookup only works on 1.3. Do not check for non-null here
+@@ -572,7 +585,7 @@
+ class PackageInfo: public BasicHashtableEntry {
+ public:
+   const char* _pkgname;       // Package name
+-  int _classpath_index;	      // Index of directory or JAR file loaded from
++  int _classpath_index;       // Index of directory or JAR file loaded from
+ 
+   PackageInfo* next() {
+     return (PackageInfo*)BasicHashtableEntry::next();
+@@ -740,12 +753,12 @@
+       if (new_pkgname == NULL) {
+         return false;
+       }
+-  
++
+       memcpy(new_pkgname, pkgname, n);
+       new_pkgname[n] = '\0';
+       pp = _package_hash_table->new_entry(new_pkgname, n);
+       pp->set_index(classpath_index);
+-      
++
+       // Insert into hash table
+       _package_hash_table->add_entry(pp);
+     }
+@@ -832,9 +845,9 @@
+     Handle class_loader;
+     Handle protection_domain;
+     symbolHandle parsed_name;
+-    instanceKlassHandle result = parser.parseClassFile(h_name, 
+-                                                       class_loader, 
+-                                                       protection_domain, 
++    instanceKlassHandle result = parser.parseClassFile(h_name,
++                                                       class_loader,
++                                                       protection_domain,
+                                                        parsed_name,
+                                                        CHECK_(h));
+ 
+@@ -866,7 +879,8 @@
+ 
+ // Initialize the class loader's access to methods in libzip.  Parse and
+ // process the boot classpath into a list ClassPathEntry objects.  Once
+-// this list has been created, it must not change (see class PackageInfo).
++// this list has been created, it must not change order (see class PackageInfo);
++// it can be appended to, and is by JVMTI and the kernel VM.
+ 
+ void ClassLoader::initialize() {
+   assert(_package_hash_table == NULL, "should have been initialized by now.");
+@@ -874,7 +888,7 @@
+ 
+   if (UsePerfData) {
+     // jvmstat performance counters
+-    NEWPERFTICKCOUNTER(_perf_accumulated_time, SUN_CLS, "time"); 
++    NEWPERFTICKCOUNTER(_perf_accumulated_time, SUN_CLS, "time");
+     NEWPERFTICKCOUNTER(_perf_class_init_time, SUN_CLS, "classInitTime");
+     NEWPERFTICKCOUNTER(_perf_class_verify_time, SUN_CLS, "classVerifyTime");
+     NEWPERFTICKCOUNTER(_perf_class_link_time, SUN_CLS, "classLinkedTime");
+@@ -884,30 +898,30 @@
+ 
+     // The following performance counters are added for measuring the impact
+     // of the bug fix of 6365597. They are mainly focused on finding out
+-    // the behavior of system & user-defined classloader lock, whether 
++    // the behavior of system & user-defined classloader lock, whether
+     // ClassLoader.loadClass/findClass is being called synchronized or not.
+     // Also two additional counters are created to see whether 'UnsyncloadClass'
+     // flag is being set or not and how many times load_instance_class call
+     // fails with linkageError etc.
+-    NEWPERFEVENTCOUNTER(_sync_systemLoaderLockContentionRate, SUN_CLS, 
+-			"systemLoaderLockContentionRate");    
++    NEWPERFEVENTCOUNTER(_sync_systemLoaderLockContentionRate, SUN_CLS,
++                        "systemLoaderLockContentionRate");
+     NEWPERFEVENTCOUNTER(_sync_nonSystemLoaderLockContentionRate, SUN_CLS,
+-			"nonSystemLoaderLockContentionRate");
++                        "nonSystemLoaderLockContentionRate");
+     NEWPERFEVENTCOUNTER(_sync_JVMFindLoadedClassLockFreeCounter, SUN_CLS,
+-			"jvmFindLoadedClassNoLockCalls");
++                        "jvmFindLoadedClassNoLockCalls");
+     NEWPERFEVENTCOUNTER(_sync_JVMDefineClassLockFreeCounter, SUN_CLS,
+-			"jvmDefineClassNoLockCalls");
++                        "jvmDefineClassNoLockCalls");
+ 
+     NEWPERFEVENTCOUNTER(_sync_JNIDefineClassLockFreeCounter, SUN_CLS,
+-			"jniDefineClassNoLockCalls");
+-    
++                        "jniDefineClassNoLockCalls");
++
+     NEWPERFEVENTCOUNTER(_unsafe_defineClassCallCounter, SUN_CLS,
+-			"unsafeDefineClassCalls");
+-    
++                        "unsafeDefineClassCalls");
++
+     NEWPERFEVENTCOUNTER(_isUnsyncloadClass, SUN_CLS, "isUnsyncloadClassSet");
+     NEWPERFEVENTCOUNTER(_load_instance_class_failCounter, SUN_CLS,
+-			"loadInstanceClassFailRate");
+-    
++                        "loadInstanceClassFailRate");
++
+     // increment the isUnsyncloadClass counter if UnsyncloadClass is set.
+     if (UnsyncloadClass) {
+       _isUnsyncloadClass->inc();
+@@ -935,12 +949,12 @@
+ }
+ 
+ jlong ClassLoader::class_init_time_ms() {
+-  return UsePerfData ? 
++  return UsePerfData ?
+     Management::ticks_to_ms(_perf_class_init_time->get_value()) : -1;
+ }
+ 
+ jlong ClassLoader::class_verify_time_ms() {
+-  return UsePerfData ? 
++  return UsePerfData ?
+     Management::ticks_to_ms(_perf_class_verify_time->get_value()) : -1;
+ }
+ 
+@@ -949,7 +963,7 @@
+ }
+ 
+ jlong ClassLoader::class_link_time_ms() {
+-  return UsePerfData ? 
++  return UsePerfData ?
+     Management::ticks_to_ms(_perf_class_link_time->get_value()) : -1;
+ }
+ 
+@@ -968,16 +982,16 @@
+ 
+ 
+ bool ClassLoader::get_canonical_path(char* orig, char* out, int len) {
+-  assert(orig != NULL && out != NULL && len > 0, "bad arguments");        
++  assert(orig != NULL && out != NULL && len > 0, "bad arguments");
+   if (CanonicalizeEntry != NULL) {
+     JNIEnv* env = JavaThread::current()->jni_environment();
+-    if ((CanonicalizeEntry)(env, hpi::native_path(orig), out, len) < 0) {    
+-      return false;  
+-    }    
++    if ((CanonicalizeEntry)(env, hpi::native_path(orig), out, len) < 0) {
++      return false;
++    }
+   } else {
+     // On JDK 1.2.2 the Canonicalize does not exist, so just do nothing
+     strncpy(out, orig, len);
+-    out[len - 1] = '\0';    
++    out[len - 1] = '\0';
+   }
+   return true;
+ }
+@@ -993,10 +1007,10 @@
+ //
+ // Iterates over all class path entries and forces compilation of all methods
+ // in all classes found. Currently, only zip/jar archives are searched.
+-// 
++//
+ // The classes are loaded by the Java level bootstrap class loader, and the
+ // initializer is called. If DelayCompilationDuringStartup is true (default),
+-// the interpreter will run the initialization code. Note that forcing 
++// the interpreter will run the initialization code. Note that forcing
+ // initialization in this way could potentially lead to initialization order
+ // problems, in which case we could just force the initialization bit to be set.
+ 
+@@ -1012,33 +1026,33 @@
+ 
+ 
+ // JDK 1.3 version
+-typedef struct real_jzentry13 { 	/* Zip file entry */
+-    char *name;	  	  	/* entry name */
+-    jint time;            	/* modification time */
+-    jint size;	  	  	/* size of uncompressed data */
+-    jint csize;  	  	/* size of compressed data (zero if uncompressed) */
+-    jint crc;		  	/* crc of uncompressed data */
+-    char *comment;	  	/* optional zip file comment */
+-    jbyte *extra;	  	/* optional extra data */
+-    jint pos;	  	  	/* position of LOC header (if negative) or data */
++typedef struct real_jzentry13 {         /* Zip file entry */
++    char *name;                 /* entry name */
++    jint time;                  /* modification time */
++    jint size;                  /* size of uncompressed data */
++    jint csize;                 /* size of compressed data (zero if uncompressed) */
++    jint crc;                   /* crc of uncompressed data */
++    char *comment;              /* optional zip file comment */
++    jbyte *extra;               /* optional extra data */
++    jint pos;                   /* position of LOC header (if negative) or data */
+ } real_jzentry13;
+ 
+ typedef struct real_jzfile13 {  /* Zip file */
+-    char *name;	  	        /* zip file name */
+-    jint refs;		        /* number of active references */
+-    jint fd;		        /* open file descriptor */
+-    void *lock;		        /* read lock */
+-    char *comment; 	        /* zip file comment */
+-    char *msg;		        /* zip error message */
+-    void *entries;          	/* array of hash cells */
+-    jint total;	  	        /* total number of entries */
++    char *name;                 /* zip file name */
++    jint refs;                  /* number of active references */
++    jint fd;                    /* open file descriptor */
++    void *lock;                 /* read lock */
++    char *comment;              /* zip file comment */
++    char *msg;                  /* zip error message */
++    void *entries;              /* array of hash cells */
++    jint total;                 /* total number of entries */
+     unsigned short *table;      /* Hash chain heads: indexes into entries */
++    jint tablelen;              /* number of hash heads */
++    jint tablelen;              /* number of hash eads */
+     real_jzfile13 *next;        /* next zip file in search list */
+     jzentry *cache;             /* we cache the most recently freed jzentry */
+     /* Information on metadata names in META-INF directory */
+     char **metanames;           /* array of meta names (may have null names) */
+-    jint metacount;	        /* number of slots in metanames array */
++    jint metacount;             /* number of slots in metanames array */
+     /* If there are any per-entry comments, they are in the comments array */
+     char **comments;
+ } real_jzfile13;
+@@ -1232,23 +1246,23 @@
+           for (int n = 0; n < k->methods()->length(); n++) {
+             methodHandle m (THREAD, methodOop(k->methods()->obj_at(n)));
+             if (CompilationPolicy::canBeCompiled(m)) {
+-              // Force compilation           
++              // Force compilation
+               CompileBroker::compile_method(m, InvocationEntryBci,
+                                             methodHandle(), 0, "CTW", THREAD);
+               if (HAS_PENDING_EXCEPTION) {
+                 CLEAR_PENDING_EXCEPTION;
+                 tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_counter, m->name()->as_C_string());
+               }
+-  	    if (TieredCompilation) {
+-  	      // Clobber the first compile and force second tier compilation
+-  	      m->clear_code();
+-  	      CompileBroker::compile_method(m, InvocationEntryBci,
++            if (TieredCompilation) {
++              // Clobber the first compile and force second tier compilation
++              m->clear_code();
++              CompileBroker::compile_method(m, InvocationEntryBci,
+                                             methodHandle(), 0, "CTW", THREAD);
+-  	      if (HAS_PENDING_EXCEPTION) {
+-  		CLEAR_PENDING_EXCEPTION;
+-  		tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_counter, m->name()->as_C_string());
+-  	      }
+-  	    }
++              if (HAS_PENDING_EXCEPTION) {
++                CLEAR_PENDING_EXCEPTION;
++                tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_counter, m->name()->as_C_string());
++              }
++            }
+             }
+           }
+         }
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/classLoader.hpp openjdk/hotspot/src/share/vm/classfile/classLoader.hpp
+--- openjdk6/hotspot/src/share/vm/classfile/classLoader.hpp	2008-02-28 05:02:33.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/classLoader.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)classLoader.hpp	1.64 07/05/05 17:06:45 JVM"
+-#endif 
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The VM class loader.
+@@ -49,7 +46,10 @@
+  public:
+   // Next entry in class path
+   ClassPathEntry* next()              { return _next; }
+-  void set_next(ClassPathEntry* next) { _next = next; }
++  void set_next(ClassPathEntry* next) {
++    // may have unlocked readers, so write atomically.
++    OrderAccess::release_store_ptr(&_next, next);
++  }
+   virtual bool is_jar_file() = 0;
+   virtual const char* name() = 0;
+   virtual bool is_lazy();
+@@ -81,14 +81,14 @@
+ // Type definitions for zip file and zip file entry
+ typedef void* jzfile;
+ typedef struct {
+-  char *name;	  	  	/* entry name */
+-  jlong time;            	/* modification time */
+-  jlong size;	  	  	/* size of uncompressed data */
+-  jlong csize;  	  	/* size of compressed data (zero if uncompressed) */
+-  jint crc;		  	/* crc of uncompressed data */
+-  char *comment;	  	/* optional zip file comment */
+-  jbyte *extra;	  		/* optional extra data */
+-  jlong pos;	  	  	/* position of LOC header (if negative) or data */
++  char *name;                   /* entry name */
++  jlong time;                   /* modification time */
++  jlong size;                   /* size of uncompressed data */
++  jlong csize;                  /* size of compressed data (zero if uncompressed) */
++  jint crc;                     /* crc of uncompressed data */
++  char *comment;                /* optional zip file comment */
++  jbyte *extra;                 /* optional extra data */
++  jlong pos;                    /* position of LOC header (if negative) or data */
+ } jzentry;
+ 
+ 
+@@ -144,7 +144,7 @@
+   };
+  private:
+   friend class LazyClassPathEntry;
+-  
++
+   // Performance counters
+   static PerfCounter* _perf_accumulated_time;
+   static PerfCounter* _perf_classes_inited;
+@@ -152,13 +152,13 @@
+   static PerfCounter* _perf_class_verify_time;
+   static PerfCounter* _perf_classes_linked;
+   static PerfCounter* _perf_class_link_time;
+-  
++
+   static PerfCounter* _sync_systemLoaderLockContentionRate;
+   static PerfCounter* _sync_nonSystemLoaderLockContentionRate;
+   static PerfCounter* _sync_JVMFindLoadedClassLockFreeCounter;
+   static PerfCounter* _sync_JVMDefineClassLockFreeCounter;
+   static PerfCounter* _sync_JNIDefineClassLockFreeCounter;
+-  
++
+   static PerfCounter* _unsafe_defineClassCallCounter;
+   static PerfCounter* _isUnsyncloadClass;
+   static PerfCounter* _load_instance_class_failCounter;
+@@ -173,7 +173,7 @@
+ 
+   // Hash function
+   static unsigned int hash(const char *s, int n);
+-  // Returns the package file name corresponding to the specified package 
++  // Returns the package file name corresponding to the specified package
+   // or class name, or null if not found.
+   static PackageInfo* lookup_package(const char *pkgname);
+   // Adds a new package entry for the specified class or package name and
+@@ -185,12 +185,16 @@
+   static void setup_bootstrap_search_path();
+   static void load_zip_library();
+   static void create_class_path_entry(char *path, struct stat st, ClassPathEntry **new_entry, bool lazy);
+-  static void update_class_path_entry_list(const char *path);
+ 
+   // Canonicalizes path names, so strcmp will work properly. This is mainly
+   // to avoid confusing the zip library
+   static bool get_canonical_path(char* orig, char* out, int len);
+  public:
++  // Used by the kernel jvm.
++  static void update_class_path_entry_list(const char *path,
++                                           bool check_for_duplicates);
++  static void print_bootclasspath();
++
+   // Timing
+   static PerfCounter* perf_accumulated_time()  { return _perf_accumulated_time; }
+   static PerfCounter* perf_classes_inited()    { return _perf_classes_inited; }
+@@ -213,7 +217,7 @@
+   static PerfCounter* sync_JVMFindLoadedClassLockFreeCounter() {
+     return _sync_JVMFindLoadedClassLockFreeCounter;
+   }
+-  
++
+   // Record how many calls to JVM_DefineClass w/o holding a lock
+   static PerfCounter* sync_JVMDefineClassLockFreeCounter() {
+     return _sync_JVMDefineClassLockFreeCounter;
+@@ -234,9 +238,9 @@
+   static PerfCounter* load_instance_class_failCounter() {
+     return _load_instance_class_failCounter;
+   }
+-  
++
+   // Load individual .class file
+-  static instanceKlassHandle load_classfile(symbolHandle h_name, TRAPS);  
++  static instanceKlassHandle load_classfile(symbolHandle h_name, TRAPS);
+ 
+   // If the specified package has been loaded by the system, then returns
+   // the name of the directory or ZIP file that the package was loaded from.
+@@ -288,7 +292,7 @@
+   static void add_to_list(ClassPathEntry* new_entry);
+ 
+   // creates a class path zip entry (returns NULL if JAR file cannot be opened)
+-  static ClassPathZipEntry* create_class_path_zip_entry(const char *apath);   
++  static ClassPathZipEntry* create_class_path_zip_entry(const char *apath);
+ 
+   // Debugging
+   static void verify()              PRODUCT_RETURN;
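Apart from the tab-to-space churn, the substantive change to classLoader.hpp above is that update_class_path_entry_list() moves to the public section, gains a check_for_duplicates flag, and is joined by print_bootclasspath(), both for the kernel JVM. A minimal standalone sketch of a duplicate-checking append, using an illustrative string list rather than HotSpot's ClassPathEntry chain:

#include <algorithm>
#include <string>
#include <vector>

static std::vector<std::string> bootclasspath;  // stand-in for the entry list

// Append path to the list; with check_for_duplicates set, a path that is
// already present is skipped instead of being added a second time.
void update_class_path_entry_list(const std::string& path,
                                  bool check_for_duplicates) {
  if (check_for_duplicates &&
      std::find(bootclasspath.begin(), bootclasspath.end(), path) !=
          bootclasspath.end()) {
    return;
  }
  bootclasspath.push_back(path);
}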
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/dictionary.cpp openjdk/hotspot/src/share/vm/classfile/dictionary.cpp
+--- openjdk6/hotspot/src/share/vm/classfile/dictionary.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/dictionary.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)dictionary.cpp	1.26 07/05/17 15:50:16 JVM"
+-#endif
+ /*
+  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -83,12 +80,12 @@
+   if (protection_domain == instanceKlass::cast(klass())->protection_domain()) {
+     // Ensure this doesn't show up in the pd_set (invariant)
+     bool in_pd_set = false;
+-    for (ProtectionDomainEntry* current = _pd_set; 
+-                                current != NULL; 
++    for (ProtectionDomainEntry* current = _pd_set;
++                                current != NULL;
+                                 current = current->next()) {
+       if (current->protection_domain() == protection_domain) {
+-	in_pd_set = true;
+-	break;
++        in_pd_set = true;
++        break;
+       }
+     }
+     if (in_pd_set) {
+@@ -103,8 +100,8 @@
+     return true;
+   }
+ 
+-  for (ProtectionDomainEntry* current = _pd_set; 
+-                              current != NULL; 
++  for (ProtectionDomainEntry* current = _pd_set;
++                              current != NULL;
+                               current = current->next()) {
+     if (current->protection_domain() == protection_domain) return true;
+   }
+@@ -167,7 +164,7 @@
+             // do anything special with the index.
+             continue;  // robustness
+           }
+-      
++
+           constantPoolOop pvcp = (constantPoolOop)JNIHandles::resolve(cp_ref);
+           if (pvcp == NULL) {
+             // this entry has been GC'ed so remove it
+@@ -185,7 +182,7 @@
+               guarantee(false, "sanity check");
+             }
+           }
+-      
++
+           GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
+           if (method_refs != NULL) {
+             RC_TRACE(0x00000200, ("unload: previous methods length=%d",
+@@ -199,7 +196,7 @@
+                 // do anything special with the index.
+                 continue;  // robustness
+               }
+-            
++
+               methodOop method = (methodOop)JNIHandles::resolve(method_ref);
+               if (method == NULL) {
+                 // this method entry has been GC'ed so remove it
+@@ -297,7 +294,7 @@
+                           probe != NULL;
+                           probe = probe->next()) {
+       oop e = probe->klass();
+-      oop class_loader = probe->loader();            
++      oop class_loader = probe->loader();
+       if (is_strongly_reachable(class_loader, e)) {
+         blk->do_oop((oop*)probe->klass_addr());
+         if (class_loader != NULL) {
+@@ -447,8 +444,8 @@
+   symbolOop name_ = class_name();
+   oop loader_ = class_loader();
+   debug_only(_lookup_count++);
+-  for (DictionaryEntry* entry = bucket(index); 
+-                        entry != NULL; 
++  for (DictionaryEntry* entry = bucket(index);
++                        entry != NULL;
+                         entry = entry->next()) {
+     if (entry->hash() == hash && entry->equals(name_, loader_)) {
+       return entry;
+@@ -500,12 +497,12 @@
+   DictionaryEntry* entry = get_entry(index, hash, klass_name, loader);
+ 
+   assert(entry != NULL,"entry must be present, we just created it");
+-  assert(protection_domain() != NULL, 
++  assert(protection_domain() != NULL,
+          "real protection domain should be present");
+ 
+   entry->add_protection_domain(protection_domain());
+ 
+-  assert(entry->contains_protection_domain(protection_domain()), 
++  assert(entry->contains_protection_domain(protection_domain()),
+          "now protection domain should be present");
+ }
+ 
+@@ -564,16 +561,16 @@
+   tty->print_cr("^ indicates that initiating loader is different from "
+                 "defining loader");
+ 
+-  for (int index = 0; index < table_size(); index++) {    
++  for (int index = 0; index < table_size(); index++) {
+     for (DictionaryEntry* probe = bucket(index);
+                           probe != NULL;
+                           probe = probe->next()) {
+       if (Verbose) tty->print("%4d: ", index);
+       klassOop e = probe->klass();
+       oop class_loader =  probe->loader();
+-      bool is_defining_class = 
++      bool is_defining_class =
+          (class_loader == instanceKlass::cast(e)->class_loader());
+-      tty->print("%s%s", is_defining_class ? " " : "^", 
++      tty->print("%s%s", is_defining_class ? " " : "^",
+                    Klass::cast(e)->external_name());
+       if (class_loader != NULL) {
+         tty->print(", loader ");
+@@ -595,15 +592,15 @@
+                           probe = probe->next()) {
+       klassOop e = probe->klass();
+       oop class_loader = probe->loader();
+-      guarantee(Klass::cast(e)->oop_is_instance(), 
++      guarantee(Klass::cast(e)->oop_is_instance(),
+                               "Verify of system dictionary failed");
+       // class loader must be present;  a null class loader is the
+       // bootstrap loader
+-      guarantee(class_loader == NULL || class_loader->is_instance(), 
++      guarantee(class_loader == NULL || class_loader->is_instance(),
+                 "checking type of class_loader");
+       e->verify();
+       probe->verify_protection_domain_set();
+-      element_count++; 
++      element_count++;
+     }
+   }
+   guarantee(number_of_entries() == element_count,
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/dictionary.hpp openjdk/hotspot/src/share/vm/classfile/dictionary.hpp
+--- openjdk6/hotspot/src/share/vm/classfile/dictionary.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/dictionary.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)dictionary.hpp	1.15 07/05/05 17:05:47 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class DictionaryEntry;
+@@ -59,7 +56,7 @@
+ public:
+   Dictionary(int table_size);
+   Dictionary(int table_size, HashtableBucket* t, int number_of_entries);
+-  
++
+   DictionaryEntry* new_entry(unsigned int hash, klassOop klass, oop loader);
+ 
+   DictionaryEntry* new_entry();
+@@ -114,7 +111,7 @@
+   void restore(SerializeOopClosure* soc);
+   void reorder_dictionary();
+ 
+-  
++
+ #ifndef PRODUCT
+   void print();
+ #endif
+@@ -220,4 +217,3 @@
+     tty->print_cr("pd set = #%d", count);
+   }
+ };
+-
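The dictionary.cpp and dictionary.hpp hunks above are pure re-indentation, but the loop they keep touching, the walk of a dictionary entry's protection-domain set, is compact enough to model standalone. Stand-in types, not the VM's oop machinery:

#include <cstddef>

struct ProtectionDomainEntry {
  const void*            protection_domain;
  ProtectionDomainEntry* next;
};

// Linear scan of the singly linked pd_set, the same shape as the loop
// re-indented in contains_protection_domain() above.
bool contains_protection_domain(ProtectionDomainEntry* pd_set,
                                const void* protection_domain) {
  for (ProtectionDomainEntry* current = pd_set;
       current != NULL;
       current = current->next) {
    if (current->protection_domain == protection_domain) return true;
  }
  return false;
}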
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/javaAssertions.cpp openjdk/hotspot/src/share/vm/classfile/javaAssertions.cpp
+--- openjdk6/hotspot/src/share/vm/classfile/javaAssertions.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/javaAssertions.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)javaAssertions.cpp	1.14 07/05/05 17:06:50 JVM"
+-#endif
+ /*
+  * Copyright 2000-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,16 +19,16 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+ #include "incls/_javaAssertions.cpp.incl"
+ 
+-bool				JavaAssertions::_userDefault = false;
+-bool				JavaAssertions::_sysDefault = false;
+-JavaAssertions::OptionList*	JavaAssertions::_classes = 0;
+-JavaAssertions::OptionList*	JavaAssertions::_packages = 0;
++bool                            JavaAssertions::_userDefault = false;
++bool                            JavaAssertions::_sysDefault = false;
++JavaAssertions::OptionList*     JavaAssertions::_classes = 0;
++JavaAssertions::OptionList*     JavaAssertions::_packages = 0;
+ 
+ JavaAssertions::OptionList::OptionList(const char* name, bool enabled,
+   OptionList* next) {
+@@ -163,7 +160,7 @@
+     assert(len == 0 || classname[len] == '/', "not a package name");
+     for (OptionList* p = _packages; p != 0; p = p->next()) {
+       if (strncmp(p->name(), classname, len) == 0 && p->name()[len] == '\0') {
+-	return p;
++        return p;
+       }
+     }
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/javaAssertions.hpp openjdk/hotspot/src/share/vm/classfile/javaAssertions.hpp
+--- openjdk6/hotspot/src/share/vm/classfile/javaAssertions.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/javaAssertions.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)javaAssertions.hpp	1.11 07/05/05 17:06:50 JVM"
+-#endif
+ /*
+  * Copyright 2000 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class JavaAssertions: AllStatic {
+@@ -54,29 +51,29 @@
+   static inline void trace(const char* name, const char* typefound,
+     const char* namefound, bool enabled);
+ 
+-  static inline OptionList*	match_class(const char* classname);
+-  static OptionList*		match_package(const char* classname);
++  static inline OptionList*     match_class(const char* classname);
++  static OptionList*            match_package(const char* classname);
+ 
+-  static bool		_userDefault;	// User class default (-ea/-da).
+-  static bool		_sysDefault;	// System class default (-esa/-dsa).
+-  static OptionList*	_classes;	// Options for classes.
+-  static OptionList*	_packages;	// Options for package trees.
++  static bool           _userDefault;   // User class default (-ea/-da).
++  static bool           _sysDefault;    // System class default (-esa/-dsa).
++  static OptionList*    _classes;       // Options for classes.
++  static OptionList*    _packages;      // Options for package trees.
+ };
+ 
+ class JavaAssertions::OptionList: public CHeapObj {
+ public:
+   inline OptionList(const char* name, bool enable, OptionList* next);
+ 
+-  inline const char*	name() const	{ return _name; }
+-  inline bool		enabled() const	{ return _enabled; }
+-  inline OptionList*	next() const	{ return _next; }
++  inline const char*    name() const    { return _name; }
++  inline bool           enabled() const { return _enabled; }
++  inline OptionList*    next() const    { return _next; }
+ 
+   static int count(OptionList* p);
+ 
+ private:
+-  const char*	_name;
+-  OptionList*	_next;
+-  bool		_enabled;
++  const char*   _name;
++  OptionList*   _next;
++  bool          _enabled;
+ };
+ 
+ inline bool JavaAssertions::userClassDefault() {
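The javaAssertions changes are also whitespace-only, but the package lookup visible in the javaAssertions.cpp hunk is worth isolating: strncmp over a length-limited prefix plus a terminator check gives exact-name matching against successively shorter package prefixes. A standalone sketch with the same list shape (names are illustrative):

#include <cstddef>
#include <cstring>

struct OptionList {
  const char* name;
  bool        enabled;
  OptionList* next;
};

// Match classname truncated to len characters against each stored package
// option; name[len] == '\0' insists the stored name is exactly len long,
// so a lookup for "java/lang" cannot hit an option for "java/language".
OptionList* match_prefix(OptionList* packages, const char* classname,
                         size_t len) {
  for (OptionList* p = packages; p != NULL; p = p->next) {
    if (strncmp(p->name, classname, len) == 0 && p->name[len] == '\0') {
      return p;
    }
  }
  return NULL;
}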
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/javaClasses.cpp openjdk/hotspot/src/share/vm/classfile/javaClasses.cpp
+--- openjdk6/hotspot/src/share/vm/classfile/javaClasses.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/javaClasses.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)javaClasses.cpp	1.247 07/05/17 15:50:20 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -147,7 +144,7 @@
+   { JavaThread* thread = (JavaThread*)THREAD;
+     assert(thread->is_Java_thread(), "must be java thread");
+     ThreadToNativeFromVM ttn(thread);
+-    HandleMark hm(thread);    
++    HandleMark hm(thread);
+     js = (_to_java_string_fn)(thread->jni_environment(), str);
+   }
+   return Handle(THREAD, JNIHandles::resolve(js));
+@@ -187,7 +184,7 @@
+       c = to_char;
+     }
+     to_buffer->char_at_put(index, c);
+-  }  
++  }
+   return string;
+ }
+ 
+@@ -217,9 +214,9 @@
+     char* chars = NEW_RESOURCE_ARRAY(char, utf8_length + 1);
+     UNICODE::convert_to_utf8(value->char_at_addr(offset), length, chars);
+     // Allocate the symbol
+-    result = oopFactory::new_symbol_handle(chars, utf8_length, CHECK_(symbolHandle()));  
++    result = oopFactory::new_symbol_handle(chars, utf8_length, CHECK_(symbolHandle()));
+   } else {
+-    result = oopFactory::new_symbol_handle("", 0, CHECK_(symbolHandle()));  
++    result = oopFactory::new_symbol_handle("", 0, CHECK_(symbolHandle()));
+   }
+   return result;
+ }
+@@ -274,7 +271,7 @@
+   int          offset = java_lang_String::offset(obj);
+   int          length = java_lang_String::length(obj);
+ 
+-  int end = MIN2(length, 100); 
++  int end = MIN2(length, 100);
+   if (value == NULL) {
+     // This can happen if, e.g., printing a String
+     // object before its initializer has been called
+@@ -308,7 +305,7 @@
+       Handle comp_mirror;
+       if (k->oop_is_typeArray()) {
+         BasicType type = typeArrayKlass::cast(k->as_klassOop())->element_type();
+-        comp_mirror = SystemDictionary::java_mirror(type);
++        comp_mirror = Universe::java_mirror(type);
+         assert(comp_mirror.not_null(), "must have primitive mirror");
+       } else if (k->oop_is_objArray()) {
+         klassOop element_klass = objArrayKlass::cast(k->as_klassOop())->element_klass();
+@@ -381,7 +378,7 @@
+ 
+ 
+ bool java_lang_Class::is_primitive(oop java_class) {
+-  klassOop k = klassOop(java_class->obj_field(klass_offset)); 
++  klassOop k = klassOop(java_class->obj_field(klass_offset));
+   return k == NULL;
+ }
+ 
+@@ -394,15 +391,15 @@
+     // Note: create_basic_type_mirror above initializes ak to a non-null value.
+     type = arrayKlass::cast(ak)->element_type();
+   } else {
+-    assert(java_class == SystemDictionary::void_mirror(), "only valid non-array primitive");
++    assert(java_class == Universe::void_mirror(), "only valid non-array primitive");
+   }
+-  assert(SystemDictionary::java_mirror(type) == java_class, "must be consistent");
++  assert(Universe::java_mirror(type) == java_class, "must be consistent");
+   return type;
+ }
+ 
+ 
+ oop java_lang_Class::primitive_mirror(BasicType t) {
+-  oop mirror = SystemDictionary::java_mirror(t);
++  oop mirror = Universe::java_mirror(t);
+   assert(mirror != NULL && mirror->is_a(SystemDictionary::class_klass()), "must be a Class");
+   assert(java_lang_Class::is_primitive(mirror), "must be primitive");
+   return mirror;
+@@ -417,7 +414,7 @@
+ 
+   klassOop k = SystemDictionary::class_klass();
+   // The classRedefinedCount field is only present starting in 1.5,
+-  // so don't go fatal. 
++  // so don't go fatal.
+   COMPUTE_OPTIONAL_OFFSET("java.lang.Class", classRedefinedCount_offset,
+     k, vmSymbols::classRedefinedCount_name(), vmSymbols::int_signature());
+ }
+@@ -447,8 +444,8 @@
+ 
+ // Note: JDK1.1 and before had a privateInfo_offset field which was used for the
+ //       platform thread structure, and an eetop offset which was used for thread
+-//       local storage (and unused by the HotSpot VM). In JDK1.2 the two structures 
+-//       merged, so in the HotSpot VM we just use the eetop field for the thread 
++//       local storage (and unused by the HotSpot VM). In JDK1.2 the two structures
++//       merged, so in the HotSpot VM we just use the eetop field for the thread
+ //       instead of the privateInfo_offset.
+ //
+ // Note: The stackSize field is only present starting in 1.4.
+@@ -465,7 +462,7 @@
+ int java_lang_Thread::_tid_offset = 0;
+ int java_lang_Thread::_thread_status_offset = 0;
+ int java_lang_Thread::_park_blocker_offset = 0;
+-int java_lang_Thread::_park_event_offset = 0 ; 
++int java_lang_Thread::_park_event_offset = 0 ;
+ 
+ 
+ void java_lang_Thread::compute_offsets() {
+@@ -480,12 +477,12 @@
+   COMPUTE_OFFSET("java.lang.Thread", _daemon_offset,    k, vmSymbols::daemon_name(),    vmSymbols::bool_signature());
+   COMPUTE_OFFSET("java.lang.Thread", _eetop_offset,     k, vmSymbols::eetop_name(),     vmSymbols::long_signature());
+   COMPUTE_OFFSET("java.lang.Thread", _stillborn_offset, k, vmSymbols::stillborn_name(), vmSymbols::bool_signature());
+-  // The stackSize field is only present starting in 1.4, so don't go fatal. 
++  // The stackSize field is only present starting in 1.4, so don't go fatal.
+   COMPUTE_OPTIONAL_OFFSET("java.lang.Thread", _stackSize_offset, k, vmSymbols::stackSize_name(), vmSymbols::long_signature());
+-  // The tid and thread_status fields are only present starting in 1.5, so don't go fatal. 
++  // The tid and thread_status fields are only present starting in 1.5, so don't go fatal.
+   COMPUTE_OPTIONAL_OFFSET("java.lang.Thread", _tid_offset, k, vmSymbols::thread_id_name(), vmSymbols::long_signature());
+   COMPUTE_OPTIONAL_OFFSET("java.lang.Thread", _thread_status_offset, k, vmSymbols::thread_status_name(), vmSymbols::int_signature());
+-  // The parkBlocker field is only present starting in 1.6, so don't go fatal. 
++  // The parkBlocker field is only present starting in 1.6, so don't go fatal.
+   COMPUTE_OPTIONAL_OFFSET("java.lang.Thread", _park_blocker_offset, k, vmSymbols::park_blocker_name(), vmSymbols::object_signature());
+   COMPUTE_OPTIONAL_OFFSET("java.lang.Thread", _park_event_offset, k, vmSymbols::park_event_name(),
+  vmSymbols::long_signature());
+@@ -498,8 +495,8 @@
+ 
+ 
+ void java_lang_Thread::set_thread(oop java_thread, JavaThread* thread) {
+-  // We are storing a JavaThread* (malloc'ed data) into a long field in the thread 
+-  // object. The store has to be 64-bit wide so we use a pointer store, but we 
++  // We are storing a JavaThread* (malloc'ed data) into a long field in the thread
++  // object. The store has to be 64-bit wide so we use a pointer store, but we
+   // cannot call oopDesc::obj_field_put since it includes a write barrier!
+   oop* addr = java_thread->obj_field_addr(_eetop_offset);
+   *addr = (oop) thread;
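The comment rewrapped above records a real subtlety: the JavaThread* is written into java.lang.Thread's long eetop field with a raw pointer store because the ordinary setter would run a GC write barrier that must not fire here. A VM-free toy of the same move; the layout and types are assumptions of the sketch, and the aliasing cast is deliberate:

#include <cstdint>
#include <cstdio>

struct JavaThread { int id; };

struct ThreadOop {
  int64_t eetop;  // models the Java-level long field backing the VM pointer
};

// Pointer-width store into the long slot, with no barrier, echoing
// java_lang_Thread::set_thread() above.
void set_thread(ThreadOop* java_thread, JavaThread* thread) {
  JavaThread** addr = reinterpret_cast<JavaThread**>(&java_thread->eetop);
  *addr = thread;
}

int main() {
  JavaThread t = { 1 };
  ThreadOop obj = { 0 };
  set_thread(&obj, &t);
  std::printf("eetop = 0x%llx\n", (unsigned long long)obj.eetop);
  return 0;
}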
+@@ -507,7 +504,7 @@
+ 
+ 
+ typeArrayOop java_lang_Thread::name(oop java_thread) {
+-  oop name = java_thread->obj_field(_name_offset);  
++  oop name = java_thread->obj_field(_name_offset);
+   assert(name == NULL || (name->is_typeArray() && typeArrayKlass::cast(name->klass())->element_type() == T_CHAR), "just checking");
+   return typeArrayOop(name);
+ }
+@@ -606,7 +603,7 @@
+     if (thr == NULL) {
+       // the thread hasn't run yet or is in the process of exiting
+       return NEW;
+-    } 
++    }
+     return (java_lang_Thread::ThreadStatus)JVMTI_THREAD_STATE_ALIVE;
+   }
+ }
+@@ -622,7 +619,7 @@
+ }
+ 
+ oop java_lang_Thread::park_blocker(oop java_thread) {
+-  assert(JDK_Version::supports_thread_park_blocker() && _park_blocker_offset != 0, 
++  assert(JDK_Version::supports_thread_park_blocker() && _park_blocker_offset != 0,
+          "Must support parkBlocker field");
+ 
+   if (_park_blocker_offset > 0) {
+@@ -638,7 +635,7 @@
+   }
+   return 0;
+ }
+- 
++
+ bool java_lang_Thread::set_park_event(oop java_thread, jlong ptr) {
+   if (_park_event_offset > 0) {
+     java_thread->long_field_put(_park_event_offset, ptr);
+@@ -803,7 +800,7 @@
+ }
+ 
+ // Print stack trace element to resource allocated buffer
+-char* java_lang_Throwable::print_stack_element_to_buffer(methodOop method, int bci) { 
++char* java_lang_Throwable::print_stack_element_to_buffer(methodOop method, int bci) {
+   // Get strings and string lengths
+   instanceKlass* klass = instanceKlass::cast(method->method_holder());
+   const char* klass_name  = klass->external_name();
+@@ -825,14 +822,14 @@
+   sprintf(buf, "\tat %s.%s", klass_name, method_name);
+   if (method->is_native()) {
+     strcat(buf, "(Native Method)");
+-  } else {    
++  } else {
+     int line_number = method->line_number_from_bci(bci);
+     if (source_file_name != NULL && (line_number != -1)) {
+       // Sourcename and linenumber
+       sprintf(buf + (int)strlen(buf), "(%s:%d)", source_file_name, line_number);
+     } else if (source_file_name != NULL) {
+       // Just sourcename
+-      sprintf(buf + (int)strlen(buf), "(%s)", source_file_name);      
++      sprintf(buf + (int)strlen(buf), "(%s)", source_file_name);
+     } else {
+       // Neither sourcename nor linenumber
+       sprintf(buf + (int)strlen(buf), "(Unknown Source)");
+@@ -847,13 +844,13 @@
+ }
+ 
+ 
+-void java_lang_Throwable::print_stack_element(Handle stream, methodOop method, int bci) {  
++void java_lang_Throwable::print_stack_element(Handle stream, methodOop method, int bci) {
+   ResourceMark rm;
+   char* buf = print_stack_element_to_buffer(method, bci);
+   print_to_stream(stream, buf);
+ }
+ 
+-void java_lang_Throwable::print_stack_element(outputStream *st, methodOop method, int bci) {  
++void java_lang_Throwable::print_stack_element(outputStream *st, methodOop method, int bci) {
+   ResourceMark rm;
+   char* buf = print_stack_element_to_buffer(method, bci);
+   st->print_cr("%s", buf);
+@@ -867,12 +864,12 @@
+     JavaValue result(T_VOID);
+     Handle arg (THREAD, oopFactory::new_charArray(str, THREAD));
+     if (!HAS_PENDING_EXCEPTION) {
+-      JavaCalls::call_virtual(&result, 
+-                              stream, 
++      JavaCalls::call_virtual(&result,
++                              stream,
+                               KlassHandle(THREAD, stream->klass()),
+-                              vmSymbolHandles::println_name(), 
+-                              vmSymbolHandles::char_array_void_signature(), 
+-                              arg, 
++                              vmSymbolHandles::println_name(),
++                              vmSymbolHandles::char_array_void_signature(),
++                              arg,
+                               THREAD);
+     }
+     // Ignore any exceptions. we are in the middle of exception handling. Same as classic VM.
+@@ -897,11 +894,11 @@
+       st->print_cr(no_stack_trace_message());
+       return;
+     }
+-  
++
+     while (result.not_null()) {
+       objArrayHandle methods (THREAD,
+                               objArrayOop(result->obj_at(trace_methods_offset)));
+-      typeArrayHandle bcis (THREAD, 
++      typeArrayHandle bcis (THREAD,
+                             typeArrayOop(result->obj_at(trace_bcis_offset)));
+ 
+       if (methods.is_null() || bcis.is_null()) {
+@@ -936,7 +933,7 @@
+         h_throwable = Handle(THREAD, (oop) result.get_jobject());
+         if (h_throwable.not_null()) {
+           st->print("Caused by: ");
+-          print(h_throwable, st); 
++          print(h_throwable, st);
+           st->cr();
+         }
+       }
+@@ -954,11 +951,11 @@
+     print_to_stream(stream, no_stack_trace_message());
+     return;
+   }
+-  
++
+   while (result.not_null()) {
+     objArrayHandle methods (thread,
+                             objArrayOop(result->obj_at(trace_methods_offset)));
+-    typeArrayHandle bcis (thread, 
++    typeArrayHandle bcis (thread,
+                           typeArrayOop(result->obj_at(trace_bcis_offset)));
+ 
+     if (methods.is_null() || bcis.is_null()) {
+@@ -1095,13 +1092,13 @@
+   JavaThread* thread = (JavaThread*)THREAD;
+   BacktraceBuilder bt(CHECK);
+ 
+-  // Instead of using vframe directly, this version of fill_in_stack_trace 
+-  // basically handles everything by hand. This significantly improved the 
+-  // speed of this method call up to 28.5% on Solaris sparc. 27.1% on Windows.  
++  // Instead of using vframe directly, this version of fill_in_stack_trace
++  // basically handles everything by hand. This significantly improved the
++  // speed of this method call up to 28.5% on Solaris sparc. 27.1% on Windows.
+   // See bug 6333838 for more details.
+   // The "ASSERT" here is to verify this method generates exactly the same stack
+   // trace as utilizing vframe.
+-#ifdef ASSERT 
++#ifdef ASSERT
+   vframeStream st(thread);
+   methodHandle st_method(THREAD, st.method());
+ #endif
+@@ -1111,11 +1108,11 @@
+   nmethod* nm = NULL;
+   bool skip_fillInStackTrace_check = false;
+   bool skip_throwableInit_check = false;
+- 
++
+   for (frame fr = thread->last_frame(); max_depth != total_count;) {
+     methodOop method = NULL;
+     int bci = 0;
+-    
++
+     // Compiled java method case.
+     if (decode_offset != 0) {
+       DebugInfoReadStream stream(nm, decode_offset);
+@@ -1123,9 +1120,9 @@
+       method = (methodOop)nm->oop_at(stream.read_int());
+       bci = stream.read_bci();
+     } else {
+-      if (fr.is_first_frame()) break;      
++      if (fr.is_first_frame()) break;
+       address pc = fr.pc();
+-      if (AbstractInterpreter::contains(pc)) {
++      if (fr.is_interpreted_frame()) {
+         intptr_t bcx = fr.interpreter_frame_bcx();
+         method = fr.interpreter_frame_method();
+         bci =  fr.is_bci(bcx) ? bcx : method->bci_from((address)bcx);
+@@ -1145,12 +1142,12 @@
+         } else {
+           PcDesc* pd = nm->pc_desc_at(pc);
+           decode_offset = pd->scope_decode_offset();
+-          // if decode_offset is not equal to 0, it will execute the 
++          // if decode_offset is not equal to 0, it will execute the
+           // "compiled java method case" at the beginning of the loop.
+           continue;
+         }
+       }
+-    }  
++    }
+ #ifdef ASSERT
+   assert(st_method() == method && st.bci() == bci,
+          "Wrong stack trace");
+@@ -1173,7 +1170,7 @@
+     // that belongs to a superclass of the exception we are going to skip
+     // them in the stack trace. This is similar to classic VM.
+     if (!skip_throwableInit_check) {
+-      if (method->name() == vmSymbols::object_initializer_name() &&  
++      if (method->name() == vmSymbols::object_initializer_name() &&
+           throwable->is_a(method->method_holder())) {
+         continue;
+       } else {
+@@ -1194,24 +1191,24 @@
+   if (!StackTraceInThrowable) {
+     return;
+   }
+- 
++
+   // Disable stack traces for some preallocated out of memory errors
+   if (!Universe::should_fill_in_stack_trace(throwable)) {
+     return;
+   }
+- 
++
+   PRESERVE_EXCEPTION_MARK;
+- 
++
+   JavaThread* thread = JavaThread::active();
+   fill_in_stack_trace(throwable, thread);
+   // ignore exceptions thrown during stack trace filling
+-  CLEAR_PENDING_EXCEPTION;  
++  CLEAR_PENDING_EXCEPTION;
+ }
+ 
+ void java_lang_Throwable::allocate_backtrace(Handle throwable, TRAPS) {
+   // Allocate stack trace - backtrace is created but not filled in
+ 
+-  // No-op if stack trace is disabled 
++  // No-op if stack trace is disabled
+   if (!StackTraceInThrowable) return;
+ 
+   objArrayOop h_oop = oopFactory::new_objectArray(trace_size, CHECK);
+@@ -1220,7 +1217,7 @@
+   objArrayHandle methods (THREAD, m_oop);
+   typeArrayOop b = oopFactory::new_shortArray(trace_chunk_size, CHECK);
+   typeArrayHandle bcis(THREAD, b);
+-  
++
+   // backtrace has space for one chunk (next is NULL)
+   backtrace->obj_at_put(trace_methods_offset, methods());
+   backtrace->obj_at_put(trace_bcis_offset, bcis());
+@@ -1242,7 +1239,7 @@
+   oop m = objArrayOop(backtrace)->obj_at(trace_methods_offset);
+   objArrayOop methods = objArrayOop(m);
+   assert(methods != NULL && methods->length() > 0, "method array not preallocated");
+-  
++
+   oop b = objArrayOop(backtrace)->obj_at(trace_bcis_offset);
+   typeArrayOop bcis = typeArrayOop(b);
+   assert(bcis != NULL, "bci array not preallocated");
+@@ -1251,16 +1248,16 @@
+ 
+   JavaThread* thread = JavaThread::current();
+   ResourceMark rm(thread);
+-  vframeStream st(thread); 
++  vframeStream st(thread);
+ 
+-  // Unlike fill_in_stack_trace we do not skip fillInStackTrace or throwable init 
+-  // methods as preallocated errors aren't created by "java" code. 
++  // Unlike fill_in_stack_trace we do not skip fillInStackTrace or throwable init
++  // methods as preallocated errors aren't created by "java" code.
+ 
+   // fill in as much stack trace as possible
+   int max_chunks = MIN2(methods->length(), (int)MaxJavaStackTraceDepth);
+   int chunk_count = 0;
+ 
+-  for (;!st.at_end(); st.next()) {    
++  for (;!st.at_end(); st.next()) {
+     // add element
+     bcis->ushort_at_put(chunk_count, st.bci());
+     methods->obj_at_put(chunk_count, st.method());
+@@ -1314,7 +1311,7 @@
+   int chunk_index = index % trace_chunk_size;
+   while (chunk != NULL && skip_chunks > 0) {
+     chunk = objArrayOop(chunk->obj_at(trace_next_offset));
+-	skip_chunks--;
++        skip_chunks--;
+   }
+   if (chunk == NULL) {
+     THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL);
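This hunk only re-indents skip_chunks--, but it sits inside the backtrace chunking scheme: trace elements live in fixed-size chunks linked through a next slot, and element index is reached by first skipping index / chunk_size chunks. Condensed into a standalone model (the chunk size is assumed, and the types are stand-ins):

#include <cstddef>

const int trace_chunk_size = 32;  // chunk capacity, assumed for the sketch

struct Chunk {
  int    bcis[trace_chunk_size];
  Chunk* next;
};

// Skip whole chunks, then index into the survivor; returns -1 where the
// VM throws IndexOutOfBoundsException.
int bci_at(Chunk* chunk, int index) {
  int skip_chunks = index / trace_chunk_size;
  int chunk_index = index % trace_chunk_size;
  while (chunk != NULL && skip_chunks > 0) {
    chunk = chunk->next;
    skip_chunks--;
  }
  if (chunk == NULL) return -1;
  return chunk->bcis[chunk_index];
}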
+@@ -1361,7 +1358,7 @@
+   // Fill in source line number
+   int line_number;
+   if (method->is_native()) {
+-    // Negative value different from -1 below, enabling Java code in 
++    // Negative value different from -1 below, enabling Java code in
+     // class java.lang.StackTraceElement to distinguish "native" from
+     // "no LineNumberTable".
+     line_number = -2;
+@@ -1410,12 +1407,12 @@
+   COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Method", annotation_default_offset,    k, vmSymbols::annotation_default_name(),    vmSymbols::byte_array_signature());
+ }
+ 
+-Handle java_lang_reflect_Method::create(TRAPS) {  
++Handle java_lang_reflect_Method::create(TRAPS) {
+   assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+   klassOop klass = SystemDictionary::reflect_method_klass();
+   // This class is eagerly initialized during VM initialization, since we keep a reference
+   // to one of the methods
+-  assert(instanceKlass::cast(klass)->is_initialized(), "must be initialized");  
++  assert(instanceKlass::cast(klass)->is_initialized(), "must be initialized");
+   return instanceKlass::cast(klass)->allocate_instance_handle(CHECK_NH);
+ }
+ 
+@@ -1691,7 +1688,7 @@
+   COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Field", annotations_offset,  k, vmSymbols::annotations_name(),  vmSymbols::byte_array_signature());
+ }
+ 
+-Handle java_lang_reflect_Field::create(TRAPS) {  
++Handle java_lang_reflect_Field::create(TRAPS) {
+   assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+   symbolHandle name = vmSymbolHandles::java_lang_reflect_Field();
+   klassOop k = SystemDictionary::resolve_or_fail(name, true, CHECK_NH);
+@@ -1793,7 +1790,7 @@
+ }
+ 
+ 
+-Handle sun_reflect_ConstantPool::create(TRAPS) {  
++Handle sun_reflect_ConstantPool::create(TRAPS) {
+   assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+   klassOop k = SystemDictionary::reflect_constant_pool_klass();
+   instanceKlassHandle klass (THREAD, k);
+@@ -2035,7 +2032,7 @@
+ }
+ 
+ 
+-oop java_security_AccessControlContext::create(objArrayHandle context, bool isPrivileged, Handle privileged_context, TRAPS) {  
++oop java_security_AccessControlContext::create(objArrayHandle context, bool isPrivileged, Handle privileged_context, TRAPS) {
+   assert(_isPrivileged_offset != 0, "offsets should have been initialized");
+   // Ensure klass is initialized
+   instanceKlass::cast(SystemDictionary::AccessControlContext_klass())->initialize(CHECK_0);
+@@ -2197,8 +2194,8 @@
+ void java_lang_StackTraceElement::set_lineNumber(oop element, int value) {
+   element->int_field_put(lineNumber_offset, value);
+ }
+-  
+-  
++
++
+ // Support for java Assertions - java_lang_AssertionStatusDirectives.
+ 
+ void java_lang_AssertionStatusDirectives::set_classes(oop o, oop val) {
+@@ -2254,7 +2251,7 @@
+   assert(JDK_Version::is_gte_jdk16x_version(), "Must be JDK 1.6 or later");
+   SystemDictionary::load_abstract_ownable_synchronizer_klass(CHECK);
+   klassOop k = SystemDictionary::abstract_ownable_synchronizer_klass();
+-  COMPUTE_OFFSET("java.util.concurrent.locks.AbstractOwnableSynchronizer", _owner_offset, k, 
++  COMPUTE_OFFSET("java.util.concurrent.locks.AbstractOwnableSynchronizer", _owner_offset, k,
+                  vmSymbols::exclusive_owner_thread_name(), vmSymbols::thread_signature());
+ }
+ 
+@@ -2267,7 +2264,7 @@
+ // Invoked before SystemDictionary::initialize, so pre-loaded classes
+ // are not available to determine the offset_of_static_fields.
+ void JavaClasses::compute_hard_coded_offsets() {
+-  const int x = wordSize;  			
++  const int x = wordSize;
+   const int header = instanceOopDesc::header_size_in_bytes();
+ 
+   // Do the String Class
+@@ -2329,7 +2326,7 @@
+   java_lang_AssertionStatusDirectives::deflt_offset = java_lang_AssertionStatusDirectives::hc_deflt_offset * x + header;
+ 
+ }
+-  
++
+ 
+ // Compute non-hard-coded field offsets of all the classes in this file
+ void JavaClasses::compute_offsets() {
+@@ -2359,7 +2356,7 @@
+ 
+ #ifndef PRODUCT
+ 
+-// These functions exist to assert the validity of hard-coded field offsets to guard 
++// These functions exist to assert the validity of hard-coded field offsets to guard
+ // against changes in the class files
+ 
+ bool JavaClasses::check_offset(const char *klass_name, int hardcoded_offset, const char *field_name, const char* field_sig) {
+@@ -2382,7 +2379,7 @@
+   if (fd.offset() == hardcoded_offset ) {
+     return true;
+   } else {
+-    tty->print_cr("Offset of nonstatic field %s.%s is hardcoded as %d but should really be %d.", 
++    tty->print_cr("Offset of nonstatic field %s.%s is hardcoded as %d but should really be %d.",
+                   klass_name, field_name, hardcoded_offset, fd.offset());
+     return false;
+   }
+@@ -2431,7 +2428,7 @@
+   CHECK_OFFSET("java/lang/String", java_lang_String, offset, "I");
+   CHECK_OFFSET("java/lang/String", java_lang_String, count, "I");
+   CHECK_OFFSET("java/lang/String", java_lang_String, hash, "I");
+-  
++
+   // java.lang.Class
+ 
+   // Fake fields
+@@ -2445,7 +2442,7 @@
+   CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, detailMessage, "Ljava/lang/String;");
+   CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, cause, "Ljava/lang/Throwable;");
+   CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, stackTrace, "[Ljava/lang/StackTraceElement;");
+-  
++
+   // Boxed primitive objects (java_lang_boxing_object)
+ 
+   CHECK_OFFSET("java/lang/Boolean",   java_lang_boxing_object, value, "Z");
+@@ -2490,7 +2487,7 @@
+   CHECK_STATIC_OFFSET("java/lang/ref/SoftReference", java_lang_ref_SoftReference, clock, "J");
+ 
+   // java.lang.AssertionStatusDirectives
+-  // 
++  //
+   // The CheckAssertionStatusDirectives boolean can be removed from here and
+   // globals.hpp after the AssertionStatusDirectives class has been integrated
+   // into merlin "for some time."  Without it, the vm will fail with early
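Two of the few non-whitespace edits in javaClasses.cpp deserve a note. First, the primitive-mirror accessors are retargeted from SystemDictionary to Universe, matching the newer HotSpot this patch upgrades toward. Second, fill_in_stack_trace() stops asking AbstractInterpreter::contains(pc) and asks fr.is_interpreted_frame() instead; a pc-range test cannot work for a C++ interpreter such as zero's, which executes ordinary compiled C++ with no dedicated interpreter code region, and that is presumably the motivation here. A stand-in sketch of the two classification styles:

#include <cstdint>

struct CodeRange {
  uintptr_t lo, hi;
  bool contains(uintptr_t pc) const { return lo <= pc && pc < hi; }
};

struct Frame {
  uintptr_t pc;
  bool      interpreted;  // recorded when the frame is built
  bool is_interpreted_frame() const { return interpreted; }
};

// Old style: does the pc fall inside generated interpreter code?
// Only meaningful for a template interpreter.
bool old_style(const CodeRange& interp_range, const Frame& fr) {
  return interp_range.contains(fr.pc);
}

// New style: ask the frame itself; valid however the interpreter
// is implemented.
bool new_style(const Frame& fr) {
  return fr.is_interpreted_frame();
}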
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/javaClasses.hpp openjdk/hotspot/src/share/vm/classfile/javaClasses.hpp
+--- openjdk6/hotspot/src/share/vm/classfile/javaClasses.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/javaClasses.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)javaClasses.hpp	1.157 07/05/05 17:05:52 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Interface for manipulating the basic Java classes.
+@@ -33,12 +30,12 @@
+ // For most classes we hardwire the offsets for performance reasons. In certain
+ // cases (e.g. java.security.AccessControlContext) we compute the offsets at
+ // startup since the layout here differs between JDK1.2 and JDK1.3.
+-// 
++//
+ // Note that fields (static and non-static) are arranged with oops before non-oops
+ // on a per class basis. The offsets below have to reflect this ordering.
+ //
+-// When editing the layouts please update the check_offset verification code 
+-// correspondingly. The names in the enums must be identical to the actual field 
++// When editing the layouts please update the check_offset verification code
++// correspondingly. The names in the enums must be identical to the actual field
+ // names in order for the verification code to work.
+ 
+ 
+@@ -72,10 +69,10 @@
+   static oop    create_oop_from_unicode(jchar* unicode, int len, TRAPS);
+   static Handle create_from_str(const char* utf8_str, TRAPS);
+   static oop    create_oop_from_str(const char* utf8_str, TRAPS);
+-  static Handle create_from_symbol(symbolHandle symbol, TRAPS);  
++  static Handle create_from_symbol(symbolHandle symbol, TRAPS);
+   static Handle create_from_platform_dependent_str(const char* str, TRAPS);
+   static Handle char_converter(Handle java_string, jchar from_char, jchar to_char, TRAPS);
+- 
++
+   static int value_offset_in_bytes()  { return value_offset;  }
+   static int count_offset_in_bytes()  { return count_offset;  }
+   static int offset_offset_in_bytes() { return offset_offset; }
+@@ -105,7 +102,7 @@
+ 
+   // Conversion between '.' and '/' formats
+   static Handle externalize_classname(Handle java_string, TRAPS) { return char_converter(java_string, '/', '.', THREAD); }
+-  static Handle internalize_classname(Handle java_string, TRAPS) { return char_converter(java_string, '.', '/', THREAD); }    
++  static Handle internalize_classname(Handle java_string, TRAPS) { return char_converter(java_string, '.', '/', THREAD); }
+ 
+   // Conversion
+   static symbolHandle as_symbol(Handle java_string, TRAPS);
+@@ -126,7 +123,7 @@
+ class java_lang_Class : AllStatic {
+    friend class VMStructs;
+  private:
+-  // The fake offsets are added by the class loader when java.lang.Class is loaded 
++  // The fake offsets are added by the class loader when java.lang.Class is loaded
+ 
+   enum {
+     hc_klass_offset                = 0,
+@@ -151,9 +148,9 @@
+   // Conversion
+   static klassOop as_klassOop(oop java_class);
+   // Testing
+-  static bool is_primitive(oop java_class);  
+-  static BasicType primitive_type(oop java_class);  
+-  static oop primitive_mirror(BasicType t);  
++  static bool is_primitive(oop java_class);
++  static BasicType primitive_type(oop java_class);
++  static oop primitive_mirror(BasicType t);
+   // JVM_NewInstance support
+   static methodOop resolved_constructor(oop java_class);
+   static void set_resolved_constructor(oop java_class, methodOop constructor);
+@@ -189,9 +186,9 @@
+   static int _stillborn_offset;
+   static int _stackSize_offset;
+   static int _tid_offset;
+-  static int _thread_status_offset; 
+-  static int _park_blocker_offset; 
+-  static int _park_event_offset ; 
++  static int _thread_status_offset;
++  static int _park_blocker_offset;
++  static int _park_event_offset ;
+ 
+   static void compute_offsets();
+ 
+@@ -227,11 +224,11 @@
+   static jlong stackSize(oop java_thread);
+   // Thread ID
+   static jlong thread_id(oop java_thread);
+-    
++
+   // Blocker object responsible for thread parking
+   static oop park_blocker(oop java_thread);
+ 
+-  // Pointer to type-stable park handler, encoded as jlong. 
++  // Pointer to type-stable park handler, encoded as jlong.
+   // Should be set when apparently null
+   // For details, see unsafe.cpp Unsafe_Unpark
+   static jlong park_event(oop java_thread);
+@@ -246,16 +243,16 @@
+                                JVMTI_THREAD_STATE_RUNNABLE,
+     SLEEPING                 = JVMTI_THREAD_STATE_ALIVE +          // Thread.sleep()
+                                JVMTI_THREAD_STATE_WAITING +
+-                               JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT + 
++                               JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT +
+                                JVMTI_THREAD_STATE_SLEEPING,
+     IN_OBJECT_WAIT           = JVMTI_THREAD_STATE_ALIVE +          // Object.wait()
+                                JVMTI_THREAD_STATE_WAITING +
+                                JVMTI_THREAD_STATE_WAITING_INDEFINITELY +
+-                               JVMTI_THREAD_STATE_IN_OBJECT_WAIT, 
++                               JVMTI_THREAD_STATE_IN_OBJECT_WAIT,
+     IN_OBJECT_WAIT_TIMED     = JVMTI_THREAD_STATE_ALIVE +          // Object.wait(long)
+                                JVMTI_THREAD_STATE_WAITING +
+                                JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT +
+-                               JVMTI_THREAD_STATE_IN_OBJECT_WAIT, 
++                               JVMTI_THREAD_STATE_IN_OBJECT_WAIT,
+     PARKED                   = JVMTI_THREAD_STATE_ALIVE +          // LockSupport.park()
+                                JVMTI_THREAD_STATE_WAITING +
+                                JVMTI_THREAD_STATE_WAITING_INDEFINITELY +
+@@ -263,18 +260,18 @@
+     PARKED_TIMED             = JVMTI_THREAD_STATE_ALIVE +          // LockSupport.park(long)
+                                JVMTI_THREAD_STATE_WAITING +
+                                JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT +
+-                               JVMTI_THREAD_STATE_PARKED,  
+-    BLOCKED_ON_MONITOR_ENTER = JVMTI_THREAD_STATE_ALIVE +          // (re-)entering a synchronization block 
+-                               JVMTI_THREAD_STATE_BLOCKED_ON_MONITOR_ENTER,   
++                               JVMTI_THREAD_STATE_PARKED,
++    BLOCKED_ON_MONITOR_ENTER = JVMTI_THREAD_STATE_ALIVE +          // (re-)entering a synchronization block
++                               JVMTI_THREAD_STATE_BLOCKED_ON_MONITOR_ENTER,
+     TERMINATED               = JVMTI_THREAD_STATE_TERMINATED
+   };
+   // Write thread status info to threadStatus field of java.lang.Thread.
+   static void set_thread_status(oop java_thread_oop, ThreadStatus status);
+-  // Read thread status info from threadStatus field of java.lang.Thread. 
++  // Read thread status info from threadStatus field of java.lang.Thread.
+   static ThreadStatus get_thread_status(oop java_thread_oop);
+ 
+   static const char*  thread_status_name(oop java_thread_oop);
+-    
++
+   // Debugging
+   friend class JavaClasses;
+ };
+@@ -283,20 +280,20 @@
+ 
+ class java_lang_ThreadGroup : AllStatic {
+  private:
+-  static int _parent_offset;        
++  static int _parent_offset;
+   static int _name_offset;
+   static int _threads_offset;
+   static int _groups_offset;
+   static int _maxPriority_offset;
+   static int _destroyed_offset;
+   static int _daemon_offset;
+-  static int _vmAllowSuspension_offset; 
+-  static int _nthreads_offset;  
+-  static int _ngroups_offset; 
++  static int _vmAllowSuspension_offset;
++  static int _nthreads_offset;
++  static int _ngroups_offset;
+ 
+   static void compute_offsets();
+ 
+- public:  
++ public:
+   // parent ThreadGroup
+   static oop  parent(oop java_thread_group);
+   // name
+@@ -321,7 +318,7 @@
+   // Debugging
+   friend class JavaClasses;
+ };
+-  
++
+ 
+ 
+ // Interface to java.lang.Throwable objects
+@@ -363,7 +360,7 @@
+   // Backtrace
+   static oop backtrace(oop throwable);
+   static void set_backtrace(oop throwable, oop value);
+-  // Needed by JVMTI to filter out this internal field. 
++  // Needed by JVMTI to filter out this internal field.
+   static int get_backtrace_offset() { return backtrace_offset;}
+   static int get_detailMessage_offset() { return detailMessage_offset;}
+   // Message
+@@ -404,7 +401,7 @@
+  private:
+   // Note that to reduce dependencies on the JDK we compute these
+   // offsets at run-time.
+-  static int override_offset; 
++  static int override_offset;
+ 
+   static void compute_offsets();
+ 
+@@ -429,8 +426,8 @@
+   static int returnType_offset;
+   static int parameterTypes_offset;
+   static int exceptionTypes_offset;
+-  static int slot_offset; 
+-  static int modifiers_offset; 
++  static int slot_offset;
++  static int modifiers_offset;
+   static int signature_offset;
+   static int annotations_offset;
+   static int parameter_annotations_offset;
+@@ -545,7 +542,7 @@
+  private:
+   // Note that to reduce dependencies on the JDK we compute these
+   // offsets at run-time.
+-  static int clazz_offset; 
++  static int clazz_offset;
+   static int name_offset;
+   static int type_offset;
+   static int slot_offset;
+@@ -593,14 +590,14 @@
+ 
+   // Debugging
+   friend class JavaClasses;
+-}; 
++};
+ 
+ // Interface to sun.reflect.ConstantPool objects
+ class sun_reflect_ConstantPool {
+  private:
+   // Note that to reduce dependencies on the JDK we compute these
+   // offsets at run-time.
+-  static int _cp_oop_offset; 
++  static int _cp_oop_offset;
+ 
+   static void compute_offsets();
+ 
+@@ -617,12 +614,12 @@
+ 
+   // Debugging
+   friend class JavaClasses;
+-}; 
++};
+ 
+ // Interface to sun.reflect.UnsafeStaticFieldAccessorImpl objects
+ class sun_reflect_UnsafeStaticFieldAccessorImpl {
+  private:
+-  static int _base_offset; 
++  static int _base_offset;
+   static void compute_offsets();
+ 
+  public:
+@@ -632,7 +629,7 @@
+ 
+   // Debugging
+   friend class JavaClasses;
+-}; 
++};
+ 
+ // Interface to java.lang primitive type boxing objects:
+ //  - java.lang.Boolean
+@@ -651,7 +648,7 @@
+   enum {
+    hc_value_offset = 0
+   };
+-  static int value_offset; 
++  static int value_offset;
+ 
+   static oop initialize_and_allocate(klassOop klass, TRAPS);
+  public:
+@@ -677,7 +674,7 @@
+    hc_referent_offset   = 0,
+    hc_queue_offset      = 1,
+    hc_next_offset       = 2,
+-   hc_discovered_offset	= 3  // Is not last, see SoftRefs.
++   hc_discovered_offset = 3  // Is not last, see SoftRefs.
+   };
+   enum {
+    hc_static_lock_offset    = 0,
+@@ -691,7 +688,7 @@
+   static int static_lock_offset;
+   static int static_pending_offset;
+   static int number_of_fake_oop_fields;
+- 
++
+   // Accessors
+   static oop referent(oop ref)        { return *referent_addr(ref); }
+   static void set_referent(oop ref, oop value);
+@@ -750,7 +747,7 @@
+ 
+   static void compute_offsets();
+  public:
+-  static oop create(objArrayHandle context, bool isPrivileged, Handle privileged_context, TRAPS);  
++  static oop create(objArrayHandle context, bool isPrivileged, Handle privileged_context, TRAPS);
+ 
+   // Debugging/initialization
+   friend class JavaClasses;
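A reminder of how the hc_*_offset enums touched throughout javaClasses.hpp are consumed: they are word counts past the object header, turned into byte offsets by compute_hard_coded_offsets() as hc_offset * wordSize + header, and later verified against the loaded class files by the check_offset code seen earlier. A tiny standalone illustration of that arithmetic (the header size and enum value are made up):

#include <cstdio>

int main() {
  const int wordSize        = (int)sizeof(void*);  // "x" in compute_hard_coded_offsets
  const int header          = 2 * wordSize;        // assumed object header size
  const int hc_value_offset = 0;                   // declared in words, as in the enums

  // offset = hc_offset * x + header, as in the .cpp above
  const int value_offset = hc_value_offset * wordSize + header;
  std::printf("value field lives at byte offset %d\n", value_offset);
  return 0;
}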
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/loaderConstraints.cpp openjdk/hotspot/src/share/vm/classfile/loaderConstraints.cpp
+--- openjdk6/hotspot/src/share/vm/classfile/loaderConstraints.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/loaderConstraints.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)loaderConstraints.cpp	1.19 07/05/17 15:50:23 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -64,7 +61,7 @@
+ }
+ 
+ // We must keep the symbolOop used in the name alive.  We'll use the
+-// loaders to decide if a particular entry can be purged. 
++// loaders to decide if a particular entry can be purged.
+ void LoaderConstraintTable::always_strong_classes_do(OopClosure* blk) {
+   // We must keep the symbolOop used in the name alive.
+   for (int cindex = 0; cindex < table_size(); cindex++) {
+@@ -116,29 +113,29 @@
+       // Remove klass that is no longer alive
+       if (klass != NULL && !is_alive->do_object_b(klass)) {
+         probe->set_klass(NULL);
+-	if (TraceLoaderConstraints) {
+-	  ResourceMark rm;
+-	  tty->print_cr("[Purging class object from constraint for name %s,"
+-		     " loader list:", 
+-		     probe->name()->as_C_string());
+-  	  for (int i = 0; i < probe->num_loaders(); i++) {
+-	    tty->print_cr("[   [%d]: %s", i, 
+-			  SystemDictionary::loader_name(probe->loader(i)));
+-	  }
+-	}
++        if (TraceLoaderConstraints) {
++          ResourceMark rm;
++          tty->print_cr("[Purging class object from constraint for name %s,"
++                     " loader list:",
++                     probe->name()->as_C_string());
++          for (int i = 0; i < probe->num_loaders(); i++) {
++            tty->print_cr("[   [%d]: %s", i,
++                          SystemDictionary::loader_name(probe->loader(i)));
++          }
++        }
+       }
+       // Remove entries no longer alive from loader array
+-      int n = 0; 
++      int n = 0;
+       while (n < probe->num_loaders()) {
+         if (probe->loader(n) != NULL) {
+           if (!is_alive->do_object_b(probe->loader(n))) {
+-	    if (TraceLoaderConstraints) {
+-	      ResourceMark rm;
++            if (TraceLoaderConstraints) {
++              ResourceMark rm;
+               tty->print_cr("[Purging loader %s from constraint for name %s",
+-			    SystemDictionary::loader_name(probe->loader(n)),
+-			    probe->name()->as_C_string()
+-			    );
+-	    }
++                            SystemDictionary::loader_name(probe->loader(n)),
++                            probe->name()->as_C_string()
++                            );
++            }
+ 
+             // Compact array
+             int num = probe->num_loaders() - 1;
+@@ -146,14 +143,14 @@
+             probe->set_loader(n, probe->loader(num));
+             probe->set_loader(num, NULL);
+ 
+-	    if (TraceLoaderConstraints) {
+-	      ResourceMark rm;
++            if (TraceLoaderConstraints) {
++              ResourceMark rm;
+               tty->print_cr("[New loader list:");
+-	      for (int i = 0; i < probe->num_loaders(); i++) {
+-                tty->print_cr("[   [%d]: %s", i, 
+-			      SystemDictionary::loader_name(probe->loader(i)));
+-	      }
+-	    }
++              for (int i = 0; i < probe->num_loaders(); i++) {
++                tty->print_cr("[   [%d]: %s", i,
++                              SystemDictionary::loader_name(probe->loader(i)));
++              }
++            }
+ 
+             continue;  // current element replaced, so restart without
+                        // incrementing n
+@@ -163,11 +160,11 @@
+       }
+       // Check whether entry should be purged
+       if (probe->num_loaders() < 2) {
+-	    if (TraceLoaderConstraints) {
+-	      ResourceMark rm;
+-	      tty->print("[Purging complete constraint for name %s\n", 
+-			 probe->name()->as_C_string());
+-	    }
++            if (TraceLoaderConstraints) {
++              ResourceMark rm;
++              tty->print("[Purging complete constraint for name %s\n",
++                         probe->name()->as_C_string());
++            }
+ 
+         // Purge entry
+         *p = probe->next();
+@@ -201,98 +198,98 @@
+     failure_code = 1;
+   } else {
+     klassOop klass = klass1 != NULL ? klass1 : klass2;
+-      
++
+     LoaderConstraintEntry** pp1 = find_loader_constraint(class_name,
+-							 class_loader1);
++                                                         class_loader1);
+     if (*pp1 != NULL && (*pp1)->klass() != NULL) {
+       if (klass != NULL) {
+-	if (klass != (*pp1)->klass()) {
+-	  failure_code = 2;
+-	}
++        if (klass != (*pp1)->klass()) {
++          failure_code = 2;
++        }
+       } else {
+-	klass = (*pp1)->klass();
++        klass = (*pp1)->klass();
+       }
+     }
+-    
++
+     LoaderConstraintEntry** pp2 = find_loader_constraint(class_name,
+-							 class_loader2);
++                                                         class_loader2);
+     if (*pp2 != NULL && (*pp2)->klass() != NULL) {
+       if (klass != NULL) {
+-	if (klass != (*pp2)->klass()) {
+-	  failure_code = 3;
+-	}
++        if (klass != (*pp2)->klass()) {
++          failure_code = 3;
++        }
+       } else {
+-	klass = (*pp2)->klass();
++        klass = (*pp2)->klass();
+       }
+     }
+ 
+     if (failure_code == 0) {
+       if (*pp1 == NULL && *pp2 == NULL) {
+-	unsigned int hash = compute_hash(class_name);
+-	int index = hash_to_index(hash);
+-	LoaderConstraintEntry* p;
+-	p = new_entry(hash, class_name(), klass, 2, 2);
+-	p->set_loaders(NEW_C_HEAP_ARRAY(oop, 2));
+-	p->set_loader(0, class_loader1());
+-	p->set_loader(1, class_loader2());
+-	p->set_klass(klass);
+-	p->set_next(bucket(index));
+-	set_entry(index, p);
+-	if (TraceLoaderConstraints) {
+-	  ResourceMark rm;
+-	  tty->print("[Adding new constraint for name: %s, loader[0]: %s,"
+-		     " loader[1]: %s ]\n",
+-		     class_name()->as_C_string(), 
+-		     SystemDictionary::loader_name(class_loader1()),
+-		     SystemDictionary::loader_name(class_loader2())
+-		     );
+-	}
++        unsigned int hash = compute_hash(class_name);
++        int index = hash_to_index(hash);
++        LoaderConstraintEntry* p;
++        p = new_entry(hash, class_name(), klass, 2, 2);
++        p->set_loaders(NEW_C_HEAP_ARRAY(oop, 2));
++        p->set_loader(0, class_loader1());
++        p->set_loader(1, class_loader2());
++        p->set_klass(klass);
++        p->set_next(bucket(index));
++        set_entry(index, p);
++        if (TraceLoaderConstraints) {
++          ResourceMark rm;
++          tty->print("[Adding new constraint for name: %s, loader[0]: %s,"
++                     " loader[1]: %s ]\n",
++                     class_name()->as_C_string(),
++                     SystemDictionary::loader_name(class_loader1()),
++                     SystemDictionary::loader_name(class_loader2())
++                     );
++        }
+       } else if (*pp1 == *pp2) {
+-	/* constraint already imposed */
+-	if ((*pp1)->klass() == NULL) {
+-	  (*pp1)->set_klass(klass);
+-	  if (TraceLoaderConstraints) {
+-	    ResourceMark rm;
+-	    tty->print("[Setting class object in existing constraint for"
+-		       " name: %s and loader %s ]\n",
+-		       class_name()->as_C_string(),
+-		       SystemDictionary::loader_name(class_loader1())
+-		       );
+-	  }
+-	} else {
+-	  assert((*pp1)->klass() == klass, "loader constraints corrupted");
+-	}
++        /* constraint already imposed */
++        if ((*pp1)->klass() == NULL) {
++          (*pp1)->set_klass(klass);
++          if (TraceLoaderConstraints) {
++            ResourceMark rm;
++            tty->print("[Setting class object in existing constraint for"
++                       " name: %s and loader %s ]\n",
++                       class_name()->as_C_string(),
++                       SystemDictionary::loader_name(class_loader1())
++                       );
++          }
++        } else {
++          assert((*pp1)->klass() == klass, "loader constraints corrupted");
++        }
+       } else if (*pp1 == NULL) {
+-	extend_loader_constraint(*pp2, class_loader1, klass);
++        extend_loader_constraint(*pp2, class_loader1, klass);
+       } else if (*pp2 == NULL) {
+-	extend_loader_constraint(*pp1, class_loader2, klass);
++        extend_loader_constraint(*pp1, class_loader2, klass);
+       } else {
+-	merge_loader_constraints(pp1, pp2, klass);
++        merge_loader_constraints(pp1, pp2, klass);
+       }
+     }
+   }
+-  
++
+   if (failure_code != 0 && TraceLoaderConstraints) {
+     ResourceMark rm;
+     const char* reason = "";
+     switch(failure_code) {
+     case 1: reason = "the class objects presented by loader[0] and loader[1]"
+-	      " are different"; break;
++              " are different"; break;
+     case 2: reason = "the class object presented by loader[0] does not match"
+-	      " the stored class object in the constraint"; break;
++              " the stored class object in the constraint"; break;
+     case 3: reason = "the class object presented by loader[1] does not match"
+-	      " the stored class object in the constraint"; break;
++              " the stored class object in the constraint"; break;
+     default: reason = "unknown reason code";
+     }
+     tty->print("[Failed to add constraint for name: %s, loader[0]: %s,"
+-	       " loader[1]: %s, Reason: %s ]\n",
+-	       class_name()->as_C_string(),
+-	       SystemDictionary::loader_name(class_loader1()),
+-	       SystemDictionary::loader_name(class_loader2()),
+-	       reason
+-	       );
++               " loader[1]: %s, Reason: %s ]\n",
++               class_name()->as_C_string(),
++               SystemDictionary::loader_name(class_loader1()),
++               SystemDictionary::loader_name(class_loader2()),
++               reason
++               );
+   }
+-  
++
+   return failure_code == 0;
+ }
+ 
+@@ -307,20 +304,20 @@
+     if (TraceLoaderConstraints) {
+       ResourceMark rm;
+       tty->print("[Constraint check failed for name %s, loader %s: "
+-		 "the presented class object differs from that stored ]\n",
+-		 name()->as_C_string(), 
+-		 SystemDictionary::loader_name(loader()));
++                 "the presented class object differs from that stored ]\n",
++                 name()->as_C_string(),
++                 SystemDictionary::loader_name(loader()));
+     }
+     return false;
+   } else {
+     if (p && p->klass() == NULL) {
+       p->set_klass(k());
+       if (TraceLoaderConstraints) {
+-	ResourceMark rm;
+-	tty->print("[Updating constraint for name %s, loader %s, "
+-		   "by setting class object ]\n",
+-		   name()->as_C_string(), 
+-		   SystemDictionary::loader_name(loader()));
++        ResourceMark rm;
++        tty->print("[Updating constraint for name %s, loader %s, "
++                   "by setting class object ]\n",
++                   name()->as_C_string(),
++                   SystemDictionary::loader_name(loader()));
+       }
+     }
+     return true;
+@@ -376,7 +373,7 @@
+         p->set_loaders(new_loaders);
+     }
+ }
+- 
++
+ 
+ void LoaderConstraintTable::extend_loader_constraint(LoaderConstraintEntry* p,
+                                                      Handle loader,
+@@ -388,11 +385,11 @@
+   if (TraceLoaderConstraints) {
+     ResourceMark rm;
+     tty->print("[Extending constraint for name %s by adding loader[%d]: %s %s",
+-	       p->name()->as_C_string(),
+-	       num,
++               p->name()->as_C_string(),
++               num,
+                SystemDictionary::loader_name(loader()),
+-	       (p->klass() == NULL ? " and setting class object ]\n" : " ]\n")
+-	       );
++               (p->klass() == NULL ? " and setting class object ]\n" : " ]\n")
++               );
+   }
+   if (p->klass() == NULL) {
+     p->set_klass(klass);
+@@ -406,16 +403,16 @@
+                                                    LoaderConstraintEntry** pp1,
+                                                    LoaderConstraintEntry** pp2,
+                                                    klassOop klass) {
+-  // make sure *pp1 has higher capacity 
++  // make sure *pp1 has higher capacity
+   if ((*pp1)->max_loaders() < (*pp2)->max_loaders()) {
+     LoaderConstraintEntry** tmp = pp2;
+     pp2 = pp1;
+     pp1 = tmp;
+   }
+-  
++
+   LoaderConstraintEntry* p1 = *pp1;
+   LoaderConstraintEntry* p2 = *pp2;
+-  
++
+   ensure_loader_constraint_capacity(p1, p2->num_loaders());
+ 
+   for (int i = 0; i < p2->num_loaders(); i++) {
+@@ -426,19 +423,19 @@
+ 
+   if (TraceLoaderConstraints) {
+     ResourceMark rm;
+-    tty->print_cr("[Merged constraints for name %s, new loader list:", 
+-		  p1->name()->as_C_string()
+-		  );
+-  
++    tty->print_cr("[Merged constraints for name %s, new loader list:",
++                  p1->name()->as_C_string()
++                  );
++
+     for (int i = 0; i < p1->num_loaders(); i++) {
+-      tty->print_cr("[   [%d]: %s", i, 
+-		    SystemDictionary::loader_name(p1->loader(i)));
++      tty->print_cr("[   [%d]: %s", i,
++                    SystemDictionary::loader_name(p1->loader(i)));
+     }
+     if (p1->klass() == NULL) {
+       tty->print_cr("[... and setting class object]");
+     }
+   }
+-  
++
+   // p1->klass() will hold NULL if klass, p2->klass(), and old
+   // p1->klass() are all NULL.  In addition, all three must have
+   // matching non-NULL values, otherwise either the constraints would
+@@ -468,7 +465,7 @@
+                                 probe = probe->next()) {
+       guarantee(probe->name()->is_symbol(), "should be symbol");
+       if (probe->klass() != NULL) {
+-        instanceKlass* ik = instanceKlass::cast(probe->klass()); 
++        instanceKlass* ik = instanceKlass::cast(probe->klass());
+         guarantee(ik->name() == probe->name(), "name should match");
+         symbolHandle name (thread, ik->name());
+         Handle loader(thread, ik->class_loader());
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/loaderConstraints.hpp openjdk/hotspot/src/share/vm/classfile/loaderConstraints.hpp
+--- openjdk6/hotspot/src/share/vm/classfile/loaderConstraints.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/loaderConstraints.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)loaderConstraints.hpp	1.14 07/05/05 17:05:52 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class LoaderConstraintEntry;
+@@ -82,7 +79,7 @@
+   bool check_or_update(instanceKlassHandle k, Handle loader,
+                               symbolHandle name);
+ 
+-  
++
+   void purge_loader_constraints(BoolObjectClosure* is_alive);
+ 
+   void verify(Dictionary* dictionary);
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/placeholders.cpp openjdk/hotspot/src/share/vm/classfile/placeholders.cpp
+--- openjdk6/hotspot/src/share/vm/classfile/placeholders.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/placeholders.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)placeholders.cpp	1.20 07/05/17 15:50:29 JVM"
+-#endif
+ /*
+  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -31,7 +28,7 @@
+ // Placeholder methods
+ 
+ PlaceholderEntry* PlaceholderTable::new_entry(int hash, symbolOop name,
+-                                              oop loader, bool havesupername, 
++                                              oop loader, bool havesupername,
+                                               symbolOop supername) {
+   PlaceholderEntry* entry = (PlaceholderEntry*)Hashtable::new_entry(hash, name);
+   entry->set_loader(loader);
+@@ -63,9 +60,9 @@
+ }
+ 
+ 
+-// Remove a placeholder object. 
++// Remove a placeholder object.
+ void PlaceholderTable::remove_entry(int index, unsigned int hash,
+-                                    symbolHandle class_name, 
++                                    symbolHandle class_name,
+                                     Handle class_loader) {
+   assert_locked_or_safepoint(SystemDictionary_lock);
+   PlaceholderEntry** p = bucket_addr(index);
+@@ -108,8 +105,8 @@
+ }
+ 
+   // find_and_add returns probe pointer - old or new
+-  // If no entry exists, add a placeholder entry 
+-  // If entry exists, reuse entry 
++  // If no entry exists, add a placeholder entry
++  // If entry exists, reuse entry
+   // For both, push SeenThread for classloadAction
+   // if havesupername: this is used for circularity for instanceklass loading
+ PlaceholderEntry* PlaceholderTable::find_and_add(int index, unsigned int hash, symbolHandle name, Handle loader, classloadAction action, symbolHandle supername, Thread* thread) {
+@@ -163,8 +160,8 @@
+ 
+ void PlaceholderTable::oops_do(OopClosure* f) {
+   for (int index = 0; index < table_size(); index++) {
+-    for (PlaceholderEntry* probe = bucket(index); 
+-                           probe != NULL; 
++    for (PlaceholderEntry* probe = bucket(index);
++                           probe != NULL;
+                            probe = probe->next()) {
+       probe->oops_do(f);
+     }
+@@ -189,10 +186,10 @@
+ // do all entries in the placeholder table
+ void PlaceholderTable::entries_do(void f(symbolOop, oop)) {
+   for (int index = 0; index < table_size(); index++) {
+-    for (PlaceholderEntry* probe = bucket(index); 
+-                           probe != NULL; 
++    for (PlaceholderEntry* probe = bucket(index);
++                           probe != NULL;
+                            probe = probe->next()) {
+-      f(probe->klass(), probe->loader());             
++      f(probe->klass(), probe->loader());
+     }
+   }
+ }
+@@ -232,9 +229,9 @@
+ #endif
+ 
+ void PlaceholderEntry::verify() const {
+-  guarantee(loader() == NULL || loader()->is_instance(), 
++  guarantee(loader() == NULL || loader()->is_instance(),
+             "checking type of _loader");
+-  guarantee(instanceKlass() == NULL 
++  guarantee(instanceKlass() == NULL
+             || Klass::cast(instanceKlass())->oop_is_instance(),
+             "checking type of instanceKlass result");
+   klass()->verify();
+@@ -243,8 +240,8 @@
+ void PlaceholderTable::verify() {
+   int element_count = 0;
+   for (int pindex = 0; pindex < table_size(); pindex++) {
+-    for (PlaceholderEntry* probe = bucket(pindex); 
+-                           probe != NULL; 
++    for (PlaceholderEntry* probe = bucket(pindex);
++                           probe != NULL;
+                            probe = probe->next()) {
+       probe->verify();
+       element_count++;  // both klasses and place holders count
+@@ -257,9 +254,9 @@
+ 
+ #ifndef PRODUCT
+ void PlaceholderTable::print() {
+-  for (int pindex = 0; pindex < table_size(); pindex++) {    
++  for (int pindex = 0; pindex < table_size(); pindex++) {
+     for (PlaceholderEntry* probe = bucket(pindex);
+-                           probe != NULL; 
++                           probe != NULL;
+                            probe = probe->next()) {
+       if (Verbose) tty->print("%4d: ", pindex);
+       tty->print(" place holder ");
+@@ -270,5 +267,3 @@
+   }
+ }
+ #endif
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/placeholders.hpp openjdk/hotspot/src/share/vm/classfile/placeholders.hpp
+--- openjdk6/hotspot/src/share/vm/classfile/placeholders.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/placeholders.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)placeholders.hpp	1.21 07/05/05 17:05:54 JVM"
+-#endif
+ /*
+  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class PlaceholderEntry;
+@@ -51,7 +48,7 @@
+     Hashtable::add_entry(index, (HashtableEntry*)new_entry);
+   }
+ 
+-  void add_entry(int index, unsigned int hash, symbolHandle name, 
++  void add_entry(int index, unsigned int hash, symbolHandle name,
+                 Handle loader, bool havesupername, symbolHandle supername);
+ 
+ // This returns a symbolOop to match type for SystemDictionary
+@@ -69,7 +66,7 @@
+ // LOAD_SUPER needed to check for class circularity
+ // DEFINE_CLASS: ultimately define class must be single threaded
+ // on a class/classloader basis
+-// so the head of that queue owns the token  
++// so the head of that queue owns the token
+ // and the rest of the threads return the result the first thread gets
+  enum classloadAction {
+     LOAD_INSTANCE = 1,             // calling load_instance_class
+@@ -80,17 +77,17 @@
+   // find_and_add returns probe pointer - old or new
+   // If no entry exists, add a placeholder entry and push SeenThread
+   // If entry exists, reuse entry and push SeenThread for classloadAction
+-  PlaceholderEntry* find_and_add(int index, unsigned int hash, 
+-                                 symbolHandle name, Handle loader, 
+-                                 classloadAction action, symbolHandle supername, 
++  PlaceholderEntry* find_and_add(int index, unsigned int hash,
++                                 symbolHandle name, Handle loader,
++                                 classloadAction action, symbolHandle supername,
+                                  Thread* thread);
+ 
+   void remove_entry(int index, unsigned int hash,
+                     symbolHandle name, Handle loader);
+ 
+ // Remove placeholder information
+-  void find_and_remove(int index, unsigned int hash, 
+-                       symbolHandle name, Handle loader, Thread* thread); 
++  void find_and_remove(int index, unsigned int hash,
++                       symbolHandle name, Handle loader, Thread* thread);
+ 
+   // GC support.
+   void oops_do(OopClosure* f);
+@@ -129,7 +126,7 @@
+    SeenThread* next()              const { return _stnext;}
+    void set_next(SeenThread *seen) { _stnext = seen; }
+    void set_prev(SeenThread *seen) { _stprev = seen; }
+-   
++
+ #ifndef PRODUCT
+   void printActionQ() {
+     SeenThread* seen = this;
+@@ -159,13 +156,13 @@
+   Thread*           _definer;       // owner of define token
+   klassOop          _instanceKlass; // instanceKlass from successful define
+   SeenThread*       _superThreadQ;  // doubly-linked queue of Threads loading a superclass for this class
+-  SeenThread*       _loadInstanceThreadQ;  // loadInstance thread 
++  SeenThread*       _loadInstanceThreadQ;  // loadInstance thread
+                                     // can be multiple threads if classloader object lock broken by application
+                                     // or if classloader supports parallel classloading
+-                   
++
+   SeenThread*       _defineThreadQ; // queue of Threads trying to define this class
+                                     // including _definer
+-                                    // _definer owns token 
++                                    // _definer owns token
+                                     // queue waits for and returns results from _definer
+ 
+  public:
+@@ -249,7 +246,7 @@
+ 
+   bool super_load_in_progress() {
+      return (_superThreadQ != NULL);
+-  } 
++  }
+ 
+   bool instance_load_in_progress() {
+     return (_loadInstanceThreadQ != NULL);
+@@ -268,7 +265,7 @@
+     assert_lock_strong(SystemDictionary_lock);
+     SeenThread* threadEntry = new SeenThread(thread);
+     SeenThread* seen = actionToQueue(action);
+-    
++
+     if (seen == NULL) {
+       set_threadQ(threadEntry, action);
+       return;
+@@ -332,5 +329,3 @@
+   void print() const  PRODUCT_RETURN;
+   void verify() const;
+ };
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/resolutionErrors.cpp openjdk/hotspot/src/share/vm/classfile/resolutionErrors.cpp
+--- openjdk6/hotspot/src/share/vm/classfile/resolutionErrors.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/resolutionErrors.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)resolutionErrors.cpp	1.6 07/05/05 17:05:54 JVM"
+-#endif
+ /*
+  * Copyright 2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,15 +19,15 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_resolutionErrors.cpp.incl"
+ 
+ // add new entry to the table
+-void ResolutionErrorTable::add_entry(int index, unsigned int hash, 
+-				     constantPoolHandle pool, int cp_index, symbolHandle error)
++void ResolutionErrorTable::add_entry(int index, unsigned int hash,
++                                     constantPoolHandle pool, int cp_index, symbolHandle error)
+ {
+   assert_locked_or_safepoint(SystemDictionary_lock);
+   assert(!pool.is_null() && !error.is_null(), "adding NULL obj");
+@@ -40,8 +37,8 @@
+ }
+ 
+ // find entry in the table
+-ResolutionErrorEntry* ResolutionErrorTable::find_entry(int index, unsigned int hash, 
+-						       constantPoolHandle pool, int cp_index)
++ResolutionErrorEntry* ResolutionErrorTable::find_entry(int index, unsigned int hash,
++                                                       constantPoolHandle pool, int cp_index)
+ {
+   assert_locked_or_safepoint(SystemDictionary_lock);
+ 
+@@ -56,13 +53,13 @@
+ }
+ 
+ // create new error entry
+-ResolutionErrorEntry* ResolutionErrorTable::new_entry(int hash, constantPoolOop pool, 
+-						      int cp_index, symbolOop error)
+-{   
++ResolutionErrorEntry* ResolutionErrorTable::new_entry(int hash, constantPoolOop pool,
++                                                      int cp_index, symbolOop error)
++{
+   ResolutionErrorEntry* entry = (ResolutionErrorEntry*)Hashtable::new_entry(hash, pool);
+   entry->set_cp_index(cp_index);
+   entry->set_error(error);
+-  
++
+   return entry;
+ }
+ 
+@@ -74,8 +71,8 @@
+ // GC support
+ void ResolutionErrorTable::oops_do(OopClosure* f) {
+   for (int i = 0; i < table_size(); i++) {
+-    for (ResolutionErrorEntry* probe = bucket(i); 
+-                           probe != NULL; 
++    for (ResolutionErrorEntry* probe = bucket(i);
++                           probe != NULL;
+                            probe = probe->next()) {
+       assert(probe->pool() != (constantPoolOop)NULL, "resolution error table is corrupt");
+       assert(probe->error() != (symbolOop)NULL, "resolution error table is corrupt");
+@@ -94,12 +91,12 @@
+ // decide when the entry can be purged.
+ void ResolutionErrorTable::always_strong_classes_do(OopClosure* blk) {
+   for (int i = 0; i < table_size(); i++) {
+-    for (ResolutionErrorEntry* probe = bucket(i); 
+-                           probe != NULL; 
++    for (ResolutionErrorEntry* probe = bucket(i);
++                           probe != NULL;
+                            probe = probe->next()) {
+       assert(probe->error() != (symbolOop)NULL, "resolution error table is corrupt");
+       blk->do_oop((oop*)probe->error_addr());
+-    }	
++    }
+   }
+ }
+ 
+@@ -112,13 +109,11 @@
+       assert(entry->pool() != (constantPoolOop)NULL, "resolution error table is corrupt");
+       constantPoolOop pool = entry->pool();
+       if (is_alive->do_object_b(pool)) {
+-	p = entry->next_addr();
++        p = entry->next_addr();
+       } else {
+-	*p = entry->next();
+-	free_entry(entry);
++        *p = entry->next();
++        free_entry(entry);
+       }
+     }
+   }
+ }
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/resolutionErrors.hpp openjdk/hotspot/src/share/vm/classfile/resolutionErrors.hpp
+--- openjdk6/hotspot/src/share/vm/classfile/resolutionErrors.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/resolutionErrors.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)resolutionErrors.hpp	1.6 07/05/05 17:05:54 JVM"
+-#endif
+ /*
+  * Copyright 2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class ResolutionErrorEntry;
+@@ -48,14 +45,14 @@
+   void add_entry(int index, ResolutionErrorEntry* new_entry) {
+     Hashtable::add_entry(index, (HashtableEntry*)new_entry);
+   }
+-  
++
+   void add_entry(int index, unsigned int hash,
+-		 constantPoolHandle pool, int which, symbolHandle error);
+-		 
++                 constantPoolHandle pool, int which, symbolHandle error);
++
+ 
+   // find error given the constant pool and constant pool index
+-  ResolutionErrorEntry* find_entry(int index, unsigned int hash, 
+-				   constantPoolHandle pool, int cp_index);
++  ResolutionErrorEntry* find_entry(int index, unsigned int hash,
++                                   constantPoolHandle pool, int cp_index);
+ 
+ 
+   unsigned int compute_hash(constantPoolHandle pool, int cp_index) {
+@@ -63,9 +60,9 @@
+   }
+ 
+   // purges unloaded entries from the table
+-  void purge_resolution_errors(BoolObjectClosure* is_alive);	
+- 
+-  // this table keeps symbolOops alive 
++  void purge_resolution_errors(BoolObjectClosure* is_alive);
++
++  // this table keeps symbolOops alive
+   void always_strong_classes_do(OopClosure* blk);
+ 
+   // GC support.
+@@ -75,19 +72,19 @@
+ 
+ class ResolutionErrorEntry : public HashtableEntry {
+  private:
+-  int		    _cp_index;
+-  symbolOop	    _error;
++  int               _cp_index;
++  symbolOop         _error;
+ 
+  public:
+-  constantPoolOop    pool() const 		{ return (constantPoolOop)literal(); }
+-  constantPoolOop*   pool_addr()  		{ return (constantPoolOop*)literal_addr(); }
++  constantPoolOop    pool() const               { return (constantPoolOop)literal(); }
++  constantPoolOop*   pool_addr()                { return (constantPoolOop*)literal_addr(); }
+ 
+-  int		     cp_index() const		{ return _cp_index; }
+-  void		     set_cp_index(int cp_index) { _cp_index = cp_index; }
++  int                cp_index() const           { return _cp_index; }
++  void               set_cp_index(int cp_index) { _cp_index = cp_index; }
+ 
+-  symbolOop          error() const 		{ return _error; }
+-  void		     set_error(symbolOop e)	{ _error = e; }
+-  symbolOop*         error_addr()		{ return &_error; }
++  symbolOop          error() const              { return _error; }
++  void               set_error(symbolOop e)     { _error = e; }
++  symbolOop*         error_addr()               { return &_error; }
+ 
+   ResolutionErrorEntry* next() const {
+     return (ResolutionErrorEntry*)HashtableEntry::next();
+@@ -100,4 +97,3 @@
+   // GC support
+   void oops_do(OopClosure* blk);
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/stackMapFrame.cpp openjdk/hotspot/src/share/vm/classfile/stackMapFrame.cpp
+--- openjdk6/hotspot/src/share/vm/classfile/stackMapFrame.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/stackMapFrame.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)stackMapFrame.cpp	1.24 07/05/05 17:06:57 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,14 +19,14 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_stackMapFrame.cpp.incl"
+ 
+-StackMapFrame::StackMapFrame(u2 max_locals, u2 max_stack, ClassVerifier* v) : 
+-                      _offset(0), _locals_size(0), _stack_size(0), _flags(0), 
++StackMapFrame::StackMapFrame(u2 max_locals, u2 max_stack, ClassVerifier* v) :
++                      _offset(0), _locals_size(0), _stack_size(0), _flags(0),
+                       _max_locals(max_locals), _max_stack(max_stack),
+                       _verifier(v) {
+   Thread* thr = v->thread();
+@@ -41,7 +38,7 @@
+   }
+   for(i = 0; i < max_stack; i++) {
+     _stack[i] = VerificationType::bogus_type();
+-  }  
++  }
+ }
+ 
+ StackMapFrame* StackMapFrame::frame_in_exception_handler(u1 flags) {
+@@ -82,7 +79,7 @@
+   if (old_object == VerificationType::uninitialized_this_type()) {
+     // "this" has been initialized - reset flags
+     _flags = 0;
+-  } 
++  }
+ }
+ 
+ VerificationType StackMapFrame::set_locals_from_arg(
+@@ -100,12 +97,12 @@
+     } else {
+       _locals[0] = thisKlass;
+     }
+-  } 
+-  
++  }
++
+   // local num may be greater than size of parameters because long/double occupies two slots
+   while(!ss.at_return_type()) {
+     init_local_num += _verifier->change_sig_to_verificationType(
+-      &ss, &_locals[init_local_num], 
++      &ss, &_locals[init_local_num],
+       CHECK_VERIFY_(verifier(), VerificationType::bogus_type()));
+     ss.next();
+   }
+@@ -134,7 +131,7 @@
+ }
+ 
+ void StackMapFrame::copy_locals(const StackMapFrame* src) {
+-  int32_t len = src->locals_size() < _locals_size ? 
++  int32_t len = src->locals_size() < _locals_size ?
+     src->locals_size() : _locals_size;
+   for (int32_t i = 0; i < len; i++) {
+     _locals[i] = src->locals()[i];
+@@ -142,7 +139,7 @@
+ }
+ 
+ void StackMapFrame::copy_stack(const StackMapFrame* src) {
+-  int32_t len = src->stack_size() < _stack_size ? 
++  int32_t len = src->stack_size() < _stack_size ?
+     src->stack_size() : _stack_size;
+   for (int32_t i = 0; i < len; i++) {
+     _stack[i] = src->stack()[i];
+@@ -197,9 +194,9 @@
+     int32_t index, VerificationType type, TRAPS) {
+   if (index >= _max_locals) {
+     verifier()->verify_error(_offset, "Local variable table overflow");
+-    return VerificationType::bogus_type(); 
++    return VerificationType::bogus_type();
+   }
+-  bool subtype = type.is_assignable_from(_locals[index], 
++  bool subtype = type.is_assignable_from(_locals[index],
+     verifier()->current_class(), CHECK_(VerificationType::bogus_type()));
+   if (!subtype) {
+     verifier()->verify_error(_offset, "Bad local variable type");
+@@ -247,7 +244,7 @@
+   if (index >= _locals_size) {
+ #ifdef ASSERT
+     for (int i=_locals_size; i<index; i++) {
+-      assert(_locals[i] == VerificationType::bogus_type(), 
++      assert(_locals[i] == VerificationType::bogus_type(),
+              "holes must be bogus type");
+     }
+ #endif
+@@ -278,7 +275,7 @@
+   if (index >= _locals_size - 1) {
+ #ifdef ASSERT
+     for (int i=_locals_size; i<index; i++) {
+-      assert(_locals[i] == VerificationType::bogus_type(), 
++      assert(_locals[i] == VerificationType::bogus_type(),
+              "holes must be bogus type");
+     }
+ #endif
+@@ -304,4 +301,3 @@
+ }
+ 
+ #endif
+-
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/stackMapFrame.hpp openjdk/hotspot/src/share/vm/classfile/stackMapFrame.hpp
+--- openjdk6/hotspot/src/share/vm/classfile/stackMapFrame.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/stackMapFrame.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)stackMapFrame.hpp	1.20 07/05/05 17:06:57 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A StackMapFrame represents one frame in the stack map attribute.
+@@ -35,9 +32,9 @@
+  private:
+   int32_t _offset;
+ 
+-  // See comment in StackMapTable about _frame_count about why these 
++  // See comment in StackMapTable about _frame_count about why these
+   // fields are int32_t instead of u2.
+-  int32_t _locals_size;  // number of valid type elements in _locals 
++  int32_t _locals_size;  // number of valid type elements in _locals
+   int32_t _stack_size;   // number of valid type elements in _stack
+ 
+   int32_t _max_locals;
+@@ -52,7 +49,7 @@
+  public:
+   // constructors
+ 
+-  // This constructor is used by the type checker to allocate frames 
++  // This constructor is used by the type checker to allocate frames
+   // in type state, which have _max_locals and _max_stack array elements
+   // in _locals and _stack.
+   StackMapFrame(u2 max_locals, u2 max_stack, ClassVerifier* verifier);
+@@ -62,7 +59,7 @@
+   StackMapFrame(int32_t offset,
+                 u1 flags,
+                 u2 locals_size,
+-                u2 stack_size, 
++                u2 stack_size,
+                 u2 max_locals,
+                 u2 max_stack,
+                 VerificationType* locals,
+@@ -170,7 +167,7 @@
+     if (_stack_size != 0) {
+       VerificationType top = _stack[_stack_size - 1];
+       bool subtype = type.is_assignable_from(
+-        top, verifier()->current_class(), 
++        top, verifier()->current_class(),
+         CHECK_(VerificationType::bogus_type()));
+       if (subtype) {
+         _stack_size --;
+@@ -227,4 +224,3 @@
+   // Debugging
+   void print() const PRODUCT_RETURN;
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/stackMapTable.cpp openjdk/hotspot/src/share/vm/classfile/stackMapTable.cpp
+--- openjdk6/hotspot/src/share/vm/classfile/stackMapTable.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/stackMapTable.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)stackMapTable.cpp	1.28 07/05/05 17:06:53 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -39,7 +36,7 @@
+     StackMapFrame* pre_frame = init_frame;
+     for (int32_t i = 0; i < _frame_count; i++) {
+       StackMapFrame* frame = reader->next(
+-        pre_frame, i == 0, max_locals, max_stack, 
++        pre_frame, i == 0, max_locals, max_stack,
+         CHECK_VERIFY(pre_frame->verifier()));
+       _frame_array[i] = frame;
+       int offset = frame->offset();
+@@ -65,12 +62,12 @@
+ }
+ 
+ bool StackMapTable::match_stackmap(
+-    StackMapFrame* frame, int32_t target, 
++    StackMapFrame* frame, int32_t target,
+     bool match, bool update, TRAPS) const {
+   int index = get_index_from_offset(target);
+ 
+   return match_stackmap(
+-    frame, target, index, match, 
++    frame, target, index, match,
+     update, CHECK_VERIFY_(frame->verifier(), false));
+ }
+ 
+@@ -80,9 +77,9 @@
+ // The values of match and update are:                  _match__update_
+ //
+ // checking a branch target/exception handler:           true   false
+-// linear bytecode verification following an 
++// linear bytecode verification following an
+ // unconditional branch:                                 false  true
+-// linear bytecode verification not following an 
++// linear bytecode verification not following an
+ // unconditional branch:                                 true   true
+ bool StackMapTable::match_stackmap(
+     StackMapFrame* frame, int32_t target, int32_t frame_index,
+@@ -101,7 +98,7 @@
+     result = frame->is_assignable_to(
+       stackmap_frame, CHECK_VERIFY_(frame->verifier(), false));
+   }
+-  if (update) { 
++  if (update) {
+     // Use the frame in stackmap table as current frame
+     int lsize = stackmap_frame->locals_size();
+     int ssize = stackmap_frame->stack_size();
+@@ -124,7 +121,7 @@
+     frame, target, true, false, CHECK_VERIFY(frame->verifier()));
+   if (!match || (target < 0 || target >= _code_length)) {
+     frame->verifier()->verify_error(frame->offset(),
+-      "Inconsistent stackmap frames at branch target %d", target); 
++      "Inconsistent stackmap frames at branch target %d", target);
+     return;
+   }
+   // check if uninitialized objects exist on backward branches
+@@ -135,7 +132,7 @@
+     const StackMapFrame* frame, int32_t target, TRAPS) const {
+   if (frame->offset() > target && frame->has_new_object()) {
+     frame->verifier()->verify_error(frame->offset(),
+-      "Uninitialized object exists on backward branch %d", target); 
++      "Uninitialized object exists on backward branch %d", target);
+     return;
+   }
+ }
+@@ -231,7 +228,7 @@
+       frame->copy_locals(pre_frame);
+     }
+     return frame;
+-  } 
++  }
+   if (frame_type < 128) {
+     // same_locals_1_stack_item_frame
+     if (first) {
+@@ -247,7 +244,7 @@
+     }
+     VerificationType* stack = NEW_RESOURCE_ARRAY_IN_THREAD(
+       THREAD, VerificationType, 2);
+-    u2 stack_size = 1; 
++    u2 stack_size = 1;
+     stack[0] = parse_verification_type(NULL, CHECK_VERIFY_(_verifier, NULL));
+     if (stack[0].is_category2()) {
+       stack[1] = stack[0].to_category2_2nd();
+@@ -287,7 +284,7 @@
+     }
+     VerificationType* stack = NEW_RESOURCE_ARRAY_IN_THREAD(
+       THREAD, VerificationType, 2);
+-    u2 stack_size = 1; 
++    u2 stack_size = 1;
+     stack[0] = parse_verification_type(NULL, CHECK_VERIFY_(_verifier, NULL));
+     if (stack[0].is_category2()) {
+       stack[1] = stack[0].to_category2_2nd();
+@@ -337,7 +334,7 @@
+       offset = pre_frame->offset() + offset_delta + 1;
+     }
+     frame = new StackMapFrame(
+-      offset, flags, new_length, 0, max_locals, max_stack, 
++      offset, flags, new_length, 0, max_locals, max_stack,
+       locals, NULL, _verifier);
+     if (first && locals != NULL) {
+       frame->copy_locals(pre_frame);
+@@ -371,7 +368,7 @@
+       offset = pre_frame->offset() + offset_delta + 1;
+     }
+     frame = new StackMapFrame(
+-      offset, flags, real_length, 0, max_locals, 
++      offset, flags, real_length, 0, max_locals,
+       max_stack, locals, NULL, _verifier);
+     return frame;
+   }
+@@ -388,7 +385,7 @@
+     for (i=0; i<locals_size; i++) {
+       locals[real_locals_size] = parse_verification_type(&flags, THREAD);
+       if (locals[real_locals_size].is_category2()) {
+-        locals[real_locals_size + 1] = 
++        locals[real_locals_size + 1] =
+           locals[real_locals_size].to_category2_2nd();
+         ++real_locals_size;
+       }
+@@ -428,4 +425,3 @@
+     "reserved frame type", CHECK_VERIFY_(pre_frame->verifier(), NULL));
+   return NULL;
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/stackMapTable.hpp openjdk/hotspot/src/share/vm/classfile/stackMapTable.hpp
+--- openjdk6/hotspot/src/share/vm/classfile/stackMapTable.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/stackMapTable.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)stackMapTable.hpp	1.21 07/05/05 17:06:57 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class StackMapReader;
+@@ -35,7 +32,7 @@
+   // be difficult to detect/recover from overflow or underflow conditions.
+   // Widening the type and making it signed will help detect these.
+   int32_t              _code_length;
+-  int32_t              _frame_count;     // Stackmap frame count 
++  int32_t              _frame_count;     // Stackmap frame count
+   StackMapFrame**       _frame_array;
+ 
+  public:
+@@ -44,28 +41,28 @@
+                 char* code_data, int code_len, TRAPS);
+ 
+   inline int32_t get_frame_count() const { return _frame_count; }
+-  inline int get_offset(int index) const { 
+-    return _frame_array[index]->offset(); 
++  inline int get_offset(int index) const {
++    return _frame_array[index]->offset();
+   }
+ 
+   // Match and/or update current_frame to the frame in stackmap table with
+-  // specified offset. Return true if the two frames match. 
++  // specified offset. Return true if the two frames match.
+   bool match_stackmap(
+-    StackMapFrame* current_frame, int32_t offset, 
++    StackMapFrame* current_frame, int32_t offset,
+     bool match, bool update, TRAPS) const;
+   // Match and/or update current_frame to the frame in stackmap table with
+-  // specified offset and frame index. Return true if the two frames match. 
++  // specified offset and frame index. Return true if the two frames match.
+   bool match_stackmap(
+     StackMapFrame* current_frame, int32_t offset, int32_t frame_index,
+     bool match, bool update, TRAPS) const;
+ 
+-  // Check jump instructions. Make sure there are no uninitialized 
++  // Check jump instructions. Make sure there are no uninitialized
+   // instances on backward branch.
+   void check_jump_target(StackMapFrame* frame, int32_t target, TRAPS) const;
+ 
+   // The following methods are only used inside this class.
+ 
+-  // Returns the frame array index where the frame with offset is stored. 
++  // Returns the frame array index where the frame with offset is stored.
+   int get_index_from_offset(int32_t offset) const;
+ 
+   // Make sure that no uninitialized object exists on a backward branch.
+@@ -81,7 +78,7 @@
+   typeArrayHandle _data;
+   int _index;
+  public:
+-  StackMapStream(typeArrayHandle ah) 
++  StackMapStream(typeArrayHandle ah)
+     : _data(ah), _index(0) {
+   }
+   u1 get_u1(TRAPS) {
+@@ -106,7 +103,7 @@
+ 
+ class StackMapReader : StackObj {
+  private:
+-  // information about the class and method 
++  // information about the class and method
+   constantPoolHandle  _cp;
+   ClassVerifier* _verifier;
+   StackMapStream* _stream;
+@@ -114,7 +111,7 @@
+   int32_t _code_length;
+ 
+   // information get from the attribute
+-  int32_t  _frame_count;       // frame count 
++  int32_t  _frame_count;       // frame count
+ 
+   int32_t chop(VerificationType* locals, int32_t length, int32_t chops);
+   VerificationType parse_verification_type(u1* flags, TRAPS);
+@@ -123,7 +120,7 @@
+     if (size < 0 || size > max_size) {
+       // Since this error could be caused by someone rewriting the method
+       // but not knowing to update the stackmap data, we call the
+-      // verifier's error method, which may not throw an exception and 
++      // verifier's error method, which may not throw an exception and
+       // fail over to the old verifier instead.
+       _verifier->class_format_error(
+         "StackMapTable format error: bad type array size");
+@@ -152,7 +149,7 @@
+     }
+   }
+ 
+-  inline int32_t get_frame_count() const		{ return _frame_count; }
++  inline int32_t get_frame_count() const                { return _frame_count; }
+   StackMapFrame* next(StackMapFrame* pre_frame, bool first,
+                       u2 max_locals, u2 max_stack, TRAPS);
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/symbolTable.cpp openjdk/hotspot/src/share/vm/classfile/symbolTable.cpp
+--- openjdk6/hotspot/src/share/vm/classfile/symbolTable.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/symbolTable.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)symbolTable.cpp	1.69 07/05/05 17:05:55 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -55,7 +52,7 @@
+ // entries in the symbol table during normal execution (only during
+ // safepoints).
+ 
+-symbolOop SymbolTable::lookup(const char* name, int len, TRAPS) {  
++symbolOop SymbolTable::lookup(const char* name, int len, TRAPS) {
+   unsigned int hashValue = hash_symbol(name, len);
+   int index = the_table()->hash_to_index(hashValue);
+ 
+@@ -63,7 +60,7 @@
+ 
+   // Found
+   if (s != NULL) return s;
+-  
++
+   // Otherwise, add the symbol to the table
+   return the_table()->basic_add(index, (u1*)name, len, hashValue, CHECK_NULL);
+ }
+@@ -81,11 +78,11 @@
+     hashValue = hash_symbol(name, len);
+     index = the_table()->hash_to_index(hashValue);
+     symbolOop s = the_table()->lookup(index, name, len, hashValue);
+-  
++
+     // Found
+     if (s != NULL) return s;
+   }
+-   
++
+   // Otherwise, add the symbol to the table. Copy to a C string first.
+   char stack_buf[128];
+   ResourceMark rm(THREAD);
+@@ -105,7 +102,7 @@
+ }
+ 
+ symbolOop SymbolTable::lookup_only(const char* name, int len,
+-                                   unsigned int& hash) {  
++                                   unsigned int& hash) {
+   hash = hash_symbol(name, len);
+   int index = the_table()->hash_to_index(hash);
+ 
+@@ -139,7 +136,7 @@
+ 
+ 
+ symbolOop SymbolTable::basic_add(int index, u1 *name, int len,
+-                                 unsigned int hashValue, TRAPS) {  
++                                 unsigned int hashValue, TRAPS) {
+   assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
+          "proposed name of symbol must be stable");
+ 
+@@ -162,7 +159,7 @@
+     // A race occurred and another thread introduced the symbol; this one
+     // will be dropped and collected.
+     return test;
+-  }  
++  }
+ 
+   HashtableEntry* entry = new_entry(hashValue, sym());
+   add_entry(index, entry);
+@@ -238,7 +235,7 @@
+   const int results_length = 100;
+   int results[results_length];
+   int i,j;
+-  
++
+   // initialize results to zero
+   for (j = 0; j < results_length; j++) {
+     results[j] = 0;
+@@ -264,7 +261,7 @@
+   tty->print_cr("%8s %5d", "Total  ", total);
+   tty->print_cr("%8s %5d", "Maximum", max_symbols);
+   tty->print_cr("%8s %3.2f", "Average",
+-	  ((float) total / (float) the_table()->table_size()));
++          ((float) total / (float) the_table()->table_size()));
+   tty->print_cr("%s", "Histogram:");
+   tty->print_cr(" %s %29s", "Length", "Number chains that length");
+   for (i = 0; i < results_length; i++) {
+@@ -272,7 +269,7 @@
+       tty->print_cr("%6d %10d", i, results[i]);
+     }
+   }
+-  int line_length = 70;    
++  int line_length = 70;
+   tty->print_cr("%s %30s", " Length", "Number chains that length");
+   for (i = 0; i < results_length; i++) {
+     if (results[i] > 0) {
+@@ -285,9 +282,9 @@
+       }
+       tty->cr();
+     }
+-  }  
++  }
+   tty->print_cr(" %s %d: %d\n", "Number chains longer than",
+-	            results_length, out_of_range);
++                    results_length, out_of_range);
+ }
+ 
+ #endif // PRODUCT
+@@ -376,7 +373,7 @@
+ 
+ 
+ oop StringTable::basic_add(int index, Handle string_or_null, jchar* name,
+-                           int len, unsigned int hashValue, TRAPS) {  
++                           int len, unsigned int hashValue, TRAPS) {
+   debug_only(StableMemoryChecker smc(name, len * sizeof(name[0])));
+   assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
+          "proposed name of symbol must be stable");
+@@ -402,7 +399,7 @@
+   if (test != NULL) {
+     // Entry already added
+     return test;
+-  }  
++  }
+ 
+   HashtableEntry* entry = new_entry(hashValue, string());
+   add_entry(index, entry);
+@@ -428,10 +425,10 @@
+ 
+   // Found
+   if (string != NULL) return string;
+-  
++
+   // Otherwise, add the symbol to the table
+   return the_table()->basic_add(index, string_or_null, name, len,
+-                                hashValue, CHECK_NULL);  
++                                hashValue, CHECK_NULL);
+ }
+ 
+ oop StringTable::intern(symbolOop symbol, TRAPS) {
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/symbolTable.hpp openjdk/hotspot/src/share/vm/classfile/symbolTable.hpp
+--- openjdk6/hotspot/src/share/vm/classfile/symbolTable.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/symbolTable.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)symbolTable.hpp	1.48 07/05/05 17:05:56 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The symbol table holds all symbolOops and corresponding interned strings.
+@@ -45,7 +42,7 @@
+   // The symbol table
+   static SymbolTable* _the_table;
+ 
+-  // Adding elements    
++  // Adding elements
+   symbolOop basic_add(int index, u1* name, int len,
+                       unsigned int hashValue, TRAPS);
+   bool basic_add(constantPoolHandle cp, int names_count,
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/systemDictionary.cpp openjdk/hotspot/src/share/vm/classfile/systemDictionary.cpp
+--- openjdk6/hotspot/src/share/vm/classfile/systemDictionary.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/systemDictionary.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)systemDictionary.cpp	1.361 07/09/01 18:23:02 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -95,6 +92,7 @@
+ klassOop    SystemDictionary::_java_nio_Buffer_klass      =  NULL;
+ 
+ klassOop    SystemDictionary::_sun_misc_AtomicLongCSImpl_klass = NULL;
++klassOop    SystemDictionary::_sun_jkernel_DownloadManager_klass  = NULL;
+ 
+ klassOop    SystemDictionary::_boolean_klass              =  NULL;
+ klassOop    SystemDictionary::_char_klass                 =  NULL;
+@@ -106,17 +104,6 @@
+ klassOop    SystemDictionary::_long_klass                 =  NULL;
+ klassOop    SystemDictionary::_box_klasses[T_VOID+1]      =  { NULL /*, NULL...*/ };
+ 
+-oop         SystemDictionary::_int_mirror                 =  NULL;
+-oop         SystemDictionary::_float_mirror               =  NULL;
+-oop         SystemDictionary::_double_mirror              =  NULL;
+-oop         SystemDictionary::_byte_mirror                =  NULL;
+-oop         SystemDictionary::_bool_mirror                =  NULL;
+-oop         SystemDictionary::_char_mirror                =  NULL;
+-oop         SystemDictionary::_long_mirror                =  NULL;
+-oop         SystemDictionary::_short_mirror               =  NULL;
+-oop         SystemDictionary::_void_mirror                =  NULL;
+-oop         SystemDictionary::_mirrors[T_VOID+1]          =  { NULL /*, NULL...*/ };
+-
+ oop         SystemDictionary::_java_system_loader         =  NULL;
+ 
+ bool        SystemDictionary::_has_loadClassInternal      =  false;
+@@ -134,15 +121,15 @@
+ }
+ 
+ void SystemDictionary::compute_java_system_loader(TRAPS) {
+-  KlassHandle system_klass(THREAD, _classloader_klass);    
++  KlassHandle system_klass(THREAD, _classloader_klass);
+   JavaValue result(T_OBJECT);
+-  JavaCalls::call_static(&result, 
++  JavaCalls::call_static(&result,
+                          KlassHandle(THREAD, _classloader_klass),
+                          vmSymbolHandles::getSystemClassLoader_name(),
+                          vmSymbolHandles::void_classloader_signature(),
+                          CHECK);
+-    
+-  _java_system_loader = (oop)result.get_jobject();    
++
++  _java_system_loader = (oop)result.get_jobject();
+ }
+ 
+ 
+@@ -169,7 +156,7 @@
+ 
+ // Forwards to resolve_or_null
+ 
+-klassOop SystemDictionary::resolve_or_fail(symbolHandle class_name, Handle class_loader, Handle protection_domain, bool throw_error, TRAPS) {  
++klassOop SystemDictionary::resolve_or_fail(symbolHandle class_name, Handle class_loader, Handle protection_domain, bool throw_error, TRAPS) {
+   klassOop klass = resolve_or_null(class_name, class_loader, protection_domain, THREAD);
+   if (HAS_PENDING_EXCEPTION || klass == NULL) {
+     KlassHandle k_h(THREAD, klass);
+@@ -192,7 +179,7 @@
+       CLEAR_PENDING_EXCEPTION;
+       THROW_MSG_CAUSE_0(vmSymbols::java_lang_NoClassDefFoundError(), class_name->as_C_string(), e);
+     } else {
+-      return NULL; 
++      return NULL;
+     }
+   }
+   // Class not found, throw appropriate error or exception depending on value of throw_error
+@@ -200,11 +187,11 @@
+     ResourceMark rm(THREAD);
+     if (throw_error) {
+       THROW_MSG_0(vmSymbols::java_lang_NoClassDefFoundError(), class_name->as_C_string());
+-    } else {      
+-      THROW_MSG_0(vmSymbols::java_lang_ClassNotFoundException(), class_name->as_C_string());      
++    } else {
++      THROW_MSG_0(vmSymbols::java_lang_ClassNotFoundException(), class_name->as_C_string());
+     }
+   }
+-  return (klassOop)klass_h(); 
++  return (klassOop)klass_h();
+ }
+ 
+ 
+@@ -217,7 +204,7 @@
+ 
+ // Forwards to resolve_instance_class_or_null
+ 
+-klassOop SystemDictionary::resolve_or_null(symbolHandle class_name, Handle class_loader, Handle protection_domain, TRAPS) {  
++klassOop SystemDictionary::resolve_or_null(symbolHandle class_name, Handle class_loader, Handle protection_domain, TRAPS) {
+   assert(!THREAD->is_Compiler_thread(), "Can not load classes with the Compiler thread");
+   if (FieldType::is_array(class_name())) {
+     return resolve_array_class_or_null(class_name, class_loader, protection_domain, CHECK_NULL);
+@@ -226,32 +213,32 @@
+   }
+ }
+ 
+-klassOop SystemDictionary::resolve_or_null(symbolHandle class_name, TRAPS) {  
++klassOop SystemDictionary::resolve_or_null(symbolHandle class_name, TRAPS) {
+   return resolve_or_null(class_name, Handle(), Handle(), THREAD);
+ }
+ 
+ // Forwards to resolve_instance_class_or_null
+ 
+ klassOop SystemDictionary::resolve_array_class_or_null(symbolHandle class_name,
+-                                                       Handle class_loader, 
++                                                       Handle class_loader,
+                                                        Handle protection_domain,
+-                                                       TRAPS) {  
++                                                       TRAPS) {
+   assert(FieldType::is_array(class_name()), "must be array");
+   jint dimension;
+   symbolOop object_key;
+-  klassOop k = NULL;  
++  klassOop k = NULL;
+   // dimension and object_key are assigned as a side-effect of this call
+-  BasicType t = FieldType::get_array_info(class_name(), 
+-                                          &dimension, 
+-                                          &object_key, 
++  BasicType t = FieldType::get_array_info(class_name(),
++                                          &dimension,
++                                          &object_key,
+                                           CHECK_NULL);
+ 
+   if (t == T_OBJECT) {
+     symbolHandle h_key(THREAD, object_key);
+     // naked oop "k" is OK here -- we assign back into it
+-    k = SystemDictionary::resolve_instance_class_or_null(h_key, 
+-                                                         class_loader, 
+-                                                         protection_domain, 
++    k = SystemDictionary::resolve_instance_class_or_null(h_key,
++                                                         class_loader,
++                                                         protection_domain,
+                                                          CHECK_NULL);
+     if (k != NULL) {
+       k = Klass::cast(k)->array_klass(dimension, CHECK_NULL);
+@@ -266,7 +253,7 @@
+ 
+ // Must be called for any super-class or super-interface resolution
+ // during class definition to allow class circularity checking
+-// super-interface callers: 
++// super-interface callers:
+ //    parse_interfaces - for defineClass & jvmtiRedefineClasses
+ // super-class callers:
+ //   ClassFileParser - for defineClass & jvmtiRedefineClasses
+@@ -286,12 +273,12 @@
+ //    so it tries to load Super
+ //   3. If we load the class internally, or user classloader uses same thread
+ //      loadClassFromxxx or defineClass via parseClassFile Super ...
+-//      3.1 resolve_super_or_fail creates placeholder: T1, Super (super Base) 
++//      3.1 resolve_super_or_fail creates placeholder: T1, Super (super Base)
+ //      3.3 resolve_instance_class_or_null Base, finds placeholder for Base
+ //      3.4 calls resolve_super_or_fail Base
+ //      3.5 finds T1,Base -> throws class circularity
+ //OR 4. If T2 tries to resolve Super via defineClass Super ...
+-//      4.1 resolve_super_or_fail creates placeholder: T2, Super (super Base) 
++//      4.1 resolve_super_or_fail creates placeholder: T2, Super (super Base)
+ //      4.2 resolve_instance_class_or_null Base, finds placeholder for Base (super Super)
+ //      4.3 calls resolve_super_or_fail Super in parallel on own thread T2
+ //      4.4 finds T2, Super -> throws class circularity
+@@ -340,18 +327,18 @@
+     // see: nsk redefclass014 & java.lang.instrument Instrument032
+     if ((childk != NULL ) && (is_superclass) &&
+        ((quicksuperk = instanceKlass::cast(childk)->super()) != NULL) &&
+-      
+-         ((Klass::cast(quicksuperk)->name() == class_name()) && 
++
++         ((Klass::cast(quicksuperk)->name() == class_name()) &&
+             (Klass::cast(quicksuperk)->class_loader()  == class_loader()))) {
+            return quicksuperk;
+     } else {
+       PlaceholderEntry* probe = placeholders()->get_entry(p_index, p_hash, child_name, class_loader);
+       if (probe && probe->check_seen_thread(THREAD, PlaceholderTable::LOAD_SUPER)) {
+           throw_circularity_error = true;
+-      } 
++      }
+ 
+       // add placeholder entry even if error - callers will remove on error
+-      PlaceholderEntry* newprobe = placeholders()->find_and_add(p_index, p_hash, child_name, class_loader, PlaceholderTable::LOAD_SUPER, class_name, THREAD); 
++      PlaceholderEntry* newprobe = placeholders()->find_and_add(p_index, p_hash, child_name, class_loader, PlaceholderTable::LOAD_SUPER, class_name, THREAD);
+       if (throw_circularity_error) {
+          newprobe->remove_seen_thread(THREAD, PlaceholderTable::LOAD_SUPER);
+       }
+@@ -370,12 +357,12 @@
+                                                  class_loader,
+                                                  protection_domain,
+                                                  THREAD);
+-  
++
+   KlassHandle superk_h(THREAD, superk);
+-  
++
+   // Note: clean up of placeholders currently in callers of
+   // resolve_super_or_fail - either at update_dictionary time
+-  // or on error 
++  // or on error
+   {
+   MutexLocker mu(SystemDictionary_lock, THREAD);
+    PlaceholderEntry* probe = placeholders()->get_entry(p_index, p_hash, child_name, class_loader);
+@@ -407,7 +394,7 @@
+     tty->print(" - protection domain: "); protection_domain()->print_value_on(tty); tty->cr();
+     tty->print(" - loading:           "); klass()->print_value_on(tty);             tty->cr();
+   }
+-  
++
+   assert(class_loader() != NULL, "should not have non-null protection domain for null classloader");
+ 
+   KlassHandle system_loader(THREAD, SystemDictionary::classloader_klass());
+@@ -415,7 +402,7 @@
+                          class_loader,
+                          system_loader,
+                          vmSymbolHandles::checkPackageAccess_name(),
+-                         vmSymbolHandles::class_protectiondomain_signature(), 
++                         vmSymbolHandles::class_protectiondomain_signature(),
+                          Handle(THREAD, klass->java_mirror()),
+                          protection_domain,
+                          THREAD);
+@@ -429,8 +416,8 @@
+     tty->cr();
+   }
+ 
+-  if (HAS_PENDING_EXCEPTION) return; 
+-    
++  if (HAS_PENDING_EXCEPTION) return;
++
+   // If no exception has been thrown, we have validated the protection domain
+   // Insert the protection domain of the initiating class into the set.
+   {
+@@ -441,7 +428,7 @@
+     int d_index = dictionary()->hash_to_index(d_hash);
+ 
+     MutexLocker mu(SystemDictionary_lock, THREAD);
+-    { 
++    {
+       // Note that we have an entry, and entries can be deleted only during GC,
+       // so we cannot allow GC to occur while we're holding this entry.
+ 
+@@ -463,13 +450,13 @@
+ // Waits on SystemDictionary_lock to indicate placeholder table updated
+ // On return, caller must recheck placeholder table state
+ //
+-// We only get here if 
++// We only get here if
+ //  1) custom classLoader, i.e. not bootstrap classloader
+ //  2) UnsyncloadClass not set
+ //  3) custom classLoader has broken the class loader objectLock
+ //     so another thread got here in parallel
+ //
+-// lockObject must be held. 
++// lockObject must be held.
+ // Complicated dance due to lock ordering:
+ // Must first release the classloader object lock to
+ // allow initial definer to complete the class definition
+@@ -484,7 +471,7 @@
+ void SystemDictionary::double_lock_wait(Handle lockObject, TRAPS) {
+   assert_lock_strong(SystemDictionary_lock);
+ 
+-  bool calledholdinglock 
++  bool calledholdinglock
+       = ObjectSynchronizer::current_thread_holds_lock((JavaThread*)THREAD, lockObject);
+   assert(calledholdinglock,"must hold lock for notify");
+   assert(!UnsyncloadClass, "unexpected double_lock_wait");
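
The point of the complicated dance described above is lock ordering: the
waiter must give up the classloader object lock before blocking, or the
thread that is actually defining the class could never reacquire that lock
and finish. A minimal sketch of the release-then-wait shape using standard
C++ primitives in place of ObjectSynchronizer (all names illustrative):

    #include <condition_variable>
    #include <mutex>

    std::mutex loader_lock;        // stands in for the classloader object lock
    std::mutex dictionary_lock;    // stands in for SystemDictionary_lock
    std::condition_variable placeholders_changed;
    bool placeholder_updated = false;

    // The waiter enters holding the loader lock, as the assert above demands.
    void double_lock_wait(std::unique_lock<std::mutex>& loader) {
        std::unique_lock<std::mutex> dict(dictionary_lock);
        loader.unlock();           // let the initial definer make progress
        // Wait in a loop: wakeups can be spurious, and the caller must
        // recheck the placeholder table state after returning anyway.
        placeholders_changed.wait(dict, [] { return placeholder_updated; });
        dict.unlock();
        loader.lock();             // reacquire in the required order
    }

    // The defining thread flips the flag and notifies under the same mutex.
    void placeholder_changed_notify() {
        { std::lock_guard<std::mutex> g(dictionary_lock);
          placeholder_updated = true; }
        placeholders_changed.notify_all();
    }
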
+@@ -508,10 +495,10 @@
+ // to force placeholder entry creation for this class
+ // Caller must check for pending exception
+ // Returns non-null klassOop if other thread has completed load
+-// and we are done, 
++// and we are done,
+ // If return null klassOop and no pending exception, the caller must load the class
+ instanceKlassHandle SystemDictionary::handle_parallel_super_load(
+-    symbolHandle name, symbolHandle superclassname, Handle class_loader, 
++    symbolHandle name, symbolHandle superclassname, Handle class_loader,
+     Handle protection_domain, Handle lockObject, TRAPS) {
+ 
+   instanceKlassHandle nh = instanceKlassHandle(); // null Handle
+@@ -522,10 +509,10 @@
+ 
+   // superk is not used, resolve_super called for circularity check only
+   // This code is reached in two situations. One if this thread
+-  // is loading the same class twice (e.g. ClassCircularity, or 
++  // is loading the same class twice (e.g. ClassCircularity, or
+   // java.lang.instrument).
+   // The second is if another thread started the resolve_super first
+-  // and has not yet finished. 
++  // and has not yet finished.
+   // In both cases the original caller will clean up the placeholder
+   // entry on error.
+   klassOop superk = SystemDictionary::resolve_super_or_fail(name,
+@@ -555,7 +542,7 @@
+     } else {
+       return nh;
+     }
+-  } 
++  }
+ 
+   // must loop to both handle other placeholder updates
+   // and spurious notifications
+@@ -603,11 +590,11 @@
+ klassOop SystemDictionary::resolve_instance_class_or_null(symbolHandle class_name, Handle class_loader, Handle protection_domain, TRAPS) {
+   assert(class_name.not_null() && !FieldType::is_array(class_name()), "invalid class name");
+   // First check to see if we should remove wrapping L and ;
+-  symbolHandle name;    
++  symbolHandle name;
+   if (FieldType::is_obj(class_name())) {
+     ResourceMark rm(THREAD);
+     // Ignore wrapping L and ;.
+-    name = oopFactory::new_symbol_handle(class_name()->as_C_string() + 1, class_name()->utf8_length() - 2, CHECK_NULL);    
++    name = oopFactory::new_symbol_handle(class_name()->as_C_string() + 1, class_name()->utf8_length() - 2, CHECK_NULL);
+   } else {
+     name = class_name;
+   }
+@@ -656,8 +643,8 @@
+   PlaceholderEntry* placeholder;
+   symbolHandle superclassname;
+ 
+-  {           
+-    MutexLocker mu(SystemDictionary_lock, THREAD);  
++  {
++    MutexLocker mu(SystemDictionary_lock, THREAD);
+     klassOop check = find_class(d_index, d_hash, name, class_loader);
+     if (check != NULL) {
+       // Klass is already loaded, so just return it
+@@ -671,13 +658,13 @@
+            superclassname = symbolHandle(THREAD, placeholder->supername());
+            havesupername = true;
+          }
+-      } 
++      }
+     }
+   }
+ 
+   // If the class in is in the placeholder table, class loading is in progress
+   if (super_load_in_progress && havesupername==true) {
+-    k = SystemDictionary::handle_parallel_super_load(name, superclassname, 
++    k = SystemDictionary::handle_parallel_super_load(name, superclassname,
+         class_loader, protection_domain, lockObject, THREAD);
+     if (HAS_PENDING_EXCEPTION) {
+       return NULL;
+@@ -688,14 +675,14 @@
+   }
+ 
+   if (!class_has_been_loaded) {
+-  
++
+     // add placeholder entry to record loading instance class
+     // Five cases:
+     // All cases need to prevent modifying bootclasssearchpath
+     // in parallel with a classload of same classname
+     // case 1. traditional classloaders that rely on the classloader object lock
+     //   - no other need for LOAD_INSTANCE
+-    // case 2. traditional classloaders that break the classloader object lock 
++    // case 2. traditional classloaders that break the classloader object lock
+     //    as a deadlock workaround. Detection of this case requires that
+     //    this check is done while holding the classloader object lock,
+     //    and that lock is still held when calling classloader's loadClass.
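
All five cases sketched above (the enumeration continues in the next hunk)
reduce to one invariant: before a thread either loads a class itself or waits
for another thread to, it publishes a LOAD_INSTANCE placeholder so concurrent
loads of the same (name, loader) pair can find each other. A sketch of that
token with the "callers will remove on error" rule expressed as RAII; the
types are illustrative, not HotSpot's:

    #include <mutex>
    #include <set>
    #include <string>
    #include <utility>

    // (class name, loader id) pairs currently being loaded.
    typedef std::pair<std::string, int> Key;
    std::mutex dict_lock;          // stands in for SystemDictionary_lock
    std::set<Key> load_instance;   // the placeholder entries

    // Registers the placeholder on construction and removes it on scope
    // exit, whether the load succeeded or threw.
    class LoadInstanceToken {
        Key key_;
    public:
        explicit LoadInstanceToken(Key key) : key_(std::move(key)) {
            std::lock_guard<std::mutex> g(dict_lock);
            load_instance.insert(key_);
        }
        ~LoadInstanceToken() {
            std::lock_guard<std::mutex> g(dict_lock);
            load_instance.erase(key_);
        }
    };

The real table additionally records which threads hold the token (the
find_and_add / remove_seen_thread calls above), so the UnsyncloadClass case
can let competing threads race while still detecting each other.
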
+@@ -723,7 +710,7 @@
+           } else {
+             // case 1: traditional: should never see load_in_progress.
+             while (!class_has_been_loaded && oldprobe && oldprobe->instance_load_in_progress()) {
+-     
++
+               // case 4: bootstrap classloader: prevent futile classloading,
+               // wait on first requestor
+               if (class_loader.is_null()) {
+@@ -742,22 +729,22 @@
+               }
+               // check if other thread failed to load and cleaned up
+               oldprobe = placeholders()->get_entry(p_index, p_hash, name, class_loader);
+-            } 
+-          } 
++            }
++          }
+         }
+       }
+-      // All cases: add LOAD_INSTANCE 
++      // All cases: add LOAD_INSTANCE
+       // case 3: UnsyncloadClass: allow competing threads to try
+       // LOAD_INSTANCE in parallel
+       // add placeholder entry even if error - callers will remove on error
+       if (!class_has_been_loaded) {
+-        PlaceholderEntry* newprobe = placeholders()->find_and_add(p_index, p_hash, name, class_loader, PlaceholderTable::LOAD_INSTANCE, nullsymbolHandle, THREAD); 
++        PlaceholderEntry* newprobe = placeholders()->find_and_add(p_index, p_hash, name, class_loader, PlaceholderTable::LOAD_INSTANCE, nullsymbolHandle, THREAD);
+         if (throw_circularity_error) {
+           newprobe->remove_seen_thread(THREAD, PlaceholderTable::LOAD_INSTANCE);
+         }
+         // For class loaders that do not acquire the classloader object lock,
+         // if they did not catch another thread holding LOAD_INSTANCE,
+-        // need a check analogous to the acquire ObjectLocker/find_class 
++        // need a check analogous to the acquire ObjectLocker/find_class
+         // i.e. now that we hold the LOAD_INSTANCE token on loading this class/CL
+         // one final check if the load has already completed
+         klassOop check = find_class(d_index, d_hash, name, class_loader);
+@@ -783,7 +770,7 @@
+       // In custom class loaders, the usual findClass calls
+       // findLoadedClass, which directly searches  the SystemDictionary, then
+       // defineClass. If these are not atomic with respect to other threads,
+-      // the findLoadedClass can fail, but the defineClass can get a 
++      // the findLoadedClass can fail, but the defineClass can get a
+       // LinkageError:: duplicate class definition.
+       // If they got a linkageError, check if a parallel class load succeeded.
+       // If it did, then for bytecode resolution the specification requires
+@@ -793,7 +780,7 @@
+       // Should not get here for classloaders that support parallelism
+       // with the new cleaner mechanism, e.g. bootstrap classloader
+       if (UnsyncloadClass || (class_loader.is_null())) {
+-        if (k.is_null() && HAS_PENDING_EXCEPTION 
++        if (k.is_null() && HAS_PENDING_EXCEPTION
+           && PENDING_EXCEPTION->is_a(SystemDictionary::linkageError_klass())) {
+           MutexLocker mu(SystemDictionary_lock, THREAD);
+           klassOop check = find_class(d_index, d_hash, name, class_loader);
+@@ -808,9 +795,9 @@
+ 
+       // clean up placeholder entries for success or error
+       // This cleans up LOAD_INSTANCE entries
+-      // It also cleans up LOAD_SUPER entries on errors from 
++      // It also cleans up LOAD_SUPER entries on errors from
+       // calling load_instance_class
+-      { 
++      {
+         MutexLocker mu(SystemDictionary_lock, THREAD);
+         PlaceholderEntry* probe = placeholders()->get_entry(p_index, p_hash, name, class_loader);
+         if (probe != NULL) {
+@@ -822,7 +809,7 @@
+ 
+       // If everything was OK (no exceptions, no null return value), and
+       // class_loader is NOT the defining loader, do a little more bookkeeping.
+-      if (!HAS_PENDING_EXCEPTION && !k.is_null() && 
++      if (!HAS_PENDING_EXCEPTION && !k.is_null() &&
+         k->class_loader() != class_loader()) {
+ 
+         check_constraints(d_index, d_hash, k, class_loader, false, THREAD);
+@@ -831,8 +818,8 @@
+         // can throw and doesn't use the CHECK macro.
+         if (!HAS_PENDING_EXCEPTION) {
+           { // Grabbing the Compile_lock prevents systemDictionary updates
+-            // during compilations. 
+-            MutexLocker mu(Compile_lock, THREAD);      
++            // during compilations.
++            MutexLocker mu(Compile_lock, THREAD);
+             update_dictionary(d_index, d_hash, p_index, p_hash,
+                             k, class_loader, THREAD);
+           }
+@@ -858,7 +845,7 @@
+ #ifdef ASSERT
+   {
+     Handle loader (THREAD, k->class_loader());
+-    MutexLocker mu(SystemDictionary_lock, THREAD);  
++    MutexLocker mu(SystemDictionary_lock, THREAD);
+     oop kk = find_class_or_placeholder(name, loader);
+     assert(kk == k(), "should be present in dictionary");
+   }
+@@ -867,9 +854,9 @@
+   // return if the protection domain in NULL
+   if (protection_domain() == NULL) return k();
+ 
+-  // Check the protection domain has the right access 
++  // Check the protection domain has the right access
+   {
+-    MutexLocker mu(SystemDictionary_lock, THREAD);  
++    MutexLocker mu(SystemDictionary_lock, THREAD);
+     // Note that we have an entry, and entries can be deleted only during GC,
+     // so we cannot allow GC to occur while we're holding this entry.
+     // We're using a No_Safepoint_Verifier to catch any place where we
+@@ -903,7 +890,7 @@
+ // the new entry.
+ 
+ klassOop SystemDictionary::find(symbolHandle class_name,
+-                                Handle class_loader, 
++                                Handle class_loader,
+                                 Handle protection_domain,
+                                 TRAPS) {
+ 
+@@ -928,7 +915,7 @@
+ // return NULL in case of error.
+ klassOop SystemDictionary::find_instance_or_array_klass(symbolHandle class_name,
+                                                         Handle class_loader,
+-							Handle protection_domain,
++                                                        Handle protection_domain,
+                                                         TRAPS) {
+   klassOop k = NULL;
+   assert(class_name() != NULL, "class name must be non NULL");
+@@ -939,7 +926,7 @@
+ 
+     // dimension and object_key are assigned as a side-effect of this call
+     BasicType t = FieldType::get_array_info(class_name(), &dimension,
+-					    &object_key, CHECK_(NULL));
++                                            &object_key, CHECK_(NULL));
+     if (t != T_OBJECT) {
+       k = Universe::typeArrayKlassObj(t);
+     } else {
+@@ -988,7 +975,7 @@
+   // Parsed name could be null if we threw an error before we got far
+   // enough along to parse it -- in that case, there is nothing to clean up.
+   if (!parsed_name.is_null()) {
+-    unsigned int p_hash = placeholders()->compute_hash(parsed_name, 
++    unsigned int p_hash = placeholders()->compute_hash(parsed_name,
+                                                        class_loader);
+     int p_index = placeholders()->hash_to_index(p_hash);
+     {
+@@ -1003,25 +990,25 @@
+ 
+ // Add a klass to the system from a stream (called by jni_DefineClass and
+ // JVM_DefineClass).
+-// Note: class_name can be NULL. In that case we do not know the name of 
++// Note: class_name can be NULL. In that case we do not know the name of
+ // the class until we have parsed the stream.
+ 
+-klassOop SystemDictionary::resolve_from_stream(symbolHandle class_name, 
+-                                               Handle class_loader, 
+-                                               Handle protection_domain, 
+-                                               ClassFileStream* st, 
++klassOop SystemDictionary::resolve_from_stream(symbolHandle class_name,
++                                               Handle class_loader,
++                                               Handle protection_domain,
++                                               ClassFileStream* st,
+                                                TRAPS) {
+ 
+-  // Make sure we are synchronized on the class loader before we initiate 
++  // Make sure we are synchronized on the class loader before we initiate
+   // loading.
+-  Handle lockObject = compute_loader_lock_object(class_loader, THREAD); 
++  Handle lockObject = compute_loader_lock_object(class_loader, THREAD);
+   check_loader_lock_contention(lockObject, THREAD);
+   ObjectLocker ol(lockObject, THREAD);
+ 
+   symbolHandle parsed_name;
+ 
+-  // Parse the stream. Note that we do this even though this klass might 
+-  // already be present in the SystemDictionary, otherwise we would not 
++  // Parse the stream. Note that we do this even though this klass might
++  // already be present in the SystemDictionary, otherwise we would not
+   // throw potential ClassFormatErrors.
+   //
+   // Note: "name" is updated.
+@@ -1030,16 +1017,16 @@
+   //   to be called for all classes but java.lang.Object; and we preload
+   //   java.lang.Object through resolve_or_fail, not this path.
+ 
+-  instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name, 
+-                                                             class_loader, 
++  instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name,
++                                                             class_loader,
+                                                              protection_domain,
+                                                              parsed_name,
+                                                              THREAD);
+ 
+   const char* pkg = "java/";
+-  if (!HAS_PENDING_EXCEPTION && 
+-      !class_loader.is_null() && 
+-      !parsed_name.is_null() && 
++  if (!HAS_PENDING_EXCEPTION &&
++      !class_loader.is_null() &&
++      !parsed_name.is_null() &&
+       !strncmp((const char*)parsed_name->bytes(), pkg, strlen(pkg))) {
+     // It is illegal to define classes in the "java." package from
+     // JVM_DefineClass or jni_DefineClass unless you're the bootclassloader
+@@ -1054,13 +1041,13 @@
+     size_t len = strlen(fmt) + strlen(name);
+     char* message = NEW_RESOURCE_ARRAY(char, len);
+     jio_snprintf(message, len, fmt, name);
+-    Exceptions::_throw_msg(THREAD_AND_LOCATION, 
++    Exceptions::_throw_msg(THREAD_AND_LOCATION,
+       vmSymbols::java_lang_SecurityException(), message);
+   }
+ 
+   if (!HAS_PENDING_EXCEPTION) {
+     assert(!parsed_name.is_null(), "Sanity");
+-    assert(class_name.is_null() || class_name() == parsed_name(), 
++    assert(class_name.is_null() || class_name() == parsed_name(),
+            "name mismatch");
+     // Verification prevents us from creating names with dots in them, this
+     // asserts that that's the case.
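
The SecurityException above enforces that only the bootstrap loader may
define classes in the java.* package; since parsed_name is in internal form,
the test is a plain prefix comparison against "java/". The same check in
isolation:

    #include <cstring>

    // True when a non-boot loader must be rejected: internal names use '/',
    // so java.lang.Foo arrives here as "java/lang/Foo".
    bool prohibited_package(const char* parsed_name, bool boot_loader) {
        const char* pkg = "java/";
        return !boot_loader && parsed_name != NULL &&
               strncmp(parsed_name, pkg, strlen(pkg)) == 0;
    }
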
+@@ -1077,7 +1064,7 @@
+   // a format error before the class was parsed far enough to
+   // find the name).
+   if (HAS_PENDING_EXCEPTION && !parsed_name.is_null()) {
+-    unsigned int p_hash = placeholders()->compute_hash(parsed_name, 
++    unsigned int p_hash = placeholders()->compute_hash(parsed_name,
+                                                        class_loader);
+     int p_index = placeholders()->hash_to_index(p_hash);
+     {
+@@ -1209,12 +1196,69 @@
+       tty->print_cr("]");
+     }
+     // notify a class loaded from shared object
+-    ClassLoadingService::notify_class_loaded(instanceKlass::cast(ik()), 
++    ClassLoadingService::notify_class_loaded(instanceKlass::cast(ik()),
+                                              true /* shared class */);
+   }
+   return ik;
+ }
+ 
++#ifdef KERNEL
++// Some classes on the bootstrap class path haven't been installed on the
++// system yet.  Call the DownloadManager method to make them appear in the
++// bootstrap class path and try again to load the named class.
++// Note that with delegating class loaders, every class loaded through
++// another loader is tried here first, so it had better be fast!!
++static instanceKlassHandle download_and_retry_class_load(
++                                                    symbolHandle class_name,
++                                                    TRAPS) {
++
++  klassOop dlm = SystemDictionary::sun_jkernel_DownloadManager_klass();
++  instanceKlassHandle nk;
++
++  // If download manager class isn't loaded just return.
++  if (dlm == NULL) return nk;
++
++  { HandleMark hm(THREAD);
++    ResourceMark rm(THREAD);
++    Handle s = java_lang_String::create_from_symbol(class_name, CHECK_(nk));
++    Handle class_string = java_lang_String::externalize_classname(s, CHECK_(nk));
++
++    // return value
++    JavaValue result(T_OBJECT);
++
++    // Call the DownloadManager.  We assume that it has a lock because
++    // multiple classes could be missing and be downloaded at the same time.
++    // class sun.jkernel.DownloadManager;
++    // public static String getBootClassPathEntryForClass(String className);
++    JavaCalls::call_static(&result,
++                       KlassHandle(THREAD, dlm),
++                       vmSymbolHandles::getBootClassPathEntryForClass_name(),
++                       vmSymbolHandles::string_string_signature(),
++                       class_string,
++                       CHECK_(nk));
++
++    // Get result.string and add to bootclasspath
++    assert(result.get_type() == T_OBJECT, "just checking");
++    oop obj = (oop) result.get_jobject();
++    if (obj == NULL) { return nk; }
++
++    char* new_class_name = java_lang_String::as_utf8_string(obj);
++
++    // lock the loader
++    // we use this lock because JVMTI does.
++    Handle loader_lock(THREAD, SystemDictionary::system_loader_lock());
++
++    ObjectLocker ol(loader_lock, THREAD);
++    // add the file to the bootclasspath
++    ClassLoader::update_class_path_entry_list(new_class_name, true);
++  } // end HandleMark
++
++  if (TraceClassLoading) {
++    ClassLoader::print_bootclasspath();
++  }
++  return ClassLoader::load_classfile(class_name, CHECK_(nk));
++}
++#endif // KERNEL
+ 
+ 
+ instanceKlassHandle SystemDictionary::load_instance_class(symbolHandle class_name, Handle class_loader, TRAPS) {
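
download_and_retry_class_load above is a VM-to-Java upcall: JavaCalls::call_static
invokes the static method DownloadManager.getBootClassPathEntryForClass(String),
and the returned path is appended to the boot class path before the load is
retried. Outside the VM the same call shape is written with JNI; a sketch
assuming a valid, attached JNIEnv*:

    #include <jni.h>

    // JNI analogue of the upcall in the hunk above. Returns the new boot
    // class path entry, or NULL when none is needed.
    jstring boot_path_entry_for(JNIEnv* env, jstring class_name) {
        jclass dlm = env->FindClass("sun/jkernel/DownloadManager");
        if (dlm == NULL) {           // class absent: clear the pending error
            env->ExceptionClear();
            return NULL;
        }
        jmethodID mid = env->GetStaticMethodID(
            dlm, "getBootClassPathEntryForClass",
            "(Ljava/lang/String;)Ljava/lang/String;");
        if (mid == NULL) { env->ExceptionClear(); return NULL; }
        return (jstring) env->CallStaticObjectMethod(dlm, mid, class_name);
    }
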
+@@ -1230,6 +1274,15 @@
+       k = ClassLoader::load_classfile(class_name, CHECK_(nh));
+     }
+ 
++#ifdef KERNEL
++    // If the VM class loader has failed to load the class, call the
++    // DownloadManager class to make it magically appear on the classpath
++    // and try again.  This is only configured with the Kernel VM.
++    if (k.is_null()) {
++      k = download_and_retry_class_load(class_name, CHECK_(nh));
++    }
++#endif // KERNEL
++
+     // find_or_define_instance_class may return a different k
+     if (!k.is_null()) {
+       k = find_or_define_instance_class(class_name, class_loader, k, CHECK_(nh));
+@@ -1238,7 +1291,7 @@
+   } else {
+     // Use user specified class loader to load class. Call loadClass operation on class_loader.
+     ResourceMark rm(THREAD);
+-      
++
+     Handle s = java_lang_String::create_from_symbol(class_name, CHECK_(nh));
+     // Translate to external class name format, i.e., convert '/' chars to '.'
+     Handle string = java_lang_String::externalize_classname(s, CHECK_(nh));
+@@ -1255,19 +1308,19 @@
+     // findClass, this flag risks unexpected timing bugs in the field.
+     // Do NOT assume this will be supported in future releases.
+     if (!UnsyncloadClass && has_loadClassInternal()) {
+-      JavaCalls::call_special(&result, 
+-                              class_loader, 
++      JavaCalls::call_special(&result,
++                              class_loader,
+                               spec_klass,
+                               vmSymbolHandles::loadClassInternal_name(),
+-                              vmSymbolHandles::string_class_signature(), 
++                              vmSymbolHandles::string_class_signature(),
+                               string,
+                               CHECK_(nh));
+     } else {
+-      JavaCalls::call_virtual(&result, 
+-                              class_loader, 
++      JavaCalls::call_virtual(&result,
++                              class_loader,
+                               spec_klass,
+                               vmSymbolHandles::loadClass_name(),
+-                              vmSymbolHandles::string_class_signature(), 
++                              vmSymbolHandles::string_class_signature(),
+                               string,
+                               CHECK_(nh));
+     }
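
The branch above chooses between two dispatch modes: call_virtual resolves
loadClass through the receiver, so a user-defined override runs, while
call_special (the deprecated loadClassInternal path) invokes exactly the
named method. Loosely, the same distinction in plain C++ (an analogy only,
not the VM's mechanism):

    #include <iostream>

    struct ClassLoader {
        virtual ~ClassLoader() {}
        virtual void loadClass() { std::cout << "ClassLoader::loadClass\n"; }
    };
    struct UserLoader : ClassLoader {
        void loadClass() { std::cout << "UserLoader::loadClass\n"; }
    };

    int main() {
        UserLoader loader;
        ClassLoader* base = &loader;
        base->loadClass();               // "virtual": runs the override
        base->ClassLoader::loadClass();  // "special": runs the exact method
    }
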
+@@ -1277,8 +1330,8 @@
+ 
+     // Primitive classes return null since forName() can not be
+     // used to obtain any of the Class objects representing primitives or void
+-    if ((obj != NULL) && !(java_lang_Class::is_primitive(obj))) {      
+-      instanceKlassHandle k = 
++    if ((obj != NULL) && !(java_lang_Class::is_primitive(obj))) {
++      instanceKlassHandle k =
+                 instanceKlassHandle(THREAD, java_lang_Class::as_klassOop(obj));
+       // For user defined Java class loaders, check that the name returned is
+       // the same as that requested.  This check is done for the bootstrap
+@@ -1298,7 +1351,7 @@
+ 
+   // for bootstrap classloader don't acquire lock
+   if (!class_loader_h.is_null()) {
+-    assert(ObjectSynchronizer::current_thread_holds_lock((JavaThread*)THREAD, 
++    assert(ObjectSynchronizer::current_thread_holds_lock((JavaThread*)THREAD,
+          compute_loader_lock_object(class_loader_h, THREAD)),
+          "define called without lock");
+   }
+@@ -1335,7 +1388,7 @@
+     unsigned int p_hash = placeholders()->compute_hash(name_h, class_loader_h);
+     int p_index = placeholders()->hash_to_index(p_hash);
+ 
+-    MutexLocker mu_r(Compile_lock, THREAD);                    
++    MutexLocker mu_r(Compile_lock, THREAD);
+ 
+     // Add to class hierarchy, initialize vtables, and do possible
+     // deoptimizations.
+@@ -1380,7 +1433,7 @@
+   int p_index = placeholders()->hash_to_index(p_hash);
+   PlaceholderEntry* probe;
+ 
+-  { 
++  {
+     MutexLocker mu(SystemDictionary_lock, THREAD);
+     // First check if class already defined
+     klassOop check = find_class(d_index, d_hash, class_name, class_loader);
+@@ -1390,7 +1443,7 @@
+ 
+     // Acquire define token for this class/classloader
+     symbolHandle nullsymbolHandle;
+-    probe = placeholders()->find_and_add(p_index, p_hash, class_name, class_loader, PlaceholderTable::DEFINE_CLASS, nullsymbolHandle, THREAD); 
++    probe = placeholders()->find_and_add(p_index, p_hash, class_name, class_loader, PlaceholderTable::DEFINE_CLASS, nullsymbolHandle, THREAD);
+     // Check if another thread defining in parallel
+     if (probe->definer() == NULL) {
+       // Thread will define the class
+@@ -1456,7 +1509,7 @@
+ // This method is added to check how often we have to wait to grab loader
+ // lock. The results are being recorded in the performance counters defined in
+ // ClassLoader::_sync_systemLoaderLockContentionRate and
+-// ClassLoader::_sync_nonSystemLoaderLockConteionRate. 
++// ClassLoader::_sync_nonSystemLoaderLockContentionRate.
+ void SystemDictionary::check_loader_lock_contention(Handle loader_lock, TRAPS) {
+   if (!UsePerfData) {
+     return;
+@@ -1466,7 +1519,7 @@
+ 
+   if (ObjectSynchronizer::query_lock_ownership((JavaThread*)THREAD, loader_lock)
+       == ObjectSynchronizer::owner_other) {
+-    // contention will likely happen, so increment the corresponding 
++    // contention will likely happen, so increment the corresponding
+     // contention counter.
+     if (loader_lock() == _system_loader_lock_obj) {
+       ClassLoader::sync_systemLoaderLockContentionRate()->inc();
+@@ -1474,8 +1527,8 @@
+       ClassLoader::sync_nonSystemLoaderLockContentionRate()->inc();
+     }
+   }
+-} 
+-  
++}
++
+ // ----------------------------------------------------------------------------
+ // Lookup
+ 
+@@ -1502,13 +1555,13 @@
+ 
+ 
+ // Used for assertions and verification only
+-oop SystemDictionary::find_class_or_placeholder(symbolHandle class_name, 
++oop SystemDictionary::find_class_or_placeholder(symbolHandle class_name,
+                                                 Handle class_loader) {
+   #ifndef ASSERT
+-  guarantee(VerifyBeforeGC   || 
+-            VerifyDuringGC   || 
++  guarantee(VerifyBeforeGC   ||
++            VerifyDuringGC   ||
+             VerifyBeforeExit ||
+-            VerifyAfterGC, "too expensive"); 
++            VerifyAfterGC, "too expensive");
+   #endif
+   assert_locked_or_safepoint(SystemDictionary_lock);
+   symbolOop class_name_ = class_name();
+@@ -1543,14 +1596,14 @@
+ 
+ void SystemDictionary::add_to_hierarchy(instanceKlassHandle k, TRAPS) {
+   assert(k.not_null(), "just checking");
+-  // Link into hierachy. Make sure the vtables are initialized before linking into 
++  // Link into the class hierarchy. Make sure the vtables are initialized before linking in.
+   k->append_to_sibling_list();                    // add to superklass/sibling list
+-  k->process_interfaces(THREAD);                  // handle all "implements" declarations  
++  k->process_interfaces(THREAD);                  // handle all "implements" declarations
+   k->set_init_state(instanceKlass::loaded);
+   // Now flush all code that depended on old class hierarchy.
+   // Note: must be done *after* linking k into the hierarchy (was bug 12/9/97)
+-  // Also, first reinitialize vtable because it may have gotten out of synch 
+-  // while the new class wasn't connected to the class hierarchy.     
++  // Also, first reinitialize vtable because it may have gotten out of synch
++  // while the new class wasn't connected to the class hierarchy.
+   Universe::flush_dependents_on(k);
+ }
+ 
+@@ -1558,9 +1611,9 @@
+ // ----------------------------------------------------------------------------
+ // GC support
+ 
+-// Following roots during mark-sweep is separated in two phases. 
++// Following roots during mark-sweep is separated in two phases.
+ //
+-// The first phase follows preloaded classes and all other system 
++// The first phase follows preloaded classes and all other system
+ // classes, since these will never get unloaded anyway.
+ //
+ // The second phase removes (unloads) unreachable classes from the
+@@ -1577,10 +1630,10 @@
+ void SystemDictionary::always_strong_classes_do(OopClosure* blk) {
+   // Follow all system classes and temporary placeholders in dictionary
+   dictionary()->always_strong_classes_do(blk);
+-  
++
+   // Placeholders. These are *always* strong roots, as they
+   // represent classes we're actively loading.
+-  placeholders_do(blk);  
++  placeholders_do(blk);
+ 
+   // Loader constraints. We must keep the symbolOop used in the name alive.
+   constraints()->always_strong_classes_do(blk);
+@@ -1605,7 +1658,7 @@
+ 
+ // The mirrors are scanned by shared_oops_do() which is
+ // not called by oops_do().  In order to process oops in
+-// a necessary order, shared_oops_do() is called by
++// a necessary order, shared_oops_do() is called by
+ // Universe::oops_do().
+ void SystemDictionary::oops_do(OopClosure* f) {
+   // Adjust preloaded classes and system loader object
+@@ -1660,14 +1713,14 @@
+   f->do_oop((oop*) &_final_reference_klass);
+   f->do_oop((oop*) &_phantom_reference_klass);
+   f->do_oop((oop*) &_finalizer_klass);
+-  
++
+   f->do_oop((oop*) &_thread_klass);
+   f->do_oop((oop*) &_threadGroup_klass);
+-  f->do_oop((oop*) &_properties_klass);      
+-  f->do_oop((oop*) &_reflect_accessible_object_klass);      
+-  f->do_oop((oop*) &_reflect_field_klass);      
+-  f->do_oop((oop*) &_reflect_method_klass);      
+-  f->do_oop((oop*) &_reflect_constructor_klass);      
++  f->do_oop((oop*) &_properties_klass);
++  f->do_oop((oop*) &_reflect_accessible_object_klass);
++  f->do_oop((oop*) &_reflect_field_klass);
++  f->do_oop((oop*) &_reflect_method_klass);
++  f->do_oop((oop*) &_reflect_constructor_klass);
+   f->do_oop((oop*) &_reflect_magic_klass);
+   f->do_oop((oop*) &_reflect_method_accessor_klass);
+   f->do_oop((oop*) &_reflect_constructor_accessor_klass);
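
The long f->do_oop runs above are GC root enumeration: every static klass
slot is handed to an OopClosure, and each collection phase supplies a closure
that marks, adjusts, or verifies the root it is given. The pattern reduced to
standalone C++ (oop is a plain pointer here; HotSpot's is a managed heap
reference):

    #include <cstdio>

    typedef void* oop;    // illustrative only

    // The visitor interface: GC phases implement do_oop differently.
    struct OopClosure {
        virtual ~OopClosure() {}
        virtual void do_oop(oop* p) = 0;
    };

    struct CountRootsClosure : OopClosure {
        int count;
        CountRootsClosure() : count(0) {}
        void do_oop(oop* p) { if (*p != NULL) count++; }
    };

    static oop _object_klass, _string_klass, _class_klass;  // sample roots

    void oops_do(OopClosure* f) {    // mirrors SystemDictionary::oops_do
        f->do_oop(&_object_klass);
        f->do_oop(&_string_klass);
        f->do_oop(&_class_klass);
    }

    int main() {
        CountRootsClosure c;
        oops_do(&c);
        std::printf("%d non-null roots\n", c.count);
    }
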
+@@ -1684,6 +1737,7 @@
+   f->do_oop((oop*) &_java_nio_Buffer_klass);
+ 
+   f->do_oop((oop*) &_sun_misc_AtomicLongCSImpl_klass);
++  f->do_oop((oop*) &_sun_jkernel_DownloadManager_klass);
+ 
+   f->do_oop((oop*) &_boolean_klass);
+   f->do_oop((oop*) &_char_klass);
+@@ -1696,8 +1750,8 @@
+   {
+     for (int i = 0; i < T_VOID+1; i++) {
+       if (_box_klasses[i] != NULL) {
+-	assert(i >= T_BOOLEAN, "checking");
+-	f->do_oop((oop*) &_box_klasses[i]);
++        assert(i >= T_BOOLEAN, "checking");
++        f->do_oop((oop*) &_box_klasses[i]);
+       }
+     }
+   }
+@@ -1706,31 +1760,8 @@
+   // Universe::oops_do(), via a call to shared_oops_do(), so should
+   // not be processed again.
+ 
+-  f->do_oop((oop*) &_system_loader_lock_obj); 
+-  FilteredFieldsMap::klasses_oops_do(f); 
+-}
+-
+-// These *_mirror objects in the system dictionary need to be processed
+-// early on when class data sharing is enabled, and are therefore treated
+-// as part of Universe::oops_do()  rather than in SystemDictionary::oops_do()
+-// as one would normally expect.
+-void SystemDictionary::shared_oops_do(OopClosure* f) {
+-  f->do_oop((oop*) &_int_mirror);
+-  f->do_oop((oop*) &_float_mirror);
+-  f->do_oop((oop*) &_double_mirror);
+-  f->do_oop((oop*) &_byte_mirror);
+-  f->do_oop((oop*) &_bool_mirror);
+-  f->do_oop((oop*) &_char_mirror);
+-  f->do_oop((oop*) &_long_mirror);
+-  f->do_oop((oop*) &_short_mirror);
+-  f->do_oop((oop*) &_void_mirror);
+-
+-  // It's important to iterate over these guys even if they are null,
+-  // since that's how shared heaps are restored.
+-  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
+-    f->do_oop((oop*) &_mirrors[i]);
+-  }
+-  assert(_mirrors[0] == NULL && _mirrors[T_BOOLEAN - 1] == NULL, "checking");
++  f->do_oop((oop*) &_system_loader_lock_obj);
++  FilteredFieldsMap::klasses_oops_do(f);
+ }
+ 
+ void SystemDictionary::lazily_loaded_oops_do(OopClosure* f) {
+@@ -1813,18 +1844,21 @@
+   assert(_object_klass == NULL, "preloaded classes should only be initialized once");
+   // Preload commonly used klasses
+   _object_klass            = resolve_or_fail(vmSymbolHandles::java_lang_Object(),                true, CHECK);
+-  _string_klass            = resolve_or_fail(vmSymbolHandles::java_lang_String(),                true, CHECK);  
++  _string_klass            = resolve_or_fail(vmSymbolHandles::java_lang_String(),                true, CHECK);
+   _class_klass             = resolve_or_fail(vmSymbolHandles::java_lang_Class(),                 true, CHECK);
+   debug_only(instanceKlass::verify_class_klass_nonstatic_oop_maps(_class_klass));
+-
+-  // Fixup mirrors for classes loaded before java.lang.Class
+-  initialize_basic_type_mirrors(CHECK);
++  // Fixup mirrors for classes loaded before java.lang.Class.
++  // These calls iterate over the objects currently in the perm gen
++  // so calling them at exactly this point matters (not before, when there
++  // are fewer objects, and not later, when there are more objects
++  // in the perm gen).
++  Universe::initialize_basic_type_mirrors(CHECK);
+   Universe::fixup_mirrors(CHECK);
+ 
+   _cloneable_klass         = resolve_or_fail(vmSymbolHandles::java_lang_Cloneable(),             true, CHECK);
+   _classloader_klass       = resolve_or_fail(vmSymbolHandles::java_lang_ClassLoader(),           true, CHECK);
+   _serializable_klass      = resolve_or_fail(vmSymbolHandles::java_io_Serializable(),            true, CHECK);
+-  _system_klass            = resolve_or_fail(vmSymbolHandles::java_lang_System(),                true, CHECK);  
++  _system_klass            = resolve_or_fail(vmSymbolHandles::java_lang_System(),                true, CHECK);
+ 
+   _throwable_klass         = resolve_or_fail(vmSymbolHandles::java_lang_Throwable(),             true, CHECK);
+   _error_klass             = resolve_or_fail(vmSymbolHandles::java_lang_Error(),                 true, CHECK);
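
Object, String, and Class are resolved before java.lang.Class itself exists,
so their mirrors cannot be created eagerly; Universe::fixup_mirrors then
walks what has been allocated so far and patches the missing mirrors in,
which is why the comment above insists the calls run at exactly this point.
The deferred-fixup shape in miniature (illustrative types, not HotSpot's):

    #include <cstddef>
    #include <vector>

    struct Mirror {};                // stands in for a java.lang.Class object
    struct Klass {
        Mirror* mirror;              // stays null until java.lang.Class exists
        Klass() : mirror(NULL) {}
    };

    static std::vector<Klass*> loaded_early;  // klasses resolved before Class

    // Once java.lang.Class is loaded, patch every early klass exactly once.
    // Running this later would scan a larger perm gen for the same result.
    void fixup_mirrors() {
        for (size_t i = 0; i < loaded_early.size(); i++)
            if (loaded_early[i]->mirror == NULL)
                loaded_early[i]->mirror = new Mirror();
    }
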
+@@ -1834,14 +1868,14 @@
+   _protectionDomain_klass  = resolve_or_fail(vmSymbolHandles::java_security_ProtectionDomain(),  true, CHECK);
+   _AccessControlContext_klass = resolve_or_fail(vmSymbolHandles::java_security_AccessControlContext(),  true, CHECK);
+   _classNotFoundException_klass = resolve_or_fail(vmSymbolHandles::java_lang_ClassNotFoundException(),  true, CHECK);
+-  _noClassDefFoundError_klass   = resolve_or_fail(vmSymbolHandles::java_lang_NoClassDefFoundError(),  true, CHECK);  
+-  _linkageError_klass   = resolve_or_fail(vmSymbolHandles::java_lang_LinkageError(),  true, CHECK);  
+-  _classCastException_klass = resolve_or_fail(vmSymbolHandles::java_lang_ClassCastException(),   true, CHECK);  
+-  _arrayStoreException_klass = resolve_or_fail(vmSymbolHandles::java_lang_ArrayStoreException(),   true, CHECK);  
+-  _virtualMachineError_klass = resolve_or_fail(vmSymbolHandles::java_lang_VirtualMachineError(),   true, CHECK);  
+-  _outOfMemoryError_klass  = resolve_or_fail(vmSymbolHandles::java_lang_OutOfMemoryError(),      true, CHECK);  
+-  _StackOverflowError_klass = resolve_or_fail(vmSymbolHandles::java_lang_StackOverflowError(),   true, CHECK);  
+-  _illegalMonitorStateException_klass = resolve_or_fail(vmSymbolHandles::java_lang_IllegalMonitorStateException(),   true, CHECK);  
++  _noClassDefFoundError_klass   = resolve_or_fail(vmSymbolHandles::java_lang_NoClassDefFoundError(),  true, CHECK);
++  _linkageError_klass   = resolve_or_fail(vmSymbolHandles::java_lang_LinkageError(),  true, CHECK);
++  _classCastException_klass = resolve_or_fail(vmSymbolHandles::java_lang_ClassCastException(),   true, CHECK);
++  _arrayStoreException_klass = resolve_or_fail(vmSymbolHandles::java_lang_ArrayStoreException(),   true, CHECK);
++  _virtualMachineError_klass = resolve_or_fail(vmSymbolHandles::java_lang_VirtualMachineError(),   true, CHECK);
++  _outOfMemoryError_klass  = resolve_or_fail(vmSymbolHandles::java_lang_OutOfMemoryError(),      true, CHECK);
++  _StackOverflowError_klass = resolve_or_fail(vmSymbolHandles::java_lang_StackOverflowError(),   true, CHECK);
++  _illegalMonitorStateException_klass = resolve_or_fail(vmSymbolHandles::java_lang_IllegalMonitorStateException(),   true, CHECK);
+ 
+   // Preload ref klasses and set reference types
+   _reference_klass         = resolve_or_fail(vmSymbolHandles::java_lang_ref_Reference(),         true, CHECK);
+@@ -1860,11 +1894,11 @@
+ 
+   _thread_klass           = resolve_or_fail(vmSymbolHandles::java_lang_Thread(),                true, CHECK);
+   _threadGroup_klass      = resolve_or_fail(vmSymbolHandles::java_lang_ThreadGroup(),           true, CHECK);
+-  _properties_klass       = resolve_or_fail(vmSymbolHandles::java_util_Properties(),            true, CHECK);  
+-  _reflect_accessible_object_klass = resolve_or_fail(vmSymbolHandles::java_lang_reflect_AccessibleObject(),  true, CHECK);  
+-  _reflect_field_klass    = resolve_or_fail(vmSymbolHandles::java_lang_reflect_Field(),         true, CHECK);  
+-  _reflect_method_klass   = resolve_or_fail(vmSymbolHandles::java_lang_reflect_Method(),        true, CHECK);  
+-  _reflect_constructor_klass = resolve_or_fail(vmSymbolHandles::java_lang_reflect_Constructor(),   true, CHECK);  
++  _properties_klass       = resolve_or_fail(vmSymbolHandles::java_util_Properties(),            true, CHECK);
++  _reflect_accessible_object_klass = resolve_or_fail(vmSymbolHandles::java_lang_reflect_AccessibleObject(),  true, CHECK);
++  _reflect_field_klass    = resolve_or_fail(vmSymbolHandles::java_lang_reflect_Field(),         true, CHECK);
++  _reflect_method_klass   = resolve_or_fail(vmSymbolHandles::java_lang_reflect_Method(),        true, CHECK);
++  _reflect_constructor_klass = resolve_or_fail(vmSymbolHandles::java_lang_reflect_Constructor(),   true, CHECK);
+   // Universe::is_gte_jdk14x_version() is not set up by this point.
+   // It's okay if these turn out to be NULL in non-1.4 JDKs.
+   _reflect_magic_klass    = resolve_or_null(vmSymbolHandles::sun_reflect_MagicAccessorImpl(),         CHECK);
+@@ -1874,9 +1908,9 @@
+   _reflect_constant_pool_klass = resolve_or_null(vmSymbolHandles::sun_reflect_ConstantPool(),         CHECK);
+   _reflect_unsafe_static_field_accessor_impl_klass = resolve_or_null(vmSymbolHandles::sun_reflect_UnsafeStaticFieldAccessorImpl(), CHECK);
+ 
+-  _vector_klass           = resolve_or_fail(vmSymbolHandles::java_util_Vector(),                true, CHECK);  
+-  _hashtable_klass        = resolve_or_fail(vmSymbolHandles::java_util_Hashtable(),             true, CHECK);  
+-  _stringBuffer_klass     = resolve_or_fail(vmSymbolHandles::java_lang_StringBuffer(),          true, CHECK);  
++  _vector_klass           = resolve_or_fail(vmSymbolHandles::java_util_Vector(),                true, CHECK);
++  _hashtable_klass        = resolve_or_fail(vmSymbolHandles::java_util_Hashtable(),             true, CHECK);
++  _stringBuffer_klass     = resolve_or_fail(vmSymbolHandles::java_lang_StringBuffer(),          true, CHECK);
+ 
+   // It's NULL in non-1.4 JDKs.
+   _stackTraceElement_klass = resolve_or_null(vmSymbolHandles::java_lang_StackTraceElement(),          CHECK);
+@@ -1887,6 +1921,12 @@
+ 
+   // If this class isn't present, it won't be referenced.
+   _sun_misc_AtomicLongCSImpl_klass = resolve_or_null(vmSymbolHandles::sun_misc_AtomicLongCSImpl(),     CHECK);
++#ifdef KERNEL
++  _sun_jkernel_DownloadManager_klass = resolve_or_null(vmSymbolHandles::sun_jkernel_DownloadManager(),     CHECK);
++  if (_sun_jkernel_DownloadManager_klass == NULL) {
++    warning("Cannot find sun/jkernel/DownloadManager");
++  }
++#endif // KERNEL
+ 
+   // Preload boxing klasses
+   _boolean_klass           = resolve_or_fail(vmSymbolHandles::java_lang_Boolean(),               true, CHECK);
+@@ -1916,43 +1956,10 @@
+ 
+   { // Compute whether we should use checkPackageAccess or NOT
+     methodOop method = instanceKlass::cast(classloader_klass())->find_method(vmSymbols::checkPackageAccess_name(), vmSymbols::class_protectiondomain_signature());
+-    _has_checkPackageAccess = (method != NULL); 
+-  }
+-}
+-
+-void SystemDictionary::initialize_basic_type_mirrors(TRAPS) { 
+-  if (UseSharedSpaces) {
+-    assert(_int_mirror != NULL, "already loaded");
+-    assert(_void_mirror == _mirrors[T_VOID], "consistently loaded");
+-    return;
++    _has_checkPackageAccess = (method != NULL);
+   }
+-
+-  assert(_int_mirror==NULL, "basic type mirrors already initialized");
+-
+-  _int_mirror     = java_lang_Class::create_basic_type_mirror("int",    T_INT,     CHECK);
+-  _float_mirror   = java_lang_Class::create_basic_type_mirror("float",  T_FLOAT,   CHECK);
+-  _double_mirror  = java_lang_Class::create_basic_type_mirror("double", T_DOUBLE,  CHECK);
+-  _byte_mirror    = java_lang_Class::create_basic_type_mirror("byte",   T_BYTE,    CHECK);
+-  _bool_mirror    = java_lang_Class::create_basic_type_mirror("boolean",T_BOOLEAN, CHECK);
+-  _char_mirror    = java_lang_Class::create_basic_type_mirror("char",   T_CHAR,    CHECK);
+-  _long_mirror    = java_lang_Class::create_basic_type_mirror("long",   T_LONG,    CHECK);
+-  _short_mirror   = java_lang_Class::create_basic_type_mirror("short",  T_SHORT,   CHECK);
+-  _void_mirror    = java_lang_Class::create_basic_type_mirror("void",   T_VOID,    CHECK);
+-
+-  _mirrors[T_INT]     = _int_mirror;
+-  _mirrors[T_FLOAT]   = _float_mirror;
+-  _mirrors[T_DOUBLE]  = _double_mirror;
+-  _mirrors[T_BYTE]    = _byte_mirror;
+-  _mirrors[T_BOOLEAN] = _bool_mirror;
+-  _mirrors[T_CHAR]    = _char_mirror;
+-  _mirrors[T_LONG]    = _long_mirror;
+-  _mirrors[T_SHORT]   = _short_mirror;
+-  _mirrors[T_VOID]    = _void_mirror;
+-  //_mirrors[T_OBJECT]  = instanceKlass::cast(_object_klass)->java_mirror();
+-  //_mirrors[T_ARRAY]   = instanceKlass::cast(_object_klass)->java_mirror();
+ }
+ 
+-
+ // Tells if a given klass is a box (wrapper class, such as java.lang.Integer).
+ // If so, returns the basic type it holds.  If not, returns T_OBJECT.
+ BasicType SystemDictionary::box_klass_type(klassOop k) {
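
box_klass_type answers whether a klass is one of the primitive wrapper
classes and, if so, which primitive it boxes, by scanning the _box_klasses
table that the preloading code above filled in. An equivalent lookup in
miniature (the enum ordering here is illustrative; HotSpot's BasicType
values differ):

    #include <string>

    enum BasicType { T_BOOLEAN, T_CHAR, T_FLOAT, T_DOUBLE, T_BYTE,
                     T_SHORT, T_INT, T_LONG, T_OBJECT };

    // Slot t holds the wrapper class name for primitive type t.
    static const std::string box_klasses[] = {
        "java/lang/Boolean", "java/lang/Character", "java/lang/Float",
        "java/lang/Double",  "java/lang/Byte",      "java/lang/Short",
        "java/lang/Integer", "java/lang/Long" };

    BasicType box_klass_type(const std::string& klass_name) {
        for (int t = T_BOOLEAN; t <= T_LONG; t++)
            if (box_klasses[t] == klass_name) return (BasicType) t;
        return T_OBJECT;             // not a box: an ordinary object type
    }
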
+@@ -1974,25 +1981,25 @@
+ 
+ void SystemDictionary::check_constraints(int d_index, unsigned int d_hash,
+                                          instanceKlassHandle k,
+-                                         Handle class_loader, bool defining, 
++                                         Handle class_loader, bool defining,
+                                          TRAPS) {
+   const char *linkage_error = NULL;
+   {
+     symbolHandle name (THREAD, k->name());
+-    MutexLocker mu(SystemDictionary_lock, THREAD);         
++    MutexLocker mu(SystemDictionary_lock, THREAD);
+ 
+     klassOop check = find_class(d_index, d_hash, name, class_loader);
+-    if (check != (klassOop)NULL) { 
++    if (check != (klassOop)NULL) {
+       // if different instanceKlass - duplicate class definition,
+       // else - ok, class loaded by a different thread in parallel,
+-      // we should only have found it if it was done loading and ok to use 
++      // we should only have found it if it was done loading and ok to use
+       // system dictionary only holds instance classes, placeholders
+       // also holds array classes
+-      
++
+       assert(check->klass_part()->oop_is_instance(), "noninstance in systemdictionary");
+       if ((defining == true) || (k() != check)) {
+         linkage_error = "loader (instance of  %s): attempted  duplicate class "
+-	  "definition for name: \"%s\"";
++          "definition for name: \"%s\"";
+       } else {
+         return;
+       }
+@@ -2007,13 +2014,13 @@
+ 
+     if (linkage_error == NULL) {
+       if (constraints()->check_or_update(k, class_loader, name) == false) {
+-	linkage_error = "loader constraint violation: loader (instance of %s)"
+-	  " previously initiated loading for a different type with name \"%s\"";
++        linkage_error = "loader constraint violation: loader (instance of %s)"
++          " previously initiated loading for a different type with name \"%s\"";
+       }
+     }
+   }
+ 
+-  // Throw error now if needed (cannot throw while holding 
++  // Throw error now if needed (cannot throw while holding
+   // SystemDictionary_lock because of rank ordering)
+ 
+   if (linkage_error) {
+@@ -2029,19 +2036,19 @@
+ }
+ 
+ 
+-// Update system dictionary - done after check_constraint and add_to_hierachy 
++// Update system dictionary - done after check_constraints and add_to_hierarchy
+ // have been called.
+ void SystemDictionary::update_dictionary(int d_index, unsigned int d_hash,
+                                          int p_index, unsigned int p_hash,
+-                                         instanceKlassHandle k, 
+-                                         Handle class_loader, 
++                                         instanceKlassHandle k,
++                                         Handle class_loader,
+                                          TRAPS) {
+   // Compile_lock prevents systemDictionary updates during compilations
+   assert_locked_or_safepoint(Compile_lock);
+   symbolHandle name (THREAD, k->name());
+ 
+   {
+-  MutexLocker mu1(SystemDictionary_lock, THREAD);           
++  MutexLocker mu1(SystemDictionary_lock, THREAD);
+ 
+   // See whether biased locking is enabled and if so set it for this
+   // klass.
+@@ -2076,7 +2083,7 @@
+   assert (sd_check != NULL, "should have entry in system dictionary");
+ // Changed to allow PH to remain to complete class circularity checking
+ // while only one thread can define a class at one time, multiple
+-// classes can resolve the superclass for a class at one time, 
++// classes can resolve the superclass for a class at one time,
+ // and the placeholder is used to track that
+ //  symbolOop ph_check = find_placeholder(p_index, p_hash, name, class_loader);
+ //  assert (ph_check == NULL, "should not have a placeholder entry");
+@@ -2103,7 +2110,7 @@
+   if (FieldType::is_array(class_name())) {
+     // Array classes are hard because their klassOops are not kept in the
+     // constraint table. The array klass may be constrained, but the elem class
+-    // may not be. 
++    // may not be.
+     jint dimension;
+     symbolOop object_key;
+     BasicType t = FieldType::get_array_info(class_name(), &dimension,
+@@ -2123,15 +2130,15 @@
+     // Non-array classes are easy: simply check the constraint table.
+     klass = constraints()->find_constrained_klass(class_name, class_loader);
+   }
+-      
++
+   return klass;
+ }
+ 
+ 
+ bool SystemDictionary::add_loader_constraint(symbolHandle class_name,
+                                              Handle class_loader1,
+-                                             Handle class_loader2, 
+-					     Thread* THREAD) {
++                                             Handle class_loader2,
++                                             Thread* THREAD) {
+   unsigned int d_hash1 = dictionary()->compute_hash(class_name, class_loader1);
+   int d_index1 = dictionary()->hash_to_index(d_hash1);
+ 
+@@ -2147,7 +2154,7 @@
+     klassOop klass1 = find_class(d_index1, d_hash1, class_name, class_loader1);
+     klassOop klass2 = find_class(d_index2, d_hash2, class_name, class_loader2);
+     return constraints()->add_entry(class_name, klass1, class_loader1,
+-				    klass2, class_loader2);
++                                    klass2, class_loader2);
+   }
+ }
+ 
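
A loader constraint records that one name must resolve to the same klass in
two loaders; add_entry fails when the loaders have already resolved the name
to different klasses, which is how the VM blocks type confusion across
delegating loaders. A minimal model of the check (illustrative types, not
the real constraint table):

    #include <map>
    #include <string>
    #include <utility>

    // (class name, loader id) -> klass id resolved so far, 0 = none yet.
    static std::map<std::pair<std::string, int>, int> resolved;

    // False when the constraint is already violated: both loaders resolved
    // 'name', but to two different klasses. The caller raises LinkageError.
    bool add_loader_constraint(const std::string& name, int l1, int l2) {
        int k1 = resolved[std::make_pair(name, l1)];
        int k2 = resolved[std::make_pair(name, l2)];
        if (k1 != 0 && k2 != 0 && k1 != k2) return false;
        // A full table would also record the constraint so that a future
        // conflicting resolution in either loader fails at define time.
        return true;
    }
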
+@@ -2156,7 +2163,7 @@
+ void SystemDictionary::add_resolution_error(constantPoolHandle pool, int which, symbolHandle error) {
+   unsigned int hash = resolution_errors()->compute_hash(pool, which);
+   int index = resolution_errors()->hash_to_index(hash);
+-  { 
++  {
+     MutexLocker ml(SystemDictionary_lock, Thread::current());
+     resolution_errors()->add_entry(index, hash, pool, which, error);
+   }
+@@ -2166,7 +2173,7 @@
+ symbolOop SystemDictionary::find_resolution_error(constantPoolHandle pool, int which) {
+   unsigned int hash = resolution_errors()->compute_hash(pool, which);
+   int index = resolution_errors()->hash_to_index(hash);
+-  { 
++  {
+     MutexLocker ml(SystemDictionary_lock, Thread::current());
+     ResolutionErrorEntry* entry = resolution_errors()->find_entry(index, hash, pool, which);
+     return (entry != NULL) ? entry->error() : (symbolOop)NULL;
+@@ -2182,18 +2189,18 @@
+ char* SystemDictionary::check_signature_loaders(symbolHandle signature,
+                                                Handle loader1, Handle loader2,
+                                                bool is_method, TRAPS)  {
+-  // Nothing to do if loaders are the same. 
++  // Nothing to do if loaders are the same.
+   if (loader1() == loader2()) {
+     return NULL;
+   }
+-  
++
+   SignatureStream sig_strm(signature, is_method);
+   while (!sig_strm.is_done()) {
+     if (sig_strm.is_object()) {
+       symbolOop s = sig_strm.as_symbol(CHECK_NULL);
+       symbolHandle sig (THREAD, s);
+       if (!add_loader_constraint(sig, loader1, loader2, THREAD)) {
+-	return sig()->as_C_string();
++        return sig()->as_C_string();
+       }
+     }
+     sig_strm.next();
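
check_signature_loaders walks a method descriptor with SignatureStream and
adds a loader constraint for every object type it mentions, so caller and
callee agree on each parameter and return type. The object types in a JVM
descriptor are exactly the L...; spans; a standalone scanner:

    #include <string>
    #include <vector>

    // Extracts the class names from a JVM method descriptor, e.g.
    // "(Ljava/lang/String;I[Ljava/lang/Object;)V" yields
    // {"java/lang/String", "java/lang/Object"}.
    std::vector<std::string> object_types(const std::string& descriptor) {
        std::vector<std::string> out;
        for (size_t i = 0; i < descriptor.size(); i++) {
            if (descriptor[i] == 'L') {               // object type: L...;
                size_t end = descriptor.find(';', i);
                if (end == std::string::npos) break;  // malformed descriptor
                out.push_back(descriptor.substr(i + 1, end - i - 1));
                i = end;                              // continue past ';'
            }
            // primitives, '[', '(' and ')' need no constraint
        }
        return out;
    }
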
+@@ -2275,8 +2282,8 @@
+   if (probe == NULL) {
+     probe = SystemDictionary::find_shared_class(class_name);
+   }
+-  guarantee(probe != NULL && 
+-            (!probe->is_klass() || probe == obj()), 
++  guarantee(probe != NULL &&
++            (!probe->is_klass() || probe == obj()),
+                      "Loaded klasses should be in SystemDictionary");
+ }
+ 
+@@ -2287,7 +2294,7 @@
+  private:
+   static int nclasses;        // number of classes
+   static int nmethods;        // number of methods
+-  static int nmethoddata;     // number of methodData    
++  static int nmethoddata;     // number of methodData
+   static int class_size;      // size of class objects in words
+   static int method_size;     // size of method objects in words
+   static int debug_size;      // size of debug info in methods
+@@ -2302,7 +2309,7 @@
+       class_size += ik->constants()->size();
+       class_size += ik->local_interfaces()->size();
+       class_size += ik->transitive_interfaces()->size();
+-      // We do not have to count implementors, since we only store one!      
++      // We do not have to count implementors, since we only store one!
+       class_size += ik->fields()->size();
+     }
+   }
+@@ -2329,18 +2336,18 @@
+     SystemDictionary::methods_do(do_method);
+     tty->print_cr("Class statistics:");
+     tty->print_cr("%d classes (%d bytes)", nclasses, class_size * oopSize);
+-    tty->print_cr("%d methods (%d bytes = %d base + %d debug info)", nmethods, 
++    tty->print_cr("%d methods (%d bytes = %d base + %d debug info)", nmethods,
+                   (method_size + debug_size) * oopSize, method_size * oopSize, debug_size * oopSize);
+     tty->print_cr("%d methoddata (%d bytes)", nmethoddata, methoddata_size * oopSize);
+   }
+ };
+ 
+ 
+-int ClassStatistics::nclasses        = 0;  
++int ClassStatistics::nclasses        = 0;
+ int ClassStatistics::nmethods        = 0;
+ int ClassStatistics::nmethoddata     = 0;
+ int ClassStatistics::class_size      = 0;
+-int ClassStatistics::method_size     = 0; 
++int ClassStatistics::method_size     = 0;
+ int ClassStatistics::debug_size      = 0;
+ int ClassStatistics::methoddata_size = 0;
+ 
+@@ -2390,7 +2397,7 @@
+     // collect parameter size info (add one for receiver, if any)
+     _parameter_size_profile[MIN2(m->size_of_parameters() + (m->is_static() ? 0 : 1), max_parameter_size - 1)]++;
+     // collect bytecodes info
+-    { 
++    {
+       Thread *thread = Thread::current();
+       HandleMark hm(thread);
+       BytecodeStream s(methodHandle(thread, m));
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/systemDictionary.hpp openjdk/hotspot/src/share/vm/classfile/systemDictionary.hpp
+--- openjdk6/hotspot/src/share/vm/classfile/systemDictionary.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/systemDictionary.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)systemDictionary.hpp	1.153 07/05/05 17:05:56 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The system dictionary stores all loaded classes and maps:
+@@ -33,7 +30,7 @@
+ // represented as NULL.
+ 
+ // The underlying data structure is an open hash table with a fixed number
+-// of buckets. During loading the loader object is locked, (for the VM loader 
++// of buckets. During loading the loader object is locked, (for the VM loader
+ // a private lock object is used). Class loading can thus be done concurrently,
+ // but only by different loaders.
+ //
+@@ -50,16 +47,16 @@
+ // Clients of this class who are interested in finding if a class has
+ // been completely loaded -- not classes in the process of being loaded --
+ // can read the SystemDictionary unlocked. This is safe because
+-//    - entries are only deleted at safepoints  
++//    - entries are only deleted at safepoints
+ //    - readers cannot come to a safepoint while actively examining
+-//         an entry  (an entry cannot be deleted from under a reader) 
++//         an entry  (an entry cannot be deleted from under a reader)
+ //    - entries must be fully formed before they are available to concurrent
+ //         readers (we must ensure write ordering)
+ //
+ // Note that placeholders are deleted at any time, as they are removed
+ // when a class is completely loaded. Therefore, readers as well as writers
+ // of placeholders must hold the SystemDictionary_lock.
+-// 
++//
+ 
+ class Dictionary;
+ class PlaceholderTable;
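
The header comment above is the dictionary's whole concurrency contract:
readers may scan without the lock because entries are unlinked only at
safepoints (so no reader is mid-scan when one goes away) and are fully
constructed before they become reachable. That "write ordering" requirement
is release/acquire publication; in standalone C++:

    #include <atomic>
    #include <string>

    struct Entry {
        std::string name;            // fully initialized before publication
        Entry* next;
    };

    std::atomic<Entry*> bucket_head(NULL);

    // Writer (holds SystemDictionary_lock in the real VM): build the entry
    // completely, then publish with release so its fields are visible first.
    void publish(const std::string& name) {
        Entry* e = new Entry;
        e->name = name;
        e->next = bucket_head.load(std::memory_order_relaxed);
        bucket_head.store(e, std::memory_order_release);
    }

    // Reader (lock-free): acquire pairs with the writer's release, so any
    // entry reached through the head is fully formed. Unlinking happens only
    // at safepoints, when no reader can be inside this loop.
    Entry* find(const std::string& name) {
        for (Entry* e = bucket_head.load(std::memory_order_acquire);
             e != NULL; e = e->next)
            if (e->name == name) return e;
        return NULL;
    }
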
+@@ -94,7 +91,7 @@
+   // Version with null loader and protection domain
+   static klassOop resolve_or_null(symbolHandle class_name, TRAPS);
+ 
+-  // Resolve a superclass or superinterface. Called from ClassFileParser, 
++  // Resolve a superclass or superinterface. Called from ClassFileParser,
+   // parse_interfaces, resolve_instance_class_or_null, load_shared_class
+   // "child_name" is the class whose super class or interface is being resolved.
+   static klassOop resolve_super_or_fail(symbolHandle child_name,
+@@ -111,10 +108,10 @@
+                                Handle protection_domain,
+                                ClassFileStream* st,
+                                TRAPS);
+-                               
++
+   // Resolve from stream (called by jni_DefineClass and JVM_DefineClass)
+   static klassOop resolve_from_stream(symbolHandle class_name, Handle class_loader, Handle protection_domain, ClassFileStream* st, TRAPS);
+-  
++
+   // Lookup an already loaded class. If not found NULL is returned.
+   static klassOop find(symbolHandle class_name, Handle class_loader, Handle protection_domain, TRAPS);
+ 
+@@ -122,9 +119,9 @@
+   // Do not make any queries to class loaders; consult only the cache.
+   // If not found NULL is returned.
+   static klassOop find_instance_or_array_klass(symbolHandle class_name,
+-					       Handle class_loader,
+-					       Handle protection_domain,
+-					       TRAPS);
++                                               Handle class_loader,
++                                               Handle protection_domain,
++                                               TRAPS);
+ 
+   // Lookup an instance or array class that has already been loaded
+   // either into the given class loader, or else into another class
+@@ -150,7 +147,7 @@
+   static klassOop find_constrained_instance_or_array_klass(symbolHandle class_name,
+                                                            Handle class_loader,
+                                                            TRAPS);
+-  
++
+   // Iterate over all klasses in dictionary
+   //   Just the classes from defining class loaders
+   static void classes_do(void f(klassOop));
+@@ -183,11 +180,8 @@
+   // Applies "f->do_oop" to all root oops in the system dictionary.
+   static void oops_do(OopClosure* f);
+ 
+-  // Applies "f->do_oop" to root oops that are loaded from a shared heap.
+-  static void shared_oops_do(OopClosure* f);
+-
+   // System loader lock
+-  static oop system_loader_lock()	    { return _system_loader_lock_obj; }
++  static oop system_loader_lock()           { return _system_loader_lock_obj; }
+ 
+ private:
+   //    Traverses preloaded oops: various system classes.  These are
+@@ -237,7 +231,7 @@
+ 
+   // Fast access to commonly used classes (preloaded)
+   static klassOop check_klass(klassOop k) {
+-    assert(k != NULL, "preloaded klass not initialized"); 
++    assert(k != NULL, "preloaded klass not initialized");
+     return k;
+   }
+ 
+@@ -272,15 +266,15 @@
+   static klassOop final_reference_klass()   { return check_klass(_final_reference_klass); }
+   static klassOop phantom_reference_klass() { return check_klass(_phantom_reference_klass); }
+   static klassOop finalizer_klass()         { return check_klass(_finalizer_klass); }
+-  
++
+   static klassOop thread_klass()            { return check_klass(_thread_klass); }
+   static klassOop threadGroup_klass()       { return check_klass(_threadGroup_klass); }
+-  static klassOop properties_klass()        { return check_klass(_properties_klass); }  
++  static klassOop properties_klass()        { return check_klass(_properties_klass); }
+   static klassOop reflect_accessible_object_klass() { return check_klass(_reflect_accessible_object_klass); }
+   static klassOop reflect_field_klass()     { return check_klass(_reflect_field_klass); }
+   static klassOop reflect_method_klass()    { return check_klass(_reflect_method_klass); }
+   static klassOop reflect_constructor_klass() { return check_klass(_reflect_constructor_klass); }
+-  static klassOop reflect_method_accessor_klass() { 
++  static klassOop reflect_method_accessor_klass() {
+     assert(JDK_Version::is_gte_jdk14x_version() && UseNewReflection, "JDK 1.4 only");
+     return check_klass(_reflect_method_accessor_klass);
+   }
+@@ -309,6 +303,9 @@
+ 
+   static klassOop sun_misc_AtomicLongCSImpl_klass() { return _sun_misc_AtomicLongCSImpl_klass; }
+ 
++  // To support incremental JRE downloads (KERNEL JRE). Null if not present.
++  static klassOop sun_jkernel_DownloadManager_klass() { return _sun_jkernel_DownloadManager_klass; }
++
+   static klassOop boolean_klass()           { return check_klass(_boolean_klass); }
+   static klassOop char_klass()              { return check_klass(_char_klass); }
+   static klassOop float_klass()             { return check_klass(_float_klass); }
+@@ -316,7 +313,7 @@
+   static klassOop byte_klass()              { return check_klass(_byte_klass); }
+   static klassOop short_klass()             { return check_klass(_short_klass); }
+   static klassOop int_klass()               { return check_klass(_int_klass); }
+-  static klassOop long_klass()              { return check_klass(_long_klass); } 
++  static klassOop long_klass()              { return check_klass(_long_klass); }
+ 
+   static klassOop box_klass(BasicType t) {
+     assert((uint)t < T_VOID+1, "range check");
+@@ -340,7 +337,7 @@
+ 
+   static bool class_klass_loaded()          { return _class_klass != NULL; }
+   static bool cloneable_klass_loaded()      { return _cloneable_klass != NULL; }
+-  
++
+   // Returns default system loader
+   static oop java_system_loader();
+ 
+@@ -350,37 +347,23 @@
+ private:
+   // Mirrors for primitive classes (created eagerly)
+   static oop check_mirror(oop m) {
+-    assert(m != NULL, "mirror not initialized"); 
++    assert(m != NULL, "mirror not initialized");
+     return m;
+   }
+ 
+ public:
+-  static oop int_mirror()                   { return check_mirror(_int_mirror); }
+-  static oop float_mirror()                 { return check_mirror(_float_mirror); }
+-  static oop double_mirror()                { return check_mirror(_double_mirror); }
+-  static oop byte_mirror()                  { return check_mirror(_byte_mirror); }
+-  static oop bool_mirror()                  { return check_mirror(_bool_mirror); }
+-  static oop char_mirror()                  { return check_mirror(_char_mirror); }
+-  static oop long_mirror()                  { return check_mirror(_long_mirror); }
+-  static oop short_mirror()                 { return check_mirror(_short_mirror); }
+-  static oop void_mirror()                  { return check_mirror(_void_mirror); }
+-
+-  static oop java_mirror(BasicType t) {
+-    assert((uint)t < T_VOID+1, "range check");
+-    return check_mirror(_mirrors[t]);
+-  }
+   // Note:  java_lang_Class::primitive_type is the inverse of java_mirror
+ 
+   // Check class loader constraints
+   static bool add_loader_constraint(symbolHandle name, Handle loader1,
+                                     Handle loader2, TRAPS);
+   static char* check_signature_loaders(symbolHandle signature, Handle loader1,
+-				       Handle loader2, bool is_method, TRAPS);
++                                       Handle loader2, bool is_method, TRAPS);
+ 
+   // Utility for printing loader "name" as part of tracing constraints
+   static const char* loader_name(oop loader) {
+-    return ((loader) == NULL ? "<bootloader>" : 
+-	    instanceKlass::cast((loader)->klass())->name()->as_C_string() );
++    return ((loader) == NULL ? "<bootloader>" :
++            instanceKlass::cast((loader)->klass())->name()->as_C_string() );
+   }
+ 
+   // Record the error when the first attempt to resolve a reference from a constant
+@@ -392,7 +375,7 @@
+ 
+   enum Constants {
+     _loader_constraint_size = 107,                     // number of entries in constraint table
+-    _resolution_error_size  = 107,		       // number of entries in resolution error table
++    _resolution_error_size  = 107,                     // number of entries in resolution error table
+     _nof_buckets            = 1009                     // number of buckets in hash table
+   };
+ 
+@@ -420,7 +403,7 @@
+   static LoaderConstraintTable*  _loader_constraints;
+ 
+   // Resolution errors
+-  static ResolutionErrorTable*	 _resolution_errors;
++  static ResolutionErrorTable*   _resolution_errors;
+ 
+ public:
+   // for VM_CounterDecay iteration support
+@@ -444,14 +427,14 @@
+   static klassOop resolve_instance_class_or_null(symbolHandle class_name, Handle class_loader, Handle protection_domain, TRAPS);
+   static klassOop resolve_array_class_or_null(symbolHandle class_name, Handle class_loader, Handle protection_domain, TRAPS);
+   static instanceKlassHandle handle_parallel_super_load(symbolHandle class_name, symbolHandle supername, Handle class_loader, Handle protection_domain, Handle lockObject, TRAPS);
+-  // Wait on SystemDictionary_lock; unlocks lockObject before 
++  // Wait on SystemDictionary_lock; unlocks lockObject before
+   // waiting; relocks lockObject with correct recursion count
+   // after waiting, but before reentering SystemDictionary_lock
+   // to preserve lock order semantics.
+   static void double_lock_wait(Handle lockObject, TRAPS);
+   static void define_instance_class(instanceKlassHandle k, TRAPS);
+-  static instanceKlassHandle find_or_define_instance_class(symbolHandle class_name, 
+-                                                Handle class_loader, 
++  static instanceKlassHandle find_or_define_instance_class(symbolHandle class_name,
++                                                Handle class_loader,
+                                                 instanceKlassHandle k, TRAPS);
+   static instanceKlassHandle load_shared_class(symbolHandle class_name,
+                                                Handle class_loader, TRAPS);
+@@ -464,13 +447,13 @@
+   static klassOop find_shared_class(symbolHandle class_name);
+ 
+   // Setup link to hierarchy
+-  static void add_to_hierarchy(instanceKlassHandle k, TRAPS);  
+- 
++  static void add_to_hierarchy(instanceKlassHandle k, TRAPS);
++
+ private:
+   // We pass in the hashtable index so we can calculate it outside of
+-  // the SystemDictionary_lock.   
++  // the SystemDictionary_lock.
+ 
+-  // Basic find on loaded classes 
++  // Basic find on loaded classes
+   static klassOop find_class(int index, unsigned int hash,
+                              symbolHandle name, Handle loader);
+ 
+@@ -484,16 +467,16 @@
+                                        Handle class_loader);
+ 
+   // Updating entry in dictionary
+-  // Add a completely loaded class 
++  // Add a completely loaded class
+   static void add_klass(int index, symbolHandle class_name,
+                         Handle class_loader, KlassHandle obj);
+ 
+   // Add a placeholder for a class being loaded
+-  static void add_placeholder(int index, 
+-                              symbolHandle class_name, 
++  static void add_placeholder(int index,
++                              symbolHandle class_name,
+                               Handle class_loader);
+   static void remove_placeholder(int index,
+-                                 symbolHandle class_name, 
++                                 symbolHandle class_name,
+                                  Handle class_loader);
+ 
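[The placeholder comment at the top of this header pins down the protocol that the add_placeholder/remove_placeholder declarations above implement: entries exist only while a class is being loaded, and both readers and writers must take SystemDictionary_lock. A minimal sketch of that protocol, with std::map and std::mutex as stand-ins for the HotSpot tables and lock (not the real implementation):

#include <map>
#include <mutex>
#include <string>

static std::mutex SystemDictionary_lock;          // guards the placeholder table
static std::map<std::string, bool> placeholders;  // classes currently loading

static void add_placeholder(const std::string& class_name) {
  std::lock_guard<std::mutex> guard(SystemDictionary_lock);
  placeholders[class_name] = true;
}

static void remove_placeholder(const std::string& class_name) {
  std::lock_guard<std::mutex> guard(SystemDictionary_lock);
  placeholders.erase(class_name);  // removed once the class is completely loaded
}

static bool is_loading(const std::string& class_name) {
  std::lock_guard<std::mutex> guard(SystemDictionary_lock);  // readers lock too
  return placeholders.count(class_name) != 0;
}

int main() {
  add_placeholder("java/lang/String");
  bool loading = is_loading("java/lang/String");  // true while loading
  remove_placeholder("java/lang/String");
  return loading ? 0 : 1;
}
]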
+   // Performs cleanups after resolve_super_or_fail. This typically needs
+@@ -502,14 +485,13 @@
+   static void resolution_cleanups(symbolHandle class_name,
+                                   Handle class_loader,
+                                   TRAPS);
+-  
++
+   // Initialization
+   static void initialize_preloaded_classes(TRAPS);
+-  static void initialize_basic_type_mirrors(TRAPS);
+-    
++
+   // Class loader constraints
+   static void check_constraints(int index, unsigned int hash,
+-                                instanceKlassHandle k, Handle loader, 
++                                instanceKlassHandle k, Handle loader,
+                                 bool defining, TRAPS);
+   static void update_dictionary(int d_index, unsigned int d_hash,
+                                 int p_index, unsigned int p_hash,
+@@ -523,7 +505,7 @@
+   static klassOop _classloader_klass;
+   static klassOop _serializable_klass;
+   static klassOop _system_klass;
+-  
++
+   static klassOop _throwable_klass;
+   static klassOop _error_klass;
+   static klassOop _threaddeath_klass;
+@@ -549,7 +531,7 @@
+ 
+   static klassOop _thread_klass;
+   static klassOop _threadGroup_klass;
+-  static klassOop _properties_klass;      
++  static klassOop _properties_klass;
+   static klassOop _reflect_accessible_object_klass;
+   static klassOop _reflect_field_klass;
+   static klassOop _reflect_method_klass;
+@@ -573,6 +555,9 @@
+ 
+   static klassOop _sun_misc_AtomicLongCSImpl_klass;
+ 
++  // KERNEL JRE support.
++  static klassOop _sun_jkernel_DownloadManager_klass;
++
+   // Lazily loaded klasses
+   static volatile klassOop _abstract_ownable_synchronizer_klass;
+ 
+@@ -593,18 +578,4 @@
+ 
+   static bool _has_loadClassInternal;
+   static bool _has_checkPackageAccess;
+-
+-  // Primitive classes
+-  static oop _int_mirror;
+-  static oop _float_mirror;
+-  static oop _double_mirror;
+-  static oop _byte_mirror;
+-  static oop _bool_mirror;
+-  static oop _char_mirror;
+-  static oop _long_mirror;
+-  static oop _short_mirror;
+-  static oop _void_mirror;
+-
+-  // table of same
+-  static oop _mirrors[T_VOID+1];
+ };
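[The systemDictionary.hpp hunks above are mostly whitespace cleanup (trailing blanks stripped, tabs converted to spaces), plus three substantive changes: shared_oops_do() goes away, the primitive-mirror accessors and their backing _mirrors table are deleted (the 6b06-to-7b24 step evidently moves primitive-mirror ownership out of SystemDictionary), and a _sun_jkernel_DownloadManager_klass hook is added for the incremental (KERNEL) JRE. A minimal, self-contained sketch of the lookup pattern behind the deleted java_mirror(BasicType) accessor; the Mirror type and the driver are illustrative stand-ins, not HotSpot definitions:

#include <cassert>
#include <cstdio>

enum BasicType { T_BOOLEAN = 4, T_CHAR, T_FLOAT, T_DOUBLE, T_BYTE,
                 T_SHORT, T_INT, T_LONG, T_OBJECT, T_ARRAY, T_VOID };

struct Mirror { const char* name; };  // stand-in for the mirror oop

static Mirror* _mirrors[T_VOID + 1];  // "table of same", one slot per type

static Mirror* java_mirror(BasicType t) {
  assert((unsigned)t < T_VOID + 1 && "range check");
  Mirror* m = _mirrors[t];
  assert(m != 0 && "mirror not initialized");  // mirrors are created eagerly
  return m;
}

int main() {
  static Mirror int_mirror = { "int" };
  _mirrors[T_INT] = &int_mirror;
  std::printf("%s\n", java_mirror(T_INT)->name);  // prints: int
  return 0;
}
]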
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/verificationType.cpp openjdk/hotspot/src/share/vm/classfile/verificationType.cpp
+--- openjdk6/hotspot/src/share/vm/classfile/verificationType.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/verificationType.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)verificationType.cpp	1.16 07/05/05 17:07:01 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -54,19 +51,19 @@
+   } else if (is_object()) {
+     // We need check the class hierarchy to check assignability
+     if (name() == vmSymbols::java_lang_Object()) {
+-      // any object or array is assignable to java.lang.Object 
++      // any object or array is assignable to java.lang.Object
+       return true;
+     }
+     klassOop this_class = SystemDictionary::resolve_or_fail(
+-        name_handle(), Handle(THREAD, context->class_loader()), 
++        name_handle(), Handle(THREAD, context->class_loader()),
+         Handle(THREAD, context->protection_domain()), true, CHECK_false);
+     if (this_class->klass_part()->is_interface()) {
+-      // We treat interfaces as java.lang.Object, including 
++      // We treat interfaces as java.lang.Object, including
+       // java.lang.Cloneable and java.io.Serializable
+       return true;
+     } else if (from.is_object()) {
+       klassOop from_class = SystemDictionary::resolve_or_fail(
+-          from.name_handle(), Handle(THREAD, context->class_loader()), 
++          from.name_handle(), Handle(THREAD, context->class_loader()),
+           Handle(THREAD, context->protection_domain()), true, CHECK_false);
+       return instanceKlass::cast(from_class)->is_subclass_of(this_class);
+     }
+@@ -90,14 +87,14 @@
+     case 'J': return VerificationType(Long);
+     case 'F': return VerificationType(Float);
+     case 'D': return VerificationType(Double);
+-    case '[': 
++    case '[':
+       component = SymbolTable::lookup(
+-        name(), 1, name()->utf8_length(), 
++        name(), 1, name()->utf8_length(),
+         CHECK_(VerificationType::bogus_type()));
+       return VerificationType::reference_type(component);
+-    case 'L': 
++    case 'L':
+       component = SymbolTable::lookup(
+-        name(), 2, name()->utf8_length() - 1, 
++        name(), 2, name()->utf8_length() - 1,
+         CHECK_(VerificationType::bogus_type()));
+       return VerificationType::reference_type(component);
+     default:
+@@ -127,9 +124,9 @@
+     case Null:             st->print(" null "); break;
+     default:
+       if (is_uninitialized_this()) {
+-        st->print(" uninitializedThis "); 
++        st->print(" uninitializedThis ");
+       } else if (is_uninitialized()) {
+-        st->print(" uninitialized %d ", bci()); 
++        st->print(" uninitialized %d ", bci());
+       } else {
+         st->print(" class %s ", name()->as_klass_external_name());
+       }
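[The verificationType.cpp hunks are pure whitespace and pragma-ident cleanup, but the switch they reflow is worth a gloss: given an array descriptor, it dispatches on the second byte to extract the component type, which is why the SymbolTable::lookup calls slice from offset 1 ('[': keep everything after the first '[') or offset 2 with length - 1 ('L': strip the leading "[L" and the trailing ';'). A sketch of that slicing with std::string standing in for the symbolOop/SymbolTable machinery; the helper name is assumed:

#include <cassert>
#include <iostream>
#include <string>

static std::string array_component(const std::string& desc) {
  assert(desc.size() >= 2 && desc[0] == '[');
  switch (desc[1]) {
    case '[':  // nested array: keep everything after the first '['
      return desc.substr(1);                   // lookup(name, 1, len)
    case 'L':  // object array: strip leading "[L" and trailing ';'
      assert(desc[desc.size() - 1] == ';');
      return desc.substr(2, desc.size() - 3);  // lookup(name, 2, len - 1)
    default:   // primitive array: the real code returns the primitive
      return desc.substr(1);                   // VerificationType directly
  }
}

int main() {
  std::cout << array_component("[[I") << "\n";                  // [I
  std::cout << array_component("[Ljava/lang/String;") << "\n";  // java/lang/String
  std::cout << array_component("[J") << "\n";                   // J
  return 0;
}
]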
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/verificationType.hpp openjdk/hotspot/src/share/vm/classfile/verificationType.hpp
+--- openjdk6/hotspot/src/share/vm/classfile/verificationType.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/verificationType.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)verificationType.hpp	1.17 07/05/05 17:07:01 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,34 +19,34 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ enum {
+   // As specifed in the JVM spec
+-  ITEM_Top = 0, 
+-  ITEM_Integer = 1, 
+-  ITEM_Float = 2, 
+-  ITEM_Double = 3, 
+-  ITEM_Long = 4, 
+-  ITEM_Null = 5, 
++  ITEM_Top = 0,
++  ITEM_Integer = 1,
++  ITEM_Float = 2,
++  ITEM_Double = 3,
++  ITEM_Long = 4,
++  ITEM_Null = 5,
+   ITEM_UninitializedThis = 6,
+   ITEM_Object = 7,
+   ITEM_Uninitialized = 8,
+-  ITEM_Bogus = (uint)-1  
++  ITEM_Bogus = (uint)-1
+ };
+ 
+ class VerificationType VALUE_OBJ_CLASS_SPEC {
+   private:
+-    // Least significant bits of _handle are always 0, so we use these as 
++    // Least significant bits of _handle are always 0, so we use these as
+     // the indicator that the _handle is valid.  Otherwise, the _data field
+-    // contains encoded data (as specified below).  Should the VM change 
++    // contains encoded data (as specified below).  Should the VM change
+     // and the lower bits on oops aren't 0, the assert in the constructor
+-    // will catch this and we'll have to add a descriminator tag to this 
++    // will catch this and we'll have to add a descriminator tag to this
+     // structure.
+     union {
+       symbolOop* _handle;
+-      uintptr_t _data; 
++      uintptr_t _data;
+     } _u;
+ 
+     enum {
+@@ -59,10 +56,10 @@
+     };
+ 
+     // Enum for the _data field
+-    enum { 
+-      // Bottom two bits determine if the type is a reference, primitive, 
++    enum {
++      // Bottom two bits determine if the type is a reference, primitive,
+       // uninitialized or a query-type.
+-      TypeMask           = 0x00000003, 
++      TypeMask           = 0x00000003,
+ 
+       // Topmost types encoding
+       Reference          = 0x0,        // _handle contains the name
+@@ -70,7 +67,7 @@
+       Uninitialized      = 0x2,        // 0x00ffff00 contains bci
+       TypeQuery          = 0x3,        // Meta-types used for category testing
+ 
+-      // Utility flags 
++      // Utility flags
+       ReferenceFlag      = 0x00,       // For reference query types
+       Category1Flag      = 0x01,       // One-word values
+       Category2Flag      = 0x02,       // First word of a two-word value
+@@ -128,36 +125,36 @@
+   static VerificationType byte_type() { return VerificationType(Byte); }
+   static VerificationType char_type() { return VerificationType(Char); }
+   static VerificationType short_type() { return VerificationType(Short); }
+-  static VerificationType double2_type() 
++  static VerificationType double2_type()
+     { return VerificationType(Double_2nd); }
+ 
+   // "check" types are used for queries.  A "check" type is not assignable
+-  // to anything, but the specified types are assignable to a "check".  For 
+-  // example, any category1 primitive is assignable to category1_check and 
++  // to anything, but the specified types are assignable to a "check".  For
++  // example, any category1 primitive is assignable to category1_check and
+   // any reference is assignable to reference_check.
+-  static VerificationType reference_check() 
++  static VerificationType reference_check()
+     { return VerificationType(ReferenceQuery); }
+-  static VerificationType category1_check() 
++  static VerificationType category1_check()
+     { return VerificationType(Category1Query); }
+-  static VerificationType category2_check() 
++  static VerificationType category2_check()
+     { return VerificationType(Category2Query); }
+-  static VerificationType category2_2nd_check() 
++  static VerificationType category2_2nd_check()
+     { return VerificationType(Category2_2ndQuery); }
+ 
+-  // For reference types, store the actual oop* handle 
+-  static VerificationType reference_type(symbolHandle sh) { 
++  // For reference types, store the actual oop* handle
++  static VerificationType reference_type(symbolHandle sh) {
+       assert(((uintptr_t)sh.raw_value() & 0x3) == 0, "Oops must be aligned");
+-      // If the above assert fails in the future because oop* isn't aligned, 
++      // If the above assert fails in the future because oop* isn't aligned,
+       // then this type encoding system will have to change to have a tag value
+       // to descriminate between oops and primitives.
+-      return VerificationType((uintptr_t)((symbolOop*)sh.raw_value())); 
++      return VerificationType((uintptr_t)((symbolOop*)sh.raw_value()));
+   }
+-  static VerificationType reference_type(symbolOop s, TRAPS) 
++  static VerificationType reference_type(symbolOop s, TRAPS)
+     { return reference_type(symbolHandle(THREAD, s)); }
+ 
+-  static VerificationType uninitialized_type(u2 bci) 
++  static VerificationType uninitialized_type(u2 bci)
+     { return VerificationType(bci << 1 * BitsPerByte | Uninitialized); }
+-  static VerificationType uninitialized_this_type() 
++  static VerificationType uninitialized_this_type()
+     { return uninitialized_type(BciForThis); }
+ 
+   // Create based on u1 read from classfile
+@@ -176,10 +173,10 @@
+   bool is_long2() const     { return (_u._data == Long_2nd); }
+   bool is_double2() const   { return (_u._data == Double_2nd); }
+   bool is_reference() const { return ((_u._data & TypeMask) == Reference); }
+-  bool is_category1() const { 
+-    // This should return true for all one-word types, which are category1 
+-    // primitives, and references (including uninitialized refs).  Though 
+-    // the 'query' types should technically return 'false' here, if we 
++  bool is_category1() const {
++    // This should return true for all one-word types, which are category1
++    // primitives, and references (including uninitialized refs).  Though
++    // the 'query' types should technically return 'false' here, if we
+     // allow this to return true, we can perform the test using only
+     // 2 operations rather than 8 (3 masks, 3 compares and 2 logical 'ands').
+     // Since noone should call this on a query type anyway, this is ok.
+@@ -189,8 +186,8 @@
+     // is not set.
+   }
+   bool is_category2() const { return ((_u._data & Category2) == Category2); }
+-  bool is_category2_2nd() const { 
+-    return ((_u._data & Category2_2nd) == Category2_2nd); 
++  bool is_category2_2nd() const {
++    return ((_u._data & Category2_2nd) == Category2_2nd);
+   }
+   bool is_reference_check() const { return _u._data == ReferenceQuery; }
+   bool is_category1_check() const { return _u._data == Category1Query; }
+@@ -198,7 +195,7 @@
+   bool is_category2_2nd_check() const { return _u._data == Category2_2ndQuery; }
+   bool is_check() const { return (_u._data & TypeQuery) == TypeQuery; }
+ 
+-  bool is_x_array(char sig) const { 
++  bool is_x_array(char sig) const {
+     return is_null() || (is_array() && (name()->byte_at(1) == sig));
+   }
+   bool is_int_array() const { return is_x_array('I'); }
+@@ -211,17 +208,17 @@
+   bool is_double_array() const { return is_x_array('D'); }
+   bool is_object_array() const { return is_x_array('L'); }
+   bool is_array_array() const { return is_x_array('['); }
+-  bool is_reference_array() const 
++  bool is_reference_array() const
+     { return is_object_array() || is_array_array(); }
+-  bool is_object() const 
+-    { return (is_reference() && !is_null() && name()->utf8_length() >= 1 && 
++  bool is_object() const
++    { return (is_reference() && !is_null() && name()->utf8_length() >= 1 &&
+               name()->byte_at(0) != '['); }
+-  bool is_array() const 
+-    { return (is_reference() && !is_null() && name()->utf8_length() >= 2 && 
++  bool is_array() const
++    { return (is_reference() && !is_null() && name()->utf8_length() >= 2 &&
+               name()->byte_at(0) == '['); }
+-  bool is_uninitialized() const 
++  bool is_uninitialized() const
+     { return ((_u._data & Uninitialized) == Uninitialized); }
+-  bool is_uninitialized_this() const 
++  bool is_uninitialized_this() const
+     { return is_uninitialized() && bci() == BciForThis; }
+ 
+   VerificationType to_category2_2nd() const {
+@@ -234,18 +231,18 @@
+     return ((_u._data & BciMask) >> 1 * BitsPerByte);
+   }
+ 
+-  symbolHandle name_handle() const { 
++  symbolHandle name_handle() const {
+     assert(is_reference() && !is_null(), "Must be a non-null reference");
+-    return symbolHandle(_u._handle, true); 
++    return symbolHandle(_u._handle, true);
+   }
+-  symbolOop name() const { 
++  symbolOop name() const {
+     assert(is_reference() && !is_null(), "Must be a non-null reference");
+-    return *(_u._handle); 
++    return *(_u._handle);
+   }
+ 
+   bool equals(const VerificationType& t) const {
+     return (_u._data == t._u._data ||
+-      (is_reference() && t.is_reference() && !is_null() && !t.is_null() && 
++      (is_reference() && t.is_reference() && !is_null() && !t.is_null() &&
+        name() == t.name()));
+   }
+ 
+@@ -258,7 +255,7 @@
+   }
+ 
+   // The whole point of this type system - check to see if one type
+-  // is assignable to another.  Returns true if one can assign 'from' to 
++  // is assignable to another.  Returns true if one can assign 'from' to
+   // this.
+   bool is_assignable_from(
+       const VerificationType& from, instanceKlassHandle context, TRAPS) const {
+@@ -279,8 +276,8 @@
+         case Char:
+         case Short:
+           // An int can be assigned to boolean, byte, char or short values.
+-          return from.is_integer(); 
+-        default: 
++          return from.is_integer();
++        default:
+           if (is_reference() && from.is_reference()) {
+             return is_reference_assignable_from(from, context, CHECK_false);
+           } else {
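[verificationType.hpp documents the encoding these hunks reflow: a VerificationType is a single word that is either a symbolOop* handle (aligned, so its low two bits are 00) or encoded data whose low two bits select reference, primitive, uninitialized, or query, with an uninitialized type carrying its bci in the 0x00ffff00 bits. A compilable sketch of that tagging scheme; the mask constants follow the diff, everything else is an illustrative stand-in:

#include <cassert>
#include <cstdint>
#include <cstdio>

static const uintptr_t TypeMask      = 0x00000003;
static const uintptr_t Reference     = 0x0;  // low bits 00: word is a handle
static const uintptr_t Uninitialized = 0x2;  // 0x00ffff00 carries the bci
static const uintptr_t BciMask       = 0x00ffff00;
static const int       BitsPerByte   = 8;

static uintptr_t uninitialized_type(uint16_t bci) {
  return ((uintptr_t)bci << (1 * BitsPerByte)) | Uninitialized;
}

static bool is_reference(uintptr_t v) { return (v & TypeMask) == Reference; }

static uint16_t bci_of(uintptr_t v) {
  assert((v & TypeMask) == Uninitialized);
  return (uint16_t)((v & BciMask) >> (1 * BitsPerByte));
}

int main() {
  long aligned;                               // its address has low bits 00
  assert(is_reference((uintptr_t)&aligned));  // why the scheme works at all
  uintptr_t u = uninitialized_type(42);
  std::printf("bci = %u\n", bci_of(u));       // bci = 42
  return 0;
}
]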
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/verifier.cpp openjdk/hotspot/src/share/vm/classfile/verifier.cpp
+--- openjdk6/hotspot/src/share/vm/classfile/verifier.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/verifier.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)verifier.cpp	1.113 07/05/23 10:53:19 JVM"
+-#endif
+ /*
+  * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -57,17 +54,17 @@
+ // Methods in Verifier
+ 
+ bool Verifier::should_verify_for(oop class_loader) {
+-  return class_loader == NULL ? 
++  return class_loader == NULL ?
+     BytecodeVerificationLocal : BytecodeVerificationRemote;
+ }
+ 
+ bool Verifier::relax_verify_for(oop loader) {
+   bool trusted = java_lang_ClassLoader::is_trusted_loader(loader);
+-  bool need_verify = 
++  bool need_verify =
+     // verifyAll
+-    (BytecodeVerificationLocal && BytecodeVerificationRemote) || 
++    (BytecodeVerificationLocal && BytecodeVerificationRemote) ||
+     // verifyRemote
+-    (!BytecodeVerificationLocal && BytecodeVerificationRemote && !trusted); 
++    (!BytecodeVerificationLocal && BytecodeVerificationRemote && !trusted);
+   return !need_verify;
+ }
+ 
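[The relax_verify_for() hunk above is whitespace-only, but it is a good place to pin down the flag logic: verification is needed either when both BytecodeVerificationLocal and BytecodeVerificationRemote are set (verify everything) or when only remote verification is on and the loader is untrusted; relaxing is the negation. A sketch under those assumptions, with the java_lang_ClassLoader::is_trusted_loader() test reduced to a boolean parameter:

#include <cstdio>

static bool BytecodeVerificationLocal  = false;  // default: trust local classes
static bool BytecodeVerificationRemote = true;   // default: verify remote ones

static bool relax_verify_for(bool trusted) {
  bool need_verify =
    // verifyAll
    (BytecodeVerificationLocal && BytecodeVerificationRemote) ||
    // verifyRemote
    (!BytecodeVerificationLocal && BytecodeVerificationRemote && !trusted);
  return !need_verify;
}

int main() {
  std::printf("trusted loader relaxed:   %d\n", relax_verify_for(true));   // 1
  std::printf("untrusted loader relaxed: %d\n", relax_verify_for(false));  // 0
  return 0;
}
]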
+@@ -88,13 +85,13 @@
+     if (TraceClassInitialization) {
+       tty->print_cr("Start class verification for: %s", klassName);
+     }
+-    if (UseSplitVerifier && 
++    if (UseSplitVerifier &&
+         klass->major_version() >= STACKMAP_ATTRIBUTE_MAJOR_VERSION) {
+         ClassVerifier split_verifier(
+           klass, message_buffer, message_buffer_len, THREAD);
+         split_verifier.verify_class(THREAD);
+         exception_name = split_verifier.result();
+-      if (FailOverToOldVerifier && !HAS_PENDING_EXCEPTION && 
++      if (FailOverToOldVerifier && !HAS_PENDING_EXCEPTION &&
+           (exception_name == vmSymbols::java_lang_VerifyError() ||
+            exception_name == vmSymbols::java_lang_ClassFormatError())) {
+         if (TraceClassInitialization) {
+@@ -127,12 +124,12 @@
+     return true; // verifcation succeeded
+   } else { // VerifyError or ClassFormatError to be created and thrown
+     ResourceMark rm(THREAD);
+-    instanceKlassHandle kls = 
++    instanceKlassHandle kls =
+       SystemDictionary::resolve_or_fail(exception_name, true, CHECK_false);
+     while (!kls.is_null()) {
+       if (kls == klass) {
+-        // If the class being verified is the exception we're creating 
+-        // or one of it's superclasses, we're in trouble and are going 
++        // If the class being verified is the exception we're creating
++        // or one of it's superclasses, we're in trouble and are going
+         // to infinitely recurse when we try to initialize the exception.
+         // So bail out here by throwing the preallocated VM error.
+         THROW_OOP_(Universe::virtual_machine_error_instance(), false);
+@@ -148,7 +145,7 @@
+   symbolOop name = klass->name();
+   klassOop refl_magic_klass = SystemDictionary::reflect_magic_klass();
+ 
+-  return (should_verify_for(klass->class_loader()) && 
++  return (should_verify_for(klass->class_loader()) &&
+     // return if the class is a bootstrapping class
+     // We need to skip the following four for bootstraping
+     name != vmSymbols::java_lang_Object() &&
+@@ -168,7 +165,7 @@
+     // sun/reflect/SerializationConstructorAccessor.
+     // NOTE: this is called too early in the bootstrapping process to be
+     // guarded by Universe::is_gte_jdk14x_version()/UseNewReflection.
+-    (refl_magic_klass == NULL || 
++    (refl_magic_klass == NULL ||
+      !klass->is_subtype_of(refl_magic_klass) ||
+      VerifyReflectionBytecodes)
+   );
+@@ -216,7 +213,7 @@
+ 
+   // These numbers are chosen so that VerifyClassCodes interface doesn't need
+   // to be changed (still return jboolean (unsigned char)), and result is
+-  // 1 when verification is passed. 
++  // 1 when verification is passed.
+   symbolHandle nh(NULL);
+   if (result == 0) {
+     return vmSymbols::java_lang_VerifyError();
+@@ -238,7 +235,7 @@
+ 
+ ClassVerifier::ClassVerifier(
+     instanceKlassHandle klass, char* msg, size_t msg_len, TRAPS)
+-    : _thread(THREAD), _exception_type(symbolHandle()), _message(msg), 
++    : _thread(THREAD), _exception_type(symbolHandle()), _message(msg),
+       _message_buffer_len(msg_len), _klass(klass) {
+   _this_type = VerificationType::reference_type(klass->name());
+ }
+@@ -248,7 +245,7 @@
+ 
+ void ClassVerifier::verify_class(TRAPS) {
+   if (_verify_verbose) {
+-    tty->print_cr("Verifying class %s with new format", 
++    tty->print_cr("Verifying class %s with new format",
+       _klass->external_name());
+   }
+ 
+@@ -365,7 +362,7 @@
+             opcode != Bytecodes::_lstore && opcode != Bytecodes::_fload  &&
+             opcode != Bytecodes::_dload  && opcode != Bytecodes::_fstore &&
+             opcode != Bytecodes::_dstore) {
+-          verify_error(bci, "Bad wide instruction"); 
++          verify_error(bci, "Bad wide instruction");
+           return;
+         }
+       }
+@@ -390,7 +387,7 @@
+         case Bytecodes::_lconst_0 :
+         case Bytecodes::_lconst_1 :
+           current_frame.push_stack_2(
+-            VerificationType::long_type(), 
++            VerificationType::long_type(),
+             VerificationType::long2_type(), CHECK_VERIFY(this));
+           no_control_flow = false; break;
+         case Bytecodes::_fconst_0 :
+@@ -402,7 +399,7 @@
+         case Bytecodes::_dconst_0 :
+         case Bytecodes::_dconst_1 :
+           current_frame.push_stack_2(
+-            VerificationType::double_type(), 
++            VerificationType::double_type(),
+             VerificationType::double2_type(), CHECK_VERIFY(this));
+           no_control_flow = false; break;
+         case Bytecodes::_sipush :
+@@ -412,13 +409,13 @@
+           no_control_flow = false; break;
+         case Bytecodes::_ldc :
+           verify_ldc(
+-            opcode, bcs.get_index(), &current_frame, 
++            opcode, bcs.get_index(), &current_frame,
+             cp, bci, CHECK_VERIFY(this));
+           no_control_flow = false; break;
+         case Bytecodes::_ldc_w :
+         case Bytecodes::_ldc2_w :
+           verify_ldc(
+-            opcode, bcs.get_index_big(), &current_frame, 
++            opcode, bcs.get_index_big(), &current_frame,
+             cp, bci, CHECK_VERIFY(this));
+           no_control_flow = false; break;
+         case Bytecodes::_iload :
+@@ -478,7 +475,7 @@
+             VerificationType::reference_check(), CHECK_VERIFY(this));
+           if (!atype.is_int_array()) {
+             verify_error(bci, bad_type_msg, "iaload");
+-            return;               
++            return;
+           }
+           current_frame.push_stack(
+             VerificationType::integer_type(), CHECK_VERIFY(this));
+@@ -529,7 +526,7 @@
+             return;
+           }
+           current_frame.push_stack_2(
+-            VerificationType::long_type(), 
++            VerificationType::long_type(),
+             VerificationType::long2_type(), CHECK_VERIFY(this));
+           no_control_flow = false; break;
+         case Bytecodes::_faload :
+@@ -554,7 +551,7 @@
+             return;
+           }
+           current_frame.push_stack_2(
+-            VerificationType::double_type(), 
++            VerificationType::double_type(),
+             VerificationType::double2_type(), CHECK_VERIFY(this));
+           no_control_flow = false; break;
+         case Bytecodes::_aaload : {
+@@ -570,7 +567,7 @@
+             current_frame.push_stack(
+               VerificationType::null_type(), CHECK_VERIFY(this));
+           } else {
+-            VerificationType component = 
++            VerificationType component =
+               atype.get_component(CHECK_VERIFY(this));
+             current_frame.push_stack(component, CHECK_VERIFY(this));
+           }
+@@ -676,7 +673,7 @@
+           no_control_flow = false; break;
+         case Bytecodes::_lastore :
+           current_frame.pop_stack_2(
+-            VerificationType::long2_type(), 
++            VerificationType::long2_type(),
+             VerificationType::long_type(), CHECK_VERIFY(this));
+           current_frame.pop_stack(
+             VerificationType::integer_type(), CHECK_VERIFY(this));
+@@ -701,7 +698,7 @@
+           no_control_flow = false; break;
+         case Bytecodes::_dastore :
+           current_frame.pop_stack_2(
+-            VerificationType::double2_type(), 
++            VerificationType::double2_type(),
+             VerificationType::double_type(), CHECK_VERIFY(this));
+           current_frame.pop_stack(
+             VerificationType::integer_type(), CHECK_VERIFY(this));
+@@ -890,15 +887,15 @@
+         case Bytecodes::_lor :
+         case Bytecodes::_lxor :
+           current_frame.pop_stack_2(
+-            VerificationType::long2_type(), 
++            VerificationType::long2_type(),
+             VerificationType::long_type(), CHECK_VERIFY(this));
+           // fall through
+         case Bytecodes::_lneg :
+           current_frame.pop_stack_2(
+-            VerificationType::long2_type(), 
++            VerificationType::long2_type(),
+             VerificationType::long_type(), CHECK_VERIFY(this));
+           current_frame.push_stack_2(
+-            VerificationType::long_type(), 
++            VerificationType::long_type(),
+             VerificationType::long2_type(), CHECK_VERIFY(this));
+           no_control_flow = false; break;
+         case Bytecodes::_lshl :
+@@ -907,10 +904,10 @@
+           current_frame.pop_stack(
+             VerificationType::integer_type(), CHECK_VERIFY(this));
+           current_frame.pop_stack_2(
+-            VerificationType::long2_type(), 
++            VerificationType::long2_type(),
+             VerificationType::long_type(), CHECK_VERIFY(this));
+           current_frame.push_stack_2(
+-            VerificationType::long_type(), 
++            VerificationType::long_type(),
+             VerificationType::long2_type(), CHECK_VERIFY(this));
+           no_control_flow = false; break;
+         case Bytecodes::_fadd :
+@@ -933,15 +930,15 @@
+         case Bytecodes::_ddiv :
+         case Bytecodes::_drem :
+           current_frame.pop_stack_2(
+-            VerificationType::double2_type(), 
++            VerificationType::double2_type(),
+             VerificationType::double_type(), CHECK_VERIFY(this));
+           // fall through
+         case Bytecodes::_dneg :
+           current_frame.pop_stack_2(
+-            VerificationType::double2_type(), 
++            VerificationType::double2_type(),
+             VerificationType::double_type(), CHECK_VERIFY(this));
+           current_frame.push_stack_2(
+-            VerificationType::double_type(), 
++            VerificationType::double_type(),
+             VerificationType::double2_type(), CHECK_VERIFY(this));
+           no_control_flow = false; break;
+         case Bytecodes::_iinc :
+@@ -951,12 +948,12 @@
+           type = current_frame.pop_stack(
+             VerificationType::integer_type(), CHECK_VERIFY(this));
+           current_frame.push_stack_2(
+-            VerificationType::long_type(), 
++            VerificationType::long_type(),
+             VerificationType::long2_type(), CHECK_VERIFY(this));
+           no_control_flow = false; break;
+        case Bytecodes::_l2i :
+           current_frame.pop_stack_2(
+-            VerificationType::long2_type(), 
++            VerificationType::long2_type(),
+             VerificationType::long_type(), CHECK_VERIFY(this));
+           current_frame.push_stack(
+             VerificationType::integer_type(), CHECK_VERIFY(this));
+@@ -971,22 +968,22 @@
+           current_frame.pop_stack(
+             VerificationType::integer_type(), CHECK_VERIFY(this));
+           current_frame.push_stack_2(
+-            VerificationType::double_type(), 
++            VerificationType::double_type(),
+             VerificationType::double2_type(), CHECK_VERIFY(this));
+           no_control_flow = false; break;
+         case Bytecodes::_l2f :
+           current_frame.pop_stack_2(
+-            VerificationType::long2_type(), 
++            VerificationType::long2_type(),
+             VerificationType::long_type(), CHECK_VERIFY(this));
+           current_frame.push_stack(
+             VerificationType::float_type(), CHECK_VERIFY(this));
+           no_control_flow = false; break;
+         case Bytecodes::_l2d :
+           current_frame.pop_stack_2(
+-            VerificationType::long2_type(), 
++            VerificationType::long2_type(),
+             VerificationType::long_type(), CHECK_VERIFY(this));
+           current_frame.push_stack_2(
+-            VerificationType::double_type(), 
++            VerificationType::double_type(),
+             VerificationType::double2_type(), CHECK_VERIFY(this));
+           no_control_flow = false; break;
+         case Bytecodes::_f2i :
+@@ -999,34 +996,34 @@
+           current_frame.pop_stack(
+             VerificationType::float_type(), CHECK_VERIFY(this));
+           current_frame.push_stack_2(
+-            VerificationType::long_type(), 
++            VerificationType::long_type(),
+             VerificationType::long2_type(), CHECK_VERIFY(this));
+           no_control_flow = false; break;
+         case Bytecodes::_f2d :
+           current_frame.pop_stack(
+             VerificationType::float_type(), CHECK_VERIFY(this));
+           current_frame.push_stack_2(
+-            VerificationType::double_type(), 
++            VerificationType::double_type(),
+             VerificationType::double2_type(), CHECK_VERIFY(this));
+           no_control_flow = false; break;
+         case Bytecodes::_d2i :
+           current_frame.pop_stack_2(
+-            VerificationType::double2_type(), 
++            VerificationType::double2_type(),
+             VerificationType::double_type(), CHECK_VERIFY(this));
+           current_frame.push_stack(
+             VerificationType::integer_type(), CHECK_VERIFY(this));
+           no_control_flow = false; break;
+         case Bytecodes::_d2l :
+           current_frame.pop_stack_2(
+-            VerificationType::double2_type(), 
++            VerificationType::double2_type(),
+             VerificationType::double_type(), CHECK_VERIFY(this));
+           current_frame.push_stack_2(
+-            VerificationType::long_type(), 
++            VerificationType::long_type(),
+             VerificationType::long2_type(), CHECK_VERIFY(this));
+           no_control_flow = false; break;
+         case Bytecodes::_d2f :
+           current_frame.pop_stack_2(
+-            VerificationType::double2_type(), 
++            VerificationType::double2_type(),
+             VerificationType::double_type(), CHECK_VERIFY(this));
+           current_frame.push_stack(
+             VerificationType::float_type(), CHECK_VERIFY(this));
+@@ -1041,10 +1038,10 @@
+           no_control_flow = false; break;
+         case Bytecodes::_lcmp :
+           current_frame.pop_stack_2(
+-            VerificationType::long2_type(), 
++            VerificationType::long2_type(),
+             VerificationType::long_type(), CHECK_VERIFY(this));
+           current_frame.pop_stack_2(
+-            VerificationType::long2_type(), 
++            VerificationType::long2_type(),
+             VerificationType::long_type(), CHECK_VERIFY(this));
+           current_frame.push_stack(
+             VerificationType::integer_type(), CHECK_VERIFY(this));
+@@ -1053,7 +1050,7 @@
+         case Bytecodes::_fcmpg :
+           current_frame.pop_stack(
+             VerificationType::float_type(), CHECK_VERIFY(this));
+-          current_frame.pop_stack( 
++          current_frame.pop_stack(
+             VerificationType::float_type(), CHECK_VERIFY(this));
+           current_frame.push_stack(
+             VerificationType::integer_type(), CHECK_VERIFY(this));
+@@ -1061,10 +1058,10 @@
+         case Bytecodes::_dcmpl :
+         case Bytecodes::_dcmpg :
+           current_frame.pop_stack_2(
+-            VerificationType::double2_type(), 
++            VerificationType::double2_type(),
+             VerificationType::double_type(), CHECK_VERIFY(this));
+           current_frame.pop_stack_2(
+-            VerificationType::double2_type(), 
++            VerificationType::double2_type(),
+             VerificationType::double_type(), CHECK_VERIFY(this));
+           current_frame.push_stack(
+             VerificationType::integer_type(), CHECK_VERIFY(this));
+@@ -1116,7 +1113,7 @@
+         case Bytecodes::_tableswitch :
+         case Bytecodes::_lookupswitch :
+           verify_switch(
+-            &bcs, code_length, code_data, &current_frame, 
++            &bcs, code_length, code_data, &current_frame,
+             &stackmap_table, CHECK_VERIFY(this));
+           no_control_flow = true; break;
+         case Bytecodes::_ireturn :
+@@ -1155,7 +1152,7 @@
+           }
+           // Make sure "this" has been initialized if current method is an
+           // <init>
+-          if (_method->name() == vmSymbols::object_initializer_name() && 
++          if (_method->name() == vmSymbols::object_initializer_name() &&
+               current_frame.flag_this_uninit()) {
+             verify_error(bci,
+               "Constructor must call super() or this() before return");
+@@ -1185,7 +1182,7 @@
+         {
+           index = bcs.get_index_big();
+           verify_cp_class_type(index, cp, CHECK_VERIFY(this));
+-          VerificationType new_class_type = 
++          VerificationType new_class_type =
+             cp_index_to_type(index, cp, CHECK_VERIFY(this));
+           if (!new_class_type.is_object()) {
+             verify_error(bci, "Illegal new instruction");
+@@ -1244,12 +1241,12 @@
+           index = bcs.get_index_big();
+           u2 dim = *(bcs.bcp()+3);
+           verify_cp_class_type(index, cp, CHECK_VERIFY(this));
+-          VerificationType new_array_type = 
++          VerificationType new_array_type =
+             cp_index_to_type(index, cp, CHECK_VERIFY(this));
+           if (!new_array_type.is_array()) {
+             verify_error(bci,
+               "Illegal constant pool index in multianewarray instruction");
+-            return;  
++            return;
+           }
+           if (dim < 1 || new_array_type.dimensions() < dim) {
+             verify_error(bci,
+@@ -1329,7 +1326,7 @@
+       }
+       if (end_pc != code_length) {   // special case: end_pc == code_length
+         if (end_pc > code_length || code_data[end_pc] == 0) {
+-          class_format_error("Illegal exception table end_pc %d", end_pc); 
++          class_format_error("Illegal exception table end_pc %d", end_pc);
+           return;
+         }
+       }
+@@ -1341,7 +1338,7 @@
+       if (catch_type_index != 0) {
+         VerificationType catch_type = cp_index_to_type(
+           catch_type_index, cp, CHECK_VERIFY(this));
+-        VerificationType throwable = 
++        VerificationType throwable =
+           VerificationType::reference_type(vmSymbols::java_lang_Throwable());
+         bool is_subclass = throwable.is_assignable_from(
+           catch_type, current_class(), CHECK_VERIFY(this));
+@@ -1398,7 +1395,7 @@
+       // current_frame is the stackmap frame got from the last instruction.
+       // If matched, current_frame will be updated by this method.
+       bool match = stackmap_table->match_stackmap(
+-        current_frame, this_offset, stackmap_index, 
++        current_frame, this_offset, stackmap_index,
+         !no_control_flow, true, CHECK_VERIFY_(this, 0));
+       if (!match) {
+         // report type error
+@@ -1440,7 +1437,7 @@
+             catch_type_index, cp, CHECK_VERIFY(this));
+           new_frame->push_stack(catch_type, CHECK_VERIFY(this));
+         } else {
+-          VerificationType throwable = 
++          VerificationType throwable =
+             VerificationType::reference_type(vmSymbols::java_lang_Throwable());
+           new_frame->push_stack(throwable, CHECK_VERIFY(this));
+         }
+@@ -1448,7 +1445,7 @@
+           new_frame, handler_pc, true, false, CHECK_VERIFY(this));
+         if (!match) {
+           verify_error(bci,
+-            "Stack map does not match the one at exception handler %d", 
++            "Stack map does not match the one at exception handler %d",
+             handler_pc);
+           return;
+         }
+@@ -1460,7 +1457,7 @@
+ void ClassVerifier::verify_cp_index(constantPoolHandle cp, int index, TRAPS) {
+   int nconstants = cp->length();
+   if ((index <= 0) || (index >= nconstants)) {
+-    verify_error("Illegal constant pool index %d in class %s", 
++    verify_error("Illegal constant pool index %d in class %s",
+       index, instanceKlass::cast(cp->pool_holder())->external_name());
+     return;
+   }
+@@ -1474,14 +1471,14 @@
+   // instead.  Get the original index for the tag check
+   constantPoolCacheOop cache = cp->cache();
+   if (cache != NULL &&
+-       ((types == (1 <<  JVM_CONSTANT_InterfaceMethodref)) || 
+-        (types == (1 <<  JVM_CONSTANT_Methodref)) || 
++       ((types == (1 <<  JVM_CONSTANT_InterfaceMethodref)) ||
++        (types == (1 <<  JVM_CONSTANT_Methodref)) ||
+         (types == (1 <<  JVM_CONSTANT_Fieldref)))) {
+     int native_index = index;
+     if (Bytes::is_Java_byte_ordering_different()) {
+       native_index = Bytes::swap_u2(index);
+     }
+-    assert((native_index >= 0) && (native_index < cache->length()), 
++    assert((native_index >= 0) && (native_index < cache->length()),
+       "Must be a legal index into the cp cache");
+     index = cache->entry_at(native_index)->constant_pool_index();
+   }
+@@ -1490,7 +1487,7 @@
+   unsigned int tag = cp->tag_at(index).value();
+   if ((types & (1 << tag)) == 0) {
+     verify_error(
+-      "Illegal type at constant pool entry %d in class %s", 
++      "Illegal type at constant pool entry %d in class %s",
+       index, instanceKlass::cast(cp->pool_holder())->external_name());
+     return;
+   }
+@@ -1501,7 +1498,7 @@
+   verify_cp_index(cp, index, CHECK_VERIFY(this));
+   constantTag tag = cp->tag_at(index);
+   if (!tag.is_klass() && !tag.is_unresolved_klass()) {
+-    verify_error("Illegal type at constant pool entry %d in class %s", 
++    verify_error("Illegal type at constant pool entry %d in class %s",
+       index, instanceKlass::cast(cp->pool_holder())->external_name());
+     return;
+   }
+@@ -1619,11 +1616,11 @@
+       VerificationType::float_type(), CHECK_VERIFY(this));
+   } else if (tag.is_double()) {
+     current_frame->push_stack_2(
+-      VerificationType::double_type(), 
++      VerificationType::double_type(),
+       VerificationType::double2_type(), CHECK_VERIFY(this));
+   } else if (tag.is_long()) {
+     current_frame->push_stack_2(
+-      VerificationType::long_type(), 
++      VerificationType::long_type(),
+       VerificationType::long2_type(), CHECK_VERIFY(this));
+   } else {
+     verify_error(bci, "Invalid index in ldc");
+@@ -1732,12 +1729,12 @@
+   }
+   VerificationType target_class_type = ref_class_type;
+ 
+-  assert(sizeof(VerificationType) == sizeof(uintptr_t), 
++  assert(sizeof(VerificationType) == sizeof(uintptr_t),
+         "buffer type must match VerificationType size");
+   uintptr_t field_type_buffer[2];
+   VerificationType* field_type = (VerificationType*)field_type_buffer;
+   // If we make a VerificationType[2] array directly, the compiler calls
+-  // to the c-runtime library to do the allocation instead of just 
++  // to the c-runtime library to do the allocation instead of just
+   // stack allocating it.  Plus it would run constructors.  This shows up
+   // in performance profiles.
+ 
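[The comment in this hunk explains the buffer trick: declaring a VerificationType[2] array directly would emit a C-runtime call and run constructors, so the code declares a POD uintptr_t array and casts it, which the preceding sizeof assert keeps honest. A stand-alone sketch of the same (deliberately low-level) pattern; this VerificationType is a stand-in that merely satisfies the asserted size, not the real class:

#include <cassert>
#include <cstdint>

class VerificationType {
  uintptr_t _data;
 public:
  explicit VerificationType(uintptr_t d) : _data(d) {}
  uintptr_t data() const { return _data; }
};

int main() {
  assert(sizeof(VerificationType) == sizeof(uintptr_t));
  uintptr_t field_type_buffer[2];            // raw stack storage, no ctors run
  VerificationType* field_type =
      (VerificationType*)field_type_buffer;  // reuse it as the value array
  field_type[0] = VerificationType(1);       // fill the slots by assignment
  field_type[1] = VerificationType(2);
  assert(field_type[0].data() == 1 && field_type[1].data() == 2);
  return 0;
}
]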
+@@ -1802,7 +1799,7 @@
+         break;
+ 
+       klassOop ref_class_oop = load_class(ref_class_name, CHECK);
+-      if (is_protected_access(current_class(), ref_class_oop, field_name(), 
++      if (is_protected_access(current_class(), ref_class_oop, field_name(),
+                               field_sig(), false)) {
+         // It's protected access, check if stack object is assignable to
+         // current class.
+@@ -1820,8 +1817,8 @@
+ }
+ 
+ void ClassVerifier::verify_invoke_init(
+-    RawBytecodeStream* bcs, VerificationType ref_class_type, 
+-    StackMapFrame* current_frame, u4 code_length, bool *this_uninit, 
++    RawBytecodeStream* bcs, VerificationType ref_class_type,
++    StackMapFrame* current_frame, u4 code_length, bool *this_uninit,
+     constantPoolHandle cp, TRAPS) {
+   u2 bci = bcs->bci();
+   VerificationType type = current_frame->pop_stack(
+@@ -1862,11 +1859,11 @@
+     // protected, then the objectref must be the current class or a subclass
+     // of the current class.
+     VerificationType objectref_type = new_class_type;
+-    if (name_in_supers(ref_class_type.name(), current_class())) { 
++    if (name_in_supers(ref_class_type.name(), current_class())) {
+       klassOop ref_klass = load_class(
+         ref_class_type.name(), CHECK_VERIFY(this));
+       methodOop m = instanceKlass::cast(ref_klass)->uncached_lookup_method(
+-        vmSymbols::object_initializer_name(), 
++        vmSymbols::object_initializer_name(),
+         cp->signature_ref_at(bcs->get_index_big()));
+       instanceKlassHandle mh(THREAD, m->method_holder());
+       if (m->is_protected() && !mh->is_same_class_package(_klass())) {
+@@ -1886,8 +1883,8 @@
+ }
+ 
+ void ClassVerifier::verify_invoke_instructions(
+-    RawBytecodeStream* bcs, u4 code_length, StackMapFrame* current_frame, 
+-    bool *this_uninit, VerificationType return_type, 
++    RawBytecodeStream* bcs, u4 code_length, StackMapFrame* current_frame,
++    bool *this_uninit, VerificationType return_type,
+     constantPoolHandle cp, TRAPS) {
+   // Make sure the constant pool item is the right type
+   u2 index = bcs->get_index_big();
+@@ -1916,11 +1913,11 @@
+   // of parsing the signature once to find its size.
+   // -3 is for '(', ')' and return descriptor; multiply by 2 is for
+   // longs/doubles to be consertive.
+-  assert(sizeof(VerificationType) == sizeof(uintptr_t), 
++  assert(sizeof(VerificationType) == sizeof(uintptr_t),
+         "buffer type must match VerificationType size");
+   uintptr_t on_stack_sig_types_buffer[128];
+   // If we make a VerificationType[128] array directly, the compiler calls
+-  // to the c-runtime library to do the allocation instead of just 
++  // to the c-runtime library to do the allocation instead of just
+   // stack allocating it.  Plus it would run constructors.  This shows up
+   // in performance profiles.
+ 
+@@ -1971,7 +1968,7 @@
+ 
+   if (method_name->byte_at(0) == '<') {
+     // Make sure <init> can only be invoked by invokespecial
+-    if (opcode != Bytecodes::_invokespecial || 
++    if (opcode != Bytecodes::_invokespecial ||
+         method_name() != vmSymbols::object_initializer_name()) {
+       verify_error(bci, "Illegal call to internal method");
+       return;
+@@ -1995,7 +1992,7 @@
+   // Check objectref on operand stack
+   if (opcode != Bytecodes::_invokestatic) {
+     if (method_name() == vmSymbols::object_initializer_name()) {  // <init> method
+-      verify_invoke_init(bcs, ref_class_type, current_frame, 
++      verify_invoke_init(bcs, ref_class_type, current_frame,
+         code_length, this_uninit, cp, CHECK_VERIFY(this));
+     } else {   // other methods
+       // Ensures that target class is assignable to method class.
+@@ -2058,7 +2055,7 @@
+ VerificationType ClassVerifier::get_newarray_type(
+     u2 index, u2 bci, TRAPS) {
+   const char* from_bt[] = {
+-    NULL, NULL, NULL, NULL, "[Z", "[C", "[F", "[D", "[B", "[S", "[I", "[J", 
++    NULL, NULL, NULL, NULL, "[Z", "[C", "[F", "[D", "[B", "[S", "[I", "[J",
+   };
+   if (index < T_BOOLEAN || index > T_LONG) {
+     verify_error(bci, "Illegal newarray instruction");
+@@ -2077,7 +2074,7 @@
+   current_frame->pop_stack(
+     VerificationType::integer_type(), CHECK_VERIFY(this));
+ 
+-  VerificationType component_type = 
++  VerificationType component_type =
+     cp_index_to_type(index, cp, CHECK_VERIFY(this));
+   ResourceMark rm(THREAD);
+   int length;
+@@ -2114,10 +2111,10 @@
+ 
+ void ClassVerifier::verify_lload(u2 index, StackMapFrame* current_frame, TRAPS) {
+   current_frame->get_local_2(
+-    index, VerificationType::long_type(), 
++    index, VerificationType::long_type(),
+     VerificationType::long2_type(), CHECK_VERIFY(this));
+   current_frame->push_stack_2(
+-    VerificationType::long_type(), 
++    VerificationType::long_type(),
+     VerificationType::long2_type(), CHECK_VERIFY(this));
+ }
+ 
+@@ -2130,10 +2127,10 @@
+ 
+ void ClassVerifier::verify_dload(u2 index, StackMapFrame* current_frame, TRAPS) {
+   current_frame->get_local_2(
+-    index, VerificationType::double_type(), 
++    index, VerificationType::double_type(),
+     VerificationType::double2_type(), CHECK_VERIFY(this));
+   current_frame->push_stack_2(
+-    VerificationType::double_type(), 
++    VerificationType::double_type(),
+     VerificationType::double2_type(), CHECK_VERIFY(this));
+ }
+ 
+@@ -2152,10 +2149,10 @@
+ 
+ void ClassVerifier::verify_lstore(u2 index, StackMapFrame* current_frame, TRAPS) {
+   current_frame->pop_stack_2(
+-    VerificationType::long2_type(), 
++    VerificationType::long2_type(),
+     VerificationType::long_type(), CHECK_VERIFY(this));
+   current_frame->set_local_2(
+-    index, VerificationType::long_type(), 
++    index, VerificationType::long_type(),
+     VerificationType::long2_type(), CHECK_VERIFY(this));
+ }
+ 
+@@ -2167,10 +2164,10 @@
+ 
+ void ClassVerifier::verify_dstore(u2 index, StackMapFrame* current_frame, TRAPS) {
+   current_frame->pop_stack_2(
+-    VerificationType::double2_type(), 
++    VerificationType::double2_type(),
+     VerificationType::double_type(), CHECK_VERIFY(this));
+   current_frame->set_local_2(
+-    index, VerificationType::double_type(), 
++    index, VerificationType::double_type(),
+     VerificationType::double2_type(), CHECK_VERIFY(this));
+ }
+ 
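[Most of the verifier.cpp hunks reflow two-slot stack operations, and the pairing discipline is easy to miss in the noise: a long or double occupies two slots, pushed as (long_type, long2_type) and popped with the second half first, as in pop_stack_2(long2_type, long_type). A minimal model of that discipline, where Frame and Slot are stand-ins for StackMapFrame and VerificationType:

#include <cassert>
#include <vector>

enum Slot { Integer, Long, Long_2nd, Double, Double_2nd };

struct Frame {
  std::vector<Slot> stack;
  void push_stack(Slot s) { stack.push_back(s); }
  void push_stack_2(Slot lo, Slot hi) { push_stack(lo); push_stack(hi); }
  Slot pop_stack(Slot expect) {
    assert(!stack.empty() && stack.back() == expect);
    Slot s = stack.back();
    stack.pop_back();
    return s;
  }
  void pop_stack_2(Slot hi, Slot lo) {  // note: second half popped first
    pop_stack(hi);
    pop_stack(lo);
  }
};

int main() {
  Frame f;
  // Model the verifier's handling of l2i: pop a long pair, push an int.
  f.push_stack_2(Long, Long_2nd);
  f.pop_stack_2(Long_2nd, Long);
  f.push_stack(Integer);
  assert(f.stack.size() == 1 && f.stack.back() == Integer);
  return 0;
}
]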
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/verifier.hpp openjdk/hotspot/src/share/vm/classfile/verifier.hpp
+--- openjdk6/hotspot/src/share/vm/classfile/verifier.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/verifier.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)verifier.hpp	1.41 07/05/05 17:07:02 JVM"
+-#endif
+ /*
+  * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The verifier class
+@@ -34,7 +31,7 @@
+   /**
+    * Verify the bytecodes for a class.  If 'throw_exception' is true
+    * then the appropriate VerifyError or ClassFormatError will be thrown.
+-   * Otherwise, no exception is thrown and the return indicates the 
++   * Otherwise, no exception is thrown and the return indicates the
+    * error.
+    */
+   static bool verify(instanceKlassHandle klass, Mode mode, TRAPS);
+@@ -71,7 +68,7 @@
+ // Care needs to be taken to make sure resource objects don't outlive
+ // the lifetime of their ResourceMark.
+ 
+-// These macros are used similarly to CHECK macros but also check 
++// These macros are used similarly to CHECK macros but also check
+ // the status of the verifier and return if that has an error.
+ #define CHECK_VERIFY(verifier) \
+   CHECK); if ((verifier)->has_error()) return; (0
+@@ -106,33 +103,33 @@
+   void verify_cp_class_type(int index, constantPoolHandle cp, TRAPS);
+ 
+   u2 verify_stackmap_table(
+-    u2 stackmap_index, u2 bci, StackMapFrame* current_frame, 
++    u2 stackmap_index, u2 bci, StackMapFrame* current_frame,
+     StackMapTable* stackmap_table, bool no_control_flow, TRAPS);
+ 
+   void verify_exception_handler_targets(
+-    u2 bci, bool this_uninit, StackMapFrame* current_frame, 
++    u2 bci, bool this_uninit, StackMapFrame* current_frame,
+     StackMapTable* stackmap_table, TRAPS);
+ 
+   void verify_ldc(
+-    int opcode, u2 index, StackMapFrame *current_frame, 
++    int opcode, u2 index, StackMapFrame *current_frame,
+     constantPoolHandle cp, u2 bci, TRAPS);
+ 
+   void verify_switch(
+-    RawBytecodeStream* bcs, u4 code_length, char* code_data, 
++    RawBytecodeStream* bcs, u4 code_length, char* code_data,
+     StackMapFrame* current_frame, StackMapTable* stackmap_table, TRAPS);
+ 
+   void verify_field_instructions(
+-    RawBytecodeStream* bcs, StackMapFrame* current_frame, 
++    RawBytecodeStream* bcs, StackMapFrame* current_frame,
+     constantPoolHandle cp, TRAPS);
+ 
+   void verify_invoke_init(
+-    RawBytecodeStream* bcs, VerificationType ref_class_type, 
+-    StackMapFrame* current_frame, u4 code_length, bool* this_uninit, 
++    RawBytecodeStream* bcs, VerificationType ref_class_type,
++    StackMapFrame* current_frame, u4 code_length, bool* this_uninit,
+     constantPoolHandle cp, TRAPS);
+ 
+   void verify_invoke_instructions(
+-    RawBytecodeStream* bcs, u4 code_length, StackMapFrame* current_frame, 
+-    bool* this_uninit, VerificationType return_type, 
++    RawBytecodeStream* bcs, u4 code_length, StackMapFrame* current_frame,
++    bool* this_uninit, VerificationType return_type,
+     constantPoolHandle cp, TRAPS);
+ 
+   VerificationType get_newarray_type(u2 index, u2 bci, TRAPS);
+@@ -176,8 +173,8 @@
+   instanceKlassHandle current_class() const { return _klass; }
+   VerificationType current_type() const { return _this_type; }
+ 
+-  // Verifies the class.  If a verify or class file format error occurs, 
+-  // the '_exception_name' symbols will set to the exception name and 
++  // Verifies the class.  If a verify or class file format error occurs,
++  // the '_exception_name' symbols will set to the exception name and
+   // the message_buffer will be filled in with the exception message.
+   void verify_class(TRAPS);
+ 
+@@ -185,7 +182,7 @@
+   symbolHandle result() const { return _exception_type; }
+   bool has_error() const { return !(result().is_null()); }
+ 
+-  // Called when verify or class format errors are encountered.  
++  // Called when verify or class format errors are encountered.
+   // May throw an exception based upon the mode.
+   void verify_error(u2 offset, const char* fmt, ...);
+   void verify_error(const char* fmt, ...);
+@@ -211,13 +208,13 @@
+   switch (bt) {
+     case T_OBJECT:
+     case T_ARRAY:
+-      { 
+-        symbolOop name = sig_type->as_symbol(CHECK_0); 
+-        *inference_type = 
++      {
++        symbolOop name = sig_type->as_symbol(CHECK_0);
++        *inference_type =
+           VerificationType::reference_type(symbolHandle(THREAD, name));
+         return 1;
+       }
+-    case T_LONG: 
++    case T_LONG:
+       *inference_type = VerificationType::long_type();
+       *++inference_type = VerificationType::long2_type();
+       return 2;
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/vmSymbols.cpp openjdk/hotspot/src/share/vm/classfile/vmSymbols.cpp
+--- openjdk6/hotspot/src/share/vm/classfile/vmSymbols.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/vmSymbols.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)vmSymbols.cpp	1.28 07/05/17 15:50:36 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -35,7 +32,8 @@
+ 
+ inline int compare_symbol(symbolOop a, symbolOop b) {
+   if (a == b)  return 0;
+-  return (intptr_t)a > (intptr_t)b ? +1 : -1;
++  // follow the natural address order:
++  return (address)a > (address)b ? +1 : -1;
+ }
+ 
+ static vmSymbols::SID vm_symbol_index[vmSymbols::SID_LIMIT];
+@@ -275,7 +273,7 @@
+ 
+ 
+ #define VM_INTRINSIC_INITIALIZE(id, klass, name, sig, flags) #id "\0"
+-static const char* vm_intrinsic_name_bodies = 
++static const char* vm_intrinsic_name_bodies =
+   VM_INTRINSICS_DO(VM_INTRINSIC_INITIALIZE,
+                    VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE);
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/classfile/vmSymbols.hpp openjdk/hotspot/src/share/vm/classfile/vmSymbols.hpp
+--- openjdk6/hotspot/src/share/vm/classfile/vmSymbols.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/classfile/vmSymbols.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)vmSymbols.hpp	1.162 07/05/17 15:50:40 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,10 +19,10 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+-// The classes vmSymbols and vmSymbolHandles are a name spaces for fast lookup of 
++// The classes vmSymbols and vmSymbolHandles are a name spaces for fast lookup of
+ // symbols commonly used in the VM. The first class return a symbolOop, while the
+ // second class returns a SymbolHandle. The underlying data structure is shared
+ // between the two classes.
+@@ -97,6 +94,8 @@
+   template(java_lang_Compiler,                        "java/lang/Compiler")                       \
+   template(sun_misc_Signal,                           "sun/misc/Signal")                          \
+   template(java_lang_AssertionStatusDirectives,       "java/lang/AssertionStatusDirectives")      \
++  template(sun_jkernel_DownloadManager,               "sun/jkernel/DownloadManager")                 \
++  template(getBootClassPathEntryForClass_name,        "getBootClassPathEntryForClass")            \
+                                                                                                   \
+   /* class file format tags */                                                                    \
+   template(tag_source_file,                           "SourceFile")                               \
+@@ -411,8 +410,8 @@
+   template(serializeAgentPropertiesToByteArray_name,   "serializeAgentPropertiesToByteArray")                     \
+   template(classRedefinedCount_name,                   "classRedefinedCount")                                     \
+   /*end*/
+- 
+-                
++
++
+ 
+ // Here are all the intrinsics known to the runtime and the CI.
+ // Each intrinsic consists of a public enum name (like _hashCode),
+diff -ruN openjdk6/hotspot/src/share/vm/code/codeBlob.cpp openjdk/hotspot/src/share/vm/code/codeBlob.cpp
+--- openjdk6/hotspot/src/share/vm/code/codeBlob.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/codeBlob.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)codeBlob.cpp	1.128 07/05/05 17:05:19 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -93,7 +90,7 @@
+   _name                  = name;
+   _size                  = size;
+   _frame_complete_offset = frame_complete;
+-  _header_size           = header_size;  
++  _header_size           = header_size;
+   _relocation_size       = round_to(cb->total_relocation_size(), oopSize);
+   _instructions_offset   = align_code_offset(header_size + _relocation_size);
+   _data_offset           = _instructions_offset + round_to(cb->total_code_size(), oopSize);
+@@ -170,9 +167,9 @@
+ relocInfo::relocType CodeBlob::reloc_type_for_address(address pc) {
+   RelocIterator iter(this, pc, pc+1);
+   while (iter.next()) {
+-    return (relocInfo::relocType) iter.type();  
++    return (relocInfo::relocType) iter.type();
+   }
+-  // No relocation info found for pc 
++  // No relocation info found for pc
+   ShouldNotReachHere();
+   return relocInfo::none; // dummy return value
+ }
+@@ -224,7 +221,7 @@
+ void CodeBlob::do_unloading(BoolObjectClosure* is_alive,
+                             OopClosure* keep_alive,
+                             bool unloading_occurred) {
+-  ShouldNotReachHere(); 
++  ShouldNotReachHere();
+ }
+ 
+ OopMap* CodeBlob::oop_map_for_return_address(address return_address) {
+@@ -243,7 +240,7 @@
+ {}
+ 
+ BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
+-  ThreadInVMfromUnknown __tiv;	// get to VM state in case we block on CodeCache_lock
++  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+ 
+   BufferBlob* blob = NULL;
+   unsigned int size = sizeof(BufferBlob);
+@@ -268,7 +265,7 @@
+ {}
+ 
+ BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
+-  ThreadInVMfromUnknown __tiv;	// get to VM state in case we block on CodeCache_lock
++  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+ 
+   BufferBlob* blob = NULL;
+   unsigned int size = allocation_size(cb, sizeof(BufferBlob));
+@@ -292,8 +289,8 @@
+ 
+ 
+ void BufferBlob::free( BufferBlob *blob ) {
+-  ThreadInVMfromUnknown __tiv;	// get to VM state in case we block on CodeCache_lock
+-  { 
++  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
++  {
+     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+     CodeCache::free((CodeBlob*)blob);
+   }
+@@ -316,9 +313,9 @@
+   int         frame_size,
+   OopMapSet*  oop_maps,
+   bool        caller_must_gc_arguments
+-) 
++)
+ : CodeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps)
+-{  
++{
+   _caller_must_gc_arguments = caller_must_gc_arguments;
+ }
+ 
+@@ -331,7 +328,7 @@
+                                            bool caller_must_gc_arguments)
+ {
+   RuntimeStub* stub = NULL;
+-  ThreadInVMfromUnknown __tiv;	// get to VM state in case we block on CodeCache_lock
++  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+   {
+     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+     unsigned int size = allocation_size(cb, sizeof(RuntimeStub));
+@@ -346,11 +343,11 @@
+       tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, stub);
+       Disassembler::decode(stub->instructions_begin(), stub->instructions_end());
+     }
+-    VTune::register_stub(stub_id, stub->instructions_begin(), stub->instructions_end());   
+-    Forte::register_stub(stub_id, stub->instructions_begin(), stub->instructions_end());   
++    VTune::register_stub(stub_id, stub->instructions_begin(), stub->instructions_end());
++    Forte::register_stub(stub_id, stub->instructions_begin(), stub->instructions_end());
+ 
+     if (JvmtiExport::should_post_dynamic_code_generated()) {
+-      JvmtiExport::post_dynamic_code_generated(stub_name, stub->instructions_begin(), stub->instructions_end());      
++      JvmtiExport::post_dynamic_code_generated(stub_name, stub->instructions_begin(), stub->instructions_end());
+     }
+   }
+ 
+@@ -374,7 +371,7 @@
+ DeoptimizationBlob::DeoptimizationBlob(
+   CodeBuffer* cb,
+   int         size,
+-  OopMapSet*  oop_maps, 
++  OopMapSet*  oop_maps,
+   int         unpack_offset,
+   int         unpack_with_exception_offset,
+   int         unpack_with_reexecution_offset,
+@@ -382,7 +379,7 @@
+ )
+ : SingletonBlob("DeoptimizationBlob", cb, sizeof(DeoptimizationBlob), size, frame_size, oop_maps)
+ {
+-  _unpack_offset           = unpack_offset;  
++  _unpack_offset           = unpack_offset;
+   _unpack_with_exception   = unpack_with_exception_offset;
+   _unpack_with_reexecution = unpack_with_reexecution_offset;
+ #ifdef COMPILER1
+@@ -393,14 +390,14 @@
+ 
+ DeoptimizationBlob* DeoptimizationBlob::create(
+   CodeBuffer* cb,
+-  OopMapSet*  oop_maps, 
++  OopMapSet*  oop_maps,
+   int        unpack_offset,
+   int        unpack_with_exception_offset,
+   int        unpack_with_reexecution_offset,
+   int        frame_size)
+ {
+   DeoptimizationBlob* blob = NULL;
+-  ThreadInVMfromUnknown __tiv;	// get to VM state in case we block on CodeCache_lock
++  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+   {
+     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+     unsigned int size = allocation_size(cb, sizeof(DeoptimizationBlob));
+@@ -426,9 +423,9 @@
+ 
+     if (JvmtiExport::should_post_dynamic_code_generated()) {
+       JvmtiExport::post_dynamic_code_generated("DeoptimizationBlob",
+-					       blob->instructions_begin(),
+-					       blob->instructions_end());
+-    }    
++                                               blob->instructions_begin(),
++                                               blob->instructions_end());
++    }
+   }
+ 
+   // Track memory usage statistic after releasing CodeCache_lock
+@@ -464,10 +461,10 @@
+   int        frame_size)
+ {
+   UncommonTrapBlob* blob = NULL;
+-  ThreadInVMfromUnknown __tiv;	// get to VM state in case we block on CodeCache_lock
++  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+   {
+     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+-    unsigned int size = allocation_size(cb, sizeof(UncommonTrapBlob)); 
++    unsigned int size = allocation_size(cb, sizeof(UncommonTrapBlob));
+     blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size);
+   }
+ 
+@@ -481,12 +478,12 @@
+     }
+     VTune::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
+     Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
+-   
++
+     if (JvmtiExport::should_post_dynamic_code_generated()) {
+       JvmtiExport::post_dynamic_code_generated("UncommonTrapBlob",
+-					       blob->instructions_begin(),
+-					       blob->instructions_end());
+-    }    
++                                               blob->instructions_begin(),
++                                               blob->instructions_end());
++    }
+   }
+ 
+   // Track memory usage statistic after releasing CodeCache_lock
+@@ -513,7 +510,7 @@
+   int         size,
+   OopMapSet*  oop_maps,
+   int         frame_size
+-) 
++)
+ : SingletonBlob("ExceptionBlob", cb, sizeof(ExceptionBlob), size, frame_size, oop_maps)
+ {}
+ 
+@@ -524,10 +521,10 @@
+   int         frame_size)
+ {
+   ExceptionBlob* blob = NULL;
+-  ThreadInVMfromUnknown __tiv;	// get to VM state in case we block on CodeCache_lock
++  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+   {
+     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+-    unsigned int size = allocation_size(cb, sizeof(ExceptionBlob)); 
++    unsigned int size = allocation_size(cb, sizeof(ExceptionBlob));
+     blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size);
+   }
+ 
+@@ -544,11 +541,11 @@
+ 
+     if (JvmtiExport::should_post_dynamic_code_generated()) {
+       JvmtiExport::post_dynamic_code_generated("ExceptionBlob",
+-					       blob->instructions_begin(),
+-					       blob->instructions_end());
+-    }    
++                                               blob->instructions_begin(),
++                                               blob->instructions_end());
++    }
+   }
+-  
++
+   // Track memory usage statistic after releasing CodeCache_lock
+   MemoryService::track_code_cache_memory_usage();
+ 
+@@ -572,7 +569,7 @@
+   int         size,
+   OopMapSet*  oop_maps,
+   int         frame_size
+-) 
++)
+ : SingletonBlob("SafepointBlob", cb, sizeof(SafepointBlob), size, frame_size, oop_maps)
+ {}
+ 
+@@ -583,10 +580,10 @@
+   int         frame_size)
+ {
+   SafepointBlob* blob = NULL;
+-  ThreadInVMfromUnknown __tiv;	// get to VM state in case we block on CodeCache_lock
++  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+   {
+     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+-    unsigned int size = allocation_size(cb, sizeof(SafepointBlob)); 
++    unsigned int size = allocation_size(cb, sizeof(SafepointBlob));
+     blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
+   }
+ 
+@@ -603,8 +600,8 @@
+ 
+     if (JvmtiExport::should_post_dynamic_code_generated()) {
+       JvmtiExport::post_dynamic_code_generated("SafepointBlob",
+-					       blob->instructions_begin(),
+-					       blob->instructions_end());
++                                               blob->instructions_begin(),
++                                               blob->instructions_end());
+     }
+   }
+ 
+@@ -626,7 +623,7 @@
+ // Verification and printing
+ 
+ void CodeBlob::verify() {
+-  ShouldNotReachHere(); 
++  ShouldNotReachHere();
+ }
+ 
+ #ifndef PRODUCT
+@@ -662,7 +659,7 @@
+ 
+ #endif
+ 
+-void RuntimeStub::verify() {  
++void RuntimeStub::verify() {
+   // unimplemented
+ }
+ 
+@@ -682,14 +679,14 @@
+ 
+ #endif
+ 
+-void SingletonBlob::verify() {  
++void SingletonBlob::verify() {
+   // unimplemented
+ }
+ 
+ #ifndef PRODUCT
+ 
+ void SingletonBlob::print() const {
+-  CodeBlob::print();  
++  CodeBlob::print();
+   tty->print_cr(name());
+   Disassembler::decode((CodeBlob*)this);
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/code/codeBlob.hpp openjdk/hotspot/src/share/vm/code/codeBlob.hpp
+--- openjdk6/hotspot/src/share/vm/code/codeBlob.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/codeBlob.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)codeBlob.hpp	1.125 07/05/05 17:05:18 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // CodeBlob - superclass for all entries in the CodeCache.
+@@ -55,7 +52,7 @@
+   int        _frame_complete_offset;             // instruction offsets in [0.._frame_complete_offset) have
+                                                  // not finished setting up their frame. Beware of pc's in
+                                                  // that range. There is a similar range(s) on returns
+-                                                 // which we don't detect. 
++                                                 // which we don't detect.
+   int        _data_offset;                       // offset to where data region begins
+   int        _oops_offset;                       // offset to where embedded oop table begins (inside data)
+   int        _oops_length;                       // number of embedded oops
+@@ -69,9 +66,9 @@
+   inline void initialize_immediate_oop(oop* dest, jobject handle);
+ 
+  public:
+-  // Returns the space needed for CodeBlob 
++  // Returns the space needed for CodeBlob
+   static unsigned int allocation_size(CodeBuffer* cb, int header_size);
+-  
++
+   // Creation
+   // a) simple CodeBlob
+   // frame_complete is the offset from the beginning of the instructions
+@@ -96,7 +93,7 @@
+   virtual bool is_buffer_blob() const            { return false; }
+   virtual bool is_nmethod() const                { return false; }
+   virtual bool is_runtime_stub() const           { return false; }
+-  virtual bool is_deoptimization_stub() const    { return false; }  
++  virtual bool is_deoptimization_stub() const    { return false; }
+   virtual bool is_uncommon_trap_stub() const     { return false; }
+   virtual bool is_exception_stub() const         { return false; }
+   virtual bool is_safepoint_stub() const         { return false; }
+@@ -113,7 +110,7 @@
+   address    instructions_begin() const          { return (address)    header_begin() + _instructions_offset;  }
+   address    instructions_end() const            { return (address)    header_begin() + _data_offset; }
+   address    data_begin() const                  { return (address)    header_begin() + _data_offset; }
+-  address    data_end() const                    { return (address)    header_begin() + _size; }  
++  address    data_end() const                    { return (address)    header_begin() + _size; }
+   oop*       oops_begin() const                  { return (oop*)      (header_begin() + _oops_offset); }
+   oop*       oops_end() const                    { return                oops_begin() + _oops_length; }
+ 
+@@ -129,16 +126,16 @@
+   int relocation_size() const                    { return (address) relocation_end() - (address) relocation_begin(); }
+   int instructions_size() const                  { return instructions_end() - instructions_begin();  }
+   int data_size() const                          { return data_end() - data_begin(); }
+-  int oops_size() const                          { return (address) oops_end() - (address) oops_begin(); }  
++  int oops_size() const                          { return (address) oops_end() - (address) oops_begin(); }
+ 
+-  // Containment  
++  // Containment
+   bool blob_contains(address addr) const         { return header_begin()       <= addr && addr < data_end(); }
+-  bool relocation_contains(relocInfo* addr) const{ return relocation_begin()   <= addr && addr < relocation_end(); }     
++  bool relocation_contains(relocInfo* addr) const{ return relocation_begin()   <= addr && addr < relocation_end(); }
+   bool instructions_contains(address addr) const { return instructions_begin() <= addr && addr < instructions_end(); }
+   bool data_contains(address addr) const         { return data_begin()         <= addr && addr < data_end(); }
+   bool oops_contains(oop* addr) const            { return oops_begin()         <= addr && addr < oops_end(); }
+   bool contains(address addr) const              { return instructions_contains(addr); }
+-  bool is_frame_complete_at(address addr) const  { return instructions_contains(addr) && 
++  bool is_frame_complete_at(address addr) const  { return instructions_contains(addr) &&
+                                                           addr >= instructions_begin() + _frame_complete_offset; }
+ 
+   // Relocation support
+@@ -158,13 +155,13 @@
+   oop* oop_addr_at(int index) const{             // for GC
+     // relocation indexes are biased by 1 (because 0 is reserved)
+     assert(index > 0 && index <= _oops_length, "must be a valid non-zero index");
+-    return &oops_begin()[index-1];  
++    return &oops_begin()[index-1];
+   }
+ 
+   void copy_oops(GrowableArray<jobject>* oops);
+ 
+-  // CodeCache support: really only used by the nmethods, but in order to get 
+-  // asserts and certain bookkeeping to work in the CodeCache they are defined 
++  // CodeCache support: really only used by the nmethods, but in order to get
++  // asserts and certain bookkeeping to work in the CodeCache they are defined
+   // virtual here.
+   virtual bool is_zombie() const                 { return false; }
+   virtual bool is_locked_by_vm() const           { return false; }
+@@ -178,8 +175,8 @@
+                             OopClosure* keep_alive,
+                             bool unloading_occurred);
+   virtual void oops_do(OopClosure* f) = 0;
+-  
+-  // OopMap for frame  
++
++  // OopMap for frame
+   OopMapSet* oop_maps() const                    { return _oop_maps; }
+   void set_oop_maps(OopMapSet* p);
+   OopMap* oop_map_for_return_address(address return_address);
+@@ -200,7 +197,7 @@
+   virtual void verify();
+   virtual void print() const                     PRODUCT_RETURN;
+   virtual void print_value_on(outputStream* st) const PRODUCT_RETURN;
+-  
++
+   // Print the comment associated with offset on stream, if there is one
+   void print_block_comment(outputStream* stream, intptr_t offset) {
+     _comments.print_block_comment(stream, offset);
+@@ -285,7 +282,7 @@
+ 
+   // Typing
+   bool is_runtime_stub() const                   { return true; }
+-  
++
+   // GC support
+   bool caller_must_gc_arguments(JavaThread* thread) const { return _caller_must_gc_arguments; }
+ 
+@@ -293,7 +290,7 @@
+ 
+   // GC/Verification support
+   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f)  { /* nothing to do */ }
+-  bool is_alive() const                          { return true; }   
++  bool is_alive() const                          { return true; }
+   void do_unloading(BoolObjectClosure* is_alive,
+                     OopClosure* keep_alive,
+                     bool unloading_occurred)     { /* do nothing */ }
+@@ -310,7 +307,7 @@
+ 
+ class SingletonBlob: public CodeBlob {
+   friend class VMStructs;
+-  public:  
++  public:
+    SingletonBlob(
+      const char* name,
+      CodeBuffer* cb,
+@@ -370,7 +367,7 @@
+   );
+ 
+   // Typing
+-  bool is_deoptimization_stub() const { return true; }  
++  bool is_deoptimization_stub() const { return true; }
+   const DeoptimizationBlob *as_deoptimization_stub() const { return this; }
+   bool exception_address_is_unpack_entry(address pc) const {
+     address unpack_pc = unpack();
+@@ -379,7 +376,7 @@
+ 
+ 
+ 
+-  
++
+   // GC for args
+   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* Nothing to do */ }
+ 
+@@ -428,7 +425,7 @@
+   // Creation
+   static UncommonTrapBlob* create(
+     CodeBuffer* cb,
+-    OopMapSet*  oop_maps, 
++    OopMapSet*  oop_maps,
+     int         frame_size
+   );
+ 
+@@ -472,11 +469,11 @@
+ 
+   // Typing
+   bool is_exception_stub() const                 { return true; }
+-  
++
+   // Iteration
+   void oops_do(OopClosure* f) {}
+ };
+-#endif // COMPILER2 
++#endif // COMPILER2
+ 
+ 
+ //----------------------------------------------------------------------------------------------------
+diff -ruN openjdk6/hotspot/src/share/vm/code/codeCache.cpp openjdk/hotspot/src/share/vm/code/codeCache.cpp
+--- openjdk6/hotspot/src/share/vm/code/codeCache.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/codeCache.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)codeCache.cpp	1.132 07/05/05 17:05:19 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -66,7 +63,7 @@
+                   header_size             * 100 / total_size,
+                   relocation_size         * 100 / total_size,
+                   code_size               * 100 / total_size,
+-                  stub_size               * 100 / total_size,                  
++                  stub_size               * 100 / total_size,
+                   scopes_oop_size         * 100 / total_size,
+                   scopes_data_size        * 100 / total_size,
+                   scopes_pcs_size         * 100 / total_size);
+@@ -78,11 +75,11 @@
+     header_size      += cb->header_size();
+     relocation_size  += cb->relocation_size();
+     scopes_oop_size  += cb->oops_size();
+-    if (cb->is_nmethod()) { 
++    if (cb->is_nmethod()) {
+       nmethod *nm = (nmethod*)cb;
+       code_size        += nm->code_size();
+       stub_size        += nm->stub_size();
+-    
++
+       scopes_data_size += nm->scopes_data_size();
+       scopes_pcs_size  += nm->scopes_pcs_size();
+     } else {
+@@ -146,12 +143,12 @@
+     if (PrintCodeCacheExtension) {
+       ResourceMark rm;
+       tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
+-		    (intptr_t)_heap->begin(), (intptr_t)_heap->end(),
+-		    (address)_heap->end() - (address)_heap->begin());
++                    (intptr_t)_heap->begin(), (intptr_t)_heap->end(),
++                    (address)_heap->end() - (address)_heap->begin());
+     }
+   }
+   verify_if_often();
+-  if (PrintCodeCache2) {	// Need to add a new flag
++  if (PrintCodeCache2) {        // Need to add a new flag
+       ResourceMark rm;
+       tty->print_cr("CodeCache allocation:  addr: " INTPTR_FORMAT ", size: 0x%x\n", cb, size);
+   }
+@@ -162,18 +159,18 @@
+   assert_locked_or_safepoint(CodeCache_lock);
+   verify_if_often();
+ 
+-  if (PrintCodeCache2) {	// Need to add a new flag
++  if (PrintCodeCache2) {        // Need to add a new flag
+       ResourceMark rm;
+       tty->print_cr("CodeCache free:  addr: " INTPTR_FORMAT ", size: 0x%x\n", cb, cb->size());
+   }
+   if (cb->is_nmethod() && ((nmethod *)cb)->has_dependencies()) {
+     _number_of_nmethods_with_dependencies--;
+-  }  
+-  _number_of_blobs--;  
++  }
++  _number_of_blobs--;
+ 
+   _heap->deallocate(cb);
+ 
+-  verify_if_often();  
++  verify_if_often();
+   assert(_number_of_blobs >= 0, "sanity check");
+ }
+ 
+@@ -183,7 +180,7 @@
+   assert_locked_or_safepoint(CodeCache_lock);
+   if (cb->is_nmethod() && ((nmethod *)cb)->has_dependencies()) {
+     _number_of_nmethods_with_dependencies++;
+-  }  
++  }
+   // flush the hardware I-cache
+   ICache::invalidate_range(cb->instructions_begin(), cb->instructions_size());
+ }
+@@ -211,33 +208,33 @@
+ // This method is safe to call without holding the CodeCache_lock, as long as a dead codeblob is not
+ // looked up (i.e., one that has been marked for deletion). It only dependes on the _segmap to contain
+ // valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
+-CodeBlob* CodeCache::find_blob(void* start) {  
++CodeBlob* CodeCache::find_blob(void* start) {
+   CodeBlob* result = find_blob_unsafe(start);
+   if (result == NULL) return NULL;
+   // We could potientially look up non_entrant methods
+   guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
+-  return result;  
++  return result;
+ }
+ 
+-nmethod* CodeCache::find_nmethod(void* start) {  
++nmethod* CodeCache::find_nmethod(void* start) {
+   CodeBlob *cb = find_blob(start);
+-  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");  
++  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
+   return (nmethod*)cb;
+ }
+ 
+ 
+ void CodeCache::blobs_do(void f(CodeBlob* nm)) {
+   assert_locked_or_safepoint(CodeCache_lock);
+-  FOR_ALL_BLOBS(p) { 
+-    f(p); 
++  FOR_ALL_BLOBS(p) {
++    f(p);
+   }
+ }
+ 
+ 
+ void CodeCache::nmethods_do(void f(nmethod* nm)) {
+   assert_locked_or_safepoint(CodeCache_lock);
+-  FOR_ALL_BLOBS(nm) { 
+-    if (nm->is_nmethod()) f((nmethod*)nm); 
++  FOR_ALL_BLOBS(nm) {
++    if (nm->is_nmethod()) f((nmethod*)nm);
+   }
+ }
+ 
+@@ -324,11 +321,11 @@
+   MemoryService::add_code_heap_memory_pool(_heap);
+ 
+   // Initialize ICache flush mechanism
+-  // This service is needed for os::register_code_area 
++  // This service is needed for os::register_code_area
+   icache_init();
+ 
+-  // Give OS a chance to register generated code area.  
+-  // This is used on Windows 64 bit platforms to register 
++  // Give OS a chance to register generated code area.
++  // This is used on Windows 64 bit platforms to register
+   // Structured Exception Handlers for our generated code.
+   os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
+ }
+@@ -340,8 +337,8 @@
+ 
+ //------------------------------------------------------------------------------------------------
+ 
+-int CodeCache::number_of_nmethods_with_dependencies() { 
+-  return _number_of_nmethods_with_dependencies; 
++int CodeCache::number_of_nmethods_with_dependencies() {
++  return _number_of_nmethods_with_dependencies;
+ }
+ 
+ void CodeCache::clear_inline_caches() {
+@@ -358,47 +355,48 @@
+ #endif // PRODUCT
+ 
+ 
+-int CodeCache::mark_for_deoptimization(klassOop dependee) {
++int CodeCache::mark_for_deoptimization(DepChange& changes) {
+   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+-  
++
+ #ifndef PRODUCT
+   dependentCheckTime.start();
+   dependentCheckCount++;
+ #endif // PRODUCT
+-  
++
+   int number_of_marked_CodeBlobs = 0;
+ 
+   // search the hierarchy looking for nmethods which are affected by the loading of this class
+-  for (klassOop d = dependee; d != NULL; d = instanceKlass::cast(d)->super()) {
+-    number_of_marked_CodeBlobs += instanceKlass::cast(d)->mark_dependent_nmethods(dependee);
+-  }
++
+   // then search the interfaces this class implements looking for nmethods
+   // which might be dependent of the fact that an interface only had one
+   // implementor.
+-  objArrayOop interfaces = instanceKlass::cast(dependee)->transitive_interfaces();
+-  int number_of_interfaces = interfaces->length();
+-  for (int interface_index = 0; interface_index < number_of_interfaces; interface_index += 1) {
+-    klassOop d = klassOop(interfaces->obj_at(interface_index));
+-    number_of_marked_CodeBlobs += instanceKlass::cast(d)->mark_dependent_nmethods(dependee);
++
++  { No_Safepoint_Verifier nsv;
++    for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
++      klassOop d = str.klass();
++      number_of_marked_CodeBlobs += instanceKlass::cast(d)->mark_dependent_nmethods(changes);
++    }
+   }
+ 
+   if (VerifyDependencies) {
++    // Turn off dependency tracing while actually testing deps.
++    NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
+     FOR_ALL_ALIVE_NMETHODS(nm) {
+       if (!nm->is_marked_for_deoptimization() &&
+-          nm->is_dependent_on(NULL)) {
++          nm->check_all_dependencies()) {
+         ResourceMark rm;
+         tty->print_cr("Should have been marked for deoptimization:");
+-        tty->print_cr("  dependee = %s", instanceKlass::cast(dependee)->external_name());
++        changes.print();
+         nm->print();
+         nm->print_dependencies();
+       }
+     }
+   }
+-  
++
+ #ifndef PRODUCT
+   dependentCheckTime.stop();
+ #endif // PRODUCT
+-  
++
+   return number_of_marked_CodeBlobs;
+ }
+ 
+@@ -407,7 +405,7 @@
+ int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
+   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+   int number_of_marked_CodeBlobs = 0;
+-  
++
+   // Deoptimize all methods of the evolving class itself
+   objArrayOop old_methods = dependee->methods();
+   for (int i = 0; i < old_methods->length(); i++) {
+@@ -430,7 +428,7 @@
+     } else  {
+       // flush caches in case they refer to a redefined methodOop
+       nm->clear_inline_caches();
+-    } 
++    }
+   }
+ 
+   return number_of_marked_CodeBlobs;
+@@ -438,7 +436,7 @@
+ #endif // HOTSWAP
+ 
+ 
+-// Deoptimize all methods 
++// Deoptimize all methods
+ void CodeCache::mark_all_nmethods_for_deoptimization() {
+   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+   FOR_ALL_ALIVE_NMETHODS(nm) {
+@@ -450,7 +448,7 @@
+ int CodeCache::mark_for_deoptimization(methodOop dependee) {
+   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+   int number_of_marked_CodeBlobs = 0;
+-  
++
+   FOR_ALL_ALIVE_NMETHODS(nm) {
+     if (nm->is_dependent_on_method(dependee)) {
+       ResourceMark rm;
+@@ -472,7 +470,7 @@
+       // be zombied when it is no longer seen on the stack. Note that the nmethod
+       // might be "entrant" and not on the stack and so could be zombied immediately
+       // but we can't tell because we don't track it on stack until it becomes
+-      // non-entrant. 
++      // non-entrant.
+ 
+       if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
+         nm->make_zombie();
+@@ -534,23 +532,23 @@
+       nmethod* nm = (nmethod*)cb;
+ 
+       if (Verbose && nm->method() != NULL) {
+-	ResourceMark rm;
+-	char *method_name = nm->method()->name_and_sig_as_C_string();
+-	tty->print("%s", method_name);
+-	if(nm->is_alive()) { tty->print_cr(" alive"); }
+-	if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
+-	if(nm->is_zombie()) { tty->print_cr(" zombie"); }
++        ResourceMark rm;
++        char *method_name = nm->method()->name_and_sig_as_C_string();
++        tty->print("%s", method_name);
++        if(nm->is_alive()) { tty->print_cr(" alive"); }
++        if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
++        if(nm->is_zombie()) { tty->print_cr(" zombie"); }
+       }
+ 
+       nmethodCount++;
+- 
++
+       if(nm->is_alive()) { nmethodAlive++; }
+       if(nm->is_not_entrant()) { nmethodNotEntrant++; }
+       if(nm->is_zombie()) { nmethodZombie++; }
+       if(nm->is_unloaded()) { nmethodUnloaded++; }
+       if(nm->is_native_method()) { nmethodNative++; }
+ 
+-      if(nm->method() != NULL && nm->is_java_method()) { 
++      if(nm->method() != NULL && nm->is_java_method()) {
+         nmethodJava++;
+         if(nm->code_size() > maxCodeSize) {
+           maxCodeSize = nm->code_size();
+@@ -577,7 +575,7 @@
+   for (cb = first(); cb != NULL; cb = next(cb)) {
+     if (cb->is_nmethod()) {
+       nmethod* nm = (nmethod*)cb;
+-      if(nm->is_java_method()) { 
++      if(nm->is_java_method()) {
+         buckets[nm->code_size() / bucketSize]++;
+       }
+     }
+@@ -647,7 +645,7 @@
+         code_size += p->instructions_size();
+         OopMapSet* set = p->oop_maps();
+         if (set != NULL) {
+-          number_of_oop_maps += set->size();      
++          number_of_oop_maps += set->size();
+           map_size   += set->heap_size();
+         }
+       }
+diff -ruN openjdk6/hotspot/src/share/vm/code/codeCache.hpp openjdk/hotspot/src/share/vm/code/codeCache.hpp
+--- openjdk6/hotspot/src/share/vm/code/codeCache.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/codeCache.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)codeCache.hpp	1.67 07/05/05 17:05:18 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The CodeCache implements the code cache for various pieces of generated
+@@ -35,6 +32,7 @@
+ //     locating a method given a addess of an instruction.
+ 
+ class OopClosure;
++class DepChange;
+ 
+ class CodeCache : AllStatic {
+   friend class VMStructs;
+@@ -47,9 +45,9 @@
+   static int _number_of_blobs;
+   static int _number_of_nmethods_with_dependencies;
+   static bool _needs_cache_clean;
+-  
++
+   static void verify_if_often() PRODUCT_RETURN;
+- public:     
++ public:
+ 
+   // Initialization
+   static void initialize();
+@@ -61,9 +59,9 @@
+   static int alignment_offset();                    // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
+   static void free(CodeBlob* cb);                   // frees a CodeBlob
+   static void flush();                              // flushes all CodeBlobs
+-  static bool contains(void *p);                    // returns whether p is included  
+-  static void blobs_do(void f(CodeBlob* cb));       // iterates over all CodeBlobs 
+-  static void nmethods_do(void f(nmethod* nm));     // iterates over all nmethods 
++  static bool contains(void *p);                    // returns whether p is included
++  static void blobs_do(void f(CodeBlob* cb));       // iterates over all CodeBlobs
++  static void nmethods_do(void f(nmethod* nm));     // iterates over all nmethods
+ 
+   // Lookup
+   static CodeBlob* find_blob(void* start);
+@@ -72,14 +70,14 @@
+   // Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
+   // what you are doing)
+   static CodeBlob* find_blob_unsafe(void* start) {
+-    CodeBlob* result = (CodeBlob*)_heap->find_start(start); 
+-    assert(result == NULL || result->blob_contains((address)start), "found wrong CodeBlob");      
+-    return result;   
+-  } 
++    CodeBlob* result = (CodeBlob*)_heap->find_start(start);
++    assert(result == NULL || result->blob_contains((address)start), "found wrong CodeBlob");
++    return result;
++  }
+ 
+   // Iteration
+   static CodeBlob* first();
+-  static CodeBlob* next (CodeBlob* cb);  
++  static CodeBlob* next (CodeBlob* cb);
+   static CodeBlob* alive(CodeBlob *cb);
+   static nmethod* alive_nmethod(CodeBlob *cb);
+   static int       nof_blobs()                 { return _number_of_blobs; }
+@@ -94,29 +92,29 @@
+                            OopClosure* keep_alive,
+                            bool unloading_occurred);
+   static void oops_do(OopClosure* f);
+-  
++
+   // Printing/debugging
+-  static void print()   PRODUCT_RETURN;          // prints summary 
++  static void print()   PRODUCT_RETURN;          // prints summary
+   static void print_internals();
+   static void verify();                          // verifies the code cache
+ 
+   // The full limits of the codeCache
+   static address  low_bound()                    { return (address) _heap->low_boundary(); }
+   static address  high_bound()                   { return (address) _heap->high_boundary(); }
+-  
++
+   // Profiling
+   static address first_address();                // first address used for CodeBlobs
+   static address last_address();                 // last  address used for CodeBlobs
+   static size_t  capacity()                      { return _heap->capacity(); }
+   static size_t  max_capacity()                  { return _heap->max_capacity(); }
+-  static size_t  unallocated_capacity()          { return _heap->unallocated_capacity(); }  
++  static size_t  unallocated_capacity()          { return _heap->unallocated_capacity(); }
+ 
+   static bool needs_cache_clean()                { return _needs_cache_clean; }
+   static void set_needs_cache_clean(bool v)      { _needs_cache_clean = v;    }
+   static void clear_inline_caches();             // clear all inline caches
+ 
+   // Deoptimization
+-  static int  mark_for_deoptimization(klassOop dependee);
++  static int  mark_for_deoptimization(DepChange& changes);
+ #ifdef HOTSWAP
+   static int  mark_for_evol_deoptimization(instanceKlassHandle dependee);
+ #endif // HOTSWAP
+diff -ruN openjdk6/hotspot/src/share/vm/code/compiledIC.cpp openjdk/hotspot/src/share/vm/code/compiledIC.cpp
+--- openjdk6/hotspot/src/share/vm/code/compiledIC.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/compiledIC.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)compiledIC.cpp	1.157 07/05/05 17:05:18 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -37,9 +34,9 @@
+ // MT-safe to use.
+ 
+ void CompiledIC::set_cached_oop(oop cache) {
+-  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");  
++  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
+   assert (!is_optimized(), "an optimized virtual call does not have a cached oop");
+-  assert (cache == NULL || cache != badOop, "invalid oop");  
++  assert (cache == NULL || cache != badOop, "invalid oop");
+ 
+   if (TraceCompiledIC) {
+     tty->print("  ");
+@@ -48,7 +45,7 @@
+   }
+ 
+   if (cache == NULL)  cache = (oop)Universe::non_oop_word();
+-  
++
+   *_oop_addr = cache;
+   // fix up the relocations
+   RelocIterator iter = _oops;
+@@ -56,7 +53,7 @@
+     if (iter.type() == relocInfo::oop_type) {
+       oop_Relocation* r = iter.oop_reloc();
+       if (r->oop_addr() == _oop_addr)
+-	r->fix_oop_relocation();
++        r->fix_oop_relocation();
+     }
+   }
+   return;
+@@ -67,21 +64,21 @@
+   assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
+   assert (!is_optimized(), "an optimized virtual call does not have a cached oop");
+ 
+-  if (!is_in_transition_state()) {    
++  if (!is_in_transition_state()) {
+     oop data = *_oop_addr;
+     // If we let the oop value here be initialized to zero...
+     assert(data != NULL || Universe::non_oop_word() == NULL,
+-	   "no raw nulls in CompiledIC oops, because of patching races");
+-    return (data == (oop)Universe::non_oop_word()) ? (oop)NULL : data;    
++           "no raw nulls in CompiledIC oops, because of patching races");
++    return (data == (oop)Universe::non_oop_word()) ? (oop)NULL : data;
+   } else {
+     return InlineCacheBuffer::cached_oop_for((CompiledIC *)this);
+-  }  
++  }
+ }
+ 
+ 
+ void CompiledIC::set_ic_destination(address entry_point) {
+   assert(entry_point != NULL, "must set legal entry point");
+-  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");  
++  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
+   if (TraceCompiledIC) {
+     tty->print("  ");
+     print_compiled_ic();
+@@ -92,14 +89,14 @@
+   CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
+   assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
+ #endif
+-  _ic_call->set_destination_mt_safe(entry_point);  
++  _ic_call->set_destination_mt_safe(entry_point);
+ }
+ 
+ 
+ address CompiledIC::ic_destination() const {
+  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
+  if (!is_in_transition_state()) {
+-   return _ic_call->destination();  
++   return _ic_call->destination();
+  } else {
+    return InlineCacheBuffer::ic_destination_for((CompiledIC *)this);
+  }
+@@ -114,7 +111,7 @@
+ 
+ // Returns native address of 'call' instruction in inline-cache. Used by
+ // the InlineCacheBuffer when it needs to find the stub.
+-address CompiledIC::stub_address() const {  
++address CompiledIC::stub_address() const {
+   assert(is_in_transition_state(), "should only be called when we are in a transition state");
+   return _ic_call->destination();
+ }
+@@ -131,27 +128,27 @@
+   assert(method->is_oop(), "cannot be NULL and must be oop");
+   assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
+   assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");
+-  
++
+   address entry;
+   if (is_invoke_interface) {
+-    int index = klassItable::compute_itable_index(call_info->resolved_method()());            
+-    entry = VtableStubs::create_stub(false, index, method());    
++    int index = klassItable::compute_itable_index(call_info->resolved_method()());
++    entry = VtableStubs::create_stub(false, index, method());
+     assert(entry != NULL, "entry not computed");
+     klassOop k = call_info->resolved_method()->method_holder();
+     assert(Klass::cast(k)->is_interface(), "sanity check");
+     InlineCacheBuffer::create_transition_stub(this, k, entry);
+   } else {
+     // Can be different than method->vtable_index(), due to package-private etc.
+-    int vtable_index = call_info->vtable_index(); 
++    int vtable_index = call_info->vtable_index();
+     entry = VtableStubs::create_stub(true, vtable_index, method());
+     InlineCacheBuffer::create_transition_stub(this, method(), entry);
+   }
+-      
++
+   if (TraceICs) {
+     ResourceMark rm;
+     tty->print_cr ("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
+-		   instruction_address(), method->print_value_string(), entry);
+-  } 
++                   instruction_address(), method->print_value_string(), entry);
++  }
+ 
+   Events::log("compiledIC " INTPTR_FORMAT " --> megamorphic " INTPTR_FORMAT, this, (address)method());
+   // We can't check this anymore. With lazy deopt we could have already
+@@ -165,10 +162,10 @@
+ 
+ 
+ // true if destination is megamorphic stub
+-bool CompiledIC::is_megamorphic() const {  
++bool CompiledIC::is_megamorphic() const {
+   assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
+   assert(!is_optimized(), "an optimized call cannot be megamorphic");
+-  
++
+   // Cannot rely on cached_oop. It is either an interface or a method.
+   return VtableStubs::is_entry_point(ic_destination());
+ }
+@@ -196,15 +193,15 @@
+ #endif // COMPILER1
+ #endif // TIERED
+   assert( is_c1_method ||
+-         !is_monomorphic || 
+-         is_optimized() ||  
++         !is_monomorphic ||
++         is_optimized() ||
+          (cached_oop() != NULL && cached_oop()->is_klass()), "sanity check");
+ #endif // ASSERT
+   return is_monomorphic;
+ }
+ 
+ 
+-bool CompiledIC::is_call_to_interpreted() const {  
++bool CompiledIC::is_call_to_interpreted() const {
+   assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
+   // Call to interpreter if destination is either calling to a stub (if it
+   // is optimized), or calling to an I2C blob
+@@ -212,8 +209,8 @@
+   if (!is_optimized()) {
+     // must use unsafe because the destination can be a zombie (and we're cleaning)
+     // and the print_compiled_ic code wants to know if site (in the non-zombie)
+-    // is to the interpreter. 
+-    CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());  
++    // is to the interpreter.
++    CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
+     is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
+     assert(!is_call_to_interpreted ||  (cached_oop() != NULL && cached_oop()->is_compiledICHolder()), "sanity check");
+   } else {
+@@ -232,7 +229,7 @@
+ }
+ 
+ 
+-void CompiledIC::set_to_clean() {  
++void CompiledIC::set_to_clean() {
+   assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked() , "MT-unsafe call");
+   if (TraceInlineCacheClearing || TraceICs) {
+     tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", instruction_address());
+@@ -251,15 +248,15 @@
+   bool safe_transition = is_optimized() || SafepointSynchronize::is_at_safepoint();
+ 
+   if (safe_transition) {
+-    if (!is_optimized()) set_cached_oop(NULL);  
++    if (!is_optimized()) set_cached_oop(NULL);
+     // Kill any leftover stub we might have too
+     if (is_in_transition_state()) {
+       ICStub* old_stub = ICStub_from_destination_address(stub_address());
+       old_stub->clear();
+     }
+-    set_ic_destination(entry); 
++    set_ic_destination(entry);
+   } else {
+-    // Unsafe transition - create stub. 
++    // Unsafe transition - create stub.
+     InlineCacheBuffer::create_transition_stub(this, NULL, entry);
+   }
+   // We can't check this anymore. With lazy deopt we could have already
+@@ -293,7 +290,7 @@
+   // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
+   // callsites. In addition ic_miss code will update a site to monomorphic if it determines
+   // that an monomorphic call to the interpreter can now be monomorphic to compiled code.
+-  // 
++  //
+   // In both of these cases the only thing being modifed is the jump/call target and these
+   // transitions are mt_safe
+ 
+@@ -307,44 +304,44 @@
+       // (either because of CHA or the static target is final)
+       // At code generation time, this call has been emitted as static call
+       // Call via stub
+-      assert(info.cached_oop().not_null() && info.cached_oop()->is_method(), "sanity check");        
+-      CompiledStaticCall* csc = compiledStaticCall_at(instruction_address());    
++      assert(info.cached_oop().not_null() && info.cached_oop()->is_method(), "sanity check");
++      CompiledStaticCall* csc = compiledStaticCall_at(instruction_address());
+       methodHandle method (thread, (methodOop)info.cached_oop()());
+       csc->set_to_interpreted(method, info.entry());
+       if (TraceICs) {
+          ResourceMark rm(thread);
+-         tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s", 
++         tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s",
+            instruction_address(),
+            method->print_value_string());
+-      }    
++      }
+     } else {
+-      // Call via method-klass-holder 
+-      assert(info.cached_oop().not_null(), "must be set");            
+-      InlineCacheBuffer::create_transition_stub(this, info.cached_oop()(), info.entry());    
++      // Call via method-klass-holder
++      assert(info.cached_oop().not_null(), "must be set");
++      InlineCacheBuffer::create_transition_stub(this, info.cached_oop()(), info.entry());
+ 
+       if (TraceICs) {
+          ResourceMark rm(thread);
+          tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter via mkh", instruction_address());
+-      }          
++      }
+     }
+   } else {
+-    // Call to compiled code          
++    // Call to compiled code
+     bool static_bound = info.is_optimized() || (info.cached_oop().is_null());
+ #ifdef ASSERT
+     CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
+     assert (cb->is_nmethod(), "must be compiled!");
+ #endif /* ASSERT */
+-    
++
+     // This is MT safe if we come from a clean-cache and go through a
+     // non-verified entry point
+     bool safe = SafepointSynchronize::is_at_safepoint() ||
+-                (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));               
++                (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));
+ 
+     if (!safe) {
+       InlineCacheBuffer::create_transition_stub(this, info.cached_oop()(), info.entry());
+     } else {
+       set_ic_destination(info.entry());
+-      if (!is_optimized()) set_cached_oop(info.cached_oop()());    
++      if (!is_optimized()) set_cached_oop(info.cached_oop()());
+     }
+ 
+     if (TraceICs) {
+@@ -354,7 +351,7 @@
+         instruction_address(),
+         ((klassOop)info.cached_oop()())->print_value_string(),
+         (safe) ? "" : "via stub");
+-    }          
++    }
+   }
+   // We can't check this anymore. With lazy deopt we could have already
+   // cleaned this IC entry before we even return. This is possible if
+@@ -369,12 +366,12 @@
+ // is_optimized: Compiler has generated an optimized call (i.e., no inline
+ // cache) static_bound: The call can be static bound (i.e, no need to use
+ // inline cache)
+-void CompiledIC::compute_monomorphic_entry(methodHandle method, 
++void CompiledIC::compute_monomorphic_entry(methodHandle method,
+                                            KlassHandle receiver_klass,
+                                            bool is_optimized,
+                                            bool static_bound,
+                                            CompiledICInfo& info,
+-                                           TRAPS) {  
++                                           TRAPS) {
+   info._is_optimized = is_optimized;
+ 
+   nmethod* method_code = method->code();
+@@ -395,7 +392,7 @@
+     } else {
+       info._cached_oop = receiver_klass;
+     }
+-    info._to_interpreter = false;        
++    info._to_interpreter = false;
+   } else {
+     // Note: the following problem exists with Compiler1:
+     //   - at compile time we may or may not know if the destination is final
+@@ -420,7 +417,7 @@
+ #ifdef COMPILER2
+ #ifdef TIERED
+ #if defined(ASSERT)
+-    // can't check the assert because we don't have the CompiledIC with which to 
++    // can't check the assert because we don't have the CompiledIC with which to
+     // find the address if the call instruction.
+     //
+     // CodeBlob* cb = find_blob_unsafe(instruction_address());
+@@ -430,8 +427,8 @@
+     assert(!static_bound || is_optimized, "static_bound should imply is_optimized");
+ #endif // TIERED
+ #endif // COMPILER2
+-    if (is_optimized) {      
+-      // Use stub entry       
++    if (is_optimized) {
++      // Use stub entry
+       info._entry      = method()->get_c2i_entry();
+       info._cached_oop = method;
+     } else {
+@@ -444,7 +441,7 @@
+ }
+ 
+ 
+-inline static RelocIterator parse_ic(CodeBlob* code, address ic_call, oop* &_oop_addr, bool *is_optimized) {  
++inline static RelocIterator parse_ic(CodeBlob* code, address ic_call, oop* &_oop_addr, bool *is_optimized) {
+    address  first_oop = NULL;
+    // Mergers please note: Sun SC5.x CC insists on an lvalue for a reference parameter.
+    CodeBlob *code1 = code;
+@@ -505,7 +502,7 @@
+ void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
+   address stub=find_stub();
+   assert(stub!=NULL, "stub not found");
+-  
++
+   if (TraceICs) {
+     ResourceMark rm;
+     tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
+@@ -513,18 +510,18 @@
+                   callee->name_and_sig_as_C_string());
+   }
+ 
+-  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object  
++  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
+   NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
+ 
+   assert(method_holder->data()    == 0           || method_holder->data()    == (intptr_t)callee(), "a) MT-unsafe modification of inline cache");
+   assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry, "b) MT-unsafe modification of inline cache");
+ 
+-  // Update stub    
++  // Update stub
+   method_holder->set_data((intptr_t)callee());
+   jump->set_jump_destination(entry);
+ 
+-  // Update jump to call 
+-  set_destination_mt_safe(stub);  
++  // Update jump to call
++  set_destination_mt_safe(stub);
+ }
+ 
+ 
+@@ -539,7 +536,7 @@
+ 
+   if (info._to_interpreter) {
+     // Call to interpreted code
+-    set_to_interpreted(info.callee(), info.entry());  
++    set_to_interpreted(info.callee(), info.entry());
+   } else {
+     if (TraceICs) {
+       ResourceMark rm;
+@@ -550,7 +547,7 @@
+     // Call to compiled code
+     assert (CodeCache::contains(info.entry()), "wrong entry point");
+     set_destination_mt_safe(info.entry());
+-  }      
++  }
+ }
+ 
+ 
+@@ -567,7 +564,7 @@
+     // puts a converter-frame on the stack to save arguments.
+     info._to_interpreter = true;
+     info._entry      = m()->get_c2i_entry();
+-  } 
++  }
+ }
+ 
+ 
+@@ -576,21 +573,21 @@
+   // Reset stub
+   address stub = static_stub->addr();
+   assert(stub!=NULL, "stub not found");
+-  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object  
++  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
+   NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
+   method_holder->set_data(0);
+   jump->set_jump_destination((address)-1);
+ }
+ 
+ 
+-address CompiledStaticCall::find_stub() {    
++address CompiledStaticCall::find_stub() {
+   // Find reloc. information containing this call-site
+   RelocIterator iter((nmethod*)NULL, instruction_address());
+-  while (iter.next()) {    
++  while (iter.next()) {
+     if (iter.addr() == instruction_address()) {
+       switch(iter.type()) {
+         case relocInfo::static_call_type:
+-          return iter.static_call_reloc()->static_stub();          
++          return iter.static_call_reloc()->static_stub();
+         // We check here for opt_virtual_call_type, since we reuse the code
+         // from the CompiledIC implementation
+         case relocInfo::opt_virtual_call_type:
+@@ -601,22 +598,22 @@
+           ShouldNotReachHere();
+       }
+     }
+-  }  
++  }
+   return NULL;
+ }
+ 
+ 
+ //-----------------------------------------------------------------------------
+ // Non-product mode code
+-#ifndef PRODUCT 
++#ifndef PRODUCT
+ 
+ void CompiledIC::verify() {
+   // make sure code pattern is actually a call imm32 instruction
+-  _ic_call->verify();  
++  _ic_call->verify();
+   if (os::is_MP()) {
+-    _ic_call->verify_alignment();  
++    _ic_call->verify_alignment();
+   }
+-  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted() 
++  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
+           || is_optimized() || is_megamorphic(), "sanity check");
+ }
+ 
+@@ -629,7 +626,7 @@
+ 
+ void CompiledIC::print_compiled_ic() {
+   tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT,
+-	     instruction_address(), is_call_to_interpreted() ? "interpreted " : "", ic_destination());
++             instruction_address(), is_call_to_interpreted() ? "interpreted " : "", ic_destination());
+ }
+ 
+ 
+@@ -646,20 +643,20 @@
+ }
+ 
+ void CompiledStaticCall::verify() {
+-  // Verify call 
++  // Verify call
+   NativeCall::verify();
+   if (os::is_MP()) {
+-    verify_alignment();  
++    verify_alignment();
+   }
+ 
+   // Verify stub
+-  address stub = find_stub();  
++  address stub = find_stub();
+   assert(stub != NULL, "no stub found for static call");
+-  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object  
++  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
+   NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
+ 
+   // Verify state
+-  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");  
++  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
+ }
+ 
+ #endif
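
The first hunk above touches CompiledIC::set_to_monomorphic, which decides whether the call site may be patched in place or must be rebound through an InlineCacheBuffer transition stub. A minimal sketch of that decision, using simplified stand-in types rather than the HotSpot API:

    #include <cstdio>

    // Stand-in for CompiledICInfo: is the target an optimized (non-IC)
    // call, and is there a cached receiver oop at all?
    struct ICInfo { bool is_optimized; bool cached_oop_is_null; };

    // Patching in place is MT-safe at a safepoint, or when the IC is not
    // mid-transition and the new binding needs no receiver check on entry.
    bool can_patch_in_place(bool at_safepoint, bool in_transition,
                            bool ic_is_clean, const ICInfo& info) {
      bool static_bound = info.is_optimized || info.cached_oop_is_null;
      return at_safepoint ||
             (!in_transition &&
              (info.is_optimized || static_bound || ic_is_clean));
    }

    int main() {
      ICInfo info = {false, false};
      // Busy IC outside a safepoint: must go through a transition stub.
      printf("%s\n", can_patch_in_place(false, true, false, info)
                         ? "patch in place" : "use transition stub");
      return 0;
    }
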
+diff -ruN openjdk6/hotspot/src/share/vm/code/compiledIC.hpp openjdk/hotspot/src/share/vm/code/compiledIC.hpp
+--- openjdk6/hotspot/src/share/vm/code/compiledIC.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/compiledIC.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)compiledIC.hpp	1.52 07/05/05 17:05:19 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //-----------------------------------------------------------------------------
+@@ -47,7 +44,7 @@
+ //
+ // The numbers in square brackets refer to the kind of transition:
+ // [1]: Initial fixup. Receiver is found from debug information
+-// [2]: Compilation of a method 
++// [2]: Compilation of a method
+ // [3]: Recompilation of a method (note: only entry is changed. The klassOop must stay the same)
+ // [4]: Inline cache miss. We go directly to megamorphic call.
+ //
+@@ -87,31 +84,31 @@
+   // to change an inline-cache. These change the underlying inline-cache directly. They *never* make
+   // changes to a transition stub.
+   void set_ic_destination(address entry_point);
+-  void set_cached_oop(oop cache); 
++  void set_cached_oop(oop cache);
+ 
+   // Reads the location of the transition stub. This will fail with an assertion, if no transition stub is
+   // associated with the inline cache.
+   address stub_address() const;
+   bool is_in_transition_state() const;  // Use InlineCacheBuffer
+-  
++
+  public:
+   // conversion (machine PC to CompiledIC*)
+   friend CompiledIC* CompiledIC_before(address return_addr);
+   friend CompiledIC* CompiledIC_at(address call_site);
+-  friend CompiledIC* CompiledIC_at(Relocation* call_site);   
++  friend CompiledIC* CompiledIC_at(Relocation* call_site);
+ 
+   // Return the cached_oop/destination associated with this inline cache. If the cache currently points
+-  // to a transition stub, it will read the values from the transition stub. 
+-  oop  cached_oop() const;  
++  // to a transition stub, it will read the values from the transition stub.
++  oop  cached_oop() const;
+   address ic_destination() const;
+ 
+   bool is_optimized() const   { return _is_optimized; }
+ 
+   // State
+   bool is_clean() const;
+-  bool is_megamorphic() const;   
++  bool is_megamorphic() const;
+   bool is_call_to_compiled() const;
+-  bool is_call_to_interpreted() const; 
++  bool is_call_to_interpreted() const;
+ 
+   address end_of_call() { return  _ic_call->return_address(); }
+ 
+@@ -121,13 +118,13 @@
+   // Note: We do not provide any direct access to the stub code, to prevent parts of the code
+   // to manipulate the inline cache in MT-unsafe ways.
+   //
+-  // They all takes a TRAP argument, since they can cause a GC if the inline-cache buffer is full.  
++  // They all take a TRAP argument, since they can cause a GC if the inline-cache buffer is full.
+   //
+   void set_to_clean();  // Can only be called during a safepoint operation
+   void set_to_monomorphic(const CompiledICInfo& info);
+   void set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
+-  
+-  static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass, 
++
++  static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
+                                         bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);
+ 
+   // Location
+@@ -139,20 +136,20 @@
+   void verify()            PRODUCT_RETURN;
+ };
+ 
+-inline CompiledIC* CompiledIC_before(address return_addr) { 
+-  CompiledIC* c_ic = new CompiledIC(nativeCall_before(return_addr));    
+-  c_ic->verify();    
++inline CompiledIC* CompiledIC_before(address return_addr) {
++  CompiledIC* c_ic = new CompiledIC(nativeCall_before(return_addr));
++  c_ic->verify();
+   return c_ic;
+ }
+ 
+ inline CompiledIC* CompiledIC_at(address call_site) {
+-  CompiledIC* c_ic = new CompiledIC(nativeCall_at(call_site));    
++  CompiledIC* c_ic = new CompiledIC(nativeCall_at(call_site));
+   c_ic->verify();
+   return c_ic;
+ }
+ 
+-inline CompiledIC* CompiledIC_at(Relocation* call_site) {    
+-  CompiledIC* c_ic = new CompiledIC(call_site);    
++inline CompiledIC* CompiledIC_at(Relocation* call_site) {
++  CompiledIC* c_ic = new CompiledIC(call_site);
+   c_ic->verify();
+   return c_ic;
+ }
+@@ -201,40 +198,40 @@
+   friend CompiledStaticCall* compiledStaticCall_at(address native_call);
+   friend CompiledStaticCall* compiledStaticCall_at(Relocation* call_site);
+ 
+-  // State 
++  // State
+   bool is_clean() const;
+   bool is_call_to_compiled() const;
+   bool is_call_to_interpreted() const;
+-  
++
+   // Clean static call (will force resolving on next use)
+   void set_to_clean();
+ 
+-  // Set state. The entry must be the same, as computed by compute_entry. 
++  // Set state. The entry must be the same, as computed by compute_entry.
+   // Computation and setting is split up, since the actions are separate during
+-  // a OptoRuntime::resolve_xxx.  
++  // an OptoRuntime::resolve_xxx.
+   void set(const StaticCallInfo& info);
+-  
++
+   // Compute entry point given a method
+   static void compute_entry(methodHandle m, StaticCallInfo& info);
+ 
+   // Stub support
+   address find_stub();
+   static void set_stub_to_clean(static_stub_Relocation* static_stub);
+-  
++
+   // Misc.
+   void print()  PRODUCT_RETURN;
+-  void verify() PRODUCT_RETURN;  
++  void verify() PRODUCT_RETURN;
+ };
+ 
+ 
+ inline CompiledStaticCall* compiledStaticCall_before(address return_addr) {
+-  CompiledStaticCall* st = (CompiledStaticCall*)nativeCall_before(return_addr);    
++  CompiledStaticCall* st = (CompiledStaticCall*)nativeCall_before(return_addr);
+   st->verify();
+   return st;
+ }
+ 
+ inline CompiledStaticCall* compiledStaticCall_at(address native_call) {
+-  CompiledStaticCall* st = (CompiledStaticCall*)native_call;    
++  CompiledStaticCall* st = (CompiledStaticCall*)native_call;
+   st->verify();
+   return st;
+ }
+@@ -242,5 +239,3 @@
+ inline CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) {
+   return compiledStaticCall_at(call_site->addr());
+ }
+-
+-
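
For orientation, the header above distinguishes four observable call-site states, queried via is_clean()/is_call_to_compiled()/is_call_to_interpreted()/is_megamorphic(). A toy enumeration of those states; illustrative only, since HotSpot derives the state from the call target rather than storing it:

    #include <cstdio>

    enum class ICState {
      Clean,                   // unresolved; first call fixes it up [1]
      MonomorphicCompiled,     // one receiver klass, compiled target
      MonomorphicInterpreted,  // one receiver klass, interpreted target
      Megamorphic              // miss handler; many receiver klasses [4]
    };

    const char* name(ICState s) {
      switch (s) {
        case ICState::Clean:                  return "clean";
        case ICState::MonomorphicCompiled:    return "compiled";
        case ICState::MonomorphicInterpreted: return "interpreted";
        case ICState::Megamorphic:            return "megamorphic";
      }
      return "?";
    }

    int main() {
      ICState s = ICState::Clean;
      s = ICState::MonomorphicCompiled;  // [1] initial fixup
      s = ICState::Megamorphic;          // [4] inline-cache miss
      printf("final state: %s\n", name(s));
      return 0;
    }
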
+diff -ruN openjdk6/hotspot/src/share/vm/code/compressedStream.cpp openjdk/hotspot/src/share/vm/code/compressedStream.cpp
+--- openjdk6/hotspot/src/share/vm/code/compressedStream.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/compressedStream.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)compressedStream.cpp	1.27 07/05/05 17:05:20 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/code/compressedStream.hpp openjdk/hotspot/src/share/vm/code/compressedStream.hpp
+--- openjdk6/hotspot/src/share/vm/code/compressedStream.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/compressedStream.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)compressedStream.hpp	1.27 07/05/05 17:05:20 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Simple interface for filing out and filing in basic types
+@@ -67,7 +64,7 @@
+   jint     read_int_mb(jint b0);  // UNSIGNED5 coding, 2-5 byte cases
+ 
+  public:
+-  CompressedReadStream(u_char* buffer, int position = 0) 
++  CompressedReadStream(u_char* buffer, int position = 0)
+   : CompressedStream(buffer, position) {}
+ 
+   jboolean read_bool()                 { return (jboolean) read();      }
+@@ -93,7 +90,7 @@
+   void store(u_char b) {
+     _buffer[_position++] = b;
+   }
+-  void write(u_char b) { 
++  void write(u_char b) {
+     if (full()) grow();
+     store(b);
+   }
+@@ -106,7 +103,7 @@
+ 
+  public:
+   CompressedWriteStream(int initial_size);
+-  CompressedWriteStream(u_char* buffer, int initial_size, int position = 0) 
++  CompressedWriteStream(u_char* buffer, int initial_size, int position = 0)
+   : CompressedStream(buffer, position) { _size = initial_size; }
+ 
+   void write_bool(jboolean value)      { write(value);      }
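
CompressedReadStream/CompressedWriteStream above persist jints in a variable-length coding (read_int_mb handles the UNSIGNED5 2-5 byte cases). HotSpot's exact UNSIGNED5 coding is not reproduced here; as a rough analogue, a classic LEB128 varint codec shows the same small-values-stay-small idea:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // LEB128-style varint: 7 payload bits per byte, high bit means
    // "more". NOT HotSpot's UNSIGNED5 coding, only an analogous scheme.
    void write_varint(std::vector<uint8_t>& buf, uint32_t v) {
      while (v >= 0x80) { buf.push_back(uint8_t(v) | 0x80); v >>= 7; }
      buf.push_back(uint8_t(v));
    }

    uint32_t read_varint(const uint8_t* p, size_t& pos) {
      uint32_t v = 0;
      int shift = 0;
      uint8_t b;
      do { b = p[pos++]; v |= uint32_t(b & 0x7f) << shift; shift += 7; }
      while (b & 0x80);
      return v;
    }

    int main() {
      std::vector<uint8_t> buf;
      write_varint(buf, 5);        // 1 byte
      write_varint(buf, 300);      // 2 bytes
      size_t pos = 0;
      assert(read_varint(buf.data(), pos) == 5);
      assert(read_varint(buf.data(), pos) == 300);
      return 0;
    }
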
+diff -ruN openjdk6/hotspot/src/share/vm/code/debugInfo.cpp openjdk/hotspot/src/share/vm/code/debugInfo.cpp
+--- openjdk6/hotspot/src/share/vm/code/debugInfo.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/debugInfo.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)debugInfo.cpp	1.34 07/05/05 17:05:19 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -36,14 +33,43 @@
+ }
+ 
+ // Serializing oops
++
+ void DebugInfoWriteStream::write_handle(jobject h) {
+   write_int(recorder()->oop_recorder()->find_index(h));
+ }
+ 
++ScopeValue* DebugInfoReadStream::read_object_value() {
++  int id = read_int();
++#ifdef ASSERT
++  assert(_obj_pool != NULL, "object pool does not exist");
++  for (int i = _obj_pool->length() - 1; i >= 0; i--) {
++    assert(((ObjectValue*) _obj_pool->at(i))->id() != id, "should not be read twice");
++  }
++#endif
++  ObjectValue* result = new ObjectValue(id);
++  _obj_pool->append(result);
++  result->read_object(this);
++  return result;
++}
++
++ScopeValue* DebugInfoReadStream::get_cached_object() {
++  int id = read_int();
++  assert(_obj_pool != NULL, "object pool does not exist");
++  for (int i = _obj_pool->length() - 1; i >= 0; i--) {
++    ObjectValue* sv = (ObjectValue*) _obj_pool->at(i);
++    if (sv->id() == id) {
++      return sv;
++    }
++  }
++  ShouldNotReachHere();
++  return NULL;
++}
++
+ // Serializing scope values
+ 
+ enum { LOCATION_CODE = 0, CONSTANT_INT_CODE = 1,  CONSTANT_OOP_CODE = 2,
+-                          CONSTANT_LONG_CODE = 3, CONSTANT_DOUBLE_CODE = 4 };
++                          CONSTANT_LONG_CODE = 3, CONSTANT_DOUBLE_CODE = 4,
++                          OBJECT_CODE = 5,        OBJECT_ID_CODE = 6 };
+ 
+ ScopeValue* ScopeValue::read_from(DebugInfoReadStream* stream) {
+   ScopeValue* result = NULL;
+@@ -53,7 +79,9 @@
+    case CONSTANT_OOP_CODE:    result = new ConstantOopReadValue(stream); break;
+    case CONSTANT_LONG_CODE:   result = new ConstantLongValue(stream);    break;
+    case CONSTANT_DOUBLE_CODE: result = new ConstantDoubleValue(stream);  break;
+-   default: ShouldNotReachHere();            
++   case OBJECT_CODE:          result = stream->read_object_value();      break;
++   case OBJECT_ID_CODE:       result = stream->get_cached_object();      break;
++   default: ShouldNotReachHere();
+   }
+   return result;
+ }
+@@ -73,6 +101,51 @@
+   location().print_on(st);
+ }
+ 
++// ObjectValue
++
++void ObjectValue::read_object(DebugInfoReadStream* stream) {
++  _klass = read_from(stream);
++  assert(_klass->is_constant_oop(), "should be constant klass oop");
++  int length = stream->read_int();
++  for (int i = 0; i < length; i++) {
++    ScopeValue* val = read_from(stream);
++    _field_values.append(val);
++  }
++}
++
++void ObjectValue::write_on(DebugInfoWriteStream* stream) {
++  if (_visited) {
++    stream->write_int(OBJECT_ID_CODE);
++    stream->write_int(_id);
++  } else {
++    _visited = true;
++    stream->write_int(OBJECT_CODE);
++    stream->write_int(_id);
++    _klass->write_on(stream);
++    int length = _field_values.length();
++    stream->write_int(length);
++    for (int i = 0; i < length; i++) {
++      _field_values.at(i)->write_on(stream);
++    }
++  }
++}
++
++void ObjectValue::print_on(outputStream* st) const {
++  st->print("obj[%d]", _id);
++}
++
++void ObjectValue::print_fields_on(outputStream* st) const {
++#ifndef PRODUCT
++  if (_field_values.length() > 0) {
++    _field_values.at(0)->print_on(st);
++  }
++  for (int i = 1; i < _field_values.length(); i++) {
++    st->print(", ");
++    _field_values.at(i)->print_on(st);
++  }
++#endif
++}
++
+ // ConstantIntValue
+ 
+ ConstantIntValue::ConstantIntValue(DebugInfoReadStream* stream) {
+@@ -147,19 +220,22 @@
+ 
+ // MonitorValue
+ 
+-MonitorValue::MonitorValue(ScopeValue* owner, Location basic_lock) {
++MonitorValue::MonitorValue(ScopeValue* owner, Location basic_lock, bool eliminated) {
+   _owner       = owner;
+   _basic_lock  = basic_lock;
++  _eliminated  = eliminated;
+ }
+ 
+ MonitorValue::MonitorValue(DebugInfoReadStream* stream) {
+   _basic_lock  = Location(stream);
+   _owner       = ScopeValue::read_from(stream);
++  _eliminated  = (stream->read_bool() != 0);
+ }
+ 
+ void MonitorValue::write_on(DebugInfoWriteStream* stream) {
+   _basic_lock.write_on(stream);
+   _owner->write_on(stream);
++  stream->write_bool(_eliminated);
+ }
+ 
+ #ifndef PRODUCT
+@@ -169,6 +245,8 @@
+   st->print(",");
+   basic_lock().print_on(st);
+   st->print("}");
++  if (_eliminated) {
++    st->print(" (eliminated)");
++  }
+ }
+ #endif
+-
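
The new ObjectValue::write_on above emits each escape-analysis-eliminated object in full exactly once (OBJECT_CODE) and thereafter only by id (OBJECT_ID_CODE), tracked via the _visited flag. A simplified model of that scheme, not the HotSpot stream classes:

    #include <cstdio>
    #include <vector>

    enum { OBJECT_CODE = 5, OBJECT_ID_CODE = 6 };  // same codes as above

    struct Obj { int id; bool visited; };

    void write_obj(std::vector<int>& out, Obj& o) {
      if (o.visited) {            // already emitted: back-reference by id
        out.push_back(OBJECT_ID_CODE);
        out.push_back(o.id);
      } else {                    // first visit: emit the full record
        o.visited = true;
        out.push_back(OBJECT_CODE);
        out.push_back(o.id);
        // ... klass and field values would follow here ...
      }
    }

    int main() {
      std::vector<int> out;
      Obj o{7, false};
      write_obj(out, o);  // full record
      write_obj(out, o);  // id-only back-reference
      printf("%zu tokens written\n", out.size());  // 4
      return 0;
    }
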
+diff -ruN openjdk6/hotspot/src/share/vm/code/debugInfo.hpp openjdk/hotspot/src/share/vm/code/debugInfo.hpp
+--- openjdk6/hotspot/src/share/vm/code/debugInfo.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/debugInfo.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)debugInfo.hpp	1.34 07/05/05 17:05:20 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,11 +19,11 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Classes used for serializing debugging information.
+-// These abstractions are introducted to provide symmetric 
++// These abstractions are introduced to provide symmetric
+ // read and write operations.
+ 
+ // ScopeValue        describes the value of a variable/expression in a scope
+@@ -37,6 +34,7 @@
+  public:
+   // Testers
+   virtual bool is_location() const { return false; }
++  virtual bool is_object() const { return false; }
+   virtual bool is_constant_int() const { return false; }
+   virtual bool is_constant_double() const { return false; }
+   virtual bool is_constant_long() const { return false; }
+@@ -68,6 +66,57 @@
+   void print_on(outputStream* st) const;
+ };
+ 
++
++// An ObjectValue describes an object eliminated by escape analysis.
++
++class ObjectValue: public ScopeValue {
++ private:
++  int                        _id;
++  ScopeValue*                _klass;
++  GrowableArray<ScopeValue*> _field_values;
++  Handle                     _value;
++  bool                       _visited;
++
++ public:
++  ObjectValue(int id, ScopeValue* klass)
++     : _id(id)
++     , _klass(klass)
++     , _field_values()
++     , _value()
++     , _visited(false) {
++    assert(klass->is_constant_oop(), "should be constant klass oop");
++  }
++
++  ObjectValue(int id)
++     : _id(id)
++     , _klass(NULL)
++     , _field_values()
++     , _value()
++     , _visited(false) {}
++
++  // Accessors
++  bool                        is_object() const         { return true; }
++  int                         id() const                { return _id; }
++  ScopeValue*                 klass() const             { return _klass; }
++  GrowableArray<ScopeValue*>* field_values()            { return &_field_values; }
++  ScopeValue*                 field_at(int i) const     { return _field_values.at(i); }
++  int                         field_size()              { return _field_values.length(); }
++  Handle                      value() const             { return _value; }
++  bool                        is_visited() const        { return _visited; }
++
++  void                        set_value(oop value)      { _value = Handle(value); }
++  void                        set_visited(bool visited) { _visited = false; }
++
++  // Serialization of debugging information
++  void read_object(DebugInfoReadStream* stream);
++  void write_on(DebugInfoWriteStream* stream);
++
++  // Printing
++  void print_on(outputStream* st) const;
++  void print_fields_on(outputStream* st) const;
++};
++
++
+ // A ConstantIntValue describes a constant int; i.e., the corresponding logical entity
+ // is either a source constant or its computation has been constant-folded.
+ 
+@@ -166,13 +215,15 @@
+  private:
+   ScopeValue* _owner;
+   Location    _basic_lock;
++  bool        _eliminated;
+  public:
+   // Constructor
+-  MonitorValue(ScopeValue* owner, Location basic_lock);
++  MonitorValue(ScopeValue* owner, Location basic_lock, bool eliminated = false);
+ 
+   // Accessors
+   ScopeValue*  owner()      const { return _owner; }
+   Location     basic_lock() const { return _basic_lock;  }
++  bool         eliminated() const { return _eliminated; }
+ 
+   // Serialization of debugging information
+   MonitorValue(DebugInfoReadStream* stream);
+@@ -189,15 +240,20 @@
+  private:
+   const nmethod* _code;
+   const nmethod* code() const { return _code; }
++  GrowableArray<ScopeValue*>* _obj_pool;
+  public:
+-  DebugInfoReadStream(const nmethod* code, int offset) :
+-    CompressedReadStream(code->scopes_data_begin(), offset) { 
+-    _code = code; 
++  DebugInfoReadStream(const nmethod* code, int offset, GrowableArray<ScopeValue*>* obj_pool = NULL) :
++    CompressedReadStream(code->scopes_data_begin(), offset) {
++    _code = code;
++    _obj_pool = obj_pool;
++
+   } ;
+ 
+-  oop read_oop() { 
+-    return code()->oop_at(read_int()); 
+-  } 
++  oop read_oop() {
++    return code()->oop_at(read_int());
++  }
++  ScopeValue* read_object_value();
++  ScopeValue* get_cached_object();
+   // BCI encoding is mostly unsigned, but -1 is a distinguished value
+   int read_bci() { return read_int() + InvocationEntryBci; }
+ };
+@@ -214,4 +270,3 @@
+   void write_handle(jobject h);
+   void write_bci(int bci) { write_int(bci - InvocationEntryBci); }
+ };
+-
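
DebugInfoReadStream above resolves OBJECT_ID_CODE references against a pool of already-decoded ObjectValues (get_cached_object in the .cpp hunk). A minimal stand-in for that lookup:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct ObjectValue { int id; };

    // Same contract as get_cached_object: every id referenced must
    // already be in the pool, because full OBJECT_CODE records precede
    // any OBJECT_ID_CODE back-references in the stream.
    ObjectValue* find_cached(std::vector<ObjectValue*>& pool, int id) {
      for (size_t i = pool.size(); i-- > 0; )
        if (pool[i]->id == id) return pool[i];
      return nullptr;  // HotSpot hits ShouldNotReachHere() instead
    }

    int main() {
      ObjectValue a{1}, b{2};
      std::vector<ObjectValue*> pool = {&a, &b};
      assert(find_cached(pool, 2) == &b);
      assert(find_cached(pool, 3) == nullptr);
      return 0;
    }
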
+diff -ruN openjdk6/hotspot/src/share/vm/code/debugInfoRec.cpp openjdk/hotspot/src/share/vm/code/debugInfoRec.cpp
+--- openjdk6/hotspot/src/share/vm/code/debugInfoRec.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/debugInfoRec.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)debugInfoRec.cpp	1.54 07/05/05 17:05:20 JVM"
+-#endif
+ /*
+  * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -168,7 +165,8 @@
+   }
+   assert(_pcs_size > _pcs_length, "There must be room for after expanding");
+ 
+-  _pcs[_pcs_length++] = PcDesc(pc_offset, DebugInformationRecorder::serialized_null);
++  _pcs[_pcs_length++] = PcDesc(pc_offset, DebugInformationRecorder::serialized_null,
++                               DebugInformationRecorder::serialized_null);
+ }
+ 
+ 
+@@ -305,7 +303,7 @@
+          (!method->is_native() && 0 <= bci && bci < method->code_size()) ||
+          bci == -1, "illegal bci");
+ 
+-  // serialize the locals/expressions/monitors  
++  // serialize the locals/expressions/monitors
+   stream()->write_int((intptr_t) locals);
+   stream()->write_int((intptr_t) expressions);
+   stream()->write_int((intptr_t) monitors);
+@@ -324,6 +322,18 @@
+   }
+ }
+ 
++void DebugInformationRecorder::dump_object_pool(GrowableArray<ScopeValue*>* objects) {
++  guarantee( _pcs_length > 0, "safepoint must exist before describing scopes");
++  PcDesc* last_pd = &_pcs[_pcs_length-1];
++  if (objects != NULL) {
++    for (int i = objects->length() - 1; i >= 0; i--) {
++      ((ObjectValue*) objects->at(i))->set_visited(false);
++    }
++  }
++  int offset = serialize_scope_values(objects);
++  last_pd->set_obj_decode_offset(offset);
++}
++
+ void DebugInformationRecorder::end_scopes(int pc_offset, bool is_safepoint) {
+   assert(_recording_state == (is_safepoint? rs_safepoint: rs_non_safepoint),
+          "nesting of recording calls");
+diff -ruN openjdk6/hotspot/src/share/vm/code/debugInfoRec.hpp openjdk/hotspot/src/share/vm/code/debugInfoRec.hpp
+--- openjdk6/hotspot/src/share/vm/code/debugInfoRec.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/debugInfoRec.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)debugInfoRec.hpp	1.37 07/05/05 17:05:20 JVM"
+-#endif
+ /*
+  * Copyright 1998-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //** The DebugInformationRecorder collects debugging information
+@@ -42,8 +39,8 @@
+ //         - create monitor stack if needed (use create_monitor_values)
+ //         - describe scope (use describe_scope)
+ //         "repeat last four steps for all scopes"
+-//         "outer most scope first and inner most scope last" 
+-//         NB: nodes from create_scope_values and create_locations 
++//         "outer most scope first and inner most scope last"
++//         NB: nodes from create_scope_values and create_locations
+ //             can be reused for simple sharing.
+ //         - mark the end of the scopes (end_safepoint or end_non_safepoint)
+ //   2) Use oop_size, data_size, pcs_size to create the nmethod and
+@@ -94,12 +91,15 @@
+                       DebugToken* expressions = NULL,
+                       DebugToken* monitors    = NULL);
+ 
++
++  void dump_object_pool(GrowableArray<ScopeValue*>* objects);
++
+   // This call must follow every add_safepoint,
+   // after any intervening describe_scope calls.
+   void end_safepoint(int pc_offset)      { end_scopes(pc_offset, true); }
+   void end_non_safepoint(int pc_offset)  { end_scopes(pc_offset, false); }
+ 
+-  // helper fuctions for describe_scope to enable sharing 
++  // helper functions for describe_scope to enable sharing
+   DebugToken* create_scope_values(GrowableArray<ScopeValue*>* values);
+   DebugToken* create_monitor_values(GrowableArray<MonitorValue*>* monitors);
+ 
+@@ -137,7 +137,7 @@
+   const bool _recording_non_safepoints;
+ 
+   DebugInfoWriteStream* _stream;
+-  
++
+   DebugInfoWriteStream* stream() const { return _stream; }
+ 
+   OopRecorder* _oop_recorder;
+@@ -180,4 +180,3 @@
+  public:
+   enum { serialized_null = 0 };
+ };
+-
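
The header comment above prescribes a strict call order for the recorder: add_safepoint, then describe_scope once per scope (outermost first), then end_safepoint. A toy model that enforces the same ordering with assertions; the real DebugInformationRecorder tracks this with its _recording_state field:

    #include <cassert>

    struct Recorder {
      enum State { Ready, Recording } state = Ready;
      void add_safepoint()  { assert(state == Ready);     state = Recording; }
      void describe_scope() { assert(state == Recording); }
      void end_safepoint()  { assert(state == Recording); state = Ready; }
    };

    int main() {
      Recorder r;
      r.add_safepoint();
      r.describe_scope();  // outermost scope
      r.describe_scope();  // innermost scope
      r.end_safepoint();
      return 0;
    }
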
+diff -ruN openjdk6/hotspot/src/share/vm/code/dependencies.cpp openjdk/hotspot/src/share/vm/code/dependencies.cpp
+--- openjdk6/hotspot/src/share/vm/code/dependencies.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/dependencies.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)dependencies.cpp	1.16 07/05/05 17:05:20 JVM"
+-#endif
+ /*
+  * Copyright 2005-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -110,7 +107,7 @@
+ 
+ // Helper function.  If we are adding a new dep. under ctxk2,
+ // try to find an old dep. under a broader* ctxk1.  If there is
+-// 
++//
+ bool Dependencies::maybe_merge_ctxk(GrowableArray<ciObject*>* deps,
+                                     int ctxk_i, ciKlass* ctxk2) {
+   ciKlass* ctxk1 = deps->at(ctxk_i)->as_klass();
+@@ -283,7 +280,7 @@
+ 
+   // cast is safe, no deps can overflow INT_MAX
+   CompressedWriteStream bytes((int)estimate_size_in_bytes());
+-  
++
+   for (int deptv = (int)FIRST_TYPE; deptv < (int)TYPE_LIMIT; deptv++) {
+     DepType dept = (DepType)deptv;
+     GrowableArray<ciObject*>* deps = _deps[dept];
+@@ -538,7 +535,7 @@
+     }
+   }
+ }
+-  
++
+ 
+ /// Dependency stream support (decodes dependencies from an nmethod):
+ 
+@@ -791,53 +788,147 @@
+ 
+  private:
+   // the actual search method:
+-  klassOop find_witness(klassOop context_type,
+-                        bool search_under_participants,
+-                        bool test_context_type);
++  klassOop find_witness_anywhere(klassOop context_type,
++                                 bool participants_hide_witnesses,
++                                 bool top_level_call = true);
++  // the spot-checking version:
++  klassOop find_witness_in(DepChange& changes,
++                           klassOop context_type,
++                           bool participants_hide_witnesses);
+  public:
+-  klassOop find_witness_subtype(klassOop context_type) {
++  klassOop find_witness_subtype(klassOop context_type, DepChange* changes = NULL) {
+     assert(doing_subtype_search(), "must set up a subtype search");
+     // When looking for unexpected concrete types,
+     // do not look beneath expected ones.
++    const bool participants_hide_witnesses = true;
+     // CX > CC > C' is OK, even if C' is new.
+     // CX > { CC,  C' } is not OK if C' is new, and C' is the witness.
+-    return find_witness(context_type, false, true);
++    if (changes != NULL) {
++      return find_witness_in(*changes, context_type, participants_hide_witnesses);
++    } else {
++      return find_witness_anywhere(context_type, participants_hide_witnesses);
++    }
+   }
+-  klassOop find_witness_definer(klassOop context_type) {
++  klassOop find_witness_definer(klassOop context_type, DepChange* changes = NULL) {
+     assert(!doing_subtype_search(), "must set up a method definer search");
+     // When looking for unexpected concrete methods,
+     // look beneath expected ones, to see if there are overrides.
++    const bool participants_hide_witnesses = true;
+     // CX.m > CC.m > C'.m is not OK, if C'.m is new, and C' is the witness.
+-    return find_witness(context_type, true, true);
++    if (changes != NULL) {
++      return find_witness_in(*changes, context_type, !participants_hide_witnesses);
++    } else {
++      return find_witness_anywhere(context_type, !participants_hide_witnesses);
++    }
+   }
+ };
+ 
+-#ifdef ASSERT
++#ifndef PRODUCT
+ static int deps_find_witness_calls = 0;
+ static int deps_find_witness_steps = 0;
+ static int deps_find_witness_recursions = 0;
+-#endif //ASSERT
++static int deps_find_witness_singles = 0;
++static int deps_find_witness_print = 0; // set to -1 to force a final print
++static bool count_find_witness_calls() {
++  if (TraceDependencies || LogCompilation) {
++    int pcount = deps_find_witness_print + 1;
++    bool final_stats      = (pcount == 0);
++    bool initial_call     = (pcount == 1);
++    bool occasional_print = ((pcount & ((1<<10) - 1)) == 0);
++    if (pcount < 0)  pcount = 1; // crude overflow protection
++    deps_find_witness_print = pcount;
++    if (VerifyDependencies && initial_call) {
++      tty->print_cr("Warning:  TraceDependencies results may be inflated by VerifyDependencies");
++    }
++    if (occasional_print || final_stats) {
++      // Every now and then dump a little info about dependency searching.
++      if (xtty != NULL) {
++        xtty->elem("deps_find_witness calls='%d' steps='%d' recursions='%d' singles='%d'",
++                   deps_find_witness_calls,
++                   deps_find_witness_steps,
++                   deps_find_witness_recursions,
++                   deps_find_witness_singles);
++      }
++      if (final_stats || (TraceDependencies && WizardMode)) {
++        tty->print_cr("Dependency check (find_witness) "
++                      "calls=%d, steps=%d (avg=%.1f), recursions=%d, singles=%d",
++                      deps_find_witness_calls,
++                      deps_find_witness_steps,
++                      (double)deps_find_witness_steps / deps_find_witness_calls,
++                      deps_find_witness_recursions,
++                      deps_find_witness_singles);
++      }
++    }
++    return true;
++  }
++  return false;
++}
++#else
++#define count_find_witness_calls() (0)
++#endif //PRODUCT
++
++
++klassOop ClassHierarchyWalker::find_witness_in(DepChange& changes,
++                                               klassOop context_type,
++                                               bool participants_hide_witnesses) {
++  assert(changes.involves_context(context_type), "irrelevant dependency");
++  klassOop new_type = changes.new_type();
++
++  count_find_witness_calls();
++  NOT_PRODUCT(deps_find_witness_singles++);
++
++  // Current thread must be in VM (not native mode, as in CI):
++  assert(must_be_in_vm(), "raw oops here");
++  // Must not move the class hierarchy during this check:
++  assert_locked_or_safepoint(Compile_lock);
++
++  assert(!is_participant(new_type), "only old classes are participants");
++  if (participants_hide_witnesses) {
++    // If the new type is a subtype of a participant, we are done.
++    for (int i = 0; i < num_participants(); i++) {
++      klassOop part = participant(i);
++      if (part == NULL)  continue;
++      assert(changes.involves_context(part) == Klass::cast(new_type)->is_subtype_of(part),
++             "correct marking of participants, b/c new_type is unique");
++      if (changes.involves_context(part)) {
++        // new guy is protected from this check by previous participant
++        return NULL;
++      }
++    }
++  }
++
++  if (is_witness(new_type) &&
++      !ignore_witness(new_type)) {
++    return new_type;
++  }
++
++  return NULL;
++}
++
+ 
+ // Walk hierarchy under a context type, looking for unexpected types.
+ // Do not report participant types, and recursively walk beneath
+-// them only if search_under_participants is true.
+-// If test_context_type is false, skip testing the context type,
++// them only if participants_hide_witnesses is false.
++// If top_level_call is false, skip testing the context type,
+ // because the caller has already considered it.
+-klassOop ClassHierarchyWalker::find_witness(klassOop context_type,
+-                                            bool search_under_participants,
+-                                            bool test_context_type) {
+-  DEBUG_ONLY(deps_find_witness_calls++);
+-
++klassOop ClassHierarchyWalker::find_witness_anywhere(klassOop context_type,
++                                                     bool participants_hide_witnesses,
++                                                     bool top_level_call) {
+   // Current thread must be in VM (not native mode, as in CI):
+   assert(must_be_in_vm(), "raw oops here");
+   // Must not move the class hierarchy during this check:
+   assert_locked_or_safepoint(Compile_lock);
+ 
++  bool do_counts = count_find_witness_calls();
++
+   // Check the root of the sub-hierarchy first.
+-  if (test_context_type) {
+-    DEBUG_ONLY(deps_find_witness_steps++);
++  if (top_level_call) {
++    if (do_counts) {
++      NOT_PRODUCT(deps_find_witness_calls++);
++      NOT_PRODUCT(deps_find_witness_steps++);
++    }
+     if (is_participant(context_type)) {
+-      if (!search_under_participants)  return NULL;
++      if (participants_hide_witnesses)  return NULL;
+       // else fall through to search loop...
+     } else if (is_witness(context_type) && !ignore_witness(context_type)) {
+       // The context is an abstract class or interface, to start with.
+@@ -882,9 +973,10 @@
+       // implementors array overflowed => no exact info.
+       return context_type;  // report an inexact witness to this sad affair
+     }
+-    DEBUG_ONLY(deps_find_witness_steps++);
++    if (do_counts)
++      { NOT_PRODUCT(deps_find_witness_steps++); }
+     if (is_participant(impl)) {
+-      if (!search_under_participants)  continue;
++      if (participants_hide_witnesses)  continue;
+       // else fall through to process this guy's subclasses
+     } else if (is_witness(impl) && !ignore_witness(impl)) {
+       return impl;
+@@ -897,9 +989,9 @@
+     Klass* chain = chains[--chaini];
+     for (Klass* subk = chain; subk != NULL; subk = subk->next_sibling()) {
+       klassOop sub = subk->as_klassOop();
+-      DEBUG_ONLY(deps_find_witness_steps++);
++      if (do_counts) { NOT_PRODUCT(deps_find_witness_steps++); }
+       if (is_participant(sub)) {
+-        if (!search_under_participants)  continue;
++        if (participants_hide_witnesses)  continue;
+         // else fall through to process this guy's subclasses
+       } else if (is_witness(sub) && !ignore_witness(sub)) {
+         return sub;
+@@ -913,8 +1005,10 @@
+         // (Note that sub has already been tested, so that there is
+         // no need for the recursive call to re-test.  That's handy,
+         // since the recursive call sees sub as the context_type.)
+-        DEBUG_ONLY(deps_find_witness_recursions++);
+-        klassOop witness = find_witness(sub, search_under_participants, false);
++        if (do_counts) { NOT_PRODUCT(deps_find_witness_recursions++); }
++        klassOop witness = find_witness_anywhere(sub,
++                                                 participants_hide_witnesses,
++                                                 /*top_level_call=*/ false);
+         if (witness != NULL)  return witness;
+       }
+     }
+@@ -925,6 +1019,7 @@
+ #undef ADD_SUBCLASS_CHAIN
+ }
+ 
++
+ bool Dependencies::is_concrete_klass(klassOop k) {
+   if (Klass::cast(k)->is_abstract())  return false;
+   // %%% We could treat classes which are concrete but
+@@ -1023,27 +1118,30 @@
+ // This allows the compiler to narrow occurrences of ctxk by conck,
+ // when dealing with the types of actual instances.
+ klassOop Dependencies::check_abstract_with_unique_concrete_subtype(klassOop ctxk,
+-                                                                   klassOop conck) {
++                                                                   klassOop conck,
++                                                                   DepChange* changes) {
+   ClassHierarchyWalker wf(conck);
+-  return wf.find_witness_subtype(ctxk);
++  return wf.find_witness_subtype(ctxk, changes);
+ }
+ 
+ // If a non-concrete class has no concrete subtypes, it is not (yet)
+ // instantiatable.  This can allow the compiler to make some paths go
+ // dead, if they are gated by a test of the type.
+-klassOop Dependencies::check_abstract_with_no_concrete_subtype(klassOop ctxk) {
++klassOop Dependencies::check_abstract_with_no_concrete_subtype(klassOop ctxk,
++                                                               DepChange* changes) {
+   // Find any concrete subtype, with no participants:
+   ClassHierarchyWalker wf;
+-  return wf.find_witness_subtype(ctxk);
++  return wf.find_witness_subtype(ctxk, changes);
+ }
+ 
+ 
+ // If a concrete class has no concrete subtypes, it can always be
+ // exactly typed.  This allows the use of a cheaper type test.
+-klassOop Dependencies::check_concrete_with_no_concrete_subtype(klassOop ctxk) {
++klassOop Dependencies::check_concrete_with_no_concrete_subtype(klassOop ctxk,
++                                                               DepChange* changes) {
+   // Find any concrete subtype, with only the ctxk as participant:
+   ClassHierarchyWalker wf(ctxk);
+-  return wf.find_witness_subtype(ctxk);
++  return wf.find_witness_subtype(ctxk, changes);
+ }
+ 
+ 
+@@ -1062,6 +1160,8 @@
+ #ifndef PRODUCT
+     // Make sure the dependency mechanism will pass this discovery:
+     if (VerifyDependencies) {
++      // Turn off dependency tracing while actually testing deps.
++      FlagSetting fs(TraceDependencies, false);
+       if (!Dependencies::is_concrete_klass(ctxk)) {
+         guarantee(NULL ==
+                   (void *)check_abstract_with_no_concrete_subtype(ctxk),
+@@ -1078,6 +1178,8 @@
+ #ifndef PRODUCT
+     // Make sure the dependency mechanism will pass this discovery:
+     if (VerifyDependencies) {
++      // Turn off dependency tracing while actually testing deps.
++      FlagSetting fs(TraceDependencies, false);
+       if (!Dependencies::is_concrete_klass(ctxk)) {
+         guarantee(NULL == (void *)
+                   check_abstract_with_unique_concrete_subtype(ctxk, conck),
+@@ -1096,11 +1198,12 @@
+ klassOop Dependencies::check_abstract_with_exclusive_concrete_subtypes(
+                                                 klassOop ctxk,
+                                                 klassOop k1,
+-                                                klassOop k2) {
++                                                klassOop k2,
++                                                DepChange* changes) {
+   ClassHierarchyWalker wf;
+   wf.add_participant(k1);
+   wf.add_participant(k2);
+-  return wf.find_witness_subtype(ctxk);
++  return wf.find_witness_subtype(ctxk, changes);
+ }
+ 
+ // Search ctxk for concrete implementations.  If there are klen or fewer,
+@@ -1124,6 +1227,8 @@
+ #ifndef PRODUCT
+   // Make sure the dependency mechanism will pass this discovery:
+   if (VerifyDependencies) {
++    // Turn off dependency tracing while actually testing deps.
++    FlagSetting fs(TraceDependencies, false);
+     switch (Dependencies::is_concrete_klass(ctxk)? -1: num) {
+     case -1: // ctxk was itself concrete
+       guarantee(num == 1 && karray[0] == ctxk, "verify dep.");
+@@ -1154,13 +1259,14 @@
+ 
+ // If a class (or interface) has a unique concrete method uniqm, return NULL.
+ // Otherwise, return a class that contains an interfering method.
+-klassOop Dependencies::check_unique_concrete_method(klassOop ctxk, methodOop uniqm) {
++klassOop Dependencies::check_unique_concrete_method(klassOop ctxk, methodOop uniqm,
++                                                    DepChange* changes) {
+   // Here is a missing optimization:  If uniqm->is_final(),
+   // we don't really need to search beneath it for overrides.
+   // This is probably not important, since we don't use dependencies
+   // to track final methods.  (They can't be "definalized".)
+   ClassHierarchyWalker wf(uniqm->method_holder(), uniqm);
+-  return wf.find_witness_definer(ctxk);
++  return wf.find_witness_definer(ctxk, changes);
+ }
+ 
+ // Find the set of all non-abstract methods under ctxk that match m.
+@@ -1196,11 +1302,12 @@
+ 
+ klassOop Dependencies::check_exclusive_concrete_methods(klassOop ctxk,
+                                                         methodOop m1,
+-                                                        methodOop m2) {
++                                                        methodOop m2,
++                                                        DepChange* changes) {
+   ClassHierarchyWalker wf(m1);
+   wf.add_participant(m1->method_holder());
+   wf.add_participant(m2->method_holder());
+-  return wf.find_witness_definer(ctxk);
++  return wf.find_witness_definer(ctxk, changes);
+ }
+ 
+ // Find the set of all non-abstract methods under ctxk that match m[0].
+@@ -1216,6 +1323,7 @@
+   ClassHierarchyWalker wf(m0);
+   assert(wf.check_method_context(ctxk, m0), "proper context");
+   wf.record_witnesses(mlen);
++  bool participants_hide_witnesses = true;
+   klassOop wit = wf.find_witness_definer(ctxk);
+   if (wit != NULL)  return -1;  // Too many witnesses.
+   int num = wf.num_participants();
+@@ -1236,6 +1344,8 @@
+ #ifndef PRODUCT
+   // Make sure the dependency mechanism will pass this discovery:
+   if (VerifyDependencies) {
++    // Turn off dependency tracing while actually testing deps.
++    FlagSetting fs(TraceDependencies, false);
+     switch (mfill) {
+     case 1:
+       guarantee(NULL == (void *)check_unique_concrete_method(ctxk, marray[0]),
+@@ -1255,8 +1365,11 @@
+ }
+ 
+ 
+-klassOop Dependencies::check_has_no_finalizable_subclasses(klassOop ctxk) {
+-  Klass* result = find_finalizable_subclass(ctxk->klass_part());
++klassOop Dependencies::check_has_no_finalizable_subclasses(klassOop ctxk, DepChange* changes) {
++  Klass* search_at = ctxk->klass_part();
++  if (changes != NULL)
++    search_at = changes->new_type()->klass_part(); // just look at the new bit
++  Klass* result = find_finalizable_subclass(search_at);
+   if (result == NULL) {
+     return NULL;
+   }
+@@ -1264,7 +1377,7 @@
+ }
+ 
+ 
+-klassOop Dependencies::DepStream::check_dependency() {
++klassOop Dependencies::DepStream::check_dependency_impl(DepChange* changes) {
+   assert_locked_or_safepoint(Compile_lock);
+ 
+   klassOop witness = NULL;
+@@ -1277,32 +1390,39 @@
+     break;
+   case abstract_with_unique_concrete_subtype:
+     witness = check_abstract_with_unique_concrete_subtype(context_type(),
+-                                                          type_argument(1));
++                                                          type_argument(1),
++                                                          changes);
+     break;
+   case abstract_with_no_concrete_subtype:
+-    witness = check_abstract_with_no_concrete_subtype(context_type());
++    witness = check_abstract_with_no_concrete_subtype(context_type(),
++                                                      changes);
+     break;
+   case concrete_with_no_concrete_subtype:
+-    witness = check_concrete_with_no_concrete_subtype(context_type());
++    witness = check_concrete_with_no_concrete_subtype(context_type(),
++                                                      changes);
+     break;
+   case unique_concrete_method:
+     witness = check_unique_concrete_method(context_type(),
+-                                           method_argument(1));
++                                           method_argument(1),
++                                           changes);
+     break;
+   case abstract_with_exclusive_concrete_subtypes_2:
+     witness = check_abstract_with_exclusive_concrete_subtypes(context_type(),
+                                                               type_argument(1),
+-                                                              type_argument(2));
++                                                              type_argument(2),
++                                                              changes);
+     break;
+   case exclusive_concrete_methods_2:
+     witness = check_exclusive_concrete_methods(context_type(),
+                                                method_argument(1),
+-                                               method_argument(2));
++                                               method_argument(2),
++                                               changes);
+     break;
+   case no_finalizable_subclasses:
+-    witness = check_has_no_finalizable_subclasses(context_type());
++    witness = check_has_no_finalizable_subclasses(context_type(),
++                                                  changes);
+     break;
+-	  default:
++          default:
+     witness = NULL;
+     ShouldNotReachHere();
+     break;
+@@ -1316,3 +1436,114 @@
+   }
+   return witness;
+ }
++
++
++klassOop Dependencies::DepStream::spot_check_dependency_at(DepChange& changes) {
++  if (!changes.involves_context(context_type()))
++    // irrelevant dependency; skip it
++    return NULL;
++
++  return check_dependency_impl(&changes);
++}
++
++
++void DepChange::initialize() {
++  // entire transaction must be under this lock:
++  assert_lock_strong(Compile_lock);
++
++  // Mark all dependee and all its superclasses
++  // Mark transitive interfaces
++  for (ContextStream str(*this); str.next(); ) {
++    klassOop d = str.klass();
++    assert(!instanceKlass::cast(d)->is_marked_dependent(), "checking");
++    instanceKlass::cast(d)->set_is_marked_dependent(true);
++  }
++}
++
++DepChange::~DepChange() {
++  // Unmark all dependee and all its superclasses
++  // Unmark transitive interfaces
++  for (ContextStream str(*this); str.next(); ) {
++    klassOop d = str.klass();
++    instanceKlass::cast(d)->set_is_marked_dependent(false);
++  }
++}
++
++bool DepChange::involves_context(klassOop k) {
++  if (k == NULL || !Klass::cast(k)->oop_is_instance()) {
++    return false;
++  }
++  instanceKlass* ik = instanceKlass::cast(k);
++  bool is_contained = ik->is_marked_dependent();
++  assert(is_contained == Klass::cast(new_type())->is_subtype_of(k),
++         "correct marking of potential context types");
++  return is_contained;
++}
++
++bool DepChange::ContextStream::next() {
++  switch (_change_type) {
++  case Start_Klass:             // initial state; _klass is the new type
++    _ti_base = instanceKlass::cast(_klass)->transitive_interfaces();
++    _ti_index = 0;
++    _change_type = Change_new_type;
++    return true;
++  case Change_new_type:
++    // fall through:
++    _change_type = Change_new_sub;
++  case Change_new_sub:
++    _klass = instanceKlass::cast(_klass)->super();
++    if (_klass != NULL) {
++      return true;
++    }
++    // else set up _ti_limit and fall through:
++    _ti_limit = (_ti_base == NULL) ? 0 : _ti_base->length();
++    _change_type = Change_new_impl;
++  case Change_new_impl:
++    if (_ti_index < _ti_limit) {
++      _klass = klassOop( _ti_base->obj_at(_ti_index++) );
++      return true;
++    }
++    // fall through:
++    _change_type = NO_CHANGE;  // iterator is exhausted
++  case NO_CHANGE:
++    break;
++  default:
++    ShouldNotReachHere();
++  }
++  return false;
++}
++
++void DepChange::print() {
++  int nsup = 0, nint = 0;
++  for (ContextStream str(*this); str.next(); ) {
++    klassOop k = str.klass();
++    switch (str._change_type) {
++    case Change_new_type:
++      tty->print_cr("  dependee = %s", instanceKlass::cast(k)->external_name());
++      break;
++    case Change_new_sub:
++      if (!WizardMode)
++           ++nsup;
++      else tty->print_cr("  context super = %s", instanceKlass::cast(k)->external_name());
++      break;
++    case Change_new_impl:
++      if (!WizardMode)
++           ++nint;
++      else tty->print_cr("  context interface = %s", instanceKlass::cast(k)->external_name());
++      break;
++    }
++  }
++  if (nsup + nint != 0) {
++    tty->print_cr("  context supers = %d, interfaces = %d", nsup, nint);
++  }
++}
++
++#ifndef PRODUCT
++void Dependencies::print_statistics() {
++  if (deps_find_witness_print != 0) {
++    // Call one final time, to flush out the data.
++    deps_find_witness_print = -1;
++    count_find_witness_calls();
++  }
++}
++#endif
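
The core idea of the new spot-check path above: when a single new type is loaded, a dependency rooted at context type ctxk can only have been broken if the new type lies under ctxk, so only the new type's super chain (plus, in HotSpot, its transitive interfaces) needs marking and checking, rather than the whole hierarchy. A simplified model of involves_context that ignores interfaces:

    #include <cassert>

    struct Klass { Klass* super = nullptr; };

    // Superclass-only analogue of DepChange::involves_context:
    // is ctx the new type or one of its supers?
    bool involves_context(Klass* new_type, Klass* ctx) {
      for (Klass* k = new_type; k != nullptr; k = k->super)
        if (k == ctx) return true;
      return false;
    }

    int main() {
      Klass object, list, array_list, other;
      list.super = &object;
      array_list.super = &list;   // the newly loaded type
      other.super = &object;
      // Loading array_list can only affect deps rooted at list/object.
      assert(involves_context(&array_list, &list));
      assert(involves_context(&array_list, &object));
      assert(!involves_context(&array_list, &other));
      return 0;
    }
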
+diff -ruN openjdk6/hotspot/src/share/vm/code/dependencies.hpp openjdk/hotspot/src/share/vm/code/dependencies.hpp
+--- openjdk6/hotspot/src/share/vm/code/dependencies.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/dependencies.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)dependencies.hpp	1.11 07/05/05 17:05:18 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //** Dependencies represent assertions (approximate invariants) within
+@@ -46,6 +43,8 @@
+ class OopRecorder;
+ class xmlStream;
+ class CompileLog;
++class DepChange;
++class No_Safepoint_Verifier;
+ 
+ class Dependencies: public ResourceObj {
+  public:
+@@ -288,13 +287,20 @@
+   // Checking old assertions at run-time (in the VM only):
+   static klassOop check_evol_method(methodOop m);
+   static klassOop check_leaf_type(klassOop ctxk);
+-  static klassOop check_abstract_with_unique_concrete_subtype(klassOop ctxk, klassOop conck);
+-  static klassOop check_abstract_with_no_concrete_subtype(klassOop ctxk);
+-  static klassOop check_concrete_with_no_concrete_subtype(klassOop ctxk);
+-  static klassOop check_unique_concrete_method(klassOop ctxk, methodOop uniqm);
+-  static klassOop check_abstract_with_exclusive_concrete_subtypes(klassOop ctxk, klassOop k1, klassOop k2);
+-  static klassOop check_exclusive_concrete_methods(klassOop ctxk, methodOop m1, methodOop m2);
+-  static klassOop check_has_no_finalizable_subclasses(klassOop ctxk);
++  static klassOop check_abstract_with_unique_concrete_subtype(klassOop ctxk, klassOop conck,
++                                                              DepChange* changes = NULL);
++  static klassOop check_abstract_with_no_concrete_subtype(klassOop ctxk,
++                                                          DepChange* changes = NULL);
++  static klassOop check_concrete_with_no_concrete_subtype(klassOop ctxk,
++                                                          DepChange* changes = NULL);
++  static klassOop check_unique_concrete_method(klassOop ctxk, methodOop uniqm,
++                                               DepChange* changes = NULL);
++  static klassOop check_abstract_with_exclusive_concrete_subtypes(klassOop ctxk, klassOop k1, klassOop k2,
++                                                                  DepChange* changes = NULL);
++  static klassOop check_exclusive_concrete_methods(klassOop ctxk, methodOop m1, methodOop m2,
++                                                   DepChange* changes = NULL);
++  static klassOop check_has_no_finalizable_subclasses(klassOop ctxk,
++                                                      DepChange* changes = NULL);
+   // A returned klassOop is NULL if the dependency assertion is still
+   // valid.  A non-NULL klassOop is a 'witness' to the assertion
+   // failure, a point in the class hierarchy where the assertion has
+@@ -305,6 +311,10 @@
+   // witnesses to the failure.  The value returned from the check_foo
+   // method is chosen arbitrarily.
+ 
++  // The 'changes' value, if non-null, requests a limited spot-check
++  // near the indicated recent changes in the class hierarchy.
++  // It is used by DepStream::spot_check_dependency_at.
++
+   // Detecting possible new assertions:
+   static klassOop  find_unique_concrete_subtype(klassOop ctxk);
+   static methodOop find_unique_concrete_method(klassOop ctxk, methodOop m);
+@@ -334,7 +344,7 @@
+   }
+   void log_dependency(DepType dept,
+                       ciObject* x0,
+-                      ciObject* x1 = NULL, 
++                      ciObject* x1 = NULL,
+                       ciObject* x2 = NULL) {
+     if (log() == NULL)  return;
+     ciObject* args[max_arg_count];
+@@ -397,6 +407,8 @@
+     inline oop recorded_oop_at(int i);
+         // => _code? _code->oop_at(i): *_deps->_oop_recorder->handle_at(i)
+ 
++    klassOop check_dependency_impl(DepChange* changes);
++
+   public:
+     DepStream(Dependencies* deps)
+       : _deps(deps),
+@@ -434,7 +446,12 @@
+     }
+ 
+     // The point of the whole exercise:  Is this dep still OK?
+-    klassOop check_dependency();
++    klassOop check_dependency() {
++      return check_dependency_impl(NULL);
++    }
++    // A lighter version:  Checks only around recent changes in a class
++    // hierarchy.  (See Universe::flush_dependents_on.)
++    klassOop spot_check_dependency_at(DepChange& changes);
+ 
+     // Log the current dependency to xtty or compilation log.
+     void log_dependency(klassOop witness = NULL);
+@@ -443,4 +460,91 @@
+     void print_dependency(klassOop witness = NULL, bool verbose = false);
+   };
+   friend class Dependencies::DepStream;
++
++  static void print_statistics() PRODUCT_RETURN;
++};
++
++// A class hierarchy change coming through the VM (under the Compile_lock).
++// The change is structured as a single new type with any number of supers
++// and implemented interface types.  Other than the new type, any of the
++// super types can be context types for a relevant dependency, which the
++// new type could invalidate.
++class DepChange : public StackObj {
++ private:
++  enum ChangeType {
++    NO_CHANGE = 0,              // an uninvolved klass
++    Change_new_type,            // a newly loaded type
++    Change_new_sub,             // a super with a new subtype
++    Change_new_impl,            // an interface with a new implementation
++    CHANGE_LIMIT,
++    Start_Klass = CHANGE_LIMIT  // internal indicator for ContextStream
++  };
++
++  // each change set is rooted in exactly one new type (at present):
++  KlassHandle _new_type;
++
++  void initialize();
++
++ public:
++  // notes the new type, marks it and all its super-types
++  DepChange(KlassHandle new_type)
++    : _new_type(new_type)
++  {
++    initialize();
++  }
++
++  // cleans up the marks
++  ~DepChange();
++
++  klassOop new_type()                   { return _new_type(); }
++
++  // involves_context(k) is true if k is new_type or any of the super types
++  bool involves_context(klassOop k);
++
++  // Usage:
++  // for (DepChange::ContextStream str(changes); str.next(); ) {
++  //   klassOop k = str.klass();
++  //   switch (str.change_type()) {
++  //     ...
++  //   }
++  // }
++  class ContextStream : public StackObj {
++   private:
++    DepChange&       _changes;
++    friend class DepChange;
++
++    // iteration variables:
++    ChangeType            _change_type;
++    klassOop              _klass;
++    objArrayOop           _ti_base;    // i.e., transitive_interfaces
++    int                   _ti_index;
++    int                   _ti_limit;
++
++    // start at the beginning:
++    void start() {
++      klassOop new_type = _changes.new_type();
++      _change_type = (new_type == NULL ? NO_CHANGE: Start_Klass);
++      _klass = new_type;
++      _ti_base = NULL;
++      _ti_index = 0;
++      _ti_limit = 0;
++    }
++
++    ContextStream(DepChange& changes)
++      : _changes(changes)
++    { start(); }
++
++   public:
++    ContextStream(DepChange& changes, No_Safepoint_Verifier& nsv)
++      : _changes(changes)
++      // the nsv argument makes it safe to hold oops like _klass
++    { start(); }
++
++    bool next();
++
++    klassOop   klass()           { return _klass; }
++  };
++  friend class DepChange::ContextStream;
++
++  void print();
+ };
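
The new DepChange/ContextStream machinery above is easiest to read next to a sketch of its intended driver. The following is a minimal illustration, not part of the patch: the mark/unmark behaviour comes from the constructor and destructor comments, the iteration shape follows the class's own Usage comment, and the wrapper name is hypothetical.

    // Hedged sketch: reacting to a newly loaded class (names other than
    // DepChange, ContextStream and No_Safepoint_Verifier are illustrative).
    void on_new_class_loaded_sketch(KlassHandle new_type) {
      DepChange changes(new_type);          // marks new_type and all its supers
      No_Safepoint_Verifier nsv;            // makes the held klassOops safe
      for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
        klassOop k = str.klass();
        // ...for each nmethod recorded as dependent on k, call
        // deps.spot_check_dependency_at(changes) on its DepStream; a
        // non-NULL witness means that nmethod must be deoptimized.
      }
    }                                       // ~DepChange() cleans up the marks
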
+diff -ruN openjdk6/hotspot/src/share/vm/code/exceptionHandlerTable.cpp openjdk/hotspot/src/share/vm/code/exceptionHandlerTable.cpp
+--- openjdk6/hotspot/src/share/vm/code/exceptionHandlerTable.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/exceptionHandlerTable.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)exceptionHandlerTable.cpp	1.33 07/05/05 17:05:20 JVM"
+-#endif
+ /*
+  * Copyright 1998-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/code/exceptionHandlerTable.hpp openjdk/hotspot/src/share/vm/code/exceptionHandlerTable.hpp
+--- openjdk6/hotspot/src/share/vm/code/exceptionHandlerTable.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/exceptionHandlerTable.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)exceptionHandlerTable.hpp	1.30 07/05/05 17:05:21 JVM"
+-#endif
+ /*
+  * Copyright 1998-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A HandlerTableEntry describes an individual entry of a subtable
+@@ -87,7 +84,7 @@
+   ReallocMark        _nesting;  // assertion check for reallocations
+ 
+   // add the entry & grow the table if needed
+-  void add_entry(HandlerTableEntry entry); 
++  void add_entry(HandlerTableEntry entry);
+   HandlerTableEntry* subtable_for(int catch_pco) const;
+ 
+  public:
+@@ -152,7 +149,7 @@
+ 
+   uint len() const { return _len; }
+   int size_in_bytes() const { return len() == 0 ? 0 : ((2 * len() + 1) * sizeof(implicit_null_entry)); }
+-  
++
+   void copy_to(nmethod* nm);
+   void print(address base) const;
+   void verify(nmethod *nm) const;
+diff -ruN openjdk6/hotspot/src/share/vm/code/icBuffer.cpp openjdk/hotspot/src/share/vm/code/icBuffer.cpp
+--- openjdk6/hotspot/src/share/vm/code/icBuffer.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/icBuffer.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)icBuffer.cpp	1.70 07/05/05 17:05:21 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -35,13 +32,13 @@
+ ICStub*    InlineCacheBuffer::_next_stub = NULL;
+ 
+ 
+-void ICStub::finalize() {  
++void ICStub::finalize() {
+   if (!is_empty()) {
+     ResourceMark rm;
+-    CompiledIC *ic = CompiledIC_at(ic_site());  
++    CompiledIC *ic = CompiledIC_at(ic_site());
+     assert(CodeCache::find_nmethod(ic->instruction_address()) != NULL, "inline cache in non-nmethod?");
+ 
+-    assert(this == ICStub_from_destination_address(ic->stub_address()), "wrong owner of ic buffer");    
++    assert(this == ICStub_from_destination_address(ic->stub_address()), "wrong owner of ic buffer");
+     ic->set_cached_oop(cached_oop());
+     ic->set_ic_destination(destination());
+   }
+@@ -71,11 +68,11 @@
+ 
+ 
+ void ICStub::clear() {
+-  _ic_site = NULL;  
++  _ic_site = NULL;
+ }
+ 
+ 
+-#ifndef PRODUCT 
++#ifndef PRODUCT
+ // anybody calling to this stub will trap
+ 
+ void ICStub::verify() {
+@@ -108,8 +105,8 @@
+     ICStub* ic_stub = (ICStub*)buffer()->request_committed(ic_stub_code_size());
+     if (ic_stub != NULL) {
+       return ic_stub;
+-    } 
+-    // we ran out of inline cache buffer space; must enter safepoint. 
++    }
++    // we ran out of inline cache buffer space; must enter safepoint.
+     // We do this by forcing a safepoint
+     EXCEPTION_MARK;
+ 
+@@ -120,7 +117,7 @@
+     if (HAS_PENDING_EXCEPTION) {
+       oop exception = PENDING_EXCEPTION;
+       CLEAR_PENDING_EXCEPTION;
+-      Thread::send_async_exception(JavaThread::current()->threadObj(), exception);      
++      Thread::send_async_exception(JavaThread::current()->threadObj(), exception);
+     }
+   }
+   ShouldNotReachHere();
+@@ -132,7 +129,7 @@
+   if (buffer()->number_of_stubs() > 1) {
+     if (TraceICBuffer) {
+       tty->print_cr("[updating inline caches with %d stubs]", buffer()->number_of_stubs());
+-    } 
++    }
+     buffer()->remove_all();
+     init_next_stub();
+   }
+@@ -167,17 +164,17 @@
+   }
+ 
+   // allocate and initialize new "out-of-line" inline-cache
+-  ICStub* ic_stub = get_next_stub();  
++  ICStub* ic_stub = get_next_stub();
+   ic_stub->set_stub(ic, cached_oop, entry);
+-  
++
+   // Update inline cache in nmethod to point to new "out-of-line" allocated inline cache
+   ic->set_ic_destination(ic_stub->code_begin());
+-  
+-  set_next_stub(new_ic_stub()); // can cause safepoint synchronization  
++
++  set_next_stub(new_ic_stub()); // can cause safepoint synchronization
+ }
+ 
+ 
+-address InlineCacheBuffer::ic_destination_for(CompiledIC *ic) {  
++address InlineCacheBuffer::ic_destination_for(CompiledIC *ic) {
+   ICStub* stub = ICStub_from_destination_address(ic->stub_address());
+   return stub->destination();
+ }
+@@ -185,8 +182,5 @@
+ 
+ oop InlineCacheBuffer::cached_oop_for(CompiledIC *ic) {
+   ICStub* stub = ICStub_from_destination_address(ic->stub_address());
+-  return stub->cached_oop();  
++  return stub->cached_oop();
+ }
+-
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/code/icBuffer.hpp openjdk/hotspot/src/share/vm/code/icBuffer.hpp
+--- openjdk6/hotspot/src/share/vm/code/icBuffer.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/icBuffer.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)icBuffer.hpp	1.30 07/05/05 17:05:21 JVM"
+-#endif
+ /*
+  * Copyright 1997-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+@@ -54,14 +51,14 @@
+  public:
+   // Creation
+   void set_stub(CompiledIC *ic, oop cached_value, address dest_addr);
+-   
++
+   // Code info
+   address code_begin() const                     { return (address)this + round_to(sizeof(ICStub), CodeEntryAlignment); }
+   address code_end() const                       { return (address)this + size(); }
+ 
+   // Call site info
+   address ic_site() const                        { return _ic_site; }
+-  void    clear(); 
++  void    clear();
+   bool    is_empty() const                       { return _ic_site == NULL; }
+ 
+   // stub info
+@@ -69,8 +66,8 @@
+   oop     cached_oop() const;   // cached_oop for stub
+ 
+   // Debugging
+-  void    verify()            PRODUCT_RETURN;  
+-  void    print()             PRODUCT_RETURN; 
++  void    verify()            PRODUCT_RETURN;
++  void    print()             PRODUCT_RETURN;
+ 
+   // Creation
+   friend ICStub* ICStub_from_destination_address(address destination_address);
+@@ -87,7 +84,7 @@
+ 
+ class InlineCacheBuffer: public AllStatic {
+  private:
+-  // friends  
++  // friends
+   friend class ICStub;
+ 
+   static int ic_stub_code_size();
+@@ -102,15 +99,15 @@
+   static void       init_next_stub();
+ 
+   static ICStub* new_ic_stub();
+- 
++
+ 
+   // Machine-dependent implementation of ICBuffer
+   static void    assemble_ic_buffer_code(address code_begin, oop cached_oop, address entry_point);
+-  static address ic_buffer_entry_point  (address code_begin); 
++  static address ic_buffer_entry_point  (address code_begin);
+   static oop     ic_buffer_cached_oop   (address code_begin);
+ 
+  public:
+- 
++
+     // Initialization; must be called before first usage
+   static void initialize();
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/code/location.cpp openjdk/hotspot/src/share/vm/code/location.cpp
+--- openjdk6/hotspot/src/share/vm/code/location.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/location.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)location.cpp	1.40 07/05/05 17:05:21 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -70,4 +67,3 @@
+   if ((offset_in_bytes % BytesPerInt) != 0)  return false;
+   return (offset_in_bytes / BytesPerInt) < (OFFSET_MASK >> OFFSET_SHIFT);
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/code/location.hpp openjdk/hotspot/src/share/vm/code/location.hpp
+--- openjdk6/hotspot/src/share/vm/code/location.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/location.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)location.hpp	1.47 07/05/05 17:05:21 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A Location describes a concrete machine variable location
+@@ -36,7 +33,7 @@
+ //  Type:   [14..12]
+ //  Offset: [11..0]
+ 
+-class Location VALUE_OBJ_CLASS_SPEC { 
++class Location VALUE_OBJ_CLASS_SPEC {
+   friend class VMStructs;
+  public:
+   enum Where {
+@@ -65,7 +62,7 @@
+     WHERE_MASK   = (jchar) 0x8000,
+     WHERE_SHIFT  = 15
+   };
+-    
++
+   uint16_t _value;
+ 
+   // Create a bit-packed Location
+@@ -102,9 +99,9 @@
+ 
+   int stack_offset() const    { assert(where() == on_stack,    "wrong Where"); return offset()<<LogBytesPerInt; }
+   int register_number() const { assert(where() == in_register, "wrong Where"); return offset()   ; }
+-  
++
+   VMReg reg() const { assert(where() == in_register, "wrong Where"); return VMRegImpl::as_VMReg(offset())   ; }
+-  
++
+   // Printing
+   void print_on(outputStream* st) const;
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/code/nmethod.cpp openjdk/hotspot/src/share/vm/code/nmethod.cpp
+--- openjdk6/hotspot/src/share/vm/code/nmethod.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/nmethod.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)nmethod.cpp	1.366 07/06/08 15:21:44 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -33,10 +30,10 @@
+ 
+ // Only bother with this argument setup if dtrace is available
+ 
+-HS_DTRACE_PROBE_DECL8(hotspot, compiled__method__load, 
++HS_DTRACE_PROBE_DECL8(hotspot, compiled__method__load,
+   const char*, int, const char*, int, const char*, int, void*, size_t);
+ 
+-HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload, 
++HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload,
+   char*, int, char*, int, char*, int);
+ 
+ #define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
+@@ -69,7 +66,7 @@
+   assert(compiler() != NULL, "must be");
+   return compiler()->is_c2();
+ }
+-    
++
+ 
+ 
+ //---------------------------------------------------------------------------------
+@@ -316,6 +313,18 @@
+   // Note:  Do not update _last_pc_desc.  It fronts for the LRU cache.
+ }
+ 
++// adjust pcs_size so that it is a multiple of both oopSize and
++// sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
++// of oopSize, then 2*sizeof(PcDesc) is)
++static int  adjust_pcs_size(int pcs_size) {
++  int nsize = round_to(pcs_size,   oopSize);
++  if ((nsize % sizeof(PcDesc)) != 0) {
++    nsize = pcs_size + sizeof(PcDesc);
++  }
++  assert((nsize %  oopSize) == 0, "correct alignment");
++  return nsize;
++}
++
+ //-----------------------------------------------------------------------------
+ 
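
The alignment rule above is easier to verify with concrete numbers. A worked example, assuming an LP64 build where oopSize is 8 and sizeof(PcDesc) is 12 (three ints, once obj_decode_offset is added elsewhere in this patch):

    // adjust_pcs_size(36) with oopSize == 8, sizeof(PcDesc) == 12:
    //   nsize = round_to(36, 8) == 40, but 40 % 12 != 0, so instead
    //   nsize = 36 + sizeof(PcDesc) == 48
    // 48 is a multiple of both 8 and 12, satisfying the trailing assert;
    // note that 2 * sizeof(PcDesc) == 24 is a multiple of oopSize, which
    // is exactly the assumption the comment states.
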
+ 
+@@ -330,25 +339,25 @@
+   set_exception_cache(new_entry);
+ }
+ 
+-void nmethod::remove_from_exception_cache(ExceptionCache* ec) { 
+-  ExceptionCache* prev = NULL; 
+-  ExceptionCache* curr = exception_cache(); 
+-  assert(curr != NULL, "nothing to remove"); 
+-  // find the previous and next entry of ec 
+-  while (curr != ec) { 
+-    prev = curr; 
+-    curr = curr->next(); 
+-    assert(curr != NULL, "ExceptionCache not found"); 
+-  } 
+-  // now: curr == ec 
+-  ExceptionCache* next = curr->next(); 
+-  if (prev == NULL) { 
+-    set_exception_cache(next); 
+-  } else { 
+-    prev->set_next(next); 
+-  } 
+-  delete curr; 
+-} 
++void nmethod::remove_from_exception_cache(ExceptionCache* ec) {
++  ExceptionCache* prev = NULL;
++  ExceptionCache* curr = exception_cache();
++  assert(curr != NULL, "nothing to remove");
++  // find the previous and next entry of ec
++  while (curr != ec) {
++    prev = curr;
++    curr = curr->next();
++    assert(curr != NULL, "ExceptionCache not found");
++  }
++  // now: curr == ec
++  ExceptionCache* next = curr->next();
++  if (prev == NULL) {
++    set_exception_cache(next);
++  } else {
++    prev->set_next(next);
++  }
++  delete curr;
++}
+ 
+ 
+ // public method for accessing the exception cache
+@@ -395,7 +404,7 @@
+ }
+ 
+ int nmethod::total_size() const {
+-  return 
++  return
+     code_size()          +
+     stub_size()          +
+     consts_size()        +
+@@ -417,10 +426,10 @@
+ 
+ 
+ nmethod* nmethod::new_native_nmethod(methodHandle method,
+-  CodeBuffer *code_buffer, 
+-  int vep_offset, 
+-  int frame_complete, 
+-  int frame_size, 
++  CodeBuffer *code_buffer,
++  int vep_offset,
++  int frame_complete,
++  int frame_size,
+   ByteSize basic_lock_owner_sp_offset,
+   ByteSize basic_lock_sp_offset,
+   OopMapSet* oop_maps) {
+@@ -429,7 +438,7 @@
+   {
+     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+     int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
+-    const int dummy = -1;		// Flag to force proper "operator new"
++    const int dummy = -1;               // Flag to force proper "operator new"
+     CodeOffsets offsets;
+     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
+     offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
+@@ -457,11 +466,11 @@
+   int entry_bci,
+   CodeOffsets* offsets,
+   int orig_pc_offset,
+-  DebugInformationRecorder* debug_info, 
++  DebugInformationRecorder* debug_info,
+   Dependencies* dependencies,
+-  CodeBuffer* code_buffer, int frame_size, 
+-  OopMapSet* oop_maps, 
+-  ExceptionHandlerTable* handler_table, 
++  CodeBuffer* code_buffer, int frame_size,
++  OopMapSet* oop_maps,
++  ExceptionHandlerTable* handler_table,
+   ImplicitExceptionTable* nul_chk_table,
+   AbstractCompiler* compiler,
+   int comp_level
+@@ -473,26 +482,26 @@
+   { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+     int nmethod_size =
+       allocation_size(code_buffer, sizeof(nmethod))
+-      + round_to(debug_info->pcs_size()        , oopSize)
++      + adjust_pcs_size(debug_info->pcs_size())
+       + round_to(dependencies->size_in_bytes() , oopSize)
+       + round_to(handler_table->size_in_bytes(), oopSize)
+       + round_to(nul_chk_table->size_in_bytes(), oopSize)
+       + round_to(debug_info->data_size()       , oopSize);
+     nm = new (nmethod_size)
+       nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
+-              orig_pc_offset, debug_info, dependencies, code_buffer, frame_size, 
+-              oop_maps, 
++              orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
++              oop_maps,
+               handler_table,
+               nul_chk_table,
+               compiler,
+               comp_level);
+     if (nm != NULL) {
+-      // To make dependency checking during class loading fast, record 
++      // To make dependency checking during class loading fast, record
+       // the nmethod dependencies in the classes it is dependent on.
+       // This allows the dependency checking code to simply walk the
+-      // class hierarchy above the loaded class, checking only nmethods 
++      // class hierarchy above the loaded class, checking only nmethods
+       // which are dependent on those classes.  The slow way is to
+-      // check every nmethod for dependencies which makes it linear in 
++      // check every nmethod for dependencies which makes it linear in
+       // the number of methods compiled.  For applications with a lot
+       // classes the slow way is too slow.
+       for (Dependencies::DepStream deps(nm); deps.next(); ) {
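
The hunk ends just as the registration loop opens, so its body falls outside the patch context. For orientation, a hedged reconstruction of what that loop does; add_dependent_nmethod mirrors the remove_dependent_nmethod call visible in the flush_dependencies hunk further down this file:

    // Reconstructed loop body (not part of this hunk):
    for (Dependencies::DepStream deps(nm); deps.next(); ) {
      klassOop klass = deps.context_type();
      if (klass == NULL)  continue;   // e.g., evol_method has no context type
      // Record nm on the context class so later hierarchy changes can find
      // dependent nmethods by walking only the affected classes:
      instanceKlass::cast(klass)->add_dependent_nmethod(nm);
    }
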
+@@ -596,6 +605,9 @@
+       print_code();
+       oop_maps->print();
+     }
++    if (PrintRelocations) {
++      print_relocations();
++    }
+     if (xtty != NULL) {
+       xtty->tail("print_native_nmethod");
+     }
+@@ -604,10 +616,10 @@
+ }
+ 
+ 
+-void* nmethod::operator new(size_t size, int nmethod_size) {  
+-  // Always leave some room in the CodeCache for I2C/C2I adapters  
++void* nmethod::operator new(size_t size, int nmethod_size) {
++  // Always leave some room in the CodeCache for I2C/C2I adapters
+   if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) return NULL;
+-  return CodeCache::allocate(nmethod_size);    
++  return CodeCache::allocate(nmethod_size);
+ }
+ 
+ 
+@@ -617,7 +629,7 @@
+   int compile_id,
+   int entry_bci,
+   CodeOffsets* offsets,
+-  int orig_pc_offset,  
++  int orig_pc_offset,
+   DebugInformationRecorder* debug_info,
+   Dependencies* dependencies,
+   CodeBuffer *code_buffer,
+@@ -654,11 +666,11 @@
+     _consts_offset           = instructions_offset() + code_buffer->total_offset_of(code_buffer->consts()->start());
+     _scopes_data_offset      = data_offset();
+     _scopes_pcs_offset       = _scopes_data_offset   + round_to(debug_info->data_size         (), oopSize);
+-    _dependencies_offset     = _scopes_pcs_offset    + round_to(debug_info->pcs_size          (), oopSize);
++    _dependencies_offset     = _scopes_pcs_offset    + adjust_pcs_size(debug_info->pcs_size());
+     _handler_table_offset    = _dependencies_offset  + round_to(dependencies->size_in_bytes (), oopSize);
+     _nul_chk_table_offset    = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
+     _nmethod_end_offset      = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize);
+- 
++
+     _entry_point             = instructions_begin();
+     _verified_entry_point    = instructions_begin() + offsets->value(CodeOffsets::Verified_Entry);
+     _osr_entry_point         = instructions_begin() + offsets->value(CodeOffsets::OSR_Entry);
+@@ -669,7 +681,7 @@
+     flags.state              = alive;
+     _markedForDeoptimization = 0;
+ 
+-    _unload_reported	     = false;		// jvmti state
++    _unload_reported         = false;           // jvmti state
+ 
+     _lock_count = 0;
+     _stack_traversal_mark    = 0;
+@@ -681,7 +693,7 @@
+     debug_only(check_store();)
+ 
+     CodeCache::commit(this);
+-  
++
+     VTune::create_nmethod(this);
+ 
+     // Copy contents of ExceptionHandlerTable to nmethod
+@@ -690,7 +702,7 @@
+ 
+     // we use the information of entry points to find out if a method is
+     // static or non static
+-    assert(compiler->is_c2() || 
++    assert(compiler->is_c2() ||
+            _method->is_static() == (entry_point() == _verified_entry_point),
+            " entry points must be same for static methods and vice versa");
+   }
+@@ -740,16 +752,16 @@
+                 instructions_begin(), size());
+     xtty->print(" address='" INTPTR_FORMAT "'", (intptr_t) this);
+ 
+-    LOG_OFFSET(xtty, relocation);   
+-    LOG_OFFSET(xtty, code);         
+-    LOG_OFFSET(xtty, stub);         
+-    LOG_OFFSET(xtty, consts);       
+-    LOG_OFFSET(xtty, scopes_data);  
+-    LOG_OFFSET(xtty, scopes_pcs);   
+-    LOG_OFFSET(xtty, dependencies); 
++    LOG_OFFSET(xtty, relocation);
++    LOG_OFFSET(xtty, code);
++    LOG_OFFSET(xtty, stub);
++    LOG_OFFSET(xtty, consts);
++    LOG_OFFSET(xtty, scopes_data);
++    LOG_OFFSET(xtty, scopes_pcs);
++    LOG_OFFSET(xtty, dependencies);
+     LOG_OFFSET(xtty, handler_table);
+     LOG_OFFSET(xtty, nul_chk_table);
+-    LOG_OFFSET(xtty, oops);         
++    LOG_OFFSET(xtty, oops);
+ 
+     xtty->method(method());
+     xtty->stamp();
+@@ -830,11 +842,12 @@
+ ScopeDesc* nmethod::scope_desc_at(address pc) {
+   PcDesc* pd = pc_desc_at(pc);
+   guarantee(pd != NULL, "scope must be present");
+-  return new ScopeDesc(this, pd->scope_decode_offset());
++  return new ScopeDesc(this, pd->scope_decode_offset(),
++                       pd->obj_decode_offset());
+ }
+ 
+ 
+-void nmethod::clear_inline_caches() {  
++void nmethod::clear_inline_caches() {
+   assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
+   if (is_zombie()) {
+     return;
+@@ -849,7 +862,7 @@
+ 
+ void nmethod::cleanup_inline_caches() {
+ 
+-  assert(SafepointSynchronize::is_at_safepoint() && 
++  assert(SafepointSynchronize::is_at_safepoint() &&
+         !CompiledIC_lock->is_locked() &&
+         !Patching_lock->is_locked(), "no threads must be updating the inline caches by them selfs");
+ 
+@@ -872,28 +885,28 @@
+   while(iter.next()) {
+     switch(iter.type()) {
+       case relocInfo::virtual_call_type:
+-      case relocInfo::opt_virtual_call_type: {  
+-	CompiledIC *ic = CompiledIC_at(iter.reloc());
+-	// Ok, to lookup references to zombies here
+-	CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
+-	if( cb != NULL && cb->is_nmethod() ) {
+-	  nmethod* nm = (nmethod*)cb;
+-	  // Clean inline caches pointing to both zombie and not_entrant methods
+-	  if (!nm->is_in_use()) ic->set_to_clean();
+-	}                                             
++      case relocInfo::opt_virtual_call_type: {
++        CompiledIC *ic = CompiledIC_at(iter.reloc());
++        // Ok, to lookup references to zombies here
++        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
++        if( cb != NULL && cb->is_nmethod() ) {
++          nmethod* nm = (nmethod*)cb;
++          // Clean inline caches pointing to both zombie and not_entrant methods
++          if (!nm->is_in_use()) ic->set_to_clean();
++        }
+         break;
+       }
+-      case relocInfo::static_call_type: {        
+-	CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
+-	CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
+-	if( cb != NULL && cb->is_nmethod() ) {
+-	  nmethod* nm = (nmethod*)cb;
+-	  // Clean inline caches pointing to both zombie and not_entrant methods
+-	  if (!nm->is_in_use()) csc->set_to_clean();
+-	}                                             
++      case relocInfo::static_call_type: {
++        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
++        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
++        if( cb != NULL && cb->is_nmethod() ) {
++          nmethod* nm = (nmethod*)cb;
++          // Clean inline caches pointing to both zombie and not_entrant methods
++          if (!nm->is_in_use()) csc->set_to_clean();
++        }
+         break;
+       }
+-    }    
++    }
+   }
+ }
+ 
+@@ -906,7 +919,7 @@
+ bool nmethod::can_not_entrant_be_converted() {
+   assert(is_not_entrant(), "must be a non-entrant method");
+   assert(SafepointSynchronize::is_at_safepoint(), "must be called during a safepoint");
+-  
++
+   // Since the nmethod sweeper only does partial sweep the sweeper's traversal
+   // count can be greater than the stack traversal count before it hits the
+   // nmethod for the second time.
+@@ -928,7 +941,7 @@
+   post_compiled_method_unload();
+ 
+   // Since this nmethod is being unloaded, make sure that dependencies
+-  // recorded in instanceKlasses get flushed and pass non-NULL closure to 
++  // recorded in instanceKlasses get flushed and pass non-NULL closure to
+   // indicate that this work is being done during a GC.
+   assert(Universe::heap()->is_gc_active(), "should only be called during gc");
+   assert(is_alive != NULL, "Should be non-NULL");
+@@ -938,7 +951,7 @@
+   // Break cycle between nmethod & method
+   if (TraceClassUnloading && WizardMode) {
+     tty->print_cr("[Class unloading: Making nmethod " INTPTR_FORMAT
+-                  " unloadable], methodOop(" INTPTR_FORMAT 
++                  " unloadable], methodOop(" INTPTR_FORMAT
+                   "), cause(" INTPTR_FORMAT ")",
+                   this, (address)_method, (address)cause);
+     cause->klass()->print();
+@@ -973,15 +986,15 @@
+   NMethodSweeper::notify(this);
+ }
+ 
+-void nmethod::invalidate_osr_method() { 
+-  assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");   
++void nmethod::invalidate_osr_method() {
++  assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
+   if (_entry_bci != InvalidOSREntryBci)
+     inc_decompile_count();
+   // Remove from list of active nmethods
+-  if (method() != NULL) 
++  if (method() != NULL)
+     instanceKlass::cast(method()->method_holder())->remove_osr_nmethod(this);
+   // Set entry as invalid
+-  _entry_bci = InvalidOSREntryBci;   
++  _entry_bci = InvalidOSREntryBci;
+ }
+ 
+ void nmethod::log_state_change(int state) const {
+@@ -1017,7 +1030,7 @@
+       // only log this once
+       log_state_change(state);
+     }
+-    invalidate_osr_method();  
++    invalidate_osr_method();
+     return;
+   }
+ 
+@@ -1027,7 +1040,7 @@
+   }
+ 
+   log_state_change(state);
+-  
++
+   // Make sure the nmethod is not flushed in case of a safepoint in code below.
+   nmethodLocker nml(this);
+ 
+@@ -1039,9 +1052,9 @@
+     if (!is_not_entrant()) {
+       NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
+                   SharedRuntime::get_handle_wrong_method_stub());
+-      assert (NativeJump::instruction_size == nmethod::_zombie_instruction_size, "");      
++      assert (NativeJump::instruction_size == nmethod::_zombie_instruction_size, "");
+     }
+-  
++
+     // When the nmethod becomes zombie it is no longer alive so the
+     // dependencies must be flushed.  nmethods in the not_entrant
+     // state will be flushed later when the transition to zombie
+@@ -1052,26 +1065,26 @@
+     } else {
+       assert(state == not_entrant, "other cases may need to be handled differently");
+     }
+-  
++
+     // Change state
+     flags.state = state;
+   } // leave critical region under Patching_lock
+-    
++
+   if (state == not_entrant) {
+     Events::log("Make nmethod not entrant " INTPTR_FORMAT, this);
+   } else {
+-    Events::log("Make nmethod zombie " INTPTR_FORMAT, this);    
++    Events::log("Make nmethod zombie " INTPTR_FORMAT, this);
+   }
+ 
+-  if (TraceCreateZombies) {    
++  if (TraceCreateZombies) {
+     tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie");
+   }
+-  
+-  // Make sweeper aware that there is a zombie method that needs to be removed  
+-  NMethodSweeper::notify(this);  
++
++  // Make sweeper aware that there is a zombie method that needs to be removed
++  NMethodSweeper::notify(this);
+ 
+   // not_entrant only stuff
+-  if (state == not_entrant) {    
++  if (state == not_entrant) {
+     mark_as_seen_on_stack();
+   }
+ 
+@@ -1082,7 +1095,7 @@
+   // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
+   // and it hasn't already been reported for this nmethod then report it now.
+   // (the event may have been reported earilier if the GC marked it for unloading).
+-  if (state == zombie) { 
++  if (state == zombie) {
+ 
+     DTRACE_METHOD_UNLOAD_PROBE(method());
+ 
+@@ -1092,15 +1105,15 @@
+       {
+         HandleMark hm;
+         JvmtiExport::post_compiled_method_unload_at_safepoint(
+-            method()->jmethod_id(), code_begin());    
++            method()->jmethod_id(), code_begin());
+       }
+-      set_unload_reported();    
++      set_unload_reported();
+     }
+   }
+ 
+ 
+   // Zombie only stuff
+-  if (state == zombie) {    
++  if (state == zombie) {
+     VTune::delete_nmethod(this);
+   }
+ 
+@@ -1115,7 +1128,7 @@
+   // If the vep() points to the zombie nmethod, the memory for the nmethod
+   // could be flushed and the compiler and vtable stubs could still call
+   // through it.
+-  if (method()->code() == this || 
++  if (method()->code() == this ||
+       method()->from_compiled_entry() == verified_entry_point()) {
+     HandleMark hm;
+     method()->clear_code();
+@@ -1129,11 +1142,11 @@
+ }
+ #endif
+ 
+- 
++
+ void nmethod::flush() {
+   // Note that there are no valid oops in the nmethod anymore.
+   assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
+-  assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");  
++  assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
+ 
+   assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
+   check_safepoint();
+@@ -1172,12 +1185,12 @@
+ // of dependencies must happen during phase 1 since after GC any
+ // dependencies in the unloaded nmethod won't be updated, so
+ // traversing the dependency information in unsafe.  In that case this
+-// function is called with a non-NULL argument and this function only 
++// function is called with a non-NULL argument and this function only
+ // notifies instanceKlasses that are reachable
+ 
+ void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
+   assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
+-  assert(Universe::heap()->is_gc_active() == (is_alive != NULL), 
++  assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
+   "is_alive is non-NULL if and only if we are called during GC");
+   if (!has_flushed_dependencies()) {
+     set_has_flushed_dependencies();
+@@ -1185,7 +1198,7 @@
+       klassOop klass = deps.context_type();
+       if (klass == NULL)  continue;  // ignore things like evol_method
+ 
+-      // During GC the is_alive closure is non-NULL, and is used to 
++      // During GC the is_alive closure is non-NULL, and is used to
+       // determine liveness of dependees that need to be updated.
+       if (is_alive == NULL || is_alive->do_object_b(klass)) {
+         instanceKlass::cast(klass)->remove_dependent_nmethod(this);
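
The comments above distinguish two callers of flush_dependencies, which is worth spelling out once; both shapes are implied by the assert on is_alive:

    // Illustrative call shapes (not literal call sites from this patch):
    //   flush_dependencies(NULL);       // nmethod made zombie outside of GC
    //   flush_dependencies(is_alive);   // GC unloading, phase 1: the closure
    //                                   // decides which dependees are still
    //                                   // reachable and need their lists fixed
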
+@@ -1230,12 +1243,12 @@
+ void nmethod::post_compiled_method_load_event() {
+ 
+   methodOop moop = method();
+-  HS_DTRACE_PROBE8(hotspot, compiled__method__load, 
+-      moop->klass_name()->bytes(), 
++  HS_DTRACE_PROBE8(hotspot, compiled__method__load,
++      moop->klass_name()->bytes(),
+       moop->klass_name()->utf8_length(),
+-      moop->name()->bytes(), 
++      moop->name()->bytes(),
+       moop->name()->utf8_length(),
+-      moop->signature()->bytes(), 
++      moop->signature()->bytes(),
+       moop->signature()->utf8_length(),
+       code_begin(), code_size());
+ 
+@@ -1247,8 +1260,8 @@
+ void nmethod::post_compiled_method_unload() {
+   assert(_method != NULL && !is_unloaded(), "just checking");
+   DTRACE_METHOD_UNLOAD_PROBE(method());
+-    
+-  // If a JVMTI agent has enabled the CompiledMethodUnload event then 
++
++  // If a JVMTI agent has enabled the CompiledMethodUnload event then
+   // post the event. Sometime later this nmethod will be made a zombie by
+   // the sweeper but the methodOop will not be valid at that point.
+   if (JvmtiExport::should_post_compiled_method_unload()) {
+@@ -1259,9 +1272,9 @@
+   }
+ 
+   // The JVMTI CompiledMethodUnload event can be enabled or disabled at
+-  // any time. As the nmethod is being unloaded now we mark it has 
++  // any time. As the nmethod is being unloaded now we mark it has
+   // having the unload event reported - this will ensure that we don't
+-  // attempt to report the event in the unlikely scenario where the 
++  // attempt to report the event in the unlikely scenario where the
+   // event is enabled at the time the nmethod is made a zombie.
+   set_unload_reported();
+ }
+@@ -1302,7 +1315,7 @@
+   if (can_unload(is_alive, keep_alive, (oop*)&_method, unloading_occurred)) {
+     return;
+   }
+-  
++
+   // Exception cache
+   ExceptionCache* ec = exception_cache();
+   while (ec != NULL) {
+@@ -1311,7 +1324,7 @@
+     ExceptionCache* next_ec = ec->next();
+     if (ex != NULL && !is_alive->do_object_b(ex)) {
+       assert(!ex->is_compiledICHolder(), "Possible error here");
+-      remove_from_exception_cache(ec); 
++      remove_from_exception_cache(ec);
+     }
+     ec = next_ec;
+   }
+@@ -1330,8 +1343,8 @@
+           // The only exception is compiledICHolder oops which may
+           // yet be marked below. (We check this further below).
+           if (ic_oop->is_compiledICHolder()) {
+-            compiledICHolderOop cichk_oop = compiledICHolderOop(ic_oop); 
+-            if (is_alive->do_object_b( 
++            compiledICHolderOop cichk_oop = compiledICHolderOop(ic_oop);
++            if (is_alive->do_object_b(
+                   cichk_oop->holder_method()->method_holder()) &&
+                 is_alive->do_object_b(cichk_oop->holder_klass())) {
+               continue;
+@@ -1344,7 +1357,7 @@
+     }
+   }
+ 
+-  // Compiled code 
++  // Compiled code
+   RelocIterator iter(this, low_boundary);
+   while (iter.next()) {
+     if (iter.type() == relocInfo::oop_type) {
+@@ -1358,7 +1371,7 @@
+         if (can_unload(is_alive, keep_alive, r->oop_addr(), unloading_occurred)) {
+           return;
+         }
+-      }      
++      }
+     }
+   }
+ 
+@@ -1373,7 +1386,7 @@
+ 
+ #ifndef PRODUCT
+   // This nmethod was not unloaded; check below that all CompiledICs
+-  // refer to marked oops. 
++  // refer to marked oops.
+   {
+     RelocIterator iter(this, low_boundary);
+     while (iter.next()) {
+@@ -1415,13 +1428,13 @@
+   RelocIterator iter(this, low_boundary);
+   while (iter.next()) {
+     if (iter.type() == relocInfo::oop_type ) {
+-      oop_Relocation* r = iter.oop_reloc();      
++      oop_Relocation* r = iter.oop_reloc();
+       // In this loop, we must only follow those oops directly embedded in
+       // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
+       assert(1 == (r->oop_is_immediate()) + (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()), "oop must be found in exactly one place");
+       if (r->oop_is_immediate() && r->oop_value() != NULL) {
+         f->do_oop(r->oop_addr());
+-      }      
++      }
+     }
+   }
+ 
+@@ -1434,7 +1447,7 @@
+ 
+ // Method that knows how to preserve outgoing arguments at call. This method must be
+ // called with a frame corresponding to a Java invoke
+-void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {  
++void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
+   if (!method()->is_native()) {
+     SimpleScopeDesc ssd(this, fr.pc());
+     Bytecode_invoke* call = Bytecode_invoke_at(ssd.method(), ssd.bci());
+@@ -1579,7 +1592,7 @@
+     }
+     assert_LU_OK;
+   }
+-  
++
+   // Sneak up on the value with a linear search of length ~16.
+   while (true) {
+     assert_LU_OK;
+@@ -1605,31 +1618,28 @@
+ }
+ 
+ 
+-bool nmethod::is_dependent_on(klassOop dependee) {
++bool nmethod::check_all_dependencies() {
+   bool found_check = false;
+-  if (dependee == NULL) {
+-    // wholesale check of all dependencies
+-    for (Dependencies::DepStream deps(this); deps.next(); ) {
+-      if (deps.check_dependency() != NULL) {
+-        found_check = true;
+-        NOT_DEBUG(break);
+-      }
++  // wholesale check of all dependencies
++  for (Dependencies::DepStream deps(this); deps.next(); ) {
++    if (deps.check_dependency() != NULL) {
++      found_check = true;
++      NOT_DEBUG(break);
+     }
+-  } else {
+-    // What has happened:
+-    // 1) a new class dependee has been added
+-    // 2) dependee and all its super classes have been marked
+-    for (Dependencies::DepStream deps(this); deps.next(); ) {
+-      // Evaluate only relevant dependencies.
+-      klassOop ctxk = deps.context_type();
+-      if (ctxk == NULL)  continue;  // e.g., evol_method
+-      bool ctxk_is_marked = instanceKlass::cast(ctxk)->is_marked_dependent();
+-      assert(ctxk_is_marked == Klass::cast(dependee)->is_subtype_of(ctxk),
+-             "correct marking of potential context types");
+-      if (ctxk_is_marked && deps.check_dependency() != NULL) {
+-        found_check = true;
+-        NOT_DEBUG(break);
+-      }
++  }
++  return found_check;  // tell caller if we found anything
++}
++
++bool nmethod::check_dependency_on(DepChange& changes) {
++  // What has happened:
++  // 1) a new class dependee has been added
++  // 2) dependee and all its super classes have been marked
++  bool found_check = false;  // set true if we are upset
++  for (Dependencies::DepStream deps(this); deps.next(); ) {
++    // Evaluate only relevant dependencies.
++    if (deps.spot_check_dependency_at(changes) != NULL) {
++      found_check = true;
++      NOT_DEBUG(break);
+     }
+   }
+   return found_check;
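
Together these hunks split the old dual-mode is_dependent_on(klassOop) into the wholesale check_all_dependencies and the DepChange-driven check_dependency_on. A minimal sketch of how a caller would now dispatch; only the two methods and DepChange come from the patch, the wrapper is illustrative:

    // Hedged sketch of the dispatch between wholesale and spot checking:
    bool is_dependent_on_sketch(nmethod* nm, KlassHandle dependee) {
      if (dependee.is_null())
        return nm->check_all_dependencies();    // verify every dependency
      DepChange changes(dependee);              // dependee was just loaded
      return nm->check_dependency_on(changes);  // spot-check near the change
    }
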
+@@ -1752,9 +1762,9 @@
+ void nmethod::verify() {
+ 
+   // Hmm. OSR methods can be deopted but not marked as zombie or not_entrant
+-  // seems odd. 
++  // seems odd.
+ 
+-  if( is_zombie() || is_not_entrant() ) 
++  if( is_zombie() || is_not_entrant() )
+     return;
+ 
+   // Make sure all the entry points are correctly aligned for patching.
+@@ -1768,7 +1778,7 @@
+     fatal1("nmethod at " INTPTR_FORMAT " not in zone", this);
+   }
+ 
+-  if(is_native_method() ) 
++  if(is_native_method() )
+     return;
+ 
+   nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
+@@ -1796,20 +1806,21 @@
+        SafepointSynchronize::is_at_safepoint())) {
+     ic = CompiledIC_at(call_site);
+     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
+-  } else {    
++  } else {
+     MutexLocker ml_verify (CompiledIC_lock);
+     ic = CompiledIC_at(call_site);
+   }
+   PcDesc* pd = pc_desc_at(ic->end_of_call());
+   assert(pd != NULL, "PcDesc must exist");
+-  for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset());
++  for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
++                                     pd->obj_decode_offset());
+        !sd->is_top(); sd = sd->sender()) {
+     sd->verify();
+   }
+ }
+ 
+ void nmethod::verify_scopes() {
+-  if( !method() ) return;	// Runtime stubs have no scope
++  if( !method() ) return;       // Runtime stubs have no scope
+   if (method()->is_native()) return; // Ignore stub methods.
+   // iterate through all interrupt point
+   // and verify the debug information is valid.
+@@ -1825,10 +1836,10 @@
+         verify_interrupt_point(iter.addr());
+         break;
+       case relocInfo::static_call_type:
+-        stub = iter.static_call_reloc()->static_stub();          
++        stub = iter.static_call_reloc()->static_stub();
+         //verify_interrupt_point(iter.addr());
+         break;
+-      case relocInfo::runtime_call_type: 
++      case relocInfo::runtime_call_type:
+         address destination = iter.reloc()->value();
+         // Right now there is no way to find out which entries support
+         // an interrupt point.  It would be nice if we had this
+@@ -1867,14 +1878,16 @@
+   ttyLocker ttyl;   // keep the following output all in one block
+ 
+   tty->print("Compiled ");
+-#ifdef TIERED
+-  if (compiler()->is_c1()) {
++
++  if (is_compiled_by_c1()) {
+     tty->print("(c1) ");
+-  } else {
+-    assert(compiler()->is_c2(), "Who else?");
++  } else if (is_compiled_by_c2()) {
+     tty->print("(c2) ");
++  } else {
++    assert(is_native_method(), "Who else?");
++    tty->print("(nm) ");
+   }
+-#endif // TIERED
++
+   print_on(tty, "nmethod");
+   tty->cr();
+   if (WizardMode) {
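
The reworked attribution above keys off the nmethod itself rather than a TIERED-only compiler() query, and gains a tag for native wrappers. The three possible prefixes it can now emit, as a hedged illustration of the output:

    // Illustrative first words of the printed line:
    //   Compiled (c1) ...   // client-compiled nmethod
    //   Compiled (c2) ...   // server-compiled nmethod
    //   Compiled (nm) ...   // native method wrapper
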
+@@ -1887,52 +1900,52 @@
+     if (is_not_entrant()) tty->print("not_entrant ");
+     if (is_zombie())      tty->print("zombie ");
+     if (is_unloaded())    tty->print("unloaded ");
+-    tty->print_cr("}:");  
++    tty->print_cr("}:");
+   }
+   if (size              () > 0) tty->print_cr(" total in heap  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+-					      (address)this,
+-					      (address)this + size(),
+-					      size());
++                                              (address)this,
++                                              (address)this + size(),
++                                              size());
+   if (relocation_size   () > 0) tty->print_cr(" relocation     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+-					      relocation_begin(),
+-					      relocation_end(),
+-					      relocation_size());
+-  if (code_size         () > 0) tty->print_cr(" main code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", 
+-					      code_begin(),
+-					      code_end(),
+-					      code_size());
++                                              relocation_begin(),
++                                              relocation_end(),
++                                              relocation_size());
++  if (code_size         () > 0) tty->print_cr(" main code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
++                                              code_begin(),
++                                              code_end(),
++                                              code_size());
+   if (stub_size         () > 0) tty->print_cr(" stub code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+-					      stub_begin(),
+-					      stub_end(),
+-					      stub_size());
++                                              stub_begin(),
++                                              stub_end(),
++                                              stub_size());
+   if (consts_size       () > 0) tty->print_cr(" constants      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+-					      consts_begin(),
+-					      consts_end(),
+-					      consts_size());
++                                              consts_begin(),
++                                              consts_end(),
++                                              consts_size());
+   if (scopes_data_size  () > 0) tty->print_cr(" scopes data    [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+-					      scopes_data_begin(),
+-					      scopes_data_end(),
+-					      scopes_data_size());
++                                              scopes_data_begin(),
++                                              scopes_data_end(),
++                                              scopes_data_size());
+   if (scopes_pcs_size   () > 0) tty->print_cr(" scopes pcs     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+-					      scopes_pcs_begin(),
+-					      scopes_pcs_end(),
+-					      scopes_pcs_size());
++                                              scopes_pcs_begin(),
++                                              scopes_pcs_end(),
++                                              scopes_pcs_size());
+   if (dependencies_size () > 0) tty->print_cr(" dependencies   [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+-					      dependencies_begin(),
+-					      dependencies_end(),
+-					      dependencies_size());
++                                              dependencies_begin(),
++                                              dependencies_end(),
++                                              dependencies_size());
+   if (handler_table_size() > 0) tty->print_cr(" handler table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+-					      handler_table_begin(),
+-					      handler_table_end(),
+-					      handler_table_size());
++                                              handler_table_begin(),
++                                              handler_table_end(),
++                                              handler_table_size());
+   if (nul_chk_table_size() > 0) tty->print_cr(" nul chk table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+-					      nul_chk_table_begin(),
+-					      nul_chk_table_end(),
+-					      nul_chk_table_size());
++                                              nul_chk_table_begin(),
++                                              nul_chk_table_end(),
++                                              nul_chk_table_size());
+   if (oops_size         () > 0) tty->print_cr(" oops           [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+-					      oops_begin(),
+-					      oops_end(),
+-					      oops_size());
++                                              oops_begin(),
++                                              oops_end(),
++                                              oops_size());
+ }
+ 
+ 
+@@ -1954,6 +1967,13 @@
+   tty->print_cr("Dependencies:");
+   for (Dependencies::DepStream deps(this); deps.next(); ) {
+     deps.print_dependency();
++    klassOop ctxk = deps.context_type();
++    if (ctxk != NULL) {
++      Klass* k = Klass::cast(ctxk);
++      if (k->oop_is_instance() && ((instanceKlass*)k)->is_dependent_nmethod(this)) {
++        tty->print("   [nmethod<=klass]%s", k->external_name());
++      }
++    }
+     deps.log_dependency();  // put it into the xml log also
+   }
+ }
+@@ -1979,13 +1999,13 @@
+     if (index_size > 0) {
+       jint* ip;
+       for (ip = index_start; ip+2 <= index_end; ip += 2)
+-	tty->print_cr("  (%d %d) addr=" INTPTR_FORMAT " @" INTPTR_FORMAT,
+-		      ip[0],
+-		      ip[1],
+-		      header_end()+ip[0],
+-		      relocation_begin()-1+ip[1]);
++        tty->print_cr("  (%d %d) addr=" INTPTR_FORMAT " @" INTPTR_FORMAT,
++                      ip[0],
++                      ip[1],
++                      header_end()+ip[0],
++                      relocation_begin()-1+ip[1]);
+       for (; ip < index_end; ip++)
+-	tty->print_cr("  (%d ?)", ip[0]);
++        tty->print_cr("  (%d ?)", ip[0]);
+       tty->print_cr("          @" INTPTR_FORMAT ": index_size=%d", ip, *ip++);
+       tty->print_cr("reloc_end @" INTPTR_FORMAT ":", ip);
+     }
+@@ -2022,7 +2042,7 @@
+         case relocInfo::virtual_call_type:     return "virtual_call";
+         case relocInfo::opt_virtual_call_type: return "optimized virtual_call";
+         case relocInfo::static_call_type:      return "static_call";
+-        case relocInfo::static_stub_type:      return "static_stub";        
++        case relocInfo::static_stub_type:      return "static_stub";
+         case relocInfo::runtime_call_type:     return "runtime_call";
+         case relocInfo::external_word_type:    return "external_word";
+         case relocInfo::internal_word_type:    return "internal_word";
+@@ -2040,7 +2060,8 @@
+ ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
+   PcDesc* p = pc_desc_near(begin+1);
+   if (p != NULL && p->real_pc(this) <= end) {
+-    return new ScopeDesc(this, p->scope_decode_offset());
++    return new ScopeDesc(this, p->scope_decode_offset(),
++                         p->obj_decode_offset());
+   }
+   return NULL;
+ }
+@@ -2070,7 +2091,7 @@
+       }
+     }
+   }
+-  ScopeDesc* sd  = scope_desc_in(begin, end);  
++  ScopeDesc* sd  = scope_desc_in(begin, end);
+   if (sd != NULL) {
+     st->fill_to(column);
+     if (sd->bci() == SynchronizationEntryBCI) {
+@@ -2188,6 +2209,7 @@
+   nmethod_stats.print_nmethod_stats();
+   DebugInformationRecorder::print_statistics();
+   nmethod_stats.print_pc_stats();
++  Dependencies::print_statistics();
+   if (xtty != NULL)  xtty->tail("statistics");
+ }
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/code/nmethod.hpp openjdk/hotspot/src/share/vm/code/nmethod.hpp
+--- openjdk6/hotspot/src/share/vm/code/nmethod.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/nmethod.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)nmethod.hpp	1.170 07/05/17 15:50:48 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This class is used internally by nmethods, to cache
+@@ -31,7 +28,7 @@
+ class ExceptionCache : public CHeapObj {
+   friend class VMStructs;
+  private:
+-  static address _unwind_handler; 
++  static address _unwind_handler;
+   enum { cache_size = 16 };
+   klassOop _exception_type;
+   address  _pc[cache_size];
+@@ -101,10 +98,10 @@
+ };
+ 
+ 
+-// A nmethod contains:    
++// A nmethod contains:
+ //  - header                 (the nmethod structure)
+ //  [Relocation]
+-//  - relocation information  
++//  - relocation information
+ //  - constant part          (doubles, longs and floats used in nmethod)
+ //  [Code]
+ //  - code body
+@@ -155,7 +152,7 @@
+   // location in frame (offset for sp) that deopt can store the original
+   // pc during a deopt.
+   int _orig_pc_offset;
+-   
++
+   int _compile_id;                     // which compilation made this nmethod
+   int _comp_level;                     // compilation level
+ 
+@@ -168,7 +165,7 @@
+   bool _markedForDeoptimization;       // Used for stack deoptimization
+   enum { alive        = 0,
+          not_entrant  = 1, // uncommon trap has happend but activations may still exist
+-         zombie       = 2, 
++         zombie       = 2,
+          unloaded     = 3 };
+ 
+   // used by jvmti to track if an unload event has been posted for this nmethod.
+@@ -179,7 +176,7 @@
+   // Nmethod Flushing lock (if non-zero, then the nmethod is not removed)
+   jint  _lock_count;
+ 
+-  // not_entrant method removal. Each mark_sweep pass will update 
++  // not_entrant method removal. Each mark_sweep pass will update
+   // this mark to current sweep invocation count if it is seen on the
+   // stack.  An not_entrant method can be removed when there is no
+   // more activations, i.e., when the _stack_traversal_mark is less than
+@@ -203,19 +200,19 @@
+   ByteSize _compiled_synchronized_native_basic_lock_sp_offset;
+ 
+   friend class nmethodLocker;
+-  
++
+   // For native wrappers
+   nmethod(methodOop method,
+-	  int nmethod_size,
++          int nmethod_size,
+           CodeOffsets* offsets,
+-	  CodeBuffer *code_buffer,
+-	  int frame_size,
++          CodeBuffer *code_buffer,
++          int frame_size,
+           ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
+           ByteSize basic_lock_sp_offset,       /* synchronized natives only */
+-	  OopMapSet* oop_maps);
++          OopMapSet* oop_maps);
+ 
+   // Creation support
+-  nmethod(methodOop method,          
++  nmethod(methodOop method,
+           int nmethod_size,
+           int compile_id,
+           int entry_bci,
+@@ -241,7 +238,7 @@
+ 
+   // used to check that writes to nmFlags are done consistently.
+   static void check_safepoint() PRODUCT_RETURN;
+- 
++
+   // Used to manipulate the exception cache
+   void add_exception_cache_entry(ExceptionCache* new_entry);
+   ExceptionCache* exception_cache_entry_for_exception(Handle exception);
+@@ -256,12 +253,12 @@
+                               int entry_bci,
+                               CodeOffsets* offsets,
+                               int orig_pc_offset,
+-                              DebugInformationRecorder* recorder, 
++                              DebugInformationRecorder* recorder,
+                               Dependencies* dependencies,
+                               CodeBuffer *code_buffer,
+-                              int frame_size, 
+-                              OopMapSet* oop_maps, 
+-                              ExceptionHandlerTable* handler_table, 
++                              int frame_size,
++                              OopMapSet* oop_maps,
++                              ExceptionHandlerTable* handler_table,
+                               ImplicitExceptionTable* nul_chk_table,
+                               AbstractCompiler* compiler,
+                               int comp_level);
+@@ -293,7 +290,7 @@
+ 
+   bool is_compiled_by_c1() const;
+   bool is_compiled_by_c2() const;
+-    
++
+   // boundaries for different parts
+   address code_begin         () const             { return _entry_point; }
+   address code_end           () const             { return           header_begin() + _stub_offset          ; }
+@@ -342,7 +339,7 @@
+   bool  is_alive() const                          { return flags.state == alive || flags.state == not_entrant; }
+   bool  is_not_entrant() const                    { return flags.state == not_entrant; }
+   bool  is_zombie() const                         { return flags.state == zombie; }
+-  bool  is_unloaded() const                       { return flags.state == unloaded;   }      
++  bool  is_unloaded() const                       { return flags.state == unloaded;   }
+ 
+   // Make the nmethod non entrant. The nmethod will continue to be alive.
+   // It is used when an uncommon trap happens.
+@@ -350,9 +347,9 @@
+   void  make_zombie()                             { make_not_entrant_or_zombie(zombie); }
+ 
+   // used by jvmti to track if the unload event has been reported
+-  bool  unload_reported()			  { return _unload_reported; }
+-  void  set_unload_reported()			  { _unload_reported = true; }
+-  
++  bool  unload_reported()                         { return _unload_reported; }
++  void  set_unload_reported()                     { _unload_reported = true; }
++
+   bool  is_marked_for_deoptimization() const      { return _markedForDeoptimization; }
+   void  mark_for_deoptimization()                 { _markedForDeoptimization = true; }
+ 
+@@ -384,7 +381,7 @@
+ 
+   // Sweeper support
+   long  stack_traversal_mark()                    { return _stack_traversal_mark; }
+-  void  set_stack_traversal_mark(long l)          { _stack_traversal_mark = l; } 
++  void  set_stack_traversal_mark(long l)          { _stack_traversal_mark = l; }
+ 
+   // Exception cache support
+   ExceptionCache* exception_cache() const         { return _exception_cache; }
+@@ -406,7 +403,7 @@
+   // tells whether frames described by this nmethod can be deoptimized
+   // note: native wrappers cannot be deoptimized.
+   bool can_be_deoptimized() const { return is_java_method(); }
+-  
++
+   // Inline cache support
+   void clear_inline_caches();
+   void cleanup_inline_caches();
+@@ -414,9 +411,9 @@
+     return (addr >= instructions_begin() && addr < verified_entry_point());
+   }
+ 
+-  // unlink and deallocate this nmethod 
++  // unlink and deallocate this nmethod
+   // Only NMethodSweeper class is expected to use this. NMethodSweeper is not
+-  // expected to use any other private methods/data in this class. 
++  // expected to use any other private methods/data in this class.
+ 
+  protected:
+   void flush();
+@@ -425,7 +422,7 @@
+   // If returning true, it is unsafe to remove this nmethod even though it is a zombie
+   // nmethod, since the VM might have a reference to it. Should only be called from a  safepoint.
+   bool is_locked_by_vm() const                    { return _lock_count >0; }
+- 
++
+   // See comment at definition of _last_seen_on_stack
+   void mark_as_seen_on_stack();
+   bool can_not_entrant_be_converted();
+@@ -440,7 +437,7 @@
+                   oop* root, bool unloading_occurred);
+ 
+   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
+-				     OopClosure* f);
++                                     OopClosure* f);
+   void oops_do(OopClosure* f);
+ 
+   // ScopeDesc for an instruction
+@@ -486,7 +483,7 @@
+   void verify();
+   void verify_scopes();
+   void verify_interrupt_point(address interrupt_point);
+-  
++
+   // printing support
+   void print()                          const     PRODUCT_RETURN;
+   void print_code()                               PRODUCT_RETURN;
+@@ -523,8 +520,13 @@
+   // PrimitiveIC*   primitiveIC_at(char* p) const;
+   oop embeddedOop_at(address p);
+ 
+-  // tells if this compiled method is dependent on
+-  bool is_dependent_on(klassOop dependee);  
++  // tells if any of this method's dependencies have been invalidated
++  // (this is expensive!)
++  bool check_all_dependencies();
++
++  // tells if this compiled method is dependent on the given changes,
++  // and the changes have invalidated it
++  bool check_dependency_on(DepChange& changes);
+ 
+   // Evolution support. Tells if this compiled method is dependent on any of
+   // methods m() of class dependee, such that if m() in dependee is replaced,
+@@ -534,11 +536,11 @@
+   // Fast breakpoint support. Tells if this compiled method is
+   // dependent on the given method. Returns true if this nmethod
+   // corresponds to the given method as well.
+-  bool is_dependent_on_method(methodOop dependee);  
++  bool is_dependent_on_method(methodOop dependee);
+ 
+   // is it ok to patch at address?
+   bool is_patchable_at(address instr_address);
+-  
++
+   // UseBiasedLocking support
+   ByteSize compiled_synchronized_native_basic_lock_owner_sp_offset() {
+     return _compiled_synchronized_native_basic_lock_owner_sp_offset;
+@@ -574,5 +576,3 @@
+     lock_nmethod(_nm);
+   }
+ };
+-
+-
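
The substantive change in the nmethod.hpp hunks above (beyond whitespace) is the dependency API: the single is_dependent_on(klassOop) query becomes check_all_dependencies() and check_dependency_on(DepChange&), which test a compiled method against a batched class-hierarchy change. Below is a minimal standalone sketch of that checking pattern only; it is not HotSpot code, and DepChange is reduced here to a plain list of newly loaded class names.

    // Standalone sketch -- not HotSpot code -- of the dependency-check
    // pattern introduced above: a compiled method records the classes it
    // depends on and is invalidated when a batched class-hierarchy change
    // (HotSpot's DepChange) touches any of them.
    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    struct DepChange {                   // stand-in for HotSpot's DepChange
        std::vector<std::string> loaded_classes;
    };

    class CompiledMethod {
        std::vector<std::string> _dependencies;
     public:
        explicit CompiledMethod(std::vector<std::string> deps)
            : _dependencies(std::move(deps)) {}

        // analogue of nmethod::check_dependency_on(DepChange&)
        bool check_dependency_on(const DepChange& changes) const {
            for (const std::string& dep : _dependencies)
                if (std::find(changes.loaded_classes.begin(),
                              changes.loaded_classes.end(),
                              dep) != changes.loaded_classes.end())
                    return true;         // a dependency was invalidated
            return false;
        }
    };

    int main() {
        CompiledMethod m({"java/util/List"});
        DepChange c{{"java/util/List"}};
        std::cout << (m.check_dependency_on(c) ? "deoptimize\n" : "keep\n");
    }
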
+diff -ruN openjdk6/hotspot/src/share/vm/code/oopRecorder.cpp openjdk/hotspot/src/share/vm/code/oopRecorder.cpp
+--- openjdk6/hotspot/src/share/vm/code/oopRecorder.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/oopRecorder.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)oopRecorder.cpp	1.22 07/05/05 17:05:22 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -157,5 +154,3 @@
+   }
+   return -1;
+ }
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/code/oopRecorder.hpp openjdk/hotspot/src/share/vm/code/oopRecorder.hpp
+--- openjdk6/hotspot/src/share/vm/code/oopRecorder.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/oopRecorder.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)oopRecorder.hpp	1.22 07/05/05 17:05:21 JVM"
+-#endif
+ /*
+  * Copyright 1998-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Recording and retrieval of oop relocations in compiled code.
+diff -ruN openjdk6/hotspot/src/share/vm/code/pcDesc.cpp openjdk/hotspot/src/share/vm/code/pcDesc.cpp
+--- openjdk6/hotspot/src/share/vm/code/pcDesc.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/pcDesc.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)pcDesc.cpp	1.30 07/05/05 17:05:18 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,15 +19,16 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_pcDesc.cpp.incl"
+ 
+-PcDesc::PcDesc(int pc_offset, int scope_decode_offset) {
++PcDesc::PcDesc(int pc_offset, int scope_decode_offset, int obj_decode_offset) {
+   _pc_offset           = pc_offset;
+   _scope_decode_offset = scope_decode_offset;
++  _obj_decode_offset   = obj_decode_offset;
+ }
+ 
+ address PcDesc::real_pc(const nmethod* code) const {
+@@ -61,4 +59,3 @@
+   //Unimplemented();
+   return true;
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/code/pcDesc.hpp openjdk/hotspot/src/share/vm/code/pcDesc.hpp
+--- openjdk6/hotspot/src/share/vm/code/pcDesc.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/pcDesc.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)pcDesc.hpp	1.36 07/05/05 17:05:20 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // PcDescs map a physical PC (given as offset from start of nmethod) to
+@@ -35,17 +32,20 @@
+  private:
+   int _pc_offset;           // offset from start of nmethod
+   int _scope_decode_offset; // offset for scope in nmethod
++  int _obj_decode_offset;
+ 
+  public:
+   int pc_offset() const           { return _pc_offset;   }
+   int scope_decode_offset() const { return _scope_decode_offset; }
++  int obj_decode_offset() const   { return _obj_decode_offset; }
+ 
+   void set_pc_offset(int x)           { _pc_offset           = x; }
+   void set_scope_decode_offset(int x) { _scope_decode_offset = x; }
++  void set_obj_decode_offset(int x)   { _obj_decode_offset   = x; }
+ 
+   // Constructor (only used for static in nmethod.cpp)
+   // Also used by ScopeDesc::sender()]
+-  PcDesc(int pc_offset, int scope_decode_offset);
++  PcDesc(int pc_offset, int scope_decode_offset, int obj_decode_offset);
+ 
+   enum {
+     // upper and lower exclusive limits real offsets:
+@@ -59,4 +59,3 @@
+   void print(nmethod* code);
+   bool verify(nmethod* code);
+ };
+-
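
The pcDesc hunks above widen each PC descriptor with an _obj_decode_offset, so every construction site now supplies three offsets. A minimal standalone sketch of the widened record, assuming a serialized_null-style sentinel for call sites that carry no object data; not HotSpot code.

    // Each descriptor now maps a pc offset to three debug-info offsets,
    // and every call site passes the new obj_decode_offset explicitly.
    #include <cstdio>

    const int serialized_null = -1;      // assumed "no data" sentinel

    struct PcDesc {
        int _pc_offset;                  // offset from start of the method's code
        int _scope_decode_offset;        // where the scope info starts
        int _obj_decode_offset;          // where the object info starts (new)

        PcDesc(int pc_offset, int scope_decode_offset, int obj_decode_offset)
            : _pc_offset(pc_offset),
              _scope_decode_offset(scope_decode_offset),
              _obj_decode_offset(obj_decode_offset) {}
    };

    int main() {
        // A call site with no scalar-replaced objects passes the sentinel.
        PcDesc d(0x40, 128, serialized_null);
        printf("pc=%d scope=%d obj=%d\n",
               d._pc_offset, d._scope_decode_offset, d._obj_decode_offset);
    }
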
+diff -ruN openjdk6/hotspot/src/share/vm/code/relocInfo.cpp openjdk/hotspot/src/share/vm/code/relocInfo.cpp
+--- openjdk6/hotspot/src/share/vm/code/relocInfo.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/relocInfo.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)relocInfo.cpp	1.89 07/05/05 17:05:21 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -98,15 +95,15 @@
+ 
+ 
+ void relocInfo::change_reloc_info_for_address(RelocIterator *itr, address pc, relocType old_type, relocType new_type) {
+-  bool found = false;  
+-  while (itr->next() && !found) {    
+-    if (itr->addr() == pc) {      
++  bool found = false;
++  while (itr->next() && !found) {
++    if (itr->addr() == pc) {
+       assert(itr->type()==old_type, "wrong relocInfo type found");
+-      itr->current()->set_type(new_type);      
++      itr->current()->set_type(new_type);
+       found=true;
+     }
+   }
+-  assert(found, "no relocInfo found for pc");      
++  assert(found, "no relocInfo found for pc");
+ }
+ 
+ 
+@@ -132,7 +129,7 @@
+   _end     = cb->relocation_end();
+   _addr    = (address) cb->instructions_begin();
+ 
+-  assert(!has_current(), "just checking");  
++  assert(!has_current(), "just checking");
+   address code_end = cb->instructions_end();
+ 
+   assert(begin == NULL || begin >= cb->instructions_begin(), "in bounds");
+@@ -190,14 +187,14 @@
+ 
+ void RelocIterator::create_index(relocInfo* dest_begin, int dest_count, relocInfo* dest_end) {
+   address relocation_begin = (address)dest_begin;
+-  address relocation_end   = (address)dest_end; 
++  address relocation_end   = (address)dest_end;
+   int     total_size       = relocation_end - relocation_begin;
+   int     locs_size        = dest_count * sizeof(relocInfo);
+   if (!UseRelocIndex) {
+     Copy::fill_to_bytes(relocation_begin + locs_size, total_size-locs_size, 0);
+     return;
+   }
+-  int     index_size       = total_size - locs_size - BytesPerInt;	// find out how much space is left
++  int     index_size       = total_size - locs_size - BytesPerInt;      // find out how much space is left
+   int     ncards           = index_size / sizeof(RelocIndexEntry);
+   assert(total_size == locs_size + index_size + BytesPerInt, "checkin'");
+   assert(index_size >= 0 && index_size % sizeof(RelocIndexEntry) == 0, "checkin'");
+@@ -209,7 +206,7 @@
+   if (index_size != 0) {
+     assert(index_size > 0, "checkin'");
+ 
+-    RelocIndexEntry* index = (RelocIndexEntry *)(relocation_begin + locs_size); 
++    RelocIndexEntry* index = (RelocIndexEntry *)(relocation_begin + locs_size);
+     assert(index == (RelocIndexEntry*)index_size_addr - ncards, "checkin'");
+ 
+     // walk over the relocations, and fill in index entries as we go
+@@ -233,10 +230,10 @@
+       reloc_offset = iter._current - initial_current;
+       if (!iter.next())  break;
+       while (iter.addr() >= next_card_addr) {
+-	index[i].addr_offset  = addr_offset;
+-	index[i].reloc_offset = reloc_offset;
+-	i++;
+-	next_card_addr += indexCardSize;
++        index[i].addr_offset  = addr_offset;
++        index[i].reloc_offset = reloc_offset;
++        i++;
++        next_card_addr += indexCardSize;
+       }
+     }
+     while (i < ncards) {
+@@ -272,8 +269,8 @@
+       assert(_addr == _code->instructions_begin(), "_addr must be unadjusted");
+       int card = (begin - _addr) / indexCardSize;
+       if (card > 0) {
+-	if (index+card-1 < index_limit)  index += card-1;
+-	else                             index = index_limit - 1;
++        if (index+card-1 < index_limit)  index += card-1;
++        else                             index = index_limit - 1;
+ #ifdef ASSERT
+         addrCheck = _addr    + index->addr_offset;
+         infoCheck = _current + index->reloc_offset;
+@@ -322,19 +319,19 @@
+ 
+ void PatchingRelocIterator:: prepass() {
+   // turn breakpoints off during patching
+-  _init_state = (*this);	// save cursor
++  _init_state = (*this);        // save cursor
+   while (next()) {
+     if (type() == relocInfo::breakpoint_type) {
+       breakpoint_reloc()->set_active(false);
+     }
+   }
+-  (RelocIterator&)(*this) = _init_state;	// reset cursor for client
++  (RelocIterator&)(*this) = _init_state;        // reset cursor for client
+ }
+ 
+ 
+ void PatchingRelocIterator:: postpass() {
+   // turn breakpoints back on after patching
+-  (RelocIterator&)(*this) = _init_state;	// reset cursor again
++  (RelocIterator&)(*this) = _init_state;        // reset cursor again
+   while (next()) {
+     if (type() == relocInfo::breakpoint_type) {
+       breakpoint_Relocation* bpt = breakpoint_reloc();
+@@ -417,8 +414,8 @@
+       break;
+     case relocInfo::oop_type:
+       {
+-	oop_Relocation* r = (oop_Relocation*)reloc();
+-	return oop_Relocation::spec(r->oop_index(), r->offset() + offset);
++        oop_Relocation* r = (oop_Relocation*)reloc();
++        return oop_Relocation::spec(r->oop_index(), r->offset() + offset);
+       }
+     default:
+       ShouldNotReachHere();
+@@ -437,7 +434,7 @@
+   ShouldNotReachHere();
+   return NULL;
+ }
+-  
++
+ 
+ void Relocation::set_value(address x) {
+   ShouldNotReachHere();
+@@ -493,7 +490,7 @@
+     return p->begin();
+   } else {
+ #ifndef _LP64
+-    // this only works on 32bit machines 
++    // this only works on 32bit machines
+     return (address) ((intptr_t) index);
+ #else
+     fatal("Relocation::index_to_runtime_address, int32_t not pointer sized");
+@@ -570,7 +567,7 @@
+   short*  p     = (short*) dest->locs_end();
+   address point =          dest->locs_point();
+ 
+-  // Try to make a pointer NULL first.  
++  // Try to make a pointer NULL first.
+   if (_oop_limit >= point &&
+       _oop_limit <= point + NativeCall::instruction_size) {
+     _oop_limit = NULL;
+@@ -746,7 +743,7 @@
+   else                      { ShouldNotReachHere(); }
+ 
+   _target = internal() ? address_from_scaled_offset(target_bits, addr())
+-		       : index_to_runtime_address  (target_bits);
++                       : index_to_runtime_address  (target_bits);
+ }
+ 
+ 
+@@ -779,7 +776,7 @@
+ }
+ 
+ 
+-RelocIterator virtual_call_Relocation::parse_ic(CodeBlob* &code, address &ic_call, address &first_oop, 
++RelocIterator virtual_call_Relocation::parse_ic(CodeBlob* &code, address &ic_call, address &first_oop,
+                                                 oop* &oop_addr, bool *is_optimized) {
+   assert(ic_call != NULL, "ic_call address must be set");
+   assert(ic_call != NULL || first_oop != NULL, "must supply a non-null input");
+@@ -793,16 +790,16 @@
+   }
+   assert(ic_call   == NULL || code->contains(ic_call),   "must be in CodeBlob");
+   assert(first_oop == NULL || code->contains(first_oop), "must be in CodeBlob");
+-  
++
+   address oop_limit = NULL;
+ 
+   if (ic_call != NULL) {
+     // search for the ic_call at the given address
+     RelocIterator iter(code, ic_call, ic_call+1);
+     bool ret = iter.next();
+-    assert(ret == true, "relocInfo must exist at this address");    
++    assert(ret == true, "relocInfo must exist at this address");
+     assert(iter.addr() == ic_call, "must find ic_call");
+-    if (iter.type() == relocInfo::virtual_call_type) {      
++    if (iter.type() == relocInfo::virtual_call_type) {
+       virtual_call_Relocation* r = iter.virtual_call_reloc();
+       first_oop = r->first_oop();
+       oop_limit = r->oop_limit();
+@@ -814,7 +811,7 @@
+       first_oop = NULL;
+       return iter;
+     }
+-  }   
++  }
+ 
+   // search for the first_oop, to get its oop_addr
+   RelocIterator all_oops(code, first_oop);
+@@ -836,16 +833,16 @@
+     // search forward for the ic_call matching the given first_oop
+     while (iter.next()) {
+       if (iter.type() == relocInfo::virtual_call_type) {
+-	virtual_call_Relocation* r = iter.virtual_call_reloc();
+-	if (r->first_oop() == first_oop) {
+-	  ic_call   = r->addr();
+-	  oop_limit = r->oop_limit();
+-	  break;
+-	}
++        virtual_call_Relocation* r = iter.virtual_call_reloc();
++        if (r->first_oop() == first_oop) {
++          ic_call   = r->addr();
++          oop_limit = r->oop_limit();
++          break;
++        }
+       }
+     }
+     guarantee(!did_reset, "cannot find ic_call");
+-    iter = RelocIterator(code);	// search the whole CodeBlob
++    iter = RelocIterator(code); // search the whole CodeBlob
+     did_reset = true;
+   }
+ 
+@@ -874,8 +871,8 @@
+   // No stubs for ICs
+   // Clean IC
+   ResourceMark rm;
+-  CompiledIC* icache = CompiledIC_at(this);  
+-  icache->set_to_clean();  
++  CompiledIC* icache = CompiledIC_at(this);
++  icache->set_to_clean();
+ }
+ 
+ 
+@@ -883,8 +880,8 @@
+   // No stubs for ICs
+   // Clean IC
+   ResourceMark rm;
+-  CompiledIC* icache = CompiledIC_at(this);  
+-  icache->set_to_clean();  
++  CompiledIC* icache = CompiledIC_at(this);
++  icache->set_to_clean();
+ }
+ 
+ 
+@@ -895,7 +892,7 @@
+   while (iter.next()) {
+     if (iter.type() == relocInfo::static_stub_type) {
+       if (iter.static_stub_reloc()->static_call() == static_call_addr) {
+-	return iter.addr();
++        return iter.addr();
+       }
+     }
+   }
+@@ -917,7 +914,7 @@
+   while (iter.next()) {
+     if (iter.type() == relocInfo::static_stub_type) {
+       if (iter.static_stub_reloc()->static_call() == static_call_addr) {
+-	return iter.addr();
++        return iter.addr();
+       }
+     }
+   }
+@@ -964,7 +961,7 @@
+       target = new_addr_for(pd_get_address_from_code(), src, dest);
+     }
+   }
+-  set_value(target);  
++  set_value(target);
+ }
+ 
+ 
+@@ -1024,7 +1021,7 @@
+   if (b) {
+     set_bits(bits() | enabled_state);
+   } else {
+-    set_active(false);		// remove the actual breakpoint insn, if any
++    set_active(false);          // remove the actual breakpoint insn, if any
+     set_bits(bits() & ~enabled_state);
+   }
+ }
+@@ -1037,7 +1034,7 @@
+ 
+   // %%% should probably seize a lock here (might not be the right lock)
+   //MutexLockerEx ml_patch(Patching_lock, true);
+-  //if (active() == b)  return;		// recheck state after locking
++  //if (active() == b)  return;         // recheck state after locking
+ 
+   if (b) {
+     set_bits(bits() | active_state);
+@@ -1081,7 +1078,7 @@
+     return;
+   }
+   tty->print("relocInfo@" INTPTR_FORMAT " [type=%d(%s) addr=" INTPTR_FORMAT,
+-	     _current, type(), reloc_type_string((relocInfo::relocType) type()), _addr);
++             _current, type(), reloc_type_string((relocInfo::relocType) type()), _addr);
+   if (current()->format() != 0)
+     tty->print(" format=%d", current()->format());
+   if (datalen() == 1) {
+@@ -1102,17 +1099,17 @@
+       oop  raw_oop   = NULL;
+       oop  oop_value = NULL;
+       if (code() != NULL || r->oop_is_immediate()) {
+-	oop_addr  = r->oop_addr();
+-	raw_oop   = *oop_addr;
+-	oop_value = r->oop_value();
++        oop_addr  = r->oop_addr();
++        raw_oop   = *oop_addr;
++        oop_value = r->oop_value();
+       }
+       tty->print(" | [oop_addr=" INTPTR_FORMAT " *=" INTPTR_FORMAT " offset=%d]",
+                  oop_addr, (address)raw_oop, r->offset());
+       // Do not print the oop by default--we want this routine to
+       // work even during GC or other inconvenient times.
+       if (WizardMode && oop_value != NULL) {
+-	tty->print("oop_value=" INTPTR_FORMAT ": ", (address)oop_value);
+-	oop_value->print_value_on(tty);
++        tty->print("oop_value=" INTPTR_FORMAT ": ", (address)oop_value);
++        oop_value->print_value_on(tty);
+       }
+       break;
+     }
+@@ -1135,7 +1132,7 @@
+     {
+       virtual_call_Relocation* r = (virtual_call_Relocation*) reloc();
+       tty->print(" | [destination=" INTPTR_FORMAT " first_oop=" INTPTR_FORMAT " oop_limit=" INTPTR_FORMAT "]",
+-		 r->destination(), r->first_oop(), r->oop_limit());
++                 r->destination(), r->first_oop(), r->oop_limit());
+       break;
+     }
+   case relocInfo::static_stub_type:
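
Most of the relocInfo.cpp hunks above are whitespace, but the create_index() code they touch builds a card index over the relocation stream (the hunks show RelocIndexEntry records with addr_offset/reloc_offset pairs filled per card). A rough standalone sketch of the idea, with illustrative sizes and a simplified layout; not HotSpot code.

    // Code addresses are divided into fixed-size "cards"; each card records
    // which relocation record to start scanning from, so a later seek toward
    // a target pc can skip most of the stream.
    #include <cstdio>
    #include <vector>

    const int indexCardSize = 128;   // bytes of code covered per card (assumed)

    // reloc_addrs: ascending code offsets that carry relocation records.
    std::vector<int> build_index(const std::vector<int>& reloc_addrs,
                                 int code_size) {
        int ncards   = code_size / indexCardSize;
        int sentinel = (int)reloc_addrs.size();        // marks "no record"
        std::vector<int> first(ncards, sentinel);
        for (int i = sentinel - 1; i >= 0; i--)        // last write wins, so each
            first[reloc_addrs[i] / indexCardSize] = i; // card keeps its first record
        for (int c = ncards - 2; c >= 0; c--)          // empty cards borrow the
            if (first[c] == sentinel) first[c] = first[c + 1];  // next card's record
        return first;
    }

    int main() {
        std::vector<int> idx = build_index({16, 40, 300, 900}, 1024);
        printf("seek for pc 310 starts at record %d\n", idx[310 / indexCardSize]);
    }
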
+diff -ruN openjdk6/hotspot/src/share/vm/code/relocInfo.hpp openjdk/hotspot/src/share/vm/code/relocInfo.hpp
+--- openjdk6/hotspot/src/share/vm/code/relocInfo.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/relocInfo.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)relocInfo.hpp	1.86 07/05/05 17:05:22 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Types in this file:
+@@ -155,7 +152,7 @@
+ //   The identity of the callee is extracted from debugging information.
+ //   //%note reloc_3
+ //
+-// relocInfo::virtual_call_type -- a virtual call site (which includes an inline 
++// relocInfo::virtual_call_type -- a virtual call site (which includes an inline
+ //                                 cache)
+//   Value:  a CodeBlob, a stub, the interpreter, or a fixup routine
+ //   Instruction types: a call, plus some associated set-oop instructions
+@@ -227,7 +224,7 @@
+ // This uses 4 instruction words, 8 relocation halfwords,
+ // and an entry (which is sharable) in the CodeBlob's oop pool,
+ // for a total of 36 bytes.
+-// 
++//
+ // Note that the compiler is responsible for ensuring the "fldOffset" when
+ // added to "%lo(myObject)" does not overflow the immediate fields of the
+ // memory instructions.
+@@ -251,12 +248,12 @@
+   friend class RelocIterator;
+  public:
+   enum relocType {
+-    none	            =  0, // Used when no relocation should be generated
++    none                    =  0, // Used when no relocation should be generated
+     oop_type                =  1, // embedded oop
+     virtual_call_type       =  2, // a standard inline cache call for a virtual send
+     opt_virtual_call_type   =  3, // a virtual call that has been statically bound (i.e., no IC cache)
+-    static_call_type        =  4, // a static send 
+-    static_stub_type        =  5, // stub-entry for static send  (takes care of interpreter case)    
++    static_call_type        =  4, // a static send
++    static_stub_type        =  5, // stub-entry for static send  (takes care of interpreter case)
+     runtime_call_type       =  6, // call to fixed external routine
+     external_word_type      =  7, // reference to fixed external address
+     internal_word_type      =  8, // reference within the current code blob
+@@ -319,7 +316,7 @@
+   };
+ 
+   // accessors
+- public: 
++ public:
+   relocType  type()       const { return (relocType)((unsigned)_value >> nontype_width); }
+   int  format()           const { return format_mask==0? 0: format_mask &
+                                          ((unsigned)_value >> offset_width); }
+@@ -330,9 +327,9 @@
+   const short* data()     const { assert(is_datalen(), "must have data");
+                                   return (const short*)(this + 1); }
+   int          datalen()  const { assert(is_datalen(), "must have data");
+-				  return (_value & datalen_mask); }
++                                  return (_value & datalen_mask); }
+   int         immediate() const { assert(is_immediate(), "must have immed");
+-				  return (_value & datalen_mask); }
++                                  return (_value & datalen_mask); }
+  public:
+   static int addr_unit()        { return offset_unit; }
+   static int offset_limit()     { return (1 << offset_width) * offset_unit; }
+@@ -498,8 +495,8 @@
+ //      case relocInfo::prim_type         :
+ //      case relocInfo::uncommon_type     :
+ //      case relocInfo::runtime_call_type :
+-//      case relocInfo::internal_word_type: 
+-//      case relocInfo::external_word_type: 
++//      case relocInfo::internal_word_type:
++//      case relocInfo::external_word_type:
+ //      ...
+ //     }
+ //   }
+@@ -508,7 +505,7 @@
+   enum { SECT_CONSTS = 2,
+          SECT_LIMIT = 3 };  // must be equal to CodeBuffer::SECT_LIMIT
+   friend class Relocation;
+-  friend class relocInfo;	// for change_reloc_info_for_address only
++  friend class relocInfo;       // for change_reloc_info_for_address only
+   typedef relocInfo::relocType relocType;
+ 
+  private:
+@@ -589,7 +586,7 @@
+     return true;
+   }
+ 
+-  // accessors  
++  // accessors
+   address      limit()        const { return _limit; }
+   void     set_limit(address x);
+   relocType    type()         const { return current()->type(); }
+@@ -629,8 +626,8 @@
+ 
+ #ifndef PRODUCT
+  public:
+-  void print();  
+-  void print_current();  
++  void print();
++  void print_current();
+ #endif
+ };
+ 
+@@ -790,7 +787,7 @@
+   address old_addr_for(address newa, const CodeBuffer* src, CodeBuffer* dest);
+   address new_addr_for(address olda, const CodeBuffer* src, CodeBuffer* dest);
+   void normalize_address(address& addr, const CodeSection* dest, bool allow_other_sections = false);
+- 
++
+  public:
+   // accessors which only make sense for a bound Relocation
+   address   addr()         const { return binding()->addr(); }
+@@ -859,11 +856,11 @@
+   virtual int    offset()                      { return 0; }
+   address         value()                      = 0;
+   void        set_value(address x)             { set_value(x, offset()); }
+-  void        set_value(address x, intptr_t o) { 
++  void        set_value(address x, intptr_t o) {
+     if (addr_in_const())
+       *(address*)addr() = x;
+-    else 
+-      pd_set_data_value(x, o); 
++    else
++      pd_set_data_value(x, o);
+   }
+ 
+   // The "o" (displacement) argument is relevant only to split relocations
+@@ -966,7 +963,7 @@
+   address _first_oop;               // location of first set-oop instruction
+   address _oop_limit;               // search limit for set-oop instructions
+ 
+-  friend class RelocIterator; 
++  friend class RelocIterator;
+   virtual_call_Relocation() { }
+ 
+ 
+@@ -1005,7 +1002,7 @@
+   }
+ 
+  private:
+-  friend class RelocIterator; 
++  friend class RelocIterator;
+   opt_virtual_call_Relocation() { }
+ 
+  public:
+@@ -1027,7 +1024,7 @@
+   }
+ 
+  private:
+-  friend class RelocIterator; 
++  friend class RelocIterator;
+   static_call_Relocation() { }
+ 
+  public:
+@@ -1054,7 +1051,7 @@
+     _static_call = static_call;
+   }
+ 
+-  friend class RelocIterator; 
++  friend class RelocIterator;
+   static_stub_Relocation() { }
+ 
+  public:
+@@ -1078,7 +1075,7 @@
+   }
+ 
+  private:
+-  friend class RelocIterator; 
++  friend class RelocIterator;
+   runtime_call_Relocation() { }
+ 
+  public:
+@@ -1110,7 +1107,7 @@
+     _target = target;
+   }
+ 
+-  friend class RelocIterator; 
++  friend class RelocIterator;
+   external_word_Relocation() { }
+ 
+  public:
+@@ -1154,7 +1151,7 @@
+   address _target;                  // address in CodeBlob
+   int     _section;                 // section providing base address, if any
+ 
+-  friend class RelocIterator; 
++  friend class RelocIterator;
+   internal_word_Relocation() { }
+ 
+   // bit-width of LSB field in packed offset, if section >= 0
+@@ -1176,7 +1173,7 @@
+ 
+ class section_word_Relocation : public internal_word_Relocation {
+   relocInfo::relocType type() { return relocInfo::section_word_type; }
+-  
++
+  public:
+   static RelocationHolder spec(address target, int section) {
+     RelocationHolder rh = newHolder();
+@@ -1195,16 +1192,18 @@
+   void unpack_data();
+ 
+  private:
+-  friend class RelocIterator; 
++  friend class RelocIterator;
+   section_word_Relocation() { }
+ };
+ 
+ 
+ class poll_Relocation : public Relocation {
++  bool          is_data()                      { return true; }
+   relocInfo::relocType type() { return relocInfo::poll_type; }
+ };
+ 
+ class poll_return_Relocation : public Relocation {
++  bool          is_data()                      { return true; }
+   relocInfo::relocType type() { return relocInfo::poll_return_type; }
+ };
+ 
+@@ -1250,7 +1249,7 @@
+ 
+   breakpoint_Relocation(int kind, address target, bool internal_target);
+ 
+-  friend class RelocIterator; 
++  friend class RelocIterator;
+   breakpoint_Relocation() { }
+ 
+   short    bits()       const { return _bits; }
+@@ -1274,8 +1273,8 @@
+   bool removable()      const { return (bits() & removable_attr) != 0; }
+   bool settable()       const { return (bits() &  settable_attr) != 0; }
+ 
+-  void set_enabled(bool b);	// to activate, you must also say set_active
+-  void set_active(bool b);	// actually inserts bpt (must be enabled 1st)
++  void set_enabled(bool b);     // to activate, you must also say set_active
++  void set_active(bool b);      // actually inserts bpt (must be enabled 1st)
+ 
+   // data is packed as 16 bits, followed by the target (1 or 2 words), followed
+   // if necessary by empty storage for saving away original instruction bytes.
+@@ -1290,7 +1289,7 @@
+ 
+ 
+ // We know all the xxx_Relocation classes, so now we can define these:
+-#define EACH_CASE(name)                         		\
++#define EACH_CASE(name)                                         \
+ inline name##_Relocation* RelocIterator::name##_reloc() {       \
+   assert(type() == relocInfo::name##_type, "type must agree");  \
+   /* The purpose of the placed "new" is to re-use the same */   \
+@@ -1313,8 +1312,8 @@
+  private:
+   RelocIterator _init_state;
+ 
+-  void prepass();		// deactivates all breakpoints
+-  void postpass();		// reactivates all enabled breakpoints
++  void prepass();               // deactivates all breakpoints
++  void postpass();              // reactivates all enabled breakpoints
+ 
+   // do not copy these puppies; it would have unpredictable side effects
+   // these are private and have no bodies defined because they should not be called
+@@ -1327,4 +1326,3 @@
+ 
+   ~PatchingRelocIterator()                           { postpass(); }
+ };
+-
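
The relocType accessors reindented above pack a type tag, an optional format, and an address offset into each 16-bit relocInfo halfword. A standalone sketch of that packing with illustrative field widths (the real layout also reserves bits for the format field); not HotSpot's exact layout.

    #include <cstdio>

    const unsigned offset_width = 12;              // assumed split: 4 + 12 bits

    struct relocInfo {
        unsigned short _value;
        relocInfo(unsigned type, unsigned offset)
            : _value((unsigned short)((type << offset_width) |
                                      (offset & ((1u << offset_width) - 1)))) {}
        unsigned type()   const { return _value >> offset_width; }
        unsigned offset() const { return _value & ((1u << offset_width) - 1); }
    };

    int main() {
        relocInfo r(/*oop_type*/ 1, /*offset*/ 36);
        printf("type=%u offset=%u\n", r.type(), r.offset());
    }
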
+diff -ruN openjdk6/hotspot/src/share/vm/code/scopeDesc.cpp openjdk/hotspot/src/share/vm/code/scopeDesc.cpp
+--- openjdk6/hotspot/src/share/vm/code/scopeDesc.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/scopeDesc.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)scopeDesc.cpp	1.57 07/05/05 17:05:22 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,39 +19,58 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_scopeDesc.cpp.incl"
+ 
+ 
++ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset) {
++  _code          = code;
++  _decode_offset = decode_offset;
++  _objects       = decode_object_values(obj_decode_offset);
++  decode_body();
++}
++
+ ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset) {
+   _code          = code;
+   _decode_offset = decode_offset;
++  _objects       = decode_object_values(DebugInformationRecorder::serialized_null);
++  decode_body();
++}
+ 
+-  if (decode_offset == DebugInformationRecorder::serialized_null) {
++
++ScopeDesc::ScopeDesc(const ScopeDesc* parent) {
++  _code          = parent->_code;
++  _decode_offset = parent->_sender_decode_offset;
++  _objects       = parent->_objects;
++  decode_body();
++}
++
++
++void ScopeDesc::decode_body() {
++  if (decode_offset() == DebugInformationRecorder::serialized_null) {
+     // This is a sentinel record, which is only relevant to
+     // approximate queries.  Decode a reasonable frame.
+     _sender_decode_offset = DebugInformationRecorder::serialized_null;
+-    _method = methodHandle(code->method());
++    _method = methodHandle(_code->method());
+     _bci = InvocationEntryBci;
+     _locals_decode_offset = DebugInformationRecorder::serialized_null;
+     _expressions_decode_offset = DebugInformationRecorder::serialized_null;
+     _monitors_decode_offset = DebugInformationRecorder::serialized_null;
+-    return;
+-  }
+-
+-  // decode header
+-  DebugInfoReadStream* stream  = stream_at(_decode_offset);
++  } else {
++    // decode header
++    DebugInfoReadStream* stream  = stream_at(decode_offset());
+ 
+-  _sender_decode_offset = stream->read_int();
+-  _method = methodHandle((methodOop) stream->read_oop());
+-  _bci    = stream->read_bci();
+-  // decode offsets for body and sender
+-  _locals_decode_offset      = stream->read_int();
+-  _expressions_decode_offset = stream->read_int();
+-  _monitors_decode_offset    = stream->read_int();
++    _sender_decode_offset = stream->read_int();
++    _method = methodHandle((methodOop) stream->read_oop());
++    _bci    = stream->read_bci();
++    // decode offsets for body and sender
++    _locals_decode_offset      = stream->read_int();
++    _expressions_decode_offset = stream->read_int();
++    _monitors_decode_offset    = stream->read_int();
++  }
+ }
+ 
+ 
+@@ -69,6 +85,18 @@
+   return result;
+ }
+ 
++GrowableArray<ScopeValue*>* ScopeDesc::decode_object_values(int decode_offset) {
++  if (decode_offset == DebugInformationRecorder::serialized_null) return NULL;
++  GrowableArray<ScopeValue*>* result = new GrowableArray<ScopeValue*>();
++  DebugInfoReadStream* stream = new DebugInfoReadStream(_code, decode_offset, result);
++  int length = stream->read_int();
++  for (int index = 0; index < length; index++) {
++    result->push(ScopeValue::read_from(stream));
++  }
++  assert(result->length() == length, "inconsistent debug information");
++  return result;
++}
++
+ 
+ GrowableArray<MonitorValue*>* ScopeDesc::decode_monitor_values(int decode_offset) {
+   if (decode_offset == DebugInformationRecorder::serialized_null) return NULL;
+@@ -82,7 +110,7 @@
+ }
+ 
+ DebugInfoReadStream* ScopeDesc::stream_at(int decode_offset) const {
+-  return new DebugInfoReadStream(_code, decode_offset);
++  return new DebugInfoReadStream(_code, decode_offset, _objects);
+ }
+ 
+ GrowableArray<ScopeValue*>* ScopeDesc::locals() {
+@@ -97,13 +125,17 @@
+   return decode_monitor_values(_monitors_decode_offset);
+ }
+ 
++GrowableArray<ScopeValue*>* ScopeDesc::objects() {
++  return _objects;
++}
++
+ bool ScopeDesc::is_top() const {
+  return _sender_decode_offset == DebugInformationRecorder::serialized_null;
+ }
+ 
+ ScopeDesc* ScopeDesc::sender() const {
+   if (is_top()) return NULL;
+-  return new ScopeDesc(_code, _sender_decode_offset);
++  return new ScopeDesc(this);
+ }
+ 
+ 
+@@ -174,6 +206,18 @@
+       }
+     }
+   }
++
++#ifdef COMPILER2
++  if (DoEscapeAnalysis && is_top() && _objects != NULL) {
++    tty->print_cr("Objects");
++    for (int i = 0; i < _objects->length(); i++) {
++      ObjectValue* sv = (ObjectValue*) _objects->at(i);
++      tty->print(" - %d: ", sv->id());
++      sv->print_fields_on(tty);
++      tty->cr();
++    }
++  }
++#endif // COMPILER2
+ }
+ 
+ #endif
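
The scopeDesc.cpp hunks above are the heart of this patch: ScopeDesc gains an object pool (_objects), decoded once from a length-prefixed debug-info stream and shared with sender scopes, in support of escape analysis. A standalone sketch of the decode loop, with plain ints standing in for ScopeValue::read_from(); not HotSpot code.

    #include <cstdio>
    #include <vector>

    const int serialized_null = -1;     // assumed "no object data" sentinel

    struct DebugInfoReadStream {
        const int* _data;
        int        _pos;
        int read_int() { return _data[_pos++]; }
    };

    std::vector<int>* decode_object_values(const int* data, int decode_offset) {
        if (decode_offset == serialized_null) return nullptr;
        DebugInfoReadStream stream{data, decode_offset};
        std::vector<int>* result = new std::vector<int>();
        int length = stream.read_int();
        for (int index = 0; index < length; index++)
            result->push_back(stream.read_int());  // ScopeValue::read_from() analogue
        return result;
    }

    int main() {
        int data[] = {3, 10, 20, 30};              // length, then three values
        std::vector<int>* objs = decode_object_values(data, 0);
        printf("decoded %d object values\n", (int)objs->size());
        delete objs;
    }
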
+diff -ruN openjdk6/hotspot/src/share/vm/code/scopeDesc.hpp openjdk/hotspot/src/share/vm/code/scopeDesc.hpp
+--- openjdk6/hotspot/src/share/vm/code/scopeDesc.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/scopeDesc.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)scopeDesc.hpp	1.36 07/05/05 17:05:22 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // SimpleScopeDesc is used when all you need to extract from
+@@ -44,7 +41,7 @@
+     _method           = methodOop(buffer.read_oop());
+     _bci              = buffer.read_bci();
+   }
+-  
++
+   methodOop method() { return _method; }
+   int bci() { return _bci; }
+ };
+@@ -55,15 +52,21 @@
+ class ScopeDesc : public ResourceObj {
+  public:
+   // Constructor
++  ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset);
++
++  // Calls above, giving default value of "serialized_null" to the
++  // "obj_decode_offset" argument.  (We don't use a default argument to
++  // avoid a .hpp-.hpp dependency.)
+   ScopeDesc(const nmethod* code, int decode_offset);
+ 
+   // JVM state
+   methodHandle method() const { return _method; }
+   int          bci()    const { return _bci;    }
+-  
++
+   GrowableArray<ScopeValue*>*   locals();
+-  GrowableArray<ScopeValue*>*   expressions();  
++  GrowableArray<ScopeValue*>*   expressions();
+   GrowableArray<MonitorValue*>* monitors();
++  GrowableArray<ScopeValue*>*   objects();
+ 
+   // Stack walking, returns NULL if this is the outer most scope.
+   ScopeDesc* sender() const;
+@@ -77,6 +80,9 @@
+   bool is_equal(ScopeDesc* sd) const;
+ 
+  private:
++  // Alternative constructor
++  ScopeDesc(const ScopeDesc* parent);
++
+   // JVM state
+   methodHandle  _method;
+   int           _bci;
+@@ -88,6 +94,9 @@
+   int _expressions_decode_offset;
+   int _monitors_decode_offset;
+ 
++  // Object pool
++  GrowableArray<ScopeValue*>* _objects;
++
+   // Nmethod information
+   const nmethod* _code;
+ 
+@@ -95,6 +104,7 @@
+   void decode_body();
+   GrowableArray<ScopeValue*>* decode_scope_values(int decode_offset);
+   GrowableArray<MonitorValue*>* decode_monitor_values(int decode_offset);
++  GrowableArray<ScopeValue*>* decode_object_values(int decode_offset);
+ 
+   DebugInfoReadStream* stream_at(int decode_offset) const;
+ 
+@@ -111,6 +121,3 @@
+   void print_value_on(outputStream* st) const;
+ #endif
+ };
+-
+-
+-
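
The header comment above spells out a design choice worth noting: the two-argument ScopeDesc constructor gets a separate overload instead of a default argument, because the sentinel's definition lives in another header and naming it in scopeDesc.hpp would add a .hpp-to-.hpp dependency. A standalone sketch of the same arrangement (the delegating constructor is C++11 idiom; the 2008 code duplicates the body instead); not HotSpot code.

    #include <cstdio>

    // --- header side: the sentinel's value is never mentioned here ---
    class ScopeDesc {
     public:
        ScopeDesc(int decode_offset, int obj_decode_offset);
        ScopeDesc(int decode_offset);   // supplies the sentinel internally
        int obj_decode_offset() const { return _obj_decode_offset; }
     private:
        int _obj_decode_offset;
    };

    // --- source side: only this file needs the sentinel's definition ---
    static const int serialized_null = -1;

    ScopeDesc::ScopeDesc(int decode_offset, int obj_decode_offset)
        : _obj_decode_offset(obj_decode_offset) { (void)decode_offset; }

    ScopeDesc::ScopeDesc(int decode_offset)
        : ScopeDesc(decode_offset, serialized_null) {}

    int main() { printf("%d\n", ScopeDesc(5).obj_decode_offset()); }
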
+diff -ruN openjdk6/hotspot/src/share/vm/code/stubs.cpp openjdk/hotspot/src/share/vm/code/stubs.cpp
+--- openjdk6/hotspot/src/share/vm/code/stubs.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/stubs.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)stubs.cpp	1.47 07/05/05 17:05:20 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -86,7 +83,7 @@
+ }
+ 
+ 
+-Stub* StubQueue::stub_containing(address pc) const {  
++Stub* StubQueue::stub_containing(address pc) const {
+   if (contains(pc)) {
+     for (Stub* s = first(); s != NULL; s = next(s)) {
+       if (stub_contains(s, pc)) return s;
+diff -ruN openjdk6/hotspot/src/share/vm/code/stubs.hpp openjdk/hotspot/src/share/vm/code/stubs.hpp
+--- openjdk6/hotspot/src/share/vm/code/stubs.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/stubs.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)stubs.hpp	1.34 07/05/05 17:05:22 JVM"
+-#endif
+ /*
+  * Copyright 1997-2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The classes in this file provide a simple framework for the
+@@ -39,7 +36,7 @@
+ // A concrete stub layout may look like this (both data
+ // and code sections could be empty as well):
+ //
+-//                ________ 
++//                ________
+ // stub       -->|        | <--+
+ //               |  data  |    |
+ //               |________|    |
+@@ -209,6 +206,3 @@
+   void  verify();                                // verifies the stub queue
+   void  print();                                 // prints information about the stub queue
+ };
+-
+-
+-
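
The stubs.cpp hunk above touches StubQueue::stub_containing(), which walks variable-size stubs laid back to back in one buffer, as the layout diagram in stubs.hpp describes. A standalone sketch of that walk with an illustrative header-only Stub; not HotSpot's layout.

    #include <cstdio>

    struct Stub { int size; /* data and code bytes follow */ };

    struct StubQueue {
        unsigned char* _buffer;
        int            _used;
        Stub* first() const { return _used > 0 ? (Stub*)_buffer : nullptr; }
        Stub* next(Stub* s) const {
            unsigned char* p = (unsigned char*)s + s->size;
            return p < _buffer + _used ? (Stub*)p : nullptr;
        }
        Stub* stub_containing(unsigned char* pc) const {
            for (Stub* s = first(); s != nullptr; s = next(s))
                if ((unsigned char*)s <= pc && pc < (unsigned char*)s + s->size)
                    return s;
            return nullptr;
        }
    };

    int main() {
        alignas(Stub) unsigned char buf[64] = {};
        ((Stub*)buf)->size        = 32;        // first 32-byte stub
        ((Stub*)(buf + 32))->size = 32;        // second 32-byte stub
        StubQueue q{buf, 64};
        Stub* s = q.stub_containing(buf + 40);
        printf("pc at offset 40 is in the stub at offset %d\n",
               (int)((unsigned char*)s - buf));
    }
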
+diff -ruN openjdk6/hotspot/src/share/vm/code/vmreg.cpp openjdk/hotspot/src/share/vm/code/vmreg.cpp
+--- openjdk6/hotspot/src/share/vm/code/vmreg.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/vmreg.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)vmreg.cpp	1.35 07/05/05 17:05:22 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/code/vmreg.hpp openjdk/hotspot/src/share/vm/code/vmreg.hpp
+--- openjdk6/hotspot/src/share/vm/code/vmreg.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/vmreg.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)vmreg.hpp	1.37 07/05/05 17:05:22 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //------------------------------VMReg------------------------------------------
+@@ -36,7 +33,7 @@
+ class VMRegImpl;
+ typedef VMRegImpl* VMReg;
+ 
+-class VMRegImpl { 
++class VMRegImpl {
+ // friend class OopMap;
+ friend class VMStructs;
+ friend class OptoReg;
+@@ -76,7 +73,7 @@
+   // A concrete register is a value that returns true for is_reg() and is
+   // also a register you could use in the assembler. On machines with
+   // 64bit registers only one half of the VMReg (and OptoReg) is considered
+-  // concrete. 
++  // concrete.
+   bool is_concrete();
+ 
+   // VMRegs are 4 bytes wide on all platforms
+@@ -115,19 +112,19 @@
+   }
+ 
+   // Convert register numbers to stack slots and vice versa
+-  static VMReg stack2reg( int idx ) { 
++  static VMReg stack2reg( int idx ) {
+     return (VMReg) (intptr_t) (stack0->value() + idx);
+   }
+ 
+   uintptr_t reg2stack() {
+     assert( is_stack(), "Not a stack-based register" );
+-    return value() - stack0->value(); 
++    return value() - stack0->value();
+   }
+ 
+   static void set_regName();
+ 
+ #include "incls/_vmreg_pd.hpp.incl"
+-  
++
+ };
+ 
+ //---------------------------VMRegPair-------------------------------------------
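
The vmreg.hpp hunks above include stack2reg()/reg2stack(), which place machine registers and stack slots on one number line, with slots counted upward from a stack0 base. A standalone sketch with an illustrative stack0 value; not HotSpot code.

    #include <cassert>
    #include <cstdio>

    const int stack0 = 32;                 // first value past the registers (assumed)

    struct VMReg {
        int _value;
        bool is_stack() const { return _value >= stack0; }
        int  reg2stack() const {
            assert(is_stack() && "Not a stack-based register");
            return _value - stack0;        // recover the stack slot index
        }
    };

    VMReg stack2reg(int idx) { return VMReg{stack0 + idx}; }

    int main() {
        VMReg r = stack2reg(3);
        printf("slot %d is_stack=%d\n", r.reg2stack(), (int)r.is_stack());
    }
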
+diff -ruN openjdk6/hotspot/src/share/vm/code/vtableStubs.cpp openjdk/hotspot/src/share/vm/code/vtableStubs.cpp
+--- openjdk6/hotspot/src/share/vm/code/vtableStubs.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/vtableStubs.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)vtableStubs.cpp	1.55 07/05/05 17:05:22 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -60,7 +57,7 @@
+       JvmtiExport::post_dynamic_code_generated_while_holding_locks("vtable stub", _chunk, _chunk_end);
+     }
+     align_chunk();
+-  } 
++  }
+   assert(_chunk + real_size <= _chunk_end, "bad allocation");
+   void* res = _chunk;
+   _chunk += real_size;
+@@ -71,7 +68,7 @@
+ 
+ void VtableStub::print() {
+   tty->print("vtable stub (index = %d, receiver_location = %d, code = [" INTPTR_FORMAT ", " INTPTR_FORMAT "[)",
+-	     index(), receiver_location(), code_begin(), code_end());
++             index(), receiver_location(), code_begin(), code_end());
+ }
+ 
+ 
+@@ -101,7 +98,7 @@
+ 
+ address VtableStubs::create_stub(bool is_vtable_stub, int vtable_index, methodOop method) {
+   assert(vtable_index >= 0, "must be positive");
+-  
++
+   VtableStub* s = ShareVtableStubs ? lookup(is_vtable_stub, vtable_index) : NULL;
+   if (s == NULL) {
+     if (is_vtable_stub) {
+@@ -124,7 +121,7 @@
+ 
+ inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index){
+   // Assumption: receiver_location < 4 in most cases.
+-  int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index;  
++  int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index;
+   return (is_vtable_stub ? ~hash : hash)  & mask;
+ }
+ 
+@@ -178,8 +175,8 @@
+   return NULL;
+ }
+ 
+-void vtableStubs_init() { 
+-  VtableStubs::initialize(); 
++void vtableStubs_init() {
++  VtableStubs::initialize();
+ }
+ 
+ 
+@@ -198,8 +195,3 @@
+ }
+ 
+ #endif // Product
+-
+-
+-
+-
+-
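
The VtableStubs::hash() line reindented above mixes the vtable index with the receiver location, folds itable stubs into the same table by complementing the hash, and masks the result to the table size. A standalone sketch with an assumed power-of-two table size; not HotSpot code.

    #include <cstdio>

    const int N    = 256;                  // table size (power of two, assumed)
    const int mask = N - 1;

    unsigned hash(bool is_vtable_stub, int vtable_index, int receiver_location) {
        // same mixing as the patched line: shift, xor, add, then mask
        int h = ((vtable_index << 2) ^ receiver_location) + vtable_index;
        return (unsigned)(is_vtable_stub ? ~h : h) & mask;
    }

    int main() {
        printf("vtable bucket %u, itable bucket %u\n",
               hash(true, 5, 1), hash(false, 5, 1));
    }
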
+diff -ruN openjdk6/hotspot/src/share/vm/code/vtableStubs.hpp openjdk/hotspot/src/share/vm/code/vtableStubs.hpp
+--- openjdk6/hotspot/src/share/vm/code/vtableStubs.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/code/vtableStubs.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)vtableStubs.hpp	1.27 07/05/05 17:05:22 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A VtableStub holds an individual code stub for a pair (vtable index, #args) for either itables or vtables
+@@ -61,7 +58,7 @@
+     return _index == index && _is_vtable_stub == is_vtable_stub;
+   }
+   bool contains(address pc) const                { return code_begin() <= pc && pc < code_end(); }
+-  
++
+   void set_exception_points(address npe_addr, address ame_addr) {
+     _npe_offset = npe_addr - code_begin();
+     _ame_offset = ame_addr - code_begin();
+@@ -72,10 +69,10 @@
+   }
+ 
+   // platform-dependent routines
+-  static int  pd_code_size_limit(bool is_vtable_stub);  
++  static int  pd_code_size_limit(bool is_vtable_stub);
+   static int  pd_code_alignment();
+   // CNC: Removed because vtable stubs are now made with an ideal graph
+-  // static bool pd_disregard_arg_size(); 
++  // static bool pd_disregard_arg_size();
+ 
+   static void align_chunk() {
+     uintptr_t off = (uintptr_t)( _chunk + sizeof(VtableStub) ) % pd_code_alignment();
+@@ -84,8 +81,8 @@
+ 
+  public:
+   // Query
+-  bool is_itable_stub()                          { return !_is_vtable_stub; } 
+-  bool is_vtable_stub()                          { return  _is_vtable_stub; } 
++  bool is_itable_stub()                          { return !_is_vtable_stub; }
++  bool is_vtable_stub()                          { return  _is_vtable_stub; }
+   bool is_abstract_method_error(address epc)     { return epc == code_begin()+_ame_offset; }
+   bool is_null_pointer_exception(address epc)    { return epc == code_begin()+_npe_offset; }
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/compiler/abstractCompiler.cpp openjdk/hotspot/src/share/vm/compiler/abstractCompiler.cpp
+--- openjdk6/hotspot/src/share/vm/compiler/abstractCompiler.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/compiler/abstractCompiler.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,7 +1,4 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)abstractCompiler.cpp	1.4 07/05/05 17:05:23 JVM"
+-#endif
+-// 
++//
+ // Copyright 2007 Sun Microsystems, Inc.  All Rights Reserved.
+ // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ //
+@@ -22,18 +19,18 @@
+ // Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ // CA 95054 USA or visit www.sun.com if you need additional information or
+ // have any questions.
+-// 
++//
+ 
+ #include "incls/_precompiled.incl"
+ #include "incls/_abstractCompiler.cpp.incl"
+ 
+ void AbstractCompiler::initialize_runtimes(initializer f, volatile int* state) {
+   if (*state != initialized) {
+-    
++
+     // We are thread in native here...
+     CompilerThread* thread = CompilerThread::current();
+     bool do_initialization = false;
+-    { 
++    {
+       ThreadInVMfromNative tv(thread);
+       MutexLocker only_one(CompileThread_lock, thread);
+       if ( *state == uninitialized) {
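
The initialize_runtimes() hunk above shows the one-time-initialization shape: check a shared state word, take a lock to elect one initializing thread, and proceed once the state reads initialized. A standalone sketch using std primitives in place of CompileThread_lock and the ThreadInVMfromNative transition; the spin-wait for the in-progress case is an assumption, since the hunk is cut off above. Not HotSpot code.

    #include <atomic>
    #include <cstdio>
    #include <mutex>

    enum { uninitialized = 0, initializing = 1, initialized = 2 };

    static std::atomic<int> state{uninitialized};
    static std::mutex       only_one;

    void initialize_runtimes(void (*f)()) {
        if (state.load() == initialized) return;
        bool do_initialization = false;
        {
            std::lock_guard<std::mutex> lock(only_one);
            if (state.load() == uninitialized) {
                state.store(initializing);
                do_initialization = true;   // this thread won the election
            }
        }
        if (do_initialization) {
            f();                            // run the one-time initializer
            state.store(initialized);
        } else {
            while (state.load() != initialized) { /* another thread is at work */ }
        }
    }

    int main() {
        initialize_runtimes([] { puts("runtime initialized"); });
    }
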
+diff -ruN openjdk6/hotspot/src/share/vm/compiler/abstractCompiler.hpp openjdk/hotspot/src/share/vm/compiler/abstractCompiler.hpp
+--- openjdk6/hotspot/src/share/vm/compiler/abstractCompiler.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/compiler/abstractCompiler.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)abstractCompiler.hpp	1.26 07/05/05 17:05:24 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ typedef void (*initializer)(void);
+@@ -47,18 +44,18 @@
+ 
+   // Missing feature tests
+   virtual bool supports_native()                 { return true; }
+-  virtual bool supports_osr   ()                 { return true; } 
++  virtual bool supports_osr   ()                 { return true; }
+ #if defined(TIERED) || ( !defined(COMPILER1) && !defined(COMPILER2))
+-  virtual bool is_c1   ()                        { return false; } 
+-  virtual bool is_c2   ()                        { return false; } 
++  virtual bool is_c1   ()                        { return false; }
++  virtual bool is_c2   ()                        { return false; }
+ #else
+ #ifdef COMPILER1
+-  bool is_c1   ()                                { return true; } 
+-  bool is_c2   ()                                { return false; } 
++  bool is_c1   ()                                { return true; }
++  bool is_c2   ()                                { return false; }
+ #endif // COMPILER1
+ #ifdef COMPILER2
+-  bool is_c1   ()                                { return false; } 
+-  bool is_c2   ()                                { return true; } 
++  bool is_c1   ()                                { return false; }
++  bool is_c2   ()                                { return true; }
+ #endif // COMPILER2
+ #endif // TIERED
+ 
+@@ -72,11 +69,11 @@
+ 
+   // Compilation entry point for methods
+   virtual void compile_method(ciEnv* env,
+-			      ciMethod* target,
+-			      int entry_bci) {
++                              ciMethod* target,
++                              int entry_bci) {
+     ShouldNotReachHere();
+   }
+-  
++
+ 
+   // Print compilation timers and statistics
+   virtual void print_timers() {
+diff -ruN openjdk6/hotspot/src/share/vm/compiler/cha.cpp openjdk/hotspot/src/share/vm/compiler/cha.cpp
+--- openjdk6/hotspot/src/share/vm/compiler/cha.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/compiler/cha.cpp	1969-12-31 19:00:00.000000000 -0500
+@@ -1,213 +0,0 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)cha.cpp	1.54 07/05/05 17:05:23 JVM"
+-#endif
+-/*
+- * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+- *
+- * This code is free software; you can redistribute it and/or modify it
+- * under the terms of the GNU General Public License version 2 only, as
+- * published by the Free Software Foundation.
+- *
+- * This code is distributed in the hope that it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+- * version 2 for more details (a copy is included in the LICENSE file that
+- * accompanied this code).
+- *
+- * You should have received a copy of the GNU General Public License version
+- * 2 along with this work; if not, write to the Free Software Foundation,
+- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+- * CA 95054 USA or visit www.sun.com if you need additional information or
+- * have any questions.
+- *  
+- */
+-
+-#include "incls/_precompiled.incl"
+-#include "incls/_cha.cpp.incl"
+-
+-bool CHA::_used = false;
+-int CHA::_max_result = 5;
+-
+-
+-CHAResult* CHA::analyze_call(KlassHandle calling_klass, KlassHandle static_receiver, KlassHandle actual_receiver, 
+-                             symbolHandle name, symbolHandle signature) {
+-  assert(static_receiver->oop_is_instance(), "must be instance klass");
+-  
+-  methodHandle m;
+-  // Only do exact lookup if receiver klass has been linked.  Otherwise,
+-  // the vtables has not been setup, and the LinkResolver will fail.
+-  if (instanceKlass::cast(static_receiver())->is_linked() && instanceKlass::cast(actual_receiver())->is_linked()) {    
+-    if (static_receiver->is_interface()) {
+-      // no point trying to resolve unless actual receiver is a klass
+-      if (!actual_receiver->is_interface()) {
+-        m = LinkResolver::resolve_interface_call_or_null(actual_receiver, static_receiver, name, signature, calling_klass);
+-      }
+-    } else {
+-      m = LinkResolver::resolve_virtual_call_or_null(actual_receiver, static_receiver, name, signature, calling_klass);
+-    }
+-
+-    if (m.is_null()) {
+-      // didn't find method (e.g., could be abstract method)
+-      return new CHAResult(actual_receiver, name, signature, NULL, NULL, m, false);
+-    } 
+-    if( m()->can_be_statically_bound() ||
+-        m()->is_private() || 
+-        actual_receiver->subklass() == NULL ) {
+-      // always optimize final methods, private methods or methods with no
+-      // subclasses.
+-      return new CHAResult(actual_receiver, name, signature, NULL, NULL, m);
+-    } 
+-    if (!UseCHA) {
+-      // don't optimize this call
+-      return new CHAResult(actual_receiver, name, signature, NULL, NULL, m, false);
+-    }
+-  }
+-
+-  // If the method is abstract then each non-abstract subclass must implement 
+-  // the method and inlining is not possible.  If there is exactly 1 subclass
+-  // then there can be only 1 implementation and we are OK.  
+-  // (This test weakens CHA slightly, for the sake of the old dependency mechanism.)
+-  if( !m.is_null() && m()->is_abstract() ) {// Method is abstract?
+-    Klass *sr = Klass::cast(static_receiver());
+-    if( sr == sr->up_cast_abstract() )
+-      return new CHAResult(actual_receiver, name, signature, NULL, NULL, m, false);
+-    // Fall into the next code; it will find the one implementation
+-    // and that implementation is correct.
+-  }
+-
+-  _used = true;
+-  GrowableArray<methodHandle>* methods  = new GrowableArray<methodHandle>(CHA::max_result());
+-  GrowableArray<KlassHandle>* receivers = new GrowableArray<KlassHandle>(CHA::max_result());
+-
+-  // Since 'm' is visible from the actual receiver we can call it if the
+-  // runtime receiver class does not override 'm'.  
+-  if( !m.is_null() && m()->method_holder() != actual_receiver() &&
+-      !m->is_abstract() ) {
+-    receivers->push(actual_receiver);
+-    methods->push(m);
+-  }
+-  if (static_receiver->is_interface()) {
+-    instanceKlassHandle sr = static_receiver();
+-    process_interface(sr, receivers, methods, name, signature);
+-  } else {
+-    process_class(static_receiver, receivers, methods, name, signature);
+-  }
+-
+-  methodHandle dummy;
+-  CHAResult* res = new CHAResult(actual_receiver, name, signature, receivers, methods, dummy);
+-
+-  //res->print();
+-  return res;
+-}
+-
+-void CHA::process_class(KlassHandle r, GrowableArray<KlassHandle>* receivers, GrowableArray<methodHandle>* methods, symbolHandle name, symbolHandle signature) {    
+-  // recursively add non-abstract subclasses of r to receivers list
+-  assert(!r->is_interface(), "should call process_interface instead");
+-  for (Klass* s = r->subklass(); s != NULL && !methods->is_full(); s = s->next_sibling()) {
+-    // preorder traversal, so check subclasses first
+-    if (s->is_interface()) {
+-      // can only happen if r == Object
+-      assert(r->superklass() == NULL, "must be klass Object");
+-    } else {
+-      process_class(s, receivers, methods, name, signature);
+-    }
+-  }
+-  // now check r itself (after subclasses because of preorder)
+-  if (!methods->is_full()) {
+-    // don't add abstract classes to receivers list
+-    // (but still consider their methods -- they may be non-abstract)
+-    if (!receivers->is_full() && !r->is_abstract()) {
+-      // don't duplicate the actual receiver
+-      if (!receivers->contains(r)) receivers->push(r);
+-    }
+-    methodOop m = NULL;
+-    if (r->oop_is_instance()) m = instanceKlass::cast(r())->find_method(name(), signature()); 
+-    if (m != NULL && !m->is_abstract()) {
+-      if (!methods->contains(m)) methods->push(m);
+-    }
+-  }
+-}
+-
+-void CHA::process_interface(instanceKlassHandle r, GrowableArray<KlassHandle>* receivers, GrowableArray<methodHandle>* methods, 
+-                            symbolHandle name, symbolHandle signature) {
+-  // recursively add non-abstract implementors of interface r to receivers list
+-  assert(r->is_interface(), "should call process_class instead");
+-  
+-  // We only store the implementors for an interface, if there is exactly one implementor  
+-  klassOop k = NULL;
+-  if (r->nof_implementors() == 1)  k = r->implementor(0);
+-  if (k == NULL)  methods->clear();  // no news is bad news
+-  if (k != NULL && !methods->is_full()) {   
+-    instanceKlass* kl = instanceKlass::cast(k);
+-    assert(kl->oop_is_instance(), "primitive klasses don't implement interfaces");
+-    assert(!kl->is_interface(), "must be a real klass");
+-    process_class(kl, receivers, methods, name, signature);
+-  }
+-
+-  // there are no links to subinterfaces
+-  assert(r->subklass() == NULL, "interfaces have no subclasses");
+-}
+-
+-
+-CHAResult::CHAResult(KlassHandle r, symbolHandle n, symbolHandle s,
+-                     GrowableArray<KlassHandle>* rs, GrowableArray<methodHandle>* ms, 
+-                     methodHandle target, bool v) :
+-  _receiver(r), _name(n), _signature(s), _receivers(rs), _target_methods(ms), _valid(v), _target(target) {}
+-
+-bool CHAResult::is_monomorphic() const {
+-  // note: check number of target methods, not number of receivers
+-  // (send can be monomorphic even with many receiver classes, if all inherit same method)
+-  return _valid && (_target_methods == NULL || _target_methods->length() == 1);
+-}
+-
+-methodHandle CHAResult::monomorphic_target() const {
+-  assert(is_monomorphic(), "not monomorphic");
+-  if (_target_methods != NULL) {
+-    assert(_target_methods->length() == 1, "expected single target");
+-    return _target_methods->first();
+-  } else {
+-    // final method
+-    //    assert(_target->is_final_method(), "expected final method");
+-    return _target;
+-  }
+-}
+-
+-KlassHandle CHAResult::monomorphic_receiver() const {
+-  assert(is_monomorphic(), "not monomorphic");
+-  if (_receivers != NULL) {
+-    // since all lookups will find the same method, it doesn't matter that much
+-    // which klass we return; for beauty's sake, return the target's method holder
+-    // (note: don't return _receiver -- its method may be abstract)
+-    return _target_methods->first()->method_holder();
+-  } else {
+-    // final method
+-    //    assert(_target->is_final_method(), "expected final method");
+-    return _receiver;
+-  }
+-}
+-
+-void CHAResult::print() {
+-  tty->print("(CHAResult*)%#x : ", this); 
+-  (instanceKlass::cast(_receiver()))->name()->print_value();
+-  tty->print("::");
+-  _name()->print_value();
+-  tty->print_cr(" %s", _valid ? "(Found)" : "(Not found)");
+-  if (_receivers != NULL) 
+-    tty->print("%d receiver klasses ", _receivers->length());
+-  if (_target_methods != NULL) 
+-    tty->print("%d target methods %s", _target_methods->length(), 
+-                _target_methods->is_full() ? "(FULL)" : "");
+-  if (is_monomorphic()) {
+-    methodHandle target = monomorphic_target();
+-    tty->print("monomorphic target method : ");
+-    target->print_short_name(tty);
+-    if (target->is_final())
+-      tty->print(" (final)");
+-    if (target->is_abstract())
+-      tty->print(" (abstract)");
+-  }
+-  tty->cr();
+-}
+diff -ruN openjdk6/hotspot/src/share/vm/compiler/cha.hpp openjdk/hotspot/src/share/vm/compiler/cha.hpp
+--- openjdk6/hotspot/src/share/vm/compiler/cha.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/compiler/cha.hpp	1969-12-31 19:00:00.000000000 -0500
+@@ -1,88 +0,0 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)cha.hpp	1.21 07/05/05 17:05:23 JVM"
+-#endif
+-/*
+- * Copyright 1997-1998 Sun Microsystems, Inc.  All Rights Reserved.
+- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+- *
+- * This code is free software; you can redistribute it and/or modify it
+- * under the terms of the GNU General Public License version 2 only, as
+- * published by the Free Software Foundation.
+- *
+- * This code is distributed in the hope that it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+- * version 2 for more details (a copy is included in the LICENSE file that
+- * accompanied this code).
+- *
+- * You should have received a copy of the GNU General Public License version
+- * 2 along with this work; if not, write to the Free Software Foundation,
+- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+- * CA 95054 USA or visit www.sun.com if you need additional information or
+- * have any questions.
+- *  
+- */
+-
+-// Class Hierarchy Analysis 
+-// Computes the set of overriding methods for a particular call,
+-// using the subclass links in instanceKlass.
+-// Right now the CHA just traverses these links for every query;
+-// if this should become too slow we can put in a cache.
+-
+-// result of a CHA query
+-class CHAResult : public ResourceObj {
+-  friend class CHA;
+-  const KlassHandle  _receiver;                                 // copies of the lookup (for better debugging)
+-  const symbolHandle _name;
+-  const symbolHandle _signature;
+-  const methodHandle _target;                                   // target method (if final)
+-  const bool         _valid;
+-  const GrowableArray<methodHandle>* const _target_methods;     // list of possible targets (NULL for final methods or if !UseCHA)
+-  const GrowableArray<KlassHandle>* const  _receivers;          // list of possible receiver klasses (NULL for final methods or if !UseCHA)
+-
+-  CHAResult(KlassHandle receiver, symbolHandle name, symbolHandle signature,
+-            GrowableArray<KlassHandle>* receivers, GrowableArray<methodHandle>* methods, 
+-            methodHandle target, bool valid = true);
+- public:
+-  KlassHandle  receiver() const                               { return _receiver; }
+-  symbolHandle name() const                                   { return _name; }
+-  symbolHandle signature() const                              { return _signature; }
+-  bool      is_accurate() const                               { return !_target_methods->is_full(); }
+-  bool      is_monomorphic() const;
+-  methodHandle monomorphic_target() const;                    // returns the single target (if is_monomorphic)
+-  KlassHandle  monomorphic_receiver() const;                  // receiver klass of monomorphic_target
+-  const GrowableArray<KlassHandle>*  receivers() const        { return _receivers; }
+-    // Returns the list of all subclasses that are possible receivers (empty array if none, capped at max_result).
+-    // The static receiver klass *is* included in the result (unless it is abstract).
+-    // The list is a class hierarchy preorder, i.e., subclasses precede their superclass.
+-    // All possible receiver classes are included, not just those that (re)define the method.
+-    // Abstract classes are suppressed.
+-  const GrowableArray<methodHandle>* target_methods() const   { return _target_methods; }
+-    // Returns the list of possible target methods, i.e., all methods potentially invoked
+-    // by this send (empty array if none, capped at max_result).
+-    // If the receiver klass (or one of its superclasses) defines the method, this definition 
+-    // is included in the result.  Abstract methods are suppressed.
+-  void print();
+-};
+-
+-
+-class CHA : AllStatic {
+-  static int _max_result;           // maximum result size (for efficiency)
+-  static bool _used;                // has CHA been used yet?  (will go away when deoptimization implemented)
+-
+-  static void process_class(KlassHandle r, GrowableArray<KlassHandle>* receivers, GrowableArray<methodHandle>* methods, 
+-                            symbolHandle name, symbolHandle signature);
+-  static void process_interface(instanceKlassHandle r, GrowableArray<KlassHandle>* receivers, GrowableArray<methodHandle>* methods, 
+-                            symbolHandle name, symbolHandle signature);
+- public:
+-  static bool has_been_used()       { return _used; }
+-  static int  max_result()          { return _max_result; }
+-  static void set_max_result(int n) { _max_result = n; }
+-
+-  static CHAResult* analyze_call(KlassHandle calling_klass, KlassHandle static_receiver, 
+-                                 KlassHandle actual_receiver, symbolHandle name, symbolHandle signature);
+-};
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/compiler/compileBroker.cpp openjdk/hotspot/src/share/vm/compiler/compileBroker.cpp
+--- openjdk6/hotspot/src/share/vm/compiler/compileBroker.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/compiler/compileBroker.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)compileBroker.cpp	1.147 07/05/17 15:50:51 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -32,9 +29,9 @@
+ 
+ // Only bother with this argument setup if dtrace is available
+ 
+-HS_DTRACE_PROBE_DECL8(hotspot, method__compile__begin, 
++HS_DTRACE_PROBE_DECL8(hotspot, method__compile__begin,
+   char*, intptr_t, char*, intptr_t, char*, intptr_t, char*, intptr_t);
+-HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end, 
++HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end,
+   char*, intptr_t, char*, intptr_t, char*, intptr_t, char*, intptr_t, bool);
+ 
+ #define DTRACE_METHOD_COMPILE_BEGIN_PROBE(compiler, method)              \
+@@ -192,7 +189,7 @@
+   _is_complete = false;
+   _is_success = false;
+   _code_handle = NULL;
+- 
++
+   _hot_method = NULL;
+   _hot_count = hot_count;
+   _time_queued = 0;  // tidy
+@@ -251,8 +248,8 @@
+ // ------------------------------------------------------------------
+ // CompileTask::print_line_on_error
+ //
+-// This function is called by fatal error handler when the thread 
+-// causing troubles is a compiler thread. 
++// This function is called by fatal error handler when the thread
++// causing troubles is a compiler thread.
+ //
+ // Do not grab any lock, do not allocate memory.
+ //
+@@ -273,7 +270,7 @@
+     const char compile_type   = is_osr                             ? '%' : ' ';
+     const char sync_char      = method->is_synchronized()          ? 's' : ' ';
+     const char exception_char = method->has_exception_handler()    ? '!' : ' ';
+-    const char tier_char      = 
++    const char tier_char      =
+       is_highest_tier_compile(comp_level())                        ? ' ' : ('0' + comp_level());
+     st->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, tier_char);
+   }
+@@ -310,7 +307,7 @@
+     const char compile_type   = is_osr                             ? '%' : ' ';
+     const char sync_char      = method->is_synchronized()          ? 's' : ' ';
+     const char exception_char = method->has_exception_handler()    ? '!' : ' ';
+-    const char tier_char      = 
++    const char tier_char      =
+       is_highest_tier_compile(comp_level())                        ? ' ' : ('0' + comp_level());
+     tty->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, tier_char);
+   }
+@@ -464,7 +461,7 @@
+ // Get the next CompileTask from a CompileQueue
+ CompileTask* CompileQueue::get() {
+   MutexLocker locker(lock());
+-  
++
+   // Wait for an available CompileTask.
+   while (_first == NULL) {
+     // There is no work to be done right now.  Wait.
+@@ -510,7 +507,7 @@
+     // counters  from having a ".0" namespace.
+     const char* thread_i = (instance == -1) ? thread_name :
+                       PerfDataManager::name_space(thread_name, instance);
+-                      
++
+ 
+     char* name = PerfDataManager::counter_name(thread_i, "method");
+     _perf_current_method =
+@@ -678,40 +675,40 @@
+                                       true, CHECK_0);
+   instanceKlassHandle klass (THREAD, k);
+   instanceHandle thread_oop = klass->allocate_instance_handle(CHECK_0);
+-  Handle string = java_lang_String::create_from_str(name, CHECK_0);    
++  Handle string = java_lang_String::create_from_str(name, CHECK_0);
+ 
+-  // Initialize thread_oop to put it into the system threadGroup    
++  // Initialize thread_oop to put it into the system threadGroup
+   Handle thread_group (THREAD,  Universe::system_thread_group());
+   JavaValue result(T_VOID);
+-  JavaCalls::call_special(&result, thread_oop, 
+-                       klass, 
+-                       vmSymbolHandles::object_initializer_name(), 
+-                       vmSymbolHandles::threadgroup_string_void_signature(), 
+-                       thread_group, 
+-                       string, 
+-                       CHECK_0);  
++  JavaCalls::call_special(&result, thread_oop,
++                       klass,
++                       vmSymbolHandles::object_initializer_name(),
++                       vmSymbolHandles::threadgroup_string_void_signature(),
++                       thread_group,
++                       string,
++                       CHECK_0);
+ 
+   {
+     MutexLocker mu(Threads_lock, THREAD);
+     compiler_thread = new CompilerThread(queue, counters);
+     // At this point the new CompilerThread data-races with this startup
+     // thread (which I believe is the primoridal thread and NOT the VM
+-    // thread).  This means Java bytecodes being executed at startup can 
++    // thread).  This means Java bytecodes being executed at startup can
+     // queue compile jobs which will run at whatever default priority the
+     // newly created CompilerThread runs at.
+ 
+-   
++
+     // At this point it may be possible that no osthread was created for the
+     // JavaThread due to lack of memory. We would have to throw an exception
+     // in that case. However, since this must work and we do not allow
+     // exceptions anyway, check and abort if this fails.
+ 
+     if (compiler_thread == NULL || compiler_thread->osthread() == NULL){
+-      vm_exit_during_initialization("java.lang.OutOfMemoryError", 
++      vm_exit_during_initialization("java.lang.OutOfMemoryError",
+                                     "unable to create new native thread");
+     }
+- 
+-    java_lang_Thread::set_thread(thread_oop(), compiler_thread);      
++
++    java_lang_Thread::set_thread(thread_oop(), compiler_thread);
+ 
+     // Note that this only sets the JavaThread _priority field, which by
+     // definition is limited to Java priorities and not OS priorities.
+@@ -733,11 +730,11 @@
+       // priorities and I am *explicitly* using OS priorities so that it's
+       // possible to set the compiler thread priority higher than any Java
+       // thread.
+-    
++
+     java_lang_Thread::set_daemon(thread_oop());
+-    
++
+     compiler_thread->set_threadObj(thread_oop());
+-    Threads::add(compiler_thread);  
++    Threads::add(compiler_thread);
+     Thread::start(compiler_thread);
+   }
+   // Let go of Threads_lock before yielding
+@@ -797,12 +794,12 @@
+ // CompileBroker::compile_method
+ //
+ // Request compilation of a method.
+-void CompileBroker::compile_method_base(methodHandle method, 
++void CompileBroker::compile_method_base(methodHandle method,
+                                         int osr_bci,
+                                         int comp_level,
+-                                        methodHandle hot_method, 
++                                        methodHandle hot_method,
+                                         int hot_count,
+-                                        const char* comment, 
++                                        const char* comment,
+                                         TRAPS) {
+   // do nothing if compiler thread(s) is not available
+   if (!_initialized ) {
+@@ -993,7 +990,7 @@
+     // osr compilation
+ #ifndef TIERED
+     // seems like an assert of dubious value
+-    assert(comp_level == CompLevel_full_optimization, 
++    assert(comp_level == CompLevel_full_optimization,
+            "all OSR compiles are assumed to be at a single compilation lavel");
+ #endif // TIERED
+     nmethod* nm = method->lookup_osr_nmethod_for(osr_bci);
+@@ -1011,7 +1008,7 @@
+   }
+ 
+   // If the method is native, do the lookup in the thread requesting
+-  // the compilation. Native lookups can load code, which is not 
++  // the compilation. Native lookups can load code, which is not
+   // permitted during compilation.
+   //
+   // Note: A native method implies non-osr compilation which is
+@@ -1036,8 +1033,8 @@
+ 
+   // JVMTI -- post_compile_event requires jmethod_id() that may require
+   // a lock the compiling thread can not acquire. Prefetch it here.
+-  if (JvmtiExport::should_post_compiled_method_load()) { 
+-    method->jmethod_id(); 
++  if (JvmtiExport::should_post_compiled_method_load()) {
++    method->jmethod_id();
+   }
+ 
+   // do the compilation
+@@ -1122,7 +1119,7 @@
+     method->set_not_compilable();
+     return true;
+   }
+-  
++
+   bool is_osr = (osr_bci != standard_entry_bci);
+   // Some compilers may not support on stack replacement.
+   if (is_osr &&
+@@ -1174,7 +1171,7 @@
+       return id;
+     }
+   }
+-  
++
+   // Method was not in the appropriate compilation range.
+   method->set_not_compilable();
+   return 0;
+@@ -1266,7 +1263,7 @@
+ 
+   JavaThread *thread = JavaThread::current();
+   thread->set_blocked_on_compilation(true);
+-  
++
+   methodHandle method(thread,
+                       (methodOop)JNIHandles::resolve(task->method_handle()));
+   {
+@@ -1299,7 +1296,7 @@
+ 
+   // For the thread that initializes the ciObjectFactory
+   // this resource mark holds all the shared objects
+-  ResourceMark rm; 
++  ResourceMark rm;
+ 
+   // First thread to get here will initialize the compiler interface
+ 
+@@ -1345,11 +1342,11 @@
+             vm_direct_exit(CompileTheWorld ? 0 : 1);
+           }
+ #endif
+-          UseCompiler               = false;    
++          UseCompiler               = false;
+           AlwaysCompileLoopMethods  = false;
+         }
+       }
+-      
++
+       CompileTask* task = queue->get();
+ 
+       // Give compiler threads an extra quanta.  They tend to be bursty and
+@@ -1368,7 +1365,7 @@
+       task->set_code_handle(&result_handle);
+       methodHandle method(thread,
+                      (methodOop)JNIHandles::resolve(task->method_handle()));
+-    
++
+       // Never compile a method if breakpoints are present in it
+       if (method()->number_of_breakpoints() == 0) {
+         // Compile the method.
+@@ -1432,7 +1429,7 @@
+ 
+       if (xtty != NULL) {
+         ttyLocker ttyl;
+-        
++
+         // Record any per thread log files
+         xtty->elem("thread_logfile thread='%d' filename='%s'", thread_id, file);
+       }
+@@ -1575,7 +1572,7 @@
+ 
+   methodHandle method(thread,
+                       (methodOop)JNIHandles::resolve(task->method_handle()));
+-  
++
+   DTRACE_METHOD_COMPILE_END_PROBE(compiler(task->comp_level()), method, task->is_success());
+ 
+   collect_statistics(thread, time, task);
+@@ -1724,7 +1721,7 @@
+ 
+   assert(code == NULL || code->is_locked_by_vm(), "will survive the MutexLocker");
+   MutexLocker locker(CompileStatistics_lock);
+-  
++
+   // _perf variables are production performance counters which are
+   // updated regardless of the setting of the CITime and CITimeEach flags
+   //
+diff -ruN openjdk6/hotspot/src/share/vm/compiler/compileBroker.hpp openjdk/hotspot/src/share/vm/compiler/compileBroker.hpp
+--- openjdk6/hotspot/src/share/vm/compiler/compileBroker.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/compiler/compileBroker.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)compileBroker.hpp	1.55 07/05/05 17:05:24 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class nmethod;
+@@ -250,16 +247,16 @@
+   static int _total_native_compile_count;
+   static int _total_osr_compile_count;
+   static int _total_standard_compile_count;
+-  
++
+   static int _sum_osr_bytes_compiled;
+   static int _sum_standard_bytes_compiled;
+   static int _sum_nmethod_size;
+   static int _sum_nmethod_code_size;
+ 
+-  static int compiler_count() { 
++  static int compiler_count() {
+     return CICompilerCountPerCPU
+       // Example: if CICompilerCountPerCPU is true, then we get
+-      // max(log2(8)-1,1) = 2 compiler threads on an 8-way machine.  
++      // max(log2(8)-1,1) = 2 compiler threads on an 8-way machine.
+       // May help big-app startup time.
+       ? (MAX2(log2_intptr(os::active_processor_count())-1,1))
+       : CICompilerCount;
+@@ -294,12 +291,12 @@
+   static bool check_break_at(methodHandle method, int compile_id, bool is_osr);
+   static void collect_statistics(CompilerThread* thread, elapsedTimer time, CompileTask* task);
+ 
+-  static void compile_method_base(methodHandle method, 
++  static void compile_method_base(methodHandle method,
+                                   int osr_bci,
+                                   int comp_level,
+-                                  methodHandle hot_method, 
++                                  methodHandle hot_method,
+                                   int hot_count,
+-                                  const char* comment, 
++                                  const char* comment,
+                                   TRAPS);
+ 
+  public:
+diff -ruN openjdk6/hotspot/src/share/vm/compiler/compileLog.cpp openjdk/hotspot/src/share/vm/compiler/compileLog.cpp
+--- openjdk6/hotspot/src/share/vm/compiler/compileLog.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/compiler/compileLog.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)compileLog.cpp	1.20 07/05/05 17:05:23 JVM"
+-#endif
+ /*
+  * Copyright 2002-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -116,7 +113,7 @@
+   assert(id < _identities_limit, "oob");
+   // Mark this id as processed.
+   // (Be sure to do this before any recursive calls to identify.)
+-  _identities[id] = 1;  // mark 
++  _identities[id] = 1;  // mark
+ 
+   // Now, print the object's identity once, in detail.
+   if (obj->is_klass()) {
+@@ -288,11 +285,10 @@
+ // ------------------------------------------------------------------
+ // CompileLog::finish_log
+ //
+-// Called during normal shutdown. For now, any clean-up needed in normal 
++// Called during normal shutdown. For now, any clean-up needed in normal
+ // shutdown is also needed in VM abort, so is covered by finish_log_on_error().
+ // Just allocate a buffer and call finish_log_on_error().
+ void CompileLog::finish_log(outputStream* file) {
+   char buf[4 * K];
+   finish_log_on_error(file, buf, sizeof(buf));
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/compiler/compileLog.hpp openjdk/hotspot/src/share/vm/compiler/compileLog.hpp
+--- openjdk6/hotspot/src/share/vm/compiler/compileLog.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/compiler/compileLog.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)compileLog.hpp	1.12 07/05/05 17:05:24 JVM"
+-#endif
+ /*
+  * Copyright 2002-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class ciObject;
+diff -ruN openjdk6/hotspot/src/share/vm/compiler/compilerOracle.cpp openjdk/hotspot/src/share/vm/compiler/compilerOracle.cpp
+--- openjdk6/hotspot/src/share/vm/compiler/compilerOracle.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/compiler/compilerOracle.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)compilerOracle.cpp	1.34 07/05/17 15:50:53 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -142,17 +139,17 @@
+   ResourceMark rm;
+   const char * candidate_string = candidate->as_C_string();
+   const char * match_string = match->as_C_string();
+-  
++
+   switch (match_mode) {
+   case Prefix:
+     return strstr(candidate_string, match_string) == candidate_string;
+-    
++
+   case Suffix: {
+     size_t clen = strlen(candidate_string);
+     size_t mlen = strlen(match_string);
+     return clen >= mlen && strcmp(candidate_string + clen - mlen, match_string) == 0;
+   }
+-  
++
+   case Substring:
+     return strstr(candidate_string, match_string) != NULL;
+ 
+@@ -365,14 +362,14 @@
+ // The characters allowed in a class or method name.  All characters > 0x7f
+ // are allowed in order to handle obfuscated class files (e.g. Volano)
+ #define RANGEBASE "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789$_<>" \
+-	"\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" \
+-	"\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" \
+-	"\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf" \
+-	"\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" \
+-	"\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" \
+-	"\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" \
+-	"\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef" \
+-	"\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
++        "\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" \
++        "\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" \
++        "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf" \
++        "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" \
++        "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" \
++        "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" \
++        "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef" \
++        "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+ 
+ #define RANGE0 "[*" RANGEBASE "]"
+ #define RANGEDOT "[*" RANGEBASE ".]"
+@@ -386,7 +383,7 @@
+ //   cmd  java.lang.String::foo
+ //  VM syntax
+ //   cmd  java/lang/String[. ]foo
+-// 
++//
+ 
+ static const char* patterns[] = {
+   "%*[ \t]%255" RANGEDOT    " "     "%255"  RANGE0 "%n",
+@@ -442,15 +439,15 @@
+   for (char* lp = line; *lp != '\0'; lp++) {
+     // Allow '.' to separate the class name from the method name.
+     // This is the preferred spelling of methods:
+-    //	    exclude java/lang/String.indexOf(I)I
++    //      exclude java/lang/String.indexOf(I)I
+     // Allow ',' for spaces (eases command line quoting).
+-    //	    exclude,java/lang/String.indexOf
++    //      exclude,java/lang/String.indexOf
+     // For backward compatibility, allow space as separator also.
+-    //	    exclude java/lang/String indexOf
+-    //	    exclude,java/lang/String,indexOf
++    //      exclude java/lang/String indexOf
++    //      exclude,java/lang/String,indexOf
+     // For easy cut-and-paste of method names, allow VM output format
+     // as produced by methodOopDesc::print_short_name:
+-    //	    exclude java.lang.String::indexOf
++    //      exclude java.lang.String::indexOf
+     // For simple implementation convenience here, convert them all to space.
+     if (have_colon) {
+       if (*lp == '.')  *lp = '/';   // dots build the package prefix
+@@ -539,7 +536,7 @@
+ }
+ 
+ static const char* cc_file() {
+-  if (CompileCommandFile[0] == '\0')
++  if (CompileCommandFile == NULL)
+     return ".hotspot_compiler";
+   return CompileCommandFile;
+ }
+@@ -561,7 +558,7 @@
+       token[pos++] = c;
+     }
+     c = getc(stream);
+-  }   
++  }
+   token[pos++] = '\0';
+   parse_from_line(token);
+ 
+@@ -647,7 +644,7 @@
+         return;
+       strncpy(newName, name, i);
+       newName[i] = '\0';
+-      
++
+       if (className == NULL) {
+         className = newName;
+         c_match = MethodMatcher::Prefix;
+@@ -655,7 +652,7 @@
+         methodName = newName;
+       }
+     }
+-    
++
+     if (*line == method_sep) {
+       if (className == NULL) {
+         className = "";
+@@ -683,7 +680,7 @@
+         }
+       }
+     }
+-  
++
+     // each directive is terminated by , or NUL or . followed by NUL
+     if (*line == ',' || *line == '\0' || (line[0] == '.' && line[1] == '\0')) {
+       if (methodName == NULL) {
+@@ -711,5 +708,3 @@
+     line = *line == '\0' ? line : line + 1;
+   }
+ }
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/compiler/compilerOracle.hpp openjdk/hotspot/src/share/vm/compiler/compilerOracle.hpp
+--- openjdk6/hotspot/src/share/vm/compiler/compilerOracle.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/compiler/compilerOracle.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)compilerOracle.hpp	1.21 07/05/05 17:05:23 JVM"
+-#endif
+ /*
+  * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // CompilerOracle is an interface for turning on and off compilation
+@@ -35,7 +32,7 @@
+   static bool _quiet;
+ 
+  public:
+-  // Reads from file and adds to lists 
++  // Reads from file and adds to lists
+   static void parse_from_file();
+ 
+   // Tells whether we to exclude compilation of method
+diff -ruN openjdk6/hotspot/src/share/vm/compiler/disassemblerEnv.hpp openjdk/hotspot/src/share/vm/compiler/disassemblerEnv.hpp
+--- openjdk6/hotspot/src/share/vm/compiler/disassemblerEnv.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/compiler/disassemblerEnv.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)disassemblerEnv.hpp	1.18 07/05/05 17:05:24 JVM"
+-#endif
+ /*
+  * Copyright 1997-2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Call-back interface for external disassembler
+@@ -36,4 +33,3 @@
+   virtual char* string_for_offset(intptr_t value) = 0;
+   virtual char* string_for_constant(unsigned char* pc, intptr_t value, int is_decimal) = 0;
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/compiler/methodLiveness.cpp openjdk/hotspot/src/share/vm/compiler/methodLiveness.cpp
+--- openjdk6/hotspot/src/share/vm/compiler/methodLiveness.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/compiler/methodLiveness.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)methodLiveness.cpp	1.41 07/05/05 17:05:24 JVM"
+-#endif
+ /*
+  * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The MethodLiveness class performs a simple liveness analysis on a method
+@@ -53,7 +50,7 @@
+ //    at that bci.
+ //
+ // The algorithm is approximate in many respects.  Notably:
+-// 
++//
+ // 1. We do not do the analysis necessary to match jsr's with the appropriate ret.
+ //    Instead we make the conservative assumption that any ret can return to any
+ //    jsr return site.
+@@ -110,7 +107,7 @@
+ 
+ long MethodLiveness::_total_locals_queried = 0;
+ long MethodLiveness::_total_live_locals_queried = 0;
+-  
++
+ long MethodLiveness::_total_visits = 0;
+ 
+ #endif
+@@ -122,7 +119,7 @@
+ elapsedTimer MethodLiveness::_time_query;
+ elapsedTimer MethodLiveness::_time_total;
+ 
+-MethodLiveness::MethodLiveness(Arena* arena, ciMethod* method) 
++MethodLiveness::MethodLiveness(Arena* arena, ciMethod* method)
+ #ifdef COMPILER1
+   : _bci_block_start((uintptr_t*)arena->Amalloc((method->code_size() >> LogBitsPerByte) + 1), method->code_size())
+ #endif
+@@ -258,7 +255,7 @@
+       case Bytecodes::_if_icmple:
+       case Bytecodes::_if_acmpeq:
+       case Bytecodes::_if_acmpne:
+-      case Bytecodes::_ifnull:   
++      case Bytecodes::_ifnull:
+       case Bytecodes::_ifnonnull:
+         // Two way branch.  Set predecessors at each destination.
+         dest = _block_map->at(bytes.next_bci());
+@@ -274,18 +271,18 @@
+         assert(dest != NULL, "branch desination must start a block.");
+         dest->add_normal_predecessor(current_block);
+         break;
+-      case Bytecodes::_goto_w:         
++      case Bytecodes::_goto_w:
+         dest = _block_map->at(bytes.get_far_dest());
+         assert(dest != NULL, "branch desination must start a block.");
+         dest->add_normal_predecessor(current_block);
+         break;
+-      case Bytecodes::_tableswitch:  
++      case Bytecodes::_tableswitch:
+         {
+           Bytecode_tableswitch *tableswitch =
+             Bytecode_tableswitch_at(bytes.cur_bcp());
+ 
+-          int len = tableswitch->length();        
+-        
++          int len = tableswitch->length();
++
+           dest = _block_map->at(bci + tableswitch->default_offset());
+           assert(dest != NULL, "branch desination must start a block.");
+           dest->add_normal_predecessor(current_block);
+@@ -294,16 +291,16 @@
+             assert(dest != NULL, "branch desination must start a block.");
+             dest->add_normal_predecessor(current_block);
+           }
+-          break; 
++          break;
+         }
+ 
+       case Bytecodes::_lookupswitch:
+         {
+           Bytecode_lookupswitch *lookupswitch =
+             Bytecode_lookupswitch_at(bytes.cur_bcp());
+-          
+-          int npairs = lookupswitch->number_of_pairs(); 
+-        
++
++          int npairs = lookupswitch->number_of_pairs();
++
+           dest = _block_map->at(bci + lookupswitch->default_offset());
+           assert(dest != NULL, "branch desination must start a block.");
+           dest->add_normal_predecessor(current_block);
+@@ -313,10 +310,10 @@
+             assert(dest != NULL, "branch desination must start a block.");
+             dest->add_normal_predecessor(current_block);
+           }
+-          break; 
++          break;
+         }
+ 
+-      case Bytecodes::_jsr: 
++      case Bytecodes::_jsr:
+         {
+           assert(bytes.is_wide()==false, "sanity check");
+           dest = _block_map->at(bytes.get_dest());
+@@ -328,7 +325,7 @@
+           break;
+         }
+       case Bytecodes::_jsr_w:
+-        {       
++        {
+           dest = _block_map->at(bytes.get_far_dest());
+           assert(dest != NULL, "branch desination must start a block.");
+           dest->add_normal_predecessor(current_block);
+@@ -338,7 +335,7 @@
+           break;
+         }
+ 
+-      case Bytecodes::_wide:           
++      case Bytecodes::_wide:
+         assert(false, "wide opcodes should not be seen here");
+         break;
+       case Bytecodes::_athrow:
+@@ -347,7 +344,7 @@
+       case Bytecodes::_freturn:
+       case Bytecodes::_dreturn:
+       case Bytecodes::_areturn:
+-      case Bytecodes::_return:         
++      case Bytecodes::_return:
+         // These opcodes are  not the normal predecessors of any other opcodes.
+         break;
+       case Bytecodes::_ret:
+@@ -358,7 +355,7 @@
+         // Bail out of there are breakpoints in here.
+         bailout = true;
+         break;
+-      default:                 
++      default:
+         // Do nothing.
+         break;
+     }
+@@ -437,7 +434,7 @@
+     _work_list = block;
+   }
+ 
+-  
++
+   while ((block = work_list_get()) != NULL) {
+     block->propagate(this);
+     NOT_PRODUCT(_total_visits++;)
+@@ -567,7 +564,7 @@
+ 
+ #endif
+ 
+-    
++
+ MethodLiveness::BasicBlock::BasicBlock(MethodLiveness *analyzer, int start, int limit) :
+          _gen((uintptr_t*)analyzer->arena()->Amalloc(BytesPerWord * analyzer->bit_map_size_words()),
+                          analyzer->bit_map_size_bits()),
+@@ -595,7 +592,7 @@
+   // _gen and _kill are cleared at the beginning of compute_gen_kill_range()
+   _gen.clear();
+   _kill.clear();
+-}  
++}
+ 
+ 
+ 
+@@ -630,7 +627,7 @@
+   bytes.reset_to_bci(start_bci());
+   bytes.set_max_bci(limit_bci());
+   compute_gen_kill_range(&bytes);
+-  
++
+ }
+ 
+ void MethodLiveness::BasicBlock::compute_gen_kill_range(ciBytecodeStream *bytes) {
+@@ -653,19 +650,19 @@
+     case Bytecodes::_nop:
+     case Bytecodes::_goto:
+     case Bytecodes::_goto_w:
+-    case Bytecodes::_aconst_null:       
++    case Bytecodes::_aconst_null:
+     case Bytecodes::_new:
+-    case Bytecodes::_iconst_m1:          
+-    case Bytecodes::_iconst_0:          
+-    case Bytecodes::_iconst_1:          
+-    case Bytecodes::_iconst_2:          
+-    case Bytecodes::_iconst_3:          
+-    case Bytecodes::_iconst_4:          
+-    case Bytecodes::_iconst_5:          
+-    case Bytecodes::_fconst_0:          
+-    case Bytecodes::_fconst_1:          
+-    case Bytecodes::_fconst_2:          
+-    case Bytecodes::_bipush:            
++    case Bytecodes::_iconst_m1:
++    case Bytecodes::_iconst_0:
++    case Bytecodes::_iconst_1:
++    case Bytecodes::_iconst_2:
++    case Bytecodes::_iconst_3:
++    case Bytecodes::_iconst_4:
++    case Bytecodes::_iconst_5:
++    case Bytecodes::_fconst_0:
++    case Bytecodes::_fconst_1:
++    case Bytecodes::_fconst_2:
++    case Bytecodes::_bipush:
+     case Bytecodes::_sipush:
+     case Bytecodes::_lconst_0:
+     case Bytecodes::_lconst_1:
+@@ -674,16 +671,16 @@
+     case Bytecodes::_ldc2_w:
+     case Bytecodes::_ldc:
+     case Bytecodes::_ldc_w:
+-    case Bytecodes::_iaload:            
+-    case Bytecodes::_faload:            
+-    case Bytecodes::_baload:    
++    case Bytecodes::_iaload:
++    case Bytecodes::_faload:
++    case Bytecodes::_baload:
+     case Bytecodes::_caload:
+     case Bytecodes::_saload:
+     case Bytecodes::_laload:
+     case Bytecodes::_daload:
+     case Bytecodes::_aaload:
+-    case Bytecodes::_iastore:           
+-    case Bytecodes::_fastore:           
++    case Bytecodes::_iastore:
++    case Bytecodes::_fastore:
+     case Bytecodes::_bastore:
+     case Bytecodes::_castore:
+     case Bytecodes::_sastore:
+@@ -700,56 +697,56 @@
+     case Bytecodes::_dup2_x2:
+     case Bytecodes::_swap:
+     case Bytecodes::_iadd:
+-    case Bytecodes::_fadd:                
+-    case Bytecodes::_isub:              
+-    case Bytecodes::_fsub:              
+-    case Bytecodes::_imul:              
+-    case Bytecodes::_fmul:              
+-    case Bytecodes::_idiv:              
+-    case Bytecodes::_fdiv:              
+-    case Bytecodes::_irem:              
+-    case Bytecodes::_frem:              
+-    case Bytecodes::_ishl:              
+-    case Bytecodes::_ishr:              
+-    case Bytecodes::_iushr:             
+-    case Bytecodes::_iand:              
+-    case Bytecodes::_ior:               
+-    case Bytecodes::_ixor:              
+-    case Bytecodes::_l2f:               
++    case Bytecodes::_fadd:
++    case Bytecodes::_isub:
++    case Bytecodes::_fsub:
++    case Bytecodes::_imul:
++    case Bytecodes::_fmul:
++    case Bytecodes::_idiv:
++    case Bytecodes::_fdiv:
++    case Bytecodes::_irem:
++    case Bytecodes::_frem:
++    case Bytecodes::_ishl:
++    case Bytecodes::_ishr:
++    case Bytecodes::_iushr:
++    case Bytecodes::_iand:
++    case Bytecodes::_ior:
++    case Bytecodes::_ixor:
++    case Bytecodes::_l2f:
+     case Bytecodes::_l2i:
+-    case Bytecodes::_d2f:               
+-    case Bytecodes::_d2i:               
++    case Bytecodes::_d2f:
++    case Bytecodes::_d2i:
+     case Bytecodes::_fcmpl:
+     case Bytecodes::_fcmpg:
+-    case Bytecodes::_ladd:              
+-    case Bytecodes::_dadd:              
+-    case Bytecodes::_lsub:              
+-    case Bytecodes::_dsub:              
+-    case Bytecodes::_lmul:              
+-    case Bytecodes::_dmul:              
+-    case Bytecodes::_ldiv:              
+-    case Bytecodes::_ddiv:              
+-    case Bytecodes::_lrem:              
+-    case Bytecodes::_drem:              
+-    case Bytecodes::_land:              
+-    case Bytecodes::_lor:               
++    case Bytecodes::_ladd:
++    case Bytecodes::_dadd:
++    case Bytecodes::_lsub:
++    case Bytecodes::_dsub:
++    case Bytecodes::_lmul:
++    case Bytecodes::_dmul:
++    case Bytecodes::_ldiv:
++    case Bytecodes::_ddiv:
++    case Bytecodes::_lrem:
++    case Bytecodes::_drem:
++    case Bytecodes::_land:
++    case Bytecodes::_lor:
+     case Bytecodes::_lxor:
+-    case Bytecodes::_ineg:              
++    case Bytecodes::_ineg:
+     case Bytecodes::_fneg:
+     case Bytecodes::_i2f:
+     case Bytecodes::_f2i:
+     case Bytecodes::_i2c:
+-    case Bytecodes::_i2s:               
++    case Bytecodes::_i2s:
+     case Bytecodes::_i2b:
+-    case Bytecodes::_lneg:              
+-    case Bytecodes::_dneg:              
+-    case Bytecodes::_l2d:               
++    case Bytecodes::_lneg:
++    case Bytecodes::_dneg:
++    case Bytecodes::_l2d:
+     case Bytecodes::_d2l:
+-    case Bytecodes::_lshl:              
+-    case Bytecodes::_lshr:              
++    case Bytecodes::_lshl:
++    case Bytecodes::_lshr:
+     case Bytecodes::_lushr:
+-    case Bytecodes::_i2l:               
+-    case Bytecodes::_i2d:               
++    case Bytecodes::_i2l:
++    case Bytecodes::_i2d:
+     case Bytecodes::_f2l:
+     case Bytecodes::_f2d:
+     case Bytecodes::_lcmp:
+@@ -760,9 +757,9 @@
+     case Bytecodes::_iflt:
+     case Bytecodes::_ifge:
+     case Bytecodes::_ifgt:
+-    case Bytecodes::_ifle:              
+-    case Bytecodes::_tableswitch:    
+-    case Bytecodes::_ireturn:           
++    case Bytecodes::_ifle:
++    case Bytecodes::_tableswitch:
++    case Bytecodes::_ireturn:
+     case Bytecodes::_freturn:
+     case Bytecodes::_if_icmpeq:
+     case Bytecodes::_if_icmpne:
+@@ -775,7 +772,7 @@
+     case Bytecodes::_if_acmpeq:
+     case Bytecodes::_if_acmpne:
+     case Bytecodes::_jsr:
+-    case Bytecodes::_jsr_w:    
++    case Bytecodes::_jsr_w:
+     case Bytecodes::_getstatic:
+     case Bytecodes::_putstatic:
+     case Bytecodes::_getfield:
+@@ -809,33 +806,33 @@
+       break;
+ 
+ 
+-    case Bytecodes::_lload:             
++    case Bytecodes::_lload:
+     case Bytecodes::_dload:
+       load_two(instruction->get_index());
+       break;
+-        
+-    case Bytecodes::_lload_0:           
++
++    case Bytecodes::_lload_0:
+     case Bytecodes::_dload_0:
+       load_two(0);
+       break;
+ 
+-    case Bytecodes::_lload_1:           
++    case Bytecodes::_lload_1:
+     case Bytecodes::_dload_1:
+       load_two(1);
+       break;
+ 
+-    case Bytecodes::_lload_2:           
++    case Bytecodes::_lload_2:
+     case Bytecodes::_dload_2:
+       load_two(2);
+       break;
+ 
+-    case Bytecodes::_lload_3:           
++    case Bytecodes::_lload_3:
+     case Bytecodes::_dload_3:
+       load_two(3);
+       break;
+-    
++
+     case Bytecodes::_iload:
+-    case Bytecodes::_iinc:  
++    case Bytecodes::_iinc:
+     case Bytecodes::_fload:
+     case Bytecodes::_aload:
+     case Bytecodes::_ret:
+@@ -866,66 +863,66 @@
+       load_one(3);
+       break;
+ 
+-    case Bytecodes::_lstore:            
++    case Bytecodes::_lstore:
+     case Bytecodes::_dstore:
+       store_two(localNum = instruction->get_index());
+       break;
+ 
+-    case Bytecodes::_lstore_0:          
++    case Bytecodes::_lstore_0:
+     case Bytecodes::_dstore_0:
+       store_two(0);
+       break;
+ 
+-    case Bytecodes::_lstore_1:          
++    case Bytecodes::_lstore_1:
+     case Bytecodes::_dstore_1:
+       store_two(1);
+       break;
+ 
+-    case Bytecodes::_lstore_2:          
++    case Bytecodes::_lstore_2:
+     case Bytecodes::_dstore_2:
+       store_two(2);
+       break;
+ 
+-    case Bytecodes::_lstore_3:          
++    case Bytecodes::_lstore_3:
+     case Bytecodes::_dstore_3:
+       store_two(3);
+       break;
+ 
+-    case Bytecodes::_istore:            
++    case Bytecodes::_istore:
+     case Bytecodes::_fstore:
+     case Bytecodes::_astore:
+       store_one(instruction->get_index());
+       break;
+ 
+-    case Bytecodes::_istore_0:          
++    case Bytecodes::_istore_0:
+     case Bytecodes::_fstore_0:
+     case Bytecodes::_astore_0:
+       store_one(0);
+       break;
+ 
+-    case Bytecodes::_istore_1:          
++    case Bytecodes::_istore_1:
+     case Bytecodes::_fstore_1:
+     case Bytecodes::_astore_1:
+       store_one(1);
+       break;
+ 
+-    case Bytecodes::_istore_2:          
++    case Bytecodes::_istore_2:
+     case Bytecodes::_fstore_2:
+     case Bytecodes::_astore_2:
+       store_one(2);
+       break;
+ 
+-    case Bytecodes::_istore_3:          
++    case Bytecodes::_istore_3:
+     case Bytecodes::_fstore_3:
+     case Bytecodes::_astore_3:
+       store_one(3);
+       break;
+-        
++
+     case Bytecodes::_wide:
+       fatal("Iterator should skip this bytecode");
+       break;
+ 
+-    default: 
++    default:
+       tty->print("unexpected opcode: %d\n", instruction->cur_bc());
+       ShouldNotReachHere();
+       break;
+@@ -1009,7 +1006,7 @@
+   ResourceMark rm;
+   BitMap g(_gen.size()); g.set_from(_gen);
+   BitMap k(_kill.size()); k.set_from(_kill);
+-#endif    
++#endif
+   if (_last_bci != bci || trueInDebug) {
+     ciBytecodeStream bytes(method);
+     bytes.reset_to_bci(bci);
+@@ -1064,4 +1061,3 @@
+ }
+ 
+ #endif // PRODUCT
+-
+diff -ruN openjdk6/hotspot/src/share/vm/compiler/methodLiveness.hpp openjdk/hotspot/src/share/vm/compiler/methodLiveness.hpp
+--- openjdk6/hotspot/src/share/vm/compiler/methodLiveness.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/compiler/methodLiveness.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)methodLiveness.hpp	1.25 07/05/05 17:05:24 JVM"
+-#endif
+ /*
+  * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class ciMethod;
+@@ -47,7 +44,7 @@
+ };
+ 
+ class MethodLiveness : public ResourceObj {
+- public: 
++ public:
+   // The BasicBlock class is used to represent a basic block in the
+   // liveness analysis.
+   class BasicBlock : public ResourceObj {
+@@ -57,7 +54,7 @@
+ 
+     // The analyzer which created this basic block.
+     MethodLiveness* _analyzer;
+-    
++
+     // The range of this basic block is [start_bci,limit_bci)
+     int _start_bci;
+     int _limit_bci;
+@@ -73,7 +70,7 @@
+     // by exceptional control flow
+     BitMap _exception_exit;
+ 
+-    // These members hold the results of the last call to 
++    // These members hold the results of the last call to
+     // compute_gen_kill_range().  _gen is the set of locals
+     // used before they are defined in the range.  _kill is the
+     // set of locals defined before they are used.
+@@ -85,7 +82,7 @@
+     // in normal (non-exceptional) control flow.  We propagate liveness
+     // information to these blocks.
+     GrowableArray<BasicBlock*>* _normal_predecessors;
+-    
++
+     // A list of all blocks which could come directly before this one
+     // in exceptional control flow.
+     GrowableArray<BasicBlock*>* _exception_predecessors;
+@@ -123,7 +120,7 @@
+     int start_bci() const { return _start_bci; }
+ 
+     int limit_bci() const { return _limit_bci; }
+-    void set_limit_bci(int limit) { _limit_bci = limit; } 
++    void set_limit_bci(int limit) { _limit_bci = limit; }
+ 
+     BasicBlock *next() const { return _next; }
+     void set_next(BasicBlock *next) { _next = next; }
+@@ -148,7 +145,7 @@
+     BasicBlock *split(int splitBci);
+ 
+     // -- Dataflow.
+-    
++
+     void compute_gen_kill(ciMethod* method);
+ 
+     // Propagate changes from this basic block
+@@ -248,10 +245,10 @@
+ 
+   static long _total_method_locals;
+   static int  _max_method_locals;
+-  
++
+   static long _total_locals_queried;
+   static long _total_live_locals_queried;
+-  
++
+   static long _total_visits;
+ 
+ #endif
+diff -ruN openjdk6/hotspot/src/share/vm/compiler/oopMap.cpp openjdk/hotspot/src/share/vm/compiler/oopMap.cpp
+--- openjdk6/hotspot/src/share/vm/compiler/oopMap.cpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/compiler/oopMap.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)oopMap.cpp	1.151 07/05/05 17:05:23 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -78,7 +75,7 @@
+   set_omv_data(NULL);
+   set_omv_count(0);
+ 
+-#ifdef ASSERT  
++#ifdef ASSERT
+   _locs_length = VMRegImpl::stack2reg(0)->value() + frame_size + arg_count;
+   _locs_used   = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
+   for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
+@@ -94,7 +91,7 @@
+   set_omv_count(0);
+   set_offset(source->offset());
+ 
+-#ifdef ASSERT  
++#ifdef ASSERT
+   _locs_length = source->_locs_length;
+   _locs_used = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
+   for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
+@@ -140,7 +137,7 @@
+ // frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
+ // slots to hold 4-byte values like ints and floats in the LP64 build.
+ void OopMap::set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional) {
+-  
++
+   assert(reg->value() < _locs_length, "too big reg value for stack size");
+   assert( _locs_used[reg->value()] == OopMapValue::unused_value, "cannot insert twice" );
+   debug_only( _locs_used[reg->value()] = x; )
+@@ -187,13 +184,17 @@
+ 
+ void OopMap::set_derived_oop(VMReg reg, VMReg derived_from_local_register ) {
+   if( reg == derived_from_local_register ) {
+-    // Actually an oop, derived shares storage with base, 
++    // Actually an oop, derived shares storage with base,
+     set_oop(reg);
+   } else {
+     set_xxx(reg, OopMapValue::derived_oop_value, derived_from_local_register);
+   }
+ }
+ 
++void OopMap::set_stack_obj(VMReg reg) {
++  set_xxx(reg, OopMapValue::stack_obj, VMRegImpl::Bad());
++}
++
+ // OopMapSet
+ 
+ OopMapSet::OopMapSet() {
+@@ -204,7 +205,7 @@
+ }
+ 
+ 
+-void OopMapSet::grow_om_data() {  
++void OopMapSet::grow_om_data() {
+   int new_size = om_size() * 2;
+   OopMap** new_data = NEW_RESOURCE_ARRAY(OopMap*, new_size);
+   memcpy(new_data,om_data(),om_size() * sizeof(OopMap*));
+@@ -295,7 +296,7 @@
+     if( at(i)->offset() >= pc_offset )
+       break;
+   }
+-  
++
+   assert( i < len, "oopmap not found" );
+ 
+   OopMap* m = at(i);
+@@ -327,7 +328,7 @@
+   OopMap* map = cb->oop_map_for_return_address(fr->pc());
+   map->print();
+   if( cb->is_nmethod() ) {
+-    nmethod* nm = (nmethod*)cb;            
++    nmethod* nm = (nmethod*)cb;
+     // native wrappers have no scope data, it is implied
+     if (nm->is_native_method()) {
+       tty->print("bci: 0 (native)");
+@@ -337,12 +338,12 @@
+     }
+   }
+   tty->cr();
+-  fr->print_on(tty);   
+-  tty->print("     "); 
++  fr->print_on(tty);
++  tty->print("     ");
+   cb->print_value_on(tty);  tty->cr();
+   reg_map->print();
+   tty->print_cr("------ ");
+-  
++
+ }
+ #endif // PRODUCT
+ 
+@@ -352,31 +353,31 @@
+ }
+ 
+ 
+-void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map, 
++void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
+                        OopClosure* oop_fn, void derived_oop_fn(oop*, oop*),
+-		       OopClosure* value_fn, OopClosure* dead_fn) {    
++                       OopClosure* value_fn, OopClosure* dead_fn) {
+   CodeBlob* cb = fr->cb();
+-  { 
+-    assert(cb != NULL, "no codeblob");      
+-  }  
++  {
++    assert(cb != NULL, "no codeblob");
++  }
+ 
+   NOT_PRODUCT(if (TraceCodeBlobStacks) trace_codeblob_maps(fr, reg_map);)
+ 
+   OopMapSet* maps = cb->oop_maps();
+   OopMap* map  = cb->oop_map_for_return_address(fr->pc());
+-  assert(map != NULL, " no ptr map found");   
+-  
++  assert(map != NULL, " no ptr map found");
++
+   // handle derived pointers first (otherwise base pointer may be
+   // changed before derived pointer offset has been collected)
+   OopMapValue omv;
+-  {    
++  {
+     OopMapStream oms(map,OopMapValue::derived_oop_value);
+     if (!oms.is_done()) {
+ #ifndef TIERED
+       COMPILER1_PRESENT(ShouldNotReachHere();)
+ #endif // !TIERED
+       // Protect the operation on the derived pointers.  This
+-      // protects the addition of derived pointers to the shared 
++      // protects the addition of derived pointers to the shared
+       // derived pointer table in DerivedPointerTable::add().
+       MutexLockerEx x(DerivedPointerTableGC_lock, Mutex::_no_safepoint_check_flag);
+       do {
+@@ -385,10 +386,10 @@
+         if ( loc != NULL ) {
+           oop *base_loc    = fr->oopmapreg_to_location(omv.content_reg(), reg_map);
+           oop *derived_loc = loc;
+-          derived_oop_fn(base_loc, derived_loc); 
++          derived_oop_fn(base_loc, derived_loc);
+         }
+-	oms.next();
+-      }  while (!oms.is_done()); 
++        oms.next();
++      }  while (!oms.is_done());
+     }
+   }
+ 
+@@ -401,7 +402,7 @@
+       if ( loc != NULL ) {
+         if ( omv.type() == OopMapValue::oop_value ) {
+ #ifdef ASSERT
+-          if (!Universe::heap()->is_in_or_null(*loc)) {
++          if (COMPILER2_PRESENT(!DoEscapeAnalysis &&) !Universe::heap()->is_in_or_null(*loc)) {
+             tty->print_cr("# Found non oop pointer.  Dumping state at failure");
+             // try to dump out some helpful debugging information
+             trace_codeblob_maps(fr, reg_map);
+@@ -420,6 +421,17 @@
+       }
+     }
+   }
++
++#ifdef COMPILER2
++  if (DoEscapeAnalysis) {
++    for (OopMapStream oms(map, OopMapValue::stack_obj); !oms.is_done(); oms.next()) {
++      omv = oms.current();
++      assert(omv.is_stack_loc(), "should refer to stack location");
++      oop loc = (oop) fr->oopmapreg_to_location(omv.reg(),reg_map);
++      oop_fn->do_oop(&loc);
++    }
++  }
++#endif // COMPILER2
+ }
+ 
+ 
+@@ -435,14 +447,14 @@
+          "already updated this map; do not 'update' it twice!" );
+   debug_only(reg_map->_update_for_id = fr->id());
+ 
+-  // Check if caller must update oop argument  
+-  assert((reg_map->include_argument_oops() || 
+-          !cb->caller_must_gc_arguments(reg_map->thread())), 
++  // Check if caller must update oop argument
++  assert((reg_map->include_argument_oops() ||
++          !cb->caller_must_gc_arguments(reg_map->thread())),
+          "include_argument_oops should already be set");
+ 
+   int nof_callee = 0;
+-  oop*        locs[2*max_saved_on_entry_reg_count+1]; 
+-  VMReg regs[2*max_saved_on_entry_reg_count+1]; 
++  oop*        locs[2*max_saved_on_entry_reg_count+1];
++  VMReg regs[2*max_saved_on_entry_reg_count+1];
+   // ("+1" because max_saved_on_entry_reg_count might be zero)
+ 
+   // Scan through oopmap and find location of all callee-saved registers
+@@ -452,7 +464,7 @@
+ 
+   OopMap* map  = cb->oop_map_for_return_address(pc);
+ 
+-  assert(map != NULL, " no ptr map found"); 
++  assert(map != NULL, " no ptr map found");
+ 
+   OopMapValue omv;
+   for(OopMapStream oms(map,OopMapValue::callee_saved_value); !oms.is_done(); oms.next()) {
+@@ -465,7 +477,7 @@
+ 
+   // Check that runtime stubs save all callee-saved registers
+ #ifdef COMPILER2
+-  assert(cb->is_compiled_by_c1() || !cb->is_runtime_stub() || 
++  assert(cb->is_compiled_by_c1() || !cb->is_runtime_stub() ||
+          (nof_callee >= SAVED_ON_ENTRY_REG_COUNT || nof_callee >= C_SAVED_ON_ENTRY_REG_COUNT),
+          "must save all");
+ #endif // COMPILER2
+@@ -513,6 +525,9 @@
+     tty->print("Derived_oop_" );
+     optional->print();
+     break;
++  case OopMapValue::stack_obj:
++    tty->print("Stack");
++    break;
+   default:
+     ShouldNotReachHere();
+   }
+@@ -527,25 +542,25 @@
+ }
+ 
+ 
+-void OopMap::print() const {
++void OopMap::print_on(outputStream* st) const {
+   OopMapValue omv;
+   for(OopMapStream oms((OopMap*)this); !oms.is_done(); oms.next()) {
+     omv = oms.current();
+-    omv.print();
++    omv.print_on(st);
+   }
+ }
+ 
+ 
+-void OopMapSet::print() const {
++void OopMapSet::print_on(outputStream* st) const {
+   int i, len = om_count();
+ 
+-  tty->print_cr("OopMapSet contains %d OopMaps\n",len);
+-  
++  st->print_cr("OopMapSet contains %d OopMaps\n",len);
++
+   for( i = 0; i < len; i++) {
+     OopMap* m = at(i);
+-    tty->print_cr("OopMap #%d offset:%p",i,m->offset());
+-    m->print();
+-    tty->print_cr("\n");
++    st->print_cr("OopMap #%d offset:%p",i,m->offset());
++    m->print_on(st);
++    st->print_cr("\n");
+   }
+ }
+ #endif // !PRODUCT
+@@ -558,7 +573,7 @@
+ class DerivedPointerEntry : public CHeapObj {
+  private:
+   oop*     _location; // Location of derived pointer (also pointing to the base)
+-  intptr_t _offset;   // Offset from base pointer   
++  intptr_t _offset;   // Offset from base pointer
+  public:
+   DerivedPointerEntry(oop* location, intptr_t offset) { _location = location; _offset = offset; }
+   oop* location()    { return _location; }
+@@ -587,48 +602,48 @@
+ intptr_t value_of_loc(oop *pointer) { return (intptr_t)(*pointer); }
+ 
+ 
+-void DerivedPointerTable::add(oop *derived_loc, oop *base_loc) {      
+-  assert(Universe::heap()->is_in_or_null(*base_loc), "not an oop");  
++void DerivedPointerTable::add(oop *derived_loc, oop *base_loc) {
++  assert(Universe::heap()->is_in_or_null(*base_loc), "not an oop");
+   assert(derived_loc != base_loc, "Base and derived in same location");
+   if (_active) {
+-    assert(*derived_loc != (oop)base_loc, "location already added");    
++    assert(*derived_loc != (oop)base_loc, "location already added");
+     assert(_list != NULL, "list must exist");
+     intptr_t offset = value_of_loc(derived_loc) - value_of_loc(base_loc);
+     assert(offset >= -1000000, "wrong derived pointer info");
+ 
+     if (TraceDerivedPointers) {
+       tty->print_cr(
+-        "Add derived pointer@" INTPTR_FORMAT 
+-	" - Derived: " INTPTR_FORMAT 
+-	" Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: %d)", 
++        "Add derived pointer@" INTPTR_FORMAT
++        " - Derived: " INTPTR_FORMAT
++        " Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: %d)",
+         derived_loc, (address)*derived_loc, (address)*base_loc, base_loc, offset
+       );
+-    }    
++    }
+     // Set derived oop location to point to base.
+-    *derived_loc = (oop)base_loc;  
++    *derived_loc = (oop)base_loc;
+     assert_lock_strong(DerivedPointerTableGC_lock);
+     DerivedPointerEntry *entry = new DerivedPointerEntry(derived_loc, offset);
+-    _list->append(entry);    
++    _list->append(entry);
+   }
+ }
+ 
+ 
+-void DerivedPointerTable::update_pointers() {  
++void DerivedPointerTable::update_pointers() {
+   assert(_list != NULL, "list must exist");
+   for(int i = 0; i < _list->length(); i++) {
+     DerivedPointerEntry* entry = _list->at(i);
+     oop* derived_loc = entry->location();
+     intptr_t offset  = entry->offset();
+     // The derived oop was setup to point to location of base
+-    oop  base        = **(oop**)derived_loc; 
++    oop  base        = **(oop**)derived_loc;
+     assert(Universe::heap()->is_in_or_null(base), "must be an oop");
+-    
++
+     *derived_loc = (oop)(((address)base) + offset);
+     assert(value_of_loc(derived_loc) - value_of_loc(&base) == offset, "sanity check");
+ 
+     if (TraceDerivedPointers) {
+-      tty->print_cr("Updating derived pointer@" INTPTR_FORMAT 
+-		    " - Derived: " INTPTR_FORMAT "  Base: " INTPTR_FORMAT " (Offset: %d)",
++      tty->print_cr("Updating derived pointer@" INTPTR_FORMAT
++                    " - Derived: " INTPTR_FORMAT "  Base: " INTPTR_FORMAT " (Offset: %d)",
+           derived_loc, (address)*derived_loc, (address)base, offset);
+     }
+ 
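
Apart from reindentation, the oopMap.cpp hunks above make one functional change for C2 escape analysis: a new OopMap::set_stack_obj() entry point, a heap-containment assert that is relaxed when DoEscapeAnalysis is on, and a loop at the end of OopMapSet::all_do() that hands every stack_obj slot to the oop closure so the collector also visits objects allocated on the frame. The iteration shape is "filter one map by entry type, apply a closure to each slot"; a simplified sketch with assumed stand-in types (the real VMReg/OopMapStream machinery is richer):

    #include <functional>
    #include <vector>

    enum EntryType { oop_entry, stack_obj_entry /* ... */ };

    struct MapEntry {
      EntryType type;
      void**    slot;   // location of the value in the frame
    };

    // Mirrors: for (OopMapStream oms(map, OopMapValue::stack_obj);
    //               !oms.is_done(); oms.next()) { ... do_oop(...); }
    void for_each_of_type(const std::vector<MapEntry>& map, EntryType t,
                          const std::function<void(void**)>& visit) {
      for (const MapEntry& e : map)
        if (e.type == t) visit(e.slot);   // e.g. feed the GC's do_oop closure
    }
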
+diff -ruN openjdk6/hotspot/src/share/vm/compiler/oopMap.hpp openjdk/hotspot/src/share/vm/compiler/oopMap.hpp
+--- openjdk6/hotspot/src/share/vm/compiler/oopMap.hpp	2008-02-28 05:02:34.000000000 -0500
++++ openjdk/hotspot/src/share/vm/compiler/oopMap.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)oopMap.hpp	1.79 07/05/05 17:05:24 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,10 +19,10 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+-// Interface for generating the frame map for compiled code.  A frame map 
++// Interface for generating the frame map for compiled code.  A frame map
+ // describes for a specific pc whether each register and frame stack slot is:
+ //   Oop         - A GC root for current frame
+ //   Value       - Live non-oop, non-float value: int, either half of double
+@@ -49,9 +46,9 @@
+ 
+ public:
+   // Constants
+-  enum { type_bits                = 5,
++  enum { type_bits                = 6,
+          register_bits            = BitsPerShort - type_bits };
+- 
++
+   enum { type_shift               = 0,
+          register_shift           = type_bits };
+ 
+@@ -66,7 +63,8 @@
+          value_value = 2,
+          dead_value = 4,
+          callee_saved_value = 8,
+-         derived_oop_value= 16 };
++         derived_oop_value= 16,
++         stack_obj = 32 };
+ 
+   // Constructors
+   OopMapValue () { set_value(0); set_content_reg(VMRegImpl::Bad()); }
+@@ -90,17 +88,19 @@
+   }
+ 
+   // Querying
+-  bool is_oop()               { return mask_bits(value(), type_mask_in_place) == oop_value; } 
+-  bool is_value()             { return mask_bits(value(), type_mask_in_place) == value_value; } 
+-  bool is_dead()              { return mask_bits(value(), type_mask_in_place) == dead_value; } 
+-  bool is_callee_saved()      { return mask_bits(value(), type_mask_in_place) == callee_saved_value; } 
+-  bool is_derived_oop()       { return mask_bits(value(), type_mask_in_place) == derived_oop_value; } 
+-
+-  void set_oop()              { set_value((value() & register_mask_in_place) | oop_value); } 
+-  void set_value()            { set_value((value() & register_mask_in_place) | value_value); } 
+-  void set_dead()             { set_value((value() & register_mask_in_place) | dead_value); } 
+-  void set_callee_saved()     { set_value((value() & register_mask_in_place) | callee_saved_value); } 
+-  void set_derived_oop()      { set_value((value() & register_mask_in_place) | derived_oop_value); } 
++  bool is_oop()               { return mask_bits(value(), type_mask_in_place) == oop_value; }
++  bool is_value()             { return mask_bits(value(), type_mask_in_place) == value_value; }
++  bool is_dead()              { return mask_bits(value(), type_mask_in_place) == dead_value; }
++  bool is_callee_saved()      { return mask_bits(value(), type_mask_in_place) == callee_saved_value; }
++  bool is_derived_oop()       { return mask_bits(value(), type_mask_in_place) == derived_oop_value; }
++  bool is_stack_obj()         { return mask_bits(value(), type_mask_in_place) == stack_obj; }
++
++  void set_oop()              { set_value((value() & register_mask_in_place) | oop_value); }
++  void set_value()            { set_value((value() & register_mask_in_place) | value_value); }
++  void set_dead()             { set_value((value() & register_mask_in_place) | dead_value); }
++  void set_callee_saved()     { set_value((value() & register_mask_in_place) | callee_saved_value); }
++  void set_derived_oop()      { set_value((value() & register_mask_in_place) | derived_oop_value); }
++  void set_stack_obj()        { set_value((value() & register_mask_in_place) | stack_obj); }
+ 
+   VMReg reg() const { return VMRegImpl::as_VMReg(mask_bits(value(), register_mask_in_place) >> register_shift); }
+   oop_types type() const      { return (oop_types)mask_bits(value(), type_mask_in_place); }
+@@ -109,7 +109,7 @@
+     return (p->value()  == (p->value() & register_mask));
+   }
+ 
+-  void set_reg_type(VMReg p, oop_types t) { 
++  void set_reg_type(VMReg p, oop_types t) {
+     set_value((p->value() << register_shift) | t);
+     assert(reg() == p, "sanity check" );
+     assert(type() == t, "sanity check" );
+@@ -137,7 +137,7 @@
+   friend class OopMapStream;
+   friend class VMStructs;
+  private:
+-  int  _pc_offset; 
++  int  _pc_offset;
+   int  _omv_count;
+   int  _omv_data_size;
+   unsigned char* _omv_data;
+@@ -169,7 +169,7 @@
+ 
+   // Check to avoid double insertion
+   debug_only(OopMapValue::oop_types locs_used( int indx ) { return _locs_used[indx]; })
+-  
++
+   // Construction
+   // frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
+   // slots to hold 4-byte values like ints and floats in the LP64 build.
+@@ -178,6 +178,7 @@
+   void set_dead ( VMReg local);
+   void set_callee_saved( VMReg local, VMReg caller_machine_register );
+   void set_derived_oop ( VMReg local, VMReg derived_from_local_register );
++  void set_stack_obj( VMReg local);
+   void set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional);
+ 
+   int heap_size() const;
+@@ -191,7 +192,8 @@
+   }
+ 
+   // Printing
+-  void print() const PRODUCT_RETURN;
++  void print_on(outputStream* st) const PRODUCT_RETURN;
++  void print() const { print_on(tty); }
+ };
+ 
+ 
+@@ -223,29 +225,30 @@
+   // Collect OopMaps.
+   void add_gc_map(int pc, OopMap* map);
+ 
+-  // Returns the only oop map. Used for reconstructing 
++  // Returns the only oop map. Used for reconstructing
+   // Adapter frames during deoptimization
+   OopMap* singular_oop_map();
+ 
+   // returns OopMap in that is anchored to the pc
+-  OopMap* find_map_at_offset(int pc_offset) const; 
++  OopMap* find_map_at_offset(int pc_offset) const;
+ 
+   int heap_size() const;
+   void copy_to(address addr);
+ 
+   // Iterates through frame for a compiled method
+   static void oops_do            (const frame* fr,
+-				  const RegisterMap* reg_map, OopClosure* f); 
++                                  const RegisterMap* reg_map, OopClosure* f);
+   static void update_register_map(const frame* fr, RegisterMap *reg_map);
+ 
+   // Iterates through frame for a compiled method for dead ones and values, too
+   static void all_do(const frame* fr, const RegisterMap* reg_map,
+                      OopClosure* oop_fn,
+-		     void derived_oop_fn(oop* base, oop* derived),
++                     void derived_oop_fn(oop* base, oop* derived),
+                      OopClosure* value_fn, OopClosure* dead_fn);
+ 
+   // Printing
+-  void print() const PRODUCT_RETURN;
++  void print_on(outputStream* st) const PRODUCT_RETURN;
++  void print() const { print_on(tty); }
+ };
+ 
+ 
+@@ -277,12 +280,12 @@
+ class DerivedPointerTable : public AllStatic {
+   friend class VMStructs;
+  private:
+-   static GrowableArray<DerivedPointerEntry*>* _list; 
++   static GrowableArray<DerivedPointerEntry*>* _list;
+    static bool _active;                      // do not record pointers for verify pass etc.
+- public:  
++ public:
+   static void clear();                       // Called before scavenge/GC
+   static void add(oop *derived, oop *base);  // Called during scavenge/GC
+-  static void update_pointers();             // Called after  scavenge/GC  
++  static void update_pointers();             // Called after  scavenge/GC
+   static bool is_empty()                     { return _list == NULL || _list->is_empty(); }
+   static bool is_active()                    { return _active; }
+   static void set_active(bool value)         { _active = value; }
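
The oopMap.hpp hunk above widens type_bits from 5 to 6 because OopMapValue packs a one-hot type flag in the low bits and the register index above it: the existing flags occupy bits 0..4 (1, 2, 4, 8, 16), and the new stack_obj flag is 32 = 1 << 5, so the register field has to start one bit higher. A standalone sketch of the packing, with assumed helper names:

    #include <cassert>
    #include <cstdint>

    enum OopType : uint16_t {           // one-hot flags, as in the hunk above
      oop_value = 1, value_value = 2, dead_value = 4,
      callee_saved_value = 8, derived_oop_value = 16, stack_obj = 32
    };

    const int      type_bits = 6;                  // was 5 before stack_obj
    const uint16_t type_mask = (1 << type_bits) - 1;

    uint16_t pack(uint16_t reg, OopType t) { return uint16_t(reg << type_bits) | t; }
    OopType  type_of(uint16_t v)           { return OopType(v & type_mask); }
    uint16_t reg_of(uint16_t v)            { return uint16_t(v >> type_bits); }

    int main() {
      uint16_t v = pack(/*reg=*/7, stack_obj);
      assert(type_of(v) == stack_obj);   // 32 needs the sixth type bit
      assert(reg_of(v) == 7);
      return 0;
    }
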
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)binaryTreeDictionary.cpp	1.37 07/05/05 17:05:43 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -74,7 +71,7 @@
+ TreeList* TreeList::as_TreeList(HeapWord* addr, size_t size) {
+   TreeChunk* tc = (TreeChunk*) addr;
+   assert(size >= sizeof(TreeChunk), "Chunk is too small for a TreeChunk");
+-  assert(tc->size() == 0 && tc->prev() == NULL && tc->next() == NULL, 
++  assert(tc->size() == 0 && tc->prev() == NULL && tc->next() == NULL,
+     "Space should be clear");
+   tc->setSize(size);
+   tc->linkPrev(NULL);
+@@ -89,7 +86,7 @@
+   FreeChunk* list = head();
+   assert(!list || list != list->next(), "Chunk on list twice");
+   assert(tc != NULL, "Chunk being removed is NULL");
+-  assert(parent() == NULL || this == parent()->left() || 
++  assert(parent() == NULL || this == parent()->left() ||
+     this == parent()->right(), "list is inconsistent");
+   assert(tc->isFree(), "Header is not marked correctly");
+   assert(head() == NULL || head()->prev() == NULL, "list invariant");
+@@ -106,7 +103,7 @@
+     // because the first chunk is also acting as the tree node.
+     // When coalescing happens, however, the first chunk in the a tree
+     // list can be the start of a free range.  Free ranges are removed
+-    // from the free lists so that they are not available to be 
++    // from the free lists so that they are not available to be
+     // allocated when the sweeper yields (giving up the free list lock)
+     // to allow mutator activity.  If this chunk is the first in the
+     // list and is not the last in the list, do the work to copy the
+@@ -124,28 +121,28 @@
+       // This can be slow for a long list.  Consider having
+       // an option that does not allow the first chunk on the
+       // list to be coalesced.
+-      for (TreeChunk* curTC = nextTC; curTC != NULL; 
+-	  curTC = TreeChunk::as_TreeChunk(curTC->next())) {
++      for (TreeChunk* curTC = nextTC; curTC != NULL;
++          curTC = TreeChunk::as_TreeChunk(curTC->next())) {
+         curTC->set_list(retTL);
+       }
+       // Fix the parent to point to the new TreeList.
+       if (retTL->parent() != NULL) {
+-	if (this == retTL->parent()->left()) {
+-	  retTL->parent()->setLeft(retTL);
+-	} else {
+-	  assert(this == retTL->parent()->right(), "Parent is incorrect");
+-	  retTL->parent()->setRight(retTL);
+-	}
++        if (this == retTL->parent()->left()) {
++          retTL->parent()->setLeft(retTL);
++        } else {
++          assert(this == retTL->parent()->right(), "Parent is incorrect");
++          retTL->parent()->setRight(retTL);
++        }
+       }
+       // Fix the children's parent pointers to point to the
+       // new list.
+       assert(right() == retTL->right(), "Should have been copied");
+       if (retTL->right() != NULL) {
+-	retTL->right()->setParent(retTL);
++        retTL->right()->setParent(retTL);
+       }
+       assert(left() == retTL->left(), "Should have been copied");
+       if (retTL->left() != NULL) {
+-	retTL->left()->setParent(retTL);
++        retTL->left()->setParent(retTL);
+       }
+       retTL->link_head(nextTC);
+       assert(nextTC->isFree(), "Should be a free chunk");
+@@ -160,40 +157,40 @@
+   }
+ 
+   // Below this point the embeded TreeList being used for the
+-  // tree node may have changed. Don't use "this" 
++  // tree node may have changed. Don't use "this"
+   // TreeList*.
+   // chunk should still be a free chunk (bit set in _prev)
+-  assert(!retTL->head() || retTL->size() == retTL->head()->size(), 
++  assert(!retTL->head() || retTL->size() == retTL->head()->size(),
+     "Wrong sized chunk in list");
+   debug_only(
+-    tc->linkPrev(NULL);  
++    tc->linkPrev(NULL);
+     tc->linkNext(NULL);
+     tc->set_list(NULL);
+     bool prev_found = false;
+     bool next_found = false;
+-    for (FreeChunk* curFC = retTL->head(); 
+-	 curFC != NULL; curFC = curFC->next()) {
++    for (FreeChunk* curFC = retTL->head();
++         curFC != NULL; curFC = curFC->next()) {
+       assert(curFC != tc, "Chunk is still in list");
+       if (curFC == prevFC) {
+-	prev_found = true;
++        prev_found = true;
+       }
+       if (curFC == nextTC) {
+-	next_found = true;
++        next_found = true;
+       }
+     }
+     assert(prevFC == NULL || prev_found, "Chunk was lost from list");
+     assert(nextTC == NULL || next_found, "Chunk was lost from list");
+     assert(retTL->parent() == NULL ||
+-	   retTL == retTL->parent()->left() || 
+-	   retTL == retTL->parent()->right(),
++           retTL == retTL->parent()->left() ||
++           retTL == retTL->parent()->right(),
+            "list is inconsistent");
+   )
+   retTL->decrement_count();
+ 
+   assert(tc->isFree(), "Should still be a free chunk");
+-  assert(retTL->head() == NULL || retTL->head()->prev() == NULL, 
++  assert(retTL->head() == NULL || retTL->head()->prev() == NULL,
+     "list invariant");
+-  assert(retTL->tail() == NULL || retTL->tail()->next() == NULL, 
++  assert(retTL->tail() == NULL || retTL->tail()->next() == NULL,
+     "list invariant");
+   return retTL;
+ }
+@@ -205,7 +202,7 @@
+   assert(!verifyChunkInFreeLists(chunk), "Double entry");
+   assert(head() == NULL || head()->prev() == NULL, "list invariant");
+   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
+-  
++
+   FreeChunk* fc = tail();
+   fc->linkAfter(chunk);
+   link_tail(chunk);
+@@ -218,7 +215,7 @@
+ }
+ 
+ // Add this chunk at the head of the list.  "At the head of the list"
+-// is defined to be after the chunk pointer to by head().  This is 
++// is defined to be after the chunk pointer to by head().  This is
+ // because the TreeList is embedded in the first TreeChunk in the
+ // list.  See the definition of TreeChunk.
+ void TreeList::returnChunkAtHead(TreeChunk* chunk) {
+@@ -325,7 +322,7 @@
+   for (prevTL = curTL = root(); curTL != NULL;) {
+     if (curTL->size() == size) {        // exact match
+       break;
+-    } 
++    }
+     prevTL = curTL;
+     if (curTL->size() < size) {        // proceed to right sub-tree
+       curTL = curTL->right();
+@@ -346,7 +343,7 @@
+   if (curTL != NULL) {
+     assert(curTL->size() >= size, "size inconsistency");
+     if (UseCMSAdaptiveFreeLists) {
+-  
++
+       // A candidate chunk has been found.  If it is already under
+       // populated, get a chunk associated with the hint for this
+       // chunk.
+@@ -354,29 +351,29 @@
+         /* Use the hint to find a size with a surplus, and reset the hint. */
+         TreeList* hintTL = curTL;
+         while (hintTL->hint() != 0) {
+-  	  assert(hintTL->hint() == 0 || hintTL->hint() > hintTL->size(),
+-	    "hint points in the wrong direction");
++          assert(hintTL->hint() == 0 || hintTL->hint() > hintTL->size(),
++            "hint points in the wrong direction");
+           hintTL = findList(hintTL->hint());
+-  	  assert(curTL != hintTL, "Infinite loop");
+-          if (hintTL == NULL || 
+-	      hintTL == curTL /* Should not happen but protect against it */ ) {
+-  	    // No useful hint.  Set the hint to NULL and go on.
++          assert(curTL != hintTL, "Infinite loop");
++          if (hintTL == NULL ||
++              hintTL == curTL /* Should not happen but protect against it */ ) {
++            // No useful hint.  Set the hint to NULL and go on.
+             curTL->set_hint(0);
+             break;
+           }
+           assert(hintTL->size() > size, "hint is inconsistent");
+           if (hintTL->surplus() > 0) {
+-  	    // The hint led to a list that has a surplus.  Use it.
+-  	    // Set the hint for the candidate to an overpopulated
+-  	    // size.  
++            // The hint led to a list that has a surplus.  Use it.
++            // Set the hint for the candidate to an overpopulated
++            // size.
+             curTL->set_hint(hintTL->size());
+             // Change the candidate.
+             curTL = hintTL;
+             break;
+           }
+-  	  // The evm code reset the hint of the candidate as
+-  	  // at an interrim point.  Why?  Seems like this leaves
+-  	  // the hint pointing to a list that didn't work.
++          // The evm code reset the hint of the candidate as
++          // at an interim point.  Why?  Seems like this leaves
++          // the hint pointing to a list that didn't work.
+           // curTL->set_hint(hintTL->size());
+         }
+       }
+@@ -388,7 +385,7 @@
+     retTC = curTL->first_available();
+     assert((retTC != NULL) && (curTL->count() > 0),
+       "A list in the binary tree should not be NULL");
+-    assert(retTC->size() >= size, 
++    assert(retTC->size() >= size,
+       "A chunk of the wrong size was found");
+     removeChunkFromTree(retTC);
+     assert(retTC->isFree(), "Header is not marked correctly");
+@@ -405,8 +402,8 @@
+   for (curTL = root(); curTL != NULL;) {
+     if (curTL->size() == size) {        // exact match
+       break;
+-    } 
+-    
++    }
++
+     if (curTL->size() < size) {        // proceed to right sub-tree
+       curTL = curTL->right();
+     } else {                           // proceed to left sub-tree
+@@ -438,7 +435,7 @@
+   }
+ }
+ 
+-// Remove the current chunk from the tree.  If it is not the last 
++// Remove the current chunk from the tree.  If it is not the last
+ // chunk in a list on a tree node, just unlink it.
+ // If it is the last chunk in the list (the next link is NULL),
+ // remove the node and repair the tree.
+@@ -455,15 +452,15 @@
+     if (tl == _root) {
+       if ((_root->left() == NULL) && (_root->right() == NULL)) {
+         if (_root->count() == 1) {
+-	  assert(_root->head() == tc, "Should only be this one chunk");
+-	  removing_only_chunk = true;
++          assert(_root->head() == tc, "Should only be this one chunk");
++          removing_only_chunk = true;
+         }
+       }
+     }
+   )
+   assert(tl != NULL, "List should be set");
+-  assert(tl->parent() == NULL || tl == tl->parent()->left() || 
+-	 tl == tl->parent()->right(), "list is inconsistent");
++  assert(tl->parent() == NULL || tl == tl->parent()->left() ||
++         tl == tl->parent()->right(), "list is inconsistent");
+ 
+   bool complicatedSplice = false;
+ 
+@@ -473,8 +470,8 @@
+   TreeList* replacementTL = tl->removeChunkReplaceIfNeeded(tc);
+   assert(tc->isFree(), "Chunk should still be free");
+   assert(replacementTL->parent() == NULL ||
+-	 replacementTL == replacementTL->parent()->left() || 
+-	 replacementTL == replacementTL->parent()->right(),
++         replacementTL == replacementTL->parent()->left() ||
++         replacementTL == replacementTL->parent()->right(),
+          "list is inconsistent");
+   if (tl == root()) {
+     assert(replacementTL->parent() == NULL, "Incorrectly replacing root");
+@@ -482,7 +479,7 @@
+   }
+   debug_only(
+     if (tl != replacementTL) {
+-      assert(replacementTL->head() != NULL, 
++      assert(replacementTL->head() != NULL,
+         "If the tree list was replaced, it should not be a NULL list");
+       TreeList* rhl = replacementTL->head_as_TreeChunk()->list();
+       TreeList* rtl = TreeChunk::as_TreeChunk(replacementTL->tail())->list();
+@@ -494,8 +491,8 @@
+ 
+   // Does the tree need to be repaired?
+   if (replacementTL->count() == 0) {
+-    assert(replacementTL->head() == NULL && 
+-	   replacementTL->tail() == NULL, "list count is incorrect");
++    assert(replacementTL->head() == NULL &&
++           replacementTL->tail() == NULL, "list count is incorrect");
+     // Find the replacement node for the (soon to be empty) node being removed.
+     // if we have a single (or no) child, splice child in our stead
+     if (replacementTL->left() == NULL) {
+@@ -520,14 +517,14 @@
+       verifyTree();
+     }
+     // first make newTL my parent's child
+-    if ((parentTL = replacementTL->parent()) == NULL) {  
++    if ((parentTL = replacementTL->parent()) == NULL) {
+       // newTL should be root
+       assert(tl == root(), "Incorrectly replacing root");
+       set_root(newTL);
+       if (newTL != NULL) {
+         newTL->clearParent();
+       }
+-    } else if (parentTL->right() == replacementTL) {   
++    } else if (parentTL->right() == replacementTL) {
+       // replacementTL is a right child
+       parentTL->setRight(newTL);
+     } else {                                // replacementTL is a left child
+@@ -535,8 +532,8 @@
+       parentTL->setLeft(newTL);
+     }
+     debug_only(replacementTL->clearParent();)
+-    if (complicatedSplice) {  // we need newTL to get replacementTL's 
+-			      // two children
++    if (complicatedSplice) {  // we need newTL to get replacementTL's
++                              // two children
+       assert(newTL != NULL &&
+              newTL->left() == NULL && newTL->right() == NULL,
+             "newTL should not have encumbrances from the past");
+@@ -544,9 +541,9 @@
+       // assert(replacementTL->left() != NULL && replacementTL->right() != NULL,
+       //       "else !complicatedSplice");
+       // ... however, the above assertion is too strong because we aren't
+-      // guaranteed that replacementTL->right() is still NULL. 
++      // guaranteed that replacementTL->right() is still NULL.
+       // Recall that we removed
+-      // the right sub-tree minimum from replacementTL. 
++      // the right sub-tree minimum from replacementTL.
+       // That may well have been its right
+       // child! So we'll just assert half of the above:
+       assert(replacementTL->left() != NULL, "else !complicatedSplice");
+@@ -557,9 +554,9 @@
+         replacementTL->clearLeft();
+       )
+     }
+-    assert(replacementTL->right() == NULL && 
+-	   replacementTL->left() == NULL && 
+-	   replacementTL->parent() == NULL,
++    assert(replacementTL->right() == NULL &&
++           replacementTL->left() == NULL &&
++           replacementTL->parent() == NULL,
+         "delete without encumbrances");
+   }
+ 
+@@ -643,10 +640,10 @@
+   }
+   // XXX: do i need to clear the FreeChunk fields, let me do it just in case
+   // Revisit this later
+-  
++
+   fc->clearNext();
+   fc->linkPrev(NULL);
+-  
++
+   // work down from the _root, looking for insertion point
+   for (prevTL = curTL = root(); curTL != NULL;) {
+     if (curTL->size() == size)  // exact match
+@@ -746,7 +743,7 @@
+ }
+ 
+ size_t BinaryTreeDictionary::numFreeBlocks() const {
+-  assert(totalFreeBlocksInTree(root()) == totalFreeBlocks(), 
++  assert(totalFreeBlocksInTree(root()) == totalFreeBlocks(),
+          "_totalFreeBlocks inconsistency");
+   return totalFreeBlocks();
+ }
+@@ -843,14 +840,14 @@
+     }
+   }
+ };
+-       
++
+ // For each list in the tree, calculate the desired, desired
+ // coalesce, count before sweep, and surplus before sweep.
+ class BeginSweepClosure : public AscendTreeCensusClosure {
+   double _percentage;
+   float _inter_sweep_current;
+   float _inter_sweep_estimate;
+-  
++
+  public:
+   BeginSweepClosure(double p, float inter_sweep_current,
+                               float inter_sweep_estimate) :
+@@ -916,8 +913,8 @@
+     FreeChunk* item = fl->head();
+     while (item != NULL) {
+       if (item->end() == _target) {
+-	_found = item;
+-	return true;
++        _found = item;
++        return true;
+       }
+       item = item->next();
+     }
+@@ -950,12 +947,12 @@
+       fl->set_returnedBytes(0);
+     }
+   };
+-  
++
+   void BinaryTreeDictionary::initializeDictReturnedBytes() {
+     InitializeDictReturnedBytesClosure idrb;
+     idrb.do_tree(root());
+   }
+-  
++
+   class ReturnedBytesClosure : public AscendTreeCensusClosure {
+     size_t _dictReturnedBytes;
+    public:
+@@ -965,11 +962,11 @@
+     }
+     size_t dictReturnedBytes() { return _dictReturnedBytes; }
+   };
+-  
++
+   size_t BinaryTreeDictionary::sumDictReturnedBytes() {
+     ReturnedBytesClosure rbc;
+     rbc.do_tree(root());
+-  
++
+     return rbc.dictReturnedBytes();
+   }
+ 
+@@ -1014,7 +1011,7 @@
+   setTreeHintsClosure(size_t v) { hint = v; }
+   void do_list(FreeList* fl) {
+     fl->set_hint(hint);
+-    assert(fl->hint() == 0 || fl->hint() > fl->size(), 
++    assert(fl->hint() == 0 || fl->hint() > fl->size(),
+       "Current hint is inconsistent");
+     if (fl->surplus() > 0) {
+       hint = fl->size();
+@@ -1053,7 +1050,7 @@
+   }
+   clearTreeCensus();
+ }
+-    
++
+ // Print summary statistics
+ void BinaryTreeDictionary::reportStatistics() const {
+   verify_par_locked();
+@@ -1077,7 +1074,7 @@
+   size_t _totalFree;
+   AllocationStats _totals;
+   size_t _count;
+- 
++
+  public:
+   printTreeCensusClosure() {
+     _totalFree = 0;
+@@ -1100,7 +1097,7 @@
+                fl->desired(), fl->prevSweep(), fl->beforeSweep(), fl->count(),
+                fl->coalBirths(), fl->coalDeaths(), fl->splitBirths(),
+                fl->splitDeaths());
+-  
++
+     increment_totalFree_by(fl->count() * fl->size());
+     increment_count_by(fl->count());
+     totals()->set_bfrSurp(totals()->bfrSurp() + fl->bfrSurp());
+@@ -1115,7 +1112,7 @@
+ };
+ 
+ void BinaryTreeDictionary::printDictCensus(void) const {
+-  
++
+   gclog_or_tty->print("\nBinaryTree\n");
+   gclog_or_tty->print(
+              "%4s\t\t" "%7s\t"   "%7s\t"    "%7s\t"    "%7s\t"    "%7s\t"
+@@ -1135,15 +1132,15 @@
+              "%s\t\t"  "%7d\t"    "%7d\t"     "%7d\t"    "%7d\t"
+              "%7d\t"   "%7d\t"    "%7d\t"     "%7d\t"    "%7d\t"    "\n",
+              "totl",
+-             ptc.totals()->bfrSurp(), 
+-	     ptc.totals()->surplus(), 
+-	     ptc.totals()->prevSweep(), 
+-	     ptc.totals()->beforeSweep(), 
+-	     ptc.count(), 
+-	     ptc.totals()->coalBirths(), 
+-	     ptc.totals()->coalDeaths(), 
+-	     ptc.totals()->splitBirths(), 
+-	     ptc.totals()->splitDeaths());
++             ptc.totals()->bfrSurp(),
++             ptc.totals()->surplus(),
++             ptc.totals()->prevSweep(),
++             ptc.totals()->beforeSweep(),
++             ptc.count(),
++             ptc.totals()->coalBirths(),
++             ptc.totals()->coalDeaths(),
++             ptc.totals()->splitBirths(),
++             ptc.totals()->splitDeaths());
+   gclog_or_tty->print("totalFree(words): %7d growth: %8.5f  deficit: %8.5f\n",
+               ptc.totalFree(),
+               (double)(ptc.totals()->splitBirths()+ptc.totals()->coalBirths()
+@@ -1170,7 +1167,7 @@
+   size_t ct = 0;
+   for (FreeChunk* curFC = tl->head(); curFC != NULL; curFC = curFC->next()) {
+     ct++;
+-    assert(curFC->prev() == NULL || curFC->prev()->isFree(), 
++    assert(curFC->prev() == NULL || curFC->prev()->isFree(),
+       "Chunk should be free");
+   }
+   return ct;
+@@ -1189,10 +1186,10 @@
+          "parent<-/->right");;
+   guarantee(tl->left() == NULL  || tl->left()->size()    <  tl->size(),
+          "parent !> left");
+-  guarantee(tl->right() == NULL || tl->right()->size()   >  tl->size(), 
++  guarantee(tl->right() == NULL || tl->right()->size()   >  tl->size(),
+          "parent !< left");
+   guarantee(tl->head() == NULL || tl->head()->isFree(), "!Free");
+-  guarantee(tl->head() == NULL || tl->head_as_TreeChunk()->list() == tl, 
++  guarantee(tl->head() == NULL || tl->head_as_TreeChunk()->list() == tl,
+     "list inconsistency");
+   guarantee(tl->count() > 0 || (tl->head() == NULL && tl->tail() == NULL),
+     "list count is inconsistent");
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.hpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)binaryTreeDictionary.hpp	1.26 07/05/05 17:05:41 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,10 +19,10 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+-/* 
++/*
+  * A binary tree based search structure for free blocks.
+  * This is currently used in the Concurrent Mark&Sweep implementation.
+  */
+@@ -71,7 +68,7 @@
+   void clearLeft()               { _left = NULL;   }
+   void clearRight()              { _right = NULL;  }
+   void clearParent()             { _parent = NULL; }
+-  void initialize()		 { clearLeft(); clearRight(), clearParent(); }
++  void initialize()              { clearLeft(); clearRight(), clearParent(); }
+ 
+   // For constructing a TreeList from a Tree chunk or
+   // address and size.
+@@ -99,7 +96,7 @@
+ 
+ // A TreeChunk is a subclass of a FreeChunk that additionally
+ // maintains a pointer to the free list on which it is currently
+-// linked.  
++// linked.
+ // A TreeChunk is also used as a node in the binary tree.  This
+ // allows the binary tree to be maintained without any additional
+ // storage (the free chunks are used).  In a binary tree the first
+@@ -151,8 +148,8 @@
+   void set_root(TreeList* v) { _root = v; }
+ 
+   // Remove a chunk of size "size" or larger from the tree and
+-  // return it.  If the chunk 
+-  // is the last chunk of that size, remove the node for that size 
++  // return it.  If the chunk
++  // is the last chunk of that size, remove the node for that size
+   // from the tree.
+   TreeChunk* getChunkFromTree(size_t size, Dither dither, bool splay);
+   // Return a list of the specified size or NULL from the tree.
+@@ -198,13 +195,13 @@
+ 
+   // Reset the dictionary to the initial conditions with
+   // a single free chunk.
+-  void	     reset(MemRegion mr);
++  void       reset(MemRegion mr);
+   void       reset(HeapWord* addr, size_t size);
+   // Reset the dictionary to be empty.
+   void       reset();
+ 
+   // Return a chunk of size "size" or greater from
+-  // the tree.  
++  // the tree.
+   // want a better dynamic splay strategy for the future.
+   FreeChunk* getChunk(size_t size, Dither dither) {
+     verify_par_locked();
+@@ -264,21 +261,21 @@
+   // Return the largest free chunk in the tree.
+   FreeChunk* findLargestDict() const;
+   // Accessors for statistics
+-  void 	     setTreeSurplus(double splitSurplusPercent);
+-  void 	     setTreeHints(void);
++  void       setTreeSurplus(double splitSurplusPercent);
++  void       setTreeHints(void);
+   // Reset statistics for all the lists in the tree.
+-  void	     clearTreeCensus(void);
++  void       clearTreeCensus(void);
+   // Print the statistcis for all the lists in the tree.  Also may
+   // print out summaries.
+-  void	     printDictCensus(void) const;
++  void       printDictCensus(void) const;
+ 
+   // For debugging.  Returns the sum of the _returnedBytes for
+   // all lists in the tree.
+-  size_t     sumDictReturnedBytes()	PRODUCT_RETURN0;
++  size_t     sumDictReturnedBytes()     PRODUCT_RETURN0;
+   // Sets the _returnedBytes for all the lists in the tree to zero.
+-  void	     initializeDictReturnedBytes()	PRODUCT_RETURN;
++  void       initializeDictReturnedBytes()      PRODUCT_RETURN;
+   // For debugging.  Return the total number of chunks in the dictionary.
+-  size_t     totalCount()	PRODUCT_RETURN0;
++  size_t     totalCount()       PRODUCT_RETURN0;
+ 
+   void       reportStatistics() const;
+ 
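
The binaryTreeDictionary.hpp hunks are whitespace-only, but the comments they pass through describe the key design point: the dictionary needs no side storage because each free chunk of memory doubles as the tree node (and list link) that indexes it. A sketch of that intrusive layout, assuming a simplified node that is not HotSpot's actual TreeChunk:

    #include <cassert>
    #include <cstddef>
    #include <new>

    struct FreeNode {               // lives inside the free block itself
      size_t    size;
      FreeNode* left;
      FreeNode* right;
      FreeNode* parent;
      FreeNode* next_same_size;     // chain of equal-sized chunks
    };

    // Initialize a node in place inside a returned block, echoing
    // TreeList::as_TreeList(HeapWord* addr, size_t size).
    FreeNode* as_node(void* addr, size_t size) {
      assert(size >= sizeof(FreeNode) && "block too small for a node");
      FreeNode* n = new (addr) FreeNode();      // placement-new, no allocation
      n->size = size;
      n->left = n->right = n->parent = n->next_same_size = NULL;
      return n;
    }
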
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)cmsAdaptiveSizePolicy.cpp	1.19 07/05/05 17:05:24 JVM"
+-#endif
+ /*
+  * Copyright 2004-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ #include "incls/_precompiled.incl"
+ #include "incls/_cmsAdaptiveSizePolicy.cpp.incl"
+@@ -34,16 +31,16 @@
+ #define CLOCK_GRANULARITY_TOO_LARGE
+ 
+ CMSAdaptiveSizePolicy::CMSAdaptiveSizePolicy(size_t init_eden_size,
+-					     size_t init_promo_size,
+-					     size_t init_survivor_size,
+-					     double max_gc_minor_pause_sec,
+-					     double max_gc_pause_sec,
+-					     uint gc_cost_ratio) :
+-  AdaptiveSizePolicy(init_eden_size, 
+-		     init_promo_size,
+-		     init_survivor_size, 
+-		     max_gc_pause_sec,
+-		     gc_cost_ratio) {
++                                             size_t init_promo_size,
++                                             size_t init_survivor_size,
++                                             double max_gc_minor_pause_sec,
++                                             double max_gc_pause_sec,
++                                             uint gc_cost_ratio) :
++  AdaptiveSizePolicy(init_eden_size,
++                     init_promo_size,
++                     init_survivor_size,
++                     max_gc_pause_sec,
++                     gc_cost_ratio) {
+ 
+   clear_internal_time_intervals();
+ 
+@@ -51,37 +48,37 @@
+ 
+   if (CMSConcurrentMTEnabled && (ParallelCMSThreads > 1)) {
+     assert(_processor_count > 0, "Processor count is suspect");
+-    _concurrent_processor_count = MIN2((uint) ParallelCMSThreads, 
+-				       (uint) _processor_count);
++    _concurrent_processor_count = MIN2((uint) ParallelCMSThreads,
++                                       (uint) _processor_count);
+   } else {
+     _concurrent_processor_count = 1;
+   }
+ 
+-  _avg_concurrent_time 	= new AdaptiveWeightedAverage(AdaptiveTimeWeight);
++  _avg_concurrent_time  = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
+   _avg_concurrent_interval = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
+   _avg_concurrent_gc_cost = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
+ 
+-  _avg_initial_pause 	= new AdaptivePaddedAverage(AdaptiveTimeWeight, 
+-						    PausePadding);
+-  _avg_remark_pause 	= new AdaptivePaddedAverage(AdaptiveTimeWeight,
++  _avg_initial_pause    = new AdaptivePaddedAverage(AdaptiveTimeWeight,
++                                                    PausePadding);
++  _avg_remark_pause     = new AdaptivePaddedAverage(AdaptiveTimeWeight,
+                                                     PausePadding);
+ 
+-  _avg_cms_STW_time 	= new AdaptiveWeightedAverage(AdaptiveTimeWeight);
+-  _avg_cms_STW_gc_cost 	= new AdaptiveWeightedAverage(AdaptiveTimeWeight);
++  _avg_cms_STW_time     = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
++  _avg_cms_STW_gc_cost  = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
+ 
+-  _avg_cms_free 	= new AdaptiveWeightedAverage(AdaptiveTimeWeight);
++  _avg_cms_free         = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
+   _avg_cms_free_at_sweep = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
+-  _avg_cms_promo 	= new AdaptiveWeightedAverage(AdaptiveTimeWeight);
++  _avg_cms_promo        = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
+ 
+   // Mark-sweep-compact
+-  _avg_msc_pause	= new AdaptiveWeightedAverage(AdaptiveTimeWeight);
+-  _avg_msc_interval	= new AdaptiveWeightedAverage(AdaptiveTimeWeight);
+-  _avg_msc_gc_cost	= new AdaptiveWeightedAverage(AdaptiveTimeWeight);
++  _avg_msc_pause        = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
++  _avg_msc_interval     = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
++  _avg_msc_gc_cost      = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
+ 
+   // Mark-sweep
+-  _avg_ms_pause	= new AdaptiveWeightedAverage(AdaptiveTimeWeight);
+-  _avg_ms_interval	= new AdaptiveWeightedAverage(AdaptiveTimeWeight);
+-  _avg_ms_gc_cost	= new AdaptiveWeightedAverage(AdaptiveTimeWeight);
++  _avg_ms_pause = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
++  _avg_ms_interval      = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
++  _avg_ms_gc_cost       = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
+ 
+   // Variables that estimate pause times as a function of generation
+   // size.
+@@ -114,18 +111,18 @@
+ }
+ 
+ double CMSAdaptiveSizePolicy::concurrent_collection_cost(
+-						  double interval_in_seconds) {
++                                                  double interval_in_seconds) {
+   //  When the precleaning and sweeping phases use multiple
+-  // threads, change one_processor_fraction to 
++  // threads, change one_processor_fraction to
+   // concurrent_processor_fraction().
+   double one_processor_fraction = 1.0 / ((double) processor_count());
+-  double concurrent_cost = 
+-    collection_cost(_latest_cms_concurrent_marking_time_secs, 
+-	        interval_in_seconds) * concurrent_processor_fraction() +
++  double concurrent_cost =
++    collection_cost(_latest_cms_concurrent_marking_time_secs,
++                interval_in_seconds) * concurrent_processor_fraction() +
+     collection_cost(_latest_cms_concurrent_precleaning_time_secs,
+-		interval_in_seconds) * one_processor_fraction +
++                interval_in_seconds) * one_processor_fraction +
+     collection_cost(_latest_cms_concurrent_sweeping_time_secs,
+-		interval_in_seconds) * one_processor_fraction;
++                interval_in_seconds) * one_processor_fraction;
+   if (PrintAdaptiveSizePolicy && Verbose) {
+     gclog_or_tty->print_cr(
+       "\nCMSAdaptiveSizePolicy::scaled_concurrent_collection_cost(%f) "
+@@ -136,11 +133,11 @@
+       "concurrent_cost %f ",
+       interval_in_seconds,
+       collection_cost(_latest_cms_concurrent_marking_time_secs,
+-	interval_in_seconds),
++        interval_in_seconds),
+       collection_cost(_latest_cms_concurrent_precleaning_time_secs,
+-	interval_in_seconds),
++        interval_in_seconds),
+       collection_cost(_latest_cms_concurrent_sweeping_time_secs,
+-	interval_in_seconds),
++        interval_in_seconds),
+       concurrent_processor_fraction(),
+       concurrent_cost);
+   }
+@@ -148,7 +145,7 @@
+ }
+ 
+ double CMSAdaptiveSizePolicy::concurrent_collection_time() {
+-  double latest_cms_sum_concurrent_phases_time_secs = 
++  double latest_cms_sum_concurrent_phases_time_secs =
+     _latest_cms_concurrent_marking_time_secs +
+     _latest_cms_concurrent_precleaning_time_secs +
+     _latest_cms_concurrent_sweeping_time_secs;
+@@ -157,10 +154,10 @@
+ 
+ double CMSAdaptiveSizePolicy::scaled_concurrent_collection_time() {
+   //  When the precleaning and sweeping phases use multiple
+-  // threads, change one_processor_fraction to 
++  // threads, change one_processor_fraction to
+   // concurrent_processor_fraction().
+   double one_processor_fraction = 1.0 / ((double) processor_count());
+-  double latest_cms_sum_concurrent_phases_time_secs = 
++  double latest_cms_sum_concurrent_phases_time_secs =
+     _latest_cms_concurrent_marking_time_secs * concurrent_processor_fraction() +
+     _latest_cms_concurrent_precleaning_time_secs * one_processor_fraction +
+     _latest_cms_concurrent_sweeping_time_secs * one_processor_fraction ;
+@@ -187,14 +184,14 @@
+   // that is available for promotions in the CMS generation
+   // and use that to update _minor_pause_old_estimator
+ 
+-  // Don't implement this until it is needed. A warning is 
++  // Don't implement this until it is needed. A warning is
+   // printed if _minor_pause_old_estimator is used.
+-}  
++}
+ 
+ void CMSAdaptiveSizePolicy::concurrent_marking_begin() {
+   if (PrintAdaptiveSizePolicy && Verbose) {
+-    gclog_or_tty->print(" "); 
+-    gclog_or_tty->stamp(); 
++    gclog_or_tty->print(" ");
++    gclog_or_tty->stamp();
+     gclog_or_tty->print(": concurrent_marking_begin ");
+   }
+   //  Update the interval time
+@@ -210,7 +207,7 @@
+ 
+ void CMSAdaptiveSizePolicy::concurrent_marking_end() {
+   if (PrintAdaptiveSizePolicy && Verbose) {
+-    gclog_or_tty->stamp(); 
++    gclog_or_tty->stamp();
+     gclog_or_tty->print_cr("CMSAdaptiveSizePolicy::concurrent_marking_end()");
+   }
+ 
+@@ -226,7 +223,7 @@
+ 
+ void CMSAdaptiveSizePolicy::concurrent_precleaning_begin() {
+   if (PrintAdaptiveSizePolicy && Verbose) {
+-    gclog_or_tty->stamp(); 
++    gclog_or_tty->stamp();
+     gclog_or_tty->print_cr(
+       "CMSAdaptiveSizePolicy::concurrent_precleaning_begin()");
+   }
+@@ -237,7 +234,7 @@
+ 
+ void CMSAdaptiveSizePolicy::concurrent_precleaning_end() {
+   if (PrintAdaptiveSizePolicy && Verbose) {
+-    gclog_or_tty->stamp(); 
++    gclog_or_tty->stamp();
+     gclog_or_tty->print_cr("CMSAdaptiveSizePolicy::concurrent_precleaning_end()");
+   }
+ 
+@@ -254,7 +251,7 @@
+ 
+ void CMSAdaptiveSizePolicy::concurrent_sweeping_begin() {
+   if (PrintAdaptiveSizePolicy && Verbose) {
+-    gclog_or_tty->stamp(); 
++    gclog_or_tty->stamp();
+     gclog_or_tty->print_cr(
+       "CMSAdaptiveSizePolicy::concurrent_sweeping_begin()");
+   }
+@@ -265,7 +262,7 @@
+ 
+ void CMSAdaptiveSizePolicy::concurrent_sweeping_end() {
+   if (PrintAdaptiveSizePolicy && Verbose) {
+-    gclog_or_tty->stamp(); 
++    gclog_or_tty->stamp();
+     gclog_or_tty->print_cr("CMSAdaptiveSizePolicy::concurrent_sweeping_end()");
+   }
+ 
+@@ -281,10 +278,10 @@
+ 
+ void CMSAdaptiveSizePolicy::concurrent_phases_end(GCCause::Cause gc_cause,
+                                                   size_t cur_eden,
+-						  size_t cur_promo) {
++                                                  size_t cur_promo) {
+   if (PrintAdaptiveSizePolicy && Verbose) {
+-    gclog_or_tty->print(" "); 
+-    gclog_or_tty->stamp(); 
++    gclog_or_tty->print(" ");
++    gclog_or_tty->stamp();
+     gclog_or_tty->print(": concurrent_phases_end ");
+   }
+ 
+@@ -295,7 +292,7 @@
+       UseAdaptiveSizePolicyWithSystemGC) {
+ 
+     avg_cms_free()->sample(cur_promo);
+-    double latest_cms_sum_concurrent_phases_time_secs = 
++    double latest_cms_sum_concurrent_phases_time_secs =
+       concurrent_collection_time();
+ 
+     _avg_concurrent_time->sample(latest_cms_sum_concurrent_phases_time_secs);
+@@ -304,7 +301,7 @@
+ 
+     // Total interval for collection.  May not be valid.  Tests
+     // below determine whether to use this.
+-    // 
++    //
+   if (PrintAdaptiveSizePolicy && Verbose) {
+     gclog_or_tty->print_cr("\nCMSAdaptiveSizePolicy::concurrent_phases_end \n"
+       "_latest_cms_reset_end_to_initial_mark_start_secs %f \n"
+@@ -315,14 +312,14 @@
+       "_latest_cms_concurrent_sweeping_time_secs %f \n"
+       "latest_cms_sum_concurrent_phases_time_secs %f \n"
+       "_latest_cms_collection_end_to_collection_start_secs %f \n"
+-      "concurrent_processor_fraction %f", 
+-      _latest_cms_reset_end_to_initial_mark_start_secs, 
+-      _latest_cms_initial_mark_start_to_end_time_secs, 
+-      _latest_cms_remark_start_to_end_time_secs, 
+-      _latest_cms_concurrent_marking_time_secs, 
+-      _latest_cms_concurrent_precleaning_time_secs, 
+-      _latest_cms_concurrent_sweeping_time_secs, 
+-      latest_cms_sum_concurrent_phases_time_secs, 
++      "concurrent_processor_fraction %f",
++      _latest_cms_reset_end_to_initial_mark_start_secs,
++      _latest_cms_initial_mark_start_to_end_time_secs,
++      _latest_cms_remark_start_to_end_time_secs,
++      _latest_cms_concurrent_marking_time_secs,
++      _latest_cms_concurrent_precleaning_time_secs,
++      _latest_cms_concurrent_sweeping_time_secs,
++      latest_cms_sum_concurrent_phases_time_secs,
+       _latest_cms_collection_end_to_collection_start_secs,
+       concurrent_processor_fraction());
+   }
+@@ -343,7 +340,7 @@
+       "Bad initial mark pause");
+     assert(_latest_cms_remark_start_to_end_time_secs >= 0.0,
+       "Bad remark pause");
+-    double STW_time_in_seconds = 
++    double STW_time_in_seconds =
+       _latest_cms_initial_mark_start_to_end_time_secs +
+       _latest_cms_remark_start_to_end_time_secs;
+     double STW_collection_cost = 0.0;
+@@ -361,7 +358,7 @@
+         (double) interval_in_seconds * MILLIUNITS);
+     }
+ 
+-    double concurrent_cost = 0.0; 
++    double concurrent_cost = 0.0;
+     if (latest_cms_sum_concurrent_phases_time_secs > 0.0) {
+       concurrent_cost = concurrent_collection_cost(interval_in_seconds);
+ 
+@@ -370,14 +367,14 @@
+ 
+       if (PrintAdaptiveSizePolicy && Verbose) {
+         gclog_or_tty->print("cmsAdaptiveSizePolicy::concurrent_phases_end: "
+-          "concurrent gc cost: %f  average: %f", 
+-	  concurrent_cost,
++          "concurrent gc cost: %f  average: %f",
++          concurrent_cost,
+           _avg_concurrent_gc_cost->average());
+         gclog_or_tty->print_cr("  concurrent time: %f (ms) cms period %f (ms)"
+-	  " processor fraction: %f",
++          " processor fraction: %f",
+           latest_cms_sum_concurrent_phases_time_secs * MILLIUNITS,
+           interval_in_seconds * MILLIUNITS,
+-	  concurrent_processor_fraction());
++          concurrent_processor_fraction());
+       }
+     }
+     double total_collection_cost = STW_collection_cost + concurrent_cost;
+@@ -409,7 +406,7 @@
+   set_first_after_collection();
+ 
+   // The concurrent phases keeps track of it's own mutator interval
+-  // with this timer.  This allows the stop-the-world phase to 
++  // with this timer.  This allows the stop-the-world phase to
+   // be included in the mutator time so that the stop-the-world time
+   // is not double counted.  Reset and start it.
+   _concurrent_timer.reset();
+@@ -441,7 +438,7 @@
+ 
+     if (PrintAdaptiveSizePolicy && Verbose) {
+       gclog_or_tty->print(
+-	"cmsAdaptiveSizePolicy::checkpoint_roots_initial_end: "
++        "cmsAdaptiveSizePolicy::checkpoint_roots_initial_end: "
+         "initial pause: %f ", _latest_cms_initial_mark_start_to_end_time_secs);
+     }
+   }
+@@ -493,8 +490,8 @@
+ 
+ void CMSAdaptiveSizePolicy::msc_collection_begin() {
+   if (PrintAdaptiveSizePolicy && Verbose) {
+-    gclog_or_tty->print(" "); 
+-    gclog_or_tty->stamp(); 
++    gclog_or_tty->print(" ");
++    gclog_or_tty->stamp();
+     gclog_or_tty->print(": msc_collection_begin ");
+   }
+   _STW_timer.stop();
+@@ -511,57 +508,57 @@
+ 
+ void CMSAdaptiveSizePolicy::msc_collection_end(GCCause::Cause gc_cause) {
+   if (PrintAdaptiveSizePolicy && Verbose) {
+-    gclog_or_tty->print(" "); 
+-    gclog_or_tty->stamp(); 
++    gclog_or_tty->print(" ");
++    gclog_or_tty->stamp();
+     gclog_or_tty->print(": msc_collection_end ");
+   }
+   _STW_timer.stop();
+   if (gc_cause != GCCause::_java_lang_system_gc ||
+-	UseAdaptiveSizePolicyWithSystemGC) {
++        UseAdaptiveSizePolicyWithSystemGC) {
+     double msc_pause_in_seconds = _STW_timer.seconds();
+     if ((_latest_cms_msc_end_to_msc_start_time_secs > 0.0) &&
+         (msc_pause_in_seconds > 0.0)) {
+       avg_msc_pause()->sample(msc_pause_in_seconds);
+       double mutator_time_in_seconds = 0.0;
+       if (_latest_cms_collection_end_to_collection_start_secs == 0.0) {
+-	// This assertion may fail because of time stamp gradularity.
+-	// Comment it out and investiage it at a later time.  The large
+-	// time stamp granularity occurs on some older linux systems.
+-#ifndef CLOCK_GRANULARITY_TOO_LARGE 
+-	assert((_latest_cms_concurrent_marking_time_secs == 0.0) &&
+-	       (_latest_cms_concurrent_precleaning_time_secs == 0.0) &&
+-	       (_latest_cms_concurrent_sweeping_time_secs == 0.0),
+-	  "There should not be any concurrent time");
++        // This assertion may fail because of time stamp granularity.
++        // Comment it out and investigate it at a later time.  The large
++        // time stamp granularity occurs on some older linux systems.
++#ifndef CLOCK_GRANULARITY_TOO_LARGE
++        assert((_latest_cms_concurrent_marking_time_secs == 0.0) &&
++               (_latest_cms_concurrent_precleaning_time_secs == 0.0) &&
++               (_latest_cms_concurrent_sweeping_time_secs == 0.0),
++          "There should not be any concurrent time");
+ #endif
+-	// A concurrent collection did not start.  Mutator time
+-	// between collections comes from the STW MSC timer.
+-	mutator_time_in_seconds = _latest_cms_msc_end_to_msc_start_time_secs;
++        // A concurrent collection did not start.  Mutator time
++        // between collections comes from the STW MSC timer.
++        mutator_time_in_seconds = _latest_cms_msc_end_to_msc_start_time_secs;
+       } else {
+-	// The concurrent collection did start so count the mutator
+-	// time to the start of the concurrent collection.  In this
+-	// case the _latest_cms_msc_end_to_msc_start_time_secs measures
+-	// the time between the initial mark or remark and the
+-	// start of the MSC.  That has no real meaning.
+-	mutator_time_in_seconds = _latest_cms_collection_end_to_collection_start_secs;
++        // The concurrent collection did start so count the mutator
++        // time to the start of the concurrent collection.  In this
++        // case the _latest_cms_msc_end_to_msc_start_time_secs measures
++        // the time between the initial mark or remark and the
++        // start of the MSC.  That has no real meaning.
++        mutator_time_in_seconds = _latest_cms_collection_end_to_collection_start_secs;
+       }
+ 
+-      double latest_cms_sum_concurrent_phases_time_secs = 
+-	concurrent_collection_time();
++      double latest_cms_sum_concurrent_phases_time_secs =
++        concurrent_collection_time();
+       double interval_in_seconds =
+-	mutator_time_in_seconds +
+-	_latest_cms_initial_mark_start_to_end_time_secs + 
+-	_latest_cms_remark_start_to_end_time_secs +
+-	latest_cms_sum_concurrent_phases_time_secs +
++        mutator_time_in_seconds +
++        _latest_cms_initial_mark_start_to_end_time_secs +
++        _latest_cms_remark_start_to_end_time_secs +
++        latest_cms_sum_concurrent_phases_time_secs +
+         msc_pause_in_seconds;
+ 
+       if (PrintAdaptiveSizePolicy && Verbose) {
+         gclog_or_tty->print_cr("  interval_in_seconds %f \n"
+-          "	mutator_time_in_seconds %f \n"
+-          "	_latest_cms_initial_mark_start_to_end_time_secs %f\n"
+-          "	_latest_cms_remark_start_to_end_time_secs %f\n"
+-          "	latest_cms_sum_concurrent_phases_time_secs %f\n"
+-          "	msc_pause_in_seconds %f\n",
+-          interval_in_seconds, 
++          "     mutator_time_in_seconds %f \n"
++          "     _latest_cms_initial_mark_start_to_end_time_secs %f\n"
++          "     _latest_cms_remark_start_to_end_time_secs %f\n"
++          "     latest_cms_sum_concurrent_phases_time_secs %f\n"
++          "     msc_pause_in_seconds %f\n",
++          interval_in_seconds,
+           mutator_time_in_seconds,
+           _latest_cms_initial_mark_start_to_end_time_secs,
+           _latest_cms_remark_start_to_end_time_secs,
+@@ -576,8 +573,8 @@
+       // Initial mark and remark, also wasted.
+       double STW_time_in_seconds = _latest_cms_initial_mark_start_to_end_time_secs +
+         _latest_cms_remark_start_to_end_time_secs;
+-      double STW_collection_cost = 
+-	collection_cost(STW_time_in_seconds, interval_in_seconds) +
++      double STW_collection_cost =
++        collection_cost(STW_time_in_seconds, interval_in_seconds) +
+         concurrent_cost;
+ 
+       if (PrintAdaptiveSizePolicy && Verbose) {
+@@ -589,7 +586,7 @@
+           "latest_cms_sum_concurrent_phases_time_secs %f\n",
+           _latest_cms_collection_end_to_collection_start_secs,
+           _latest_cms_msc_end_to_msc_start_time_secs,
+-          _latest_cms_initial_mark_start_to_end_time_secs, 
++          _latest_cms_initial_mark_start_to_end_time_secs,
+           _latest_cms_remark_start_to_end_time_secs,
+           latest_cms_sum_concurrent_phases_time_secs);
+ 
+@@ -603,7 +600,7 @@
+       }
+ 
+       double cost = concurrent_cost + STW_collection_cost +
+-	collection_cost(msc_pause_in_seconds, interval_in_seconds);
++        collection_cost(msc_pause_in_seconds, interval_in_seconds);
+ 
+       _avg_msc_gc_cost->sample(cost);
+ 
+@@ -616,7 +613,7 @@
+         gclog_or_tty->print("cmsAdaptiveSizePolicy::msc_collection_end: "
+           "MSC gc cost: %f  average: %f", cost,
+           _avg_msc_gc_cost->average());
+-  
++
+         double msc_pause_in_ms = msc_pause_in_seconds * MILLIUNITS;
+         gclog_or_tty->print_cr("  MSC pause: %f (ms) MSC period %f (ms)",
+           msc_pause_in_ms, (double) interval_in_seconds * MILLIUNITS);
+@@ -630,7 +627,7 @@
+   set_first_after_collection();
+ 
+   // The concurrent phases keeps track of it's own mutator interval
+-  // with this timer.  This allows the stop-the-world phase to 
++  // with this timer.  This allows the stop-the-world phase to
+   // be included in the mutator time so that the stop-the-world time
+   // is not double counted.  Reset and start it.
+   _concurrent_timer.stop();
+@@ -643,8 +640,8 @@
+ 
+ void CMSAdaptiveSizePolicy::ms_collection_begin() {
+   if (PrintAdaptiveSizePolicy && Verbose) {
+-    gclog_or_tty->print(" "); 
+-    gclog_or_tty->stamp(); 
++    gclog_or_tty->print(" ");
++    gclog_or_tty->stamp();
+     gclog_or_tty->print(": ms_collection_begin ");
+   }
+   _STW_timer.stop();
+@@ -661,8 +658,8 @@
+ 
+ void CMSAdaptiveSizePolicy::ms_collection_end(GCCause::Cause gc_cause) {
+   if (PrintAdaptiveSizePolicy && Verbose) {
+-    gclog_or_tty->print(" "); 
+-    gclog_or_tty->stamp(); 
++    gclog_or_tty->print(" ");
++    gclog_or_tty->stamp();
+     gclog_or_tty->print(": ms_collection_end ");
+   }
+   _STW_timer.stop();
+@@ -671,10 +668,10 @@
+     // The MS collection is a foreground collection that does all
+     // the parts of a mostly concurrent collection.
+     //
+-    // For this collection include the cost of the 
++    // For this collection include the cost of the
+     //  initial mark
+     //  remark
+-    //  all concurrent time (scaled down by the 
++    //  all concurrent time (scaled down by the
+     //    concurrent_processor_fraction).  Some
+     //    may be zero if the baton was passed before
+     //    it was reached.
+@@ -684,7 +681,7 @@
+     //  STW after baton was passed (STW_in_foreground_in_seconds)
+     double STW_in_foreground_in_seconds = _STW_timer.seconds();
+ 
+-    double latest_cms_sum_concurrent_phases_time_secs = 
++    double latest_cms_sum_concurrent_phases_time_secs =
+       concurrent_collection_time();
+     if (PrintAdaptiveSizePolicy && Verbose) {
+       gclog_or_tty->print_cr("\nCMSAdaptiveSizePolicy::ms_collecton_end "
+@@ -693,12 +690,12 @@
+         "_latest_cms_remark_start_to_end_time_secs %f "
+         "latest_cms_sum_concurrent_phases_time_secs %f "
+         "_latest_cms_ms_marking_start_to_end_time_secs %f "
+-        "_latest_cms_ms_end_to_ms_start %f", 
+-        STW_in_foreground_in_seconds, 
+-        _latest_cms_initial_mark_start_to_end_time_secs, 
+-        _latest_cms_remark_start_to_end_time_secs, 
+-        latest_cms_sum_concurrent_phases_time_secs, 
+-        _latest_cms_ms_marking_start_to_end_time_secs, 
++        "_latest_cms_ms_end_to_ms_start %f",
++        STW_in_foreground_in_seconds,
++        _latest_cms_initial_mark_start_to_end_time_secs,
++        _latest_cms_remark_start_to_end_time_secs,
++        latest_cms_sum_concurrent_phases_time_secs,
++        _latest_cms_ms_marking_start_to_end_time_secs,
+         _latest_cms_ms_end_to_ms_start);
+     }
+ 
+@@ -706,30 +703,30 @@
+       _latest_cms_remark_start_to_end_time_secs;
+ #ifndef CLOCK_GRANULARITY_TOO_LARGE
+     assert(_latest_cms_ms_marking_start_to_end_time_secs == 0.0 ||
+-	   latest_cms_sum_concurrent_phases_time_secs == 0.0,
+-	   "marking done twice?");
++           latest_cms_sum_concurrent_phases_time_secs == 0.0,
++           "marking done twice?");
+ #endif
+     double ms_time_in_seconds = STW_marking_in_seconds +
+-      STW_in_foreground_in_seconds + 
++      STW_in_foreground_in_seconds +
+       _latest_cms_ms_marking_start_to_end_time_secs +
+       scaled_concurrent_collection_time();
+     avg_ms_pause()->sample(ms_time_in_seconds);
+     // Use the STW costs from the initial mark and remark plus
+-    // the cost of the concurrent phase to calculate a 
++    // the cost of the concurrent phase to calculate a
+     // collection cost.
+     double cost = 0.0;
+     if ((_latest_cms_ms_end_to_ms_start > 0.0) &&
+         (ms_time_in_seconds > 0.0)) {
+       double interval_in_seconds =
+         _latest_cms_ms_end_to_ms_start + ms_time_in_seconds;
+-      
++
+       if (PrintAdaptiveSizePolicy && Verbose) {
+         gclog_or_tty->print_cr("\n ms_time_in_seconds  %f  "
+-	  "latest_cms_sum_concurrent_phases_time_secs %f  "
+-	  "interval_in_seconds %f",
+-          ms_time_in_seconds, 
+-	  latest_cms_sum_concurrent_phases_time_secs, 
+-	  interval_in_seconds);
++          "latest_cms_sum_concurrent_phases_time_secs %f  "
++          "interval_in_seconds %f",
++          ms_time_in_seconds,
++          latest_cms_sum_concurrent_phases_time_secs,
++          interval_in_seconds);
+       }
+ 
+       cost = collection_cost(ms_time_in_seconds, interval_in_seconds);
+@@ -752,14 +749,14 @@
+     }
+   }
+ 
+-  // Consider putting this code (here to end) into a 
++  // Consider putting this code (here to end) into a
+   // method for convenience.
+   clear_internal_time_intervals();
+ 
+   set_first_after_collection();
+ 
+   // The concurrent phases keeps track of it's own mutator interval
+-  // with this timer.  This allows the stop-the-world phase to 
++  // with this timer.  This allows the stop-the-world phase to
+   // be included in the mutator time so that the stop-the-world time
+   // is not double counted.  Reset and start it.
+   _concurrent_timer.stop();
+@@ -792,7 +789,7 @@
+ 
+ void CMSAdaptiveSizePolicy::concurrent_phases_resume() {
+   if (PrintAdaptiveSizePolicy && Verbose) {
+-    gclog_or_tty->stamp(); 
++    gclog_or_tty->stamp();
+     gclog_or_tty->print_cr("CMSAdaptiveSizePolicy::concurrent_phases_resume()");
+   }
+   _concurrent_timer.start();
+@@ -836,7 +833,7 @@
+     _latest_cms_ms_marking_start_to_end_time_secs = _STW_timer.seconds();
+     if (PrintAdaptiveSizePolicy && Verbose) {
+       gclog_or_tty->print_cr("CMSAdaptiveSizePolicy::"
+-	"msc_collection_marking_end: mutator time %f",
++        "msc_collection_marking_end: mutator time %f",
+         _latest_cms_ms_marking_start_to_end_time_secs);
+     }
+   }
+@@ -852,8 +849,8 @@
+ }
+ 
+ // Cost of collection (unit-less)
+-double CMSAdaptiveSizePolicy::collection_cost(double pause_in_seconds, 
+-					      double interval_in_seconds) {
++double CMSAdaptiveSizePolicy::collection_cost(double pause_in_seconds,
++                                              double interval_in_seconds) {
+   // Cost of collection (unit-less)
+   double cost = 0.0;
+   if ((interval_in_seconds > 0.0) &&
+@@ -867,7 +864,7 @@
+ size_t CMSAdaptiveSizePolicy::adjust_eden_for_pause_time(size_t cur_eden) {
+   size_t change = 0;
+   size_t desired_eden = cur_eden;
+-  
++
+   // reduce eden size
+   change = eden_decrement_aligned_down(cur_eden);
+   desired_eden = cur_eden - change;
+@@ -915,7 +912,7 @@
+ 
+ size_t CMSAdaptiveSizePolicy::adjust_eden_for_footprint(size_t cur_eden) {
+ 
+-  set_decrease_for_footprint(decrease_young_gen_for_footprint_true); 
++  set_decrease_for_footprint(decrease_young_gen_for_footprint_true);
+ 
+   size_t change = eden_decrement(cur_eden);
+   size_t desired_eden_size = cur_eden - change;
+@@ -986,9 +983,9 @@
+     // the generation be shrunk?
+     if (get_and_clear_first_after_collection() &&
+         ((avg_remark_pause()->padded_average() > gc_pause_goal_sec() &&
+-	  remark_pause_young_estimator()->decrement_will_decrease()) ||
++          remark_pause_young_estimator()->decrement_will_decrease()) ||
+          (avg_initial_pause()->padded_average() > gc_pause_goal_sec() &&
+-	  initial_pause_young_estimator()->decrement_will_decrease()))) {
++          initial_pause_young_estimator()->decrement_will_decrease()))) {
+ 
+        set_change_young_gen_for_maj_pauses(
+          decrease_young_gen_for_maj_pauses_true);
+@@ -1000,10 +997,10 @@
+     }
+     // If not the first young gen collection after a cms collection,
+     // don't do anything.  In this case an adjustment has already
+-    // been made and the results of the adjustment has not yet been 
++    // been made and the results of the adjustment have not yet been
+     // measured.
+-  } else if ((minor_gc_cost() >= 0.0) && 
+-	     (adjusted_mutator_cost() < _throughput_goal)) {
++  } else if ((minor_gc_cost() >= 0.0) &&
++             (adjusted_mutator_cost() < _throughput_goal)) {
+     desired_eden_size = adjust_eden_for_throughput(desired_eden_size);
+   } else {
+     desired_eden_size = adjust_eden_for_footprint(desired_eden_size);
+@@ -1013,15 +1010,15 @@
+     gclog_or_tty->print_cr(
+       "CMSAdaptiveSizePolicy::compute_young_generation_free_space limits:"
+       " desired_eden_size: " SIZE_FORMAT
+-      " old_eden_size: " SIZE_FORMAT, 
++      " old_eden_size: " SIZE_FORMAT,
+       desired_eden_size, cur_eden);
+   }
+ 
+   set_eden_size(desired_eden_size);
+ }
+ 
+-size_t CMSAdaptiveSizePolicy::adjust_promo_for_pause_time(size_t cur_promo) { 
+-  size_t change = 0; 
++size_t CMSAdaptiveSizePolicy::adjust_promo_for_pause_time(size_t cur_promo) {
++  size_t change = 0;
+   size_t desired_promo = cur_promo;
+   // Move this test up to caller like the adjust_eden_for_pause_time()
+   // call.
+@@ -1040,7 +1037,7 @@
+     change = promo_decrement_aligned_down(cur_promo);
+     desired_promo = cur_promo - change;
+   }
+-  
++
+   if ((change != 0) &&PrintAdaptiveSizePolicy && Verbose) {
+     gclog_or_tty->print_cr(
+       "CMSAdaptiveSizePolicy::adjust_promo_for_pause_time "
+@@ -1054,12 +1051,12 @@
+   return desired_promo;
+ }
+ 
+-// Try to share this with PS.  
++// Try to share this with PS.
+ size_t CMSAdaptiveSizePolicy::scale_by_gen_gc_cost(size_t base_change,
+-						  double gen_gc_cost) {
++                                                  double gen_gc_cost) {
+ 
+   // Calculate the change to use for the tenured gen.
+-  size_t scaled_change = 0; 
++  size_t scaled_change = 0;
+   // Can the increment to the generation be scaled?
+   if (gc_cost() >= 0.0 && gen_gc_cost >= 0.0) {
+     double scale_by_ratio = gen_gc_cost / gc_cost();
+@@ -1068,7 +1065,7 @@
+     if (PrintAdaptiveSizePolicy && Verbose) {
+       gclog_or_tty->print_cr(
+         "Scaled tenured increment: " SIZE_FORMAT " by %f down to "
+-	  SIZE_FORMAT,
++          SIZE_FORMAT,
+         base_change, scale_by_ratio, scaled_change);
+     }
+   } else if (gen_gc_cost >= 0.0) {
+@@ -1116,9 +1113,9 @@
+ }
+ 
+ size_t CMSAdaptiveSizePolicy::adjust_promo_for_footprint(size_t cur_promo,
+-							 size_t cur_eden) {
++                                                         size_t cur_eden) {
+ 
+-  set_decrease_for_footprint(decrease_young_gen_for_footprint_true); 
++  set_decrease_for_footprint(decrease_young_gen_for_footprint_true);
+ 
+   size_t change = promo_decrement(cur_promo);
+   size_t desired_promo_size = cur_promo - change;
+@@ -1136,9 +1133,9 @@
+ }
+ 
+ void CMSAdaptiveSizePolicy::compute_tenured_generation_free_space(
+-				size_t cur_tenured_free,
++                                size_t cur_tenured_free,
+                                 size_t max_tenured_available,
+-				size_t cur_eden) {
++                                size_t cur_eden) {
+   // This can be bad if the desired value grows/shrinks without
+   // any connection to the read free space
+   size_t desired_promo_size = promo_size();
+@@ -1148,7 +1145,7 @@
+   if (PrintGC && PrintAdaptiveSizePolicy) {
+     gclog_or_tty->print_cr(
+       "CMSAdaptiveSizePolicy::compute_tenured_generation_free_space: "
+-      "cur_tenured_free " SIZE_FORMAT 
++      "cur_tenured_free " SIZE_FORMAT
+       " max_tenured_available " SIZE_FORMAT,
+       cur_tenured_free, max_tenured_available);
+   }
+@@ -1163,19 +1160,19 @@
+   } else if (avg_minor_pause()->padded_average() > gc_pause_goal_sec()) {
+     // Nothing to do since the minor collections are too large and
+     // this method only deals with the cms generation.
+-  } else if ((cms_gc_cost() >= 0.0) && 
+-	     (adjusted_mutator_cost() < _throughput_goal)) {
++  } else if ((cms_gc_cost() >= 0.0) &&
++             (adjusted_mutator_cost() < _throughput_goal)) {
+     desired_promo_size = adjust_promo_for_throughput(cur_tenured_free);
+   } else {
+     desired_promo_size = adjust_promo_for_footprint(cur_tenured_free,
+-						    cur_eden);
++                                                    cur_eden);
+   }
+ 
+   if (PrintGC && PrintAdaptiveSizePolicy) {
+     gclog_or_tty->print_cr(
+       "CMSAdaptiveSizePolicy::compute_tenured_generation_free_space limits:"
+       " desired_promo_size: " SIZE_FORMAT
+-      " old_promo_size: " SIZE_FORMAT, 
++      " old_promo_size: " SIZE_FORMAT,
+       desired_promo_size, cur_tenured_free);
+   }
+ 
+@@ -1187,12 +1184,12 @@
+                                              int tenuring_threshold,
+                                              size_t survivor_limit) {
+   assert(survivor_limit >= generation_alignment(),
+-	 "survivor_limit too small");
++         "survivor_limit too small");
+   assert((size_t)align_size_down(survivor_limit, generation_alignment())
+-	 == survivor_limit, "survivor_limit not aligned");
++         == survivor_limit, "survivor_limit not aligned");
+ 
+   // Change UsePSAdaptiveSurvivorSizePolicy -> UseAdaptiveSurvivorSizePolicy?
+-  if (!UsePSAdaptiveSurvivorSizePolicy || 
++  if (!UsePSAdaptiveSurvivorSizePolicy ||
+       !young_gen_policy_is_ready()) {
+     return tenuring_threshold;
+   }
+@@ -1209,11 +1206,11 @@
+   set_decrement_tenuring_threshold_for_survivor_limit(false);
+ 
+   if (!is_survivor_overflow) {
+-    // Keep running averages on how much survived 
++    // Keep running averages on how much survived
+ 
+     // We use the tenuring threshold to equalize the cost of major
+     // and minor collections.
+-    // ThresholdTolerance is used to indicate how sensitive the 
++    // ThresholdTolerance is used to indicate how sensitive the
+     // tenuring threshold is to differences in cost betweent the
+     // collection types.
+ 
+@@ -1237,7 +1234,7 @@
+     // Survivor space overflow occurred, so promoted and survived are
+     // not accurate. We'll make our best guess by combining survived
+     // and promoted and count them as survivors.
+-    // 
++    //
+     // We'll lower the tenuring threshold to see if we can correct
+     // things. Also, set the survivor size conservatively. We're
+     // trying to avoid many overflows from occurring if defnew size
+@@ -1250,7 +1247,7 @@
+   // we use this to see how good of an estimate we have of what survived.
+   // We're trying to pad the survivor size as little as possible without
+   // overflowing the survivor spaces.
+-  size_t target_size = align_size_up((size_t)_avg_survived->padded_average(), 
++  size_t target_size = align_size_up((size_t)_avg_survived->padded_average(),
+                                      generation_alignment());
+   target_size = MAX2(target_size, generation_alignment());
+ 
+@@ -1276,24 +1273,24 @@
+   }
+ 
+   // We keep a running average of the amount promoted which is used
+-  // to decide when we should collect the old generation (when 
++  // to decide when we should collect the old generation (when
+   // the amount of old gen free space is less than what we expect to
+   // promote).
+- 
++
+   if (PrintAdaptiveSizePolicy) {
+     // A little more detail if Verbose is on
+     GenCollectedHeap* gch = GenCollectedHeap::heap();
+-    if (Verbose) { 
++    if (Verbose) {
+       gclog_or_tty->print( "  avg_survived: %f"
+                   "  avg_deviation: %f",
+-                  _avg_survived->average(), 
++                  _avg_survived->average(),
+                   _avg_survived->deviation());
+     }
+ 
+     gclog_or_tty->print( "  avg_survived_padded_avg: %f",
+                 _avg_survived->padded_average());
+ 
+-    if (Verbose) { 
++    if (Verbose) {
+       gclog_or_tty->print( "  avg_promoted_avg: %f"
+                   "  avg_promoted_dev: %f",
+                   gch->gc_stats(1)->avg_promoted()->average(),
+@@ -1304,7 +1301,7 @@
+                 "  avg_pretenured_padded_avg: %f"
+                 "  tenuring_thresh: %d"
+                 "  target_size: " SIZE_FORMAT
+-		"  survivor_limit: " SIZE_FORMAT,
++                "  survivor_limit: " SIZE_FORMAT,
+                 gch->gc_stats(1)->avg_promoted()->padded_average(),
+                 _avg_pretenured->padded_average(),
+                 tenuring_threshold, target_size, survivor_limit);
+@@ -1323,15 +1320,15 @@
+ }
+ 
+ bool CMSAdaptiveSizePolicy::print_adaptive_size_policy_on(
+-						    outputStream* st) const {
++                                                    outputStream* st) const {
+ 
+   if (!UseAdaptiveSizePolicy) return false;
+ 
+   GenCollectedHeap* gch = GenCollectedHeap::heap();
+   Generation* gen0 = gch->get_gen(0);
+   DefNewGeneration* def_new = gen0->as_DefNewGeneration();
+-  return 
++  return
+     AdaptiveSizePolicy::print_adaptive_size_policy_on(
+-					 st, 
+-					 def_new->tenuring_threshold());
++                                         st,
++                                         def_new->tenuring_threshold());
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)cmsAdaptiveSizePolicy.hpp	1.16 07/05/05 17:05:25 JVM"
+-#endif
+ /*
+  * Copyright 2004-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This class keeps statistical information and computes the
+@@ -32,9 +29,9 @@
+ //   minor collection
+ //   concurrent collection
+ //      stop-the-world component
+-//	concurrent component
++//      concurrent component
+ //   major compacting collection
+-//	uses decaying cost
++//      uses decaying cost
+ 
+ // Forward decls
+ class elapsedTimer;
+@@ -56,7 +53,7 @@
+   // the time during which the cms collector runs concurrently
+   // with the mutators.
+   //   Between end of most recent cms reset and start of initial mark
+-		// This may be redundant
++                // This may be redundant
+   double _latest_cms_reset_end_to_initial_mark_start_secs;
+   //   Between end of the most recent initial mark and start of remark
+   double _latest_cms_initial_mark_end_to_remark_start_secs;
+@@ -68,7 +65,7 @@
+   double _latest_cms_concurrent_marking_time_secs;
+   double _latest_cms_concurrent_precleaning_time_secs;
+   double _latest_cms_concurrent_sweeping_time_secs;
+-  //   Between end of most recent STW MSC and start of next STW MSC 
++  //   Between end of most recent STW MSC and start of next STW MSC
+   double _latest_cms_msc_end_to_msc_start_time_secs;
+   //   Between end of most recent MS and start of next MS
+   //   This does not include any time spent during a concurrent
+@@ -95,11 +92,11 @@
+   size_t _generation_alignment;
+ 
+   // If this variable is true, the size of the young generation
+-  // may be changed in order to reduce the pause(s) of the 
++  // may be changed in order to reduce the pause(s) of the
+   // collection of the tenured generation in order to meet the
+-  // pause time goal.  It is common to change the size of the 
++  // pause time goal.  It is common to change the size of the
+   // tenured generation in order to meet the pause time goal
+-  // for the tenured generation.  With the CMS collector for 
++  // for the tenured generation.  With the CMS collector for
+   // the tenured generation, the size of the young generation
+   // can have an significant affect on the pause times for collecting the
+   // tenured generation.
+@@ -123,13 +120,13 @@
+   void set_first_after_collection() { _first_after_collection = true; }
+ 
+  protected:
+-  // Average of the sum of the concurrent times for 
++  // Average of the sum of the concurrent times for
+   // one collection in seconds.
+   AdaptiveWeightedAverage* _avg_concurrent_time;
+   // Average time between concurrent collections in seconds.
+   AdaptiveWeightedAverage* _avg_concurrent_interval;
+   // Average cost of the concurrent part of a collection
+-  // in seconds. 
++  // in seconds.
+   AdaptiveWeightedAverage* _avg_concurrent_gc_cost;
+ 
+   // Average of the initial pause of a concurrent collection in seconds.
+@@ -137,7 +134,7 @@
+   // Average of the remark pause of a concurrent collection in seconds.
+   AdaptivePaddedAverage* _avg_remark_pause;
+ 
+-  // Average of the stop-the-world (STW) (initial mark + remark) 
++  // Average of the stop-the-world (STW) (initial mark + remark)
+   // times in seconds for concurrent collections.
+   AdaptiveWeightedAverage* _avg_cms_STW_time;
+   // Average of the STW collection cost for concurrent collections.
+@@ -147,7 +144,7 @@
+   AdaptiveWeightedAverage* _avg_cms_free_at_sweep;
+   // Average of the bytes free at the end of the collection.
+   AdaptiveWeightedAverage* _avg_cms_free;
+-  // Average of the bytes promoted between cms collections. 
++  // Average of the bytes promoted between cms collections.
+   AdaptiveWeightedAverage* _avg_cms_promo;
+ 
+   // stop-the-world (STW) mark-sweep-compact
+@@ -179,7 +176,7 @@
+   // and a pause time as the dependent variable.
+   // For example _remark_pause_old_estimator
+   // is a fit of the old generation size as the
+-  // independent variable and the remark pause 
++  // independent variable and the remark pause
+   // as the dependent variable.
+   //   remark pause time vs. cms gen size
+   LinearLeastSquareFit* _remark_pause_old_estimator;
+@@ -194,11 +191,11 @@
+   int processor_count() const { return _processor_count; }
+   int concurrent_processor_count() const { return _concurrent_processor_count; }
+ 
+-  AdaptiveWeightedAverage* avg_concurrent_time() const { 
+-    return _avg_concurrent_time; 
++  AdaptiveWeightedAverage* avg_concurrent_time() const {
++    return _avg_concurrent_time;
+   }
+ 
+-  AdaptiveWeightedAverage* avg_concurrent_interval() const { 
++  AdaptiveWeightedAverage* avg_concurrent_interval() const {
+     return _avg_concurrent_interval;
+   }
+ 
+@@ -322,17 +319,17 @@
+ 
+   // This returns the maximum average for the concurrent, ms, and
+   // msc collections.  This is meant to be used for the calculation
+-  // of the decayed major gc cost and is not in general the 
++  // of the decayed major gc cost and is not in general the
+   // average of all the different types of major collections.
+   virtual double major_gc_interval_average_for_decay() const;
+ 
+  public:
+   CMSAdaptiveSizePolicy(size_t init_eden_size,
+-			size_t init_promo_size,
+-		        size_t init_survivor_size,
+-		        double max_gc_minor_pause_sec,
+-		        double max_gc_pause_sec,
+-			uint gc_cost_ratio);
++                        size_t init_promo_size,
++                        size_t init_survivor_size,
++                        double max_gc_minor_pause_sec,
++                        double max_gc_pause_sec,
++                        uint gc_cost_ratio);
+ 
+   // The timers for the stop-the-world phases measure a total
+   // stop-the-world time.  The timer is started and stopped
+@@ -344,7 +341,7 @@
+ 
+   // Methods for gathering information about the
+   // concurrent marking phase of the collection.
+-  // Records the mutator times and 
++  // Records the mutator times and
+   // resets the concurrent timer.
+   void concurrent_marking_begin();
+   // Resets concurrent phase timer in the begin methods and
+@@ -360,8 +357,8 @@
+   // Stops the concurrent phases time.  Gathers
+   // information and resets the timer.
+   void concurrent_phases_end(GCCause::Cause gc_cause,
+-			      size_t cur_eden,
+-			      size_t cur_promo);
++                              size_t cur_eden,
++                              size_t cur_promo);
+ 
+   // Methods for gather information about STW Mark-Sweep-Compact
+   void msc_collection_begin();
+@@ -386,7 +383,7 @@
+     return MAX2(0.0F, _avg_msc_gc_cost->average());
+   }
+ 
+-  // 
++  //
+   double compacting_gc_cost() const {
+     double result = MIN2(1.0, minor_gc_cost() + msc_gc_cost());
+     assert(result >= 0.0, "Both minor and major costs are non-negative");
+@@ -399,13 +396,13 @@
+    // Time begining and end of the marking phase for
+    // a synchronous MS collection.  A MS collection
+    // that finishes in the foreground can have started
+-   // in the background.  These methods capture the 
++   // in the background.  These methods capture the
+    // completion of the marking (after the initial
+    // marking) that is done in the foreground.
+    void ms_collection_marking_begin();
+    void ms_collection_marking_end(GCCause::Cause gc_cause);
+ 
+-   static elapsedTimer* concurrent_timer_ptr() { 
++   static elapsedTimer* concurrent_timer_ptr() {
+      return &_concurrent_timer;
+    }
+ 
+@@ -434,7 +431,7 @@
+   size_t generation_alignment() { return _generation_alignment; }
+ 
+   virtual void compute_young_generation_free_space(size_t cur_eden,
+-						   size_t max_eden_size);
++                                                   size_t max_eden_size);
+   // Calculates new survivor space size;  returns a new tenuring threshold
+   // value. Stores new survivor size in _survivor_size.
+   virtual int compute_survivor_space_size_and_threshold(
+@@ -444,7 +441,7 @@
+ 
+   virtual void compute_tenured_generation_free_space(size_t cur_tenured_free,
+                                            size_t max_tenured_available,
+-					   size_t cur_eden);
++                                           size_t cur_eden);
+ 
+   size_t eden_decrement_aligned_down(size_t cur_eden);
+   size_t eden_increment_aligned_up(size_t cur_eden);
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,28 +1,25 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)cmsCollectorPolicy.cpp	1.1 07/05/16 10:53:57 JVM"
+-#endif
+ /*
+  * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+- *   
++ *
+  * This code is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License version 2 only, as
+  * published by the Free Software Foundation.
+- *   
++ *
+  * This code is distributed in the hope that it will be useful, but WITHOUT
+  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+  * version 2 for more details (a copy is included in the LICENSE file that
+  * accompanied this code).
+- *  
++ *
+  * You should have received a copy of the GNU General Public License version
+  * 2 along with this work; if not, write to the Free Software Foundation,
+  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+- *   
++ *
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -41,7 +38,7 @@
+   _generations = new GenerationSpecPtr[number_of_generations()];
+   if (_generations == NULL)
+     vm_exit_during_initialization("Unable to allocate gen spec");
+-  
++
+   if (UseParNewGC && ParallelGCThreads > 0) {
+     if (UseAdaptiveSizePolicy) {
+       _generations[0] = new GenerationSpec(Generation::ASParNew,
+@@ -68,12 +65,12 @@
+ }
+ 
+ void ConcurrentMarkSweepPolicy::initialize_size_policy(size_t init_eden_size,
+-					       size_t init_promo_size,
+-					       size_t init_survivor_size) {
++                                               size_t init_promo_size,
++                                               size_t init_survivor_size) {
+   double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
+   double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
+   _size_policy = new CMSAdaptiveSizePolicy(init_eden_size,
+-					   init_promo_size,
++                                           init_promo_size,
+                                            init_survivor_size,
+                                            max_gc_minor_pause_sec,
+                                            max_gc_pause_sec,
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,28 +1,25 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)cmsCollectorPolicy.hpp	1.1 07/05/16 10:53:57 JVM"
+-#endif
+ /*
+  * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+- *   
++ *
+  * This code is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License version 2 only, as
+  * published by the Free Software Foundation.
+- *   
++ *
+  * This code is distributed in the hope that it will be useful, but WITHOUT
+  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+  * version 2 for more details (a copy is included in the LICENSE file that
+  * accompanied this code).
+- *  
++ *
+  * You should have received a copy of the GNU General Public License version
+  * 2 along with this work; if not, write to the Free Software Foundation,
+  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+- *   
++ *
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class ConcurrentMarkSweepPolicy : public TwoGenerationCollectorPolicy {
+@@ -37,8 +34,8 @@
+   void initialize_gc_policy_counters();
+ #if 1
+   virtual void initialize_size_policy(size_t init_eden_size,
+-				      size_t init_promo_size,
+-				      size_t init_survivor_size);
++                                      size_t init_promo_size,
++                                      size_t init_survivor_size);
+ #endif
+ 
+   // Returns true if the incremental mode is enabled.
+@@ -55,7 +52,7 @@
+   // the initialization of the generations.
+   void initialize_gc_policy_counters();
+ 
+-  virtual CollectorPolicy::Name kind() { 
+-    return CollectorPolicy::ASConcurrentMarkSweepPolicyKind; 
++  virtual CollectorPolicy::Name kind() {
++    return CollectorPolicy::ASConcurrentMarkSweepPolicyKind;
+   }
+ };
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.cpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.cpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)cmsGCAdaptivePolicyCounters.cpp	1.16 07/05/05 17:05:25 JVM"
+-#endif
+ /*
+  * Copyright 2004-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -40,25 +37,25 @@
+     EXCEPTION_MARK;
+     ResourceMark rm;
+ 
+-    const char* cname = 
++    const char* cname =
+       PerfDataManager::counter_name(name_space(), "cmsCapacity");
+     _cms_capacity_counter = PerfDataManager::create_variable(SUN_GC, cname,
+       PerfData::U_Bytes, (jlong) OldSize, CHECK);
+ #ifdef NOT_PRODUCT
+-    cname = 
++    cname =
+       PerfDataManager::counter_name(name_space(), "initialPause");
+     _initial_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
+-      PerfData::U_Ticks, 
++      PerfData::U_Ticks,
+       (jlong) cms_size_policy()->avg_initial_pause()->last_sample(),
+       CHECK);
+ 
+     cname = PerfDataManager::counter_name(name_space(), "remarkPause");
+     _remark_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
+-      PerfData::U_Ticks, 
++      PerfData::U_Ticks,
+       (jlong) cms_size_policy()->avg_remark_pause()->last_sample(),
+       CHECK);
+ #endif
+-    cname = 
++    cname =
+       PerfDataManager::counter_name(name_space(), "avgInitialPause");
+     _avg_initial_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
+       PerfData::U_Ticks,
+@@ -72,82 +69,82 @@
+       CHECK);
+ 
+     cname = PerfDataManager::counter_name(name_space(), "avgSTWGcCost");
+-    _avg_cms_STW_gc_cost_counter = PerfDataManager::create_variable(SUN_GC, 
++    _avg_cms_STW_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
+         cname,
+-        PerfData::U_Ticks, 
++        PerfData::U_Ticks,
+       (jlong) cms_size_policy()->avg_cms_STW_gc_cost()->average(),
+         CHECK);
+ 
+     cname = PerfDataManager::counter_name(name_space(), "avgSTWTime");
+-    _avg_cms_STW_time_counter = PerfDataManager::create_variable(SUN_GC, 
++    _avg_cms_STW_time_counter = PerfDataManager::create_variable(SUN_GC,
+         cname,
+-        PerfData::U_Ticks, 
++        PerfData::U_Ticks,
+       (jlong) cms_size_policy()->avg_cms_STW_time()->average(),
+         CHECK);
+ 
+ 
+     cname = PerfDataManager::counter_name(name_space(), "avgConcurrentTime");
+-    _avg_concurrent_time_counter = PerfDataManager::create_variable(SUN_GC, 
++    _avg_concurrent_time_counter = PerfDataManager::create_variable(SUN_GC,
+         cname,
+-        PerfData::U_Ticks, 
+-        (jlong) cms_size_policy()->avg_concurrent_time()->average(), 
++        PerfData::U_Ticks,
++        (jlong) cms_size_policy()->avg_concurrent_time()->average(),
+         CHECK);
+ 
+-    cname = 
++    cname =
+       PerfDataManager::counter_name(name_space(), "avgConcurrentInterval");
+-    _avg_concurrent_interval_counter = PerfDataManager::create_variable(SUN_GC, 
++    _avg_concurrent_interval_counter = PerfDataManager::create_variable(SUN_GC,
+         cname,
+-        PerfData::U_Ticks, 
+-        (jlong) cms_size_policy()->avg_concurrent_interval()->average(), 
++        PerfData::U_Ticks,
++        (jlong) cms_size_policy()->avg_concurrent_interval()->average(),
+         CHECK);
+ 
+     cname = PerfDataManager::counter_name(name_space(), "avgConcurrentGcCost");
+-    _avg_concurrent_gc_cost_counter = PerfDataManager::create_variable(SUN_GC, 
++    _avg_concurrent_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
+         cname,
+-        PerfData::U_Ticks, 
+-        (jlong) cms_size_policy()->avg_concurrent_gc_cost()->average(), 
++        PerfData::U_Ticks,
++        (jlong) cms_size_policy()->avg_concurrent_gc_cost()->average(),
+         CHECK);
+ 
+     cname = PerfDataManager::counter_name(name_space(), "avgCMSFreeAtSweep");
+-    _avg_cms_free_at_sweep_counter = PerfDataManager::create_variable(SUN_GC, 
++    _avg_cms_free_at_sweep_counter = PerfDataManager::create_variable(SUN_GC,
+         cname,
+-        PerfData::U_Ticks, 
+-        (jlong) cms_size_policy()->avg_cms_free_at_sweep()->average(), 
++        PerfData::U_Ticks,
++        (jlong) cms_size_policy()->avg_cms_free_at_sweep()->average(),
+         CHECK);
+ 
+     cname = PerfDataManager::counter_name(name_space(), "avgCMSFree");
+-    _avg_cms_free_counter = PerfDataManager::create_variable(SUN_GC, 
++    _avg_cms_free_counter = PerfDataManager::create_variable(SUN_GC,
+         cname,
+-        PerfData::U_Ticks, 
+-        (jlong) cms_size_policy()->avg_cms_free()->average(), 
++        PerfData::U_Ticks,
++        (jlong) cms_size_policy()->avg_cms_free()->average(),
+         CHECK);
+ 
+     cname = PerfDataManager::counter_name(name_space(), "avgCMSPromo");
+-    _avg_cms_promo_counter = PerfDataManager::create_variable(SUN_GC, 
++    _avg_cms_promo_counter = PerfDataManager::create_variable(SUN_GC,
+         cname,
+-        PerfData::U_Ticks, 
+-        (jlong) cms_size_policy()->avg_cms_promo()->average(), 
++        PerfData::U_Ticks,
++        (jlong) cms_size_policy()->avg_cms_promo()->average(),
+         CHECK);
+ 
+     cname = PerfDataManager::counter_name(name_space(), "avgMscPause");
+-    _avg_msc_pause_counter = PerfDataManager::create_variable(SUN_GC, 
++    _avg_msc_pause_counter = PerfDataManager::create_variable(SUN_GC,
+         cname,
+-        PerfData::U_Ticks, 
+-        (jlong) cms_size_policy()->avg_msc_pause()->average(), 
++        PerfData::U_Ticks,
++        (jlong) cms_size_policy()->avg_msc_pause()->average(),
+         CHECK);
+ 
+     cname = PerfDataManager::counter_name(name_space(), "avgMscInterval");
+-    _avg_msc_interval_counter = PerfDataManager::create_variable(SUN_GC, 
++    _avg_msc_interval_counter = PerfDataManager::create_variable(SUN_GC,
+         cname,
+-        PerfData::U_Ticks, 
+-        (jlong) cms_size_policy()->avg_msc_interval()->average(), 
++        PerfData::U_Ticks,
++        (jlong) cms_size_policy()->avg_msc_interval()->average(),
+         CHECK);
+ 
+     cname = PerfDataManager::counter_name(name_space(), "mscGcCost");
+-    _msc_gc_cost_counter = PerfDataManager::create_variable(SUN_GC, 
++    _msc_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
+         cname,
+-        PerfData::U_Ticks, 
+-        (jlong) cms_size_policy()->avg_msc_gc_cost()->average(), 
++        PerfData::U_Ticks,
++        (jlong) cms_size_policy()->avg_msc_gc_cost()->average(),
+         CHECK);
+ 
+     cname = PerfDataManager::counter_name(name_space(), "avgMsPause");
+@@ -206,13 +203,13 @@
+       PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
+         (jlong) cms_size_policy()->initial_pause_old_slope(), CHECK);
+ 
+-    cname = 
++    cname =
+       PerfDataManager::counter_name(name_space(), "remarkPauseYoungSlope") ;
+     _remark_pause_young_slope_counter =
+       PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
+         (jlong) cms_size_policy()->remark_pause_young_slope(), CHECK);
+ 
+-    cname = 
++    cname =
+       PerfDataManager::counter_name(name_space(), "initialPauseYoungSlope");
+     _initial_pause_young_slope_counter =
+       PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
+@@ -220,7 +217,7 @@
+ 
+ 
+   }
+-  assert(size_policy()->is_gc_cms_adaptive_size_policy(), 
++  assert(size_policy()->is_gc_cms_adaptive_size_policy(),
+     "Wrong type of size policy");
+ }
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)cmsGCAdaptivePolicyCounters.hpp	1.16 07/05/05 17:05:25 JVM"
+-#endif
+ /*
+  * Copyright 2004-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // CMSGCAdaptivePolicyCounters is a holder class for performance counters
+@@ -86,7 +83,7 @@
+   // MSC collection and the beginning of the next
+   // MSC collection.
+   PerfVariable* _avg_msc_interval_counter;
+-  // Average for the GC cost of a MSC collection based on 
++  // Average for the GC cost of a MSC collection based on
+   // _avg_msc_pause_counter and _avg_msc_interval_counter.
+   PerfVariable* _msc_gc_cost_counter;
+ 
+@@ -97,7 +94,7 @@
+   // MS collection and the beginning of the next
+   // MS collection.
+   PerfVariable* _avg_ms_interval_counter;
+-  // Average for the GC cost of a MS collection based on 
++  // Average for the GC cost of a MS collection based on
+   // _avg_ms_pause_counter and _avg_ms_interval_counter.
+   PerfVariable* _ms_gc_cost_counter;
+ 
+@@ -108,7 +105,7 @@
+   // Padded average of the bytes promoted per minor colleciton
+   PerfVariable* _promoted_padded_avg_counter;
+ 
+-  // See description of the _change_young_gen_for_maj_pauses 
++  // See description of the _change_young_gen_for_maj_pauses
+   // variable recently in cmsAdaptiveSizePolicy.hpp.
+   PerfVariable* _change_young_gen_for_maj_pauses_counter;
+ 
+@@ -119,16 +116,16 @@
+   PerfVariable* _remark_pause_young_slope_counter;
+   PerfVariable* _initial_pause_young_slope_counter;
+ 
+-  CMSAdaptiveSizePolicy* cms_size_policy() { 
+-    assert(_size_policy->kind() == 
++  CMSAdaptiveSizePolicy* cms_size_policy() {
++    assert(_size_policy->kind() ==
+       AdaptiveSizePolicy::_gc_cms_adaptive_size_policy,
+       "Wrong size policy");
+-    return (CMSAdaptiveSizePolicy*)_size_policy; 
++    return (CMSAdaptiveSizePolicy*)_size_policy;
+   }
+ 
+   inline void update_avg_cms_STW_time_counter() {
+     _avg_cms_STW_time_counter->set_value(
+-      (jlong) (cms_size_policy()->avg_cms_STW_time()->average() * 
++      (jlong) (cms_size_policy()->avg_cms_STW_time()->average() *
+       (double) MILLIUNITS));
+   }
+ 
+@@ -139,37 +136,37 @@
+ 
+   inline void update_avg_initial_pause_counter() {
+     _avg_initial_pause_counter->set_value(
+-      (jlong) (cms_size_policy()->avg_initial_pause()->average() * 
++      (jlong) (cms_size_policy()->avg_initial_pause()->average() *
+       (double) MILLIUNITS));
+   }
+ #ifdef NOT_PRODUCT
+   inline void update_avg_remark_pause_counter() {
+     _avg_remark_pause_counter->set_value(
+-      (jlong) (cms_size_policy()-> avg_remark_pause()->average() * 
++      (jlong) (cms_size_policy()-> avg_remark_pause()->average() *
+       (double) MILLIUNITS));
+   }
+ 
+   inline void update_initial_pause_counter() {
+     _initial_pause_counter->set_value(
+-      (jlong) (cms_size_policy()->avg_initial_pause()->average() * 
++      (jlong) (cms_size_policy()->avg_initial_pause()->average() *
+       (double) MILLIUNITS));
+   }
+ #endif
+   inline void update_remark_pause_counter() {
+     _remark_pause_counter->set_value(
+-      (jlong) (cms_size_policy()-> avg_remark_pause()->last_sample() * 
++      (jlong) (cms_size_policy()-> avg_remark_pause()->last_sample() *
+       (double) MILLIUNITS));
+   }
+ 
+   inline void update_avg_concurrent_time_counter() {
+     _avg_concurrent_time_counter->set_value(
+-      (jlong) (cms_size_policy()->avg_concurrent_time()->last_sample() * 
++      (jlong) (cms_size_policy()->avg_concurrent_time()->last_sample() *
+       (double) MILLIUNITS));
+   }
+ 
+   inline void update_avg_concurrent_interval_counter() {
+     _avg_concurrent_interval_counter->set_value(
+-      (jlong) (cms_size_policy()->avg_concurrent_interval()->average() * 
++      (jlong) (cms_size_policy()->avg_concurrent_interval()->average() *
+       (double) MILLIUNITS));
+   }
+ 
+@@ -201,13 +198,13 @@
+ 
+   inline void update_avg_msc_pause_counter() {
+     _avg_msc_pause_counter->set_value(
+-      (jlong) (cms_size_policy()->avg_msc_pause()->average() * 
++      (jlong) (cms_size_policy()->avg_msc_pause()->average() *
+       (double) MILLIUNITS));
+   }
+ 
+   inline void update_avg_msc_interval_counter() {
+     _avg_msc_interval_counter->set_value(
+-      (jlong) (cms_size_policy()->avg_msc_interval()->average() * 
++      (jlong) (cms_size_policy()->avg_msc_interval()->average() *
+       (double) MILLIUNITS));
+   }
+ 
+@@ -218,13 +215,13 @@
+ 
+   inline void update_avg_ms_pause_counter() {
+     _avg_ms_pause_counter->set_value(
+-      (jlong) (cms_size_policy()->avg_ms_pause()->average() * 
++      (jlong) (cms_size_policy()->avg_ms_pause()->average() *
+       (double) MILLIUNITS));
+   }
+ 
+   inline void update_avg_ms_interval_counter() {
+     _avg_ms_interval_counter->set_value(
+-      (jlong) (cms_size_policy()->avg_ms_interval()->average() * 
++      (jlong) (cms_size_policy()->avg_ms_interval()->average() *
+       (double) MILLIUNITS));
+   }
+ 
+@@ -297,7 +294,7 @@
+     _cms_capacity_counter->set_value(size_in_bytes);
+   }
+ 
+-  virtual GCPolicyCounters::Name kind() const { 
+-    return GCPolicyCounters::CMSGCAdaptivePolicyCountersKind; 
++  virtual GCPolicyCounters::Name kind() const {
++    return GCPolicyCounters::CMSGCAdaptivePolicyCountersKind;
+   }
+ };
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.cpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.cpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)cmsLockVerifier.cpp	1.14 07/05/05 17:05:44 JVM"
+-#endif
+ /*
+  * Copyright 2002-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -48,7 +45,7 @@
+       // This test might have to change in the future, if there can be
+       // multiple peer CMS threads.  But for now, if we're testing the CMS
+       assert(myThread == ConcurrentMarkSweepThread::cmst(),
+-	     "In CMS, CMS thread is the only Conc GC thread.");
++             "In CMS, CMS thread is the only Conc GC thread.");
+       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
+              "CMS thread should have CMS token");
+     } else if (myThread->is_VM_thread()) {
+@@ -61,7 +58,7 @@
+       assert(myThread->is_GC_task_thread(), "Unexpected thread type");
+     }
+     return;
+-  } 
++  }
+ 
+   if (ParallelGCThreads == 0) {
+     assert_lock_strong(lock);
+@@ -96,4 +93,3 @@
+   }
+ }
+ #endif
+-
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)cmsLockVerifier.hpp	1.9 07/05/05 17:05:44 JVM"
+-#endif
+ /*
+  * Copyright 2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ ///////////// Locking verification specific to CMS //////////////
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,28 +1,25 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)cmsOopClosures.hpp	1.2 07/05/16 16:53:01 JVM"
+-#endif
+ /*
+  * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+- *   
++ *
+  * This code is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License version 2 only, as
+  * published by the Free Software Foundation.
+- *   
++ *
+  * This code is distributed in the hope that it will be useful, but WITHOUT
+  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+  * version 2 for more details (a copy is included in the LICENSE file that
+  * accompanied this code).
+- *  
++ *
+  * You should have received a copy of the GNU General Public License version
+  * 2 along with this work; if not, write to the Free Software Foundation,
+  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+- *   
++ *
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ /////////////////////////////////////////////////////////////////
+@@ -32,8 +29,10 @@
+ class CMSBitMap;
+ class CMSMarkStack;
+ class CMSCollector;
+-class OopTaskQueue;
+-class OopTaskQueueSet;
++template<class E> class GenericTaskQueue;
++typedef GenericTaskQueue<oop> OopTaskQueue;
++template<class E> class GenericTaskQueueSet;
++typedef GenericTaskQueueSet<oop> OopTaskQueueSet;
+ class MarkFromRootsClosure;
+ class Par_MarkFromRootsClosure;
+ 
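The hunk above swaps opaque class declarations for forward declarations of the underlying queue templates plus typedefs, because a name that is really a template instantiation cannot be forward-declared as a plain class. A minimal sketch of the pattern, with illustrative names rather than the HotSpot definitions:

    // Forward-declare the template, then name the instantiation we need;
    // the full definition is only required where members are accessed.
    template<class E> class GenericTaskQueue;
    typedef GenericTaskQueue<int> IntTaskQueue;

    struct Holder {
      IntTaskQueue* _queue;  // a pointer to the incomplete type is fine here
    };

This keeps headers that only pass queue pointers around from having to pull in the full task-queue definition.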
+@@ -97,7 +96,7 @@
+                      CMSMarkStack*  revisit_stack,
+                      bool           concurrent_precleaning);
+ 
+-  void do_oop(oop* p);    
++  void do_oop(oop* p);
+   void do_oop_nv(oop* p)  { PushAndMarkClosure::do_oop(p); }
+   bool do_header() { return true; }
+   Prefetch::style prefetch_style() {
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,28 +1,25 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)cmsOopClosures.inline.hpp	1.1 07/05/16 10:52:51 JVM"
+-#endif
+ /*
+  * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+- *   
++ *
+  * This code is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License version 2 only, as
+  * published by the Free Software Foundation.
+- *   
++ *
+  * This code is distributed in the hope that it will be useful, but WITHOUT
+  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+  * version 2 for more details (a copy is included in the LICENSE file that
+  * accompanied this code).
+- *  
++ *
+  * You should have received a copy of the GNU General Public License version
+  * 2 along with this work; if not, write to the Free Software Foundation,
+  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+- *   
++ *
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Trim our work_queue so its length is below max at return
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.cpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.cpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,28 +1,25 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)cmsPermGen.cpp	1.2 07/05/16 16:53:01 JVM"
+-#endif
+ /*
+  * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+- *   
++ *
+  * This code is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License version 2 only, as
+  * published by the Free Software Foundation.
+- *   
++ *
+  * This code is distributed in the hope that it will be useful, but WITHOUT
+  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+  * version 2 for more details (a copy is included in the LICENSE file that
+  * accompanied this code).
+- *  
++ *
+  * You should have received a copy of the GNU General Public License version
+  * 2 along with this work; if not, write to the Free Software Foundation,
+  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+- *   
++ *
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -52,7 +49,7 @@
+     return mem_allocate_work(size);
+   }
+ }
+-  
++
+ HeapWord* CMSPermGen::mem_allocate_work(size_t size) {
+   assert(!_gen->freelistLock()->owned_by_self(), "Potential deadlock");
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.hpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,28 +1,25 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)cmsPermGen.hpp	1.1 07/05/02 16:12:51 JVM"
+-#endif
+ /*
+  * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+- *   
++ *
+  * This code is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License version 2 only, as
+  * published by the Free Software Foundation.
+- *   
++ *
+  * This code is distributed in the hope that it will be useful, but WITHOUT
+  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+  * version 2 for more details (a copy is included in the LICENSE file that
+  * accompanied this code).
+- *  
++ *
+  * You should have received a copy of the GNU General Public License version
+  * 2 along with this work; if not, write to the Free Software Foundation,
+  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+- *   
++ *
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class CardTableRS;   // fwd decl
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)compactibleFreeListSpace.cpp	1.143 07/07/17 11:43:33 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -49,7 +46,7 @@
+   // are acquired in the program text. This is true today.
+   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
+   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
+-			  "CompactibleFreeListSpace._dict_par_lock", true),
++                          "CompactibleFreeListSpace._dict_par_lock", true),
+   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
+                     CMSRescanMultiple),
+   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
+@@ -93,14 +90,14 @@
+     // The small linAB initially has all the space and will allocate
+     // a chunk of any size.
+     HeapWord* addr = (HeapWord*) fc;
+-    _smallLinearAllocBlock.set(addr, fc->size() , 
++    _smallLinearAllocBlock.set(addr, fc->size() ,
+       1024*SmallForLinearAlloc, fc->size());
+     // Note that _unallocated_block is not updated here.
+     // Allocations from the linear allocation block should
+     // update it.
+   } else {
+-    _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, 
+-			       SmallForLinearAlloc);
++    _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
++                               SmallForLinearAlloc);
+   }
+   // CMSIndexedFreeListReplenish should be at least 1
+   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
+@@ -116,10 +113,10 @@
+   if (ParallelGCThreads > 0) {
+     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
+       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
+-					      "a freelist par lock",
+-					      true);
+-      if (_indexedFreeListParLocks[i] == NULL) 
+-	vm_exit_during_initialization("Could not allocate a par lock");
++                                              "a freelist par lock",
++                                              true);
++      if (_indexedFreeListParLocks[i] == NULL)
++        vm_exit_during_initialization("Could not allocate a par lock");
+       DEBUG_ONLY(
+         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
+       )
+@@ -131,8 +128,8 @@
+ // Like CompactibleSpace forward() but always calls cross_threshold() to
+ // update the block offset table.  Removed initialize_threshold call because
+ // CFLS does not use a block offset array for contiguous spaces.
+-HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size, 
+-				    CompactPoint* cp, HeapWord* compact_top) {
++HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
++                                    CompactPoint* cp, HeapWord* compact_top) {
+   // q is alive
+   // First check if we should switch compaction space
+   assert(this == cp->space, "'this' should be current compaction space.");
+@@ -175,7 +172,7 @@
+     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
+   } else {
+     // if the object isn't moving we can just set the mark to the default
+-    // mark and handle it specially later on.  
++    // mark and handle it specially later on.
+     q->init_mark();
+     assert(q->forwardee() == NULL, "should be forwarded to NULL");
+   }
+@@ -222,7 +219,7 @@
+ 
+ void CompactibleFreeListSpace::resetIndexedFreeListArray() {
+   for (int i = 1; i < IndexSetSize; i++) {
+-    assert(_indexedFreeList[i].size() == (size_t) i, 
++    assert(_indexedFreeList[i].size() == (size_t) i,
+       "Indexed free list sizes are incorrect");
+     _indexedFreeList[i].reset(IndexSetSize);
+     assert(_indexedFreeList[i].count() == 0, "reset check failed");
+@@ -316,7 +313,7 @@
+         total_list_count++;
+       }
+       assert(total_list_count ==  _indexedFreeList[i].count(),
+-	"Count in list is incorrect");
++        "Count in list is incorrect");
+     )
+     count += _indexedFreeList[i].count();
+   }
+@@ -426,8 +423,8 @@
+          fc = fc->next()) {
+         recount += 1;
+       }
+-      assert(recount == _indexedFreeList[i].count(), 
+-	"Incorrect count in list");
++      assert(recount == _indexedFreeList[i].count(),
++        "Incorrect count in list");
+     )
+     res += _indexedFreeList[i].count();
+   }
+@@ -492,14 +489,14 @@
+   // Override.
+ #define walk_mem_region_with_cl_DECL(ClosureType)                       \
+   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
+-				       HeapWord* bottom, HeapWord* top, \
+-				       ClosureType* cl);                \
++                                       HeapWord* bottom, HeapWord* top, \
++                                       ClosureType* cl);                \
+       void walk_mem_region_with_cl_par(MemRegion mr,                    \
+-				       HeapWord* bottom, HeapWord* top, \
+-				       ClosureType* cl);                \
++                                       HeapWord* bottom, HeapWord* top, \
++                                       ClosureType* cl);                \
+     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
+-				       HeapWord* bottom, HeapWord* top, \
+-				       ClosureType* cl)
++                                       HeapWord* bottom, HeapWord* top, \
++                                       ClosureType* cl)
+   walk_mem_region_with_cl_DECL(OopClosure);
+   walk_mem_region_with_cl_DECL(FilteringClosure);
+ 
+@@ -507,8 +504,8 @@
+   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
+                       CMSCollector* collector,
+                       OopClosure* cl,
+-  		      CardTableModRefBS::PrecisionStyle precision,
+-		      HeapWord* boundary) :
++                      CardTableModRefBS::PrecisionStyle precision,
++                      HeapWord* boundary) :
+     Filtering_DCTOC(sp, cl, precision, boundary),
+     _cfls(sp), _collector(collector) {}
+ };
+@@ -517,9 +514,9 @@
+ // space is a CompactibleFreeListSpace.
+ #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
+ void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
+-						 HeapWord* bottom,              \
+-						 HeapWord* top,                 \
+-						 ClosureType* cl) {             \
++                                                 HeapWord* bottom,              \
++                                                 HeapWord* top,                 \
++                                                 ClosureType* cl) {             \
+    if (SharedHeap::heap()->n_par_threads() > 0) {                               \
+      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
+    } else {                                                                     \
+@@ -527,9 +524,9 @@
+    }                                                                            \
+ }                                                                               \
+ void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
+-						      HeapWord* bottom,         \
+-						      HeapWord* top,            \
+-						      ClosureType* cl) {        \
++                                                      HeapWord* bottom,         \
++                                                      HeapWord* top,            \
++                                                      ClosureType* cl) {        \
+   /* Skip parts that are before "mr", in case "block_start" sent us             \
+      back too far. */                                                           \
+   HeapWord* mr_start = mr.start();                                              \
+@@ -543,9 +540,9 @@
+                                                                                 \
+   while (bottom < top) {                                                        \
+     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
+-	!_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
++        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
+                     oop(bottom)) &&                                             \
+-	!_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
++        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
+       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
+       bottom += _cfls->adjustObjectSize(word_sz);                               \
+     } else {                                                                    \
+@@ -554,9 +551,9 @@
+   }                                                                             \
+ }                                                                               \
+ void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
+-						        HeapWord* bottom,       \
+-						        HeapWord* top,          \
+-						        ClosureType* cl) {      \
++                                                        HeapWord* bottom,       \
++                                                        HeapWord* top,          \
++                                                        ClosureType* cl) {      \
+   /* Skip parts that are before "mr", in case "block_start" sent us             \
+      back too far. */                                                           \
+   HeapWord* mr_start = mr.start();                                              \
+@@ -570,9 +567,9 @@
+                                                                                 \
+   while (bottom < top) {                                                        \
+     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
+-	!_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
++        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
+                     oop(bottom)) &&                                             \
+-	!_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
++        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
+       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
+       bottom += _cfls->adjustObjectSize(word_sz);                               \
+     } else {                                                                    \
+@@ -589,8 +586,8 @@
+ 
+ DirtyCardToOopClosure*
+ CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
+-				      CardTableModRefBS::PrecisionStyle precision,
+-				      HeapWord* boundary) {
++                                      CardTableModRefBS::PrecisionStyle precision,
++                                      HeapWord* boundary) {
+   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
+ }
+ 
+@@ -665,15 +662,15 @@
+     obj_addr += block_size(obj_addr);
+     // If "obj_addr" is not greater than top, then the
+     // entire object "obj" is within the region.
+-    if (obj_addr <= t) {    
++    if (obj_addr <= t) {
+       if (block_is_obj(obj)) {
+         oop(obj)->oop_iterate(cl);
+-      }               
++      }
+     } else {
+       // "obj" extends beyond end of region
+       if (block_is_obj(obj)) {
+         oop(obj)->oop_iterate(&smr_blk);
+-      }    
++      }
+       break;
+     }
+   }
+@@ -806,7 +803,7 @@
+   NOT_PRODUCT(verify_objects_initialized());
+   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
+   // This must be volatile, or else there is a danger that the compiler
+-  // will compile the code below into a sometimes-infinite loop, by keeping 
++  // will compile the code below into a sometimes-infinite loop, by keeping
+   // the value read the first time in a register.
+   oop o = (oop)p;
+   volatile oop* second_word_addr = o->klass_addr();
+@@ -1015,13 +1012,13 @@
+   HeapWord* res = NULL;
+   assert(size == adjustObjectSize(size),
+          "use adjustObjectSize() before calling into allocate()");
+-  
++
+   if (_adaptive_freelists) {
+     res = allocate_adaptive_freelists(size);
+   } else {  // non-adaptive free lists
+     res = allocate_non_adaptive_freelists(size);
+   }
+-  
++
+   if (res != NULL) {
+     // check that res does lie in this space!
+     assert(is_in_reserved(res), "Not in this space!");
+@@ -1033,12 +1030,12 @@
+     assert(oop(fc)->klass() == NULL, "should look uninitialized");
+     // Verify that the block offset table shows this to
+     // be a single block, but not one which is unallocated.
+-    _bt.verify_single_block(res, size); 
++    _bt.verify_single_block(res, size);
+     _bt.verify_not_unallocated(res, size);
+     // mangle a just allocated object with a distinct pattern.
+     debug_only(fc->mangleAllocated(size));
+   }
+-  
++
+   return res;
+ }
+ 
+@@ -1053,7 +1050,7 @@
+   if (res == NULL) {
+     if (size < SmallForDictionary) {
+       res = (HeapWord*) getChunkFromIndexedFreeList(size);
+-    } else { 
++    } else {
+       // else get it from the big dictionary; if even this doesn't
+       // work we are out of luck.
+       res = (HeapWord*)getChunkFromDictionaryExact(size);
+@@ -1068,7 +1065,7 @@
+   HeapWord* res = NULL;
+   assert(size == adjustObjectSize(size),
+          "use adjustObjectSize() before calling into allocate()");
+-  
++
+   // Strategy
+   //   if small
+   //     exact size from small object indexed list if small
+@@ -1081,20 +1078,20 @@
+   if (size < IndexSetSize) {
+     res = (HeapWord*) getChunkFromIndexedFreeList(size);
+     if(res != NULL) {
+-      assert(res != (HeapWord*)_indexedFreeList[size].head(), 
++      assert(res != (HeapWord*)_indexedFreeList[size].head(),
+         "Not removed from free list");
+       // no block offset table adjustment is necessary on blocks in
+       // the indexed lists.
+ 
+     // Try allocating from the small LinAB
+     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
+-	(res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
+-	// if successful, the above also adjusts block offset table
+-	// Note that this call will refill the LinAB to 
+-	// satisfy the request.  This is different that
+-	// evm.  
++        (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
++        // if successful, the above also adjusts block offset table
++        // Note that this call will refill the LinAB to
++        // satisfy the request.  This is different than
++        // evm.
+         // Don't record chunk off a LinAB?  smallSplitBirth(size);
+-  
++
+     } else {
+       // Raid the exact free lists larger than size, even if they are not
+       // overpopulated.
+@@ -1105,12 +1102,12 @@
+     res = (HeapWord*) getChunkFromDictionaryExact(size);
+     if (res == NULL) {
+       // Try hard not to fail since an allocation failure will likely
+-      // trigger a synchronous GC.  Try to get the space from the 
++      // trigger a synchronous GC.  Try to get the space from the
+       // allocation blocks.
+       res = getChunkFromSmallLinearAllocBlockRemainder(size);
+     }
+   }
+-  
++
+   return res;
+ }
+ 
+@@ -1170,7 +1167,7 @@
+   ShouldNotReachHere();
+ }
+ 
+-bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) 
++bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc)
+   const {
+   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
+   return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
+@@ -1246,7 +1243,7 @@
+ CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
+   assert_locked();
+   assert(size >= MinChunkSize, "minimum chunk size");
+-  assert(size <  _smallLinearAllocBlock._allocation_size_limit, 
++  assert(size <  _smallLinearAllocBlock._allocation_size_limit,
+     "maximum from smallLinearAllocBlock");
+   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
+ }
+@@ -1294,7 +1291,7 @@
+   blk->_ptr = NULL; blk->_word_size = 0;
+   refillLinearAllocBlock(blk);
+   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
+-	 "block was replenished");
++         "block was replenished");
+   if (res != NULL) {
+     splitBirth(size);
+     repairLinearAllocBlock(blk);
+@@ -1314,8 +1311,8 @@
+ }
+ 
+ HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
+-					LinearAllocBlock* blk, 
+-					size_t size) {
++                                        LinearAllocBlock* blk,
++                                        size_t size) {
+   assert_locked();
+   assert(size >= MinChunkSize, "too small");
+ 
+@@ -1338,11 +1335,11 @@
+     // Above must occur before BOT is updated below.
+     _bt.split_block(res, blk_size, size);  // adjust block offset table
+     _bt.allocated(res, size);
+-  } 
++  }
+   return res;
+ }
+ 
+-FreeChunk* 
++FreeChunk*
+ CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
+   assert_locked();
+   assert(size < SmallForDictionary, "just checking");
+@@ -1368,28 +1365,28 @@
+       // Replenish list.
+       //
+       // Things tried that failed.
+-      //   Tried allocating out of the two LinAB's first before 
+-      // replenishing lists.  
++      //   Tried allocating out of the two LinAB's first before
++      // replenishing lists.
+       //   Tried small linAB of size 256 (size in indexed list)
+       // and replenishing indexed lists from the small linAB.
+       //
+       FreeChunk* newFc = NULL;
+       size_t replenish_size = CMSIndexedFreeListReplenish * size;
+       if (replenish_size < SmallForDictionary) {
+-	// Do not replenish from an underpopulated size.
+-	if (_indexedFreeList[replenish_size].surplus() > 0 &&
+-	    _indexedFreeList[replenish_size].head() != NULL) {
+-          newFc = 
++        // Do not replenish from an underpopulated size.
++        if (_indexedFreeList[replenish_size].surplus() > 0 &&
++            _indexedFreeList[replenish_size].head() != NULL) {
++          newFc =
+             _indexedFreeList[replenish_size].getChunkAtHead();
+-	} else {
+-	  newFc = bestFitSmall(replenish_size);
+-	}
++        } else {
++          newFc = bestFitSmall(replenish_size);
++        }
+       }
+       if (newFc != NULL) {
+-	splitDeath(replenish_size);
++        splitDeath(replenish_size);
+       } else if (replenish_size > size) {
+         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
+-        newFc = 
++        newFc =
+           getChunkFromIndexedFreeListHelper(replenish_size);
+       }
+       if (newFc != NULL) {
+@@ -1397,30 +1394,30 @@
+         size_t i;
+         FreeChunk *curFc, *nextFc;
+         // carve up and link blocks 0, ..., CMSIndexedFreeListReplenish - 2
+-	// The last chunk is not added to the lists but is returned as the
+-	// free chunk.
+-        for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size), 
+-  	     i = 0;
++        // The last chunk is not added to the lists but is returned as the
++        // free chunk.
++        for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
++             i = 0;
+              i < (CMSIndexedFreeListReplenish - 1);
+-             curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size), 
+-  	     i++) {
++             curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
++             i++) {
+           curFc->setSize(size);
+-  	  // Don't record this as a return in order to try and
+-  	  // determine the "returns" from a GC.
++          // Don't record this as a return in order to try and
++          // determine the "returns" from a GC.
+           _bt.verify_not_unallocated((HeapWord*) fc, size);
+-  	  _indexedFreeList[size].returnChunkAtTail(curFc, false);
+-  	  _bt.mark_block((HeapWord*)curFc, size);
+-  	  splitBirth(size);
+-  	  // Don't record the initial population of the indexed list
+-  	  // as a split birth.
++          _indexedFreeList[size].returnChunkAtTail(curFc, false);
++          _bt.mark_block((HeapWord*)curFc, size);
++          splitBirth(size);
++          // Don't record the initial population of the indexed list
++          // as a split birth.
+         }
+ 
+         // check that the arithmetic was OK above
+         assert((HeapWord*)nextFc == (HeapWord*)newFc + replenish_size,
+           "inconsistency in carving newFc");
+         curFc->setSize(size);
+-  	_bt.mark_block((HeapWord*)curFc, size);
+-  	splitBirth(size);
++        _bt.mark_block((HeapWord*)curFc, size);
++        splitBirth(size);
+         return curFc;
+       }
+     }
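The loop above carves one replenish_size chunk into CMSIndexedFreeListReplenish blocks of size words each, queuing every block but the last and handing the final block back to the caller; the trailing assert checks that the walk lands exactly at newFc + replenish_size. A self-contained sketch of the same pointer arithmetic, with a plain vector standing in for the indexed free list (all names here are illustrative, not the HotSpot API):

    #include <cstddef>
    #include <vector>

    typedef unsigned long HeapWord;  // stand-in for HotSpot's HeapWord

    // Carve `n` blocks of `word_sz` words out of `chunk`; stash the
    // first n-1 blocks and return the last one to the caller.
    HeapWord* carve(HeapWord* chunk, size_t n, size_t word_sz,
                    std::vector<HeapWord*>& freelist) {
      HeapWord* cur = chunk;
      for (size_t i = 0; i + 1 < n; i++) {
        freelist.push_back(cur);   // analogous to returnChunkAtTail()
        cur += word_sz;            // nextFc = curFc + size
      }
      return cur;                  // == chunk + (n - 1) * word_sz
    }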
+@@ -1466,7 +1463,7 @@
+   if (fc->size() < size + MinChunkSize) {
+     // Return the chunk to the dictionary and go get a bigger one.
+     returnChunkToDictionary(fc);
+-    fc = _dictionary->getChunk(size + MinChunkSize); 
++    fc = _dictionary->getChunk(size + MinChunkSize);
+     if (fc == NULL) {
+       return NULL;
+     }
+@@ -1640,9 +1637,9 @@
+         // Found a list with surplus, reset original hint
+         // and split out a free chunk which is returned.
+         _indexedFreeList[start].set_hint(hint);
+-	FreeChunk* res = getFromListGreater(fl, numWords);
+-	assert(res == NULL || res->isFree(), 
+-	  "Should be returning a free chunk");
++        FreeChunk* res = getFromListGreater(fl, numWords);
++        assert(res == NULL || res->isFree(),
++          "Should be returning a free chunk");
+         return res;
+       }
+       hint = fl->hint(); /* keep looking */
+@@ -1660,16 +1657,16 @@
+   size_t oldNumWords = curr->size();
+   assert(numWords >= MinChunkSize, "Word size is too small");
+   assert(curr != NULL, "List is empty");
+-  assert(oldNumWords >= numWords + MinChunkSize, 
+-	"Size of chunks in the list is too small");
+- 
++  assert(oldNumWords >= numWords + MinChunkSize,
++        "Size of chunks in the list is too small");
++
+   fl->removeChunk(curr);
+-  // recorded indirectly by splitChunkAndReturnRemainder - 
++  // recorded indirectly by splitChunkAndReturnRemainder -
+   // smallSplit(oldNumWords, numWords);
+   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
+   // Does anything have to be done for the remainder in terms of
+   // fixing the card table?
+-  assert(new_chunk == NULL || new_chunk->isFree(), 
++  assert(new_chunk == NULL || new_chunk->isFree(),
+     "Should be returning a free chunk");
+   return new_chunk;
+ }
+@@ -1747,7 +1744,7 @@
+   // mark the "end" of the used space at the time of this call;
+   // note, however, that promoted objects from this point
+   // on are tracked in the _promoInfo below.
+-  set_saved_mark_word(BlockOffsetArrayUseUnallocatedBlock ? 
++  set_saved_mark_word(BlockOffsetArrayUseUnallocatedBlock ?
+                       unallocated_block() : end());
+   // inform allocator that promotions should be tracked.
+   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
+@@ -1757,7 +1754,7 @@
+ bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
+   assert(_promoInfo.tracking(), "No preceding save_marks?");
+   guarantee(SharedHeap::heap()->n_par_threads() == 0,
+-	    "Shouldn't be called (yet) during parallel part of gc.");
++            "Shouldn't be called (yet) during parallel part of gc.");
+   return _promoInfo.noPromotions();
+ }
+ 
+@@ -1780,10 +1777,10 @@
+ ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
+ 
+ //////////////////////////////////////////////////////////////////////////////
+-// We go over the list of promoted objects, removing each from the list,    
+-// and applying the closure (this may, in turn, add more elements to  
+-// the tail of the promoted list, and these newly added objects will 
+-// also be processed) until the list is empty.                      
++// We go over the list of promoted objects, removing each from the list,
++// and applying the closure (this may, in turn, add more elements to
++// the tail of the promoted list, and these newly added objects will
++// also be processed) until the list is empty.
+ // To aid verification and debugging, in the non-product builds
+ // we actually forward _promoHead each time we process a promoted oop.
+ // Note that this is not necessary in general (i.e. when we don't need to
+@@ -1864,7 +1861,7 @@
+ void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
+   assert_locked();
+   if (_smallLinearAllocBlock._ptr == NULL) {
+-    assert(_smallLinearAllocBlock._word_size == 0, 
++    assert(_smallLinearAllocBlock._word_size == 0,
+       "Size of linAB should be zero if the ptr is NULL");
+     // Reset the linAB refill and allocation size limit.
+     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
+@@ -1894,7 +1891,7 @@
+   assert(blk->_word_size == 0 && blk->_ptr == NULL,
+          "linear allocation block should be empty");
+   FreeChunk* fc;
+-  if (blk->_refillSize < SmallForDictionary && 
++  if (blk->_refillSize < SmallForDictionary &&
+       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
+     // A linAB's strategy might be to use small sizes to reduce
+     // fragmentation but still get the benefits of allocation from a
+@@ -1951,7 +1948,7 @@
+   double totFree = itabFree +
+                    _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
+   if (totFree > 0) {
+-    frag = ((frag + _dictionary->sum_of_squared_block_sizes()) / 
++    frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
+             (totFree * totFree));
+     frag = (double)1.0  - frag;
+   } else {
+@@ -1985,7 +1982,7 @@
+   size_t i;
+   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
+     FreeList *fl = &_indexedFreeList[i];
+-    fl->set_surplus(fl->count() - 
++    fl->set_surplus(fl->count() -
+                     (ssize_t)((double)fl->desired() * SplitSurplusPercent));
+   }
+ }
+@@ -2012,7 +2009,7 @@
+     fl->set_coalBirths(0);
+     fl->set_coalDeaths(0);
+     fl->set_splitBirths(0);
+-    fl->set_splitDeaths(0);  
++    fl->set_splitDeaths(0);
+   }
+ }
+ 
+@@ -2055,9 +2052,9 @@
+   if (size  < SmallForDictionary) {
+     smallCoalBirth(size);
+   } else {
+-    dictionary()->dictCensusUpdate(size, 
+-			           false /* split */, 
+-				   true /* birth */);
++    dictionary()->dictCensusUpdate(size,
++                                   false /* split */,
++                                   true /* birth */);
+   }
+ }
+ 
+@@ -2065,9 +2062,9 @@
+   if(size  < SmallForDictionary) {
+     smallCoalDeath(size);
+   } else {
+-    dictionary()->dictCensusUpdate(size, 
+-				   false /* split */, 
+-				   false /* birth */);
++    dictionary()->dictCensusUpdate(size,
++                                   false /* split */,
++                                   false /* birth */);
+   }
+ }
+ 
+@@ -2089,9 +2086,9 @@
+   if (size  < SmallForDictionary) {
+     smallSplitBirth(size);
+   } else {
+-    dictionary()->dictCensusUpdate(size, 
+-				   true /* split */, 
+-				   true /* birth */);
++    dictionary()->dictCensusUpdate(size,
++                                   true /* split */,
++                                   true /* birth */);
+   }
+ }
+ 
+@@ -2099,9 +2096,9 @@
+   if (size  < SmallForDictionary) {
+     smallSplitDeath(size);
+   } else {
+-    dictionary()->dictCensusUpdate(size, 
+-				   true /* split */, 
+-				   false /* birth */);
++    dictionary()->dictCensusUpdate(size,
++                                   true /* split */,
++                                   false /* birth */);
+   }
+ }
+ 
+@@ -2312,21 +2309,21 @@
+   gclog_or_tty->print("%4s\t"    "%7s\t"      "%7s\t"      "%7s\t"      "%7s\t"
+              "%7s\t"    "%7s\t"      "%7s\t"      "%7s\t"      "%7s\t"
+              "%7s\t"    "\n",
+-             "size",    "bfrsurp",   "surplus",   "desired",   "prvSwep",     
++             "size",    "bfrsurp",   "surplus",   "desired",   "prvSwep",
+              "bfrSwep", "count",     "cBirths",   "cDeaths",   "sBirths",
+              "sDeaths");
+ 
+   size_t totalFree = 0;
+   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
+-    const FreeList *fl = &_indexedFreeList[i];                                                       
+-	totalFree += fl->count() * fl->size();
++    const FreeList *fl = &_indexedFreeList[i];
++        totalFree += fl->count() * fl->size();
+ 
+     gclog_or_tty->print("%4d\t"          "%7d\t"             "%7d\t"        "%7d\t"
+                "%7d\t"          "%7d\t"             "%7d\t"        "%7d\t"
+                "%7d\t"          "%7d\t"             "%7d\t"        "\n",
+-               fl->size(),       fl->bfrSurp(),     fl->surplus(), fl->desired(), 
+-	       fl->prevSweep(),  fl->beforeSweep(), fl->count(),   fl->coalBirths(), 
+-	       fl->coalDeaths(), fl->splitBirths(), fl->splitDeaths());
++               fl->size(),       fl->bfrSurp(),     fl->surplus(), fl->desired(),
++               fl->prevSweep(),  fl->beforeSweep(), fl->count(),   fl->coalBirths(),
++               fl->coalDeaths(), fl->splitBirths(), fl->splitDeaths());
+     bfrSurp     += fl->bfrSurp();
+     surplus     += fl->surplus();
+     desired     += fl->desired();
+@@ -2337,7 +2334,7 @@
+     coalDeaths  += fl->coalDeaths();
+     splitBirths += fl->splitBirths();
+     splitDeaths += fl->splitDeaths();
+-  }                                                                                             
++  }
+   gclog_or_tty->print("%4s\t"
+             "%7d\t"      "%7d\t"     "%7d\t"        "%7d\t"       "%7d\t"
+             "%7d\t"      "%7d\t"     "%7d\t"        "%7d\t"       "%7d\t" "\n",
+@@ -2347,7 +2344,7 @@
+   gclog_or_tty->print_cr("Total free in indexed lists %d words", totalFree);
+   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
+     (double)(splitBirths+coalBirths-splitDeaths-coalDeaths)/
+-	    (prevSweep != 0 ? (double)prevSweep : 1.0),
++            (prevSweep != 0 ? (double)prevSweep : 1.0),
+     (double)(desired - count)/(desired != 0 ? (double)desired : 1.0));
+   _dictionary->printDictCensus();
+ }
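For the growth and deficit ratios printed above: growth is (splitBirths + coalBirths - splitDeaths - coalDeaths) / prevSweep and deficit is (desired - count) / desired, each guarded against a zero denominator. As an illustrative (not measured) example, 40 split births, 10 coalesced births, 20 split deaths and 5 coalesced deaths against a previous-sweep population of 100 give growth = (40 + 10 - 20 - 5) / 100 = 0.25; 200 desired chunks with 150 present give deficit = (200 - 150) / 200 = 0.25.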
+@@ -2375,7 +2372,7 @@
+                "spool buffers processing inconsistency");
+       }
+     )
+-  } 
++  }
+   return hdr;
+ }
+ 
+@@ -2507,10 +2504,10 @@
+ // When _spoolTail is not NULL, then the slot <_spoolTail, _nextIndex>
+ // points to the next slot available for filling.
+ // The set of slots holding displaced headers are then all those in the
+-// right-open interval denoted by: 
+-// 
++// right-open interval denoted by:
++//
+ //    [ <_spoolHead, _firstIndex>, <_spoolTail, _nextIndex> )
+-// 
++//
+ // When _spoolTail is NULL, then the set of slots with displaced headers
+ // is all those starting at the slot <_spoolHead, _firstIndex> and
+ // going up to the last slot of last block in the linked list.
+@@ -2560,7 +2557,7 @@
+   // _spoolHead and we undercounted (_nextIndex-1) worth of
+   // slots in block _spoolTail. We make an appropriate
+   // adjustment by subtracting the first and adding the
+-  // second:  - (_firstIndex - 1) + (_nextIndex - 1) 
++  // second:  - (_firstIndex - 1) + (_nextIndex - 1)
+   numDisplacedHdrs += (_nextIndex - _firstIndex);
+   guarantee(numDisplacedHdrs == numObjsWithDisplacedHdrs, "Displaced hdr count");
+ }
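The adjustment described in the comment above collapses algebraically: -(_firstIndex - 1) + (_nextIndex - 1) = _nextIndex - _firstIndex, which is exactly the single term the code adds. For instance (values illustrative), _firstIndex = 3 and _nextIndex = 7 give -(3 - 1) + (7 - 1) = 4 = 7 - 3.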
+@@ -2627,7 +2624,7 @@
+ par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
+   assert(fl->count() == 0, "Precondition.");
+   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
+-	 "Precondition");
++         "Precondition");
+ 
+   // We'll try all multiples of word_sz in the indexed set (starting with
+   // word_sz itself), then try getting a big chunk and splitting it.
+@@ -2642,41 +2639,41 @@
+       MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
+                       Mutex::_no_safepoint_check_flag);
+       if (gfl->count() != 0) {
+-	size_t nn = MAX2(n/k, (size_t)1);
+-	gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
+-	found = true;
++        size_t nn = MAX2(n/k, (size_t)1);
++        gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
++        found = true;
+       }
+     }
+     // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
+     if (found) {
+       if (k == 1) {
+-	fl->prepend(&fl_for_cur_sz);
++        fl->prepend(&fl_for_cur_sz);
+       } else {
+-	// Divide each block on fl_for_cur_sz up k ways.
+-	FreeChunk* fc;
+-	while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
+-	  // Must do this in reverse order, so that anybody attempting to
+-	  // access the main chunk sees it as a single free block until we
+-	  // change it.
++        // Divide each block on fl_for_cur_sz up k ways.
++        FreeChunk* fc;
++        while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
++          // Must do this in reverse order, so that anybody attempting to
++          // access the main chunk sees it as a single free block until we
++          // change it.
+           size_t fc_size = fc->size();
+-	  for (int i = k-1; i >= 0; i--) {
+-	    FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
+-	    ffc->setSize(word_sz);
+-	    ffc->linkNext(NULL);
+-	    ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
++          for (int i = k-1; i >= 0; i--) {
++            FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
++            ffc->setSize(word_sz);
++            ffc->linkNext(NULL);
++            ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
+             // Above must occur before BOT is updated below.
+             // splitting from the right, fc_size == (k - i + 1) * wordsize
+-	    _bt.mark_block((HeapWord*)ffc, word_sz);
++            _bt.mark_block((HeapWord*)ffc, word_sz);
+             fc_size -= word_sz;
+             _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
+             _bt.verify_single_block((HeapWord*)fc, fc_size);
+             _bt.verify_single_block((HeapWord*)ffc, ffc->size());
+-	    // Push this on "fl".
+-	    fl->returnChunkAtHead(ffc);
+-	  }
+-	  // TRAP
+-	  assert(fl->tail()->next() == NULL, "List invariant.");
+-	}
++            // Push this on "fl".
++            fl->returnChunkAtHead(ffc);
++          }
++          // TRAP
++          assert(fl->tail()->next() == NULL, "List invariant.");
++        }
+       }
+       return;
+     }
+@@ -2690,14 +2687,14 @@
+     MutexLockerEx x(parDictionaryAllocLock(),
+                     Mutex::_no_safepoint_check_flag);
+     while (n > 0) {
+-      fc = dictionary()->getChunk(MAX2(n * word_sz, 
+-				  _dictionary->minSize()),
+-				  FreeBlockDictionary::atLeast);
++      fc = dictionary()->getChunk(MAX2(n * word_sz,
++                                  _dictionary->minSize()),
++                                  FreeBlockDictionary::atLeast);
+       if (fc != NULL) {
+         _bt.allocated((HeapWord*)fc, fc->size());  // update _unallocated_blk
+         dictionary()->dictCensusUpdate(fc->size(),
+-				       true /*split*/,
+-				       false /*birth*/);
++                                       true /*split*/,
++                                       false /*birth*/);
+         break;
+       } else {
+         n--;
+@@ -2726,16 +2723,16 @@
+       // Above must occur before BOT is updated below.
+       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
+       if (rem >= IndexSetSize) {
+-	returnChunkToDictionary(rem_fc);
+-	dictionary()->dictCensusUpdate(fc->size(),
+-				       true /*split*/,
+-				       true /*birth*/);
+-	rem_fc = NULL;
++        returnChunkToDictionary(rem_fc);
++        dictionary()->dictCensusUpdate(fc->size(),
++                                       true /*split*/,
++                                       true /*birth*/);
++        rem_fc = NULL;
+       }
+       // Otherwise, return it to the small list below.
+     }
+   }
+-  // 
++  //
+   if (rem_fc != NULL) {
+     MutexLockerEx x(_indexedFreeListParLocks[rem],
+                     Mutex::_no_safepoint_check_flag);
+@@ -2832,7 +2829,7 @@
+       span = MemRegion(low, low);  // Null region
+     } // else use entire span
+   }
+-  assert(span.is_empty() || 
++  assert(span.is_empty() ||
+          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
+         "span should start at a card boundary");
+   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
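The expression above, (span.word_size() + task_size - 1) / task_size, is the standard integer ceiling division ceil(word_size / task_size), so a span that is not an exact multiple of the task size still gets a final, shorter task. For example, a 1000-word span with 300-word tasks yields (1000 + 299) / 300 = 4 tasks, the last covering the remaining 100 words.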
+@@ -2846,4 +2843,3 @@
+   pst->set_par_threads(n_threads);
+   pst->set_n_tasks((int)n_tasks);
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)compactibleFreeListSpace.hpp	1.91 07/05/05 17:05:45 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Classes in support of keeping track of promotions into a non-Contiguous
+@@ -50,7 +47,7 @@
+   inline PromotedObject* next() const {
+     return (PromotedObject*)(_next & next_mask);
+   }
+-  inline void setNext(PromotedObject* x) { 
++  inline void setNext(PromotedObject* x) {
+     assert(((intptr_t)x & ~next_mask) == 0,
+            "Conflict in bit usage, "
+            " or insufficient alignment of objects");
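setNext() above relies on object alignment leaving the low bits of every PromotedObject* zero, so those bits can carry flags while next() masks them off to recover the pointer; the assert fires if an insufficiently aligned pointer would collide with the flag bits. A generic sketch of this low-bit tagging scheme, with hypothetical names rather than the HotSpot declarations:

    #include <cassert>
    #include <cstdint>

    const uintptr_t flag_mask = 0x3;  // low bits free on 4-byte alignment
    const uintptr_t next_mask = ~flag_mask;

    struct Node {
      uintptr_t _next;  // tagged word: pointer bits | flag bits
      Node* next() const { return (Node*)(_next & next_mask); }
      void set_next(Node* n) {
        assert(((uintptr_t)n & ~next_mask) == 0, "insufficient alignment");
        _next = (uintptr_t)n | (_next & flag_mask);  // preserve the flags
      }
    };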
+@@ -157,7 +154,7 @@
+   void reset() {
+     _promoHead = NULL;
+     _promoTail = NULL;
+-    _spoolHead = NULL; 
++    _spoolHead = NULL;
+     _spoolTail = NULL;
+     _spareSpool = NULL;
+     _firstIndex = 0;
+@@ -168,14 +165,14 @@
+ 
+ class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
+  public:
+-  LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0), 
++  LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),
+     _allocation_size_limit(0) {}
+-  void set(HeapWord* ptr, size_t word_size, size_t refill_size, 
++  void set(HeapWord* ptr, size_t word_size, size_t refill_size,
+     size_t allocation_size_limit) {
+     _ptr = ptr;
+     _word_size = word_size;
+     _refillSize = refill_size;
+-    _allocation_size_limit = allocation_size_limit; 
++    _allocation_size_limit = allocation_size_limit;
+   }
+   HeapWord* _ptr;
+   size_t    _word_size;
+@@ -194,7 +191,7 @@
+   friend class CMSCollector;
+   friend class CMSPermGenGen;
+   // Local alloc buffer for promotion into this space.
+-  friend class CFLS_LAB;   
++  friend class CFLS_LAB;
+ 
+   // "Size" of chunks of work (executed during parallel remark phases
+   // of CMS collection); this probably belongs in CMSCollector, although
+@@ -243,7 +240,7 @@
+ 
+   // a lock protecting the free lists and free blocks;
+   // mutable because of ubiquity of locking even for otherwise const methods
+-  mutable Mutex _freelistLock; 
++  mutable Mutex _freelistLock;
+   // locking verifier convenience function
+   void assert_locked() const PRODUCT_RETURN;
+ 
+@@ -257,7 +254,7 @@
+                                        // indexed array for small size blocks
+   // allocation strategy
+   bool       _fitStrategy;      // Use best fit strategy.
+-  bool	     _adaptive_freelists; // Use adaptive freelists
++  bool       _adaptive_freelists; // Use adaptive freelists
+ 
+   // This is an address close to the largest free chunk in the heap.
+   // It is currently assumed to be at the end of the heap.  Free
+@@ -310,7 +307,7 @@
+   // to be used as linear allocation buffers.
+   HeapWord* allocate_non_adaptive_freelists(size_t size);
+ 
+-  // Gets a chunk from the linear allocation block (LinAB).  If there 
++  // Gets a chunk from the linear allocation block (LinAB).  If there
+   // is not enough space in the LinAB, refills it.
+   HeapWord*  getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size);
+   HeapWord*  getChunkFromSmallLinearAllocBlock(size_t size);
+@@ -338,13 +335,13 @@
+   // Find a chunk in the indexed free list that is the best
+   // fit for size "numWords".
+   FreeChunk* bestFitSmall(size_t numWords);
+-  // For free list "fl" of chunks of size > numWords, 
++  // For free list "fl" of chunks of size > numWords,
+   // remove a chunk, split off a chunk of size numWords
+   // and return it.  The split off remainder is returned to
+   // the free lists.  The old name for getFromListGreater
+   // was lookInListGreater.
+   FreeChunk* getFromListGreater(FreeList* fl, size_t numWords);
+-  // Get a chunk in the indexed free list or dictionary, 
++  // Get a chunk in the indexed free list or dictionary,
+   // by considering a larger chunk and splitting it.
+   FreeChunk* getChunkFromGreater(size_t numWords);
+   //  Verify that the given chunk is in the indexed free lists.
+@@ -354,7 +351,7 @@
+   // Remove the specified chunk from the dictionary.
+   void       removeChunkFromDictionary(FreeChunk* fc);
+   // Split a free chunk into a smaller free chunk of size "new_size".
+-  // Return the smaller free chunk and return the remainder to the 
++  // Return the smaller free chunk and return the remainder to the
+   // free lists.
+   FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size);
+   // Add a chunk to the free lists.
+@@ -380,7 +377,7 @@
+   void       refillLinearAllocBlocksIfNeeded();
+ 
+   void       verify_objects_initialized() const;
+-  
++
+   // Statistics reporting helper functions
+   void       reportFreeListStatistics() const;
+   void       reportIndexedFreeListStatistics() const;
+@@ -409,7 +406,7 @@
+  public:
+   // Constructor...
+   CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
+-			   bool use_adaptive_freelists,
++                           bool use_adaptive_freelists,
+                            FreeBlockDictionary::DictionaryChoice);
+   // accessors
+   bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
+@@ -470,7 +467,7 @@
+   bool is_in(const void* p) const {
+     return used_region().contains(p);
+   }
+-    
++
+   virtual bool is_free_block(const HeapWord* p) const;
+ 
+   // Resizing support
+@@ -500,8 +497,8 @@
+ 
+   // Override: provides a DCTO_CL specific to this kind of space.
+   DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
+-				     CardTableModRefBS::PrecisionStyle precision,
+-				     HeapWord* boundary);
++                                     CardTableModRefBS::PrecisionStyle precision,
++                                     HeapWord* boundary);
+ 
+   void blk_iterate(BlkClosure* cl);
+   void blk_iterate_careful(BlkClosureCareful* cl);
+@@ -606,7 +603,7 @@
+   // verify that the given chunk is in the free lists.
+   bool verifyChunkInFreeLists(FreeChunk* fc) const;
+   // Do some basic checks on the free lists.
+-  void checkFreeListConsistency()	  const PRODUCT_RETURN;
++  void checkFreeListConsistency()         const PRODUCT_RETURN;
+ 
+   NOT_PRODUCT (
+     void initializeIndexedFreeListArrayReturnedBytes();
+@@ -620,12 +617,12 @@
+   // The census consists of counts of the quantities such as
+   // the current count of the free chunks, number of chunks
+   // created as a result of the split of a larger chunk or
+-  // coalescing of smaller chucks, etc.  The counts in the 
++  // coalescing of smaller chunks, etc.  The counts in the
+   // census are used to make decisions on splitting and
+   // coalescing of chunks during the sweep of garbage.
+ 
+   // Print the statistics for the free lists.
+-  void printFLCensus(int sweepCt)	  const;
++  void printFLCensus(int sweepCt)         const;
+ 
+   // Statistics functions
+   // Initialize census for lists before the sweep.
+@@ -645,56 +642,56 @@
+ 
+ 
+ // Record (for each size):
+-// 
+-//   split-births = #chunks added due to splits in (prev-sweep-end, 
+-// 	this-sweep-start)
+-//   split-deaths = #chunks removed for splits in (prev-sweep-end, 
+-// 	this-sweep-start)
++//
++//   split-births = #chunks added due to splits in (prev-sweep-end,
++//      this-sweep-start)
++//   split-deaths = #chunks removed for splits in (prev-sweep-end,
++//      this-sweep-start)
+ //   num-curr     = #chunks at start of this sweep
+ //   num-prev     = #chunks at end of previous sweep
+-// 
++//
+ // The above are quantities that are measured. Now define:
+-// 
++//
+ //   num-desired := num-prev + split-births - split-deaths - num-curr
+-// 
++//
+ // Roughly, num-prev + split-births is the supply,
+ // split-deaths is demand due to other sizes
+ // and num-curr is what we have left.
+-// 
++//
+ // Thus, num-desired is roughly speaking the "legitimate demand"
+ // for blocks of this size and what we are striving to reach at the
+ // end of the current sweep.
+-// 
++//
+ // For a given list, let num-len be its current population.
+ // Define, for a free list of a given size:
+-// 
++//
+ //   coal-overpopulated := num-len >= num-desired * coal-surplus
+ // (coal-surplus is set to 1.05, i.e. we allow a little slop when
+ // coalescing -- we do not coalesce unless we think that the current
+ // supply has exceeded the estimated demand by more than 5%).
+-// 
++//
+ // For the set of sizes in the binary tree, which is neither dense nor
+ // closed, it may be the case that for a particular size we have never
+ // had, or do not now have, or did not have at the previous sweep,
+ // chunks of that size. We need to extend the definition of
+ // coal-overpopulated to such sizes as well:
+-// 
++//
+ //   For a chunk in/not in the binary tree, extend coal-overpopulated
+ //   defined above to include all sizes as follows:
+-// 
++//
+ //   . a size that is non-existent is coal-overpopulated
+ //   . a size that has a num-desired <= 0 as defined above is
+-//     coal-overpopulated.  
+-// 
++//     coal-overpopulated.
++//
+ // Also define, for a chunk heap-offset C and mountain heap-offset M:
+-// 
++//
+ //   close-to-mountain := C >= 0.99 * M
+-// 
++//
+ // Now, the coalescing strategy is:
+-// 
++//
+ //    Coalesce left-hand chunk with right-hand chunk if and
+ //    only if:
+-// 
++//
+ //      EITHER
+ //        . left-hand chunk is of a size that is coal-overpopulated
+ //      OR
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentGCThread.cpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentGCThread.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentGCThread.cpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentGCThread.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -19,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // CopyrightVersion 1.2
+@@ -101,7 +101,7 @@
+   MutexLocker x(STS_init_lock);
+   if (!_initialized) {
+     _m             = new Monitor(Mutex::leaf,
+-				 "SuspendibleThreadSetLock", true);
++                                 "SuspendibleThreadSetLock", true);
+     _async         = 0;
+     _async_stop    = false;
+     _async_stopped = 0;
+@@ -133,12 +133,12 @@
+       _async_stopped++;
+       assert(_async_stopped > 0, "Huh.");
+       if (_async_stopped == _async) {
+-	if (ConcGCYieldTimeout > 0) {
+-	  double now = os::elapsedTime();
+-	  guarantee((now - _suspend_all_start) * 1000.0 <
+-		    (double)ConcGCYieldTimeout,
+-		    "Long delay; whodunit?");
+-	}
++        if (ConcGCYieldTimeout > 0) {
++          double now = os::elapsedTime();
++          guarantee((now - _suspend_all_start) * 1000.0 <
++                    (double)ConcGCYieldTimeout,
++                    "Long delay; whodunit?");
++        }
+       }
+       _m->notify_all();
+       while (_async_stop) _m->wait(Mutex::_no_safepoint_check_flag);
+@@ -193,12 +193,12 @@
+   Handle thread_group (THREAD, Universe::system_thread_group());
+   JavaValue result(T_VOID);
+   JavaCalls::call_special(&result, thread_oop,
+-			  klass,
+-			  vmSymbolHandles::object_initializer_name(),
+-			  vmSymbolHandles::threadgroup_string_void_signature(),
+-			  thread_group,
+-			  string,
+-			  CHECK_NULL);
++                          klass,
++                          vmSymbolHandles::object_initializer_name(),
++                          vmSymbolHandles::threadgroup_string_void_signature(),
++                          thread_group,
++                          string,
++                          CHECK_NULL);
+ 
+   SurrogateLockerThread* res;
+   {
+@@ -291,24 +291,24 @@
+ 
+ void ConcurrentGCThread::stsYield(const char* id) {
+   assert( Thread::current()->is_ConcurrentGC_thread(),
+-	  "only a conc GC thread can call this" );
++          "only a conc GC thread can call this" );
+   _sts.yield(id);
+ }
+ 
+ bool ConcurrentGCThread::stsShouldYield() {
+   assert( Thread::current()->is_ConcurrentGC_thread(),
+-	  "only a conc GC thread can call this" );
++          "only a conc GC thread can call this" );
+   return _sts.should_yield();
+ }
+ 
+ void ConcurrentGCThread::stsJoin() {
+   assert( Thread::current()->is_ConcurrentGC_thread(),
+-	  "only a conc GC thread can call this" );
++          "only a conc GC thread can call this" );
+   _sts.join();
+ }
+ 
+ void ConcurrentGCThread::stsLeave() {
+   assert( Thread::current()->is_ConcurrentGC_thread(),
+-	  "only a conc GC thread can call this" );
++          "only a conc GC thread can call this" );
+   _sts.leave();
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentGCThread.hpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentGCThread.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentGCThread.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentGCThread.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -19,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class VoidClosure;
+@@ -47,7 +47,7 @@
+   SuspendibleThreadSet() : _initialized(false) {}
+ 
+   // Add the current thread to the set.  May block if a suspension
+-  // is in progress.    
++  // is in progress.
+   void join();
+   // Removes the current thread from the set.
+   void leave();
+@@ -112,7 +112,7 @@
+ 
+   ConcurrentGCThread();
+   ~ConcurrentGCThread() {} // Exists to call NamedThread destructor.
+-  
++
+   // Tester
+   bool is_ConcurrentGC_thread() const          { return true;       }
+ 
+@@ -152,12 +152,14 @@
+   SLT_msg_type  _buffer;  // communication buffer
+   Monitor       _monitor; // monitor controlling buffer
+   BasicLock     _basicLock; // used for PLL locking
+-  
++
+  public:
+   static SurrogateLockerThread* make(TRAPS);
+ 
+   SurrogateLockerThread();
+ 
++  bool is_hidden_from_external_view() const     { return true; }
++
+   void loop(); // main method
+ 
+   void manipulatePLL(SLT_msg_type msg);
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)concurrentMarkSweepGeneration.cpp	1.290 07/07/17 11:49:58 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -43,11 +40,11 @@
+ // CMS-lock doesn't provide any fairness guarantees, but clients
+ // should ensure that it is only held for very short, bounded
+ // durations.
+-// 
++//
+ // When either of the CMS thread or the VM thread is involved in
+ // collection operations during which it does not want the other
+ // thread to interfere, it obtains the CMS token.
+-// 
++//
+ // If either thread tries to get the token while the other has
+ // it, that thread waits. However, if the VM thread and CMS thread
+ // both want the token, then the VM thread gets priority while the
+@@ -55,28 +52,28 @@
+ // phases of the CMS thread's work do not block out the VM thread
+ // for long periods of time as the CMS thread continues to hog
+ // the token. (See bug 4616232).
+-// 
++//
+ // The baton-passing functions are, however, controlled by the
+ // flags _foregroundGCShouldWait and _foregroundGCIsActive,
+ // and here the low-level CMS lock, not the high level token,
+ // ensures mutual exclusion.
+-// 
++//
+ // Two important conditions that we have to satisfy:
+ // 1. if a thread does a low-level wait on the CMS lock, then it
+ //    relinquishes the CMS token if it were holding that token
+ //    when it acquired the low-level CMS lock.
+ // 2. any low-level notifications on the low-level lock
+ //    should only be sent when a thread has relinquished the token.
+-// 
++//
+ // In the absence of either property, we'd have potential deadlock.
+-// 
++//
+ // We protect each of the CMS (concurrent and sequential) phases
+ // with the CMS _token_, not the CMS _lock_.
+-// 
++//
+ // The only code protected by CMS lock is the token acquisition code
+ // itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
+ // baton-passing code.
+-// 
++//
+ // Unfortunately, i couldn't come up with a good abstraction to factor and
+ // hide the naked CGC_lock manipulation in the baton-passing code
+ // further below. That's something we should try to do. Also, the proof
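The token discipline this comment describes can be modeled with two flags on a monitor. The sketch below is illustrative only — the real acquisition code lives in ConcurrentMarkSweepThread::[de]synchronize() and is not part of this patch:

#include <condition_variable>
#include <mutex>

// Two-party token with VM-thread priority, per the comment above: if
// both parties want the token, the VM thread wins, so long background
// phases cannot hog it across a pending safepoint operation.
class CMSToken {
  std::mutex _m;                      // plays the role of the low-level CMS lock
  std::condition_variable _cv;
  bool _held = false;
  bool _vm_waiting = false;

 public:
  void acquire_as_vm_thread() {
    std::unique_lock<std::mutex> l(_m);
    _vm_waiting = true;
    _cv.wait(l, [this] { return !_held; });
    _vm_waiting = false;
    _held = true;
  }
  void acquire_as_cms_thread() {
    std::unique_lock<std::mutex> l(_m);
    // Back off while the VM thread wants the token.
    _cv.wait(l, [this] { return !_held && !_vm_waiting; });
    _held = true;
  }
  void release() {
    std::lock_guard<std::mutex> l(_m);
    _held = false;
    _cv.notify_all();
  }
};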
+@@ -178,7 +175,7 @@
+ 
+   _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
+                                            use_adaptive_freelists,
+-					   dictionaryChoice);
++                                           dictionaryChoice);
+   NOT_PRODUCT(debug_cms_space = _cmsSpace;)
+   if (_cmsSpace == NULL) {
+     vm_exit_during_initialization(
+@@ -206,7 +203,7 @@
+     }
+     for (uint i = 0; i < ParallelGCThreads; i++) {
+       _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
+-      if (_par_gc_thread_states[i] == NULL) { 
++      if (_par_gc_thread_states[i] == NULL) {
+         vm_exit_during_initialization("Could not allocate par gc structs");
+       }
+     }
+@@ -267,7 +264,7 @@
+ }
+ 
+ CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
+-  CMSGCAdaptivePolicyCounters* results = 
++  CMSGCAdaptivePolicyCounters* results =
+     (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
+   assert(
+     results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
+@@ -324,8 +321,8 @@
+   size_t expected_promotion = gch->get_gen(0)->capacity();
+   if (HandlePromotionFailure) {
+     expected_promotion = MIN2(
+-	(size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average(),
+-	expected_promotion);
++        (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average(),
++        expected_promotion);
+   }
+   if (cms_free > expected_promotion) {
+     // Start a cms collection if there isn't enough space to promote
+@@ -339,10 +336,10 @@
+ 
+     if (PrintGCDetails && Verbose) {
+       gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
+-	SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
+-	cms_free, expected_promotion);
++        SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
++        cms_free, expected_promotion);
+       gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
+-	cms_free_dbl, cms_consumption_rate() + 1.0);
++        cms_free_dbl, cms_consumption_rate() + 1.0);
+     }
+     // Add 1 in case the consumption rate goes to zero.
+     return cms_free_dbl / (cms_consumption_rate() + 1.0);
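Distilled, the estimate above is a guarded division. A self-contained version (the accessor names are stand-ins for the CMSStats methods, and the exact discounting of expected promotion is simplified):

#include <cstddef>

// Seconds until the CMS generation fills, guarded against a zero
// measured consumption rate.
double time_until_full(size_t cms_free, size_t expected_promotion,
                       double consumption_rate_bytes_per_sec) {
  if (cms_free <= expected_promotion) {
    return 0.0;                       // not enough room to promote: collect now
  }
  double cms_free_dbl = (double)(cms_free - expected_promotion);
  // Add 1 so the division is safe even when the rate is zero.
  return cms_free_dbl / (consumption_rate_bytes_per_sec + 1.0);
}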
+@@ -384,7 +381,7 @@
+ // Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
+ // amount of change to prevent wild oscillation.
+ unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
+-					      unsigned int new_duty_cycle) {
++                                              unsigned int new_duty_cycle) {
+   assert(old_duty_cycle <= 100, "bad input value");
+   assert(new_duty_cycle <= 100, "bad input value");
+ 
+@@ -406,14 +403,14 @@
+ 
+   if (CMSTraceIncrementalPacing) {
+     gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
+-			   old_duty_cycle, new_duty_cycle, damped_duty_cycle);
++                           old_duty_cycle, new_duty_cycle, damped_duty_cycle);
+   }
+   return damped_duty_cycle;
+ }
+ 
+ unsigned int CMSStats::icms_update_duty_cycle_impl() {
+   assert(CMSIncrementalPacing && valid(),
+-	 "should be handled in icms_update_duty_cycle()");
++         "should be handled in icms_update_duty_cycle()");
+ 
+   double cms_time_so_far = cms_timer().seconds();
+   double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
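These hunks show only the signature and trace output of icms_damped_duty_cycle(). One plausible damping rule that "limits the amount of change to prevent wild oscillation" is to clamp the per-update step; this is an assumption for illustration, not the HotSpot formula:

#include <algorithm>
#include <cassert>

// Move the duty cycle toward the new value, but by at most max_step
// percentage points per update (max_step is a hypothetical knob).
unsigned int damped_duty_cycle(unsigned int old_dc, unsigned int new_dc,
                               unsigned int max_step = 10) {
  assert(old_dc <= 100 && new_dc <= 100 && "bad input value");
  if (new_dc > old_dc) {
    return std::min(new_dc, old_dc + max_step);                 // cap the increase
  }
  return std::max(new_dc, old_dc > max_step ? old_dc - max_step : 0u); // cap the decrease
}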
+@@ -428,7 +425,7 @@
+     // Avoid very small duty cycles (1 or 2); 0 is allowed.
+     if (new_duty_cycle > 2) {
+       _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
+-						new_duty_cycle);
++                                                new_duty_cycle);
+     }
+   } else if (_allow_duty_cycle_reduction) {
+     // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
+@@ -450,23 +447,23 @@
+ void CMSStats::print_on(outputStream *st) const {
+   st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
+   st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
+-	       gc0_duration(), gc0_period(), gc0_promoted());
++               gc0_duration(), gc0_period(), gc0_promoted());
+   st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
+-	    cms_duration(), cms_duration_per_mb(),
+-	    cms_period(), cms_allocated());
++            cms_duration(), cms_duration_per_mb(),
++            cms_period(), cms_allocated());
+   st->print(",cms_since_beg=%g,cms_since_end=%g",
+-	    cms_time_since_begin(), cms_time_since_end());
++            cms_time_since_begin(), cms_time_since_end());
+   st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
+-	    _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
++            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
+   if (CMSIncrementalMode) {
+     st->print(",dc=%d", icms_duty_cycle());
+   }
+ 
+   if (valid()) {
+     st->print(",promo_rate=%g,cms_alloc_rate=%g",
+-	      promotion_rate(), cms_allocation_rate());
++              promotion_rate(), cms_allocation_rate());
+     st->print(",cms_consumption_rate=%g,time_until_full=%g",
+-	      cms_consumption_rate(), time_until_cms_gen_full());
++              cms_consumption_rate(), time_until_cms_gen_full());
+   }
+   st->print(" ");
+ }
+@@ -480,7 +477,7 @@
+ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
+                            ConcurrentMarkSweepGeneration* permGen,
+                            CardTableRS*                   ct,
+-			   ConcurrentMarkSweepPolicy*	  cp):
++                           ConcurrentMarkSweepPolicy*     cp):
+   _cmsGen(cmsGen),
+   _permGen(permGen),
+   _ct(ct),
+@@ -600,7 +597,7 @@
+     // the MT case where it's not fixed yet; see 6178663.
+     CMSCleanOnEnter = false;
+   }
+-  assert((_conc_workers != NULL) == (ParallelCMSThreads > 1), 
++  assert((_conc_workers != NULL) == (ParallelCMSThreads > 1),
+          "Inconsistency");
+ 
+   // Parallel task queues; these are shared for the
+@@ -609,7 +606,7 @@
+   {
+     uint i;
+     uint num_queues = (uint) MAX2(ParallelGCThreads, ParallelCMSThreads);
+-  
++
+     if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
+          || ParallelRefProcEnabled)
+         && num_queues > 0) {
+@@ -629,7 +626,7 @@
+         OopTaskQueue work_queue;
+         char pad[64 - sizeof(OopTaskQueue)];  // prevent false sharing
+       } OopTaskQueuePadded;
+-    
++
+       for (i = 0; i < num_queues; i++) {
+         OopTaskQueuePadded *q_padded = new OopTaskQueuePadded();
+         if (q_padded == NULL) {
+@@ -661,7 +658,7 @@
+     _initiatingOccupancy = ((100 - MinHeapFreeRatio) +
+                            (double)(CMSTriggerRatio *
+                                     MinHeapFreeRatio) / 100.0)
+-			   / 100.0;
++                           / 100.0;
+   }
+   // Clip CMSBootstrapOccupancy between 0 and 100.
+   _bootstrap_occupancy = ((double)MIN2((intx)100, MAX2((intx)0, CMSBootstrapOccupancy)))
+@@ -775,7 +772,7 @@
+ }
+ 
+ // this is an optimized version of update_counters(). it takes the
+-// used value as a parameter rather than computing it. 
++// used value as a parameter rather than computing it.
+ //
+ void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
+   if (UsePerfData) {
+@@ -800,11 +797,11 @@
+   GenCollectedHeap* gch = GenCollectedHeap::heap();
+   if (PrintGCDetails) {
+     if (Verbose) {
+-      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]", 
+-	level(), short_name(), s, used(), capacity());
++      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
++        level(), short_name(), s, used(), capacity());
+     } else {
+-      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]", 
+-	level(), short_name(), s, used() / K, capacity() / K);
++      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
++        level(), short_name(), s, used() / K, capacity() / K);
+     }
+   }
+   if (Verbose) {
+@@ -839,7 +836,7 @@
+     size_t max_promotion_in_bytes,
+     bool younger_handles_promotion_failure) const {
+ 
+-  // This is the most conservative test.  Full promotion is 
++  // This is the most conservative test.  Full promotion is
+   // guaranteed if this is used. The multiplicative factor is to
+   // account for the worst case "dilatation".
+   double adjusted_max_promo_bytes = _dilatation_factor * max_promotion_in_bytes;
+@@ -857,11 +854,11 @@
+       // With promotion failure handling the test for the ability
+       // to support the promotion does not have to be guaranteed.
+       // Use an average of the amount promoted.
+-      result = max_available() >= (size_t) 
+-	gc_stats()->avg_promoted()->padded_average();
++      result = max_available() >= (size_t)
++        gc_stats()->avg_promoted()->padded_average();
+       if (PrintGC && Verbose && result) {
+         gclog_or_tty->print_cr(
+-	  "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
++          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
+           " max_available: " SIZE_FORMAT
+           " avg_promoted: " SIZE_FORMAT,
+           max_available(), (size_t)
+@@ -896,7 +893,7 @@
+ void ConcurrentMarkSweepGeneration::reset_after_compaction() {
+   // Clear the promotion information.  These pointers can be adjusted
+   // along with all the other pointers into the heap but
+-  // compaction is expected to be a rare event with 
++  // compaction is expected to be a rare event with
+   // a heap using cms so don't do it without seeing the need.
+   if (ParallelGCThreads > 0) {
+     for (uint i = 0; i < ParallelGCThreads; i++) {
+@@ -936,12 +933,12 @@
+       size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
+       gclog_or_tty->print_cr("\nFrom compute_new_size: ");
+       gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
+-      gclog_or_tty->print_cr("  Desired free fraction %f", 
++      gclog_or_tty->print_cr("  Desired free fraction %f",
+         desired_free_percentage);
+-      gclog_or_tty->print_cr("  Maximum free fraction %f", 
++      gclog_or_tty->print_cr("  Maximum free fraction %f",
+         maximum_free_percentage);
+       gclog_or_tty->print_cr("  Capactiy "SIZE_FORMAT, capacity()/1000);
+-      gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT, 
++      gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
+         desired_capacity/1000);
+       int prev_level = level() - 1;
+       if (prev_level >= 0) {
+@@ -953,17 +950,17 @@
+                                  prev_size/1000);
+       }
+       gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
+-	unsafe_max_alloc_nogc()/1000);
+-      gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT, 
+-	contiguous_available()/1000);
++        unsafe_max_alloc_nogc()/1000);
++      gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
++        contiguous_available()/1000);
+       gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
+         expand_bytes);
+     }
+     // safe if expansion fails
+-    expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio); 
++    expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
+     if (PrintGCDetails && Verbose) {
+-      gclog_or_tty->print_cr("  Expanded free fraction %f", 
+-	((double) free()) / capacity());
++      gclog_or_tty->print_cr("  Expanded free fraction %f",
++        ((double) free()) / capacity());
+     }
+   }
+ }
+@@ -1072,13 +1069,13 @@
+         if (par) {
+           _modUnionTable.par_mark_range(mr);
+         } else {
+-	  _modUnionTable.mark_range(mr);
++          _modUnionTable.mark_range(mr);
+         }
+       } else {  // not an obj array; we can just mark the head
+         if (par) {
+-	  _modUnionTable.par_mark(start);
++          _modUnionTable.par_mark(start);
+         } else {
+-	  _modUnionTable.mark(start);
++          _modUnionTable.mark(start);
+         }
+       }
+     }
+@@ -1122,8 +1119,8 @@
+       size_t adjustment = (size_t)adjustment_dbl;
+       HeapWord* tmp_stop = _icms_stop_limit + adjustment;
+       if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
+-	_icms_start_limit += adjustment;
+-	_icms_stop_limit = tmp_stop;
++        _icms_start_limit += adjustment;
++        _icms_stop_limit = tmp_stop;
+       }
+     }
+   }
+@@ -1136,11 +1133,11 @@
+ 
+   if (CMSTraceIncrementalMode) {
+     gclog_or_tty->print(" icms alloc limits:  "
+-			   PTR_FORMAT "," PTR_FORMAT
+-			   " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
+-			   _icms_start_limit, _icms_stop_limit,
+-			   percent_of_space(eden, _icms_start_limit),
+-			   percent_of_space(eden, _icms_stop_limit));
++                           PTR_FORMAT "," PTR_FORMAT
++                           " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
++                           _icms_start_limit, _icms_stop_limit,
++                           percent_of_space(eden, _icms_start_limit),
++                           percent_of_space(eden, _icms_stop_limit));
+     if (Verbose) {
+       gclog_or_tty->print("eden:  ");
+       eden->print_on(gclog_or_tty);
+@@ -1152,39 +1149,39 @@
+ // that if this method is called with _icms_start_limit
+ // and _icms_stop_limit both NULL, then it should return NULL
+ // and not notify the icms thread.
+-HeapWord* 
++HeapWord*
+ CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
+-				       size_t word_size)
++                                       size_t word_size)
+ {
+   // A start_limit equal to end() means the duty cycle is 0, so treat that as a
+   // nop.
+   if (CMSIncrementalMode && _icms_start_limit != space->end()) {
+     if (top <= _icms_start_limit) {
+       if (CMSTraceIncrementalMode) {
+-	space->print_on(gclog_or_tty);
+-	gclog_or_tty->stamp();
+-	gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
+-			       ", new limit=" PTR_FORMAT
+-			       " (" SIZE_FORMAT "%%)",
+-			       top, _icms_stop_limit,
+-			       percent_of_space(space, _icms_stop_limit));
++        space->print_on(gclog_or_tty);
++        gclog_or_tty->stamp();
++        gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
++                               ", new limit=" PTR_FORMAT
++                               " (" SIZE_FORMAT "%%)",
++                               top, _icms_stop_limit,
++                               percent_of_space(space, _icms_stop_limit));
+       }
+       ConcurrentMarkSweepThread::start_icms();
+-      assert(top < _icms_stop_limit, "Tautology"); 
+-      if (word_size < pointer_delta(_icms_stop_limit, top)) { 
+-	return _icms_stop_limit;
++      assert(top < _icms_stop_limit, "Tautology");
++      if (word_size < pointer_delta(_icms_stop_limit, top)) {
++        return _icms_stop_limit;
+       }
+ 
+       // The allocation will cross both the _start and _stop limits, so do the
+       // stop notification also and return end().
+       if (CMSTraceIncrementalMode) {
+-	space->print_on(gclog_or_tty);
+-	gclog_or_tty->stamp();
+-	gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
+-			       ", new limit=" PTR_FORMAT
+-			       " (" SIZE_FORMAT "%%)",
+-			       top, space->end(),
+-			       percent_of_space(space, space->end()));
++        space->print_on(gclog_or_tty);
++        gclog_or_tty->stamp();
++        gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
++                               ", new limit=" PTR_FORMAT
++                               " (" SIZE_FORMAT "%%)",
++                               top, space->end(),
++                               percent_of_space(space, space->end()));
+       }
+       ConcurrentMarkSweepThread::stop_icms();
+       return space->end();
+@@ -1192,13 +1189,13 @@
+ 
+     if (top <= _icms_stop_limit) {
+       if (CMSTraceIncrementalMode) {
+-	space->print_on(gclog_or_tty);
+-	gclog_or_tty->stamp();
+-	gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
+-			       ", new limit=" PTR_FORMAT
+-			       " (" SIZE_FORMAT "%%)",
+-			       top, space->end(),
+-			       percent_of_space(space, space->end()));
++        space->print_on(gclog_or_tty);
++        gclog_or_tty->stamp();
++        gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
++                               ", new limit=" PTR_FORMAT
++                               " (" SIZE_FORMAT "%%)",
++                               top, space->end(),
++                               percent_of_space(space, space->end()));
+       }
+       ConcurrentMarkSweepThread::stop_icms();
+       return space->end();
+@@ -1208,8 +1205,8 @@
+       space->print_on(gclog_or_tty);
+       gclog_or_tty->stamp();
+       gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
+-			     ", new limit=" PTR_FORMAT,
+-			     top, NULL);
++                             ", new limit=" PTR_FORMAT,
++                             top, NULL);
+     }
+   }
+ 
+@@ -1222,17 +1219,17 @@
+   // delegate to underlying space.
+   assert_lock_strong(freelistLock());
+ 
+-#ifndef	PRODUCT
++#ifndef PRODUCT
+   if (Universe::heap()->promotion_should_fail()) {
+     return NULL;
+   }
+-#endif	// #ifndef PRODUCT
++#endif  // #ifndef PRODUCT
+ 
+   oop res = _cmsSpace->promote(obj, obj_size, ref);
+   if (res == NULL) {
+     // expand and retry
+     size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
+-    expand(s*HeapWordSize, MinHeapDeltaBytes, 
++    expand(s*HeapWordSize, MinHeapDeltaBytes,
+       CMSExpansionCause::_satisfy_promotion);
+     // Since there's currently no next generation, we don't try to promote
+     // into a more senior generation.
+@@ -1260,8 +1257,8 @@
+ 
+ HeapWord*
+ ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
+-					     HeapWord* top,
+-					     size_t word_sz)
++                                             HeapWord* top,
++                                             size_t word_sz)
+ {
+   return collector()->allocation_limit_reached(space, top, word_sz);
+ }
+@@ -1269,13 +1266,13 @@
+ // Things to support parallel young-gen collection.
+ oop
+ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
+-					   oop old, markOop m,
+-					   size_t word_sz) {
+-#ifndef	PRODUCT
++                                           oop old, markOop m,
++                                           size_t word_sz) {
++#ifndef PRODUCT
+   if (Universe::heap()->promotion_should_fail()) {
+     return NULL;
+   }
+-#endif	// #ifndef PRODUCT
++#endif  // #ifndef PRODUCT
+ 
+   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
+   PromotionInfo* promoInfo = &ps->promo;
+@@ -1307,12 +1304,12 @@
+   HeapWord* old_ptr = (HeapWord*)old;
+   if (word_sz > (size_t)oopDesc::header_size()) {
+     Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
+-				 obj_ptr + oopDesc::header_size(),
+-				 word_sz - oopDesc::header_size());
++                                 obj_ptr + oopDesc::header_size(),
++                                 word_sz - oopDesc::header_size());
+   }
+   // Restore the mark word copied above.
+   obj->set_mark(m);
+-  // Now we can track the promoted object, if necessary.  We take care 
++  // Now we can track the promoted object, if necessary.  We take care
+   // To delay the transition from uninitialized to full object
+   // (i.e., insertion of klass pointer) until after, so that it
+   // atomically becomes a promoted object.
+@@ -1325,20 +1322,20 @@
+   assert(old->is_oop(), "Will dereference klass ptr below");
+   collector()->promoted(true,          // parallel
+                         obj_ptr, old->is_objArray(), word_sz);
+-  
++
+   NOT_PRODUCT(
+     Atomic::inc(&_numObjectsPromoted);
+     Atomic::add((jint)CompactibleFreeListSpace::adjustObjectSize(obj->size()),
+                 &_numWordsPromoted);
+   )
+ 
+-  return obj; 
++  return obj;
+ }
+ 
+ void
+ ConcurrentMarkSweepGeneration::
+ par_promote_alloc_undo(int thread_num,
+-		       HeapWord* obj, size_t word_sz) {
++                       HeapWord* obj, size_t word_sz) {
+   // CMS does not support promotion undo.
+   ShouldNotReachHere();
+ }
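The comment in par_promote() above pins down an ordering: copy the body, restore the mark word, and install the klass pointer last, so the copy atomically becomes a promoted object for concurrent observers. In portable C++ the same publish-last idea looks like this (the types and the atomic barrier are stand-ins, not the VM's oop layout or its internal fences):

#include <atomic>
#include <cstring>

// A scanner that reads a non-null klass is guaranteed, by
// release/acquire ordering, to observe a fully copied body.
struct FakeObj {
  std::atomic<void*> klass{nullptr};  // null means "not yet a real object"
  unsigned long mark{0};
  char payload[32]{};
};

void publish_copy(FakeObj* to, const FakeObj* from, unsigned long saved_mark) {
  std::memcpy(to->payload, from->payload, sizeof to->payload); // body first
  to->mark = saved_mark;                                       // then the mark word
  // Install the klass pointer last, with release semantics.
  to->klass.store(from->klass.load(std::memory_order_relaxed),
                  std::memory_order_release);
}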
+@@ -1389,12 +1386,12 @@
+   // If the rotation is not on the concurrent collection
+   // type, don't start a concurrent collection.
+   NOT_PRODUCT(
+-    if (RotateCMSCollectionTypes && 
+-	(_cmsGen->debug_collection_type() != 
+-	  ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
+-      assert(_cmsGen->debug_collection_type() != 
+-	ConcurrentMarkSweepGeneration::Unknown_collection_type,
+-	"Bad cms collection type");
++    if (RotateCMSCollectionTypes &&
++        (_cmsGen->debug_collection_type() !=
++          ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
++      assert(_cmsGen->debug_collection_type() !=
++        ConcurrentMarkSweepGeneration::Unknown_collection_type,
++        "Bad cms collection type");
+       return false;
+     }
+   )
+@@ -1499,7 +1496,7 @@
+   if (occupancy() > initiatingOccupancy) {
+     if (PrintGCDetails && Verbose) {
+       gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
+-	short_name(), occupancy(), initiatingOccupancy);
++        short_name(), occupancy(), initiatingOccupancy);
+     }
+     return true;
+   }
+@@ -1509,7 +1506,7 @@
+   if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
+     if (PrintGCDetails && Verbose) {
+       gclog_or_tty->print(" %s: collect because expanded for allocation ",
+-	short_name());
++        short_name());
+     }
+     return true;
+   }
+@@ -1519,15 +1516,15 @@
+   if (gch->incremental_collection_will_fail()) {
+     if (PrintGCDetails && Verbose) {
+       gclog_or_tty->print(" %s: collect because incremental collection will fail ",
+-	short_name());
++        short_name());
+     }
+     return true;
+   }
+-  if (!_cmsSpace->adaptive_freelists() && 
++  if (!_cmsSpace->adaptive_freelists() &&
+       _cmsSpace->linearAllocationWouldFail()) {
+     if (PrintGCDetails && Verbose) {
+       gclog_or_tty->print(" %s: collect because of linAB ",
+-	short_name());
++        short_name());
+     }
+     return true;
+   }
+@@ -1551,8 +1548,8 @@
+     // For debugging purposes skip the collection if the state
+     // is not currently idle
+     if (TraceCMSState) {
+-      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d", 
+-	Thread::current(), full, _collectorState);
++      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
++        Thread::current(), full, _collectorState);
+     }
+     return;
+   }
+@@ -1585,13 +1582,13 @@
+     CGC_lock->notify();   // nudge CMS thread
+   }
+ }
+-  
++
+ 
+ // The foreground and background collectors need to coordinate in order
+ // to make sure that they do not mutually interfere with CMS collections.
+ // When a background collection is active,
+ // the foreground collector may need to take over (preempt) and
+-// synchronously complete an ongoing collection. Depending on the 
++// synchronously complete an ongoing collection. Depending on the
+ // frequency of the background collections and the heap usage
+ // of the application, this preemption can be seldom or frequent.
+ // There are only certain
+@@ -1619,16 +1616,16 @@
+ //
+ // Variable used in baton passing.
+ //   _foregroundGCIsActive - Set to true by the foreground collector when
+-//	it wants the baton.  The foreground clears it when it has finished
+-//	the collection.
++//      it wants the baton.  The foreground clears it when it has finished
++//      the collection.
+ //   _foregroundGCShouldWait - Set to true by the background collector
+ //        when it is running.  The foreground collector waits while
+-//	_foregroundGCShouldWait is true.
++//      _foregroundGCShouldWait is true.
+ //  CGC_lock - monitor used to protect access to the above variables
+-//	and to notify the foreground and background collectors.
++//      and to notify the foreground and background collectors.
+ //  _collectorState - current state of the CMS collection.
+-// 
+-// The foreground collector 
++//
++// The foreground collector
+ //   acquires the CGC_lock
+ //   sets _foregroundGCIsActive
+ //   waits on the CGC_lock for _foregroundGCShouldWait to be false
+@@ -1640,7 +1637,7 @@
+ //   returns
+ //
+ // The background collector in a loop iterating on the phases of the
+-//	collection
++//      collection
+ //   acquires the CGC_lock
+ //   sets _foregroundGCShouldWait
+ //   if _foregroundGCIsActive is set
+@@ -1650,13 +1647,13 @@
+ //   otherwise
+ //     proceed with that phase of the collection
+ //     if the phase is a stop-the-world phase,
+-//	 yield the baton once more just before enqueueing
+-//	 the stop-world CMS operation (executed by the VM thread).
++//       yield the baton once more just before enqueueing
++//       the stop-world CMS operation (executed by the VM thread).
+ //   returns after all phases of the collection are done
+-//   
++//
+ 
+ void CMSCollector::acquire_control_and_collect(bool full,
+-	bool clear_all_soft_refs) {
++        bool clear_all_soft_refs) {
+   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
+   assert(!Thread::current()->is_ConcurrentGC_thread(),
+          "shouldn't try to acquire control from self!");
+@@ -1721,7 +1718,7 @@
+   if (TraceCMSState) {
+     gclog_or_tty->print_cr("CMS foreground collector has asked for control "
+       INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
+-    gclog_or_tty->print_cr("	gets control with state %d", _collectorState);
++    gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
+   }
+ 
+   // Check if we need to do a compaction, or if not, whether
+@@ -1733,11 +1730,11 @@
+ 
+ NOT_PRODUCT(
+   if (RotateCMSCollectionTypes) {
+-    if (_cmsGen->debug_collection_type() == 
+-	ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
++    if (_cmsGen->debug_collection_type() ==
++        ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
+       should_compact = true;
+-    } else if (_cmsGen->debug_collection_type() == 
+-	       ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
++    } else if (_cmsGen->debug_collection_type() ==
++               ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
+       should_compact = false;
+     }
+   }
+@@ -1746,7 +1743,7 @@
+   if (PrintGCDetails && first_state > Idling) {
+     GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
+     if (GCCause::is_user_requested_gc(cause) ||
+-	GCCause::is_serviceability_requested_gc(cause)) {
++        GCCause::is_serviceability_requested_gc(cause)) {
+       gclog_or_tty->print(" (concurrent mode interrupted)");
+     } else {
+       gclog_or_tty->print(" (concurrent mode failure)");
+@@ -1793,7 +1790,7 @@
+   }
+ 
+   // Calculate the fraction of the CMS generation was freed during
+-  // the last collection. 
++  // the last collection.
+   // Only consider the STW compacting cost for now.
+   //
+   // Note that the gc time limit test only works for the collections
+@@ -1802,15 +1799,15 @@
+   // freed by the collection is the free space in the young gen +
+   // tenured gen.
+ 
+-  double fraction_free = 
++  double fraction_free =
+     ((double)_cmsGen->free())/((double)_cmsGen->max_capacity());
+-  if ((100.0 * size_policy()->compacting_gc_cost()) > 
+-	 ((double) GCTimeLimit) &&
+-	((fraction_free * 100) < GCHeapFreeLimit)) {
++  if ((100.0 * size_policy()->compacting_gc_cost()) >
++         ((double) GCTimeLimit) &&
++        ((fraction_free * 100) < GCHeapFreeLimit)) {
+     size_policy()->inc_gc_time_limit_count();
+-    if (UseGCOverheadLimit && 
+-	(size_policy()->gc_time_limit_count() > 
+-	 AdaptiveSizePolicyGCTimeLimitThreshold)) {
++    if (UseGCOverheadLimit &&
++        (size_policy()->gc_time_limit_count() >
++         AdaptiveSizePolicyGCTimeLimitThreshold)) {
+       size_policy()->set_gc_time_limit_exceeded(true);
+       // Avoid consecutive OOM due to the gc time limit by resetting
+       // the counter.
+@@ -1949,7 +1946,7 @@
+   // Note that we do not use this sample to update the _sweep_estimate.
+   _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
+                                           _sweep_estimate.padded_average());
+-  
++
+   GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
+     ref_processor(), clear_all_soft_refs);
+   #ifdef ASSERT
+@@ -1988,7 +1985,7 @@
+   // Restart the "sweep timer" for next epoch.
+   _sweep_timer.reset();
+   _sweep_timer.start();
+-  
++
+   // Sample collection pause time and reset for collection interval.
+   if (UseAdaptiveSizePolicy) {
+     size_policy()->msc_collection_end(gch->gc_cause());
+@@ -2096,8 +2093,8 @@
+ void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
+   assert(Thread::current()->is_ConcurrentGC_thread(),
+     "A CMS asynchronous collection is only allowed on a CMS thread.");
+-    
+-  GenCollectedHeap* gch = GenCollectedHeap::heap(); 
++
++  GenCollectedHeap* gch = GenCollectedHeap::heap();
+   {
+     bool safepoint_check = Mutex::_no_safepoint_check_flag;
+     MutexLockerEx hl(Heap_lock, safepoint_check);
+@@ -2137,19 +2134,19 @@
+   // The change of the collection state is normally done at this level;
+   // the exceptions are phases that are executed while the world is
+   // stopped.  For those phases the change of state is done while the
+-  // world is stopped.  For baton passing purposes this allows the 
++  // world is stopped.  For baton passing purposes this allows the
+   // background collector to finish the phase and change state atomically.
+   // The foreground collector cannot wait on a phase that is done
+   // while the world is stopped because the foreground collector already
+   // has the world stopped and would deadlock.
+   while (_collectorState != Idling) {
+     if (TraceCMSState) {
+-      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d", 
+-	Thread::current(), _collectorState);
++      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
++        Thread::current(), _collectorState);
+     }
+-    // The foreground collector 
++    // The foreground collector
+     //   holds the Heap_lock throughout its collection.
+-    //	 holds the CMS token (but not the lock)
++    //   holds the CMS token (but not the lock)
+     //     except while it is waiting for the background collector to yield.
+     //
+     // The foreground collector should be blocked (not for long)
+@@ -2177,8 +2174,8 @@
+         assert(_foregroundGCShouldWait == false, "We set it to false in "
+                "waitForForegroundGC()");
+         if (TraceCMSState) {
+-          gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT 
+-            " exiting collection CMS state %d", 
++          gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
++            " exiting collection CMS state %d",
+             Thread::current(), _collectorState);
+         }
+         return;
+@@ -2201,45 +2198,45 @@
+       case InitialMarking:
+         {
+           ReleaseForegroundGC x(this);
+-	  stats().record_cms_begin();
++          stats().record_cms_begin();
+ 
+           VM_CMS_Initial_Mark initial_mark_op(this);
+-	  VMThread::execute(&initial_mark_op);
++          VMThread::execute(&initial_mark_op);
+         }
+-	// The collector state may be any legal state at this point
+-	// since the background collector may have yielded to the
+-	// foreground collector.
+-	break;
++        // The collector state may be any legal state at this point
++        // since the background collector may have yielded to the
++        // foreground collector.
++        break;
+       case Marking:
+-	// initial marking in checkpointRootsInitialWork has been completed
++        // initial marking in checkpointRootsInitialWork has been completed
+         if (markFromRoots(true)) { // we were successful
+-	  assert(_collectorState == Precleaning, "Collector state should "
+-	    "have changed");
++          assert(_collectorState == Precleaning, "Collector state should "
++            "have changed");
+         } else {
+           assert(_foregroundGCIsActive, "Internal state inconsistency");
+         }
+-	break;
++        break;
+       case Precleaning:
+-	if (UseAdaptiveSizePolicy) {
++        if (UseAdaptiveSizePolicy) {
+           size_policy()->concurrent_precleaning_begin();
+-	}
+-	// marking from roots in markFromRoots has been completed
+-	preclean();
+-	if (UseAdaptiveSizePolicy) {
++        }
++        // marking from roots in markFromRoots has been completed
++        preclean();
++        if (UseAdaptiveSizePolicy) {
+           size_policy()->concurrent_precleaning_end();
+-	}
+-	assert(_collectorState == AbortablePreclean ||
++        }
++        assert(_collectorState == AbortablePreclean ||
+                _collectorState == FinalMarking,
+                "Collector state should have changed");
+-	break;
++        break;
+       case AbortablePreclean:
+-	if (UseAdaptiveSizePolicy) {
++        if (UseAdaptiveSizePolicy) {
+         size_policy()->concurrent_phases_resume();
+-	}
++        }
+         abortable_preclean();
+-	if (UseAdaptiveSizePolicy) {
++        if (UseAdaptiveSizePolicy) {
+           size_policy()->concurrent_precleaning_end();
+-	}
++        }
+         assert(_collectorState == FinalMarking, "Collector state should "
+           "have changed");
+         break;
+@@ -2249,26 +2246,26 @@
+ 
+           VM_CMS_Final_Remark final_remark_op(this);
+           VMThread::execute(&final_remark_op);
+-	  }
++          }
+         assert(_foregroundGCShouldWait, "block post-condition");
+-	break;
++        break;
+       case Sweeping:
+-	if (UseAdaptiveSizePolicy) {
++        if (UseAdaptiveSizePolicy) {
+           size_policy()->concurrent_sweeping_begin();
+-	}
+-	// final marking in checkpointRootsFinal has been completed
++        }
++        // final marking in checkpointRootsFinal has been completed
+         sweep(true);
+-	assert(_collectorState == Resizing, "Collector state change "
+-	  "to Resizing must be done under the free_list_lock");
++        assert(_collectorState == Resizing, "Collector state change "
++          "to Resizing must be done under the free_list_lock");
+         _full_gcs_since_conc_gc = 0;
+ 
+         // Stop the timers for adaptive size policy for the concurrent phases
+         if (UseAdaptiveSizePolicy) {
+           size_policy()->concurrent_sweeping_end();
+           size_policy()->concurrent_phases_end(gch->gc_cause(),
+-					     gch->prev_gen(_cmsGen)->capacity(),
++                                             gch->prev_gen(_cmsGen)->capacity(),
+                                              _cmsGen->free());
+-	}
++        }
+ 
+       case Resizing: {
+         // Sweeping has been completed...
+@@ -2291,28 +2288,28 @@
+         break;
+       }
+       case Resetting:
+-	// CMS heap resizing has been completed
++        // CMS heap resizing has been completed
+         reset(true);
+-	assert(_collectorState == Idling, "Collector state should "
+-	  "have changed");
+-	stats().record_cms_end();
+-	// Don't move the concurrent_phases_end() and compute_new_size()
+-	// calls to here because a preempted background collection
+-	// has it's state set to "Resetting".
+-	break;
++        assert(_collectorState == Idling, "Collector state should "
++          "have changed");
++        stats().record_cms_end();
++        // Don't move the concurrent_phases_end() and compute_new_size()
++        // calls to here because a preempted background collection
++        // has its state set to "Resetting".
++        break;
+       case Idling:
+       default:
+-	ShouldNotReachHere();
+-	break;
++        ShouldNotReachHere();
++        break;
+     }
+     if (TraceCMSState) {
+-      gclog_or_tty->print_cr("	Thread " INTPTR_FORMAT " done - next CMS state %d", 
+-	Thread::current(), _collectorState);
++      gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
++        Thread::current(), _collectorState);
+     }
+     assert(_foregroundGCShouldWait, "block post-condition");
+   }
+ 
+-  // Should this be in gc_epilogue? 
++  // Should this be in gc_epilogue?
+   collector_policy()->counters()->update_counters();
+ 
+   {
+@@ -2328,8 +2325,8 @@
+            "Possible deadlock");
+   }
+   if (TraceCMSState) {
+-    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT 
+-      " exiting collection CMS state %d", 
++    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
++      " exiting collection CMS state %d",
+       Thread::current(), _collectorState);
+   }
+   if (PrintGC && Verbose) {
+@@ -2340,12 +2337,12 @@
+ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
+   assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
+          "Foreground collector should be waiting, not executing");
+-  assert(Thread::current()->is_VM_thread(), "A foreground collection" 
++  assert(Thread::current()->is_VM_thread(), "A foreground collection"
+     "may only be done by the VM Thread with the world stopped");
+   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
+          "VM thread should have CMS token");
+ 
+-  NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose, 
++  NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
+     true, gclog_or_tty);)
+   if (UseAdaptiveSizePolicy) {
+     size_policy()->ms_collection_begin();
+@@ -2362,28 +2359,28 @@
+   bool init_mark_was_synchronous = false; // until proven otherwise
+   while (_collectorState != Idling) {
+     if (TraceCMSState) {
+-      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d", 
+-	Thread::current(), _collectorState);
++      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
++        Thread::current(), _collectorState);
+     }
+     switch (_collectorState) {
+       case InitialMarking:
+         init_mark_was_synchronous = true;  // fact to be exploited in re-mark
+         checkpointRootsInitial(false);
+-	assert(_collectorState == Marking, "Collector state should have changed"
+-	  " within checkpointRootsInitial()");
+-	break;
++        assert(_collectorState == Marking, "Collector state should have changed"
++          " within checkpointRootsInitial()");
++        break;
+       case Marking:
+-	// initial marking in checkpointRootsInitialWork has been completed
++        // initial marking in checkpointRootsInitialWork has been completed
+         if (VerifyDuringGC &&
+             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+           gclog_or_tty->print("Verify before initial mark: ");
+           Universe::verify(true);
+         }
+-        { 
++        {
+           bool res = markFromRoots(false);
+-	  assert(res && _collectorState == FinalMarking, "Collector state should "
+-	    "have changed");
+-	  break;
++          assert(res && _collectorState == FinalMarking, "Collector state should "
++            "have changed");
++          break;
+         }
+       case FinalMarking:
+         if (VerifyDuringGC &&
+@@ -2393,19 +2390,19 @@
+         }
+         checkpointRootsFinal(false, clear_all_soft_refs,
+                              init_mark_was_synchronous);
+-	assert(_collectorState == Sweeping, "Collector state should not "
+-	  "have changed within checkpointRootsFinal()");
+-	break;
++        assert(_collectorState == Sweeping, "Collector state should not "
++          "have changed within checkpointRootsFinal()");
++        break;
+       case Sweeping:
+-	// final marking in checkpointRootsFinal has been completed
++        // final marking in checkpointRootsFinal has been completed
+         if (VerifyDuringGC &&
+             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+           gclog_or_tty->print("Verify before sweep: ");
+           Universe::verify(true);
+         }
+         sweep(false);
+-	assert(_collectorState == Resizing, "Incorrect state");
+-	break;
++        assert(_collectorState == Resizing, "Incorrect state");
++        break;
+       case Resizing: {
+         // Sweeping has been completed; the actual resize in this case
+         // is done separately; nothing to be done in this state.
+@@ -2413,27 +2410,27 @@
+         break;
+       }
+       case Resetting:
+-	// The heap has been resized.
++        // The heap has been resized.
+         if (VerifyDuringGC &&
+             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+           gclog_or_tty->print("Verify before reset: ");
+           Universe::verify(true);
+         }
+         reset(false);
+-	assert(_collectorState == Idling, "Collector state should "
+-	  "have changed");
+-	break;
++        assert(_collectorState == Idling, "Collector state should "
++          "have changed");
++        break;
+       case Precleaning:
+       case AbortablePreclean:
+         // Elide the preclean phase
+         _collectorState = FinalMarking;
+         break;
+       default:
+-	ShouldNotReachHere();
++        ShouldNotReachHere();
+     }
+     if (TraceCMSState) {
+-      gclog_or_tty->print_cr("	Thread " INTPTR_FORMAT " done - next CMS state %d", 
+-	Thread::current(), _collectorState);
++      gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
++        Thread::current(), _collectorState);
+     }
+   }
+ 
+@@ -2447,8 +2444,8 @@
+     Universe::verify(true);
+   }
+   if (TraceCMSState) {
+-    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT 
+-      " exiting collection CMS state %d", 
++    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
++      " exiting collection CMS state %d",
+       Thread::current(), _collectorState);
+   }
+ }
+@@ -2530,7 +2527,7 @@
+     // ignore it since all relevant work has already been done.
+     return;
+   }
+-  
++
+   // set a bit saying prologue has been called; cleared in epilogue
+   _between_prologue_and_epilogue = true;
+   // Claim locks for common data structures, then call gc_prologue_work()
+@@ -2599,7 +2596,7 @@
+          || (   CMSScavengeBeforeRemark
+              && Thread::current()->is_ConcurrentGC_thread()),
+          "Incorrect thread type for epilogue execution");
+-  
++
+   if (!_between_prologue_and_epilogue) {
+     // We have already been invoked; this is a gc_epilogue delegation
+     // from yet another CMS generation that we are responsible for, just
+@@ -2794,7 +2791,7 @@
+   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
+   // Update the saved marks which may affect the root scans.
+   gch->save_marks();
+-  
++
+   if (CMSRemarkVerifyVariant == 1) {
+     // In this first variant of verification, we complete
+     // all marking, then check if the new marks-verctor is
+@@ -2823,7 +2820,7 @@
+   // Mark from roots one level into CMS
+   MarkRefsIntoClosure notOlder(_span, verification_mark_bm(), true /* nmethods */);
+   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
+-  
++
+   gch->gen_process_strong_roots(_cmsGen->level(),
+                                 true,   // younger gens are roots
+                                 true,   // collecting perm gen
+@@ -2873,10 +2870,10 @@
+                                      markBitMap(), true /* nmethods */);
+   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
+   gch->gen_process_strong_roots(_cmsGen->level(),
+-				true,   // younger gens are roots
+-				true,   // collecting perm gen
++                                true,   // younger gens are roots
++                                true,   // collecting perm gen
+                                 SharedHeap::ScanningOption(roots_scanning_options()),
+-				NULL, &notOlder);
++                                NULL, &notOlder);
+ 
+   // Now mark from the roots
+   assert(_revisitStack.isEmpty(), "Should be empty");
+@@ -3105,7 +3102,7 @@
+       return _cmsGen->cmsSpace()->block_start(p);
+     } else {
+       assert(_permGen->cmsSpace()->is_in_reserved(addr),
+-	     "Inconsistent _span?");
++             "Inconsistent _span?");
+       return _permGen->cmsSpace()->block_start(p);
+     }
+   }
+@@ -3116,7 +3113,7 @@
+ HeapWord*
+ ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
+                                                    bool   tlab,
+-						   bool   parallel) {
++                                                   bool   parallel) {
+   assert(!tlab, "Can't deal with TLAB allocation");
+   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
+   expand(word_size*HeapWordSize, MinHeapDeltaBytes,
+@@ -3166,8 +3163,8 @@
+   if (success) {
+     set_expansion_cause(cause);
+     if (PrintGCDetails && Verbose) {
+-      gclog_or_tty->print_cr("Expanded CMS gen for %s", 
+-	CMSExpansionCause::to_string(cause));
++      gclog_or_tty->print_cr("Expanded CMS gen for %s",
++        CMSExpansionCause::to_string(cause));
+     }
+   }
+ }
+@@ -3238,7 +3235,7 @@
+   bool result = _virtual_space.expand_by(bytes);
+   if (result) {
+     HeapWord* old_end = _cmsSpace->end();
+-    size_t new_word_size = 
++    size_t new_word_size =
+       heap_word_size(_virtual_space.committed_size());
+     MemRegion mr(_cmsSpace->bottom(), new_word_size);
+     _bts->resize(new_word_size);  // resize the block offset shared array
+@@ -3288,9 +3285,9 @@
+ // phases.
+ class CMSPhaseAccounting: public StackObj {
+  public:
+-  CMSPhaseAccounting(CMSCollector *collector, 
+-		     const char *phase, 
+-		     bool print_cr = true);
++  CMSPhaseAccounting(CMSCollector *collector,
++                     const char *phase,
++                     bool print_cr = true);
+   ~CMSPhaseAccounting();
+ 
+  private:
+@@ -3312,8 +3309,8 @@
+ };
+ 
+ CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
+-				       const char *phase,
+-				       bool print_cr) :
++                                       const char *phase,
++                                       bool print_cr) :
+   _collector(collector), _phase(phase), _print_cr(print_cr) {
+ 
+   if (PrintCMSStatistics != 0) {
+@@ -3322,7 +3319,7 @@
+   if (PrintGCDetails && PrintGCTimeStamps) {
+     gclog_or_tty->date_stamp(PrintGCDateStamps);
+     gclog_or_tty->stamp();
+-    gclog_or_tty->print_cr(": [%s-concurrent-%s-start]", 
++    gclog_or_tty->print_cr(": [%s-concurrent-%s-start]",
+       _collector->cmsGen()->short_name(), _phase);
+   }
+   _collector->resetTimer();
+@@ -3340,15 +3337,15 @@
+       gclog_or_tty->stamp();
+       gclog_or_tty->print(": ");
+     }
+-    gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]", 
+-		 _collector->cmsGen()->short_name(),
+-		 _phase, _collector->timerValue(), _wallclock.seconds());
++    gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
++                 _collector->cmsGen()->short_name(),
++                 _phase, _collector->timerValue(), _wallclock.seconds());
+     if (_print_cr) {
+       gclog_or_tty->print_cr("");
+     }
+     if (PrintCMSStatistics != 0) {
+       gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
+-		    _collector->yields());
++                    _collector->yields());
+     }
+   }
+ }
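The class reindented above is an RAII phase logger: construction prints the "-start" line and starts the timers, destruction prints the CPU/wall times. A minimal stand-alone analogue of the pattern (simplified output; not the collector's code):

#include <chrono>
#include <cstdio>

class PhaseAccounting {
  const char* _phase;
  std::chrono::steady_clock::time_point _start;

 public:
  explicit PhaseAccounting(const char* phase)
      : _phase(phase), _start(std::chrono::steady_clock::now()) {
    std::printf("[CMS-concurrent-%s-start]\n", _phase);
  }
  ~PhaseAccounting() {
    std::chrono::duration<double> wall =
        std::chrono::steady_clock::now() - _start;
    std::printf("[CMS-concurrent-%s: %.3f secs]\n", _phase, wall.count());
  }
};

// Usage: scope the accounting around a phase, as the collector does:
// { PhaseAccounting pa("mark"); /* ... do the marking ... */ }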
+@@ -3405,7 +3402,7 @@
+   // CMS collection cycle.
+   setup_cms_unloading_and_verification_state();
+ 
+-  NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork", 
++  NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork",
+     PrintGCDetails && Verbose, true, gclog_or_tty);)
+   if (UseAdaptiveSizePolicy) {
+     size_policy()->checkpoint_roots_initial_begin();
+@@ -3440,10 +3437,10 @@
+     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
+     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
+     gch->gen_process_strong_roots(_cmsGen->level(),
+-				  true,   // younger gens are roots
+-				  true,   // collecting perm gen
++                                  true,   // younger gens are roots
++                                  true,   // collecting perm gen
+                                   SharedHeap::ScanningOption(roots_scanning_options()),
+-				  NULL, &notOlder);
++                                  NULL, &notOlder);
+   }
+ 
+   // Clear mod-union table; it will be dirtied in the prologue of
+@@ -3459,7 +3456,7 @@
+   #if 0
+   {
+     MemRegion mr = MemRegion((HeapWord*)_virtual_space.low(),
+-			     (HeapWord*)_virtual_space.high());
++                             (HeapWord*)_virtual_space.high());
+     _ct->ct_bs()->preclean_dirty_cards(mr);
+   }
+   #endif
+@@ -3589,7 +3586,7 @@
+     ParallelTaskTerminator(n_threads, queue_set),
+     _collector(collector),
+     _yield(yield) { }
+- 
++
+   void set_task(CMSConcMarkingTask* task) {
+     _task = task;
+   }
+@@ -3614,7 +3611,7 @@
+   CMSConcMarkingTerminator _term;
+ 
+  public:
+-  CMSConcMarkingTask(CMSCollector* collector, 
++  CMSConcMarkingTask(CMSCollector* collector,
+                  CompactibleFreeListSpace* cms_space,
+                  CompactibleFreeListSpace* perm_space,
+                  bool asynch, int n_workers,
+@@ -3648,7 +3645,7 @@
+   CMSConcMarkingTerminator* terminator() { return &_term; }
+ 
+   void work(int i);
+-    
++
+   virtual void coordinator_yield();  // stuff done by coordinator
+   bool result() { return _result; }
+ 
+@@ -3697,7 +3694,7 @@
+ //    . then get a batch of oops from global work queue if any
+ //    . then do work stealing
+ // -- When all tasks claimed (both spaces)
+-//    and local work queue empty, 
++//    and local work queue empty,
+ //    then in a loop do:
+ //    . check global overflow stack; steal a batch of oops and trace
+ //    . try to steal from other threads oif GOS is empty
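The loop sketched in that comment — local queue first, then the global overflow stack ("GOS"), then stealing from peers, then termination — has roughly this shape. All types below are simplified stand-ins for OopTaskQueue and friends, and the locking is a placeholder for the real lock-free task-queue protocols:

#include <deque>
#include <mutex>
#include <optional>
#include <vector>

struct Worker {
  std::deque<void*> local;            // private mark queue
};

struct Shared {
  std::mutex m;
  std::vector<void*> overflow;        // global overflow stack
};

std::optional<void*> next_task(Worker& me, Shared& shared,
                               std::vector<Worker*>& peers) {
  if (!me.local.empty()) {            // 1. drain local work first
    void* t = me.local.back();
    me.local.pop_back();
    return t;
  }
  {                                   // 2. then refill from the overflow stack
    std::lock_guard<std::mutex> l(shared.m);
    if (!shared.overflow.empty()) {
      void* t = shared.overflow.back();
      shared.overflow.pop_back();
      return t;
    }
  }
  for (Worker* p : peers) {           // 3. finally, steal from another thread
    if (p != &me && !p->local.empty()) {
      void* t = p->local.front();     // steal from the cold end
      p->local.pop_front();           // (unsynchronized here; real queues are not)
      return t;
    }
  }
  return std::nullopt;                // no work anywhere: offer termination
}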
+@@ -3994,7 +3991,7 @@
+ void CMSConcMarkingTask::coordinator_yield() {
+   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
+          "CMS thread should hold CMS token");
+-  
++
+   // First give up the locks, then yield, then re-lock
+   // We should probably use a constructor/destructor idiom to
+   // do this unlock/lock or modify the MutexUnlocker class to
+@@ -4034,8 +4031,8 @@
+   //
+   // Tony 2006.06.29
+   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
+-	               ConcurrentMarkSweepThread::should_yield() &&
+-	               !CMSCollector::foregroundGCIsActive(); ++i) {
++                       ConcurrentMarkSweepThread::should_yield() &&
++                       !CMSCollector::foregroundGCIsActive(); ++i) {
+     os::sleep(Thread::current(), 1, false);
+     ConcurrentMarkSweepThread::acknowledge_yield_request();
+   }
+@@ -4053,7 +4050,7 @@
+ 
+   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
+   CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
+-  
++
+   CMSConcMarkingTask tsk(this, cms_space, perm_space,
+                          asynch, num_workers /* number requested XXX */,
+                          conc_workers(), task_queues());
+@@ -4064,13 +4061,13 @@
+   // class?? XXX
+   cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
+   perm_space->initialize_sequential_subtasks_for_marking(num_workers);
+-  
++
+   // Refs discovery is already non-atomic.
+   assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
+   // Mutate the Refs discovery so it is MT during the
+   // multi-threaded marking phase.
+   ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);
+-  
++
+   conc_workers()->start_task(&tsk);
+   while (tsk.yielded()) {
+     tsk.coordinator_yield();
+@@ -4081,7 +4078,7 @@
+   while (_restart_addr != NULL) {
+     // XXX For now we do not make use of ABORTED state and have not
+     // yet implemented the right abort semantics (even in the original
+-    // single-threaded CMS case. That needs some more investigation
++    // single-threaded CMS case). That needs some more investigation
+     // and is deferred for now; see CR# TBF. 07252005YSR. XXX
+     assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
+     // If _restart_addr is non-NULL, a marking stack overflow
+@@ -4216,7 +4213,7 @@
+     size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
+     while (!(should_abort_preclean() ||
+              ConcurrentMarkSweepThread::should_terminate())) {
+-      workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2); 
++      workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
+       cumworkdone += workdone;
+       loops++;
+       // Voluntarily terminate abortable preclean phase if we have
+@@ -4238,7 +4235,9 @@
+       // take a short break.
+       if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
+         // Sleep for some time, waiting for work to accumulate
++        stopTimer();
+         cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
++        startTimer();
+         waited++;
+       }
+     }
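
This hunk is one of the few substantive (non-whitespace) changes in the file: blocking waits get bracketed with stopTimer()/startTimer() so the phase timer only accumulates time actually spent working, and a hunk near the end of this file adds assertions to the timer accessors that enforce strict pairing. A minimal standalone sketch of that invariant; PhaseTimer and wait_outside_timer are illustrative stand-ins for HotSpot's elapsedTimer and the CMS lock wait, not the real API:

#include <cassert>
#include <chrono>

class PhaseTimer {
 public:
  void start() {
    assert(!_active && "timer already running");
    _active = true;
    _t0 = std::chrono::steady_clock::now();
  }
  void stop() {
    assert(_active && "timer not running");
    _active = false;
    _seconds += std::chrono::duration<double>(
        std::chrono::steady_clock::now() - _t0).count();
  }
  double seconds() const {
    assert(!_active && "read while running");
    return _seconds;
  }
 private:
  bool _active = false;
  double _seconds = 0.0;
  std::chrono::steady_clock::time_point _t0;
};

// The pattern the hunk introduces: exclude a blocking wait from the
// measured phase time, relying on the asserts to catch unbalanced pairs.
template <typename BlockingWait>
void wait_outside_timer(PhaseTimer& t, BlockingWait wait) {
  t.stop();   // timer must be running here
  wait();     // e.g. the wait on the CMS lock in the hunk above
  t.start();  // resume measuring useful work
}
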
+@@ -4323,8 +4322,10 @@
+     // the computed reachability of the referents, the
+     // only properties manipulated by the precleaning
+     // of these reference lists.
++    stopTimer();
+     CMSTokenSyncWithLocks x(true /* is cms thread */,
+                             bitMapLock());
++    startTimer();
+     sample_eden();
+     // The following will yield to allow foreground
+     // collection to proceed promptly. XXX YSR:
+@@ -4346,8 +4347,10 @@
+                              &_markBitMap, &_modUnionTable,
+                              &_markStack, &_revisitStack,
+                              true /* precleaning phase */);
++    stopTimer();
+     CMSTokenSyncWithLocks ts(true /* is cms thread */,
+                              bitMapLock());
++    startTimer();
+     unsigned int before_count =
+       GenCollectedHeap::heap()->total_collections();
+     SurvivorSpacePrecleanClosure
+@@ -4409,7 +4412,7 @@
+   cumNumCards += curNumCards;
+   if (PrintGCDetails && PrintCMSStatistics != 0) {
+     gclog_or_tty->print_cr(" (cardTable: %d cards, re-scanned %d cards, %d iterations)",
+-		  curNumCards, cumNumCards, numIter);
++                  curNumCards, cumNumCards, numIter);
+   }
+   return cumNumCards;   // as a measure of useful work done
+ }
+@@ -4481,16 +4484,13 @@
+ 
+     MemRegion dirtyRegion;
+     {
++      stopTimer();
+       CMSTokenSync ts(true);
++      startTimer();
+       sample_eden();
+-
+-      if (PrintGCDetails) {
+-        startTimer();
+-      }
+-
+       // Get dirty region starting at nextOffset (inclusive),
+       // simultaneously clearing it.
+-      dirtyRegion = 
++      dirtyRegion =
+         _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
+       assert(dirtyRegion.start() >= nextAddr,
+              "returned region inconsistent?");
+@@ -4507,20 +4507,16 @@
+     // We'll scan the cards in the dirty region (with periodic
+     // yields for foreground GC as needed).
+     if (!dirtyRegion.is_empty()) {
+-      if (PrintGCDetails) {
+-        stopTimer();
+-      }
+       assert(numDirtyCards > 0, "consistency check");
+       HeapWord* stop_point = NULL;
+       {
++        stopTimer();
+         CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
+                                  bitMapLock());
++        startTimer();
+         verify_work_stacks_empty();
+         verify_overflow_empty();
+         sample_eden();
+-        if (PrintGCDetails) {
+-          startTimer();
+-        }
+         stop_point =
+           gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
+       }
+@@ -4535,7 +4531,9 @@
+                (_collectorState == AbortablePreclean && should_abort_preclean()),
+                "Unparsable objects should only be in perm gen.");
+ 
++        stopTimer();
+         CMSTokenSyncWithLocks ts(true, bitMapLock());
++        startTimer();
+         _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
+         if (should_abort_preclean()) {
+           break; // out of preclean loop
+@@ -4545,18 +4543,12 @@
+           lastAddr = next_card_start_after_block(stop_point);
+         }
+       }
+-      if (PrintGCDetails) {
+-        stopTimer();
+-      }
+     } else {
+       assert(lastAddr == endAddr, "consistency check");
+       assert(numDirtyCards == 0, "consistency check");
+       break;
+     }
+   }
+-  if (PrintGCDetails) {
+-    stopTimer();
+-  }
+   verify_work_stacks_empty();
+   verify_overflow_empty();
+   return cumNumDirtyCards;
+@@ -4592,13 +4584,10 @@
+       // See comments in "Precleaning notes" above on why we
+       // do this locking. XXX Could the locking overheads be
+       // too high when dirty cards are sparse? [I don't think so.]
++      stopTimer();
+       CMSTokenSync x(true); // is cms thread
++      startTimer();
+       sample_eden();
+-
+-      if (PrintGCDetails) {
+-        startTimer();
+-      }
+-
+       // Get and clear dirty region from card table
+       dirtyRegion = _ct->ct_bs()->dirty_card_range_after_preclean(
+                                     MemRegion(nextAddr, endAddr));
+@@ -4610,16 +4599,12 @@
+       dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
+ 
+     if (!dirtyRegion.is_empty()) {
+-      if (PrintGCDetails) {
+-        stopTimer();
+-      }
++      stopTimer();
+       CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
++      startTimer();
+       sample_eden();
+       verify_work_stacks_empty();
+       verify_overflow_empty();
+-      if (PrintGCDetails) {
+-        startTimer();
+-      }
+       HeapWord* stop_point =
+         gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
+       if (stop_point != NULL) {
+@@ -4638,16 +4623,10 @@
+           lastAddr = next_card_start_after_block(stop_point);
+         }
+       }
+-      if (PrintGCDetails) {
+-        stopTimer();
+-      }
+     } else {
+       break;
+     }
+   }
+-  if (PrintGCDetails) {
+-    stopTimer();
+-  }
+   verify_work_stacks_empty();
+   verify_overflow_empty();
+   return cumNumDirtyCards;
+@@ -4675,8 +4654,8 @@
+       // Temporarily set flag to false, GCH->do_collection will
+       // expect it to be false and set to true
+       FlagSetting fl(gch->_is_gc_active, false);
+-      NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark", 
+-	PrintGCDetails && Verbose, true, gclog_or_tty);)
++      NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark",
++        PrintGCDetails && Verbose, true, gclog_or_tty);)
+       int level = _cmsGen->level() - 1;
+       if (level >= 0) {
+         gch->do_collection(true,        // full (i.e. force, see below)
+@@ -4738,13 +4717,13 @@
+     // or of an indication of whether the scavenge did indeed occur,
+     // we cannot rely on TLAB's having been filled and must do
+     // so here just in case a scavenge did not happen.
+-      gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
++    gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
+     // Update the saved marks which may affect the root scans.
+     gch->save_marks();
+-  
++
+     {
+       COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
+-  
++
+       // Note on the role of the mod union table:
+       // Since the marker in "markFromRoots" marks concurrently with
+       // mutators, it is possible for some reachable objects not to have been
+@@ -4937,10 +4916,10 @@
+   _timer.reset();
+   _timer.start();
+   gch->gen_process_strong_roots(_collector->_cmsGen->level(),
+-				false,     // yg was scanned above
+-				true,      // collecting perm gen
++                                false,     // yg was scanned above
++                                true,      // collecting perm gen
+                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
+-				NULL, &par_mrias_cl);
++                                NULL, &par_mrias_cl);
+   _timer.stop();
+   if (PrintCMSStatistics != 0) {
+     gclog_or_tty->print_cr(
+@@ -5061,7 +5040,7 @@
+   // assumptions made here and necessary for correctness and
+   // efficiency. Note also that this code might yield inefficient
+   // behaviour in the case of very large objects that span one or
+-  // more work chunks. Such objects would potentially be scanned 
++  // more work chunks. Such objects would potentially be scanned
+   // several times redundantly. Work on 4756801 should try and
+   // address that performance anomaly if at all possible. XXX
+   MemRegion  full_span  = _collector->_span;
+@@ -5345,7 +5324,7 @@
+   // claimed by the parallel threads.
+   cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
+   perm_space->initialize_sequential_subtasks_for_rescan(n_workers);
+-  
++
+   // It turns out that even when we're using 1 thread, doing the work in a
+   // separate thread causes wide variance in run times.  We can't help this
+   // in the multi-threaded case, but we special-case n=1 here to get
+@@ -5396,7 +5375,7 @@
+     // collection so there may be dirty bits in the mod-union table.
+     const int alignment =
+       CardTableModRefBS::card_size * BitsPerWord;
+-    { 
++    {
+       // ... First handle dirty cards in CMS gen
+       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
+       MemRegion ur = _cmsGen->used_region();
+@@ -5410,7 +5389,7 @@
+         gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
+           markFromDirtyCardsClosure.num_dirty_cards());
+       }
+-    } 
++    }
+     {
+       // .. and then repeat for dirty cards in perm gen
+       markFromDirtyCardsClosure.set_space(_permGen->cmsSpace());
+@@ -5436,13 +5415,13 @@
+     TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty);
+ 
+     verify_work_stacks_empty();
+-  
++
+     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
+     gch->gen_process_strong_roots(_cmsGen->level(),
+-				  true,  // younger gens as roots
+-				  true,  // collecting perm gen
++                                  true,  // younger gens as roots
++                                  true,  // collecting perm gen
+                                   SharedHeap::ScanningOption(roots_scanning_options()),
+-				  NULL, &mrias_cl);
++                                  NULL, &mrias_cl);
+   }
+   verify_work_stacks_empty();
+   // Restore evacuated mark words, if any, used for overflow list links
+@@ -5483,7 +5462,7 @@
+   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
+ 
+   ParallelTaskTerminator* terminator() { return &_term; }
+-  
++
+   void do_work_steal(int i,
+                      CMSParDrainMarkingStackClosure* drain,
+                      CMSParKeepAliveClosure* keep_alive,
+@@ -5587,9 +5566,9 @@
+   WorkGang* workers = gch->workers();
+   assert(workers != NULL, "Need parallel worker threads.");
+   int n_workers = workers->total_workers();
+-  CMSRefProcTaskProxy rp_task(task, &_collector, 
+-                              _collector.ref_processor()->span(), 
+-                              _collector.markBitMap(), 
++  CMSRefProcTaskProxy rp_task(task, &_collector,
++                              _collector.ref_processor()->span(),
++                              _collector.markBitMap(),
+                               n_workers, _collector.task_queues());
+   workers->run_task(&rp_task);
+ }
+@@ -5634,7 +5613,7 @@
+     TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
+     if (rp->processing_is_mt()) {
+       CMSRefProcTaskExecutor task_executor(*this);
+-      rp->process_discovered_references(soft_ref_policy, 
++      rp->process_discovered_references(soft_ref_policy,
+                                         &_is_alive_closure,
+                                         &cmsKeepAliveClosure,
+                                         &cmsDrainMarkingStackClosure,
+@@ -5679,7 +5658,7 @@
+       cmsDrainMarkingStackClosure.do_void();
+       verify_work_stacks_empty();
+     }
+-     
++
+     {
+       TraceTime t("scrub symbol & string tables", PrintGCDetails, false, gclog_or_tty);
+       // Now clean up stale oops in SymbolTable and StringTable
+@@ -5715,8 +5694,8 @@
+   // Only the VM thread or the CMS thread should be here.
+   assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
+          "Unexpected thread type");
+-  // If this is the vm thread, the foreground process 
+-  // should not be waiting.  Note that _foregroundGCIsActive is 
++  // If this is the vm thread, the foreground process
++  // should not be waiting.  Note that _foregroundGCIsActive is
+   // true while the foreground collector is waiting.
+   if (_foregroundGCShouldWait) {
+     // We cannot be the VM thread
+@@ -5727,7 +5706,7 @@
+     // phase of CMS collection.
+     if (t->is_ConcurrentGC_thread()) {
+       assert(_collectorState == InitialMarking ||
+-             _collectorState == FinalMarking, 
++             _collectorState == FinalMarking,
+              "Should be a stop-world phase");
+       // The CMS thread should be holding the CMS_token.
+       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
+@@ -5816,13 +5795,13 @@
+   // based on the state of the collection.  The former is done in
+   // the interval [Marking, Sweeping] and the latter in the interval
+   // [Marking, Sweeping).  Thus the transitions into the Marking state
+-  // and out of the Sweeping state must be synchronously visible 
++  // and out of the Sweeping state must be synchronously visible
+   // globally to the mutators.
+   // The transition into the Marking state happens with the world
+   // stopped so the mutators will globally see it.  Sweeping is
+   // done asynchronously by the background collector so the transition
+   // from the Sweeping state to the Resizing state must be done
+-  // under the freelistLock (as is the check for whether to 
++  // under the freelistLock (as is the check for whether to
+   // allocate-live and whether to dirty the mod-union table).
+   assert(_collectorState == Resizing, "Change of collector state to"
+     " Resizing must be done under the freelistLocks (plural)");
+@@ -5852,10 +5831,10 @@
+ void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
+   double nearLargestPercent = 0.999;
+   HeapWord*  minAddr        = _cmsSpace->bottom();
+-  HeapWord*  largestAddr    = 
++  HeapWord*  largestAddr    =
+     (HeapWord*) _cmsSpace->dictionary()->findLargestDict();
+   if (largestAddr == 0) {
+-    // The dictionary appears to be empty.  In this case 
++    // The dictionary appears to be empty.  In this case
+     // try to coalesce at the end of the heap.
+     largestAddr = _cmsSpace->end();
+   }
+@@ -5874,7 +5853,7 @@
+ }
+ 
+ void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
+-						    bool full) {
++                                                    bool full) {
+   // The next lower level has been collected.  Gather any statistics
+   // that are of interest at this point.
+   if (!full && (current_level + 1) == level()) {
+@@ -5891,7 +5870,7 @@
+     gch->gen_policy()->size_policy();
+   assert(sp->is_gc_cms_adaptive_size_policy(),
+     "Wrong type of size policy");
+-  return sp; 
++  return sp;
+ }
+ 
+ void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
+@@ -5899,7 +5878,7 @@
+     gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
+   }
+   _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
+-  _debug_collection_type = 
++  _debug_collection_type =
+     (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
+   if (PrintGCDetails && Verbose) {
+     gclog_or_tty->print_cr("to %d ", _debug_collection_type);
+@@ -5922,7 +5901,7 @@
+   //                 generation by another thread
+   // . bitMapLock: if this is held, no other thread can access or update
+   //
+-    
++
+   // Note that we need to hold the freelistLock if we use
+   // block iterate below; else the iterator might go awry if
+   // a mutator (or promotion) causes block contents to change
+@@ -5932,7 +5911,7 @@
+   // promote), so we might as well prevent all young generation
+   // GC's while we do a sweeping step. For the same reason, we might
+   // as well take the bit map lock for the entire duration
+-  
++
+   // check that we hold the requisite locks
+   assert(have_cms_token(), "Should hold cms token");
+   assert(   (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
+@@ -5940,7 +5919,7 @@
+         "Should possess CMS token to sweep");
+   assert_lock_strong(gen->freelistLock());
+   assert_lock_strong(bitMapLock());
+-  
++
+   assert(!_sweep_timer.is_active(), "Was switched off in an outer context");
+   gen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
+                                       _sweep_estimate.padded_average());
+@@ -5972,7 +5951,7 @@
+     // has done a collection and the resetting.
+     if (_collectorState != Resetting) {
+       assert(_collectorState == Idling, "The state should only change"
+-	" because the foreground collector has finished the collection");
++        " because the foreground collector has finished the collection");
+       return;
+     }
+ 
+@@ -5983,7 +5962,7 @@
+ 
+     HeapWord* curAddr = _markBitMap.startWord();
+     while (curAddr < _markBitMap.endWord()) {
+-      size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr); 
++      size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
+       MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
+       _markBitMap.clear_large_range(chunk);
+       if (ConcurrentMarkSweepThread::should_yield() &&
+@@ -6001,13 +5980,13 @@
+         }
+         icms_wait();
+ 
+-	// See the comment in coordinator_yield()
+-	for (unsigned i = 0; i < CMSYieldSleepCount &&
+-	                ConcurrentMarkSweepThread::should_yield() &&
+-	                !CMSCollector::foregroundGCIsActive(); ++i) {
+-	  os::sleep(Thread::current(), 1, false);    
+-	  ConcurrentMarkSweepThread::acknowledge_yield_request();
+-	}
++        // See the comment in coordinator_yield()
++        for (unsigned i = 0; i < CMSYieldSleepCount &&
++                        ConcurrentMarkSweepThread::should_yield() &&
++                        !CMSCollector::foregroundGCIsActive(); ++i) {
++          os::sleep(Thread::current(), 1, false);
++          ConcurrentMarkSweepThread::acknowledge_yield_request();
++        }
+ 
+         ConcurrentMarkSweepThread::synchronize(true);
+         bitMapLock()->lock_without_safepoint_check();
+@@ -6074,7 +6053,7 @@
+ // the CMS thread and yet continue to run the VM for a while
+ // after that.
+ void CMSCollector::verify_ok_to_terminate() const {
+-  assert(Thread::current()->is_ConcurrentGC_thread(), 
++  assert(Thread::current()->is_ConcurrentGC_thread(),
+          "should be called by CMS thread");
+   assert(!_foregroundGCShouldWait, "should be false");
+   // We could check here that all the various low-level locks
+@@ -6133,7 +6112,7 @@
+ 
+ // CMS Bit Map Wrapper /////////////////////////////////////////
+ 
+-// Construct a CMS bit map infrastructure, but don't create the 
++// Construct a CMS bit map infrastructure, but don't create the
+ // bit vector itself. That is done by a separate call CMSBitMap::allocate()
+ // further below.
+ CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
+@@ -6462,9 +6441,9 @@
+ 
+   // See the comment in coordinator_yield()
+   for (unsigned i = 0; i < CMSYieldSleepCount &&
+-	               ConcurrentMarkSweepThread::should_yield() &&
+-	               !CMSCollector::foregroundGCIsActive(); ++i) {
+-    os::sleep(Thread::current(), 1, false);    
++                       ConcurrentMarkSweepThread::should_yield() &&
++                       !CMSCollector::foregroundGCIsActive(); ++i) {
++    os::sleep(Thread::current(), 1, false);
+     ConcurrentMarkSweepThread::acknowledge_yield_request();
+   }
+ 
+@@ -6603,7 +6582,7 @@
+     } else {
+       // An object not (yet) reached by marking: we merely need to
+       // compute its size so as to go look at the next block.
+-      assert(p->is_oop(true), "should be an oop"); 
++      assert(p->is_oop(true), "should be an oop");
+       size = CompactibleFreeListSpace::adjustObjectSize(p->size());
+     }
+   }
+@@ -6630,9 +6609,9 @@
+ 
+   // See the comment in coordinator_yield()
+   for (unsigned i = 0; i < CMSYieldSleepCount &&
+-	               ConcurrentMarkSweepThread::should_yield() &&
+-	               !CMSCollector::foregroundGCIsActive(); ++i) {
+-    os::sleep(Thread::current(), 1, false);    
++                       ConcurrentMarkSweepThread::should_yield() &&
++                       !CMSCollector::foregroundGCIsActive(); ++i) {
++    os::sleep(Thread::current(), 1, false);
+     ConcurrentMarkSweepThread::acknowledge_yield_request();
+   }
+ 
+@@ -6705,9 +6684,9 @@
+ 
+   // See the comment in coordinator_yield()
+   for (unsigned i = 0; i < CMSYieldSleepCount &&
+-	               ConcurrentMarkSweepThread::should_yield() &&
+-	               !CMSCollector::foregroundGCIsActive(); ++i) {
+-    os::sleep(Thread::current(), 1, false);    
++                       ConcurrentMarkSweepThread::should_yield() &&
++                       !CMSCollector::foregroundGCIsActive(); ++i) {
++    os::sleep(Thread::current(), 1, false);
+     ConcurrentMarkSweepThread::acknowledge_yield_request();
+   }
+ 
+@@ -6867,9 +6846,9 @@
+ 
+   // See the comment in coordinator_yield()
+   for (unsigned i = 0; i < CMSYieldSleepCount &&
+-	               ConcurrentMarkSweepThread::should_yield() &&
+-	               !CMSCollector::foregroundGCIsActive(); ++i) {
+-    os::sleep(Thread::current(), 1, false);    
++                       ConcurrentMarkSweepThread::should_yield() &&
++                       !CMSCollector::foregroundGCIsActive(); ++i) {
++    os::sleep(Thread::current(), 1, false);
+     ConcurrentMarkSweepThread::acknowledge_yield_request();
+   }
+ 
+@@ -7465,7 +7444,7 @@
+   assert(this_oop->is_oop_or_null(true),
+          "expected an oop or NULL");
+   HeapWord* addr = (HeapWord*)this_oop;
+-  // Check if oop points into the CMS generation 
++  // Check if oop points into the CMS generation
+   // and is not marked
+   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
+     // a white object ...
+@@ -7522,9 +7501,9 @@
+ 
+   // See the comment in coordinator_yield()
+   for (unsigned i = 0; i < CMSYieldSleepCount &&
+-	               ConcurrentMarkSweepThread::should_yield() &&
+-	               !CMSCollector::foregroundGCIsActive(); ++i) {
+-    os::sleep(Thread::current(), 1, false);    
++                       ConcurrentMarkSweepThread::should_yield() &&
++                       !CMSCollector::foregroundGCIsActive(); ++i) {
++    os::sleep(Thread::current(), 1, false);
+     ConcurrentMarkSweepThread::acknowledge_yield_request();
+   }
+ 
+@@ -7600,7 +7579,7 @@
+   // The current free range should be returned to the free lists
+   // as one coalesced chunk.
+   if (inFreeRange()) {
+-    flushCurFreeChunk(freeFinger(), 
++    flushCurFreeChunk(freeFinger(),
+       pointer_delta(_limit, freeFinger()));
+     assert(freeFinger() < _limit, "the finger pointeth off base");
+     if (CMSTraceSweeper) {
+@@ -7618,11 +7597,11 @@
+                  _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
+       gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects,  "
+                              SIZE_FORMAT" bytes  "
+-	"Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
+-	_numObjectsLive, _numWordsLive*sizeof(HeapWord), 
+-	_numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
++        "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
++        _numObjectsLive, _numWordsLive*sizeof(HeapWord),
++        _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
+       size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) *
+-	sizeof(HeapWord);
++        sizeof(HeapWord);
+       gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
+ 
+       if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
+@@ -7630,10 +7609,10 @@
+         size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
+         size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
+         gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
+-        gclog_or_tty->print("	Indexed List Returned "SIZE_FORMAT" bytes", 
+-  	  indexListReturnedBytes);
+-        gclog_or_tty->print_cr("	Dictionary Returned "SIZE_FORMAT" bytes",
+-  	  dictReturnedBytes);
++        gclog_or_tty->print("   Indexed List Returned "SIZE_FORMAT" bytes",
++          indexListReturnedBytes);
++        gclog_or_tty->print_cr("        Dictionary Returned "SIZE_FORMAT" bytes",
++          dictReturnedBytes);
+       }
+     }
+   )
+@@ -7644,12 +7623,12 @@
+   }
+ }
+ 
+-void SweepClosure::initialize_free_range(HeapWord* freeFinger, 
++void SweepClosure::initialize_free_range(HeapWord* freeFinger,
+     bool freeRangeInFreeLists) {
+   if (CMSTraceSweeper) {
+     gclog_or_tty->print("---- Start free range 0x%x with free block [%d] (%d)\n",
+                freeFinger, _sp->block_size(freeFinger),
+-	       freeRangeInFreeLists);
++               freeRangeInFreeLists);
+   }
+   assert(!inFreeRange(), "Trampling existing free range");
+   set_inFreeRange(true);
+@@ -7658,7 +7637,7 @@
+   set_freeFinger(freeFinger);
+   set_freeRangeInFreeLists(freeRangeInFreeLists);
+   if (CMSTestInFreeList) {
+-    if (freeRangeInFreeLists) { 
++    if (freeRangeInFreeLists) {
+       FreeChunk* fc = (FreeChunk*) freeFinger;
+       assert(fc->isFree(), "A chunk on the free list should be free.");
+       assert(fc->size() > 0, "Free range should have a size");
+@@ -7733,8 +7712,8 @@
+     res = doLiveChunk(fc);
+     debug_only(_sp->verifyFreeLists());
+     NOT_PRODUCT(
+-	_numObjectsLive++;
+-	_numWordsLive += res;
++        _numObjectsLive++;
++        _numWordsLive += res;
+     )
+   }
+   return res;
+@@ -7742,23 +7721,23 @@
+ 
+ // For the smart allocation, record the following
+ //  split deaths - a free chunk is removed from its free list because
+-//	it is being split into two or more chunks.
++//      it is being split into two or more chunks.
+ //  split birth - a free chunk is being added to its free list because
+-//	a larger free chunk has been split and resulted in this free chunk.
++//      a larger free chunk has been split and resulted in this free chunk.
+ //  coal death - a free chunk is being removed from its free list because
+-//	it is being coalesced into a large free chunk.
++//      it is being coalesced into a large free chunk.
+ //  coal birth - a free chunk is being added to its free list because
+-//	it was created when two or more free chunks where coalesced into
+-//	this free chunk.
++//      it was created when two or more free chunks where coalesced into
++//      this free chunk.
+ //
+ // These statistics are used to determine the desired number of free
+ // chunks of a given size.  The desired number is chosen to be relative
+ // to the end of a CMS sweep.  The desired number at the end of a sweep
+-// is the 
+-// 	count-at-end-of-previous-sweep (an amount that was enough)
+-//		- count-at-beginning-of-current-sweep  (the excess)
+-//		+ split-births  (gains in this size during interval)
+-//		- split-deaths  (demands on this size during interval)
++// is the
++//      count-at-end-of-previous-sweep (an amount that was enough)
++//              - count-at-beginning-of-current-sweep  (the excess)
++//              + split-births  (gains in this size during interval)
++//              - split-deaths  (demands on this size during interval)
+ // where the interval is from the end of one sweep to the end of the
+ // next.
+ //
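
The desired-count bookkeeping described in the comment above reduces to one line of signed arithmetic per size class. A sketch under the stated definitions (the struct and field names are illustrative, not HotSpot's FreeList members):

#include <cstddef>

// Per-size-class sweep statistics, named after the quantities in the
// comment above.
struct SweepStats {
  std::size_t prev_sweep_count;   // count at end of previous sweep
  std::size_t begin_sweep_count;  // count at beginning of current sweep
  std::size_t split_births;       // gains at this size during the interval
  std::size_t split_deaths;       // demands on this size during the interval
};

// Desired number of free chunks of this size at the end of the sweep.
inline long desired_count(const SweepStats& s) {
  return static_cast<long>(s.prev_sweep_count)
       - static_cast<long>(s.begin_sweep_count)
       + static_cast<long>(s.split_births)
       - static_cast<long>(s.split_deaths);
}
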
+@@ -7771,16 +7750,16 @@
+ // When making a decision on whether to coalesce a right-hand chunk with
+ // the current left-hand chunk, the current count vs. the desired count
+ // of the left-hand chunk is considered.  Also if the right-hand chunk
+-// is near the large chunk at the end of the heap (see 
+-// ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the 
++// is near the large chunk at the end of the heap (see
++// ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
+ // left-hand chunk is coalesced.
+ //
+ // When making a decision about whether to split a chunk, the desired count
+ // vs. the current count of the candidate to be split is also considered.
+ // If the candidate is underpopulated (currently fewer chunks than desired)
+-// a chunk of an overpopulated (currently more chunks than desired) size may 
++// a chunk of an overpopulated (currently more chunks than desired) size may
+ // be chosen.  The "hint" associated with a free list, if non-null, points
+-// to a free list which may be overpopulated.  
++// to a free list which may be overpopulated.
+ //
+ 
+ void SweepClosure::doAlreadyFreeChunk(FreeChunk* fc) {
+@@ -7788,7 +7767,7 @@
+   // Chunks that cannot be coalesced are not in the
+   // free lists.
+   if (CMSTestInFreeList && !fc->cantCoalesce()) {
+-    assert(_sp->verifyChunkInFreeLists(fc), 
++    assert(_sp->verifyChunkInFreeLists(fc),
+       "free chunk should be in free lists");
+   }
+   // a chunk that is already free, should not have been
+@@ -7799,12 +7778,12 @@
+   // addr and purported end of this block.
+   _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
+ 
+-  // Some chunks cannot be coalesced in under any circumstances.  
++  // Some chunks cannot be coalesced in under any circumstances.
+   // See the definition of cantCoalesce().
+   if (!fc->cantCoalesce()) {
+     // This chunk can potentially be coalesced.
+     if (_sp->adaptive_freelists()) {
+-      // All the work is done in 
++      // All the work is done in
+       doPostIsFreeOrGarbageChunk(fc, size);
+     } else {  // Not adaptive free lists
+       // this is a free chunk that can potentially be coalesced by the sweeper;
+@@ -7819,11 +7798,11 @@
+           // nothing to do
+         } else {
+           // Potentially the start of a new free range:
+-	  // Don't eagerly remove it from the free lists.  
+-	  // No need to remove it if it will just be put
+-	  // back again.  (Also from a pragmatic point of view
+-	  // if it is a free block in a region that is beyond
+-	  // any allocated blocks, an assertion will fail)
++          // Don't eagerly remove it from the free lists.
++          // No need to remove it if it will just be put
++          // back again.  (Also from a pragmatic point of view
++          // if it is a free block in a region that is beyond
++          // any allocated blocks, an assertion will fail)
+           // Remember the start of a free run.
+           initialize_free_range(addr, true);
+           // end - can coalesce with next chunk
+@@ -7831,7 +7810,7 @@
+       } else {
+         // the midst of a free range, we are coalescing
+         debug_only(record_free_block_coalesced(fc);)
+-        if (CMSTraceSweeper) { 
++        if (CMSTraceSweeper) {
+           gclog_or_tty->print("  -- pick up free block 0x%x (%d)\n", fc, size);
+         }
+         // remove it from the free lists
+@@ -7842,15 +7821,15 @@
+         // will be returned to the free lists in its entirety - all
+         // the coalesced pieces included.
+         if (freeRangeInFreeLists()) {
+-	  FreeChunk* ffc = (FreeChunk*) freeFinger();
+-	  assert(ffc->size() == pointer_delta(addr, freeFinger()),
+-	    "Size of free range is inconsistent with chunk size.");
+-	  if (CMSTestInFreeList) {
++          FreeChunk* ffc = (FreeChunk*) freeFinger();
++          assert(ffc->size() == pointer_delta(addr, freeFinger()),
++            "Size of free range is inconsistent with chunk size.");
++          if (CMSTestInFreeList) {
+             assert(_sp->verifyChunkInFreeLists(ffc),
+-	      "free range is not in free lists");
+-	  }
++              "free range is not in free lists");
++          }
+           _sp->removeFreeChunkFromFreeLists(ffc);
+-	  set_freeRangeInFreeLists(false);
++          set_freeRangeInFreeLists(false);
+         }
+       }
+     }
+@@ -7898,15 +7877,15 @@
+       // will be returned to the free lists in its entirety - all
+       // the coalesced pieces included.
+       if (freeRangeInFreeLists()) {
+-	FreeChunk* ffc = (FreeChunk*)freeFinger();
+-	assert(ffc->size() == pointer_delta(addr, freeFinger()),
+-	  "Size of free range is inconsistent with chunk size.");
+-	if (CMSTestInFreeList) {
++        FreeChunk* ffc = (FreeChunk*)freeFinger();
++        assert(ffc->size() == pointer_delta(addr, freeFinger()),
++          "Size of free range is inconsistent with chunk size.");
++        if (CMSTestInFreeList) {
+           assert(_sp->verifyChunkInFreeLists(ffc),
+-	    "free range is not in free lists");
+-	}
++            "free range is not in free lists");
++        }
+         _sp->removeFreeChunkFromFreeLists(ffc);
+-	set_freeRangeInFreeLists(false);
++        set_freeRangeInFreeLists(false);
+       }
+       set_lastFreeRangeCoalesced(true);
+     }
+@@ -7992,19 +7971,19 @@
+   return size;
+ }
+ 
+-void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc, 
+-					    size_t chunkSize) { 
++void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc,
++                                            size_t chunkSize) {
+   // doPostIsFreeOrGarbageChunk() should only be called in the smart allocation
+   // scheme.
+   bool fcInFreeLists = fc->isFree();
+   assert(_sp->adaptive_freelists(), "Should only be used in this case.");
+   assert((HeapWord*)fc <= _limit, "sweep invariant");
+   if (CMSTestInFreeList && fcInFreeLists) {
+-    assert(_sp->verifyChunkInFreeLists(fc), 
++    assert(_sp->verifyChunkInFreeLists(fc),
+       "free chunk is not in free lists");
+   }
+-  
+- 
++
++
+   if (CMSTraceSweeper) {
+     gclog_or_tty->print_cr("  -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
+   }
+@@ -8019,7 +7998,7 @@
+     case 0:  { // never coalesce
+       coalesce = false;
+       break;
+-    } 
++    }
+     case 1: { // coalesce if left & right chunks on overpopulated lists
+       coalesce = _sp->coalOverPopulated(left) &&
+                  _sp->coalOverPopulated(right);
+@@ -8030,7 +8009,7 @@
+       break;
+     }
+     case 3: { // coalesce if left OR right chunk on overpopulated list
+-      coalesce = _sp->coalOverPopulated(left) || 
++      coalesce = _sp->coalOverPopulated(left) ||
+                  _sp->coalOverPopulated(right);
+       break;
+     }
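
The switch above encodes the sweeper's coalescing policy as a small decision table over the population of the adjacent size classes. A sketch of just that decision; left_over/right_over stand in for _sp->coalOverPopulated() on the left and right chunk sizes, and case 2's body falls outside the visible hunk context, so it is omitted:

// Decision table for coalescing a left chunk with its right neighbor.
bool should_coalesce(int policy, bool left_over, bool right_over) {
  switch (policy) {
    case 0:  return false;                    // never coalesce
    case 1:  return left_over && right_over;  // both lists overpopulated
    // case 2 lies between the hunks shown above and is omitted here
    case 3:  return left_over || right_over;  // either list overpopulated
    default: return false;
  }
}
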
+@@ -8058,7 +8037,7 @@
+         "Size of free range is inconsistent with chunk size.");
+       if (CMSTestInFreeList) {
+         assert(_sp->verifyChunkInFreeLists(ffc),
+-	  "Chunk is not in free lists");
++          "Chunk is not in free lists");
+       }
+       _sp->coalDeath(ffc->size());
+       _sp->removeFreeChunkFromFreeLists(ffc);
+@@ -8066,8 +8045,8 @@
+     }
+     if (fcInFreeLists) {
+       _sp->coalDeath(chunkSize);
+-      assert(fc->size() == chunkSize, 
+-	"The chunk has the wrong size or is not in the free lists");
++      assert(fc->size() == chunkSize,
++        "The chunk has the wrong size or is not in the free lists");
+       _sp->removeFreeChunkFromFreeLists(fc);
+     }
+     set_lastFreeRangeCoalesced(true);
+@@ -8076,8 +8055,8 @@
+     if (inFreeRange()) {
+       // In a free range but cannot coalesce with the right hand chunk.
+       // Put the current free range into the free lists.
+-      flushCurFreeChunk(freeFinger(), 
+-	pointer_delta(addr, freeFinger()));
++      flushCurFreeChunk(freeFinger(),
++        pointer_delta(addr, freeFinger()));
+     }
+     // Set up for new free range.  Pass along whether the right hand
+     // chunk is in the free lists.
+@@ -8086,14 +8065,14 @@
+ }
+ void SweepClosure::flushCurFreeChunk(HeapWord* chunk, size_t size) {
+   assert(inFreeRange(), "Should only be called if currently in a free range.");
+-  assert(size > 0, 
++  assert(size > 0,
+     "A zero sized chunk cannot be added to the free lists.");
+   if (!freeRangeInFreeLists()) {
+     if(CMSTestInFreeList) {
+       FreeChunk* fc = (FreeChunk*) chunk;
+       fc->setSize(size);
+       assert(!_sp->verifyChunkInFreeLists(fc),
+-	"chunk should not be in free lists yet");
++        "chunk should not be in free lists yet");
+     }
+     if (CMSTraceSweeper) {
+       gclog_or_tty->print_cr(" -- add free block 0x%x (%d) to free lists",
+@@ -8108,7 +8087,7 @@
+       _sp->coalBirth(size);
+     }
+     _sp->addChunkAndRepairOffsetTable(chunk, size,
+-	    lastFreeRangeCoalesced());
++            lastFreeRangeCoalesced());
+   }
+   set_inFreeRange(false);
+   set_freeRangeInFreeLists(false);
+@@ -8148,9 +8127,9 @@
+ 
+   // See the comment in coordinator_yield()
+   for (unsigned i = 0; i < CMSYieldSleepCount &&
+-	               ConcurrentMarkSweepThread::should_yield() &&
+-	               !CMSCollector::foregroundGCIsActive(); ++i) {
+-    os::sleep(Thread::current(), 1, false);    
++                       ConcurrentMarkSweepThread::should_yield() &&
++                       !CMSCollector::foregroundGCIsActive(); ++i) {
++    os::sleep(Thread::current(), 1, false);
+     ConcurrentMarkSweepThread::acknowledge_yield_request();
+   }
+ 
+@@ -8180,7 +8159,7 @@
+   HeapWord* addr = (HeapWord*)obj;
+   return addr != NULL &&
+          (!_span.contains(addr) || _bit_map->isMarked(addr));
+-} 
++}
+ 
+ // CMSKeepAliveClosure: the serial version
+ void CMSKeepAliveClosure::do_oop(oop* p) {
+@@ -8215,7 +8194,7 @@
+     // In general, during recursive tracing, several threads
+     // may be concurrently getting here; the first one to
+     // "tag" it, claims it.
+-    if (_bit_map->par_mark(addr)) { 
++    if (_bit_map->par_mark(addr)) {
+       bool res = _work_queue->push(this_oop);
+       assert(res, "Low water mark should be much less than capacity");
+       // Do a recursive trim in the hope that this will keep
+@@ -8263,7 +8242,7 @@
+ }
+ 
+ //////////////////////////////////////////////////////////////////
+-//  CMSExpansionCause		     /////////////////////////////
++//  CMSExpansionCause                /////////////////////////////
+ //////////////////////////////////////////////////////////////////
+ const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
+   switch (cause) {
+@@ -8362,6 +8341,7 @@
+   for (oop next; i > 0 && cur != NULL; cur = next, i--) {
+     next = oop(cur->mark());
+     cur->set_mark(proto);   // until proven otherwise
++    assert(cur->is_oop(), "Should be an oop");
+     bool res = stack->push(cur);
+     assert(res, "Bit off more than can chew?");
+     NOT_PRODUCT(n++;)
+@@ -8416,6 +8396,7 @@
+   for (cur = prefix; cur != NULL; cur = next) {
+     next = oop(cur->mark());
+     cur->set_mark(proto);   // until proven otherwise
++    assert(cur->is_oop(), "Should be an oop");
+     bool res = work_q->push(cur);
+     assert(res, "Bit off more than we can chew?");
+     NOT_PRODUCT(n++;)
+@@ -8472,9 +8453,9 @@
+     assert(_preserved_mark_stack == NULL,
+            "bijection with preserved_oop_stack");
+     // Allocate the stacks
+-    _preserved_oop_stack  = new (ResourceObj::C_HEAP) 
++    _preserved_oop_stack  = new (ResourceObj::C_HEAP)
+       GrowableArray<oop>(PreserveMarkStackSize, true);
+-    _preserved_mark_stack = new (ResourceObj::C_HEAP) 
++    _preserved_mark_stack = new (ResourceObj::C_HEAP)
+       GrowableArray<markOop>(PreserveMarkStackSize, true);
+     if (_preserved_oop_stack == NULL || _preserved_mark_stack == NULL) {
+       vm_exit_out_of_memory(2* PreserveMarkStackSize * sizeof(oop) /* punt */,
+@@ -8581,8 +8562,8 @@
+     size_t expand_bytes = desired_promo_size - cur_promo_size;
+     if (PrintAdaptiveSizePolicy && Verbose) {
+       gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
+-	"Expanding tenured generation by " SIZE_FORMAT " (bytes)",
+-	expand_bytes);
++        "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
++        expand_bytes);
+     }
+     expand(expand_bytes,
+            MinHeapDeltaBytes,
+@@ -8591,8 +8572,8 @@
+     size_t shrink_bytes = cur_promo_size - desired_promo_size;
+     if (PrintAdaptiveSizePolicy && Verbose) {
+       gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
+-	"Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
+-	shrink_bytes);
++        "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
++        shrink_bytes);
+     }
+     shrink(shrink_bytes);
+   }
+@@ -8664,9 +8645,9 @@
+   size_t cur_eden = younger_gen->eden()->capacity();
+   CMSAdaptiveSizePolicy* size_policy = cms_size_policy();
+   size_t cur_promo = free();
+-  size_policy->compute_tenured_generation_free_space(cur_promo, 
+-						       max_available(),
+-						       cur_eden);
++  size_policy->compute_tenured_generation_free_space(cur_promo,
++                                                       max_available(),
++                                                       cur_eden);
+   resize(cur_promo, size_policy->promo_size());
+ 
+   // Record the new size of the space in the cms generation
+@@ -8692,8 +8673,8 @@
+     // No room to shrink
+     if (PrintGCDetails && Verbose) {
+       gclog_or_tty->print_cr("No room to shrink: old_end  "
+-	PTR_FORMAT "  unallocated_start  " PTR_FORMAT 
+-	" chunk_at_end  " PTR_FORMAT,
++        PTR_FORMAT "  unallocated_start  " PTR_FORMAT
++        " chunk_at_end  " PTR_FORMAT,
+         old_end, unallocated_start, chunk_at_end);
+     }
+     return;
+@@ -8702,34 +8683,34 @@
+     // Find the chunk at the end of the space and determine
+     // how much it can be shrunk.
+     size_t shrinkable_size_in_bytes = chunk_at_end->size();
+-    size_t aligned_shrinkable_size_in_bytes = 
++    size_t aligned_shrinkable_size_in_bytes =
+       align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
+     assert(unallocated_start <= chunk_at_end->end(),
+       "Inconsistent chunk at end of space");
+     size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
+     size_t word_size_before = heap_word_size(_virtual_space.committed_size());
+-  
++
+     // Shrink the underlying space
+     _virtual_space.shrink_by(bytes);
+     if (PrintGCDetails && Verbose) {
+       gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
+-        " desired_bytes " SIZE_FORMAT 
++        " desired_bytes " SIZE_FORMAT
+         " shrinkable_size_in_bytes " SIZE_FORMAT
+-        " aligned_shrinkable_size_in_bytes " SIZE_FORMAT 
+-        "  bytes  " SIZE_FORMAT, 
+-        desired_bytes, shrinkable_size_in_bytes, 
++        " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
++        "  bytes  " SIZE_FORMAT,
++        desired_bytes, shrinkable_size_in_bytes,
+         aligned_shrinkable_size_in_bytes, bytes);
+-      gclog_or_tty->print_cr("		old_end  " SIZE_FORMAT 
+-        "  unallocated_start  " SIZE_FORMAT, 
++      gclog_or_tty->print_cr("          old_end  " SIZE_FORMAT
++        "  unallocated_start  " SIZE_FORMAT,
+         old_end, unallocated_start);
+     }
+-  
++
+     // If the space did shrink (shrinking is not guaranteed),
+     // shrink the chunk at the end by the appropriate amount.
+     if (((HeapWord*)_virtual_space.high()) < old_end) {
+-      size_t new_word_size = 
++      size_t new_word_size =
+         heap_word_size(_virtual_space.committed_size());
+-  
++
+       // Have to remove the chunk from the dictionary because it is changing
+       // size and might be someplace elsewhere in the dictionary.
+ 
+@@ -8740,27 +8721,27 @@
+       size_t chunk_at_end_old_size = chunk_at_end->size();
+       assert(chunk_at_end_old_size >= word_size_change,
+         "Shrink is too large");
+-      chunk_at_end->setSize(chunk_at_end_old_size - 
+-  			  word_size_change);
+-      _cmsSpace->freed((HeapWord*) chunk_at_end->end(), 
++      chunk_at_end->setSize(chunk_at_end_old_size -
++                          word_size_change);
++      _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
+         word_size_change);
+-      
++
+       _cmsSpace->returnChunkToDictionary(chunk_at_end);
+-  
++
+       MemRegion mr(_cmsSpace->bottom(), new_word_size);
+       _bts->resize(new_word_size);  // resize the block offset shared array
+       Universe::heap()->barrier_set()->resize_covered_region(mr);
+       _cmsSpace->assert_locked();
+       _cmsSpace->set_end((HeapWord*)_virtual_space.high());
+-  
++
+       NOT_PRODUCT(_cmsSpace->dictionary()->verify());
+-  
++
+       // update the space and generation capacity counters
+       if (UsePerfData) {
+         _space_counters->update_capacity();
+         _gen_counters->update_all();
+       }
+-  
++
+       if (Verbose && PrintGCDetails) {
+         size_t new_mem_size = _virtual_space.committed_size();
+         size_t old_mem_size = new_mem_size + bytes;
+@@ -8768,10 +8749,10 @@
+                       name(), old_mem_size/K, bytes/K, new_mem_size/K);
+       }
+     }
+-  
+-    assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(), 
++
++    assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
+       "Inconsistency at end of space");
+-    assert(chunk_at_end->end() == _cmsSpace->end(), 
++    assert(chunk_at_end->end() == _cmsSpace->end(),
+       "Shrinking is inconsistent");
+     return;
+   }
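
The shrink request above is bounded by the end chunk's size aligned down to the page size. That helper is plain power-of-two arithmetic; a self-contained sketch consistent with how align_size_down() is used here:

#include <cassert>
#include <cstddef>

// Round size down to a multiple of alignment, which must be a power of two
// (os::vm_page_size() in the caller above).
inline std::size_t align_size_down(std::size_t size, std::size_t alignment) {
  assert(alignment != 0 && (alignment & (alignment - 1)) == 0);
  return size & ~(alignment - 1);
}
// Example: align_size_down(10000, 4096) == 8192.
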
+@@ -8782,7 +8763,7 @@
+ bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
+   size_t num = MIN2((size_t)_mark_stack->capacity()/4,
+                     (size_t)ParGCDesiredObjsFromOverflowList);
+-  
++
+   bool res = _collector->take_from_overflow_list(num, _mark_stack);
+   assert(_collector->overflow_list_is_empty() || res,
+          "If list is not empty, we should have taken something");
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)concurrentMarkSweepGeneration.hpp	1.161 07/07/17 11:44:43 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ConcurrentMarkSweepGeneration is in support of a concurrent
+@@ -60,7 +57,7 @@
+ 
+   HeapWord* _bmStartWord;   // base address of range covered by map
+   size_t    _bmWordSize;    // map size (in #HeapWords covered)
+-  const int _shifter;	    // shifts to convert HeapWord to bit position
++  const int _shifter;       // shifts to convert HeapWord to bit position
+   VirtualSpace _virtual_space; // underlying the bit map
+   BitMap    _bm;            // the bit map itself
+  public:
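
The _shifter field re-commented above sets the map's granularity: one bit covers 2^shifter HeapWords. A sketch of the resulting address-to-bit mapping, with types reduced to integers (this is a simplified stand-in for the real heapWordToOffset() member, not its actual signature):

#include <cstddef>
#include <cstdint>

// One bit covers (1 << shifter) HeapWords; addr and bm_start are assumed
// HeapWord-aligned byte addresses.
inline std::size_t heap_word_to_bit(std::uintptr_t addr,
                                    std::uintptr_t bm_start,
                                    int shifter,
                                    std::size_t heap_word_bytes = sizeof(void*)) {
  return ((addr - bm_start) / heap_word_bytes) >> shifter;
}
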
+@@ -112,7 +109,7 @@
+     // checks the memory region for validity
+     void region_invariant(MemRegion mr);
+   )
+-  
++
+   // iteration
+   void iterate(BitMapClosure* cl) {
+     _bm.iterate(cl);
+@@ -120,7 +117,7 @@
+   void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
+   void dirty_range_iterate_clear(MemRegionClosure* cl);
+   void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);
+- 
++
+   // auxiliary support for iteration
+   HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
+   HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
+@@ -129,14 +126,14 @@
+   HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
+                                               HeapWord* end_addr) const;
+   MemRegion getAndClearMarkedRegion(HeapWord* addr);
+-  MemRegion getAndClearMarkedRegion(HeapWord* start_addr, 
++  MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
+                                            HeapWord* end_addr);
+ 
+   // conversion utilities
+   HeapWord* offsetToHeapWord(size_t offset) const;
+   size_t    heapWordToOffset(HeapWord* addr) const;
+   size_t    heapWordDiffToOffsetDiff(size_t diff) const;
+-  
++
+   // debugging
+   // is this address range covered by the bit-map?
+   NOT_PRODUCT(
+@@ -149,7 +146,7 @@
+ // Represents a marking stack used by the CMS collector.
+ // Ideally this should be GrowableArray<> just like MSC's marking stack(s).
+ class CMSMarkStack: public CHeapObj  {
+-  // 
++  //
+   friend class CMSCollector;   // to get at expansion stats further below
+   //
+ 
+@@ -277,7 +274,7 @@
+     assert(n < end(), "Out of bounds access");
+     return _array[n];
+   }
+-  
++
+   void reset() {
+     _index = 0;
+   }
+@@ -290,17 +287,17 @@
+   }
+ };
+ 
+-// 
++//
+ // Timing, allocation and promotion statistics for gc scheduling and incremental
+ // mode pacing.  Most statistics are exponential averages.
+-// 
++//
+ class CMSStats VALUE_OBJ_CLASS_SPEC {
+  private:
+   ConcurrentMarkSweepGeneration* const _cms_gen;   // The cms (old) gen.
+ 
+   // The following are exponential averages with factor alpha:
+   //   avg = (100 - alpha) * avg + alpha * cur_sample
+-  // 
++  //
+   //   The durations measure:  end_time[n] - start_time[n]
+   //   The periods measure:    start_time[n] - start_time[n-1]
+   //
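
Since the comment gives alpha as a percentage, the recurrence needs a /100 normalization when implemented directly. A self-contained sketch of the same exponential average (not HotSpot's AdaptiveWeightedAverage class):

class ExpAverage {
 public:
  explicit ExpAverage(unsigned alpha_percent) : _alpha(alpha_percent) {}
  // avg = ((100 - alpha) * avg + alpha * cur_sample) / 100
  void sample(double cur_sample) {
    if (!_seeded) { _avg = cur_sample; _seeded = true; return; }
    _avg = ((100.0 - _alpha) * _avg + _alpha * cur_sample) / 100.0;
  }
  double average() const { return _avg; }
 private:
  unsigned _alpha;        // weight of the newest sample, 0..100 (e.g. CMSExpAvgFactor)
  double   _avg = 0.0;
  bool     _seeded = false;
};
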
+@@ -317,12 +314,12 @@
+ 
+   double _gc0_duration;
+   double _gc0_period;
+-  size_t _gc0_promoted;		// bytes promoted per gc0
++  size_t _gc0_promoted;         // bytes promoted per gc0
+   double _cms_duration;
+   double _cms_duration_pre_sweep; // time from initiation to start of sweep
+   double _cms_duration_per_mb;
+   double _cms_period;
+-  size_t _cms_allocated;	// bytes of direct allocation per gc0 period
++  size_t _cms_allocated;        // bytes of direct allocation per gc0 period
+ 
+   // Timers.
+   elapsedTimer _cms_timer;
+@@ -347,7 +344,7 @@
+ 
+   unsigned int _valid_bits;
+ 
+-  unsigned int _icms_duty_cycle;	// icms duty cycle (0-100).
++  unsigned int _icms_duty_cycle;        // icms duty cycle (0-100).
+ 
+  protected:
+ 
+@@ -355,12 +352,12 @@
+   // of change between old_duty_cycle and new_duty_cycle (the latter is treated
+   // as a recommended value).
+   static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
+-					     unsigned int new_duty_cycle);
++                                             unsigned int new_duty_cycle);
+   unsigned int icms_update_duty_cycle_impl();
+ 
+  public:
+   CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
+-	   unsigned int alpha = CMSExpAvgFactor);
++           unsigned int alpha = CMSExpAvgFactor);
+ 
+   // Whether or not the statistics contain valid data; higher level statistics
+   // cannot be called until this returns true (they require at least one young
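
The icms_damped_duty_cycle() declaration re-wrapped above only promises that the new duty cycle is damped relative to the old one; its implementation is not part of this patch. A purely illustrative damping (a per-update clamp), not HotSpot's actual formula:

// Illustrative damping only: limit the per-update change in the duty
// cycle to max_step percentage points.
unsigned int damped_duty_cycle(unsigned int old_dc, unsigned int new_dc,
                               unsigned int max_step) {
  if (new_dc > old_dc) {
    unsigned int up = new_dc - old_dc;
    return old_dc + (up < max_step ? up : max_step);
  }
  unsigned int down = old_dc - new_dc;
  return old_dc - (down < max_step ? down : max_step);
}
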
+@@ -466,8 +463,8 @@
+   CMSRefProcTaskExecutor(CMSCollector& collector)
+     : _collector(collector)
+   { }
+-  
+-  // Executes a task using worker threads.  
++
++  // Executes a task using worker threads.
+   virtual void execute(ProcessTask& task);
+   virtual void execute(EnqueueTask& task);
+ private:
+@@ -512,7 +509,7 @@
+   void update_time_of_last_gc(jlong now) {
+     _time_of_last_gc = now;
+   }
+-  
++
+   OopTaskQueueSet* _task_queues;
+ 
+   // Overflow list of grey objects, threaded through mark-word
+@@ -521,8 +518,8 @@
+   // The following array-pair keeps track of mark words
+   // displaced for accommodating overflow list above.
+   // This code will likely be revisited under RFE#4922830.
+-  GrowableArray<oop>*     _preserved_oop_stack; 
+-  GrowableArray<markOop>* _preserved_mark_stack; 
++  GrowableArray<oop>*     _preserved_oop_stack;
++  GrowableArray<markOop>* _preserved_mark_stack;
+ 
+   int*             _hash_seed;
+ 
+@@ -561,7 +558,7 @@
+   ConcurrentMarkSweepPolicy* _collector_policy;
+   ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
+ 
+-  // Check whether the gc time limit has been 
++  // Check whether the gc time limit has been
+   // exceeded and set the size policy flag
+   // appropriately.
+   void check_gc_time_limit();
+@@ -629,7 +626,7 @@
+     Precleaning         = 5,
+     AbortablePreclean   = 6,
+     FinalMarking        = 7,
+-    Sweeping            = 8 
++    Sweeping            = 8
+   };
+   static CollectorState _collectorState;
+ 
+@@ -652,7 +649,7 @@
+   size_t _numDirtyCards;
+   uint   _sweepCount;
+   // number of full gc's since the last concurrent gc.
+-  uint	 _full_gcs_since_conc_gc;
++  uint   _full_gcs_since_conc_gc;
+ 
+   // if occupancy exceeds this, start a new gc cycle
+   double _initiatingOccupancy;
+@@ -669,8 +666,8 @@
+   // CMSIncrementalMode.  When an allocation in the young gen would cross one of
+   // these limits, the cms generation is notified and the cms thread is started
+   // or stopped, respectively.
+-  HeapWord*	_icms_start_limit;
+-  HeapWord*	_icms_stop_limit;
++  HeapWord*     _icms_start_limit;
++  HeapWord*     _icms_stop_limit;
+ 
+   enum CMS_op_type {
+     CMS_op_checkpointRootsInitial,
+@@ -696,7 +693,7 @@
+   size_t     _eden_chunk_index; // ... top (exclusive) of array
+   size_t     _eden_chunk_capacity;  // ... max entries in array
+ 
+-  // Support for parallelizing survivor space rescan 
++  // Support for parallelizing survivor space rescan
+   HeapWord** _survivor_chunk_array;
+   size_t     _survivor_chunk_index;
+   size_t     _survivor_chunk_capacity;
+@@ -710,7 +707,7 @@
+   void par_push_on_overflow_list(oop p);
+   // the following is, obviously, not, in general, "MT-stable"
+   bool overflow_list_is_empty() const;
+-  
++
+   void preserve_mark_if_necessary(oop p);
+   void par_preserve_mark_if_necessary(oop p);
+   void preserve_mark_work(oop p, markOop m);
+@@ -820,7 +817,7 @@
+   CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
+                ConcurrentMarkSweepGeneration* permGen,
+                CardTableRS*                   ct,
+-	       ConcurrentMarkSweepPolicy*     cp);
++               ConcurrentMarkSweepPolicy*     cp);
+   ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }
+ 
+   ReferenceProcessor* ref_processor() { return _ref_processor; }
+@@ -872,7 +869,7 @@
+                 bool is_obj_array, size_t obj_size);
+ 
+   HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
+-				     size_t word_size);
++                                     size_t word_size);
+ 
+   void getFreelistLocks() const;
+   void releaseFreelistLocks() const;
+@@ -918,10 +915,10 @@
+   CollectorCounters* counters()    { return _gc_counters; }
+ 
+   // timer stuff
+-  void    startTimer() { _timer.start();   }
+-  void    stopTimer()  { _timer.stop();    }
+-  void    resetTimer() { _timer.reset();   }
+-  double  timerValue() { return _timer.seconds(); }
++  void    startTimer() { assert(!_timer.is_active(), "Error"); _timer.start();   }
++  void    stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();    }
++  void    resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset();   }
++  double  timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }
+ 
+   int  yields()          { return _numYields; }
+   void resetYields()     { _numYields = 0;    }
+@@ -946,7 +943,7 @@
+   static void stop_icms();    // Called at the end of the cms cycle.
+   static void disable_icms(); // Called before a foreground collection.
+   static void enable_icms();  // Called after a foreground collection.
+-  void icms_wait();	     // Called at yield points.
++  void icms_wait();          // Called at yield points.
+ 
+   // Adaptive size policy
+   CMSAdaptiveSizePolicy* size_policy();
+@@ -1040,10 +1037,10 @@
+   const double _dilatation_factor;
+ 
+   enum CollectionTypes {
+-    Concurrent_collection_type		= 0,
+-    MS_foreground_collection_type	= 1,
+-    MSC_foreground_collection_type	= 2,
+-    Unknown_collection_type		= 3
++    Concurrent_collection_type          = 0,
++    MS_foreground_collection_type       = 1,
++    MSC_foreground_collection_type      = 2,
++    Unknown_collection_type             = 3
+   };
+ 
+   CollectionTypes _debug_collection_type;
+@@ -1066,7 +1063,7 @@
+  public:
+   ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
+                                 int level, CardTableRS* ct,
+-				bool use_adaptive_freelists,
++                                bool use_adaptive_freelists,
+                                 FreeBlockDictionary::DictionaryChoice);
+ 
+   // Accessors
+@@ -1076,7 +1073,7 @@
+     _collector = collector;
+   }
+   CompactibleFreeListSpace*  cmsSpace() const { return _cmsSpace;  }
+-  
++
+   Mutex* freelistLock() const;
+ 
+   virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }
+@@ -1141,7 +1138,7 @@
+ 
+   // Incremental mode triggering.
+   HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
+-				     size_t word_size);
++                                     size_t word_size);
+ 
+   // Used by CMSStats to track direct allocation.  The value is sampled and
+   // reset after each young gen collection.
+@@ -1150,10 +1147,10 @@
+ 
+   // Overrides for parallel promotion.
+   virtual oop par_promote(int thread_num,
+-			  oop obj, markOop m, size_t word_sz);
++                          oop obj, markOop m, size_t word_sz);
+   // This one should not be called for CMS.
+   virtual void par_promote_alloc_undo(int thread_num,
+-				      HeapWord* obj, size_t word_sz);
++                                      HeapWord* obj, size_t word_sz);
+   virtual void par_promote_alloc_done(int thread_num);
+   virtual void par_oop_since_save_marks_iterate_done(int thread_num);
+ 
+@@ -1169,8 +1166,8 @@
+                bool   tlab);
+ 
+   HeapWord* expand_and_allocate(size_t word_size,
+-				bool tlab,
+-				bool parallel = false);
++                                bool tlab,
++                                bool parallel = false);
+ 
+   // GC prologue and epilogue
+   void gc_prologue(bool full);
+@@ -1188,7 +1185,7 @@
+   }
+ 
+   // Allocation failure
+-  void expand(size_t bytes, size_t expand_bytes, 
++  void expand(size_t bytes, size_t expand_bytes,
+     CMSExpansionCause::Cause cause);
+   void shrink(size_t bytes);
+   HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
+@@ -1221,7 +1218,7 @@
+ 
+   // Get the chunk at the end of the space.  Delagates to
+   // the space.
+-  FreeChunk* find_chunk_at_end(); 
++  FreeChunk* find_chunk_at_end();
+ 
+   // Overriding of unused functionality (sharing not yet supported with CMS)
+   void pre_adjust_pointers();
+@@ -1277,9 +1274,9 @@
+   virtual void compute_new_size();
+   ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
+                                   int level, CardTableRS* ct,
+-				  bool use_adaptive_freelists,
+-                                  FreeBlockDictionary::DictionaryChoice 
+-				    dictionaryChoice) :
++                                  bool use_adaptive_freelists,
++                                  FreeBlockDictionary::DictionaryChoice
++                                    dictionaryChoice) :
+     ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
+       use_adaptive_freelists, dictionaryChoice) {}
+ 
+@@ -1303,7 +1300,7 @@
+ };
+ 
+ // This closure is used to do concurrent marking from the roots
+-// following the first checkpoint. 
++// following the first checkpoint.
+ class MarkFromRootsClosure: public BitMapClosure {
+   CMSCollector*  _collector;
+   MemRegion      _span;
+@@ -1333,7 +1330,7 @@
+ };
+ 
+ // This closure is used to do concurrent multi-threaded
+-// marking from the roots following the first checkpoint. 
++// marking from the roots following the first checkpoint.
+ // XXX This should really be a subclass of The serial version
+ // above, but i have not had the time to refactor things cleanly.
+ // That willbe done for Dolphin.
+@@ -1410,7 +1407,7 @@
+ class FalseBitMapClosure: public BitMapClosure {
+  public:
+   void do_bit(size_t offset) {
+-    guarantee(false, "Should not have a 1 bit"); 
++    guarantee(false, "Should not have a 1 bit");
+   }
+ };
+ 
+@@ -1473,7 +1470,7 @@
+     _parallel(true),
+     _bit_map(bit_map),
+     _par_scan_closure(cl) { }
+-                                
++
+   void do_object(oop obj) {
+     guarantee(false, "Call do_object_b(oop, MemRegion) instead");
+   }
+@@ -1562,7 +1559,7 @@
+     _markStack(markStack),
+     _scanningClosure(cl) {
+   }
+-  
++
+   void do_object(oop p) {
+     guarantee(false, "call do_object_careful instead");
+   }
+@@ -1630,7 +1627,7 @@
+ // This closure is used to accomplish the sweeping work
+ // after the second checkpoint but before the concurrent reset
+ // phase.
+-// 
++//
+ // Terminology
+ //   left hand chunk (LHC) - block of one or more chunks currently being
+ //     coalesced.  The LHC is available for coalescing with a new chunk.
+@@ -1642,54 +1639,54 @@
+ // _freeFinger is the address of the current LHC
+ class SweepClosure: public BlkClosureCareful {
+   CMSCollector*                  _collector;  // collector doing the work
+-  ConcurrentMarkSweepGeneration* _g;	// Generation being swept
+-  CompactibleFreeListSpace*      _sp;	// Space being swept
++  ConcurrentMarkSweepGeneration* _g;    // Generation being swept
++  CompactibleFreeListSpace*      _sp;   // Space being swept
+   HeapWord*                      _limit;
+-  Mutex*                         _freelistLock;	// Free list lock (in space)
+-  CMSBitMap*                     _bitMap;	// Marking bit map (in 
+-						// generation)
+-  bool                           _inFreeRange;	// Indicates if we are in the
+-						// midst of a free run
+-  bool				 _freeRangeInFreeLists;	
+-					// Often, we have just found
+-					// a free chunk and started
+-					// a new free range; we do not
+-					// eagerly remove this chunk from
+-					// the free lists unless there is
+-					// a possibility of coalescing.
+-					// When true, this flag indicates
+-					// that the _freeFinger below
+-					// points to a potentially free chunk
+-					// that may still be in the free lists
+-  bool				 _lastFreeRangeCoalesced;
+-					// free range contains chunks
+-					// coalesced
+-  bool                           _yield;	
+-					// Whether sweeping should be 
+-					// done with yields. For instance 
+-					// when done by the foreground 
+-					// collector we shouldn't yield.
+-  HeapWord*                      _freeFinger;	// When _inFreeRange is set, the
+-						// pointer to the "left hand 
+-						// chunk"
+-  size_t			 _freeRangeSize; 
+-					// When _inFreeRange is set, this 
+-					// indicates the accumulated size 
+-					// of the "left hand chunk"
++  Mutex*                         _freelistLock; // Free list lock (in space)
++  CMSBitMap*                     _bitMap;       // Marking bit map (in
++                                                // generation)
++  bool                           _inFreeRange;  // Indicates if we are in the
++                                                // midst of a free run
++  bool                           _freeRangeInFreeLists;
++                                        // Often, we have just found
++                                        // a free chunk and started
++                                        // a new free range; we do not
++                                        // eagerly remove this chunk from
++                                        // the free lists unless there is
++                                        // a possibility of coalescing.
++                                        // When true, this flag indicates
++                                        // that the _freeFinger below
++                                        // points to a potentially free chunk
++                                        // that may still be in the free lists
++  bool                           _lastFreeRangeCoalesced;
++                                        // free range contains chunks
++                                        // coalesced
++  bool                           _yield;
++                                        // Whether sweeping should be
++                                        // done with yields. For instance
++                                        // when done by the foreground
++                                        // collector we shouldn't yield.
++  HeapWord*                      _freeFinger;   // When _inFreeRange is set, the
++                                                // pointer to the "left hand
++                                                // chunk"
++  size_t                         _freeRangeSize;
++                                        // When _inFreeRange is set, this
++                                        // indicates the accumulated size
++                                        // of the "left hand chunk"
+   NOT_PRODUCT(
+-    size_t		         _numObjectsFreed;
+-    size_t		         _numWordsFreed;
+-    size_t			 _numObjectsLive;
+-    size_t			 _numWordsLive;
+-    size_t			 _numObjectsAlreadyFree;
+-    size_t			 _numWordsAlreadyFree;
+-    FreeChunk*			 _last_fc;
++    size_t                       _numObjectsFreed;
++    size_t                       _numWordsFreed;
++    size_t                       _numObjectsLive;
++    size_t                       _numWordsLive;
++    size_t                       _numObjectsAlreadyFree;
++    size_t                       _numWordsAlreadyFree;
++    FreeChunk*                   _last_fc;
+   )
+  private:
+   // Code that is common to a free chunk or garbage when
+   // encountered during sweeping.
+-  void doPostIsFreeOrGarbageChunk(FreeChunk *fc, 
+-				  size_t chunkSize);
++  void doPostIsFreeOrGarbageChunk(FreeChunk *fc,
++                                  size_t chunkSize);
+   // Process a free chunk during sweeping.
+   void doAlreadyFreeChunk(FreeChunk *fc);
+   // Process a garbage chunk during sweeping.
+@@ -1698,15 +1695,15 @@
+   size_t doLiveChunk(FreeChunk* fc);
+ 
+   // Accessors.
+-  HeapWord* freeFinger() const	 	{ return _freeFinger; }
+-  void set_freeFinger(HeapWord* v)  	{ _freeFinger = v; }
+-  size_t freeRangeSize() const	 	{ return _freeRangeSize; }
+-  void set_freeRangeSize(size_t v)  	{ _freeRangeSize = v; }
+-  bool inFreeRange() 	const	 	{ return _inFreeRange; }
+-  void set_inFreeRange(bool v)  	{ _inFreeRange = v; }
+-  bool lastFreeRangeCoalesced()	const	 { return _lastFreeRangeCoalesced; }
++  HeapWord* freeFinger() const          { return _freeFinger; }
++  void set_freeFinger(HeapWord* v)      { _freeFinger = v; }
++  size_t freeRangeSize() const          { return _freeRangeSize; }
++  void set_freeRangeSize(size_t v)      { _freeRangeSize = v; }
++  bool inFreeRange()    const           { return _inFreeRange; }
++  void set_inFreeRange(bool v)          { _inFreeRange = v; }
++  bool lastFreeRangeCoalesced() const    { return _lastFreeRangeCoalesced; }
+   void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
+-  bool freeRangeInFreeLists() const	{ return _freeRangeInFreeLists; }
++  bool freeRangeInFreeLists() const     { return _freeRangeInFreeLists; }
+   void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; }
+ 
+   // Initialize a free range.
+@@ -1814,7 +1811,7 @@
+   CMSBitMap*                      _dead_bit_map;
+ public:
+   MarkDeadObjectsClosure(const CMSCollector* collector,
+-                         const CompactibleFreeListSpace* sp, 
++                         const CompactibleFreeListSpace* sp,
+                          CMSBitMap *live_bit_map,
+                          CMSBitMap *dead_bit_map) :
+     _collector(collector),
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)concurrentMarkSweepGeneration.inline.hpp	1.47 07/05/17 15:52:12 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ inline void CMSBitMap::clear_all() {
+@@ -69,56 +66,56 @@
+ inline void CMSBitMap::mark_range(MemRegion mr) {
+   NOT_PRODUCT(region_invariant(mr));
+   // Range size is usually just 1 bit.
+-  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()), 
++  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
+                 BitMap::small_range);
+ }
+ 
+ inline void CMSBitMap::clear_range(MemRegion mr) {
+   NOT_PRODUCT(region_invariant(mr));
+   // Range size is usually just 1 bit.
+-  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()), 
++  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
+                   BitMap::small_range);
+ }
+ 
+ inline void CMSBitMap::par_mark_range(MemRegion mr) {
+   NOT_PRODUCT(region_invariant(mr));
+   // Range size is usually just 1 bit.
+-  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()), 
++  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
+                     BitMap::small_range);
+ }
+ 
+ inline void CMSBitMap::par_clear_range(MemRegion mr) {
+   NOT_PRODUCT(region_invariant(mr));
+   // Range size is usually just 1 bit.
+-  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()), 
++  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
+                       BitMap::small_range);
+ }
+ 
+ inline void CMSBitMap::mark_large_range(MemRegion mr) {
+   NOT_PRODUCT(region_invariant(mr));
+   // Range size must be greater than 32 bytes.
+-  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()), 
++  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
+                 BitMap::large_range);
+ }
+ 
+ inline void CMSBitMap::clear_large_range(MemRegion mr) {
+   NOT_PRODUCT(region_invariant(mr));
+   // Range size must be greater than 32 bytes.
+-  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()), 
++  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
+                   BitMap::large_range);
+ }
+ 
+ inline void CMSBitMap::par_mark_large_range(MemRegion mr) {
+   NOT_PRODUCT(region_invariant(mr));
+   // Range size must be greater than 32 bytes.
+-  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()), 
++  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
+                     BitMap::large_range);
+ }
+ 
+ inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
+   NOT_PRODUCT(region_invariant(mr));
+   // Range size must be greater than 32 bytes.
+-  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()), 
++  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
+                       BitMap::large_range);
+ }
+ 
+@@ -265,11 +262,11 @@
+ inline bool CMSCollector::is_dead_obj(oop obj) const {
+   HeapWord* addr = (HeapWord*)obj;
+   assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
+-	  && _cmsGen->cmsSpace()->block_is_obj(addr))
+-	 ||
++          && _cmsGen->cmsSpace()->block_is_obj(addr))
++         ||
+          (_permGen->cmsSpace()->is_in_reserved(addr)
+-	  && _permGen->cmsSpace()->block_is_obj(addr)),
+-	 "must be object");
++          && _permGen->cmsSpace()->block_is_obj(addr)),
++         "must be object");
+   return  cms_should_unload_classes() &&
+           _collectorState == Sweeping &&
+          !_markBitMap.isMarked(addr);
+@@ -298,7 +295,7 @@
+ inline void CMSStats::record_gc0_begin() {
+   if (_gc0_begin_time.is_updated()) {
+     float last_gc0_period = _gc0_begin_time.seconds();
+-    _gc0_period = AdaptiveWeightedAverage::exp_avg(_gc0_period, 
++    _gc0_period = AdaptiveWeightedAverage::exp_avg(_gc0_period,
+       last_gc0_period, _gc0_alpha);
+     _gc0_alpha = _saved_alpha;
+     _valid_bits |= _GC0_VALID;
+@@ -310,7 +307,7 @@
+ 
+ inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
+   float last_gc0_duration = _gc0_begin_time.seconds();
+-  _gc0_duration = AdaptiveWeightedAverage::exp_avg(_gc0_duration, 
++  _gc0_duration = AdaptiveWeightedAverage::exp_avg(_gc0_duration,
+     last_gc0_duration, _gc0_alpha);
+ 
+   // Amount promoted.
+@@ -319,13 +316,13 @@
+   size_t promoted_bytes = 0;
+   if (_cms_used_at_gc0_end >= _cms_used_at_gc0_begin) {
+     promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
+-  } 
++  }
+ 
+   // If the younger gen collections were skipped, then the
+   // number of promoted bytes will be 0 and adding it to the
+   // average will incorrectly lessen the average.  It is, however,
+   // also possible that no promotion was needed.
+-  // 
++  //
+   // _gc0_promoted used to be calculated as
+   // _gc0_promoted = AdaptiveWeightedAverage::exp_avg(_gc0_promoted,
+   //  promoted_bytes, _gc0_alpha);
+@@ -335,7 +332,7 @@
+   // Amount directly allocated.
+   size_t allocated_bytes = _cms_gen->direct_allocated_words() * HeapWordSize;
+   _cms_gen->reset_direct_allocated_words();
+-  _cms_allocated = AdaptiveWeightedAverage::exp_avg(_cms_allocated, 
++  _cms_allocated = AdaptiveWeightedAverage::exp_avg(_cms_allocated,
+     allocated_bytes, _gc0_alpha);
+ }
+ 
+@@ -345,7 +342,7 @@
+   // This is just an approximate value, but is good enough.
+   _cms_used_at_cms_begin = _cms_used_at_gc0_end;
+ 
+-  _cms_period = AdaptiveWeightedAverage::exp_avg((float)_cms_period, 
++  _cms_period = AdaptiveWeightedAverage::exp_avg((float)_cms_period,
+     (float) _cms_timer.seconds(), _cms_alpha);
+   _cms_begin_time.update();
+ 
+@@ -357,14 +354,14 @@
+   _cms_timer.stop();
+ 
+   float cur_duration = _cms_timer.seconds();
+-  _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration, 
++  _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
+     cur_duration, _cms_alpha);
+ 
+   // Avoid division by 0.
+   const size_t cms_used_mb = MAX2(_cms_used_at_cms_begin / M, (size_t)1);
+   _cms_duration_per_mb = AdaptiveWeightedAverage::exp_avg(_cms_duration_per_mb,
+-				 cur_duration / cms_used_mb,
+-				 _cms_alpha);
++                                 cur_duration / cms_used_mb,
++                                 _cms_alpha);
+ 
+   _cms_end_time.update();
+   _cms_alpha = _saved_alpha;
+@@ -453,7 +450,7 @@
+       !_collector->foregroundGCIsActive() &&
+       _yield) {
+     // Sample young gen size before and after yield
+-    _collector->sample_eden(); 
++    _collector->sample_eden();
+     do_yield_work();
+     _collector->sample_eden();
+     return _collector->should_abort_preclean();
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)concurrentMarkSweepThread.cpp	1.48 07/05/05 17:06:45 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -132,7 +129,7 @@
+     _cmst = NULL;
+     Terminator_lock->notify();
+   }
+-  
++
+   // Thread destructor usually does this..
+   ThreadLocalStorage::set_thread(NULL);
+ }
+@@ -166,7 +163,7 @@
+   // it is ok to take late safepoints here, if needed
+   {
+     MutexLockerEx x(Terminator_lock);
+-    _should_terminate = true;  
++    _should_terminate = true;
+   }
+   { // Now post a notify on CGC_lock so as to nudge
+     // CMS thread(s) that might be slumbering in
+@@ -179,7 +176,7 @@
+     while(cmst() != NULL) {
+       Terminator_lock->wait();
+     }
+-  }  
++  }
+ }
+ 
+ void ConcurrentMarkSweepThread::threads_do(ThreadClosure* tc) {
+@@ -187,7 +184,7 @@
+   if (_cmst != NULL) {
+     tc->do_thread(_cmst);
+   }
+-  assert(Universe::is_fully_initialized(), 
++  assert(Universe::is_fully_initialized(),
+          "Called too early, make sure heap is fully initialized");
+   if (_collector != NULL) {
+     AbstractWorkGang* gang = _collector->conc_workers();
+@@ -299,7 +296,7 @@
+     if (_collector->shouldConcurrentCollect()) {
+       return;
+     }
+-    // .. collection criterion not yet met, let's go back 
++    // .. collection criterion not yet met, let's go back
+     // and wait some more
+   }
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)concurrentMarkSweepThread.hpp	1.38 07/05/05 17:06:46 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class ConcurrentMarkSweepGeneration;
+@@ -72,9 +69,9 @@
+   // Tracing messages, enabled by CMSTraceThreadState.
+   static inline void trace_state(const char* desc);
+ 
+-  static volatile bool _icms_enabled;	// iCMS enabled?
+-  static volatile bool _should_run;	// iCMS may run
+-  static volatile bool _should_stop;	// iCMS should stop
++  static volatile bool _icms_enabled;   // iCMS enabled?
++  static volatile bool _should_run;     // iCMS may run
++  static volatile bool _should_stop;    // iCMS should stop
+ 
+   // debugging
+   void verify_ok_to_terminate() const PRODUCT_RETURN;
+@@ -93,9 +90,9 @@
+ 
+   // Printing
+   void print_on(outputStream* st) const;
+-  void print() const 				      { print_on(tty); }
++  void print() const                                  { print_on(tty); }
+   static void print_all_on(outputStream* st);
+-  static void print_all() 			      { print_all_on(tty); }
++  static void print_all()                             { print_all_on(tty); }
+ 
+   // Returns the CMS Thread
+   static ConcurrentMarkSweepThread* cmst()    { return _cmst; }
+@@ -122,7 +119,7 @@
+     return CMS_flag_is_set(CMS_cms_wants_token);
+   }
+ 
+-  // Wait on CMS lock until the next synchronous GC 
++  // Wait on CMS lock until the next synchronous GC
+   // or given timeout, whichever is earlier.
+   void    wait_on_cms_lock(long t); // milliseconds
+ 
+@@ -174,14 +171,14 @@
+   // CMS incremental mode.
+   static void start_icms(); // notify thread to start a quantum of work
+   static void stop_icms();  // request thread to stop working
+-  void icms_wait();	    // if asked to stop, wait until notified to start
++  void icms_wait();         // if asked to stop, wait until notified to start
+ 
+   // Incremental mode is enabled globally by the flag CMSIncrementalMode.  It
+   // must also be enabled/disabled dynamically to allow foreground collections.
+   static inline void enable_icms()              { _icms_enabled = true; }
+   static inline void disable_icms()             { _icms_enabled = false; }
+   static inline void set_icms_enabled(bool val) { _icms_enabled = val; }
+-  static inline bool icms_enabled()             { return _icms_enabled; } 
++  static inline bool icms_enabled()             { return _icms_enabled; }
+ };
+ 
+ inline void ConcurrentMarkSweepThread::trace_state(const char* desc) {
+@@ -192,7 +189,7 @@
+       ts.update();
+     }
+     jio_snprintf(buf, sizeof(buf), " [%.3f:  CMSThread %s] ",
+-		 ts.seconds(), desc);
++                 ts.seconds(), desc);
+     buf[sizeof(buf) - 1] = '\0';
+     gclog_or_tty->print(buf);
+   }
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.cpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.cpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)freeBlockDictionary.cpp	1.12 07/05/05 17:05:47 JVM"
+-#endif
+ /*
+  * Copyright 2002-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)freeBlockDictionary.hpp	1.32 07/05/05 17:05:47 JVM"
+-#endif
+ /*
+  * Copyright 2001-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+@@ -99,7 +96,7 @@
+   void verify()             const PRODUCT_RETURN;
+   void verifyList()         const PRODUCT_RETURN;
+   void mangleAllocated(size_t size) PRODUCT_RETURN;
+-  void mangleFreed(size_t size)     PRODUCT_RETURN; 
++  void mangleFreed(size_t size)     PRODUCT_RETURN;
+ };
+ 
+ // Alignment helpers etc.
+@@ -135,8 +132,8 @@
+   virtual size_t     minSize()        const = 0;
+   // Reset the dictionary to the initial conditions for a single
+   // block.
+-  virtual void	     reset(HeapWord* addr, size_t size) = 0;
+-  virtual void	     reset() = 0;
++  virtual void       reset(HeapWord* addr, size_t size) = 0;
++  virtual void       reset() = 0;
+ 
+   virtual void       dictCensusUpdate(size_t size, bool split, bool birth) = 0;
+   virtual bool       coalDictOverPopulated(size_t size) = 0;
+@@ -164,7 +161,7 @@
+     gclog_or_tty->print("No statistics available");
+   }
+ 
+-  virtual void 	     printDictCensus() const = 0;
++  virtual void       printDictCensus() const = 0;
+ 
+   virtual void       verify()         const = 0;
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.cpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.cpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)freeChunk.cpp	1.16 07/05/05 17:05:47 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)freeList.cpp	1.31 07/05/05 17:05:48 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -30,7 +27,7 @@
+ 
+ // Free list.  A FreeList is used to access a linked list of chunks
+ // of space in the heap.  The head and tail are maintained so that
+-// items can be (as in the current implementation) added at the 
++// items can be (as in the current implementation) added at the
+ // at the tail of the list and removed from the head of the list to
+ // maintain a FIFO queue.
+ 
+@@ -40,9 +37,9 @@
+   , _protecting_lock(NULL)
+ #endif
+ {
+-  _size		= 0;
+-  _count	= 0;
+-  _hint		= 0;
++  _size         = 0;
++  _count        = 0;
++  _hint         = 0;
+   init_statistics();
+ }
+ 
+@@ -52,9 +49,9 @@
+   , _protecting_lock(NULL)
+ #endif
+ {
+-  _size		= fc->size();
+-  _count	= 1;
+-  _hint		= 0;
++  _size         = fc->size();
++  _count        = 1;
++  _hint         = 0;
+   init_statistics();
+ #ifndef PRODUCT
+   _allocation_stats.set_returnedBytes(size() * HeapWordSize);
+@@ -69,8 +66,8 @@
+ {
+   assert(size > sizeof(FreeChunk), "size is too small");
+   head()->setSize(size);
+-  _size		= size;
+-  _count	= 1;
++  _size         = size;
++  _count        = 1;
+   init_statistics();
+ #ifndef PRODUCT
+   _allocation_stats.set_returnedBytes(_size * HeapWordSize);
+@@ -122,7 +119,7 @@
+       tl = tl->next(); n--; k++;
+     }
+     assert(tl != NULL, "Loop Inv.");
+-    
++
+     // First, fix up the list we took from.
+     FreeChunk* new_head = tl->next();
+     set_head(new_head);
+@@ -160,7 +157,7 @@
+    }
+    if (prevFC == NULL) { // removed head of list
+      link_head(nextFC);
+-     assert(nextFC == NULL || nextFC->prev() == NULL, 
++     assert(nextFC == NULL || nextFC->prev() == NULL,
+        "Prev of head should be NULL");
+    } else {
+      prevFC->linkNext(nextFC);
+@@ -194,7 +191,7 @@
+   assert(size() == chunk->size(), "Wrong size");
+   assert(head() == NULL || head()->prev() == NULL, "list invariant");
+   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
+-  
++
+   FreeChunk* oldHead = head();
+   assert(chunk != oldHead, "double insertion");
+   chunk->linkAfter(oldHead);
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)freeList.hpp	1.31 07/05/05 17:05:48 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class CompactibleFreeListSpace;
+@@ -41,16 +38,16 @@
+ 
+ class FreeList VALUE_OBJ_CLASS_SPEC {
+   friend class CompactibleFreeListSpace;
+-  FreeChunk*	_head;		// List of free chunks
+-  FreeChunk*	_tail;		// Tail of list of free chunks
+-  size_t	_size;		// Size in Heap words of each chunks
+-  ssize_t	_count;		// Number of entries in list
+-  size_t        _hint;		// next larger size list with a positive surplus
++  FreeChunk*    _head;          // List of free chunks
++  FreeChunk*    _tail;          // Tail of list of free chunks
++  size_t        _size;          // Size in Heap words of each chunks
++  ssize_t       _count;         // Number of entries in list
++  size_t        _hint;          // next larger size list with a positive surplus
+ 
+-  AllocationStats _allocation_stats;		// statistics for smart allocation
++  AllocationStats _allocation_stats;            // statistics for smart allocation
+ 
+ #ifdef ASSERT
+-  Mutex*	_protecting_lock;
++  Mutex*        _protecting_lock;
+ #endif
+ 
+   // Asserts false if the protecting lock (if any) is not held.
+@@ -96,16 +93,16 @@
+     assert_proper_lock_protection();
+     return _head;
+   }
+-  void set_head(FreeChunk* v) { 
++  void set_head(FreeChunk* v) {
+     assert_proper_lock_protection();
+-    _head = v; 
+-    assert(!_head || _head->size() == _size, "bad chunk size"); 
++    _head = v;
++    assert(!_head || _head->size() == _size, "bad chunk size");
+   }
+   // Set the head of the list and set the prev field of non-null
+   // values to NULL.
+   void link_head(FreeChunk* v) {
+     assert_proper_lock_protection();
+-    set_head(v); 
++    set_head(v);
+     // If this method is not used (just set the head instead),
+     // this check can be avoided.
+     if (v != NULL) {
+@@ -117,16 +114,16 @@
+     assert_proper_lock_protection();
+     return _tail;
+   }
+-  void set_tail(FreeChunk* v) { 
++  void set_tail(FreeChunk* v) {
+     assert_proper_lock_protection();
+-    _tail = v; 
++    _tail = v;
+     assert(!_tail || _tail->size() == _size, "bad chunk size");
+   }
+   // Set the tail of the list and set the next field of non-null
+   // values to NULL.
+   void link_tail(FreeChunk* v) {
+     assert_proper_lock_protection();
+-    set_tail(v); 
++    set_tail(v);
+     if (v != NULL) {
+       v->clearNext();
+     }
+@@ -265,12 +262,12 @@
+ 
+   NOT_PRODUCT(
+     // For debugging.  The "_returnedBytes" in all the lists are summed
+-    // and compared with the total number of bytes swept during a 
++    // and compared with the total number of bytes swept during a
+     // collection.
+     size_t returnedBytes() const { return _allocation_stats.returnedBytes(); }
+     void set_returnedBytes(size_t v) { _allocation_stats.set_returnedBytes(v); }
+-    void increment_returnedBytes_by(size_t v) { 
+-      _allocation_stats.set_returnedBytes(_allocation_stats.returnedBytes() + v); 
++    void increment_returnedBytes_by(size_t v) {
++      _allocation_stats.set_returnedBytes(_allocation_stats.returnedBytes() + v);
+     }
+   )
+ 
+@@ -278,7 +275,7 @@
+   // the list is empty.
+   FreeChunk* getChunkAtHead();
+ 
+-  // Remove the first "n" or "count", whichever is smaller, chunks from the 
++  // Remove the first "n" or "count", whichever is smaller, chunks from the
+   // list, setting "fl", which is required to be empty, to point to them.
+   void getFirstNChunksFromList(size_t n, FreeList* fl);
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)vmCMSOperations.cpp	1.16 07/05/29 09:44:13 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ # include "incls/_precompiled.incl"
+ # include "incls/_vmCMSOperations.cpp.incl"
+@@ -132,10 +129,10 @@
+   GCCauseSetter gccs(gch, GCCause::_cms_initial_mark);
+ 
+   VM_CMS_Operation::verify_before_gc();
+-  
++
+   IsGCActiveMark x; // stop-world GC active
+   _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial);
+-  
++
+   VM_CMS_Operation::verify_after_gc();
+   HS_DTRACE_PROBE(hs_private, cms__initmark__end);
+ }
+@@ -154,10 +151,10 @@
+   GCCauseSetter gccs(gch, GCCause::_cms_final_remark);
+ 
+   VM_CMS_Operation::verify_before_gc();
+-  
++
+   IsGCActiveMark x; // stop-world GC active
+   _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal);
+-  
++
+   VM_CMS_Operation::verify_after_gc();
+   HS_DTRACE_PROBE(hs_private, cms__remark__end);
+ }
+@@ -166,7 +163,7 @@
+ // GenCollectedHeap heap.
+ void VM_GenCollectFullConcurrent::doit() {
+   assert(Thread::current()->is_VM_thread(), "Should be VM thread");
+-  
++
+   GenCollectedHeap* gch = GenCollectedHeap::heap();
+   if (_gc_count_before == gch->total_collections()) {
+     // The "full" of do_full_collection call below "forces"
+@@ -193,7 +190,7 @@
+     CMSCollector::disable_icms();
+     // In case CMS thread was in icms_wait(), wake it up.
+     CMSCollector::start_icms();
+-    // Nudge the CMS thread to start a concurrent collection    
++    // Nudge the CMS thread to start a concurrent collection
+     CMSCollector::request_full_gc(_full_gc_count_before);
+   } else {
+     FullGCCount_lock->notify_all();  // Inform the Java thread its work is done
+@@ -251,4 +248,3 @@
+   // Enable iCMS back.
+   CMSCollector::enable_icms();
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)vmCMSOperations.hpp	1.13 07/05/29 09:44:13 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The VM_CMS_Operation is slightly different from
+@@ -39,14 +36,14 @@
+ //
+ //      VM_CMS_Initial_Mark
+ //      VM_CMS_Final_Mark
+-//     
++//
+ 
+ // Forward decl.
+ class CMSCollector;
+ 
+ class VM_CMS_Operation: public VM_Operation {
+  protected:
+-  CMSCollector*  _collector;		     // associated collector
++  CMSCollector*  _collector;                 // associated collector
+   bool           _prologue_succeeded;     // whether doit_prologue succeeded
+ 
+   bool lost_race() const;
+@@ -60,7 +57,7 @@
+     _collector(collector),
+     _prologue_succeeded(false) {}
+   ~VM_CMS_Operation() {}
+-  
++
+   // The legal collector state for executing this CMS op.
+   virtual const CMSCollector::CollectorState legal_state() const = 0;
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,28 +1,25 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)vmStructs_cms.hpp	1.2 07/05/01 19:01:30 JVM"
+-#endif
+ /*
+  * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+- *   
++ *
+  * This code is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License version 2 only, as
+  * published by the Free Software Foundation.
+- *   
++ *
+  * This code is distributed in the hope that it will be useful, but WITHOUT
+  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+  * version 2 for more details (a copy is included in the LICENSE file that
+  * accompanied this code).
+- *  
++ *
+  * You should have received a copy of the GNU General Public License version
+  * 2 along with this work; if not, write to the Free Software Foundation,
+  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+- *   
++ *
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #define VM_STRUCTS_CMS(nonstatic_field, \
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.cpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)adjoiningGenerations.cpp	1.16 07/05/05 17:05:26 JVM"
+-#endif
+ /*
+  * Copyright 2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -33,19 +30,19 @@
+ // the old behavior otherwise (with PSYoungGen and PSOldGen).
+ 
+ AdjoiningGenerations::AdjoiningGenerations(ReservedSpace old_young_rs,
+-					   size_t init_low_byte_size,
+-					   size_t min_low_byte_size,
+-					   size_t max_low_byte_size,
+-					   size_t init_high_byte_size,
+-					   size_t min_high_byte_size,
+-					   size_t max_high_byte_size,
+-					   size_t alignment) :
++                                           size_t init_low_byte_size,
++                                           size_t min_low_byte_size,
++                                           size_t max_low_byte_size,
++                                           size_t init_high_byte_size,
++                                           size_t min_high_byte_size,
++                                           size_t max_high_byte_size,
++                                           size_t alignment) :
+   _virtual_spaces(old_young_rs, min_low_byte_size,
+-		  min_high_byte_size, alignment) {
++                  min_high_byte_size, alignment) {
+   assert(min_low_byte_size <= init_low_byte_size &&
+-	 init_low_byte_size <= max_low_byte_size, "Parameter check");
++         init_low_byte_size <= max_low_byte_size, "Parameter check");
+   assert(min_high_byte_size <= init_high_byte_size &&
+-	 init_high_byte_size <= max_high_byte_size, "Parameter check");
++         init_high_byte_size <= max_high_byte_size, "Parameter check");
+   // Create the generations differently based on the option to
+   // move the boundary.
+   if (UseAdaptiveGCBoundary) {
+@@ -55,22 +52,22 @@
+ 
+     // Does the actual creation of the virtual spaces
+     _virtual_spaces.initialize(max_low_byte_size,
+-			       init_low_byte_size,
+-			       init_high_byte_size);
++                               init_low_byte_size,
++                               init_high_byte_size);
+ 
+     // Place the young gen at the high end.  Passes in the virtual space.
+     _young_gen = new ASPSYoungGen(_virtual_spaces.high(),
+-				  _virtual_spaces.high()->committed_size(),
+-				  min_high_byte_size,
+-				  _virtual_spaces.high_byte_size_limit());
++                                  _virtual_spaces.high()->committed_size(),
++                                  min_high_byte_size,
++                                  _virtual_spaces.high_byte_size_limit());
+ 
+     // Place the old gen at the low end. Passes in the virtual space.
+     _old_gen = new ASPSOldGen(_virtual_spaces.low(),
+-			      _virtual_spaces.low()->committed_size(),
++                              _virtual_spaces.low()->committed_size(),
+                               min_low_byte_size,
+-			      _virtual_spaces.low_byte_size_limit(),
++                              _virtual_spaces.low_byte_size_limit(),
+                               "old", 1);
+-    
++
+     young_gen()->initialize_work();
+     assert(young_gen()->reserved().byte_size() <= young_gen()->gen_size_limit(),
+      "Consistency check");
+@@ -78,39 +75,39 @@
+      "Consistency check");
+ 
+     old_gen()->initialize_work("old", 1);
+-    assert(old_gen()->reserved().byte_size() <= old_gen()->gen_size_limit(), 
++    assert(old_gen()->reserved().byte_size() <= old_gen()->gen_size_limit(),
+      "Consistency check");
+-    assert(old_young_rs.size() >= old_gen()->gen_size_limit(), 
++    assert(old_young_rs.size() >= old_gen()->gen_size_limit(),
+      "Consistency check");
+   } else {
+ 
+     // Layout the reserved space for the generations.
+-    ReservedSpace old_rs   = 
++    ReservedSpace old_rs   =
+       virtual_spaces()->reserved_space().first_part(max_low_byte_size);
+-    ReservedSpace heap_rs  = 
++    ReservedSpace heap_rs  =
+       virtual_spaces()->reserved_space().last_part(max_low_byte_size);
+     ReservedSpace young_rs = heap_rs.first_part(max_high_byte_size);
+     assert(young_rs.size() == heap_rs.size(), "Didn't reserve all of the heap");
+ 
+     // Create the generations.  Virtual spaces are not passed in.
+     _young_gen = new PSYoungGen(init_high_byte_size,
+-				min_high_byte_size,
+-				max_high_byte_size);
++                                min_high_byte_size,
++                                max_high_byte_size);
+     _old_gen = new PSOldGen(init_low_byte_size,
+                             min_low_byte_size,
+-			    max_low_byte_size,
++                            max_low_byte_size,
+                             "old", 1);
+ 
+     // The virtual spaces are created by the initialization of the gens.
+     _young_gen->initialize(young_rs, alignment);
+-    assert(young_gen()->gen_size_limit() == young_rs.size(), 
++    assert(young_gen()->gen_size_limit() == young_rs.size(),
+       "Consistency check");
+     _old_gen->initialize(old_rs, alignment, "old", 1);
+     assert(old_gen()->gen_size_limit() == old_rs.size(), "Consistency check");
+   }
+ }
+ 
+-size_t AdjoiningGenerations::reserved_byte_size() { 
++size_t AdjoiningGenerations::reserved_byte_size() {
+   return virtual_spaces()->reserved_space().size();
+ }
+ 
+@@ -132,8 +129,8 @@
+   const size_t old_gen_available = old_gen()->available_for_expansion();
+   const size_t alignment = virtual_spaces()->alignment();
+   size_t change_in_bytes = MIN3(young_gen_available,
+-				old_gen_available,
+-				align_size_up_(expand_in_bytes, alignment));
++                                old_gen_available,
++                                align_size_up_(expand_in_bytes, alignment));
+ 
+   if (change_in_bytes == 0) {
+     return;
+@@ -146,7 +143,7 @@
+     if (!PrintHeapAtGC) {
+       Universe::print_on(gclog_or_tty);
+     }
+-    gclog_or_tty->print_cr("  PSOldGen max size: " SIZE_FORMAT "K", 
++    gclog_or_tty->print_cr("  PSOldGen max size: " SIZE_FORMAT "K",
+       old_gen()->max_gen_size()/K);
+   }
+ 
+@@ -159,8 +156,8 @@
+   // The total reserved for the generations should match the sum
+   // of the two even if the boundary is moving.
+   assert(reserved_byte_size() ==
+-	 old_gen()->max_gen_size() + young_gen()->max_size(),
+-	 "Space is missing");
++         old_gen()->max_gen_size() + young_gen()->max_size(),
++         "Space is missing");
+   young_gen()->space_invariants();
+   old_gen()->space_invariants();
+ 
+@@ -169,7 +166,7 @@
+     if (!PrintHeapAtGC) {
+       Universe::print_on(gclog_or_tty);
+     }
+-    gclog_or_tty->print_cr("  PSOldGen max size: " SIZE_FORMAT "K", 
++    gclog_or_tty->print_cr("  PSOldGen max size: " SIZE_FORMAT "K",
+       old_gen()->max_gen_size()/K);
+   }
+ }
+@@ -190,8 +187,8 @@
+   const size_t old_gen_available = old_gen()->available_for_contraction();
+   const size_t alignment = virtual_spaces()->alignment();
+   size_t change_in_bytes = MIN3(young_gen_available,
+-				old_gen_available,
+-				align_size_up_(expand_in_bytes, alignment));
++                                old_gen_available,
++                                align_size_up_(expand_in_bytes, alignment));
+ 
+   if (change_in_bytes == 0) {
+     return false;
+@@ -204,7 +201,7 @@
+     if (!PrintHeapAtGC) {
+       Universe::print_on(gclog_or_tty);
+     }
+-    gclog_or_tty->print_cr("  PSYoungGen max size: " SIZE_FORMAT "K", 
++    gclog_or_tty->print_cr("  PSYoungGen max size: " SIZE_FORMAT "K",
+       young_gen()->max_size()/K);
+   }
+ 
+@@ -219,8 +216,8 @@
+   // The total reserved for the generations should match the sum
+   // of the two even if the boundary is moving.
+   assert(reserved_byte_size() ==
+-	 old_gen()->max_gen_size() + young_gen()->max_size(),
+-	 "Space is missing");
++         old_gen()->max_gen_size() + young_gen()->max_size(),
++         "Space is missing");
+   young_gen()->space_invariants();
+   old_gen()->space_invariants();
+ 
+@@ -229,7 +226,7 @@
+     if (!PrintHeapAtGC) {
+       Universe::print_on(gclog_or_tty);
+     }
+-    gclog_or_tty->print_cr("  PSYoungGen max size: " SIZE_FORMAT "K", 
++    gclog_or_tty->print_cr("  PSYoungGen max size: " SIZE_FORMAT "K",
+       young_gen()->max_size()/K);
+   }
+ 
+@@ -279,4 +276,3 @@
+     }
+   }
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)adjoiningGenerations.hpp	1.14 07/05/05 17:05:25 JVM"
+-#endif
+ /*
+  * Copyright 2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ 
+@@ -32,7 +29,7 @@
+ // expanding of the generations can still be down with that
+ // virtual space as was previously done.  If expanding of reserved
+ // size of a generation is required, the adjacent generation
+-// must be shrunk.  Adjusting the boundary between the generations  
++// must be shrunk.  Adjusting the boundary between the generations
+ // is called for in this class.
+ 
+ class AdjoiningGenerations : public CHeapObj {
+@@ -40,7 +37,7 @@
+  private:
+   // The young generation and old generation, respectively
+   PSYoungGen* _young_gen;
+-  PSOldGen* _old_gen;	
++  PSOldGen* _old_gen;
+ 
+   // The spaces used by the two generations.
+   AdjoiningVirtualSpaces _virtual_spaces;
+@@ -53,13 +50,13 @@
+ 
+  public:
+   AdjoiningGenerations(ReservedSpace rs,
+-		       size_t init_low_byte_size,
+-		       size_t min_low_byte_size,
+-		       size_t max_low_byte_size,
+-		       size_t init_high_byte_size,
+-		       size_t min_high_byte_size,
+-		       size_t max_high_bytes_size,
+-		       size_t alignment);
++                       size_t init_low_byte_size,
++                       size_t min_low_byte_size,
++                       size_t max_low_byte_size,
++                       size_t init_high_byte_size,
++                       size_t min_high_byte_size,
++                       size_t max_high_bytes_size,
++                       size_t alignment);
+ 
+   // Accessors
+   PSYoungGen* young_gen() { return _young_gen; }
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningVirtualSpaces.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningVirtualSpaces.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningVirtualSpaces.cpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningVirtualSpaces.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)adjoiningVirtualSpaces.cpp	1.14 07/05/05 17:05:26 JVM"
+-#endif
+ /*
+  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,25 +19,25 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_adjoiningVirtualSpaces.cpp.incl"
+ 
+ AdjoiningVirtualSpaces::AdjoiningVirtualSpaces(ReservedSpace rs,
+-					       size_t min_low_byte_size,
+-					       size_t min_high_byte_size,
+-					       size_t alignment) :
++                                               size_t min_low_byte_size,
++                                               size_t min_high_byte_size,
++                                               size_t alignment) :
+   _reserved_space(rs), _min_low_byte_size(min_low_byte_size),
+   _min_high_byte_size(min_high_byte_size), _low(0), _high(0),
+   _alignment(alignment) {}
+ 
+-// The maximum byte sizes are for the initial layout of the 
++// The maximum byte sizes are for the initial layout of the
+ // virtual spaces and are not the limit on the maximum bytes sizes.
+ void AdjoiningVirtualSpaces::initialize(size_t max_low_byte_size,
+-					size_t init_low_byte_size,
+-					size_t init_high_byte_size) {
++                                        size_t init_low_byte_size,
++                                        size_t init_high_byte_size) {
+ 
+   // The reserved spaces for the two parts of the virtual space.
+   ReservedSpace old_rs   = _reserved_space.first_part(max_low_byte_size);
+@@ -49,14 +46,14 @@
+   _low = new PSVirtualSpace(old_rs, alignment());
+   if (!_low->expand_by(init_low_byte_size)) {
+     vm_exit_during_initialization("Could not reserve enough space for "
+-				  "object heap");
++                                  "object heap");
+   }
+ 
+   _high = new PSVirtualSpaceHighToLow(young_rs, alignment());
+   if (!_high->expand_by(init_high_byte_size)) {
+     vm_exit_during_initialization("Could not reserve enough space for "
+-				  "object heap");
+-  }  
++                                  "object heap");
++  }
+ }
+ 
+ bool AdjoiningVirtualSpaces::adjust_boundary_up(size_t change_in_bytes) {
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)adjoiningVirtualSpaces.hpp	1.11 07/05/05 17:05:25 JVM"
+-#endif
+ /*
+  * Copyright 2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ 
+@@ -30,28 +27,28 @@
+ // most of the reserved region but committed parts of which
+ // cannot overlap.
+ //
+-// 	+-------+ <--- high_boundary for H
+-//	|	|
+-//	|   H	|
+-//	|	|
+-//	|	|
+-//	|	|
+-//	--------- <--- low for H
+-//	|	|
+-//	========= <--- low_boundary for H, high_boundary for L
+-//	|	|
+-//	|	|
+-//	|	|
+-//	--------- <--- high for L
+-//	|	|
+-//	|   L	|
+-//	|	|
+-//	|	|
+-//	|	|
+-//	+-------+ <--- low_boundary for L
++//      +-------+ <--- high_boundary for H
++//      |       |
++//      |   H   |
++//      |       |
++//      |       |
++//      |       |
++//      --------- <--- low for H
++//      |       |
++//      ========= <--- low_boundary for H, high_boundary for L
++//      |       |
++//      |       |
++//      |       |
++//      --------- <--- high for L
++//      |       |
++//      |   L   |
++//      |       |
++//      |       |
++//      |       |
++//      +-------+ <--- low_boundary for L
+ //
+ // Each virtual space in the AdjoiningVirtualSpaces grows and shrinks
+-// within its reserved region (between the low_boundary and the 
++// within its reserved region (between the low_boundary and the
+ // boundary) independently.  If L wants to grow above its high_boundary,
+ // then the high_boundary of L and the low_boundary of H must be
+ // moved up consistently.  AdjoiningVirtualSpaces provide the
+@@ -76,10 +73,10 @@
+  public:
+   // Allocates two virtual spaces that will be located at the
+   // high and low ends.  Does no initialization.
+-  AdjoiningVirtualSpaces(ReservedSpace rs, 
+-			 size_t min_low_byte_size, 
+-			 size_t min_high_byte_size,
+-			 size_t alignment); 
++  AdjoiningVirtualSpaces(ReservedSpace rs,
++                         size_t min_low_byte_size,
++                         size_t min_high_byte_size,
++                         size_t alignment);
+ 
+   // accessors
+   PSVirtualSpace* high() { return _high; }
+@@ -89,7 +86,7 @@
+   size_t min_high_byte_size() { return _min_high_byte_size; }
+   size_t alignment() const { return _alignment; }
+ 
+-  // move boundary between the two spaces up 
++  // move boundary between the two spaces up
+   bool adjust_boundary_up(size_t size_in_bytes);
+   // and down
+   bool adjust_boundary_down(size_t size_in_bytes);
+@@ -99,14 +96,13 @@
+     return _reserved_space.size() - _min_low_byte_size;
+   }
+   // Maximum byte size for the low space.
+-  size_t low_byte_size_limit() { 
++  size_t low_byte_size_limit() {
+     return _reserved_space.size() - _min_high_byte_size;
+   }
+ 
+   // Sets the boundaries for the virtual spaces and commits an
+   // initial size.
+   void initialize(size_t max_low_byte_size,
+-		  size_t init_low_byte_size,
+-		  size_t init_high_byte_size);
++                  size_t init_low_byte_size,
++                  size_t init_high_byte_size);
+ };
+-
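The diagram and comments above describe the core idea of AdjoiningVirtualSpaces: two virtual spaces carved out of one reserved region, separated by a boundary that may move only while both spaces keep their minimum sizes. A minimal standalone sketch of that invariant, using raw byte offsets in place of the real PSVirtualSpace objects (all names below are hypothetical, not HotSpot's):

    #include <cstddef>

    // Hypothetical model: one reserved region of 'reserved' bytes, split at
    // 'boundary' into a low space [0, boundary) and a high space
    // [boundary, reserved).  Assumes min_low <= boundary <= reserved - min_high.
    struct AdjoiningSketch {
      std::size_t reserved;
      std::size_t boundary;
      std::size_t min_low;   // smallest legal size for the low space
      std::size_t min_high;  // smallest legal size for the high space

      // Grow the low space: the high space must keep min_high bytes.
      bool adjust_boundary_up(std::size_t bytes) {
        if (bytes > reserved - min_high - boundary) return false;
        boundary += bytes;
        return true;
      }
      // Shrink the low space: it must keep min_low bytes.
      bool adjust_boundary_down(std::size_t bytes) {
        if (bytes > boundary - min_low) return false;
        boundary -= bytes;
        return true;
      }
    };
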
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)asPSOldGen.cpp	1.18 07/05/05 17:05:26 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,14 +19,14 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_asPSOldGen.cpp.incl"
+ 
+ // Whereas PSOldGen takes the maximum size of the generation
+-// (which doesn't change in the case of PSOldGen) as a parameter, 
++// (which doesn't change in the case of PSOldGen) as a parameter,
+ // ASPSOldGen takes the upper limit on the size of
+ // the generation as a parameter.  In ASPSOldGen the
+ // maximum size of the generation can change as the boundary
+@@ -43,23 +40,23 @@
+ // That can change as the boundary moves.  Below the limit of
+ // the size of the generation is passed to the PSOldGen constructor
+ // for "_max_gen_size" (have to pass something) but it is not used later.
+-// 
++//
+ ASPSOldGen::ASPSOldGen(size_t initial_size,
+-                       size_t min_size, 
+-		       size_t size_limit,
+-                       const char* gen_name, 
+-		       int level) :
++                       size_t min_size,
++                       size_t size_limit,
++                       const char* gen_name,
++                       int level) :
+   PSOldGen(initial_size, min_size, size_limit, gen_name, level),
+   _gen_size_limit(size_limit)
+ 
+ {}
+ 
+-ASPSOldGen::ASPSOldGen(PSVirtualSpace* vs, 
+-		       size_t initial_size,
+-                       size_t min_size, 
+-		       size_t size_limit,
+-                       const char* gen_name, 
+-		       int level) :
++ASPSOldGen::ASPSOldGen(PSVirtualSpace* vs,
++                       size_t initial_size,
++                       size_t min_size,
++                       size_t size_limit,
++                       const char* gen_name,
++                       int level) :
+   PSOldGen(initial_size, min_size, size_limit, gen_name, level),
+   _gen_size_limit(size_limit)
+ 
+@@ -78,7 +75,10 @@
+   assert(virtual_space()->is_aligned(gen_size_limit()), "not aligned");
+   assert(gen_size_limit() >= virtual_space()->committed_size(), "bad gen size");
+ 
+-  return gen_size_limit() - virtual_space()->committed_size();
++  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
++  size_t result =  gen_size_limit() - virtual_space()->committed_size();
++  size_t result_aligned = align_size_down(result, heap->old_gen_alignment());
++  return result_aligned;
+ }
+ 
+ size_t ASPSOldGen::available_for_contraction() {
+@@ -87,12 +87,12 @@
+     return uncommitted_bytes;
+   }
+ 
+-  const size_t alignment = virtual_space()->alignment();
+-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); 
++  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
++  const size_t gen_alignment = heap->old_gen_alignment();
+   PSAdaptiveSizePolicy* policy = heap->size_policy();
+-  const size_t working_size = 
++  const size_t working_size =
+     used_in_bytes() + (size_t) policy->avg_promoted()->padded_average();
+-  const size_t working_aligned = align_size_up(working_size, alignment);
++  const size_t working_aligned = align_size_up(working_size, gen_alignment);
+   const size_t working_or_min = MAX2(working_aligned, min_gen_size());
+   if (working_or_min > reserved().byte_size()) {
+     // If the used or minimum gen size (aligned up) is greater
+@@ -100,7 +100,7 @@
+     // for contraction should (after proper alignment) be 0
+     return 0;
+   }
+-  const size_t max_contraction = 
++  const size_t max_contraction =
+     reserved().byte_size() - working_or_min;
+ 
+   // Use the "increment" fraction instead of the "decrement" fraction
+@@ -109,25 +109,27 @@
+   // only reduce the footprint.
+ 
+   size_t result = policy->promo_increment_aligned_down(max_contraction);
++  // Also adjust for inter-generational alignment
++  size_t result_aligned = align_size_down(result, gen_alignment);
+   if (PrintAdaptiveSizePolicy && Verbose) {
+     gclog_or_tty->print_cr("\nASPSOldGen::available_for_contraction:"
+-      " %d K / 0x%x", result/K, result);
+-    gclog_or_tty->print_cr(" reserved().byte_size() %d K / 0x%x ", 
++      " %d K / 0x%x", result_aligned/K, result_aligned);
++    gclog_or_tty->print_cr(" reserved().byte_size() %d K / 0x%x ",
+       reserved().byte_size()/K, reserved().byte_size());
+     size_t working_promoted = (size_t) policy->avg_promoted()->padded_average();
+-    gclog_or_tty->print_cr(" padded promoted %d K / 0x%x", 
++    gclog_or_tty->print_cr(" padded promoted %d K / 0x%x",
+       working_promoted/K, working_promoted);
+-    gclog_or_tty->print_cr(" used %d K / 0x%x", 
++    gclog_or_tty->print_cr(" used %d K / 0x%x",
+       used_in_bytes()/K, used_in_bytes());
+-    gclog_or_tty->print_cr(" min_gen_size() %d K / 0x%x", 
++    gclog_or_tty->print_cr(" min_gen_size() %d K / 0x%x",
+       min_gen_size()/K, min_gen_size());
+-    gclog_or_tty->print_cr(" max_contraction %d K / 0x%x", 
++    gclog_or_tty->print_cr(" max_contraction %d K / 0x%x",
+       max_contraction/K, max_contraction);
+-    gclog_or_tty->print_cr("	without alignment %d K / 0x%x", 
+-      policy->promo_increment(max_contraction)/K, 
++    gclog_or_tty->print_cr("    without alignment %d K / 0x%x",
++      policy->promo_increment(max_contraction)/K,
+       policy->promo_increment(max_contraction));
+-    gclog_or_tty->print_cr(" alignment 0x%x", alignment);
++    gclog_or_tty->print_cr(" alignment 0x%x", gen_alignment);
+   }
+-  assert(result <= max_contraction, "arithmetic is wrong");
+-  return result;
++  assert(result_aligned <= max_contraction, "arithmetic is wrong");
++  return result_aligned;
+ }
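Several hunks above change ASPSOldGen to round its expansion and contraction estimates down with align_size_down(result, heap->old_gen_alignment()). A small self-contained illustration of that arithmetic; the alignment value and byte counts are made up for the example:

    #include <cassert>
    #include <cstddef>

    // Same arithmetic as HotSpot's align_size_down for a power-of-two alignment.
    static std::size_t align_size_down(std::size_t size, std::size_t alignment) {
      return size & ~(alignment - 1);
    }

    int main() {
      const std::size_t gen_alignment = 64 * 1024;  // hypothetical 64K alignment
      const std::size_t raw = 1000000;              // unaligned available-byte figure
      // Rounding down guarantees the value handed to the resize logic is a
      // multiple of the generation alignment, never an over-estimate.
      assert(align_size_down(raw, gen_alignment) == 983040);
      return 0;
    }
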
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)asPSOldGen.hpp	1.14 07/05/05 17:05:26 JVM"
+-#endif
+ /*
+  * Copyright 2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,26 +19,26 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class ASPSOldGen : public PSOldGen {
+   friend class VMStructs;
+   size_t _gen_size_limit;  // Largest size the generation's reserved size
+-			   // can grow.
++                           // can grow.
+  public:
+   ASPSOldGen(size_t initial_byte_size,
+-             size_t minimum_byte_size, 
+-	     size_t byte_size_limit,
++             size_t minimum_byte_size,
++             size_t byte_size_limit,
+              const char* gen_name, int level);
+-  ASPSOldGen(PSVirtualSpace* vs, 
+-	     size_t initial_byte_size,
+-             size_t minimum_byte_size, 
+-	     size_t byte_size_limit,
++  ASPSOldGen(PSVirtualSpace* vs,
++             size_t initial_byte_size,
++             size_t minimum_byte_size,
++             size_t byte_size_limit,
+              const char* gen_name, int level);
+-  size_t gen_size_limit() 		{ return _gen_size_limit; }
+-  size_t max_gen_size() 		{ return _reserved.byte_size(); }
+-  void set_gen_size_limit(size_t v) 	{ _gen_size_limit = v; }
++  size_t gen_size_limit()               { return _gen_size_limit; }
++  size_t max_gen_size()                 { return _reserved.byte_size(); }
++  void set_gen_size_limit(size_t v)     { _gen_size_limit = v; }
+ 
+   // After a shrink or expand reset the generation
+   void reset_after_change();
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)asPSYoungGen.cpp	1.22 07/05/05 17:05:27 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -30,16 +27,16 @@
+ 
+ ASPSYoungGen::ASPSYoungGen(size_t init_byte_size,
+                            size_t minimum_byte_size,
+-			   size_t byte_size_limit) :
++                           size_t byte_size_limit) :
+   PSYoungGen(init_byte_size, minimum_byte_size, byte_size_limit),
+   _gen_size_limit(byte_size_limit) {
+ }
+ 
+ 
+ ASPSYoungGen::ASPSYoungGen(PSVirtualSpace* vs,
+-			   size_t init_byte_size,
++                           size_t init_byte_size,
+                            size_t minimum_byte_size,
+-			   size_t byte_size_limit) :
++                           size_t byte_size_limit) :
+   //PSYoungGen(init_byte_size, minimum_byte_size, byte_size_limit),
+   PSYoungGen(vs->committed_size(), minimum_byte_size, byte_size_limit),
+   _gen_size_limit(byte_size_limit) {
+@@ -50,12 +47,12 @@
+ }
+ 
+ void ASPSYoungGen::initialize_virtual_space(ReservedSpace rs,
+-					    size_t alignment) {
++                                            size_t alignment) {
+   assert(_init_gen_size != 0, "Should have a finite size");
+   _virtual_space = new PSVirtualSpaceHighToLow(rs, alignment);
+   if (!_virtual_space->expand_by(_init_gen_size)) {
+     vm_exit_during_initialization("Could not reserve enough space for "
+-				  "object heap");
++                                  "object heap");
+   }
+ }
+ 
+@@ -65,15 +62,18 @@
+ }
+ 
+ size_t ASPSYoungGen::available_for_expansion() {
+-  
++
+   size_t current_committed_size = virtual_space()->committed_size();
+   assert((gen_size_limit() >= current_committed_size),
+     "generation size limit is wrong");
+-  return gen_size_limit() - current_committed_size;
++  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
++  size_t result =  gen_size_limit() - current_committed_size;
++  size_t result_aligned = align_size_down(result, heap->young_gen_alignment());
++  return result_aligned;
+ }
+ 
+ // Return the number of bytes the young gen is willing to give up.
+-// 
++//
+ // Future implementations could check the survivors and if to_space is in the
+ // right place (below from_space), take a chunk from to_space.
+ size_t ASPSYoungGen::available_for_contraction() {
+@@ -87,7 +87,7 @@
+     // Respect the minimum size for eden and for the young gen as a whole.
+     ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+     const size_t eden_alignment = heap->intra_generation_alignment();
+-    const size_t gen_alignment = heap->generation_alignment();
++    const size_t gen_alignment = heap->young_gen_alignment();
+ 
+     assert(eden_space()->capacity_in_bytes() >= eden_alignment,
+       "Alignment is wrong");
+@@ -104,14 +104,15 @@
+     // for reasons the "increment" fraction is used.
+     PSAdaptiveSizePolicy* policy = heap->size_policy();
+     size_t result = policy->eden_increment_aligned_down(max_contraction);
++    size_t result_aligned = align_size_down(result, gen_alignment);
+     if (PrintAdaptiveSizePolicy && Verbose) {
+       gclog_or_tty->print_cr("ASPSYoungGen::available_for_contraction: %d K",
+-	result/K);
+-      gclog_or_tty->print_cr("	max_contraction %d K", max_contraction/K);
+-      gclog_or_tty->print_cr("	eden_avail %d K", eden_avail/K);
+-      gclog_or_tty->print_cr("	gen_avail %d K", gen_avail/K);
++        result_aligned/K);
++      gclog_or_tty->print_cr("  max_contraction %d K", max_contraction/K);
++      gclog_or_tty->print_cr("  eden_avail %d K", eden_avail/K);
++      gclog_or_tty->print_cr("  gen_avail %d K", gen_avail/K);
+     }
+-    return result;
++    return result_aligned;
+ 
+   }
+ 
+@@ -127,8 +128,8 @@
+ 
+   // Include any space that is committed but is not in eden.
+   size_t available = pointer_delta(eden_space()->bottom(),
+-				   virtual_space()->low(),
+-				   sizeof(char));
++                                   virtual_space()->low(),
++                                   sizeof(char));
+ 
+   const size_t eden_capacity = eden_space()->capacity_in_bytes();
+   if (eden_space()->is_empty() && eden_capacity > alignment) {
+@@ -157,13 +158,13 @@
+ 
+   assert(max_size() == reserved().byte_size(), "max gen size problem?");
+   assert(min_gen_size() <= orig_size && orig_size <= max_size(),
+-	 "just checking");
++         "just checking");
+ 
+   // Adjust new generation size
+   const size_t eden_plus_survivors =
+     align_size_up(eden_size + 2 * survivor_size, alignment);
+-  size_t desired_size = MAX2(MIN2(eden_plus_survivors, gen_size_limit()), 
+-			     min_gen_size());
++  size_t desired_size = MAX2(MIN2(eden_plus_survivors, gen_size_limit()),
++                             min_gen_size());
+   assert(desired_size <= gen_size_limit(), "just checking");
+ 
+   if (desired_size > orig_size) {
+@@ -184,10 +185,10 @@
+   } else {
+     if (Verbose && PrintGC) {
+       if (orig_size == gen_size_limit()) {
+-        gclog_or_tty->print_cr("ASPSYoung generation size at maximum: " 
++        gclog_or_tty->print_cr("ASPSYoung generation size at maximum: "
+           SIZE_FORMAT "K", orig_size/K);
+       } else if (orig_size == min_gen_size()) {
+-        gclog_or_tty->print_cr("ASPSYoung generation size at minium: " 
++        gclog_or_tty->print_cr("ASPSYoung generation size at minimum: "
+           SIZE_FORMAT "K", orig_size/K);
+       }
+     }
+@@ -197,14 +198,14 @@
+     reset_after_change();
+     if (Verbose && PrintGC) {
+       size_t current_size  = virtual_space()->committed_size();
+-      gclog_or_tty->print_cr("ASPSYoung generation size changed: " 
+-	SIZE_FORMAT "K->" SIZE_FORMAT "K",
++      gclog_or_tty->print_cr("ASPSYoung generation size changed: "
++        SIZE_FORMAT "K->" SIZE_FORMAT "K",
+         orig_size/K, current_size/K);
+     }
+   }
+ 
+   guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
+-	    virtual_space()->committed_size() == max_size(), "Sanity");
++            virtual_space()->committed_size() == max_size(), "Sanity");
+ 
+   return true;
+ }
+@@ -214,10 +215,10 @@
+ //  current implementation does not allow holes between the spaces
+ //  _young_generation_boundary has to be reset because it changes.
+ //  so additional verification
+-void ASPSYoungGen::resize_spaces(size_t requested_eden_size, 
+-				 size_t requested_survivor_size) {
++void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
++                                 size_t requested_survivor_size) {
+   assert(requested_eden_size > 0 && requested_survivor_size > 0,
+-	 "just checking");
++         "just checking");
+ 
+   space_invariants();
+ 
+@@ -227,35 +228,35 @@
+   }
+ 
+   if (PrintAdaptiveSizePolicy && Verbose) {
+-    gclog_or_tty->print_cr("PSYoungGen::resize_spaces(requested_eden_size: " 
+-		  SIZE_FORMAT 
++    gclog_or_tty->print_cr("PSYoungGen::resize_spaces(requested_eden_size: "
++                  SIZE_FORMAT
+                   ", requested_survivor_size: " SIZE_FORMAT ")",
+                   requested_eden_size, requested_survivor_size);
+-    gclog_or_tty->print_cr("    eden: [" PTR_FORMAT ".." PTR_FORMAT ") " 
+-		  SIZE_FORMAT, 
+-                  eden_space()->bottom(), 
+-                  eden_space()->end(), 
++    gclog_or_tty->print_cr("    eden: [" PTR_FORMAT ".." PTR_FORMAT ") "
++                  SIZE_FORMAT,
++                  eden_space()->bottom(),
++                  eden_space()->end(),
+                   pointer_delta(eden_space()->end(),
+                                 eden_space()->bottom(),
+                                 sizeof(char)));
+-    gclog_or_tty->print_cr("    from: [" PTR_FORMAT ".." PTR_FORMAT ") " 
+-		  SIZE_FORMAT, 
+-                  from_space()->bottom(), 
+-                  from_space()->end(), 
++    gclog_or_tty->print_cr("    from: [" PTR_FORMAT ".." PTR_FORMAT ") "
++                  SIZE_FORMAT,
++                  from_space()->bottom(),
++                  from_space()->end(),
+                   pointer_delta(from_space()->end(),
+                                 from_space()->bottom(),
+                                 sizeof(char)));
+-    gclog_or_tty->print_cr("      to: [" PTR_FORMAT ".." PTR_FORMAT ") " 
+-		  SIZE_FORMAT, 
+-                  to_space()->bottom(),   
+-                  to_space()->end(), 
++    gclog_or_tty->print_cr("      to: [" PTR_FORMAT ".." PTR_FORMAT ") "
++                  SIZE_FORMAT,
++                  to_space()->bottom(),
++                  to_space()->end(),
+                   pointer_delta(  to_space()->end(),
+                                   to_space()->bottom(),
+                                   sizeof(char)));
+   }
+ 
+   // There's nothing to do if the new sizes are the same as the current
+-  if (requested_survivor_size == to_space()->capacity_in_bytes() && 
++  if (requested_survivor_size == to_space()->capacity_in_bytes() &&
+       requested_survivor_size == from_space()->capacity_in_bytes() &&
+       requested_eden_size == eden_space()->capacity_in_bytes()) {
+     if (PrintAdaptiveSizePolicy && Verbose) {
+@@ -263,9 +264,9 @@
+     }
+     return;
+   }
+-  
++
+   char* eden_start = (char*)virtual_space()->low();
+-  char* eden_end   = (char*)eden_space()->end();   
++  char* eden_end   = (char*)eden_space()->end();
+   char* from_start = (char*)from_space()->bottom();
+   char* from_end   = (char*)from_space()->end();
+   char* to_start   = (char*)to_space()->bottom();
+@@ -285,12 +286,12 @@
+ 
+     // Set eden
+     // Compute how big eden can be, then adjust end.
+-    // See comment in PSYoungGen::resize_spaces() on 
++    // See comment in PSYoungGen::resize_spaces() on
+     // calculating eden_end.
+     const size_t eden_size = MIN2(requested_eden_size,
+-                                  pointer_delta(from_start, 
+-						eden_start, 
+-						sizeof(char)));
++                                  pointer_delta(from_start,
++                                                eden_start,
++                                                sizeof(char)));
+     eden_end = eden_start + eden_size;
+     assert(eden_end >= eden_start, "addition overflowed")
+ 
+@@ -300,21 +301,21 @@
+ 
+     // First calculate an optimal to-space
+     to_end   = (char*)virtual_space()->high();
+-    to_start = (char*)pointer_delta(to_end, 
+-				    (char*)requested_survivor_size, 
+-				    sizeof(char));
++    to_start = (char*)pointer_delta(to_end,
++                                    (char*)requested_survivor_size,
++                                    sizeof(char));
+ 
+     // Does the optimal to-space overlap from-space?
+     if (to_start < (char*)from_space()->end()) {
+       assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+-      
++
+       // Calculate the minimum offset possible for from_end
+-      size_t from_size = 
+-	pointer_delta(from_space()->top(), from_start, sizeof(char));
++      size_t from_size =
++        pointer_delta(from_space()->top(), from_start, sizeof(char));
+ 
+       // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
+       if (from_size == 0) {
+-	from_size = alignment;
++        from_size = alignment;
+       } else {
+         from_size = align_size_up(from_size, alignment);
+       }
+@@ -322,30 +323,30 @@
+       from_end = from_start + from_size;
+       assert(from_end > from_start, "addition overflow or from_size problem");
+ 
+-      guarantee(from_end <= (char*)from_space()->end(), 
+-	"from_end moved to the right");
++      guarantee(from_end <= (char*)from_space()->end(),
++        "from_end moved to the right");
+ 
+       // Now update to_start with the new from_end
+       to_start = MAX2(from_end, to_start);
+     }
+ 
+     guarantee(to_start != to_end, "to space is zero sized");
+-      
++
+     if (PrintAdaptiveSizePolicy && Verbose) {
+       gclog_or_tty->print_cr("    [eden_start .. eden_end): "
+-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
+-                    eden_start, 
+-                    eden_end, 
++                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
++                    eden_start,
++                    eden_end,
+                     pointer_delta(eden_end, eden_start, sizeof(char)));
+       gclog_or_tty->print_cr("    [from_start .. from_end): "
+-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
+-                    from_start, 
+-                    from_end, 
++                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
++                    from_start,
++                    from_end,
+                     pointer_delta(from_end, from_start, sizeof(char)));
+       gclog_or_tty->print_cr("    [  to_start ..   to_end): "
+-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
+-                    to_start,   
+-                    to_end, 
++                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
++                    to_start,
++                    to_end,
+                     pointer_delta(  to_end,   to_start, sizeof(char)));
+     }
+   } else {
+@@ -358,24 +359,24 @@
+     // to space as if we were able to resize from space, even though from
+     // space is not modified.
+     // Giving eden priority was tried and gave poorer performance.
+-    to_end   = (char*)pointer_delta(virtual_space()->high(), 
+-				    (char*)requested_survivor_size, 
+-				    sizeof(char));
++    to_end   = (char*)pointer_delta(virtual_space()->high(),
++                                    (char*)requested_survivor_size,
++                                    sizeof(char));
+     to_end   = MIN2(to_end, from_start);
+-    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size, 
+-				    sizeof(char));
++    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
++                                    sizeof(char));
+     // if the space sizes are to be increased by several times then
+     // 'to_start' will point beyond the young generation. In this case
+     // 'to_start' should be adjusted.
+     to_start = MAX2(to_start, eden_start + alignment);
+ 
+     // Compute how big eden can be, then adjust end.
+-    // See comment in PSYoungGen::resize_spaces() on 
++    // See comment in PSYoungGen::resize_spaces() on
+     // calculating eden_end.
+     const size_t eden_size = MIN2(requested_eden_size,
+-                                  pointer_delta(to_start, 
+-						eden_start, 
+-						sizeof(char)));
++                                  pointer_delta(to_start,
++                                                eden_start,
++                                                sizeof(char)));
+     eden_end = eden_start + eden_size;
+     assert(eden_end >= eden_start, "addition overflowed")
+ 
+@@ -385,25 +386,25 @@
+ 
+     if (PrintAdaptiveSizePolicy && Verbose) {
+       gclog_or_tty->print_cr("    [eden_start .. eden_end): "
+-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
+-                    eden_start, 
+-                    eden_end, 
++                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
++                    eden_start,
++                    eden_end,
+                     pointer_delta(eden_end, eden_start, sizeof(char)));
+-      gclog_or_tty->print_cr("    [  to_start ..   to_end): " 
+-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
+-                    to_start,   
+-                    to_end, 
++      gclog_or_tty->print_cr("    [  to_start ..   to_end): "
++                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
++                    to_start,
++                    to_end,
+                     pointer_delta(  to_end,   to_start, sizeof(char)));
+-      gclog_or_tty->print_cr("    [from_start .. from_end): " 
+-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
+-                    from_start, 
+-                    from_end, 
++      gclog_or_tty->print_cr("    [from_start .. from_end): "
++                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
++                    from_start,
++                    from_end,
+                     pointer_delta(from_end, from_start, sizeof(char)));
+     }
+   }
+-  
+ 
+-  guarantee((HeapWord*)from_start <= from_space()->bottom(), 
++
++  guarantee((HeapWord*)from_start <= from_space()->bottom(),
+             "from start moved to the right");
+   guarantee((HeapWord*)from_end >= from_space()->top(),
+             "from end moved into live data");
+@@ -442,7 +443,7 @@
+                   from_space()->capacity_in_bytes(),
+                   to_space()->capacity_in_bytes());
+     gclog_or_tty->cr();
+-  } 
++  }
+   space_invariants();
+ }
+ 
+@@ -460,7 +461,7 @@
+     eden_space()->initialize(eden_mr, true);
+     PSScavenge::set_young_generation_boundary(eden_space()->bottom());
+   }
+-  MemRegion cmr((HeapWord*)virtual_space()->low(), 
++  MemRegion cmr((HeapWord*)virtual_space()->low(),
+                 (HeapWord*)virtual_space()->high());
+   Universe::heap()->barrier_set()->resize_covered_region(cmr);
+ 
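resize_spaces in the hunks above sizes eden and the survivor spaces with repeated pointer_delta calls. A minimal stand-in showing the semantics those calls assume (HotSpot's pointer_delta returns the distance between two addresses in units of element_size; this demo is a sketch, not the VM's code):

    #include <cstddef>

    // Simplified pointer_delta: distance from 'smaller' up to 'larger',
    // measured in elements of element_size bytes.
    static std::size_t pointer_delta(const void* larger, const void* smaller,
                                     std::size_t element_size) {
      return (std::size_t)((const char*)larger - (const char*)smaller)
             / element_size;
    }

    int main() {
      char region[4096];
      char* eden_start = region;
      char* from_start = region + 1024;
      // With element_size == sizeof(char) this is the raw byte distance,
      // which is how the code above caps a candidate eden size.
      return pointer_delta(from_start, eden_start, sizeof(char)) == 1024 ? 0 : 1;
    }
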
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)asPSYoungGen.hpp	1.16 07/05/05 17:05:26 JVM"
+-#endif
+ /*
+  * Copyright 2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class ASPSYoungGen : public PSYoungGen {
+@@ -33,14 +30,14 @@
+   virtual size_t available_to_live();
+ 
+  public:
+-  ASPSYoungGen(size_t         initial_byte_size, 
++  ASPSYoungGen(size_t         initial_byte_size,
+                size_t         minimum_byte_size,
+-	       size_t         byte_size_limit);
++               size_t         byte_size_limit);
+ 
+   ASPSYoungGen(PSVirtualSpace* vs,
+-	       size_t         initial_byte_size, 
++               size_t         initial_byte_size,
+                size_t         minimum_byte_size,
+-	       size_t         byte_size_limit);
++               size_t         byte_size_limit);
+ 
+   void initialize(ReservedSpace rs, size_t alignment);
+   void initialize_virtual_space(ReservedSpace rs, size_t alignment);
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)cardTableExtension.cpp	1.34 07/05/17 15:52:46 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -66,7 +63,7 @@
+   CheckForUnmarkedObjects() {
+     ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+     assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+-    
++
+     _young_gen = heap->young_gen();
+     _card_table = (CardTableExtension*)heap->barrier_set();
+     // No point in asserting barrier set type here. Need to make CardTableExtension
+@@ -108,7 +105,7 @@
+ void CardTableExtension::scavenge_contents(ObjectStartArray* start_array,
+                                            MutableSpace* sp,
+                                            HeapWord* space_top,
+-                                           PSPromotionManager* pm) 
++                                           PSPromotionManager* pm)
+ {
+   assert(start_array != NULL && sp != NULL && pm != NULL, "Sanity");
+   assert(start_array->covered_region().contains(sp->used_region()),
+@@ -191,15 +188,21 @@
+           *first_nonclean_card++ = clean_card;
+         }
+         // scan oops in objects
+-        do {
+-	  if (depth_first) {
+-	    oop(bottom_obj)->push_contents(pm);
+-	  } else {
+-	    oop(bottom_obj)->copy_contents(pm);
+-	  }
+-          bottom_obj += oop(bottom_obj)->size();
+-          assert(bottom_obj <= sp_top, "just checking");
+-        } while (bottom_obj < top);
++        // hoisted the if (depth_first) check out of the loop
++        if (depth_first) {
++          do {
++            oop(bottom_obj)->push_contents(pm);
++            bottom_obj += oop(bottom_obj)->size();
++            assert(bottom_obj <= sp_top, "just checking");
++          } while (bottom_obj < top);
++          pm->drain_stacks_cond_depth();
++        } else {
++          do {
++            oop(bottom_obj)->copy_contents(pm);
++            bottom_obj += oop(bottom_obj)->size();
++            assert(bottom_obj <= sp_top, "just checking");
++          } while (bottom_obj < top);
++        }
+         // remember top oop* scanned
+         prev_top = top;
+       }
+@@ -307,32 +310,32 @@
+       jbyte* following_clean_card = current_card;
+ 
+       if (first_unclean_card < worker_end_card) {
+-	oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card));
+-	assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
+-	// "p" should always be >= "last_scanned" because newly GC dirtied
+-	// cards are no longer scanned again (see comment at end
+-	// of loop on the increment of "current_card").  Test that
+-	// hypothesis before removing this code.
+-	// If this code is removed, deal with the first time through
+-	// the loop when the last_scanned is the object starting in
+-	// the previous slice.
+-	assert((p >= last_scanned) || 
+-	       (last_scanned == first_object_within_slice),
+-	       "Should no longer be possible");
+-	if (p < last_scanned) {   
+-	  // Avoid scanning more than once; this can happen because
+-	  // newgen cards set by GC may a different set than the
+-	  // originally dirty set
+-	  p = last_scanned;
+-	}
+-	oop* to = (oop*)addr_for(following_clean_card);
++        oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card));
++        assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
++        // "p" should always be >= "last_scanned" because newly GC dirtied
++        // cards are no longer scanned again (see comment at end
++        // of loop on the increment of "current_card").  Test that
++        // hypothesis before removing this code.
++        // If this code is removed, deal with the first time through
++        // the loop when the last_scanned is the object starting in
++        // the previous slice.
++        assert((p >= last_scanned) ||
++               (last_scanned == first_object_within_slice),
++               "Should no longer be possible");
++        if (p < last_scanned) {
++          // Avoid scanning more than once; this can happen because
++          // newgen cards set by GC may be a different set than the
++          // originally dirty set
++          p = last_scanned;
++        }
++        oop* to = (oop*)addr_for(following_clean_card);
+ 
+         // Test slice_end first!
+         if ((HeapWord*)to > slice_end) {
+           to = (oop*)slice_end;
+         } else if (to > sp_top) {
+-	  to = sp_top;
+-	} 
++          to = sp_top;
++        }
+ 
+         // we know which cards to scan, now clear them
+         if (first_unclean_card <= worker_start_card+1)
+@@ -344,41 +347,56 @@
+           *first_unclean_card++ = clean_card;
+         }
+ 
+-	const int interval = PrefetchScanIntervalInBytes;
+-	// scan all objects in the range
+-	if (interval != 0) {
+-	  while (p < to) {
+-	    Prefetch::write(p, interval);
+-	    oop m = oop(p);
+-	    assert(m->is_oop_or_null(), "check for header");
+-	    if (depth_first) {
+-	      m->push_contents(pm); 
+-	    } else {
+-	      m->copy_contents(pm); 
+-	    }
+-            p += m->size();
++        const int interval = PrefetchScanIntervalInBytes;
++        // scan all objects in the range
++        if (interval != 0) {
++          // hoisted the if (depth_first) check out of the loop
++          if (depth_first) {
++            while (p < to) {
++              Prefetch::write(p, interval);
++              oop m = oop(p);
++              assert(m->is_oop_or_null(), "check for header");
++              m->push_contents(pm);
++              p += m->size();
++            }
++            pm->drain_stacks_cond_depth();
++          } else {
++            while (p < to) {
++              Prefetch::write(p, interval);
++              oop m = oop(p);
++              assert(m->is_oop_or_null(), "check for header");
++              m->copy_contents(pm);
++              p += m->size();
++            }
+           }
+-	} else {
+-	  while (p < to) {
+-	    oop m = oop(p);
+-	    assert(m->is_oop_or_null(), "check for header");
+-	    if (depth_first) {
+-	      m->push_contents(pm);
+-	    } else {
+-	      m->copy_contents(pm);
+-	    }
+-            p += m->size();
++        } else {
++          // hoisted the if (depth_first) check out of the loop
++          if (depth_first) {
++            while (p < to) {
++              oop m = oop(p);
++              assert(m->is_oop_or_null(), "check for header");
++              m->push_contents(pm);
++              p += m->size();
++            }
++            pm->drain_stacks_cond_depth();
++          } else {
++            while (p < to) {
++              oop m = oop(p);
++              assert(m->is_oop_or_null(), "check for header");
++              m->copy_contents(pm);
++              p += m->size();
++            }
+           }
+-	}
+-	last_scanned = p;
++        }
++        last_scanned = p;
+       }
+       // "current_card" is still the "following_clean_card" or
+       // the current_card is >= the worker_end_card so the
+       // loop will not execute again.
+       assert((current_card == following_clean_card) ||
+-	     (current_card >= worker_end_card),
+-	"current_card should only be incremented if it still equals "
+-	"following_clean_card");
++             (current_card >= worker_end_card),
++        "current_card should only be incremented if it still equals "
++        "following_clean_card");
+       // Increment current_card so that it is not processed again.
+       // It may now be dirty because an old-to-young pointer was
+       // found on it and updated.  If it is now dirty, it cannot be
+@@ -391,7 +409,7 @@
+ // This should be called before a scavenge.
+ void CardTableExtension::verify_all_young_refs_imprecise() {
+   CheckForUnmarkedObjects check;
+-  
++
+   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+ 
+@@ -474,10 +492,10 @@
+ }
+ 
+ // Assumes that only the base or the end changes.  This allows identification
+-// of the region that is being resized.  The 
++// of the region that is being resized.  The
+ // CardTableModRefBS::resize_covered_region() is used for the normal case
+ // where the covered regions are growing or shrinking at the high end.
+-// The method resize_covered_region_by_end() is analogous to 
++// The method resize_covered_region_by_end() is analogous to
+ // CardTableModRefBS::resize_covered_region() but
+ // for regions that grow or shrink at the low end.
+ void CardTableExtension::resize_covered_region(MemRegion new_region) {
+@@ -502,7 +520,7 @@
+       // This is a case where the covered region is growing or shrinking
+       // at the start of the region.
+       assert(changed_region != -1, "Don't expect to add a covered region");
+-      assert(_covered[changed_region].byte_size() != new_region.byte_size(), 
++      assert(_covered[changed_region].byte_size() != new_region.byte_size(),
+         "The sizes should be different here");
+       resize_covered_region_by_end(changed_region, new_region);
+       return;
+@@ -510,7 +528,7 @@
+   }
+   // This should only be a new covered region (where no existing
+   // covered region matches at the start or the end).
+-  assert(_cur_covered_regions < _max_covered_regions, 
++  assert(_cur_covered_regions < _max_covered_regions,
+     "An existing region should have been found");
+   resize_covered_region_by_start(new_region);
+ }
+@@ -521,8 +539,8 @@
+ }
+ 
+ void CardTableExtension::resize_covered_region_by_end(int changed_region,
+-  						      MemRegion new_region) {
+-  assert(SafepointSynchronize::is_at_safepoint(), 
++                                                      MemRegion new_region) {
++  assert(SafepointSynchronize::is_at_safepoint(),
+     "Only expect an expansion at the low end at a GC");
+   debug_only(verify_guard();)
+ #ifdef ASSERT
+@@ -574,10 +592,10 @@
+ }
+ 
+ void CardTableExtension::resize_commit_uncommit(int changed_region,
+-						MemRegion new_region) {
++                                                MemRegion new_region) {
+   // Commit new or uncommit old pages, if necessary.
+   MemRegion cur_committed = _committed[changed_region];
+-  assert(_covered[changed_region].end() == new_region.end(), 
++  assert(_covered[changed_region].end() == new_region.end(),
+     "The ends of the regions are expected to match");
+   // Extend the start of this _committed region
+   // to cover the start of any previous _committed region.
+@@ -587,14 +605,14 @@
+     // Only really need to set start of "cur_committed" to
+     // the new start (min_prev_start) but assertion checking code
+     // below use cur_committed.end() so make it correct.
+-    MemRegion new_committed = 
+-	MemRegion(min_prev_start, cur_committed.end());
++    MemRegion new_committed =
++        MemRegion(min_prev_start, cur_committed.end());
+     cur_committed = new_committed;
+   }
+ #ifdef ASSERT
+   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+-  assert(cur_committed.start() == 
+-    (HeapWord*) align_size_up((uintptr_t) cur_committed.start(), 
++  assert(cur_committed.start() ==
++    (HeapWord*) align_size_up((uintptr_t) cur_committed.start(),
+                               os::vm_page_size()),
+     "Starts should have proper alignment");
+ #endif
+@@ -612,30 +630,30 @@
+     // Expand the committed region
+     //
+     // Case A
+-    //						|+ guard +|
+-    //				|+ cur committed +++++++++|
+-    //			|+ new committed +++++++++++++++++|
++    //                                          |+ guard +|
++    //                          |+ cur committed +++++++++|
++    //                  |+ new committed +++++++++++++++++|
+     //
+     // Case B
+-    //						|+ guard +|
+-    //			      |+ cur committed +|
+-    //			|+ new committed +++++++|
++    //                                          |+ guard +|
++    //                        |+ cur committed +|
++    //                  |+ new committed +++++++|
+     //
+     // These are not expected because the calculation of the
+     // cur committed region and the new committed region
+     // share the same end for the covered region.
+     // Case C
+-    //						|+ guard +|
+-    //			      |+ cur committed +|
+-    //			|+ new committed +++++++++++++++++|
++    //                                          |+ guard +|
++    //                        |+ cur committed +|
++    //                  |+ new committed +++++++++++++++++|
+     // Case D
+-    //						|+ guard +|
+-    //			      |+ cur committed +++++++++++|
+-    //			|+ new committed +++++++|
++    //                                          |+ guard +|
++    //                        |+ cur committed +++++++++++|
++    //                  |+ new committed +++++++|
+ 
+-    HeapWord* new_end_for_commit = 
++    HeapWord* new_end_for_commit =
+       MIN2(cur_committed.end(), _guard_region.start());
+-    MemRegion new_committed = 
++    MemRegion new_committed =
+       MemRegion(new_start_aligned, new_end_for_commit);
+     if(!new_committed.is_empty()) {
+       if (!os::commit_memory((char*)new_committed.start(),
+@@ -646,7 +664,7 @@
+     }
+   } else if (new_start_aligned > cur_committed.start()) {
+     // Shrink the committed region
+-    MemRegion uncommit_region = committed_unique_to_self(changed_region, 
++    MemRegion uncommit_region = committed_unique_to_self(changed_region,
+       MemRegion(cur_committed.start(), new_start_aligned));
+     if (!uncommit_region.is_empty()) {
+       if (!os::uncommit_memory((char*)uncommit_region.start(),
+@@ -661,21 +679,21 @@
+ }
+ 
+ void CardTableExtension::resize_update_committed_table(int changed_region,
+-						       MemRegion new_region) {
++                                                       MemRegion new_region) {
+ 
+   jbyte* new_start = byte_for(new_region.start());
+   // Set the new start of the committed region
+   HeapWord* new_start_aligned =
+-    (HeapWord*)align_size_down((uintptr_t)new_start, 
++    (HeapWord*)align_size_down((uintptr_t)new_start,
+                              os::vm_page_size());
+-  MemRegion new_committed = MemRegion(new_start_aligned, 
++  MemRegion new_committed = MemRegion(new_start_aligned,
+     _committed[changed_region].end());
+   _committed[changed_region] = new_committed;
+   _committed[changed_region].set_start(new_start_aligned);
+ }
+ 
+ void CardTableExtension::resize_update_card_table_entries(int changed_region,
+-						          MemRegion new_region) {
++                                                          MemRegion new_region) {
+   debug_only(verify_guard();)
+   MemRegion original_covered = _covered[changed_region];
+   // Initialize the card entries.  Only consider the
+@@ -693,7 +711,7 @@
+ }
+ 
+ void CardTableExtension::resize_update_covered_table(int changed_region,
+-						     MemRegion new_region) {
++                                                     MemRegion new_region) {
+   // Update the covered region
+   _covered[changed_region].set_start(new_region.start());
+   _covered[changed_region].set_word_size(new_region.word_size());
+@@ -702,10 +720,10 @@
+   // of order.
+   for (int i = _cur_covered_regions-1 ; i > 0; i--) {
+     if (_covered[i].start() < _covered[i-1].start()) {
+-	MemRegion covered_mr = _covered[i-1];
+-	_covered[i-1] = _covered[i];
+-	_covered[i] = covered_mr;
+-	MemRegion committed_mr = _committed[i-1];
++        MemRegion covered_mr = _covered[i-1];
++        _covered[i-1] = _covered[i];
++        _covered[i] = covered_mr;
++        MemRegion committed_mr = _committed[i-1];
+       _committed[i-1] = _committed[i];
+       _committed[i] = committed_mr;
+       break;
+@@ -713,9 +731,9 @@
+   }
+ #ifdef ASSERT
+   for (int m = 0; m < _cur_covered_regions-1; m++) {
+-    assert(_covered[m].start() <= _covered[m+1].start(), 
++    assert(_covered[m].start() <= _covered[m+1].start(),
+       "Covered regions out of order");
+-    assert(_committed[m].start() <= _committed[m+1].start(), 
++    assert(_committed[m].start() <= _committed[m+1].start(),
+       "Committed regions out of order");
+   }
+ #endif
+@@ -745,18 +763,17 @@
+ //                               -------------
+ //                               |           |
+ //                               -------------
+-//      		^ returns this
++//                      ^ returns this
+ 
+ HeapWord* CardTableExtension::lowest_prev_committed_start(int ind) const {
+   assert(_cur_covered_regions >= 0, "Expecting at least one region");
+   HeapWord* min_start = _committed[ind].start();
+   for (int j = 0; j < ind; j++) {
+     HeapWord* this_start = _committed[j].start();
+-    if ((this_start < min_start) && 
++    if ((this_start < min_start) &&
+         !(_committed[j].intersection(_committed[ind])).is_empty()) {
+        min_start = this_start;
+     }
+   }
+   return min_start;
+ }
+-
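The scavenge_contents hunks above replace a per-object if (depth_first) test with two specialized loops, so the flag is evaluated once per card range instead of once per object (and the depth-first path can drain its stacks after the loop). The shape of that transformation, with the oop walk reduced to a plain array for illustration and hypothetical stand-in functions:

    // Stand-ins for oop::push_contents / oop::copy_contents.
    static void push_contents(int) {}
    static void copy_contents(int) {}

    // Before: the branch sits inside the hot loop.
    void scan_before(const int* p, const int* to, bool depth_first) {
      for (; p < to; ++p) {
        if (depth_first) push_contents(*p);
        else             copy_contents(*p);
      }
    }

    // After: the branch is hoisted; each loop body is branch-free.
    void scan_after(const int* p, const int* to, bool depth_first) {
      if (depth_first) {
        for (; p < to; ++p) push_contents(*p);
        // In the real code, pm->drain_stacks_cond_depth() runs here.
      } else {
        for (; p < to; ++p) copy_contents(*p);
      }
    }
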
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)cardTableExtension.hpp	1.20 07/05/05 17:05:26 JVM"
+-#endif
+ /*
+  * Copyright 2001-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class MutableSpace;
+@@ -34,7 +31,7 @@
+  private:
+   // Support methods for resizing the card table.
+   void resize_commit_uncommit(int changed_region, MemRegion new_region);
+-  void resize_update_card_table_entries(int changed_region, 
++  void resize_update_card_table_entries(int changed_region,
+                                         MemRegion new_region);
+   void resize_update_committed_table(int changed_region, MemRegion new_region);
+   void resize_update_covered_table(int changed_region, MemRegion new_region);
+@@ -60,7 +57,7 @@
+                          MutableSpace* sp,
+                          HeapWord* space_top,
+                          PSPromotionManager* pm);
+-    
++
+   void scavenge_contents_parallel(ObjectStartArray* start_array,
+                                   MutableSpace* sp,
+                                   HeapWord* space_top,
+@@ -92,10 +89,10 @@
+   // Allows adjustment of the base and size of the covered regions
+   void resize_covered_region(MemRegion new_region);
+   // Finds the covered region to resize based on the start address
+-  // of the covered regions.  
++  // of the covered regions.
+   void resize_covered_region_by_start(MemRegion new_region);
+   // Finds the covered region to resize based on the end address
+-  // of the covered regions.  
++  // of the covered regions.
+   void resize_covered_region_by_end(int changed_region, MemRegion new_region);
+   // Finds the lowest start address of a covered region that is
+   // previous (i.e., lower index) to the covered region with index "ind".
+@@ -106,7 +103,6 @@
+   bool is_valid_card_address(jbyte* addr) {
+     return (addr >= _byte_map) && (addr < _byte_map + _byte_map_size);
+   }
+-  
++
+ #endif // ASSERT
+ };
+-
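The Case A-D diagrams in resize_commit_uncommit above distinguish growing the committed card-table region at its low end (commit down to the new start, but never past the guard page) from shrinking it (uncommit the vacated prefix). A compact sketch of that decision with MemRegion reduced to a start/end pair; the commit/uncommit callbacks are hypothetical, and the real code additionally avoids uncommitting pages shared with other covered regions:

    #include <algorithm>
    #include <cstdint>

    using addr_t = std::uintptr_t;
    struct Region {
      addr_t start, end;
      bool is_empty() const { return start >= end; }
    };

    void resize_commit_uncommit(Region& committed, addr_t new_start_aligned,
                                addr_t guard_start,
                                void (*commit)(Region), void (*uncommit)(Region)) {
      if (new_start_aligned < committed.start) {
        // Growing (cases A and C above): never commit into the guard region.
        Region grow{new_start_aligned, std::min(committed.end, guard_start)};
        if (!grow.is_empty()) commit(grow);
      } else if (new_start_aligned > committed.start) {
        // Shrinking: release only the prefix no longer covered.
        Region shrink{committed.start, new_start_aligned};
        if (!shrink.is_empty()) uncommit(shrink);
      }
      committed.start = new_start_aligned;
    }
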
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)gcTaskManager.cpp	1.34 07/05/05 17:05:27 JVM"
+-#endif
+ /*
+  * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -96,9 +93,9 @@
+ }
+ )
+ 
+-// 
++//
+ // GCTaskQueue
+-// 
++//
+ 
+ GCTaskQueue* GCTaskQueue::create() {
+   GCTaskQueue* result = new GCTaskQueue(false);
+@@ -138,10 +135,10 @@
+     tty->print_cr("[" INTPTR_FORMAT "]"
+                   " GCTaskQueue::destroy()"
+                   "  is_c_heap_obj:  %s",
+-                  that, 
++                  that,
+                   that->is_c_heap_obj() ? "true" : "false");
+   }
+-  // That instance may have been allocated as a CHeapObj, 
++  // That instance may have been allocated as a CHeapObj,
+   // in which case we have to free it explicitly.
+   if (that != NULL) {
+     that->destruct();
+@@ -328,12 +325,12 @@
+ }
+ )
+ 
+-// 
++//
+ // SynchronizedGCTaskQueue
+-// 
++//
+ 
+ SynchronizedGCTaskQueue::SynchronizedGCTaskQueue(GCTaskQueue* queue_arg,
+-                                                 Mutex*       lock_arg) :
++                                                 Monitor*     lock_arg) :
+   _unsynchronized_queue(queue_arg),
+   _lock(lock_arg) {
+   assert(unsynchronized_queue() != NULL, "null queue");
+@@ -344,9 +341,9 @@
+   // Nothing to do.
+ }
+ 
+-// 
++//
+ // GCTaskManager
+-// 
++//
+ GCTaskManager::GCTaskManager(uint workers) :
+   _workers(workers),
+   _ndc(NULL) {
+@@ -453,7 +450,7 @@
+     thread(i)->print_on(st);
+     st->cr();
+   }
+-} 
++}
+ 
+ void GCTaskManager::threads_do(ThreadClosure* tc) {
+   assert(tc != NULL, "Null ThreadClosure");
+@@ -512,9 +509,9 @@
+   GCTask* result = NULL;
+   // Grab the queue lock.
+   MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
+-  // Wait while the queue is block or 
++  // Wait while the queue is blocked or
+   // there is nothing to do, except maybe release resources.
+-  while (is_blocked() || 
++  while (is_blocked() ||
+          (queue()->is_empty() && !should_release_resources(which))) {
+     if (TraceGCTaskManager) {
+       tty->print_cr("GCTaskManager::get_task(%u)"
+@@ -608,7 +605,7 @@
+                   emptied_queue());
+   }
+   // Tell everyone that a task has completed.
+-  (void) monitor()->notify_all();  
++  (void) monitor()->notify_all();
+   // Release monitor().
+ }
+ 
+@@ -660,7 +657,7 @@
+   _resource_flag[which] = value;
+ }
+ 
+-// 
++//
+ // NoopGCTask
+ //
+ 
+@@ -695,7 +692,7 @@
+ 
+ void BarrierGCTask::do_it(GCTaskManager* manager, uint which) {
+   // Wait for this to be the only busy worker.
+-  // ??? I thought of having a StackObj class 
++  // ??? I thought of having a StackObj class
+   //     whose constructor would grab the lock and come to the barrier,
+   //     and whose destructor would release the lock,
+   //     but that seems like too much mechanism for two lines of code.
+@@ -722,7 +719,7 @@
+   // Nothing else to do.
+ }
+ 
+-// 
++//
+ // ReleasingBarrierGCTask
+ //
+ 
+@@ -738,9 +735,9 @@
+   // Nothing else to do.
+ }
+ 
+-// 
++//
+ // NotifyingBarrierGCTask
+-// 
++//
+ 
+ void NotifyingBarrierGCTask::do_it(GCTaskManager* manager, uint which) {
+   MutexLockerEx ml(manager->lock(), Mutex::_no_safepoint_check_flag);
+@@ -757,9 +754,9 @@
+   // Nothing else to do.
+ }
+ 
+-// 
++//
+ // WaitForBarrierGCTask
+-// 
++//
+ WaitForBarrierGCTask* WaitForBarrierGCTask::create() {
+   WaitForBarrierGCTask* result = new WaitForBarrierGCTask(false);
+   return result;
+@@ -777,7 +774,7 @@
+   if (TraceGCTaskManager) {
+     tty->print_cr("[" INTPTR_FORMAT "]"
+                   " WaitForBarrierGCTask::WaitForBarrierGCTask()"
+-                  "  monitor: " INTPTR_FORMAT, 
++                  "  monitor: " INTPTR_FORMAT,
+                   this, monitor());
+   }
+ }
+@@ -788,9 +785,9 @@
+       tty->print_cr("[" INTPTR_FORMAT "]"
+                     " WaitForBarrierGCTask::destroy()"
+                     "  is_c_heap_obj: %s"
+-                    "  monitor: " INTPTR_FORMAT, 
+-                    that, 
+-                    that->is_c_heap_obj() ? "true" : "false", 
++                    "  monitor: " INTPTR_FORMAT,
++                    that,
++                    that->is_c_heap_obj() ? "true" : "false",
+                     that->monitor());
+     }
+     that->destruct();
+@@ -805,23 +802,23 @@
+   if (TraceGCTaskManager) {
+     tty->print_cr("[" INTPTR_FORMAT "]"
+                   " WaitForBarrierGCTask::destruct()"
+-                  "  monitor: " INTPTR_FORMAT, 
++                  "  monitor: " INTPTR_FORMAT,
+                   this, monitor());
+   }
+   this->BarrierGCTask::destruct();
+-  // Clean up that should be in the destructor, 
++  // Clean up that should be in the destructor,
+   // except that ResourceMarks don't call destructors.
+    if (monitor() != NULL) {
+      MonitorSupply::release(monitor());
+   }
+   _monitor = (Monitor*) 0xDEAD000F;
+ }
+-  
++
+ void WaitForBarrierGCTask::do_it(GCTaskManager* manager, uint which) {
+   if (TraceGCTaskManager) {
+     tty->print_cr("[" INTPTR_FORMAT "]"
+                   " WaitForBarrierGCTask::do_it() waiting for idle"
+-                  "  monitor: " INTPTR_FORMAT, 
++                  "  monitor: " INTPTR_FORMAT,
+                   this, monitor());
+   }
+   {
+@@ -839,7 +836,7 @@
+     if (TraceGCTaskManager) {
+       tty->print_cr("[" INTPTR_FORMAT "]"
+                     " WaitForBarrierGCTask::do_it()"
+-                    "  [" INTPTR_FORMAT "] (%s)->notify_all()", 
++                    "  [" INTPTR_FORMAT "] (%s)->notify_all()",
+                     this, monitor(), monitor()->name());
+     }
+     monitor()->notify_all();
+@@ -851,7 +848,7 @@
+   if (TraceGCTaskManager) {
+     tty->print_cr("[" INTPTR_FORMAT "]"
+                   " WaitForBarrierGCTask::wait_for()"
+-      "  should_wait: %s", 
++      "  should_wait: %s",
+       this, should_wait() ? "true" : "false");
+   }
+   {
+@@ -861,7 +858,7 @@
+       if (TraceGCTaskManager) {
+         tty->print_cr("[" INTPTR_FORMAT "]"
+                       " WaitForBarrierGCTask::wait_for()"
+-          "  [" INTPTR_FORMAT "] (%s)->wait()", 
++          "  [" INTPTR_FORMAT "] (%s)->wait()",
+           this, monitor(), monitor()->name());
+       }
+       monitor()->wait(Mutex::_no_safepoint_check_flag, 0);
+@@ -871,7 +868,7 @@
+     if (TraceGCTaskManager) {
+       tty->print_cr("[" INTPTR_FORMAT "]"
+                     " WaitForBarrierGCTask::wait_for() returns"
+-        "  should_wait: %s", 
++        "  should_wait: %s",
+         this, should_wait() ? "true" : "false");
+     }
+     // Release monitor().
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)gcTaskManager.hpp	1.28 07/05/05 17:05:26 JVM"
+-#endif
+ /*
+  * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,12 +19,12 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+-// The GCTaskManager is a queue of GCTasks, and accessors 
+-// to allow the queue to be accessed from many threads.  
++// The GCTaskManager is a queue of GCTasks, and accessors
++// to allow the queue to be accessed from many threads.
+ //
+ 
+ // Forward declarations of types defined in this file.
+@@ -60,7 +57,7 @@
+     enum kind {
+       unknown_task,
+       ordinary_task,
+-      barrier_task, 
++      barrier_task,
+       noop_task
+     };
+     static const char* to_string(kind value);
+@@ -69,7 +66,7 @@
+   // Instance state.
+   const Kind::kind _kind;               // For runtime type checking.
+   const uint       _affinity;           // Which worker should run task.
+-  GCTask*          _newer;              // Tasks are on doubly-linked ... 
++  GCTask*          _newer;              // Tasks are on doubly-linked ...
+   GCTask*          _older;              // ... lists.
+ public:
+   virtual char* name() { return (char *)"task"; }
+@@ -116,9 +113,9 @@
+   GCTask(uint affinity);
+   //     A GCTask of a particular kind, with an affinity.
+   GCTask(Kind::kind kind, uint affinity);
+-  // We want a virtual destructor because virtual methods, 
+-  // but since ResourceObj's don't have their destructors 
+-  // called, we don't have one at all.  Instead we have 
++  // We want a virtual destructor because virtual methods,
++  // but since ResourceObj's don't have their destructors
++  // called, we don't have one at all.  Instead we have
+   // this method, which gets called by subclasses to clean up.
+   virtual void destruct();
+   // Methods.
+@@ -126,7 +123,7 @@
+ };
+ 
+ // A doubly-linked list of GCTasks.
+-// The list is not synchronized, because sometimes we want to 
++// The list is not synchronized, because sometimes we want to
+ // build up a list and then make it available to other threads.
+ // See also: SynchronizedGCTaskQueue.
+ class GCTaskQueue : public ResourceObj {
+@@ -167,7 +164,7 @@
+ protected:
+   // Constructor. Clients use factory, but there might be subclasses.
+   GCTaskQueue(bool on_c_heap);
+-  // Destructor-like method. 
++  // Destructor-like method.
+   // Because ResourceMark doesn't call destructors.
+   // This method cleans up like one.
+   virtual void destruct();
+@@ -209,10 +206,10 @@
+ private:
+   // Instance state.
+   GCTaskQueue* _unsynchronized_queue;   // Has-a unsynchronized queue.
+-  Mutex*       _lock;                   // Lock to control access.
++  Monitor *    _lock;                   // Lock to control access.
+ public:
+   // Factory create and destroy methods.
+-  static SynchronizedGCTaskQueue* create(GCTaskQueue* queue, Mutex* lock) {
++  static SynchronizedGCTaskQueue* create(GCTaskQueue* queue, Monitor * lock) {
+     return new SynchronizedGCTaskQueue(queue, lock);
+   }
+   static void destroy(SynchronizedGCTaskQueue* that) {
+@@ -224,7 +221,7 @@
+   GCTaskQueue* unsynchronized_queue() const {
+     return _unsynchronized_queue;
+   }
+-  Mutex* lock() const {
++  Monitor * lock() const {
+     return _lock;
+   }
+   // GCTaskQueue wrapper methods.
+@@ -260,12 +257,12 @@
+   }
+ protected:
+   // Constructor.  Clients use factory, but there might be subclasses.
+-  SynchronizedGCTaskQueue(GCTaskQueue* queue, Mutex* lock);
++  SynchronizedGCTaskQueue(GCTaskQueue* queue, Monitor * lock);
+   // Destructor.  Not virtual because no virtuals.
+   ~SynchronizedGCTaskQueue();
+ };
+ 
+-// This is an abstract base class for getting notifications 
++// This is an abstract base class for getting notifications
+ // when a GCTaskManager is done.
+ class NotifyDoneClosure : public CHeapObj {
+ public:
+@@ -281,7 +278,7 @@
+     // Nothing to do.
+   }
+ };
+-  
++
+ class GCTaskManager : public CHeapObj {
+  friend class ParCompactionManager;
+  friend class PSParallelCompact;
+@@ -325,7 +322,7 @@
+   Monitor* monitor() const {
+     return _monitor;
+   }
+-  Mutex* lock() const {
++  Monitor * lock() const {
+     return _monitor;
+   }
+   // Methods.
+@@ -355,7 +352,7 @@
+ 
+   //     Execute the task queue and wait for the completion.
+   void execute_and_wait(GCTaskQueue* list);
+-  
++
+   void print_task_time_stamps();
+   void print_threads_on(outputStream* st);
+   void threads_do(ThreadClosure* tc);
+@@ -458,11 +455,11 @@
+   void initialize();
+ };
+ 
+-// 
++//
+ // Some exemplary GCTasks.
+-// 
++//
+ 
+-// A noop task that does nothing, 
++// A noop task that does nothing,
+ // except take us around the GCTaskThread loop.
+ class NoopGCTask : public GCTask {
+ private:
+@@ -491,7 +488,7 @@
+   }
+ };
+ 
+-// A BarrierGCTask blocks other tasks from starting, 
++// A BarrierGCTask blocks other tasks from starting,
+ // and waits until it is the only task running.
+ class BarrierGCTask : public GCTask {
+ public:
+@@ -520,7 +517,7 @@
+   void do_it_internal(GCTaskManager* manager, uint which);
+ };
+ 
+-// A ReleasingBarrierGCTask is a BarrierGCTask 
++// A ReleasingBarrierGCTask is a BarrierGCTask
+ // that tells all the tasks to release their resource areas.
+ class ReleasingBarrierGCTask : public BarrierGCTask {
+ public:
+@@ -544,9 +541,9 @@
+   }
+   // Destructor-like method.
+   void destruct();
+-}; 
++};
+ 
+-// A NotifyingBarrierGCTask is a BarrierGCTask 
++// A NotifyingBarrierGCTask is a BarrierGCTask
+ // that calls a notification method when it is the only task running.
+ class NotifyingBarrierGCTask : public BarrierGCTask {
+ private:
+@@ -578,8 +575,8 @@
+   NotifyDoneClosure* notify_done_closure() const { return _ndc; }
+ };
+ 
+-// A WaitForBarrierGCTask is a BarrierGCTask 
+-// with a method you can call to wait until 
++// A WaitForBarrierGCTask is a BarrierGCTask
++// with a method you can call to wait until
+ // the BarrierGCTask is done.
+ // This may cover many of the uses of NotifyingBarrierGCTasks.
+ class WaitForBarrierGCTask : public BarrierGCTask {
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,7 +1,4 @@
+ 
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)gcTaskThread.cpp	1.25 07/05/05 17:05:26 JVM"
+-#endif
+ /*
+  * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -23,7 +20,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -108,8 +105,8 @@
+   // Part of thread setup.
+   // ??? Are these set up once here to make subsequent ones fast?
+   HandleMark   hm_outer;
+-  ResourceMark rm_outer; 
+- 
++  ResourceMark rm_outer;
++
+   TimeStamp timer;
+ 
+   for (;/* ever */;) {
+@@ -151,4 +148,3 @@
+     }
+   }
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)gcTaskThread.hpp	1.18 07/05/05 17:05:27 JVM"
+-#endif
+ /*
+  * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Forward declarations of classes defined here.
+@@ -37,7 +34,7 @@
+   // Instance state.
+   GCTaskManager* _manager;              // Manager for worker.
+   const uint     _processor_id;         // Which processor the worker is on.
+-  
++
+   GCTaskTimeStamp* _time_stamps;
+   uint _time_stamp_index;
+ 
+@@ -65,7 +62,7 @@
+ 
+   void print_task_time_stamps();
+   void print_on(outputStream* st) const;
+-  void print() const				    { print_on(tty); }
++  void print() const                                { print_on(tty); }
+ 
+ protected:
+   // Constructor.  Clients use factory, but there could be subclasses.
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)generationSizer.hpp	1.17 07/05/05 17:05:27 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // There is a nice batch of tested generation sizing code in
+@@ -39,7 +36,7 @@
+   void initialize_flags() {
+     // Do basic sizing work
+     this->TwoGenerationCollectorPolicy::initialize_flags();
+-    
++
+     // If the user hasn't explicitly set the number of worker
+     // threads, set the count.
+     if (ParallelGCThreads == 0) {
+@@ -70,5 +67,3 @@
+   size_t perm_gen_size()      { return PermSize; }
+   size_t max_perm_gen_size()  { return MaxPermSize; }
+ };
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)objectStartArray.cpp	1.20 07/05/05 17:05:28 JVM"
+-#endif
+ /*
+  * Copyright 2001-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -40,10 +37,10 @@
+   size_t bytes_to_reserve = reserved_region.word_size() / block_size_in_words;
+   assert(bytes_to_reserve > 0, "Sanity");
+ 
+-  bytes_to_reserve = 
++  bytes_to_reserve =
+     align_size_up(bytes_to_reserve, os::vm_allocation_granularity());
+ 
+-  // Do not use large-pages for the backing store. The one large page region 
++  // Do not use large-pages for the backing store. The one large page region
+   // will be used for the heap proper.
+   ReservedSpace backing_store(bytes_to_reserve);
+   if (!backing_store.is_reserved()) {
+@@ -81,7 +78,7 @@
+   size_t requested_blocks_size_in_bytes = mr.word_size() / block_size_in_words;
+ 
+   // Only commit memory in page sized chunks
+-  requested_blocks_size_in_bytes = 
++  requested_blocks_size_in_bytes =
+     align_size_up(requested_blocks_size_in_bytes, os::vm_page_size());
+ 
+   _covered_region = mr;
+@@ -117,8 +114,8 @@
+ }
+ 
+ 
+-bool ObjectStartArray::object_starts_in_range(HeapWord* start_addr, 
+-					      HeapWord* end_addr) const {
++bool ObjectStartArray::object_starts_in_range(HeapWord* start_addr,
++                                              HeapWord* end_addr) const {
+   assert(start_addr <= end_addr, "range is wrong");
+   if (start_addr > end_addr) {
+     return false;
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)objectStartArray.hpp	1.21 07/05/05 17:05:28 JVM"
+-#endif
+ /*
+  * Copyright 2001-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+@@ -56,7 +53,7 @@
+  protected:
+ 
+   // Mapping from address to object start array entry
+-  jbyte* block_for_addr(void* p) const { 
++  jbyte* block_for_addr(void* p) const {
+     assert(_covered_region.contains(p),
+            "out of bounds access to object start array");
+     jbyte* result = &_offset_base[uintptr_t(p) >> block_shift];
+@@ -66,9 +63,9 @@
+   }
+ 
+   // Mapping from object start array entry to address of first word
+-  HeapWord* addr_for_block(jbyte* p) { 
++  HeapWord* addr_for_block(jbyte* p) {
+     assert(_blocks_region.contains(p),
+-	   "out of bounds access to object start array");
++           "out of bounds access to object start array");
+     size_t delta = pointer_delta(p, _offset_base, sizeof(jbyte));
+     HeapWord* result = (HeapWord*) (delta << block_shift);
+     assert(_covered_region.contains(result),
+@@ -86,7 +83,7 @@
+     }
+ 
+     assert(_blocks_region.contains(p),
+-	   "out of bounds access to object start array");
++           "out of bounds access to object start array");
+ 
+     if (*p == clean_block) {
+       return _covered_region.end();
+@@ -95,7 +92,7 @@
+     size_t delta = pointer_delta(p, _offset_base, sizeof(jbyte));
+     HeapWord* result = (HeapWord*) (delta << block_shift);
+     result += *p;
+-    
++
+     assert(_covered_region.contains(result),
+            "out of bounds accessor from card marking array");
+ 
+@@ -103,7 +100,7 @@
+   }
+ 
+  public:
+-  
++
+   // This method is in lieu of a constructor, so that this class can be
+   // embedded inline in other classes.
+   void initialize(MemRegion reserved_region);
+@@ -163,4 +160,3 @@
+   // "start", the method will return true.
+   bool object_starts_in_range(HeapWord* start_addr, HeapWord* end_addr) const;
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)parallelScavengeHeap.cpp	1.94 07/05/17 15:52:49 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -36,96 +33,135 @@
+ ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL;
+ GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;
+ 
++static void trace_gen_sizes(const char* const str,
++                            size_t pg_min, size_t pg_max,
++                            size_t og_min, size_t og_max,
++                            size_t yg_min, size_t yg_max)
++{
++  if (TracePageSizes) {
++    tty->print_cr("%s:  " SIZE_FORMAT "," SIZE_FORMAT " "
++                  SIZE_FORMAT "," SIZE_FORMAT " "
++                  SIZE_FORMAT "," SIZE_FORMAT " "
++                  SIZE_FORMAT,
++                  str, pg_min / K, pg_max / K,
++                  og_min / K, og_max / K,
++                  yg_min / K, yg_max / K,
++                  (pg_max + og_max + yg_max) / K);
++  }
++}
++
+ jint ParallelScavengeHeap::initialize() {
+   // Cannot be initialized until after the flags are parsed
+   GenerationSizer flag_parser;
+ 
+-  size_t max_young_size = flag_parser.max_young_gen_size();
+-  size_t max_old_size = flag_parser.max_old_gen_size();
+-  if (UseLargePages && 
+-      (max_young_size + max_old_size) >= LargePageHeapSizeThreshold) {
+-    adjust_generation_alignment_for_page_size(os::large_page_size());
+-  }
+-  const size_t alignment = generation_alignment();
+-
+-  // Check alignments
+-// NEEDS_CLEANUP   The default TwoGenerationCollectorPolicy uses
+-//   NewRatio;  it should check UseAdaptiveSizePolicy. Changes from
+-//   generationSizer could move to the common code.
+-  size_t min_young_size = 
+-    align_size_up(flag_parser.min_young_gen_size(), alignment);
+-  size_t young_size = align_size_up(flag_parser.young_gen_size(), alignment);
+-  max_young_size = align_size_up(max_young_size, alignment);
+-
+-  size_t min_old_size = 
+-    align_size_up(flag_parser.min_old_gen_size(), alignment);
+-  size_t old_size = align_size_up(flag_parser.old_gen_size(), alignment);
+-  old_size = MAX2(old_size, min_old_size);
+-  max_old_size = align_size_up(max_old_size, alignment);
+-
+-  size_t perm_size = align_size_up(flag_parser.perm_gen_size(), alignment);
+-  size_t max_perm_size = align_size_up(flag_parser.max_perm_gen_size(), 
+-                                                                  alignment);
+-
+-  // Calculate the total size.
+-  size_t total_reserved = max_young_size + max_old_size + max_perm_size;
+-
+-  if (UseLargePages) {
+-    total_reserved = round_to(total_reserved, os::large_page_size());
+-  }
+-
+-  ReservedSpace heap_rs(total_reserved, alignment, UseLargePages);
++  size_t yg_min_size = flag_parser.min_young_gen_size();
++  size_t yg_max_size = flag_parser.max_young_gen_size();
++  size_t og_min_size = flag_parser.min_old_gen_size();
++  size_t og_max_size = flag_parser.max_old_gen_size();
++  // Why isn't there a min_perm_gen_size()?
++  size_t pg_min_size = flag_parser.perm_gen_size();
++  size_t pg_max_size = flag_parser.max_perm_gen_size();
++
++  trace_gen_sizes("ps heap raw",
++                  pg_min_size, pg_max_size,
++                  og_min_size, og_max_size,
++                  yg_min_size, yg_max_size);
++
++  // The ReservedSpace ctor used below requires that the page size for the perm
++  // gen is <= the page size for the rest of the heap (young + old gens).
++  const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
++                                                     yg_max_size + og_max_size,
++                                                     8);
++  const size_t pg_page_sz = MIN2(os::page_size_for_region(pg_min_size,
++                                                          pg_max_size, 16),
++                                 og_page_sz);
++
++  const size_t pg_align = set_alignment(_perm_gen_alignment,  pg_page_sz);
++  const size_t og_align = set_alignment(_old_gen_alignment,   og_page_sz);
++  const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);
++
++  // Update sizes to reflect the selected page size(s).
++  //
++  // NEEDS_CLEANUP.  The default TwoGenerationCollectorPolicy uses NewRatio; it
++  // should check UseAdaptiveSizePolicy.  Changes from generationSizer could
++  // move to the common code.
++  yg_min_size = align_size_up(yg_min_size, yg_align);
++  yg_max_size = align_size_up(yg_max_size, yg_align);
++  size_t yg_cur_size = align_size_up(flag_parser.young_gen_size(), yg_align);
++  yg_cur_size = MAX2(yg_cur_size, yg_min_size);
++
++  og_min_size = align_size_up(og_min_size, og_align);
++  og_max_size = align_size_up(og_max_size, og_align);
++  size_t og_cur_size = align_size_up(flag_parser.old_gen_size(), og_align);
++  og_cur_size = MAX2(og_cur_size, og_min_size);
++
++  pg_min_size = align_size_up(pg_min_size, pg_align);
++  pg_max_size = align_size_up(pg_max_size, pg_align);
++  size_t pg_cur_size = pg_min_size;
++
++  trace_gen_sizes("ps heap rnd",
++                  pg_min_size, pg_max_size,
++                  og_min_size, og_max_size,
++                  yg_min_size, yg_max_size);
++
++  // The main part of the heap (old gen + young gen) can often use a larger page
++  // size than is needed or wanted for the perm gen.  Use the "compound
++  // alignment" ReservedSpace ctor to avoid having to use the same page size for
++  // all gens.
++  ReservedSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
++                        og_align);
++  os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
++                       heap_rs.base(), pg_max_size);
++  os::trace_page_sizes("ps main", og_min_size + yg_min_size,
++                       og_max_size + yg_max_size, og_page_sz,
++                       heap_rs.base() + pg_max_size,
++                       heap_rs.size() - pg_max_size);
+   if (!heap_rs.is_reserved()) {
+     vm_shutdown_during_initialization(
+       "Could not reserve enough space for object heap");
+     return JNI_ENOMEM;
+   }
+ 
+-  _reserved_byte_size = heap_rs.size();
+   _reserved = MemRegion((HeapWord*)heap_rs.base(),
+-			(HeapWord*)(heap_rs.base() + heap_rs.size()));
+-
+-  HeapWord* boundary = (HeapWord*)(heap_rs.base() + max_young_size);
+-  CardTableExtension* card_table_barrier_set = new CardTableExtension(_reserved, 3);
+-  _barrier_set = card_table_barrier_set;
++                        (HeapWord*)(heap_rs.base() + heap_rs.size()));
+ 
++  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
++  _barrier_set = barrier_set;
+   oopDesc::set_bs(_barrier_set);
+   if (_barrier_set == NULL) {
+     vm_shutdown_during_initialization(
+-      "Could not reserve enough space for barrier set"); 
++      "Could not reserve enough space for barrier set");
+     return JNI_ENOMEM;
+   }
+ 
+   // Initial young gen size is 4 Mb
+-  size_t init_young_size = align_size_up(4 * M, alignment);
+-  init_young_size = MAX2(MIN2(init_young_size, max_young_size), young_size);
+-
+-  // Divide up the reserved space: perm, old, young
+-  ReservedSpace perm_rs  = heap_rs.first_part(max_perm_size);
+-  ReservedSpace old_young_rs                
+-			 = heap_rs.last_part(max_perm_size);
+-  ReservedSpace old_rs   = old_young_rs.first_part(max_old_size);
+-  heap_rs                = old_young_rs.last_part(max_old_size);
+-  ReservedSpace young_rs = heap_rs.first_part(max_young_size);
+-  assert(young_rs.size() == heap_rs.size(), "Didn't reserve all of the heap");
++  //
++  // XXX - what about flag_parser.young_gen_size()?
++  const size_t init_young_size = align_size_up(4 * M, yg_align);
++  yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size);
++
++  // Split the reserved space into perm gen and the main heap (everything else).
++  // The main heap uses a different alignment.
++  ReservedSpace perm_rs = heap_rs.first_part(pg_max_size);
++  ReservedSpace main_rs = heap_rs.last_part(pg_max_size, og_align);
+ 
+   // Make up the generations
+   // Calculate the maximum size that a generation can grow.  This
+   // includes growth into the other generation.  Note that the
+-  // parameter _max_gen_size is kept as the maximum 
++  // parameter _max_gen_size is kept as the maximum
+   // size of the generation as the boundaries currently stand.
+   // _max_gen_size is still used as that value.
+   double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
+   double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
+ 
+-  _gens = new AdjoiningGenerations(old_young_rs,
+-  				   old_size,
+-		                   min_old_size,
+-		                   max_old_size,
+-		                   init_young_size,
+-		                   min_young_size,
+-		                   max_young_size,
+-				   alignment);
++  _gens = new AdjoiningGenerations(main_rs,
++                                   og_cur_size,
++                                   og_min_size,
++                                   og_max_size,
++                                   yg_cur_size,
++                                   yg_min_size,
++                                   yg_max_size,
++                                   yg_align);
+ 
+   _old_gen = _gens->old_gen();
+   _young_gen = _gens->young_gen();
+@@ -135,28 +171,27 @@
+   const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
+   _size_policy =
+     new PSAdaptiveSizePolicy(eden_capacity,
+-			     initial_promo_size,
+-			     young_gen()->to_space()->capacity_in_bytes(),
+-			     generation_alignment(),
+-			     intra_generation_alignment(),
+-			     max_gc_pause_sec,
+-			     max_gc_minor_pause_sec,
+-			     GCTimeRatio
+-			     );
++                             initial_promo_size,
++                             young_gen()->to_space()->capacity_in_bytes(),
++                             intra_generation_alignment(),
++                             max_gc_pause_sec,
++                             max_gc_minor_pause_sec,
++                             GCTimeRatio
++                             );
+ 
+   _perm_gen = new PSPermGen(perm_rs,
+-			    alignment,
+-                            perm_size,
+-                            perm_size,
+-                            max_perm_size,
++                            pg_align,
++                            pg_cur_size,
++                            pg_cur_size,
++                            pg_max_size,
+                             "perm", 2);
+ 
+   assert(!UseAdaptiveGCBoundary ||
+-    (old_gen()->virtual_space()->high_boundary() == 
++    (old_gen()->virtual_space()->high_boundary() ==
+      young_gen()->virtual_space()->low_boundary()),
+     "Boundaries must meet");
+   // initialize the policy counters - 2 collectors, 3 generations
+-  _gc_policy_counters = 
++  _gc_policy_counters =
+     new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
+   _psh = this;
+ 
+@@ -170,22 +205,6 @@
+   return JNI_OK;
+ }
+ 
+-// Set the alignment of the generation so it would be aligned
+-// to both page_size and to intra_generation_alignment.
+-// This would require that we align to
+-// LCM(page_size, intra_generation_alignment), where LCM is
+-// "least common multiple". However, when page_size and
+-// intra_generation_alignment are both powers of 2, then
+-// round_to() below computes the same result; hence the
+-// assert below.
+-void ParallelScavengeHeap::adjust_generation_alignment_for_page_size(
+-  size_t page_size) {
+-  assert(is_power_of_2(page_size), 
+-    "Should not use round_to() if page size is not a power of 2");
+-  // round_to() checks that its second parameter is a power of 2
+-  set_generation_alignment(round_to(page_size, intra_generation_alignment()));
+-}
+-
+ void ParallelScavengeHeap::post_initialize() {
+   // Need to init the tenuring threshold
+   PSScavenge::initialize();
+@@ -276,7 +295,7 @@
+ // Static method
+ bool ParallelScavengeHeap::is_in_young(oop* p) {
+   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, 
++  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
+                                             "Must be ParallelScavengeHeap");
+ 
+   PSYoungGen* young_gen = heap->young_gen();
+@@ -291,7 +310,7 @@
+ // Static method
+ bool ParallelScavengeHeap::is_in_old_or_perm(oop* p) {
+   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, 
++  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
+                                             "Must be ParallelScavengeHeap");
+ 
+   PSOldGen* old_gen = heap->old_gen();
+@@ -339,10 +358,10 @@
+ // during failed allocation attempts. If the java heap becomes exhausted,
+ // we rely on the size_policy object to force a bail out.
+ HeapWord* ParallelScavengeHeap::mem_allocate(
+-				     size_t size, 
+-				     bool is_noref, 
+-				     bool is_tlab,
+-				     bool* gc_overhead_limit_was_exceeded) {
++                                     size_t size,
++                                     bool is_noref,
++                                     bool is_tlab,
++                                     bool* gc_overhead_limit_was_exceeded) {
+   assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
+   assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
+   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
+@@ -369,7 +388,7 @@
+       gc_count = Universe::heap()->total_collections();
+ 
+       result = young_gen()->allocate(size, is_tlab);
+-  
++
+       // (1) If the requested object is too large to easily fit in the
+       //     young_gen, or
+       // (2) If GC is locked out via GCLocker, young gen is full and
+@@ -432,8 +451,8 @@
+       if (size_policy()->gc_time_limit_exceeded()) {
+         size_policy()->set_gc_time_limit_exceeded(false);
+         if (PrintGCDetails && Verbose) {
+-	gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
+-	  "return NULL because gc_time_limit_exceeded is set");
++        gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
++          "return NULL because gc_time_limit_exceeded is set");
+         }
+         return NULL;
+       }
+@@ -446,7 +465,7 @@
+       // This prevents us from looping until time out on requests that can
+       // not be satisfied.
+       if (op.prologue_succeeded()) {
+-        assert(Universe::heap()->is_in_or_null(op.result()), 
++        assert(Universe::heap()->is_in_or_null(op.result()),
+           "result not in heap");
+ 
+         // If GC was locked out during VM operation then retry allocation
+@@ -456,15 +475,15 @@
+           continue;  // retry and/or stall as necessary
+         }
+         // If a NULL result is being returned, an out-of-memory
+-	// will be thrown now.  Clear the gc_time_limit_exceeded
+-	// flag to avoid the following situation.
+-	// 	gc_time_limit_exceeded is set during a collection
+-	//	the collection fails to return enough space and an OOM is thrown
+-	//	the next GC is skipped because the gc_time_limit_exceeded
+-	//	  flag is set and another OOM is thrown
+-	if (op.result() == NULL) {
++        // will be thrown now.  Clear the gc_time_limit_exceeded
++        // flag to avoid the following situation.
++        //      gc_time_limit_exceeded is set during a collection
++        //      the collection fails to return enough space and an OOM is thrown
++        //      the next GC is skipped because the gc_time_limit_exceeded
++        //        flag is set and another OOM is thrown
++        if (op.result() == NULL) {
+           size_policy()->set_gc_time_limit_exceeded(false);
+-	}
++        }
+         return op.result();
+       }
+     }
+@@ -472,7 +491,7 @@
+     // The policy object will prevent us from looping forever. If the
+     // time spent in gc crosses a threshold, we will bail out.
+     loop_count++;
+-    if ((result == NULL) && (QueuedAllocationWarningCount > 0) && 
++    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
+         (loop_count % QueuedAllocationWarningCount == 0)) {
+       warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
+               " size=%d %s", loop_count, size, is_tlab ? "(TLAB)" : "");
+@@ -487,7 +506,7 @@
+ // flow, and NOT collection policy. So we do not check for gc collection
+ // time over limit here, that is the responsibility of the heap specific
+ // collection methods. This method decides where to attempt allocations,
+-// and when to attempt collections, but no collection specific policy. 
++// and when to attempt collections, but no collection specific policy.
+ HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab) {
+   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
+   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
+@@ -504,8 +523,8 @@
+   PSScavenge::invoke();
+   HeapWord* result = young_gen()->allocate(size, is_tlab);
+ 
+-  // Second level allocation failure. 
+-  //   Mark sweep and allocate in young generation.  
++  // Second level allocation failure.
++  //   Mark sweep and allocate in young generation.
+   if (result == NULL) {
+     // There is some chance the scavenge method decided to invoke mark_sweep.
+     // Don't mark sweep twice if so.
+@@ -516,7 +535,7 @@
+   }
+ 
+   // Third level allocation failure.
+-  //   After mark sweep and young generation allocation failure, 
++  //   After mark sweep and young generation allocation failure,
+   //   allocate in old generation.
+   if (result == NULL && !is_tlab) {
+     result = old_gen()->allocate(size, is_tlab);
+@@ -586,8 +605,8 @@
+       if (size_policy()->gc_time_limit_exceeded()) {
+         size_policy()->set_gc_time_limit_exceeded(false);
+         if (PrintGCDetails && Verbose) {
+-	gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate: "
+-	  "return NULL because gc_time_limit_exceeded is set");
++        gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate: "
++          "return NULL because gc_time_limit_exceeded is set");
+         }
+         assert(result == NULL, "Allocation did not fail");
+         return NULL;
+@@ -596,23 +615,23 @@
+       // Generate a VM operation
+       VM_ParallelGCFailedPermanentAllocation op(size, gc_count, full_gc_count);
+       VMThread::execute(&op);
+-        
++
+       // Did the VM operation execute? If so, return the result directly.
+       // This prevents us from looping until time out on requests that can
+       // not be satisfied.
+       if (op.prologue_succeeded()) {
+-        assert(Universe::heap()->is_in_permanent_or_null(op.result()), 
++        assert(Universe::heap()->is_in_permanent_or_null(op.result()),
+           "result not in heap");
+-	// If a NULL results is being returned, an out-of-memory
+-	// will be thrown now.  Clear the gc_time_limit_exceeded
+-	// flag to avoid the following situation.
+-	// 	gc_time_limit_exceeded is set during a collection
+-	//	the collection fails to return enough space and an OOM is thrown
+-	//	the next GC is skipped because the gc_time_limit_exceeded
+-	//	  flag is set and another OOM is thrown
+-	if (op.result() == NULL) {
++        // If a NULL result is being returned, an out-of-memory
++        // will be thrown now.  Clear the gc_time_limit_exceeded
++        // flag to avoid the following situation.
++        //      gc_time_limit_exceeded is set during a collection
++        //      the collection fails to return enough space and an OOM is thrown
++        //      the next GC is skipped because the gc_time_limit_exceeded
++        //        flag is set and another OOM is thrown
++        if (op.result() == NULL) {
+           size_policy()->set_gc_time_limit_exceeded(false);
+-	}
++        }
+         return op.result();
+       }
+     }
+@@ -620,8 +639,8 @@
+     // The policy object will prevent us from looping forever. If the
+     // time spent in gc crosses a threshold, we will bail out.
+     loop_count++;
+-    if ((QueuedAllocationWarningCount > 0) && 
+-	(loop_count % QueuedAllocationWarningCount == 0)) {
++    if ((QueuedAllocationWarningCount > 0) &&
++        (loop_count % QueuedAllocationWarningCount == 0)) {
+       warning("ParallelScavengeHeap::permanent_mem_allocate retries %d times \n\t"
+               " size=%d", loop_count, size);
+     }
+@@ -694,7 +713,7 @@
+ 
+ // This method is used by System.gc() and JVMTI.
+ void ParallelScavengeHeap::collect(GCCause::Cause cause) {
+-  assert(!Heap_lock->owned_by_self(), 
++  assert(!Heap_lock->owned_by_self(),
+     "this thread should not own the Heap_lock");
+ 
+   unsigned int gc_count      = 0;
+@@ -719,7 +738,7 @@
+   assert(Heap_lock->is_locked(), "Precondition#2");
+   GCCauseSetter gcs(this, cause);
+   switch (cause) {
+-    case GCCause::_heap_inspection: 
++    case GCCause::_heap_inspection:
+     case GCCause::_heap_dump: {
+       HandleMark hm;
+       invoke_full_gc(false);
+@@ -838,8 +857,8 @@
+ 
+ void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
+   if (PrintGCDetails && Verbose) {
+-    gclog_or_tty->print(" "  SIZE_FORMAT 
+-                        "->" SIZE_FORMAT 
++    gclog_or_tty->print(" "  SIZE_FORMAT
++                        "->" SIZE_FORMAT
+                         "("  SIZE_FORMAT ")",
+                         prev_used, used(), capacity());
+   } else {
+@@ -857,9 +876,9 @@
+ }
+ 
+ // Before delegating the resize to the young generation,
+-// the reserved space for the young and old generations 
++// the reserved space for the young and old generations
+ // may be changed to accommodate the desired resize.
+-void ParallelScavengeHeap::resize_young_gen(size_t eden_size, 
++void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
+     size_t survivor_size) {
+   if (UseAdaptiveGCBoundary) {
+     if (size_policy()->bytes_absorbed_from_eden() != 0) {
+@@ -872,9 +891,9 @@
+   // Delegate the resize to the generation.
+   _young_gen->resize(eden_size, survivor_size);
+ }
+-  
++
+ // Before delegating the resize to the old generation,
+-// the reserved space for the young and old generations 
++// the reserved space for the young and old generations
+ // may be changed to accommodate the desired resize.
+ void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
+   if (UseAdaptiveGCBoundary) {
+@@ -887,4 +906,4 @@
+ 
+   // Delegate the resize to the generation.
+   _old_gen->resize(desired_free_space);
+-} 
++}
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)parallelScavengeHeap.hpp	1.61 07/05/17 15:52:51 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class AdjoiningGenerations;
+@@ -42,14 +39,11 @@
+ 
+   static ParallelScavengeHeap* _psh;
+ 
+-  // Byte size of the reserved space for the heap
+-  size_t _reserved_byte_size;
+-
+-  size_t _generation_alignment;
+-  inline void set_generation_alignment(size_t val);
++  size_t _perm_gen_alignment;
++  size_t _young_gen_alignment;
++  size_t _old_gen_alignment;
+ 
+-  // Adjust alignment for page size (may be large page size)
+-  void adjust_generation_alignment_for_page_size(size_t page_size);
++  inline size_t set_alignment(size_t& var, size_t val);
+ 
+   // Collection of generations that are adjacent in the
+   // space reserved for the heap.
+@@ -57,9 +51,6 @@
+ 
+   static GCTaskManager*          _gc_task_manager;      // The task manager.
+ 
+-  // Private accessors
+-  size_t reserved_byte_size() const { return _reserved_byte_size; }
+-
+  protected:
+   static inline size_t total_invocations();
+   HeapWord* allocate_new_tlab(size_t size);
+@@ -67,7 +58,9 @@
+ 
+  public:
+   ParallelScavengeHeap() : CollectedHeap() {
+-    set_generation_alignment(intra_generation_alignment());
++    set_alignment(_perm_gen_alignment, intra_generation_alignment());
++    set_alignment(_young_gen_alignment, intra_generation_alignment());
++    set_alignment(_old_gen_alignment, intra_generation_alignment());
+   }
+ 
+   // For use by VM operations
+@@ -100,8 +93,10 @@
+   void post_initialize();
+   void update_counters();
+ 
+-  // The alignment used for generations.
+-  size_t generation_alignment() const { return _generation_alignment; }
++  // The alignment used for the various generations.
++  size_t perm_gen_alignment()  const { return _perm_gen_alignment; }
++  size_t young_gen_alignment() const { return _young_gen_alignment; }
++  size_t old_gen_alignment()  const { return _old_gen_alignment; }
+ 
+   // The alignment used for eden and survivors within the young gen.
+   size_t intra_generation_alignment() const { return 64 * K; }
+@@ -143,10 +138,10 @@
+   // and caused a NULL to be returned.  If a NULL is not returned,
+   // "gc_time_limit_was_exceeded" has an undefined meaning.
+ 
+-  HeapWord* mem_allocate(size_t size, 
+-			 bool is_noref, 
+-			 bool is_tlab,
+-			 bool* gc_overhead_limit_was_exceeded);
++  HeapWord* mem_allocate(size_t size,
++                         bool is_noref,
++                         bool is_tlab,
++                         bool* gc_overhead_limit_was_exceeded);
+   HeapWord* failed_mem_allocate(size_t size, bool is_tlab);
+ 
+   HeapWord* permanent_mem_allocate(size_t size);
+@@ -163,7 +158,7 @@
+ 
+   // These also should be called by the vm thread at a safepoint (e.g., from a
+   // VM operation).
+-  // 
++  //
+   // The first collects the young generation only, unless the scavenge fails; it
+   // will then attempt a full gc.  The second collects the entire heap; if
+   // maximum_compaction is true, it will compact everything and clear all soft
+@@ -219,8 +214,9 @@
+   void resize_old_gen(size_t desired_free_space);
+ };
+ 
+-inline void ParallelScavengeHeap::set_generation_alignment(size_t val) {
+-  assert(align_size_up_(val, os::vm_page_size()) == val, "not aligned");
+-  assert(val >= intra_generation_alignment(), "alignment size is too small");
+-  _generation_alignment = val;
++inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
++{
++  assert(is_power_of_2((intptr_t)val), "must be a power of 2");
++  var = round_to(val, intra_generation_alignment());
++  return var;
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)parallelScavengeHeap.inline.hpp	1.5 07/05/05 17:05:28 JVM"
+-#endif
+ /*
+  * Copyright 2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ inline size_t ParallelScavengeHeap::total_invocations()
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)parMarkBitMap.cpp	1.30 07/05/05 17:05:27 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -31,17 +28,23 @@
+ bool
+ ParMarkBitMap::initialize(MemRegion covered_region)
+ {
+-  const size_t alloc_granularity = os::vm_allocation_granularity();
+   const idx_t bits = bits_required(covered_region);
+-  const idx_t words = bits / BitsPerWord;
+-  const idx_t bytes = align_size_up(words * sizeof(idx_t), alloc_granularity);
+-
+   // The bits will be divided evenly between two bitmaps; each of them should be
+   // an integral number of words.
+   assert(bits % (BitsPerWord * 2) == 0, "region size unaligned");
+ 
+-  ReservedSpace rs(bytes);
+-  _virtual_space = new PSVirtualSpace(rs, os::vm_page_size());
++  const size_t words = bits / BitsPerWord;
++  const size_t raw_bytes = words * sizeof(idx_t);
++  const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
++  const size_t granularity = os::vm_allocation_granularity();
++  const size_t bytes = align_size_up(raw_bytes, MAX2(page_sz, granularity));
++
++  const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
++    MAX2(page_sz, granularity);
++  ReservedSpace rs(bytes, rs_align, false);
++  os::trace_page_sizes("par bitmap", raw_bytes, raw_bytes, page_sz,
++                       rs.base(), rs.size());
++  _virtual_space = new PSVirtualSpace(rs, page_sz);
+   if (_virtual_space != NULL && _virtual_space->expand_by(bytes)) {
+     _region_start = covered_region.start();
+     _region_size = covered_region.word_size();
+@@ -65,7 +68,7 @@
+ #ifdef ASSERT
+ extern size_t mark_bitmap_count;
+ extern size_t mark_bitmap_size;
+-#endif	// #ifdef ASSERT
++#endif  // #ifdef ASSERT
+ 
+ bool
+ ParMarkBitMap::mark_obj(HeapWord* addr, size_t size)
+@@ -100,7 +103,7 @@
+       live_bits += tmp_end - beg_bit + 1;
+       beg_bit = find_obj_beg(tmp_end + 1, range_end);
+     } else {
+-      live_bits += end_bit - beg_bit;  // No + 1 here; end_bit is not counted. 
++      live_bits += end_bit - beg_bit;  // No + 1 here; end_bit is not counted.
+       return bits_to_words(live_bits);
+     }
+   }
+@@ -130,7 +133,7 @@
+ 
+ ParMarkBitMap::IterationStatus
+ ParMarkBitMap::iterate(ParMarkBitMapClosure* live_closure,
+-		       idx_t range_beg, idx_t range_end) const
++                       idx_t range_beg, idx_t range_end) const
+ {
+   DEBUG_ONLY(verify_bit(range_beg);)
+   DEBUG_ONLY(verify_bit(range_end);)
+@@ -165,9 +168,9 @@
+ 
+ ParMarkBitMap::IterationStatus
+ ParMarkBitMap::iterate(ParMarkBitMapClosure* live_closure,
+-		       ParMarkBitMapClosure* dead_closure,
+-		       idx_t range_beg, idx_t range_end,
+-		       idx_t dead_range_end) const
++                       ParMarkBitMapClosure* dead_closure,
++                       idx_t range_beg, idx_t range_end,
++                       idx_t dead_range_end) const
+ {
+   DEBUG_ONLY(verify_bit(range_beg);)
+   DEBUG_ONLY(verify_bit(range_end);)
+@@ -187,7 +190,7 @@
+     const size_t size = obj_size(range_beg, dead_space_end);
+     dead_closure->do_addr(bit_to_addr(range_beg), size);
+   }
+-    
++
+   while (cur_beg < range_end) {
+     const idx_t cur_end = find_obj_end(cur_beg, live_search_end);
+     if (cur_end >= range_end) {
+@@ -218,12 +221,12 @@
+   return complete;
+ }
+ 
+-#ifndef	PRODUCT
++#ifndef PRODUCT
+ void ParMarkBitMap::reset_counters()
+ {
+   _cas_tries = _cas_retries = _cas_by_another = 0;
+ }
+-#endif	// #ifndef PRODUCT
++#endif  // #ifndef PRODUCT
+ 
+ #ifdef ASSERT
+ void ParMarkBitMap::verify_clear() const
+@@ -234,4 +237,4 @@
+     assert(*p == 0, "bitmap not clear");
+   }
+ }
+-#endif	// #ifdef ASSERT
++#endif  // #ifdef ASSERT
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)parMarkBitMap.hpp	1.16 07/05/05 17:05:27 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class oopDesc;
+@@ -82,25 +79,25 @@
+   // Apply live_closure to each live object that lies completely within the
+   // range [live_range_beg, live_range_end).  This is used to iterate over the
+   // compacted region of the heap.  Return values:
+-  // 
+-  // incomplete		The iteration is not complete.  The last object that
+-  // 			begins in the range does not end in the range;
+-  // 			closure->source() is set to the start of that object.
+   //
+-  // complete		The iteration is complete.  All objects in the range
+-  // 			were processed and the closure is not full;
+-  // 			closure->source() is set one past the end of the range.
+-  // 
+-  // full		The closure is full; closure->source() is set to one
+-  // 			past the end of the last object processed.
+-  // 
+-  // would_overflow	The next object in the range would overflow the closure;
+-  // 			closure->source() is set to the start of that object.
++  // incomplete         The iteration is not complete.  The last object that
++  //                    begins in the range does not end in the range;
++  //                    closure->source() is set to the start of that object.
++  //
++  // complete           The iteration is complete.  All objects in the range
++  //                    were processed and the closure is not full;
++  //                    closure->source() is set one past the end of the range.
++  //
++  // full               The closure is full; closure->source() is set to one
++  //                    past the end of the last object processed.
++  //
++  // would_overflow     The next object in the range would overflow the closure;
++  //                    closure->source() is set to the start of that object.
+   IterationStatus iterate(ParMarkBitMapClosure* live_closure,
+-			  idx_t range_beg, idx_t range_end) const;
++                          idx_t range_beg, idx_t range_end) const;
+   inline IterationStatus iterate(ParMarkBitMapClosure* live_closure,
+-				 HeapWord* range_beg,
+-				 HeapWord* range_end) const;
++                                 HeapWord* range_beg,
++                                 HeapWord* range_end) const;
+ 
+   // Apply live closure as above and additionally apply dead_closure to all dead
+   // space in the range [range_beg, dead_range_end).  Note that dead_range_end
+@@ -111,14 +108,14 @@
+   // applied.  Thus callers must ensure that range_beg is not in the middle of a
+   // live object.
+   IterationStatus iterate(ParMarkBitMapClosure* live_closure,
+-			  ParMarkBitMapClosure* dead_closure,
+-			  idx_t range_beg, idx_t range_end,
+-			  idx_t dead_range_end) const;
++                          ParMarkBitMapClosure* dead_closure,
++                          idx_t range_beg, idx_t range_end,
++                          idx_t dead_range_end) const;
+   inline IterationStatus iterate(ParMarkBitMapClosure* live_closure,
+-				 ParMarkBitMapClosure* dead_closure,
+-				 HeapWord* range_beg,
+-				 HeapWord* range_end,
+-				 HeapWord* dead_range_end) const;
++                                 ParMarkBitMapClosure* dead_closure,
++                                 HeapWord* range_beg,
++                                 HeapWord* range_end,
++                                 HeapWord* dead_range_end) const;
+ 
+   // Return the number of live words in the range [beg_addr, end_addr) due to
+   // objects that start in the range.  If a live object extends onto the range,
+@@ -160,20 +157,20 @@
+   static inline idx_t bits_required(MemRegion covered_region);
+   static inline idx_t words_required(MemRegion covered_region);
+ 
+-#ifndef	PRODUCT
++#ifndef PRODUCT
+   // CAS statistics.
+   size_t cas_tries() { return _cas_tries; }
+   size_t cas_retries() { return _cas_retries; }
+   size_t cas_by_another() { return _cas_by_another; }
+ 
+   void reset_counters();
+-#endif	// #ifndef PRODUCT
++#endif  // #ifndef PRODUCT
+ 
+-#ifdef	ASSERT
++#ifdef  ASSERT
+   void verify_clear() const;
+   inline void verify_bit(idx_t bit) const;
+   inline void verify_addr(HeapWord* addr) const;
+-#endif	// #ifdef ASSERT
++#endif  // #ifdef ASSERT
+ 
+ private:
+   // Each bit in the bitmap represents one unit of 'object granularity.' Objects
+@@ -191,7 +188,7 @@
+   size_t _cas_tries;
+   size_t _cas_retries;
+   size_t _cas_by_another;
+-#endif	// #ifndef PRODUCT
++#endif  // #ifndef PRODUCT
+ };
+ 
+ inline ParMarkBitMap::ParMarkBitMap():
+@@ -202,7 +199,7 @@
+   _virtual_space = 0;
+ }
+ 
+-inline ParMarkBitMap::ParMarkBitMap(MemRegion covered_region): 
++inline ParMarkBitMap::ParMarkBitMap(MemRegion covered_region):
+   _beg_bits(NULL, 0),
+   _end_bits(NULL, 0)
+ {
+@@ -346,22 +343,22 @@
+ 
+ inline ParMarkBitMap::IterationStatus
+ ParMarkBitMap::iterate(ParMarkBitMapClosure* live_closure,
+-		       HeapWord* range_beg,
+-		       HeapWord* range_end) const
++                       HeapWord* range_beg,
++                       HeapWord* range_end) const
+ {
+   return iterate(live_closure, addr_to_bit(range_beg), addr_to_bit(range_end));
+ }
+ 
+ inline ParMarkBitMap::IterationStatus
+ ParMarkBitMap::iterate(ParMarkBitMapClosure* live_closure,
+-		       ParMarkBitMapClosure* dead_closure,
+-		       HeapWord* range_beg,
+-		       HeapWord* range_end,
+-		       HeapWord* dead_range_end) const
++                       ParMarkBitMapClosure* dead_closure,
++                       HeapWord* range_beg,
++                       HeapWord* range_end,
++                       HeapWord* dead_range_end) const
+ {
+   return iterate(live_closure, dead_closure,
+-		 addr_to_bit(range_beg), addr_to_bit(range_end),
+-		 addr_to_bit(dead_range_end));
++                 addr_to_bit(range_beg), addr_to_bit(range_end),
++                 addr_to_bit(dead_range_end));
+ }
+ 
+ inline bool
+@@ -416,7 +413,7 @@
+   return bit_to_addr(res_bit);
+ }
+ 
+-#ifdef	ASSERT
++#ifdef  ASSERT
+ inline void ParMarkBitMap::verify_bit(idx_t bit) const {
+   // Allow one past the last valid bit; useful for loop bounds.
+   assert(bit <= _beg_bits.size(), "bit out of range");
+@@ -427,4 +424,4 @@
+   assert(addr >= region_start(), "addr too small");
+   assert(addr <= region_start() + region_size(), "addr too big");
+ }
+-#endif	// #ifdef ASSERT
++#endif  // #ifdef ASSERT
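
The IterationStatus contract spelled out at the top of this file's first hunk
(closure->source() always marks the resume point) is easiest to see in a toy.
The sketch below is illustrative only: the types are invented stand-ins, not
ParMarkBitMap's real API, and the name of the first status (truncated above)
is assumed here to be "incomplete".

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    enum IterationStatus { incomplete, full, would_overflow };

    struct Closure {
      size_t source = 0;   // resume point, per the documented contract
      size_t capacity;     // how much data this closure can still accept
      explicit Closure(size_t cap) : capacity(cap) {}
    };

    // Walks object sizes in [beg, end), honoring the documented contract:
    //   full           -> source is one past the last object processed
    //   would_overflow -> source is the start of the object that won't fit
    //   incomplete     -> reached range_end; source is one past the end of
    //                     the range (name and meaning assumed from the
    //                     truncated first entry above)
    IterationStatus iterate(const std::vector<size_t>& objs,
                            size_t beg, size_t end, Closure* cl) {
      for (size_t i = beg; i < end; ++i) {
        if (cl->capacity == 0)      { cl->source = i; return full; }
        if (objs[i] > cl->capacity) { cl->source = i; return would_overflow; }
        cl->capacity -= objs[i];
      }
      cl->source = end;
      return incomplete;
    }

    int main() {
      std::vector<size_t> objs = {2, 3, 5};
      Closure cl(4);                 // room for the first object only
      IterationStatus st = iterate(objs, 0, objs.size(), &cl);
      std::printf("status=%d, resume at %zu\n", (int)st, cl.source);
      return 0;                      // prints: status=2, resume at 1
    }

A caller that sees would_overflow typically flushes or repositions the
closure and re-invokes iterate() starting from closure->source(), which is
why the resume point is part of the contract.
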
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.inline.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.inline.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.inline.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.inline.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)parMarkBitMap.inline.hpp	1.6 07/05/05 17:05:27 JVM"
+-#endif
+ /*
+  * Copyright 2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ inline bool
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)pcTasks.cpp	1.21 07/05/05 17:05:27 JVM"
+-#endif
+ /*
+  * Copyright 2005-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -37,9 +34,9 @@
+ 
+   ResourceMark rm;
+ 
+-  NOT_PRODUCT(TraceTime tm("ThreadRootsMarkingTask", 
++  NOT_PRODUCT(TraceTime tm("ThreadRootsMarkingTask",
+     PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+-  ParCompactionManager* cm = 
++  ParCompactionManager* cm =
+     ParCompactionManager::gc_thread_compaction_manager(which);
+   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
+ 
+@@ -57,15 +54,15 @@
+ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
+   assert(Universe::heap()->is_gc_active(), "called outside gc");
+ 
+-  NOT_PRODUCT(TraceTime tm("MarkFromRootsTask", 
++  NOT_PRODUCT(TraceTime tm("MarkFromRootsTask",
+     PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+-  ParCompactionManager* cm = 
++  ParCompactionManager* cm =
+     ParCompactionManager::gc_thread_compaction_manager(which);
+   // cm->allocate_stacks();
+-  assert(cm->stacks_have_been_allocated(), 
++  assert(cm->stacks_have_been_allocated(),
+     "Stack space has not been allocated");
+   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
+-  
++
+   switch (_root_type) {
+     case universe:
+       Universe::oops_do(&mark_and_push_closure);
+@@ -128,16 +125,16 @@
+ {
+   assert(Universe::heap()->is_gc_active(), "called outside gc");
+ 
+-  NOT_PRODUCT(TraceTime tm("RefProcTask", 
++  NOT_PRODUCT(TraceTime tm("RefProcTask",
+     PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+-  ParCompactionManager* cm = 
++  ParCompactionManager* cm =
+     ParCompactionManager::gc_thread_compaction_manager(which);
+   // cm->allocate_stacks();
+-  assert(cm->stacks_have_been_allocated(), 
++  assert(cm->stacks_have_been_allocated(),
+     "Stack space has not been allocated");
+   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
+   PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
+-  _rp_task.work(_work_id, *PSParallelCompact::is_alive_closure(), 
++  _rp_task.work(_work_id, *PSParallelCompact::is_alive_closure(),
+                 mark_and_push_closure, follow_stack_closure);
+ }
+ 
+@@ -149,7 +146,7 @@
+ {
+   ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
+   uint parallel_gc_threads = heap->gc_task_manager()->workers();
+-  GenTaskQueueSet* qset = ParCompactionManager::chunk_array()->task_queue_set();
++  TaskQueueSetSuper* qset = ParCompactionManager::chunk_array();
+   ParallelTaskTerminator terminator(parallel_gc_threads, qset);
+   GCTaskQueue* q = GCTaskQueue::create();
+   for(uint i=0; i<parallel_gc_threads; i++) {
+@@ -186,10 +183,10 @@
+ void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
+   assert(Universe::heap()->is_gc_active(), "called outside gc");
+ 
+-  NOT_PRODUCT(TraceTime tm("StealMarkingTask", 
++  NOT_PRODUCT(TraceTime tm("StealMarkingTask",
+     PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+ 
+-  ParCompactionManager* cm = 
++  ParCompactionManager* cm =
+     ParCompactionManager::gc_thread_compaction_manager(which);
+   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
+ 
+@@ -218,10 +215,10 @@
+ void StealChunkCompactionTask::do_it(GCTaskManager* manager, uint which) {
+   assert(Universe::heap()->is_gc_active(), "called outside gc");
+ 
+-  NOT_PRODUCT(TraceTime tm("StealChunkCompactionTask", 
++  NOT_PRODUCT(TraceTime tm("StealChunkCompactionTask",
+     PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+ 
+-  ParCompactionManager* cm = 
++  ParCompactionManager* cm =
+     ParCompactionManager::gc_thread_compaction_manager(which);
+ 
+   // Has to drain stacks first because there may be chunks on
+@@ -242,7 +239,7 @@
+       cm->drain_chunk_stacks();
+     } else {
+       if (terminator()->offer_termination()) {
+-	break;
++        break;
+       }
+       // Go around again.
+     }
+@@ -251,7 +248,7 @@
+ }
+ 
+ UpdateDensePrefixTask::UpdateDensePrefixTask(
+-				   PSParallelCompact::SpaceId space_id,
++                                   PSParallelCompact::SpaceId space_id,
+                                    size_t chunk_index_start,
+                                    size_t chunk_index_end) :
+   _space_id(space_id), _chunk_index_start(chunk_index_start),
+@@ -260,10 +257,10 @@
+ 
+ void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {
+ 
+-  NOT_PRODUCT(TraceTime tm("UpdateDensePrefixTask", 
++  NOT_PRODUCT(TraceTime tm("UpdateDensePrefixTask",
+     PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+ 
+-  ParCompactionManager* cm = 
++  ParCompactionManager* cm =
+     ParCompactionManager::gc_thread_compaction_manager(which);
+ 
+   PSParallelCompact::update_and_deadwood_in_dense_prefix(cm,
+@@ -275,7 +272,7 @@
+ void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
+   assert(Universe::heap()->is_gc_active(), "called outside gc");
+ 
+-  NOT_PRODUCT(TraceTime tm("DrainStacksCompactionTask", 
++  NOT_PRODUCT(TraceTime tm("DrainStacksCompactionTask",
+     PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+ 
+   ParCompactionManager* cm =
+@@ -284,4 +281,3 @@
+   // Process any chunks already in the compaction managers stacks.
+   cm->drain_chunk_stacks();
+ }
+-
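
StealChunkCompactionTask::do_it above captures the work-stealing shape used
throughout this file: drain your own chunk stacks first, then try to steal,
and only stop once terminator()->offer_termination() succeeds. A
deterministic single-threaded simulation of that control flow (every name
below is an invented stand-in; the real code runs one such loop per GC
worker thread, with lock-free queues and a ParallelTaskTerminator):

    #include <cstdio>
    #include <deque>
    #include <vector>

    int main() {
      const int nworkers = 3;
      std::vector<std::deque<int>> queues(nworkers);
      queues[0] = {1, 2, 3, 4, 5};          // all chunks start on worker 0

      std::vector<bool> idle(nworkers, false);
      int terminated = 0;
      while (terminated < nworkers) {
        for (int w = 0; w < nworkers; ++w) {
          if (idle[w]) continue;
          if (!queues[w].empty()) {         // drain own stack first
            std::printf("worker %d processes chunk %d\n",
                        w, queues[w].front());
            queues[w].pop_front();
            continue;
          }
          bool stole = false;               // then try to steal a chunk
          for (int v = 0; v < nworkers && !stole; ++v) {
            if (v != w && !queues[v].empty()) {
              queues[w].push_back(queues[v].back());
              queues[v].pop_back();
              stole = true;
            }
          }
          if (!stole) {                     // nothing anywhere: offer to stop
            idle[w] = true;
            ++terminated;
          }
        }
      }
      return 0;
    }

In the single-threaded simulation an empty sweep is a consistent snapshot, so
a worker can retire permanently; the real offer_termination() must retract
the offer when another thread publishes new work, which is why the original
loop "goes around again" when the offer fails.
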
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)pcTasks.hpp	1.19 07/05/05 17:05:26 JVM"
+-#endif
+ /*
+  * Copyright 2005-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,15 +19,15 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ 
+ // Tasks for parallel compaction of the old generation
+-// 
++//
+ // Tasks are created and enqueued on a task queue. The
+ // tasks for parallel old collector for marking objects
+-// are MarkFromRootsTask and ThreadRootsMarkingTask.  
++// are MarkFromRootsTask and ThreadRootsMarkingTask.
+ //
+ // MarkFromRootsTask's are created
+ // with a root group (e.g., jni_handles) and when the do_it()
+@@ -122,10 +119,10 @@
+     : _rp_task(rp_task),
+       _work_id(work_id)
+   { }
+-  
++
+ private:
+   virtual char* name() { return (char *)"Process referents by policy in parallel"; }
+-  
++
+   virtual void do_it(GCTaskManager* manager, uint which);
+ };
+ 
+@@ -160,7 +157,7 @@
+ // RefProcTaskExecutor
+ //
+ // Task executor is an interface for the reference processor to run
+-// tasks using GCTaskManager. 
++// tasks using GCTaskManager.
+ //
+ 
+ class RefProcTaskExecutor: public AbstractRefProcTaskExecutor {
+@@ -226,7 +223,7 @@
+ 
+   UpdateDensePrefixTask(PSParallelCompact::SpaceId space_id,
+                         size_t chunk_index_start,
+-                        size_t chunk_index_end); 
++                        size_t chunk_index_end);
+ 
+   virtual void do_it(GCTaskManager* manager, uint which);
+ };
+@@ -241,7 +238,7 @@
+ // guarantees about which task will be picked up by which thread.  For example,
+ // if thread A gets all the preloaded chunks, thread A may not get a draining
+ // task (they may all be done by other threads).
+-// 
++//
+ 
+ class DrainStacksCompactionTask : public GCTask {
+  public:
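
The comment block in this header describes the pattern all of these classes
follow: a task is constructed with its parameters (for MarkFromRootsTask, a
root group such as jni_handles) and a GC worker later runs its do_it().
A minimal sketch of that shape, with invented stand-ins for GCTask and
GCTaskManager rather than the real HotSpot declarations:

    #include <cstdio>

    struct GCTaskManager;                   // opaque in this sketch

    struct GCTask {
      virtual ~GCTask() {}
      virtual char* name() = 0;
      virtual void do_it(GCTaskManager* manager, unsigned which) = 0;
    };

    // Mirrors MarkFromRootsTask: remember which root group to scan at
    // construction time, mark from it when a worker executes the task.
    struct MarkFromRootsTask : GCTask {
      enum RootType { universe, jni_handles, threads } _root_type;
      explicit MarkFromRootsTask(RootType t) : _root_type(t) {}
      char* name() override { return (char*)"mark-from-roots-task"; }
      void do_it(GCTaskManager*, unsigned which) override {
        // The real body switches on _root_type, e.g.
        //   case universe: Universe::oops_do(&mark_and_push_closure);
        std::printf("worker %u marks from root group %d\n",
                    which, (int)_root_type);
      }
    };

    int main() {
      MarkFromRootsTask task(MarkFromRootsTask::universe);
      task.do_it(nullptr, 0);
      return 0;
    }
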
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/prefetchQueue.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/prefetchQueue.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/prefetchQueue.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/prefetchQueue.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)prefetchQueue.hpp	1.13 07/05/05 17:05:28 JVM"
+-#endif
+ /*
+  * Copyright 2002-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+@@ -68,6 +65,3 @@
+     return _prefetch_queue[_prefetch_index];
+   }
+ };
+-
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)psAdaptiveSizePolicy.cpp	1.81 07/05/05 17:05:29 JVM"
+-#endif
+ /*
+  * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,52 +19,46 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+ #include "incls/_psAdaptiveSizePolicy.cpp.incl"
+ 
+-#include <math.h>    
++#include <math.h>
+ 
+ PSAdaptiveSizePolicy::PSAdaptiveSizePolicy(size_t init_eden_size,
+-					   size_t init_promo_size,
+-					   size_t init_survivor_size,
+-					   size_t generation_alignment,
+-					   size_t intra_generation_alignment,
+-					   double gc_pause_goal_sec,
+-					   double gc_minor_pause_goal_sec,
+-					   uint gc_cost_ratio) : 
++                                           size_t init_promo_size,
++                                           size_t init_survivor_size,
++                                           size_t intra_generation_alignment,
++                                           double gc_pause_goal_sec,
++                                           double gc_minor_pause_goal_sec,
++                                           uint gc_cost_ratio) :
+      AdaptiveSizePolicy(init_eden_size,
+-			init_promo_size,
+-			init_survivor_size,
+-			gc_pause_goal_sec,
+-			gc_cost_ratio),
++                        init_promo_size,
++                        init_survivor_size,
++                        gc_pause_goal_sec,
++                        gc_cost_ratio),
+      _collection_cost_margin_fraction(AdaptiveSizePolicyCollectionCostMargin/
+        100.0),
+-     _generation_alignment(generation_alignment),
+      _intra_generation_alignment(intra_generation_alignment),
+      _live_at_last_full_gc(init_promo_size),
+      _gc_minor_pause_goal_sec(gc_minor_pause_goal_sec),
+      _latest_major_mutator_interval_seconds(0),
+      _young_gen_change_for_major_pause_count(0)
+ {
+-   assert(generation_alignment >= intra_generation_alignment,
+-     "generation alignment is too small")
+-
+   // Sizing policy statistics
+-    
+-  _avg_major_pause    = 
++  _avg_major_pause    =
+     new AdaptivePaddedAverage(AdaptiveTimeWeight, PausePadding);
+   _avg_minor_interval = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
+   _avg_major_interval = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
+ 
+   _avg_base_footprint = new AdaptiveWeightedAverage(AdaptiveSizePolicyWeight);
+-  _major_pause_old_estimator = 
++  _major_pause_old_estimator =
+     new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
+-  _major_pause_young_estimator = 
++  _major_pause_young_estimator =
+     new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
+-  _major_collection_estimator = 
++  _major_collection_estimator =
+     new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
+ 
+   _young_gen_size_increment_supplement = YoungGenerationSizeSupplement;
+@@ -111,11 +102,11 @@
+     // Cost of collection (unit-less)
+     double collection_cost = 0.0;
+     if ((_latest_major_mutator_interval_seconds > 0.0) &&
+-	(major_pause_in_seconds > 0.0)) {
+-      double interval_in_seconds = 
++        (major_pause_in_seconds > 0.0)) {
++      double interval_in_seconds =
+         _latest_major_mutator_interval_seconds + major_pause_in_seconds;
+-      collection_cost = 
+-	major_pause_in_seconds / interval_in_seconds;
++      collection_cost =
++        major_pause_in_seconds / interval_in_seconds;
+       avg_major_gc_cost()->sample(collection_cost);
+ 
+       // Sample for performance counter
+@@ -127,18 +118,18 @@
+     double promo_size_in_mbytes = ((double)_promo_size)/((double)M);
+     _major_pause_old_estimator->update(promo_size_in_mbytes,
+       major_pause_in_ms);
+-    _major_pause_young_estimator->update(eden_size_in_mbytes, 
++    _major_pause_young_estimator->update(eden_size_in_mbytes,
+       major_pause_in_ms);
+ 
+     if (PrintAdaptiveSizePolicy && Verbose) {
+       gclog_or_tty->print("psAdaptiveSizePolicy::major_collection_end: "
+-	"major gc cost: %f  average: %f", collection_cost, 
+-	avg_major_gc_cost()->average());
+-      gclog_or_tty->print_cr("  major pause: %f major period %f", 
+-	major_pause_in_ms,
++        "major gc cost: %f  average: %f", collection_cost,
++        avg_major_gc_cost()->average());
++      gclog_or_tty->print_cr("  major pause: %f major period %f",
++        major_pause_in_ms,
+         _latest_major_mutator_interval_seconds * MILLIUNITS);
+     }
+-  
++
+     // Calculate variable used to estimate collection cost vs. gen sizes
+     assert(collection_cost >= 0.0, "Expected to be non-negative");
+     _major_collection_estimator->update(promo_size_in_mbytes,
+@@ -197,14 +188,14 @@
+ // If this is not a full GC, only test and modify the young generation.
+ 
+ void PSAdaptiveSizePolicy::compute_generation_free_space(size_t young_live,
+-					       size_t eden_live,
++                                               size_t eden_live,
+                                                size_t old_live,
+                                                size_t perm_live,
+-					       size_t cur_eden,
+-					       size_t max_old_gen_size,
+-					       size_t max_eden_size,
++                                               size_t cur_eden,
++                                               size_t max_old_gen_size,
++                                               size_t max_eden_size,
+                                                bool   is_full_gc,
+-					       GCCause::Cause gc_cause) {
++                                               GCCause::Cause gc_cause) {
+ 
+   // Update statistics
+   // Time statistics are updated as we go, update footprint stats here
+@@ -224,7 +215,7 @@
+   // two major collections even if the minor collections times
+   // exceeded the requested goals.  Now let the young generation
+   // adjust for the minor collection times.  Major collection times
+-  // will be zero for the first collection and will naturally be 
++  // will be zero for the first collection and will naturally be
+   // ignored.  Tenured generation adjustments are only made at the
+   // full collections so until the second major collection has
+   // been reached, no tenured generation adjustments will be made.
+@@ -235,11 +226,11 @@
+   // Start eden at the current value.  The desired value that is stored
+   // in _eden_size is not bounded by constraints of the heap and can
+   // run away.
+-  // 
++  //
+   // As expected setting desired_eden_size to the current
+   // value of desired_eden_size as a starting point
+   // caused desired_eden_size to grow way too large and caused
+-  // an overflow down stream.  It may have improved performance in 
++  // an overflow downstream.  It may have improved performance in
+   // some case but is dangerous.
+   size_t desired_eden_size = cur_eden;
+ 
+@@ -247,7 +238,7 @@
+   size_t original_promo_size = desired_promo_size;
+   size_t original_eden_size = desired_eden_size;
+ #endif
+-  
++
+   // Cache some values. There's a bit of work getting these, so
+   // we might save a little time.
+   const double major_cost = major_gc_cost();
+@@ -277,7 +268,7 @@
+ 
+   // Which way should we go?
+   // if pause requirement is not met
+-  //   adjust size of any generation with average paus exceeding 
++  //   adjust size of any generation with average pause exceeding
+   //   the pause limit.  Adjust one pause at a time (the larger)
+   //   and only make adjustments for the major pause at full collections.
+   // else if throughput requirement not met
+@@ -320,21 +311,21 @@
+     adjust_for_throughput(is_full_gc, &desired_promo_size, &desired_eden_size);
+ 
+   } else {
+-  
++
+     // Be conservative about reducing the footprint.
+     //   Do a minimum number of major collections first.
+     //   Have reasonable averages for major and minor collections costs.
+-    if (UseAdaptiveSizePolicyFootprintGoal && 
++    if (UseAdaptiveSizePolicyFootprintGoal &&
+         young_gen_policy_is_ready() &&
+         avg_major_gc_cost()->average() >= 0.0 &&
+         avg_minor_gc_cost()->average() >= 0.0) {
+       size_t desired_sum = desired_eden_size + desired_promo_size;
+       desired_eden_size = adjust_eden_for_footprint(desired_eden_size,
+-			  		            desired_sum);
++                                                    desired_sum);
+       if (is_full_gc) {
+         set_decide_at_full_gc(decide_at_full_gc_true);
+         desired_promo_size = adjust_promo_for_footprint(desired_promo_size,
+-				     			desired_sum);
++                                                        desired_sum);
+       }
+     }
+   }
+@@ -346,30 +337,30 @@
+       // "free_in_old_gen" was the original value for used for promo_limit
+       size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average());
+       gclog_or_tty->print_cr(
+-	    "PSAdaptiveSizePolicy::compute_generation_free_space limits:"
++            "PSAdaptiveSizePolicy::compute_generation_free_space limits:"
+             " desired_promo_size: " SIZE_FORMAT
+-	    " promo_limit: " SIZE_FORMAT
+-            " free_in_old_gen: " SIZE_FORMAT 
+-	    " max_old_gen_size: " SIZE_FORMAT
+-	    " avg_old_live: " SIZE_FORMAT,
+-            desired_promo_size, promo_limit, free_in_old_gen, 
+-	    max_old_gen_size, (size_t) avg_old_live()->average());
++            " promo_limit: " SIZE_FORMAT
++            " free_in_old_gen: " SIZE_FORMAT
++            " max_old_gen_size: " SIZE_FORMAT
++            " avg_old_live: " SIZE_FORMAT,
++            desired_promo_size, promo_limit, free_in_old_gen,
++            max_old_gen_size, (size_t) avg_old_live()->average());
+     }
+     if (desired_eden_size > eden_limit) {
+       gclog_or_tty->print_cr(
+-	    "AdaptiveSizePolicy::compute_generation_free_space limits:"
++            "AdaptiveSizePolicy::compute_generation_free_space limits:"
+             " desired_eden_size: " SIZE_FORMAT
+             " old_eden_size: " SIZE_FORMAT
+             " eden_limit: " SIZE_FORMAT
+-	    " cur_eden: " SIZE_FORMAT
+-	    " max_eden_size: " SIZE_FORMAT
+-	    " avg_young_live: " SIZE_FORMAT,
++            " cur_eden: " SIZE_FORMAT
++            " max_eden_size: " SIZE_FORMAT
++            " avg_young_live: " SIZE_FORMAT,
+             desired_eden_size, _eden_size, eden_limit, cur_eden,
+-	    max_eden_size, (size_t)avg_young_live()->average());
++            max_eden_size, (size_t)avg_young_live()->average());
+     }
+     if (gc_cost() > gc_cost_limit) {
+       gclog_or_tty->print_cr(
+-	    "AdaptiveSizePolicy::compute_generation_free_space: gc time limit"
++            "AdaptiveSizePolicy::compute_generation_free_space: gc time limit"
+             " gc_cost: %f "
+             " GCTimeLimit: %d",
+             gc_cost(), GCTimeLimit);
+@@ -393,10 +384,10 @@
+   if (desired_promo_size > free_in_old_gen && desired_eden_size > eden_limit) {
+ 
+     // eden_limit is the upper limit on the size of eden based on
+-    // the maximum size of the young generation and the sizes 
++    // the maximum size of the young generation and the sizes
+     // of the survivor space.
+-    // The question being asked is whether the gc costs are high 
+-    // and the space being recovered by a collection is low.  
++    // The question being asked is whether the gc costs are high
++    // and the space being recovered by a collection is low.
+     // free_in_young_gen is the free space in the young generation
+     // after a collection and promo_live is the free space in the old
+     // generation after a collection.
+@@ -415,7 +406,7 @@
+     const size_t total_mem = max_old_gen_size + max_eden_size;
+     const double mem_free_limit = total_mem * (GCHeapFreeLimit/100.0);
+     if (PrintAdaptiveSizePolicy && (Verbose ||
+-	(total_free_limit < (size_t) mem_free_limit))) {
++        (total_free_limit < (size_t) mem_free_limit))) {
+       gclog_or_tty->print_cr(
+             "PSAdaptiveSizePolicy::compute_generation_free_space limits:"
+             " promo_limit: " SIZE_FORMAT
+@@ -433,12 +424,12 @@
+       if (gc_cost() > gc_cost_limit &&
+         total_free_limit < (size_t) mem_free_limit) {
+         // Collections, on average, are taking too much time, and
+-        //	gc_cost() > gc_cost_limit
++        //      gc_cost() > gc_cost_limit
+         // we have too little space available after a full gc.
+-        //	total_free_limit < mem_free_limit
++        //      total_free_limit < mem_free_limit
+         // where
+         //   total_free_limit is the free space available in
+-        //     both generations 
++        //     both generations
+         //   total_mem is the total space available for allocation
+         //     in both generations (survivor spaces are not included
+         //     just as they are not included in eden_limit).
+@@ -452,25 +443,25 @@
+         // freed by the collection is the free space in the young gen +
+         // tenured gen.
+         // Ignore explicit GC's. Ignoring explicit GC's at this level
+-	// is the equivalent of the GC did not happen as far as the 
+-	// overhead calculation is concerted (i.e., the flag is not set
+-	// and the count is not affected).  Also the average will not
+-	// have been updated unless UseAdaptiveSizePolicyWithSystemGC is on.
+-	if (!GCCause::is_user_requested_gc(gc_cause) &&
+-	    !GCCause::is_serviceability_requested_gc(gc_cause)) {
++        // is the equivalent of the GC not having happened as far as the
++        // overhead calculation is concerned (i.e., the flag is not set
++        // and the count is not affected).  Also the average will not
++        // have been updated unless UseAdaptiveSizePolicyWithSystemGC is on.
++        if (!GCCause::is_user_requested_gc(gc_cause) &&
++            !GCCause::is_serviceability_requested_gc(gc_cause)) {
+           inc_gc_time_limit_count();
+           if (UseGCOverheadLimit &&
+-  	      (gc_time_limit_count() > AdaptiveSizePolicyGCTimeLimitThreshold)){
++              (gc_time_limit_count() > AdaptiveSizePolicyGCTimeLimitThreshold)){
+             // All conditions have been met for throwing an out-of-memory
+             _gc_time_limit_exceeded = true;
+-	    // Avoid consecutive OOM due to the gc time limit by resetting
+-	    // the counter.
+-	    reset_gc_time_limit_count();
++            // Avoid consecutive OOM due to the gc time limit by resetting
++            // the counter.
++            reset_gc_time_limit_count();
+           }
+           _print_gc_time_limit_would_be_exceeded = true;
+-	}
++        }
+       } else {
+-	// Did not exceed overhead limits
++        // Did not exceed overhead limits
+         reset_gc_time_limit_count();
+       }
+     }
+@@ -479,7 +470,7 @@
+ 
+   // And one last limit check, now that we've aligned things.
+   if (desired_eden_size > eden_limit) {
+-    // If the policy says to get a larger eden but 
++    // If the policy says to get a larger eden but
+     // is hitting the limit, don't decrease eden.
+     // This can lead to a general drifting down of the
+     // eden size.  Let the tenuring calculation push more
+@@ -492,13 +483,13 @@
+   if (PrintAdaptiveSizePolicy) {
+     // Timing stats
+     gclog_or_tty->print(
+-	       "PSAdaptiveSizePolicy::compute_generation_free_space: costs"
++               "PSAdaptiveSizePolicy::compute_generation_free_space: costs"
+                " minor_time: %f"
+                " major_cost: %f"
+-	       " mutator_cost: %f"
+-	       " throughput_goal: %f",
+-               minor_gc_cost(), major_gc_cost(), mutator_cost(), 
+-	       _throughput_goal);
++               " mutator_cost: %f"
++               " throughput_goal: %f",
++               minor_gc_cost(), major_gc_cost(), mutator_cost(),
++               _throughput_goal);
+ 
+     // We give more details if Verbose is set
+     if (Verbose) {
+@@ -506,12 +497,12 @@
+                   " major_pause: %f"
+                   " minor_interval: %f"
+                   " major_interval: %f"
+-		  " pause_goal: %f",
+-                  _avg_minor_pause->padded_average(), 
++                  " pause_goal: %f",
++                  _avg_minor_pause->padded_average(),
+                   _avg_major_pause->padded_average(),
+-                  _avg_minor_interval->average(), 
++                  _avg_minor_interval->average(),
+                   _avg_major_interval->average(),
+-		  gc_pause_goal_sec());
++                  gc_pause_goal_sec());
+     }
+ 
+     // Footprint stats
+@@ -523,17 +514,17 @@
+       gclog_or_tty->print( " base_footprint: " SIZE_FORMAT
+                   " avg_young_live: " SIZE_FORMAT
+                   " avg_old_live: " SIZE_FORMAT,
+-                  (size_t)_avg_base_footprint->average(), 
+-                  (size_t)avg_young_live()->average(), 
++                  (size_t)_avg_base_footprint->average(),
++                  (size_t)avg_young_live()->average(),
+                   (size_t)avg_old_live()->average());
+     }
+-    
++
+     // And finally, our old and new sizes.
+     gclog_or_tty->print(" old_promo_size: " SIZE_FORMAT
+                " old_eden_size: " SIZE_FORMAT
+                " desired_promo_size: " SIZE_FORMAT
+                " desired_eden_size: " SIZE_FORMAT,
+-               _promo_size, _eden_size, 
++               _promo_size, _eden_size,
+                desired_promo_size, desired_eden_size);
+     gclog_or_tty->cr();
+   }
+@@ -552,15 +543,15 @@
+   if (is_full_gc) {
+     // Don't wait for the threshold value for the major collections.  If
+     // here, the supplemental growth term was used and should decay.
+-    if ((_avg_major_pause->count() % TenuredGenerationSizeSupplementDecay) 
+-	== 0) {
+-      _old_gen_size_increment_supplement = 
++    if ((_avg_major_pause->count() % TenuredGenerationSizeSupplementDecay)
++        == 0) {
++      _old_gen_size_increment_supplement =
+         _old_gen_size_increment_supplement >> 1;
+     }
+   } else {
+     if ((_avg_minor_pause->count() >= AdaptiveSizePolicyReadyThreshold) &&
+-	(_avg_minor_pause->count() % YoungGenerationSizeSupplementDecay) == 0) {
+-      _young_gen_size_increment_supplement = 
++        (_avg_minor_pause->count() % YoungGenerationSizeSupplementDecay) == 0) {
++      _young_gen_size_increment_supplement =
+         _young_gen_size_increment_supplement >> 1;
+     }
+   }
+@@ -576,17 +567,17 @@
+   // here.  It has not seemed to be needed but perhaps should
+   // be added for consistency.
+   if (minor_pause_young_estimator()->decrement_will_decrease()) {
+-	// reduce eden size
++        // reduce eden size
+     set_change_young_gen_for_min_pauses(
+-	  decrease_young_gen_for_min_pauses_true);
+-    *desired_eden_size_ptr = *desired_eden_size_ptr - 
++          decrease_young_gen_for_min_pauses_true);
++    *desired_eden_size_ptr = *desired_eden_size_ptr -
+       eden_decrement_aligned_down(*desired_eden_size_ptr);
+     } else {
+       // EXPERIMENTAL ADJUSTMENT
+       // Only record that the estimator indicated such an action.
+       // *desired_eden_size_ptr = *desired_eden_size_ptr + eden_heap_delta;
+       set_change_young_gen_for_min_pauses(
+-	  increase_young_gen_for_min_pauses_true);
++          increase_young_gen_for_min_pauses_true);
+   }
+   if (PSAdjustTenuredGenForMinorPause) {
+     // If the desired eden size is as small as it will get,
+@@ -596,19 +587,19 @@
+       // may not be a good idea.  This is just a test.
+       if (minor_pause_old_estimator()->decrement_will_decrease()) {
+         set_change_old_gen_for_min_pauses(
+-  	  decrease_old_gen_for_min_pauses_true);
+-        *desired_promo_size_ptr = 
+-	  _promo_size - promo_decrement_aligned_down(*desired_promo_size_ptr);
++          decrease_old_gen_for_min_pauses_true);
++        *desired_promo_size_ptr =
++          _promo_size - promo_decrement_aligned_down(*desired_promo_size_ptr);
+       } else {
+         set_change_old_gen_for_min_pauses(
+-  	  increase_old_gen_for_min_pauses_true);
+-	size_t promo_heap_delta = 
+-	  promo_increment_with_supplement_aligned_up(*desired_promo_size_ptr);
+-        if ((*desired_promo_size_ptr + promo_heap_delta) > 
+-	    *desired_promo_size_ptr) {
+-          *desired_promo_size_ptr = 
+-	    _promo_size + promo_heap_delta;
+-        }      
++          increase_old_gen_for_min_pauses_true);
++        size_t promo_heap_delta =
++          promo_increment_with_supplement_aligned_up(*desired_promo_size_ptr);
++        if ((*desired_promo_size_ptr + promo_heap_delta) >
++            *desired_promo_size_ptr) {
++          *desired_promo_size_ptr =
++            _promo_size + promo_heap_delta;
++        }
+       }
+     }
+   }
+@@ -628,9 +619,9 @@
+   }
+ 
+   if (_avg_minor_pause->padded_average() > _avg_major_pause->padded_average()) {
+-    adjust_for_minor_pause_time(is_full_gc, 
+-				desired_promo_size_ptr, 
+-      				desired_eden_size_ptr);
++    adjust_for_minor_pause_time(is_full_gc,
++                                desired_promo_size_ptr,
++                                desired_eden_size_ptr);
+     // major pause adjustments
+   } else if (is_full_gc) {
+     // Adjust for the major pause time only at full gc's because the
+@@ -645,7 +636,7 @@
+     } else {
+       // EXPERIMENTAL ADJUSTMENT
+       // Only record that the estimator indicated such an action.
+-      // *desired_promo_size_ptr = _promo_size + 
++      // *desired_promo_size_ptr = _promo_size +
+       //   promo_increment_aligned_up(*desired_promo_size_ptr);
+       set_change_old_gen_for_maj_pauses(increase_old_gen_for_maj_pauses_true);
+     }
+@@ -658,23 +649,23 @@
+         // pause, do it.
+         // During startup there is noise in the statistics for deciding
+         // on whether to increase or decrease the young gen size.  For
+-        // some number of iterations, just try to increase the young 
++        // some number of iterations, just try to increase the young
+         // gen size if the major pause is too long to try and establish
+         // good statistics for later decisions.
+         if (major_pause_young_estimator()->increment_will_decrease() ||
+-  	  (_young_gen_change_for_major_pause_count 
+-  	    <= AdaptiveSizePolicyInitializingSteps)) {
++          (_young_gen_change_for_major_pause_count
++            <= AdaptiveSizePolicyInitializingSteps)) {
+           set_change_young_gen_for_maj_pauses(
+-  	  increase_young_gen_for_maj_pauses_true);
+-	  eden_heap_delta = eden_increment_aligned_up(*desired_eden_size_ptr);
++          increase_young_gen_for_maj_pauses_true);
++          eden_heap_delta = eden_increment_aligned_up(*desired_eden_size_ptr);
+           *desired_eden_size_ptr = _eden_size + eden_heap_delta;
+-  	  _young_gen_change_for_major_pause_count++;
++          _young_gen_change_for_major_pause_count++;
+         } else {
+-  	  // Record that decreasing the young gen size would decrease
+-  	  // the major pause
+-  	  set_change_young_gen_for_maj_pauses(
+-  	    decrease_young_gen_for_maj_pauses_true);
+-	  eden_heap_delta = eden_decrement_aligned_down(*desired_eden_size_ptr);
++          // Record that decreasing the young gen size would decrease
++          // the major pause
++          set_change_young_gen_for_maj_pauses(
++            decrease_young_gen_for_maj_pauses_true);
++          eden_heap_delta = eden_decrement_aligned_down(*desired_eden_size_ptr);
+           *desired_eden_size_ptr = _eden_size - eden_heap_delta;
+         }
+       }
+@@ -721,10 +712,10 @@
+   if (is_full_gc) {
+ 
+     // Calculate the change to use for the tenured gen.
+-    size_t scaled_promo_heap_delta = 0; 
++    size_t scaled_promo_heap_delta = 0;
+     // Can the increment to the generation be scaled?
+     if (gc_cost() >= 0.0 && major_gc_cost() >= 0.0) {
+-      size_t promo_heap_delta = 
++      size_t promo_heap_delta =
+         promo_increment_with_supplement_aligned_up(*desired_promo_size_ptr);
+       double scale_by_ratio = major_gc_cost() / gc_cost();
+       scaled_promo_heap_delta =
+@@ -732,14 +723,14 @@
+       if (PrintAdaptiveSizePolicy && Verbose) {
+         gclog_or_tty->print_cr(
+           "Scaled tenured increment: " SIZE_FORMAT " by %f down to "
+-	  SIZE_FORMAT,
++          SIZE_FORMAT,
+           promo_heap_delta, scale_by_ratio, scaled_promo_heap_delta);
+       }
+     } else if (major_gc_cost() >= 0.0) {
+       // Scaling is not going to work.  If the major gc time is the
+       // larger, give it a full increment.
+       if (major_gc_cost() >= minor_gc_cost()) {
+-        scaled_promo_heap_delta = 
++        scaled_promo_heap_delta =
+           promo_increment_with_supplement_aligned_up(*desired_promo_size_ptr);
+       }
+     } else {
+@@ -755,32 +746,32 @@
+         // a specific number of collections have been, use the heuristic
+         // that a larger generation size means lower collection costs.
+         if (major_collection_estimator()->increment_will_decrease() ||
+-           (_old_gen_change_for_major_throughput 
++           (_old_gen_change_for_major_throughput
+             <= AdaptiveSizePolicyInitializingSteps)) {
+           // Increase tenured generation size to reduce major collection cost
+           if ((*desired_promo_size_ptr + scaled_promo_heap_delta) >
+               *desired_promo_size_ptr) {
+             *desired_promo_size_ptr = _promo_size + scaled_promo_heap_delta;
+           }
+-          set_change_old_gen_for_throughput( 
+-	      increase_old_gen_for_throughput_true);
+-	      _old_gen_change_for_major_throughput++;
+-	} else {
+-	  // EXPERIMENTAL ADJUSTMENT
+-	  // Record that decreasing the old gen size would decrease
+-          // the major collection cost but don't do it. 
+-          // *desired_promo_size_ptr = _promo_size - 
+-	  //   promo_decrement_aligned_down(*desired_promo_size_ptr);
+           set_change_old_gen_for_throughput(
+-	        decrease_old_gen_for_throughput_true);
+-	}
++              increase_old_gen_for_throughput_true);
++              _old_gen_change_for_major_throughput++;
++        } else {
++          // EXPERIMENTAL ADJUSTMENT
++          // Record that decreasing the old gen size would decrease
++          // the major collection cost but don't do it.
++          // *desired_promo_size_ptr = _promo_size -
++          //   promo_decrement_aligned_down(*desired_promo_size_ptr);
++          set_change_old_gen_for_throughput(
++                decrease_old_gen_for_throughput_true);
++        }
+ 
+-	break;
++        break;
+       default:
+-	// Simplest strategy
++        // Simplest strategy
+         if ((*desired_promo_size_ptr + scaled_promo_heap_delta) >
+             *desired_promo_size_ptr) {
+-          *desired_promo_size_ptr = *desired_promo_size_ptr + 
++          *desired_promo_size_ptr = *desired_promo_size_ptr +
+             scaled_promo_heap_delta;
+         }
+         set_change_old_gen_for_throughput(
+@@ -790,10 +781,10 @@
+ 
+     if (PrintAdaptiveSizePolicy && Verbose) {
+       gclog_or_tty->print_cr(
+-	  "adjusting tenured gen for throughput (avg %f goal %f). "
+-	  "desired_promo_size " SIZE_FORMAT " promo_delta " SIZE_FORMAT ,
+-	  mutator_cost(), _throughput_goal,
+-	  *desired_promo_size_ptr, scaled_promo_heap_delta);
++          "adjusting tenured gen for throughput (avg %f goal %f). "
++          "desired_promo_size " SIZE_FORMAT " promo_delta " SIZE_FORMAT ,
++          mutator_cost(), _throughput_goal,
++          *desired_promo_size_ptr, scaled_promo_heap_delta);
+     }
+   }
+ 
+@@ -809,16 +800,16 @@
+       (size_t) (scale_by_ratio * (double) eden_heap_delta);
+     if (PrintAdaptiveSizePolicy && Verbose) {
+       gclog_or_tty->print_cr(
+-        "Scaled eden increment: " SIZE_FORMAT " by %f down to " 
+-	SIZE_FORMAT,
++        "Scaled eden increment: " SIZE_FORMAT " by %f down to "
++        SIZE_FORMAT,
+         eden_heap_delta, scale_by_ratio, scaled_eden_heap_delta);
+     }
+   } else if (minor_gc_cost() >= 0.0) {
+     // Scaling is not going to work.  If the minor gc time is the
+     // larger, give it a full increment.
+     if (minor_gc_cost() > major_gc_cost()) {
+-      scaled_eden_heap_delta = 
+-	eden_increment_with_supplement_aligned_up(*desired_eden_size_ptr);
++      scaled_eden_heap_delta =
++        eden_increment_with_supplement_aligned_up(*desired_eden_size_ptr);
+     }
+   } else {
+     // Don't expect to get here but it's ok if it does
+@@ -832,32 +823,32 @@
+   switch (AdaptiveSizeThroughPutPolicy) {
+     case 1:
+       if (minor_collection_estimator()->increment_will_decrease() ||
+-        (_young_gen_change_for_minor_throughput 
++        (_young_gen_change_for_minor_throughput
+           <= AdaptiveSizePolicyInitializingSteps)) {
+         // Expand young generation size to reduce frequency of
+         // of collections.
+-	if ((*desired_eden_size_ptr + scaled_eden_heap_delta) > 
+-	    *desired_eden_size_ptr) {
+-          *desired_eden_size_ptr = 
+-	    *desired_eden_size_ptr + scaled_eden_heap_delta;
+-	}
++        if ((*desired_eden_size_ptr + scaled_eden_heap_delta) >
++            *desired_eden_size_ptr) {
++          *desired_eden_size_ptr =
++            *desired_eden_size_ptr + scaled_eden_heap_delta;
++        }
+         set_change_young_gen_for_throughput(
+-	  increase_young_gen_for_througput_true);
+-	_young_gen_change_for_minor_throughput++;
++          increase_young_gen_for_througput_true);
++        _young_gen_change_for_minor_throughput++;
+       } else {
+-	// EXPERIMENTAL ADJUSTMENT
+-	// Record that decreasing the young gen size would decrease
+-	// the minor collection cost but don't do it. 
+-        // *desired_eden_size_ptr = _eden_size - 
+-	//   eden_decrement_aligned_down(*desired_eden_size_ptr);
++        // EXPERIMENTAL ADJUSTMENT
++        // Record that decreasing the young gen size would decrease
++        // the minor collection cost but don't do it.
++        // *desired_eden_size_ptr = _eden_size -
++        //   eden_decrement_aligned_down(*desired_eden_size_ptr);
+         set_change_young_gen_for_throughput(
+-	  decrease_young_gen_for_througput_true);
++          decrease_young_gen_for_througput_true);
+       }
+-	  break;
++          break;
+     default:
+-      if ((*desired_eden_size_ptr + scaled_eden_heap_delta) > 
++      if ((*desired_eden_size_ptr + scaled_eden_heap_delta) >
+           *desired_eden_size_ptr) {
+-        *desired_eden_size_ptr = 
++        *desired_eden_size_ptr =
+           *desired_eden_size_ptr + scaled_eden_heap_delta;
+       }
+       set_change_young_gen_for_throughput(
+@@ -867,10 +858,10 @@
+ 
+   if (PrintAdaptiveSizePolicy && Verbose) {
+     gclog_or_tty->print_cr(
+-	"adjusting eden for throughput (avg %f goal %f). desired_eden_size "
+-	SIZE_FORMAT " eden delta " SIZE_FORMAT "\n",
++        "adjusting eden for throughput (avg %f goal %f). desired_eden_size "
++        SIZE_FORMAT " eden delta " SIZE_FORMAT "\n",
+       mutator_cost(), _throughput_goal,
+-	*desired_eden_size_ptr, scaled_eden_heap_delta);
++        *desired_eden_size_ptr, scaled_eden_heap_delta);
+   }
+ }
+ 
+@@ -888,9 +879,9 @@
+     gclog_or_tty->print_cr(
+       "AdaptiveSizePolicy::compute_generation_free_space "
+       "adjusting tenured gen for footprint. "
+-      "starting promo size " SIZE_FORMAT 
++      "starting promo size " SIZE_FORMAT
+       " reduced promo size " SIZE_FORMAT,
+-      " promo delta " SIZE_FORMAT, 
++      " promo delta " SIZE_FORMAT,
+       desired_promo_size, reduced_size, change );
+   }
+ 
+@@ -912,9 +903,9 @@
+     gclog_or_tty->print_cr(
+       "AdaptiveSizePolicy::compute_generation_free_space "
+       "adjusting eden for footprint. "
+-      " starting eden size " SIZE_FORMAT 
++      " starting eden size " SIZE_FORMAT
+       " reduced eden size " SIZE_FORMAT
+-      " eden delta " SIZE_FORMAT,  
++      " eden delta " SIZE_FORMAT,
+       desired_eden_size, reduced_size, change);
+   }
+ 
+@@ -923,12 +914,12 @@
+ }
+ 
+ // Scale down "change" by the factor
+-//	part / total
++//      part / total
+ // Don't align the results.
+ 
+-size_t PSAdaptiveSizePolicy::scale_down(size_t change, 
+-					double part, 
+-					double total) {
++size_t PSAdaptiveSizePolicy::scale_down(size_t change,
++                                        double part,
++                                        double total) {
+   assert(part <= total, "Inconsistent input");
+   size_t reduced_change = change;
+   if (total > 0) {
+@@ -939,8 +930,8 @@
+   return reduced_change;
+ }
+ 
+-size_t PSAdaptiveSizePolicy::eden_increment(size_t cur_eden, 
+-					    uint percent_change) {
++size_t PSAdaptiveSizePolicy::eden_increment(size_t cur_eden,
++                                            uint percent_change) {
+   size_t eden_heap_delta;
+   eden_heap_delta = cur_eden / 100 * percent_change;
+   return eden_heap_delta;
+@@ -962,7 +953,7 @@
+ 
+ size_t PSAdaptiveSizePolicy::eden_increment_with_supplement_aligned_up(
+   size_t cur_eden) {
+-  size_t result = eden_increment(cur_eden, 
++  size_t result = eden_increment(cur_eden,
+     YoungGenerationSizeIncrement + _young_gen_size_increment_supplement);
+   return align_size_up(result, _intra_generation_alignment);
+ }
+@@ -973,13 +964,13 @@
+ }
+ 
+ size_t PSAdaptiveSizePolicy::eden_decrement(size_t cur_eden) {
+-  size_t eden_heap_delta = eden_increment(cur_eden) / 
++  size_t eden_heap_delta = eden_increment(cur_eden) /
+     AdaptiveSizeDecrementScaleFactor;
+   return eden_heap_delta;
+ }
+-  
+-size_t PSAdaptiveSizePolicy::promo_increment(size_t cur_promo, 
+-					     uint percent_change) {
++
++size_t PSAdaptiveSizePolicy::promo_increment(size_t cur_promo,
++                                             uint percent_change) {
+   size_t promo_heap_delta;
+   promo_heap_delta = cur_promo / 100 * percent_change;
+   return promo_heap_delta;
+@@ -1001,7 +992,7 @@
+ 
+ size_t PSAdaptiveSizePolicy::promo_increment_with_supplement_aligned_up(
+   size_t cur_promo) {
+-  size_t result =  promo_increment(cur_promo, 
++  size_t result =  promo_increment(cur_promo,
+     TenuredGenerationSizeIncrement + _old_gen_size_increment_supplement);
+   return align_size_up(result, _intra_generation_alignment);
+ }
+@@ -1022,13 +1013,13 @@
+                                              int tenuring_threshold,
+                                              size_t survivor_limit) {
+   assert(survivor_limit >= _intra_generation_alignment,
+-	 "survivor_limit too small");
++         "survivor_limit too small");
+   assert((size_t)align_size_down(survivor_limit, _intra_generation_alignment)
+-	 == survivor_limit, "survivor_limit not aligned");
++         == survivor_limit, "survivor_limit not aligned");
+ 
+   // This method is called even if the tenuring threshold and survivor
+   // spaces are not adjusted so that the averages are sampled above.
+-  if (!UsePSAdaptiveSurvivorSizePolicy || 
++  if (!UsePSAdaptiveSurvivorSizePolicy ||
+       !young_gen_policy_is_ready()) {
+     return tenuring_threshold;
+   }
+@@ -1045,11 +1036,11 @@
+   set_decrement_tenuring_threshold_for_survivor_limit(false);
+ 
+   if (!is_survivor_overflow) {
+-    // Keep running averages on how much survived 
++    // Keep running averages on how much survived
+ 
+     // We use the tenuring threshold to equalize the cost of major
+     // and minor collections.
+-    // ThresholdTolerance is used to indicate how sensitive the 
++    // ThresholdTolerance is used to indicate how sensitive the
+     // tenuring threshold is to differences in cost betweent the
+     // collection types.
+ 
+@@ -1073,7 +1064,7 @@
+     // Survivor space overflow occurred, so promoted and survived are
+     // not accurate. We'll make our best guess by combining survived
+     // and promoted and count them as survivors.
+-    // 
++    //
+     // We'll lower the tenuring threshold to see if we can correct
+     // things. Also, set the survivor size conservatively. We're
+     // trying to avoid many overflows from occurring if defnew size
+@@ -1086,7 +1077,7 @@
+   // we use this to see how good of an estimate we have of what survived.
+   // We're trying to pad the survivor size as little as possible without
+   // overflowing the survivor spaces.
+-  size_t target_size = align_size_up((size_t)_avg_survived->padded_average(), 
++  size_t target_size = align_size_up((size_t)_avg_survived->padded_average(),
+                                      _intra_generation_alignment);
+   target_size = MAX2(target_size, _intra_generation_alignment);
+ 
+@@ -1112,23 +1103,23 @@
+   }
+ 
+   // We keep a running average of the amount promoted which is used
+-  // to decide when we should collect the old generation (when 
++  // to decide when we should collect the old generation (when
+   // the amount of old gen free space is less than what we expect to
+   // promote).
+- 
++
+   if (PrintAdaptiveSizePolicy) {
+     // A little more detail if Verbose is on
+-    if (Verbose) { 
++    if (Verbose) {
+       gclog_or_tty->print( "  avg_survived: %f"
+                   "  avg_deviation: %f",
+-                  _avg_survived->average(), 
++                  _avg_survived->average(),
+                   _avg_survived->deviation());
+     }
+ 
+     gclog_or_tty->print( "  avg_survived_padded_avg: %f",
+                 _avg_survived->padded_average());
+ 
+-    if (Verbose) { 
++    if (Verbose) {
+       gclog_or_tty->print( "  avg_promoted_avg: %f"
+                   "  avg_promoted_dev: %f",
+                   avg_promoted()->average(),
+@@ -1151,11 +1142,11 @@
+ }
+ 
+ void PSAdaptiveSizePolicy::update_averages(bool is_survivor_overflow,
+-					   size_t survived, 
+-					   size_t promoted) {
++                                           size_t survived,
++                                           size_t promoted) {
+   // Update averages
+   if (!is_survivor_overflow) {
+-    // Keep running averages on how much survived 
++    // Keep running averages on how much survived
+     _avg_survived->sample(survived);
+   } else {
+     size_t survived_guess = survived + promoted;
+@@ -1165,7 +1156,7 @@
+ 
+   if (PrintAdaptiveSizePolicy) {
+     gclog_or_tty->print(
+-		  "AdaptiveSizePolicy::compute_survivor_space_size_and_thresh:"
++                  "AdaptiveSizePolicy::compute_survivor_space_size_and_thresh:"
+                   "  survived: "  SIZE_FORMAT
+                   "  promoted: "  SIZE_FORMAT
+                   "  overflow: %s",
+@@ -1173,12 +1164,12 @@
+   }
+ }
+ 
+-bool PSAdaptiveSizePolicy::print_adaptive_size_policy_on(outputStream* st) 
++bool PSAdaptiveSizePolicy::print_adaptive_size_policy_on(outputStream* st)
+   const {
+ 
+   if (!UseAdaptiveSizePolicy) return false;
+ 
+   return AdaptiveSizePolicy::print_adaptive_size_policy_on(
+-                          st, 
+-			  PSScavenge::tenuring_threshold());
++                          st,
++                          PSScavenge::tenuring_threshold());
+ }
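
Two computations touched all over this file are simple enough to check by
hand: the collection-cost ratio sampled in major_collection_end() (pause
time divided by pause plus the preceding mutator interval) and the
percentage-based eden increment/decrement that is then aligned. A standalone
sketch; the constants below are illustrative placeholders, not asserted
HotSpot defaults:

    #include <cstddef>
    #include <cstdio>

    static size_t align_size_up(size_t sz, size_t alignment) {
      return (sz + alignment - 1) / alignment * alignment;  // round up
    }

    int main() {
      // collection_cost = major_pause / (mutator_interval + major_pause),
      // the value fed to avg_major_gc_cost()->sample() above.
      double mutator_interval_sec = 9.9;
      double major_pause_sec = 0.1;
      double collection_cost =
          major_pause_sec / (mutator_interval_sec + major_pause_sec);
      std::printf("collection cost: %.3f\n", collection_cost);   // 0.010

      // eden_increment(): a percentage of the current size;
      // eden_decrement(): the increment divided by a scale factor.
      // Increments are rounded up to the intra-generation alignment
      // before use (decrements are aligned down in the real code).
      const size_t cur_eden   = 64 * 1024 * 1024;  // 64M, arbitrary
      const size_t alignment  = 64 * 1024;         // placeholder alignment
      const unsigned pct      = 20;                // placeholder increment %
      const unsigned scale    = 4;                 // placeholder decrement scale

      size_t inc = cur_eden / 100 * pct;
      size_t dec = inc / scale;
      std::printf("inc=%zu dec=%zu aligned inc=%zu\n",
                  inc, dec, align_size_up(inc, alignment));
      return 0;
    }
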
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)psAdaptiveSizePolicy.hpp	1.63 07/05/05 17:05:27 JVM"
+-#endif
+ /*
+  * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This class keeps statistical information and computes the
+@@ -43,7 +40,7 @@
+ // 05/02/2003 Update
+ // The 1.5 policy makes use of data gathered for the costs of GC on
+ // specific generations.  That data does reference specific
+-// generation.  Also diagnostics specific to generations have 
++// generation.  Also diagnostics specific to generations have
+ // been added.
+ 
+ // Forward decls
+@@ -65,7 +62,7 @@
+ 
+   // Footprint statistics
+   AdaptiveWeightedAverage* _avg_base_footprint;
+- 
++
+   // Statistical data gathered for GC
+   GCStats _gc_stats;
+ 
+@@ -86,7 +83,6 @@
+   // for making ergonomic decisions.
+   double _latest_major_mutator_interval_seconds;
+ 
+-  const size_t _generation_alignment;       // alignment for generations
+   const size_t _intra_generation_alignment; // alignment for eden, survivors
+ 
+   const double _gc_minor_pause_goal_sec;    // goal for maximum minor gc pause
+@@ -132,8 +128,8 @@
+ 
+   // Change the young generation size to achieve a minor GC pause time goal
+   void adjust_for_minor_pause_time(bool is_full_gc,
+-				   size_t* desired_promo_size_ptr,
+-				   size_t* desired_eden_size_ptr);
++                                   size_t* desired_promo_size_ptr,
++                                   size_t* desired_eden_size_ptr);
+   // Change the generation sizes to achieve a GC pause time goal
+   // Returned sizes are not necessarily aligned.
+   void adjust_for_pause_time(bool is_full_gc,
+@@ -147,9 +143,9 @@
+   // Change the generation sizes to achieve minimum footprint
+   // Returned sizes are not aligned.
+   size_t adjust_promo_for_footprint(size_t desired_promo_size,
+-			            size_t desired_total);
++                                    size_t desired_total);
+   size_t adjust_eden_for_footprint(size_t desired_promo_size,
+-			           size_t desired_total);
++                                   size_t desired_total);
+ 
+   // Size in bytes for an increment or decrement of eden.
+   virtual size_t eden_increment(size_t cur_eden, uint percent_change);
+@@ -175,20 +171,20 @@
+   // Time accessors
+ 
+   // Footprint accessors
+-  size_t live_space() const { 
++  size_t live_space() const {
+     return (size_t)(avg_base_footprint()->average() +
+                     avg_young_live()->average() +
+                     avg_old_live()->average());
+   }
+-  size_t free_space() const { 
+-    return _eden_size + _promo_size; 
++  size_t free_space() const {
++    return _eden_size + _promo_size;
+   }
+ 
+-  void set_promo_size(size_t new_size) { 
++  void set_promo_size(size_t new_size) {
+     _promo_size = new_size;
+   }
+-  void set_survivor_size(size_t new_size) { 
+-    _survivor_size = new_size; 
++  void set_survivor_size(size_t new_size) {
++    _survivor_size = new_size;
+   }
+ 
+   // Update estimators
+@@ -207,26 +203,25 @@
+   virtual size_t promo_increment(size_t cur_promo);
+ 
+   // Accessors for use by performance counters
+-  AdaptivePaddedNoZeroDevAverage*  avg_promoted() const { 
+-    return _gc_stats.avg_promoted(); 
++  AdaptivePaddedNoZeroDevAverage*  avg_promoted() const {
++    return _gc_stats.avg_promoted();
+   }
+-  AdaptiveWeightedAverage* avg_base_footprint() const { 
+-    return _avg_base_footprint; 
++  AdaptiveWeightedAverage* avg_base_footprint() const {
++    return _avg_base_footprint;
+   }
+ 
+   // Input arguments are initial free space sizes for young and old
+-  // generations, the initial survivor space size, the 
++  // generations, the initial survivor space size, the
+   // alignment values and the pause & throughput goals.
+   //
+   // NEEDS_CLEANUP this is a singleton object
+-  PSAdaptiveSizePolicy(size_t init_eden_size, 
+-		       size_t init_promo_size, 
+-		       size_t init_survivor_size, 
+-		       size_t generation_alignment,
+-		       size_t intra_generation_alignment,
+-		       double gc_pause_goal_sec,
+-		       double gc_minor_pause_goal_sec,
+-		       uint gc_time_ratio);
++  PSAdaptiveSizePolicy(size_t init_eden_size,
++                       size_t init_promo_size,
++                       size_t init_survivor_size,
++                       size_t intra_generation_alignment,
++                       double gc_pause_goal_sec,
++                       double gc_minor_pause_goal_sec,
++                       uint gc_time_ratio);
+ 
+   // Methods indicating events of interest to the adaptive size policy,
+   // called by GC algorithms. It is the responsibility of users of this
+@@ -234,7 +229,7 @@
+   void major_collection_begin();
+   void major_collection_end(size_t amount_live, GCCause::Cause gc_cause);
+ 
+-  // 
++  //
+   void tenured_allocation(size_t size) {
+     _avg_pretenured->sample(size);
+   }
+@@ -258,25 +253,25 @@
+     return (size_t)avg_promoted()->padded_average();
+   }
+ 
+-  int change_young_gen_for_maj_pauses() { 
+-    return _change_young_gen_for_maj_pauses; 
++  int change_young_gen_for_maj_pauses() {
++    return _change_young_gen_for_maj_pauses;
+   }
+-  void set_change_young_gen_for_maj_pauses(int v) { 
+-    _change_young_gen_for_maj_pauses = v; 
++  void set_change_young_gen_for_maj_pauses(int v) {
++    _change_young_gen_for_maj_pauses = v;
+   }
+ 
+-  int change_old_gen_for_min_pauses() { 
+-    return _change_old_gen_for_min_pauses; 
++  int change_old_gen_for_min_pauses() {
++    return _change_old_gen_for_min_pauses;
+   }
+-  void set_change_old_gen_for_min_pauses(int v) { 
+-    _change_old_gen_for_min_pauses = v; 
++  void set_change_old_gen_for_min_pauses(int v) {
++    _change_old_gen_for_min_pauses = v;
+   }
+ 
+   // Return true if the old generation size was changed
+   // to try to reach a pause time goal.
+   bool old_gen_changed_for_pauses() {
+     bool result = _change_old_gen_for_maj_pauses != 0 ||
+-		  _change_old_gen_for_min_pauses != 0;
++                  _change_old_gen_for_min_pauses != 0;
+     return result;
+   }
+ 
+@@ -284,7 +279,7 @@
+   // to try to reach a pause time goal.
+   bool young_gen_changed_for_pauses() {
+     bool result = _change_young_gen_for_min_pauses != 0 ||
+-		  _change_young_gen_for_maj_pauses != 0;
++                  _change_young_gen_for_maj_pauses != 0;
+     return result;
+   }
+   // end flags for pause goal
+@@ -302,7 +297,7 @@
+     bool result = _change_young_gen_for_throughput != 0;
+     return result;
+   }
+-		  
++
+   int decrease_for_footprint() { return _decrease_for_footprint; }
+ 
+ 
+@@ -321,8 +316,8 @@
+   virtual void clear_generation_free_space_flags();
+ 
+   float major_pause_old_slope() { return _major_pause_old_estimator->slope(); }
+-  float major_pause_young_slope() { 
+-    return _major_pause_young_estimator->slope(); 
++  float major_pause_young_slope() {
++    return _major_pause_young_estimator->slope();
+   }
+   float major_collection_slope() { return _major_collection_estimator->slope();}
+ 
+@@ -338,14 +333,14 @@
+   // as an indication if a full gc has just been performed, for use
+   // in deciding if an OOM error should be thrown.
+   void compute_generation_free_space(size_t young_live,
+-				     size_t eden_live,
++                                     size_t eden_live,
+                                      size_t old_live,
+                                      size_t perm_live,
+-				     size_t cur_eden,  // current eden in bytes
+-				     size_t max_old_gen_size,
+-				     size_t max_eden_size,
++                                     size_t cur_eden,  // current eden in bytes
++                                     size_t max_old_gen_size,
++                                     size_t max_eden_size,
+                                      bool   is_full_gc,
+-				     GCCause::Cause gc_cause);
++                                     GCCause::Cause gc_cause);
+ 
+   // Calculates new survivor space size;  returns a new tenuring threshold
+   // value. Stores new survivor size in _survivor_size.
+@@ -367,8 +362,8 @@
+     return sz > alignment ? align_size_down(sz, alignment) : alignment;
+   }
+ 
+-  size_t live_at_last_full_gc() { 
+-    return _live_at_last_full_gc; 
++  size_t live_at_last_full_gc() {
++    return _live_at_last_full_gc;
+   }
+ 
+   size_t bytes_absorbed_from_eden() const { return _bytes_absorbed_from_eden; }
+@@ -380,9 +375,9 @@
+ 
+   // Update averages that are always used (even
+   // if adaptive sizing is turned off).
+-  void update_averages(bool is_survivor_overflow, 
+-		       size_t survived, 
+-		       size_t promoted);
++  void update_averages(bool is_survivor_overflow,
++                       size_t survived,
++                       size_t promoted);
+ 
+   // Printing support
+   virtual bool print_adaptive_size_policy_on(outputStream* st) const;
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)psCompactionManager.cpp	1.21 07/05/05 17:05:28 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -32,7 +29,7 @@
+ ParCompactionManager**  ParCompactionManager::_manager_array = NULL;
+ OopTaskQueueSet*     ParCompactionManager::_stack_array = NULL;
+ ObjectStartArray*    ParCompactionManager::_start_array = NULL;
+-ParMarkBitMap*	     ParCompactionManager::_mark_bitmap = NULL;
++ParMarkBitMap*       ParCompactionManager::_mark_bitmap = NULL;
+ ChunkTaskQueueSet*   ParCompactionManager::_chunk_array = NULL;
+ 
+ ParCompactionManager::ParCompactionManager() :
+@@ -43,7 +40,7 @@
+ 
+   _old_gen = heap->old_gen();
+   _start_array = old_gen()->start_array();
+-    
++
+ 
+   marking_stack()->initialize();
+ 
+@@ -55,13 +52,13 @@
+   chunk_stack()->initialize();
+ 
+   // We want the overflow stack to be permanent
+-  _chunk_overflow_stack = 
++  _chunk_overflow_stack =
+     new (ResourceObj::C_HEAP) GrowableArray<size_t>(10, true);
+ #endif
+ 
+   // Note that _revisit_klass_stack is allocated out of the
+   // C heap (as opposed to out of ResourceArena).
+-  int size = 
++  int size =
+     (SystemDictionary::number_of_classes() * 2) * 2 / ParallelGCThreads;
+   _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
+ 
+@@ -76,7 +73,7 @@
+ }
+ 
+ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
+-  assert(PSParallelCompact::gc_task_manager() != NULL, 
++  assert(PSParallelCompact::gc_task_manager() != NULL,
+     "Needed for initialization");
+ 
+   _mark_bitmap = mbm;
+@@ -86,7 +83,7 @@
+   assert(_manager_array == NULL, "Attempt to initialize twice");
+   _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1 );
+   guarantee(_manager_array != NULL, "Could not initialize promotion manager");
+-  
++
+   _stack_array = new OopTaskQueueSet(parallel_gc_threads);
+   guarantee(_stack_array != NULL, "Could not initialize promotion manager");
+   _chunk_array = new ChunkTaskQueueSet(parallel_gc_threads);
+@@ -97,13 +94,17 @@
+     _manager_array[i] = new ParCompactionManager();
+     guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
+     stack_array()->register_queue(i, _manager_array[i]->marking_stack());
++#ifdef USE_ChunkTaskQueueWithOverflow
++    chunk_array()->register_queue(i, _manager_array[i]->chunk_stack()->task_queue());
++#else
+     chunk_array()->register_queue(i, _manager_array[i]->chunk_stack());
++#endif
+   }
+ 
+   // The VMThread gets its own ParCompactionManager, which is not available
+-  // for work stealing. 
++  // for work stealing.
+   _manager_array[parallel_gc_threads] = new ParCompactionManager();
+-  guarantee(_manager_array[parallel_gc_threads] != NULL, 
++  guarantee(_manager_array[parallel_gc_threads] != NULL,
+     "Could not create ParCompactionManager");
+   assert(PSParallelCompact::gc_task_manager()->workers() != 0,
+     "Not initialized?");
+@@ -111,14 +112,14 @@
+ 
+ bool ParCompactionManager::should_update() {
+   assert(action() != NotValid, "Action is not set");
+-  return (action() == ParCompactionManager::Update) || 
++  return (action() == ParCompactionManager::Update) ||
+          (action() == ParCompactionManager::CopyAndUpdate) ||
+          (action() == ParCompactionManager::UpdateAndCopy);
+ }
+ 
+ bool ParCompactionManager::should_copy() {
+   assert(action() != NotValid, "Action is not set");
+-  return (action() == ParCompactionManager::Copy) || 
++  return (action() == ParCompactionManager::Copy) ||
+          (action() == ParCompactionManager::CopyAndUpdate) ||
+          (action() == ParCompactionManager::UpdateAndCopy);
+ }
+@@ -142,7 +143,7 @@
+ 
+   if(!marking_stack()->push(obj)) {
+     overflow_stack()->push(obj);
+-  } 
++  }
+ }
+ 
+ oop ParCompactionManager::retrieve_for_scanning() {
+@@ -170,22 +171,21 @@
+ #else
+   if(!chunk_stack()->push(chunk_index)) {
+     chunk_overflow_stack()->push(chunk_index);
+-  } 
++  }
+ #endif
+ }
+ 
+ bool ParCompactionManager::retrieve_for_processing(size_t& chunk_index) {
+-
+ #ifdef USE_ChunkTaskQueueWithOverflow
+   return chunk_stack()->retrieve(chunk_index);
+ #else
+   // Should not be used in the parallel case
+   ShouldNotReachHere();
+-  return NULL;
++  return false;
+ #endif
+ }
+ 
+-ParCompactionManager* 
++ParCompactionManager*
+ ParCompactionManager::gc_thread_compaction_manager(int index) {
+   assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
+   assert(_manager_array != NULL, "Sanity");
+@@ -206,7 +206,7 @@
+   MutableSpace* old_space = heap->old_gen()->object_space();
+   MutableSpace* perm_space = heap->perm_gen()->object_space();
+ #endif /* ASSERT */
+-  
++
+ 
+   do {
+ 
+@@ -245,7 +245,7 @@
+   MutableSpace* old_space = heap->old_gen()->object_space();
+   MutableSpace* perm_space = heap->perm_gen()->object_space();
+ #endif /* ASSERT */
+-  
++
+ #if 1 // def DO_PARALLEL - the serial code hasn't been updated
+   do {
+ 
+@@ -276,8 +276,8 @@
+       // pop, but they can come from anywhere, unfortunately.
+       PSParallelCompact::fill_and_update_chunk(this, chunk_index);
+     }
+-  } while((chunk_stack()->size() != 0) || 
+-	  (chunk_overflow_stack()->length() != 0));
++  } while((chunk_stack()->size() != 0) ||
++          (chunk_overflow_stack()->length() != 0));
+ #endif
+ 
+ #ifdef USE_ChunkTaskQueueWithOverflow
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)psCompactionManager.hpp	1.17 07/05/05 17:05:29 JVM"
+-#endif
+ /*
+  * Copyright 2005-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+@@ -81,7 +78,7 @@
+   static ChunkTaskQueueSet*    _chunk_array;
+   static PSOldGen*             _old_gen;
+ 
+-  OopTaskQueue		       _marking_stack;
++  OopTaskQueue                 _marking_stack;
+   GrowableArray<oop>*          _overflow_stack;
+   // Is there a way to reuse the _marking_stack for the
+   // saving empty chunks?  For now just create a different
+@@ -90,7 +87,7 @@
+ #ifdef USE_ChunkTaskQueueWithOverflow
+   ChunkTaskQueueWithOverflow   _chunk_stack;
+ #else
+-  ChunkTaskQueue	       _chunk_stack;
++  ChunkTaskQueue               _chunk_stack;
+   GrowableArray<size_t>*       _chunk_overflow_stack;
+ #endif
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)psGCAdaptivePolicyCounters.cpp	1.23 07/05/05 17:05:31 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -31,13 +28,13 @@
+ 
+ 
+ PSGCAdaptivePolicyCounters::PSGCAdaptivePolicyCounters(const char* name_arg,
+-                       		      int collectors, 
+-				      int generations,
+-                       		      PSAdaptiveSizePolicy* size_policy_arg)
+-	: GCAdaptivePolicyCounters(name_arg, 
+-				   collectors, 
+-				   generations, 
+-				   size_policy_arg) {
++                                      int collectors,
++                                      int generations,
++                                      PSAdaptiveSizePolicy* size_policy_arg)
++        : GCAdaptivePolicyCounters(name_arg,
++                                   collectors,
++                                   generations,
++                                   size_policy_arg) {
+   if (UsePerfData) {
+     EXCEPTION_MARK;
+     ResourceMark rm;
+@@ -61,37 +58,37 @@
+       PerfData::U_Bytes, (jlong) 0, CHECK);
+ 
+     cname = PerfDataManager::counter_name(name_space(), "avgPromotedAvg");
+-    _avg_promoted_avg_counter = 
++    _avg_promoted_avg_counter =
+       PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
+         ps_size_policy()->calculated_promo_size_in_bytes(), CHECK);
+ 
+     cname = PerfDataManager::counter_name(name_space(), "avgPromotedDev");
+-    _avg_promoted_dev_counter = 
++    _avg_promoted_dev_counter =
+       PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
+         (jlong) 0 , CHECK);
+ 
+     cname = PerfDataManager::counter_name(name_space(), "avgPromotedPaddedAvg");
+-    _avg_promoted_padded_avg_counter = 
++    _avg_promoted_padded_avg_counter =
+       PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
+         ps_size_policy()->calculated_promo_size_in_bytes(), CHECK);
+ 
+     cname = PerfDataManager::counter_name(name_space(),
+       "avgPretenuredPaddedAvg");
+-    _avg_pretenured_padded_avg = 
++    _avg_pretenured_padded_avg =
+       PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
+         (jlong) 0, CHECK);
+ 
+ 
+-    cname = PerfDataManager::counter_name(name_space(), 
++    cname = PerfDataManager::counter_name(name_space(),
+       "changeYoungGenForMajPauses");
+-    _change_young_gen_for_maj_pauses_counter = 
+-      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Events, 
++    _change_young_gen_for_maj_pauses_counter =
++      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Events,
+         (jlong)0, CHECK);
+ 
+-    cname = PerfDataManager::counter_name(name_space(), 
++    cname = PerfDataManager::counter_name(name_space(),
+       "changeOldGenForMinPauses");
+-    _change_old_gen_for_min_pauses = 
+-      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Events, 
++    _change_old_gen_for_min_pauses =
++      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Events,
+         (jlong)0, CHECK);
+ 
+ 
+@@ -200,4 +197,3 @@
+     update_counters_from_policy();
+   }
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)psGCAdaptivePolicyCounters.hpp	1.21 07/05/05 17:05:29 JVM"
+-#endif
+ /*
+  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // PSGCAdaptivePolicyCounters is a holder class for performance counters
+@@ -63,7 +60,7 @@
+   PerfVariable* _full_follows_scavenge;
+ 
+   // Use this time stamp if the gc time stamp is not available.
+-  TimeStamp	_counter_time_stamp;
++  TimeStamp     _counter_time_stamp;
+ 
+  protected:
+   PSAdaptiveSizePolicy* ps_size_policy() {
+@@ -71,8 +68,8 @@
+   }
+ 
+  public:
+-  PSGCAdaptivePolicyCounters(const char* name, int collectors, int generations, 
+-			     PSAdaptiveSizePolicy* size_policy);
++  PSGCAdaptivePolicyCounters(const char* name, int collectors, int generations,
++                             PSAdaptiveSizePolicy* size_policy);
+   inline void update_old_capacity(size_t size_in_bytes) {
+     _old_capacity->set_value(size_in_bytes);
+   }
+@@ -191,7 +188,7 @@
+   // that are updated via input parameters.
+   void update_counters();
+ 
+-  virtual GCPolicyCounters::Name kind() const { 
+-    return GCPolicyCounters::PSGCAdaptivePolicyCountersKind; 
++  virtual GCPolicyCounters::Name kind() const {
++    return GCPolicyCounters::PSGCAdaptivePolicyCountersKind;
+   }
+ };
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGenerationCounters.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGenerationCounters.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGenerationCounters.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGenerationCounters.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)psGenerationCounters.cpp	1.7 07/05/05 17:05:29 JVM"
+-#endif
+ 
+ /*
+  * Copyright 2004 Sun Microsystems, Inc.  All Rights Reserved.
+@@ -23,7 +20,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGenerationCounters.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGenerationCounters.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGenerationCounters.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGenerationCounters.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)psGenerationCounters.hpp	1.8 07/05/05 17:05:29 JVM"
+-#endif
+ 
+ /*
+  * Copyright 2004 Sun Microsystems, Inc.  All Rights Reserved.
+@@ -23,7 +20,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A PSGenerationCounter is a holder class for performance counters
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)psMarkSweep.cpp	1.92 07/06/08 23:11:01 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -64,7 +61,7 @@
+   // policy object if GCs are, on the whole, taking too long. If so,
+   // bail out without attempting a collection.  The exceptions are
+   // for explicitly requested GC's.
+-  if (!policy->gc_time_limit_exceeded() || 
++  if (!policy->gc_time_limit_exceeded() ||
+       GCCause::is_user_requested_gc(gc_cause) ||
+       GCCause::is_serviceability_requested_gc(gc_cause)) {
+     IsGCActiveMark mark;
+@@ -80,7 +77,7 @@
+ }
+ 
+ // This method contains no policy. You should probably
+-// be calling invoke() instead. 
++// be calling invoke() instead.
+ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
+   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
+   assert(ref_processor() != NULL, "Sanity");
+@@ -121,7 +118,7 @@
+   }
+ 
+   // Verify object start arrays
+-  if (VerifyObjectStartArray && 
++  if (VerifyObjectStartArray &&
+       VerifyBeforeGC) {
+     old_gen->verify_object_start_array();
+     perm_gen->verify_object_start_array();
+@@ -148,7 +145,7 @@
+     TraceMemoryManagerStats tms(true /* Full GC */);
+ 
+     if (TraceGen1Time) accumulated_time()->start();
+-  
++
+     // Let the size policy know we're starting
+     size_policy->major_collection_begin();
+ 
+@@ -157,7 +154,7 @@
+     CodeCache::gc_prologue();
+     Threads::gc_prologue();
+     BiasedLocking::preserve_marks();
+-    
++
+     // Capture heap size before collection for printing.
+     size_t prev_used = heap->used();
+ 
+@@ -167,30 +164,30 @@
+     // For PrintGCDetails
+     size_t old_gen_prev_used = old_gen->used_in_bytes();
+     size_t young_gen_prev_used = young_gen->used_in_bytes();
+-    
++
+     allocate_stacks();
+-    
++
+     NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
+     COMPILER2_PRESENT(DerivedPointerTable::clear());
+-  
++
+     ref_processor()->enable_discovery();
+ 
+     mark_sweep_phase1(clear_all_softrefs);
+ 
+     mark_sweep_phase2();
+-    
++
+     // Don't add any more derived pointers during phase3
+     COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
+     COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
+-    
++
+     mark_sweep_phase3();
+-    
++
+     mark_sweep_phase4();
+-    
++
+     restore_marks();
+-    
++
+     deallocate_stacks();
+-    
++
+     eden_empty = young_gen->eden_space()->is_empty();
+     if (!eden_empty) {
+       eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
+@@ -200,30 +197,30 @@
+     // input to soft ref clearing policy at the next gc.
+     Universe::update_heap_info_at_gc();
+ 
+-    survivors_empty = young_gen->from_space()->is_empty() && 
++    survivors_empty = young_gen->from_space()->is_empty() &&
+       young_gen->to_space()->is_empty();
+     young_gen_empty = eden_empty && survivors_empty;
+-    
++
+     BarrierSet* bs = heap->barrier_set();
+     if (bs->is_a(BarrierSet::ModRef)) {
+       ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
+       MemRegion old_mr = heap->old_gen()->reserved();
+       MemRegion perm_mr = heap->perm_gen()->reserved();
+       assert(perm_mr.end() <= old_mr.start(), "Generations out of order");
+-      
++
+       if (young_gen_empty) {
+         modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
+       } else {
+         modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
+       }
+     }
+-    
++
+     BiasedLocking::restore_marks();
+     Threads::gc_epilogue();
+     CodeCache::gc_epilogue();
+-    
++
+     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
+-  
++
+     ref_processor()->enqueue_discovered_references(NULL);
+ 
+     // Update time of last GC
+@@ -239,12 +236,12 @@
+         gclog_or_tty->stamp();
+         gclog_or_tty->print_cr(" collection: %d ",
+                        heap->total_collections());
+-	if (Verbose) {
+-	  gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
+-	    " perm_gen_capacity: %d ",
+-	    old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(), 
+-	    perm_gen->capacity_in_bytes());
+-	}
++        if (Verbose) {
++          gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
++            " perm_gen_capacity: %d ",
++            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
++            perm_gen->capacity_in_bytes());
++        }
+       }
+ 
+       // Don't check if the size_policy is ready here.  Let
+@@ -253,22 +250,22 @@
+           ((gc_cause != GCCause::_java_lang_system_gc) ||
+             UseAdaptiveSizePolicyWithSystemGC)) {
+         // Calculate optimal free space amounts
+-	assert(young_gen->max_size() > 
+-	  young_gen->from_space()->capacity_in_bytes() + 
+-	  young_gen->to_space()->capacity_in_bytes(), 
+-	  "Sizes of space in young gen are out-of-bounds");
+-	size_t max_eden_size = young_gen->max_size() - 
+-	  young_gen->from_space()->capacity_in_bytes() - 
+-	  young_gen->to_space()->capacity_in_bytes();
++        assert(young_gen->max_size() >
++          young_gen->from_space()->capacity_in_bytes() +
++          young_gen->to_space()->capacity_in_bytes(),
++          "Sizes of space in young gen are out-of-bounds");
++        size_t max_eden_size = young_gen->max_size() -
++          young_gen->from_space()->capacity_in_bytes() -
++          young_gen->to_space()->capacity_in_bytes();
+         size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
+-				 young_gen->eden_space()->used_in_bytes(),
++                                 young_gen->eden_space()->used_in_bytes(),
+                                  old_gen->used_in_bytes(),
+                                  perm_gen->used_in_bytes(),
+-				 young_gen->eden_space()->capacity_in_bytes(),
++                                 young_gen->eden_space()->capacity_in_bytes(),
+                                  old_gen->max_gen_size(),
+                                  max_eden_size,
+                                  true /* full gc*/,
+-				 gc_cause);
++                                 gc_cause);
+ 
+         heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());
+ 
+@@ -297,7 +294,7 @@
+ 
+     // We collected the perm gen, so we'll resize it here.
+     perm_gen->compute_new_size(perm_gen_prev_used);
+-    
++
+     if (TraceGen1Time) accumulated_time()->stop();
+ 
+     if (PrintGC) {
+@@ -323,12 +320,12 @@
+     if (PrintGCDetails) {
+       if (size_policy->print_gc_time_limit_would_be_exceeded()) {
+         if (size_policy->gc_time_limit_exceeded()) {
+-          gclog_or_tty->print_cr("	GC time is exceeding GCTimeLimit "
+-	    "of %d%%", GCTimeLimit);
++          gclog_or_tty->print_cr("      GC time is exceeding GCTimeLimit "
++            "of %d%%", GCTimeLimit);
+         } else {
+-          gclog_or_tty->print_cr("	GC time would exceed GCTimeLimit "
+-	    "of %d%%", GCTimeLimit);
+-	}
++          gclog_or_tty->print_cr("      GC time would exceed GCTimeLimit "
++            "of %d%%", GCTimeLimit);
++        }
+       }
+       size_policy->set_print_gc_time_limit_would_be_exceeded(false);
+     }
+@@ -341,7 +338,7 @@
+   }
+ 
+   // Re-verify object start arrays
+-  if (VerifyObjectStartArray && 
++  if (VerifyObjectStartArray &&
+       VerifyAfterGC) {
+     old_gen->verify_object_start_array();
+     perm_gen->verify_object_start_array();
+@@ -355,12 +352,12 @@
+ }
+ 
+ bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
+-					     PSYoungGen* young_gen,
+-					     PSOldGen* old_gen) {
++                                             PSYoungGen* young_gen,
++                                             PSOldGen* old_gen) {
+   MutableSpace* const eden_space = young_gen->eden_space();
+   assert(!eden_space->is_empty(), "eden must be non-empty");
+   assert(young_gen->virtual_space()->alignment() ==
+-	 old_gen->virtual_space()->alignment(), "alignments do not match");
++         old_gen->virtual_space()->alignment(), "alignments do not match");
+ 
+   if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
+     return false;
+@@ -394,14 +391,14 @@
+ 
+   if (TraceAdaptiveGCBoundary && Verbose) {
+     gclog_or_tty->print(" absorbing " SIZE_FORMAT "K:  "
+-			"eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
+-			"from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
+-			"young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
+-			absorb_size / K,
+-			eden_capacity / K, (eden_capacity - absorb_size) / K,
+-			young_gen->from_space()->used_in_bytes() / K,
+-			young_gen->to_space()->used_in_bytes() / K,
+-			young_gen->capacity_in_bytes() / K, new_young_size / K);
++                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
++                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
++                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
++                        absorb_size / K,
++                        eden_capacity / K, (eden_capacity - absorb_size) / K,
++                        young_gen->from_space()->used_in_bytes() / K,
++                        young_gen->to_space()->used_in_bytes() / K,
++                        young_gen->capacity_in_bytes() / K, new_young_size / K);
+   }
+ 
+   // Fill the unused part of the old gen.
+@@ -423,7 +420,7 @@
+   // from end to virtual_space->high() in debug builds).
+   HeapWord* const new_top = eden_space->top();
+   old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
+-					absorb_size);
++                                        absorb_size);
+   young_gen->reset_after_change();
+   old_space->set_top(new_top);
+   old_space->set_end(new_top);
+@@ -438,7 +435,7 @@
+ 
+   // Could update the promoted average here, but it is not typically updated at
+   // full GCs and the value to use is unclear.  Something like
+-  // 
++  //
+   // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
+ 
+   size_policy->set_bytes_absorbed_from_eden(absorb_size);
+@@ -508,7 +505,7 @@
+ 
+   // Process reference objects found during marking
+ 
+-  // Skipping the reference processing for VerifyParallelOldWithMarkSweep 
++  // Skipping the reference processing for VerifyParallelOldWithMarkSweep
+   // affects the marking (makes it different).
+   {
+     ReferencePolicy *soft_ref_policy;
+@@ -559,7 +556,7 @@
+   // array. If perm_gen is not traversed last a klassOop may get
+   // overwritten. This is fine since it is dead, but if the class has dead
+   // instances we have to skip them, and in order to find their size we
+-  // need the klassOop! 
++  // need the klassOop!
+   //
+   // It is not required that we traverse spaces in the same order in
+   // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
+@@ -658,8 +655,8 @@
+   young_gen->compact();
+ }
+ 
+-jlong PSMarkSweep::millis_since_last_gc() { 
+-  jlong ret_val = os::javaTimeMillis() - _time_of_last_gc; 
++jlong PSMarkSweep::millis_since_last_gc() {
++  jlong ret_val = os::javaTimeMillis() - _time_of_last_gc;
+   // XXX See note in genCollectedHeap::millis_since_last_gc().
+   if (ret_val < 0) {
+     NOT_PRODUCT(warning("time warp: %d", ret_val);)
+@@ -668,6 +665,6 @@
+   return ret_val;
+ }
+ 
+-void PSMarkSweep::reset_millis_since_last_gc() { 
+-  _time_of_last_gc = os::javaTimeMillis(); 
++void PSMarkSweep::reset_millis_since_last_gc() {
++  _time_of_last_gc = os::javaTimeMillis();
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)psMarkSweepDecorator.cpp	1.26 07/05/17 15:52:53 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -48,7 +45,7 @@
+ void PSMarkSweepDecorator::advance_destination_decorator() {
+   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+-  
++
+   assert(_destination_decorator != NULL, "Sanity");
+   guarantee(_destination_decorator != heap->perm_gen()->object_mark_sweep(), "Cannot advance perm gen decorator");
+ 
+@@ -105,26 +102,26 @@
+ 
+   HeapWord* compact_top = dest->compaction_top();
+   HeapWord* compact_end = dest->space()->end();
+-                                           
+-  HeapWord* q = space()->bottom();                                                    
+-  HeapWord* t = space()->top();                                                
+-  
+-  HeapWord*  end_of_live= q;    /* One byte beyond the last byte of the last 
+-				   live object. */                           
+-  HeapWord*  first_dead = space()->end(); /* The first dead object. */                 
+-  LiveRange* liveRange  = NULL; /* The current live range, recorded in the   
+-				   first header of preceding free area. */   
+-  _first_dead = first_dead;                                                  
++
++  HeapWord* q = space()->bottom();
++  HeapWord* t = space()->top();
++
++  HeapWord*  end_of_live= q;    /* One byte beyond the last byte of the last
++                                   live object. */
++  HeapWord*  first_dead = space()->end(); /* The first dead object. */
++  LiveRange* liveRange  = NULL; /* The current live range, recorded in the
++                                   first header of preceding free area. */
++  _first_dead = first_dead;
+ 
+   const intx interval = PrefetchScanIntervalInBytes;
+-                                                                             
+-  while (q < t) {                                                            
++
++  while (q < t) {
+     assert(oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||
+-           oop(q)->mark()->has_bias_pattern(),     
+-	   "these are the only valid states during a mark sweep");           
+-    if (oop(q)->is_gc_marked()) {  
+-      /* prefetch beyond q */                                                
+-      Prefetch::write(q, interval);                          
++           oop(q)->mark()->has_bias_pattern(),
++           "these are the only valid states during a mark sweep");
++    if (oop(q)->is_gc_marked()) {
++      /* prefetch beyond q */
++      Prefetch::write(q, interval);
+       size_t size = oop(q)->size();
+ 
+       size_t compaction_max_size = pointer_delta(compact_end, compact_top);
+@@ -146,8 +143,8 @@
+         compact_end = dest->space()->end();
+         assert(compact_top == dest->space()->bottom(), "Advanced to space already in use");
+         assert(compact_end > compact_top, "Must always be space remaining");
+-	compaction_max_size = 
+-	  pointer_delta(compact_end, compact_top);
++        compaction_max_size =
++          pointer_delta(compact_end, compact_top);
+       }
+ 
+       // store the forwarding pointer into the mark word
+@@ -155,13 +152,13 @@
+         oop(q)->forward_to(oop(compact_top));
+         assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
+       } else {
+-	// Don't clear the mark since it's confuses parallel old
+-	// verification.
+-	if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
++        // Don't clear the mark since it confuses parallel old
++        // verification.
++        if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
+           // if the object isn't moving we can just set the mark to the default
+-          // mark and handle it specially later on.  
++          // mark and handle it specially later on.
+           oop(q)->init_mark();
+-	}
++        }
+         assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
+       }
+ 
+@@ -173,26 +170,26 @@
+ 
+       debug_only(MarkSweep::register_live_oop(oop(q), size));
+       compact_top += size;
+-      assert(compact_top <= dest->space()->end(), 
+-	"Exceeding space in destination");
++      assert(compact_top <= dest->space()->end(),
++        "Exceeding space in destination");
+ 
+-      q += size;                                                             
+-      end_of_live = q;                                                       
+-    } else {                                                                 
+-      /* run over all the contiguous dead objects */                         
+-      HeapWord* end = q;                                                     
+-      do {                                                                   
+-        /* prefetch beyond end */                                            
+-        Prefetch::write(end, interval);                            
+-	end += oop(end)->size();
++      q += size;
++      end_of_live = q;
++    } else {
++      /* run over all the contiguous dead objects */
++      HeapWord* end = q;
++      do {
++        /* prefetch beyond end */
++        Prefetch::write(end, interval);
++        end += oop(end)->size();
+       } while (end < t && (!oop(end)->is_gc_marked()));
+ 
+       /* see if we might want to pretend this object is alive so that
+        * we don't have to compact quite as often.
+        */
+       if (allowed_deadspace > 0 && q == compact_top) {
+-	size_t sz = pointer_delta(end, q);
+-	if (insert_deadspace(allowed_deadspace, q, sz)) {
++        size_t sz = pointer_delta(end, q);
++        if (insert_deadspace(allowed_deadspace, q, sz)) {
+           size_t compaction_max_size = pointer_delta(compact_end, compact_top);
+ 
+           // This should only happen if a space in the young gen overflows the
+@@ -201,19 +198,19 @@
+           while (sz > compaction_max_size) {
+             // First record the last compact_top
+             dest->set_compaction_top(compact_top);
+-            
++
+             // Advance to the next compaction decorator
+             advance_destination_decorator();
+             dest = destination_decorator();
+-            
++
+             // Update compaction info
+             start_array = dest->start_array();
+             compact_top = dest->compaction_top();
+             compact_end = dest->space()->end();
+             assert(compact_top == dest->space()->bottom(), "Advanced to space already in use");
+             assert(compact_end > compact_top, "Must always be space remaining");
+-	    compaction_max_size = 
+-	      pointer_delta(compact_end, compact_top);
++            compaction_max_size =
++              pointer_delta(compact_end, compact_top);
+           }
+ 
+           // store the forwarding pointer into the mark word
+@@ -222,12 +219,12 @@
+             assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
+           } else {
+             // if the object isn't moving we can just set the mark to the default
+-	    // Don't clear the mark since it's confuses parallel old
+-	    // verification.
+-	    if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
+-              // mark and handle it specially later on.  
++            // Don't clear the mark since it confuses parallel old
++            // verification.
++            if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
++              // mark and handle it specially later on.
+               oop(q)->init_mark();
+-	    }
++            }
+             assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
+           }
+ 
+@@ -235,56 +232,56 @@
+             // Update object start array
+             if (start_array)
+               start_array->allocate_block(compact_top);
+-	  }
++          }
+ 
+           debug_only(MarkSweep::register_live_oop(oop(q), sz));
+           compact_top += sz;
+-          assert(compact_top <= dest->space()->end(), 
+-	    "Exceeding space in destination");
++          assert(compact_top <= dest->space()->end(),
++            "Exceeding space in destination");
++
++          q = end;
++          end_of_live = end;
++          continue;
++        }
++      }
+ 
+-	  q = end;
+-	  end_of_live = end;
+-	  continue;
+-	}
++      /* for the previous LiveRange, record the end of the live objects. */
++      if (liveRange) {
++        liveRange->set_end(q);
+       }
+ 
+-      /* for the previous LiveRange, record the end of the live objects. */  
+-      if (liveRange) {                                                       
+-	liveRange->set_end(q);                                               
+-      }                                                                      
+-                                                                             
+-      /* record the current LiveRange object.                                
+-       * liveRange->start() is overlaid on the mark word.                    
+-       */                                                                    
+-      liveRange = (LiveRange*)q;                                             
+-      liveRange->set_start(end);                                             
+-      liveRange->set_end(end);                                               
+-                                                                             
+-      /* see if this is the first dead region. */                            
+-      if (q < first_dead) {                                                  
+-	first_dead = q;                                                      
+-      }                                                                      
+-                                                                             
+-      /* move on to the next object */                                       
+-      q = end;                                                               
+-    }                                                                        
+-  }                                                                          
+-                                                                             
+-  assert(q == t, "just checking");                                           
+-  if (liveRange != NULL) {                                                   
+-    liveRange->set_end(q);                                                   
+-  }                                                                          
+-  _end_of_live = end_of_live;                                                
+-  if (end_of_live < first_dead) {                                            
+-    first_dead = end_of_live;                                                
+-  }                                                                          
+-  _first_dead = first_dead;                                                  
+-        
++      /* record the current LiveRange object.
++       * liveRange->start() is overlaid on the mark word.
++       */
++      liveRange = (LiveRange*)q;
++      liveRange->set_start(end);
++      liveRange->set_end(end);
++
++      /* see if this is the first dead region. */
++      if (q < first_dead) {
++        first_dead = q;
++      }
++
++      /* move on to the next object */
++      q = end;
++    }
++  }
++
++  assert(q == t, "just checking");
++  if (liveRange != NULL) {
++    liveRange->set_end(q);
++  }
++  _end_of_live = end_of_live;
++  if (end_of_live < first_dead) {
++    first_dead = end_of_live;
++  }
++  _first_dead = first_dead;
++
+   // Update compaction top
+   dest->set_compaction_top(compact_top);
+ }
+ 
+-bool PSMarkSweepDecorator::insert_deadspace(ssize_t& allowed_deadspace_words, 
++bool PSMarkSweepDecorator::insert_deadspace(ssize_t& allowed_deadspace_words,
+                                        HeapWord* q, size_t deadlength) {
+   allowed_deadspace_words -= deadlength;
+   if (allowed_deadspace_words >= 0) {
+@@ -299,11 +296,11 @@
+                                             * (HeapWordSize/sizeof(jint))));
+     } else {
+       assert((int) deadlength == instanceOopDesc::header_size(),
+-	     "size for smallest fake dead object doesn't match");
++             "size for smallest fake dead object doesn't match");
+       oop(q)->set_klass(SystemDictionary::object_klass());
+     }
+     assert((int) deadlength == oop(q)->size(),
+-	   "make sure size for fake dead object match");
++           "make sure size for fake dead object match");
+     // Recall that we required "q == compaction_top".
+     return true;
+   } else {
+@@ -333,11 +330,11 @@
+ 
+       // point all the oops to the new location
+       size_t size = oop(q)->adjust_pointers();
+-	
++
+       debug_only(MarkSweep::check_interior_pointers());
+-      
++
+       debug_only(MarkSweep::validate_live_oop(oop(q), size));
+-      
++
+       q += size;
+     }
+ 
+@@ -391,7 +388,7 @@
+     // mark word during the previous pass, so we can't use is_gc_marked for the
+     // traversal.
+     HeapWord* const end = _first_dead;
+-      
++
+     while (q < end) {
+       size_t size = oop(q)->size();
+       assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
+@@ -400,7 +397,7 @@
+       q += size;
+     }
+ #endif
+-      
++
+     if (_first_dead == t) {
+       q = t;
+     } else {
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)psMarkSweepDecorator.hpp	1.14 07/05/05 17:05:29 JVM"
+-#endif
+ /*
+  * Copyright 2001-2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,10 +19,10 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+-// 
++//
+ // A PSMarkSweepDecorator is used to add "ParallelScavenge" style mark sweep operations
+ // to a MutableSpace.
+ //
+@@ -74,7 +71,3 @@
+   void precompact();
+   void compact(bool mangle_free_space);
+ };
+-
+-
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)psMarkSweep.hpp	1.26 07/05/05 17:05:28 JVM"
+-#endif
+ /*
+  * Copyright 2001-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class PSAdaptiveSizePolicy;
+@@ -66,8 +63,8 @@
+   // If objects are left in eden after a collection, try to move the boundary
+   // and absorb them into the old gen.  Returns true if eden was emptied.
+   static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
+-					 PSYoungGen* young_gen,
+-					 PSOldGen* old_gen);
++                                         PSYoungGen* young_gen,
++                                         PSOldGen* old_gen);
+ 
+   // Reset time since last full gc
+   static void reset_millis_since_last_gc();
+@@ -86,4 +83,3 @@
+   // Time since last full gc (in milliseconds)
+   static jlong millis_since_last_gc();
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)psOldGen.cpp	1.54 07/05/05 17:05:28 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -33,7 +30,7 @@
+ }
+ 
+ PSOldGen::PSOldGen(ReservedSpace rs, size_t alignment,
+-		   size_t initial_size, size_t min_size, size_t max_size,
++                   size_t initial_size, size_t min_size, size_t max_size,
+                    const char* perf_data_name, int level):
+   _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
+   _max_gen_size(max_size)
+@@ -49,7 +46,7 @@
+ {}
+ 
+ void PSOldGen::initialize(ReservedSpace rs, size_t alignment,
+-			  const char* perf_data_name, int level) {
++                          const char* perf_data_name, int level) {
+   initialize_virtual_space(rs, alignment);
+   initialize_work(perf_data_name, level);
+   // The old gen can grow to gen_size_limit().  _reserve reflects only
+@@ -73,7 +70,7 @@
+ 
+   MemRegion limit_reserved((HeapWord*)virtual_space()->low_boundary(),
+     heap_word_size(_max_gen_size));
+-  assert(limit_reserved.byte_size() == _max_gen_size, 
++  assert(limit_reserved.byte_size() == _max_gen_size,
+     "word vs bytes confusion");
+   //
+   // Object start stuff
+@@ -89,7 +86,7 @@
+   //
+ 
+   MemRegion cmr((HeapWord*)virtual_space()->low(),
+-		(HeapWord*)virtual_space()->high());
++                (HeapWord*)virtual_space()->high());
+   Universe::heap()->barrier_set()->resize_covered_region(cmr);
+ 
+   CardTableModRefBS* _ct = (CardTableModRefBS*)Universe::heap()->barrier_set();
+@@ -111,7 +108,7 @@
+   //
+ 
+   _object_space = new MutableSpace();
+-  
++
+   if (_object_space == NULL)
+     vm_exit_during_initialization("Could not allocate an old gen space");
+ 
+@@ -127,7 +124,7 @@
+ 
+   // Generation Counters, generation 'level', 1 subspace
+   _gen_counters = new PSGenerationCounters(perf_data_name, level, 1,
+-					   virtual_space());
++                                           virtual_space());
+   _space_counters = new SpaceCounters(perf_data_name, 0,
+                                       virtual_space()->reserved_size(),
+                                       _object_space, _gen_counters);
+@@ -245,8 +242,8 @@
+   if (result && Verbose && PrintGC) {
+     size_t new_mem_size = virtual_space()->committed_size();
+     size_t old_mem_size = new_mem_size - bytes;
+-    gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by " 
+-                                       SIZE_FORMAT "K to " 
++    gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
++                                       SIZE_FORMAT "K to "
+                                        SIZE_FORMAT "K",
+                     name(), old_mem_size/K, bytes/K, new_mem_size/K);
+   }
+@@ -280,8 +277,8 @@
+     if (Verbose && PrintGC) {
+       size_t new_mem_size = virtual_space()->committed_size();
+       size_t old_mem_size = new_mem_size + bytes;
+-      gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by " 
+-                                         SIZE_FORMAT "K to " 
++      gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by "
++                                         SIZE_FORMAT "K to "
+                                          SIZE_FORMAT "K",
+                       name(), old_mem_size/K, bytes/K, new_mem_size/K);
+     }
+@@ -344,7 +341,7 @@
+ // all heap related data structures, we may cause program failures.
+ void PSOldGen::post_resize() {
+   // First construct a memregion representing the new size
+-  MemRegion new_memregion((HeapWord*)virtual_space()->low(), 
++  MemRegion new_memregion((HeapWord*)virtual_space()->low(),
+     (HeapWord*)virtual_space()->high());
+   size_t new_word_size = new_memregion.word_size();
+ 
+@@ -359,7 +356,7 @@
+     // This cannot be safely tested for, as allocation may be taking
+     // place.
+     MemRegion mangle_region(object_space()->end(), virtual_space_high);
+-    object_space()->mangle_region(mangle_region); 
++    object_space()->mangle_region(mangle_region);
+   }
+ 
+   // ALWAYS do this last!!
+@@ -369,7 +366,7 @@
+     "Sanity");
+ }
+ 
+-size_t PSOldGen::gen_size_limit() { 
++size_t PSOldGen::gen_size_limit() {
+   return _max_gen_size;
+ }
+ 
+@@ -392,16 +389,16 @@
+ void PSOldGen::print_on(outputStream* st) const {
+   st->print(" %-15s", name());
+   if (PrintGCDetails && Verbose) {
+-    st->print(" total " SIZE_FORMAT ", used " SIZE_FORMAT, 
++    st->print(" total " SIZE_FORMAT ", used " SIZE_FORMAT,
+                 capacity_in_bytes(), used_in_bytes());
+   } else {
+-    st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", 
++    st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
+                 capacity_in_bytes()/K, used_in_bytes()/K);
+   }
+   st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
+-		virtual_space()->low_boundary(),
+-		virtual_space()->high(),
+-		virtual_space()->high_boundary());
++                virtual_space()->low_boundary(),
++                virtual_space()->high(),
++                virtual_space()->high_boundary());
+ 
+   st->print("  object"); object_space()->print_on(st);
+ }
+@@ -426,17 +423,17 @@
+ #ifndef PRODUCT
+ 
+ void PSOldGen::space_invariants() {
+-  assert(object_space()->end() == (HeapWord*) virtual_space()->high(), 
++  assert(object_space()->end() == (HeapWord*) virtual_space()->high(),
+     "Space invariant");
+-  assert(object_space()->bottom() == (HeapWord*) virtual_space()->low(), 
++  assert(object_space()->bottom() == (HeapWord*) virtual_space()->low(),
+     "Space invariant");
+-  assert(virtual_space()->low_boundary() <= virtual_space()->low(), 
++  assert(virtual_space()->low_boundary() <= virtual_space()->low(),
+     "Space invariant");
+-  assert(virtual_space()->high_boundary() >= virtual_space()->high(), 
++  assert(virtual_space()->high_boundary() >= virtual_space()->high(),
+     "Space invariant");
+-  assert(virtual_space()->low_boundary() == (char*) _reserved.start(), 
++  assert(virtual_space()->low_boundary() == (char*) _reserved.start(),
+     "Space invariant");
+-  assert(virtual_space()->high_boundary() == (char*) _reserved.end(), 
++  assert(virtual_space()->high_boundary() == (char*) _reserved.end(),
+     "Space invariant");
+   assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
+     "Space invariant");
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)psOldGen.hpp	1.37 07/05/05 17:05:30 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class PSMarkSweepDecorator;
+@@ -44,7 +41,7 @@
+   // Performance Counters
+   PSGenerationCounters*    _gen_counters;
+   SpaceCounters*           _space_counters;
+-  
++
+   // Sizing information, in bytes, set in constructor
+   const size_t _init_gen_size;
+   const size_t _min_gen_size;
+@@ -95,28 +92,28 @@
+  public:
+   // Initialize the generation.
+   PSOldGen(ReservedSpace rs, size_t alignment,
+-	   size_t initial_size, size_t min_size, size_t max_size,
++           size_t initial_size, size_t min_size, size_t max_size,
+            const char* perf_data_name, int level);
+ 
+   PSOldGen(size_t initial_size, size_t min_size, size_t max_size,
+            const char* perf_data_name, int level);
+ 
+   void initialize(ReservedSpace rs, size_t alignment,
+-		  const char* perf_data_name, int level);
++                  const char* perf_data_name, int level);
+   void initialize_virtual_space(ReservedSpace rs, size_t alignment);
+   void initialize_work(const char* perf_data_name, int level);
+ 
+   MemRegion reserved() const                { return _reserved; }
+-  virtual size_t max_gen_size() 	    { return _max_gen_size; }
+-  size_t min_gen_size() 		    { return _min_gen_size; }
++  virtual size_t max_gen_size()             { return _max_gen_size; }
++  size_t min_gen_size()                     { return _min_gen_size; }
+ 
+   // Returns limit on the maximum size of the generation.  This
+   // is the same as _max_gen_size for PSOldGen but need not be
+   // for a derived class.
+   virtual size_t gen_size_limit();
+ 
+-  bool is_in(const void* p) const           { 
+-    return _virtual_space->contains((void *)p); 
++  bool is_in(const void* p) const           {
++    return _virtual_space->contains((void *)p);
+   }
+ 
+   bool is_in_reserved(const void* p) const {
+@@ -170,7 +167,7 @@
+   virtual void print() const;
+   virtual void print_on(outputStream* st) const;
+   void print_used_change(size_t prev_used) const;
+-  
++
+   void verify(bool allow_dirty);
+   void verify_object_start_array();
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)psParallelCompact.cpp	1.61 07/06/08 23:12:00 JVM"
+-#endif
+ /*
+  * Copyright 2005-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -47,14 +44,26 @@
+ 
+ const size_t ParallelCompactData::BlocksPerChunk = ChunkSize / BlockSize;
+ 
+-// The values for Claimed and Completed were chosen simply to make decrementing
+-// them to another valid value 'unlikely.'
+-const size_t ParallelCompactData::ChunkData::Available = 0;
+-const size_t ParallelCompactData::ChunkData::Claimed   = 0x0ffff;
+-const size_t ParallelCompactData::ChunkData::Completed = 0xfffff;
++const ParallelCompactData::ChunkData::chunk_sz_t
++ParallelCompactData::ChunkData::dc_shift = 27;
++
++const ParallelCompactData::ChunkData::chunk_sz_t
++ParallelCompactData::ChunkData::dc_mask = ~0U << dc_shift;
++
++const ParallelCompactData::ChunkData::chunk_sz_t
++ParallelCompactData::ChunkData::dc_one = 0x1U << dc_shift;
++
++const ParallelCompactData::ChunkData::chunk_sz_t
++ParallelCompactData::ChunkData::los_mask = ~dc_mask;
++
++const ParallelCompactData::ChunkData::chunk_sz_t
++ParallelCompactData::ChunkData::dc_claimed = 0x8U << dc_shift;
++
++const ParallelCompactData::ChunkData::chunk_sz_t
++ParallelCompactData::ChunkData::dc_completed = 0xcU << dc_shift;
+ 
+ #ifdef ASSERT
+-short	ParallelCompactData::BlockData::_cur_phase = 0;
++short   ParallelCompactData::BlockData::_cur_phase = 0;
+ #endif
+ 
+ SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
+@@ -67,9 +76,9 @@
+ double PSParallelCompact::_dwl_std_dev;
+ double PSParallelCompact::_dwl_first_term;
+ double PSParallelCompact::_dwl_adjustment;
+-#ifdef	ASSERT
++#ifdef  ASSERT
+ bool   PSParallelCompact::_dwl_initialized = false;
+-#endif	// #ifdef ASSERT
++#endif  // #ifdef ASSERT
+ 
+ #ifdef VALIDATE_MARK_SWEEP
+ GrowableArray<oop*>*    PSParallelCompact::_root_refs_stack = NULL;
+@@ -146,32 +155,32 @@
+       assert(beg_bit > end_bit, "bit set in middle of an obj");
+     } else {
+       if (addr >= boundaries[bidx] && addr < boundaries[bidx + 1]) {
+-	// a dead object in the current space.
+-	oop obj = (oop)addr;
+-	end_bit = _mark_bitmap.addr_to_bit(addr + obj->size());
+-	assert(!obj->is_gc_marked(), "obj marked in header, not in bitmap");
+-	tmp_bit = beg_bit + 1;
+-	beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, end_bit);
+-	assert(beg_bit == end_bit, "beg bit set in unmarked obj");
+-	beg_bit = _mark_bitmap.find_obj_end(tmp_bit, end_bit);
+-	assert(beg_bit == end_bit, "end bit set in unmarked obj");
++        // a dead object in the current space.
++        oop obj = (oop)addr;
++        end_bit = _mark_bitmap.addr_to_bit(addr + obj->size());
++        assert(!obj->is_gc_marked(), "obj marked in header, not in bitmap");
++        tmp_bit = beg_bit + 1;
++        beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, end_bit);
++        assert(beg_bit == end_bit, "beg bit set in unmarked obj");
++        beg_bit = _mark_bitmap.find_obj_end(tmp_bit, end_bit);
++        assert(beg_bit == end_bit, "end bit set in unmarked obj");
+       } else if (addr < boundaries[bidx + 2]) {
+-	// addr is between top in the current space and bottom in the next.
+-	end_bit = beg_bit + pointer_delta(boundaries[bidx + 2], addr);
+-	tmp_bit = beg_bit;
+-	beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, end_bit);
+-	assert(beg_bit == end_bit, "beg bit set above top");
+-	beg_bit = _mark_bitmap.find_obj_end(tmp_bit, end_bit);
+-	assert(beg_bit == end_bit, "end bit set above top");
+-	bidx += 2;
++        // addr is between top in the current space and bottom in the next.
++        end_bit = beg_bit + pointer_delta(boundaries[bidx + 2], addr);
++        tmp_bit = beg_bit;
++        beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, end_bit);
++        assert(beg_bit == end_bit, "beg bit set above top");
++        beg_bit = _mark_bitmap.find_obj_end(tmp_bit, end_bit);
++        assert(beg_bit == end_bit, "end bit set above top");
++        bidx += 2;
+       } else if (bidx < bidx_max - 2) {
+-	bidx += 2; // ???
++        bidx += 2; // ???
+       } else {
+-	tmp_bit = beg_bit;
+-	beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, last_bit);
+-	assert(beg_bit == last_bit, "beg bit set outside heap");
+-	beg_bit = _mark_bitmap.find_obj_end(tmp_bit, last_bit);
+-	assert(beg_bit == last_bit, "end bit set outside heap");
++        tmp_bit = beg_bit;
++        beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, last_bit);
++        assert(beg_bit == last_bit, "beg bit set outside heap");
++        beg_bit = _mark_bitmap.find_obj_end(tmp_bit, last_bit);
++        assert(beg_bit == last_bit, "end bit set outside heap");
+       }
+     }
+   } while (beg_bit < last_bit);
+@@ -191,40 +200,40 @@
+   for (unsigned int id = 0; id < last_space_id; ++id) {
+     const MutableSpace* space = _space_info[id].space();
+     tty->print_cr("%u %s "
+-		  SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10") " "
+-		  SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10") " ",
+-		  id, space_names[id],
+-		  summary_data().addr_to_chunk_idx(space->bottom()),
+-		  summary_data().addr_to_chunk_idx(space->top()),
+-		  summary_data().addr_to_chunk_idx(space->end()),
+-		  summary_data().addr_to_chunk_idx(_space_info[id].new_top()));
++                  SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10") " "
++                  SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10") " ",
++                  id, space_names[id],
++                  summary_data().addr_to_chunk_idx(space->bottom()),
++                  summary_data().addr_to_chunk_idx(space->top()),
++                  summary_data().addr_to_chunk_idx(space->end()),
++                  summary_data().addr_to_chunk_idx(_space_info[id].new_top()));
+   }
+ }
+ 
+ void
+ print_generic_summary_chunk(size_t i, const ParallelCompactData::ChunkData* c)
+ {
+-#define	CHUNK_IDX_FORMAT	SIZE_FORMAT_W("7")
+-#define	CHUNK_DATA_FORMAT	SIZE_FORMAT_W("5")
++#define CHUNK_IDX_FORMAT        SIZE_FORMAT_W("7")
++#define CHUNK_DATA_FORMAT       SIZE_FORMAT_W("5")
+ 
+   ParallelCompactData& sd = PSParallelCompact::summary_data();
+   size_t dci = c->destination() ? sd.addr_to_chunk_idx(c->destination()) : 0;
+   tty->print_cr(CHUNK_IDX_FORMAT " " PTR_FORMAT " "
+-		CHUNK_IDX_FORMAT " " PTR_FORMAT " "
+-		CHUNK_DATA_FORMAT " " CHUNK_DATA_FORMAT " "
+-		CHUNK_DATA_FORMAT " " CHUNK_IDX_FORMAT " %d",
+-		i, c->data_location(), dci, c->destination(),
+-		c->partial_obj_size(), c->live_obj_size(),
+-		c->data_size(), c->source_chunk(), c->destination_count());
++                CHUNK_IDX_FORMAT " " PTR_FORMAT " "
++                CHUNK_DATA_FORMAT " " CHUNK_DATA_FORMAT " "
++                CHUNK_DATA_FORMAT " " CHUNK_IDX_FORMAT " %d",
++                i, c->data_location(), dci, c->destination(),
++                c->partial_obj_size(), c->live_obj_size(),
++                c->data_size(), c->source_chunk(), c->destination_count());
+ 
+-#undef	CHUNK_IDX_FORMAT
+-#undef	CHUNK_DATA_FORMAT
++#undef  CHUNK_IDX_FORMAT
++#undef  CHUNK_DATA_FORMAT
+ }
+ 
+ void
+ print_generic_summary_data(ParallelCompactData& summary_data,
+-			   HeapWord* const beg_addr,
+-			   HeapWord* const end_addr)
++                           HeapWord* const beg_addr,
++                           HeapWord* const end_addr)
+ {
+   size_t total_words = 0;
+   size_t i = summary_data.addr_to_chunk_idx(beg_addr);
+@@ -246,32 +255,32 @@
+ 
+ void
+ print_generic_summary_data(ParallelCompactData& summary_data,
+-			   SpaceInfo* space_info)
++                           SpaceInfo* space_info)
+ {
+   for (unsigned int id = 0; id < PSParallelCompact::last_space_id; ++id) {
+     const MutableSpace* space = space_info[id].space();
+     print_generic_summary_data(summary_data, space->bottom(),
+-			       MAX2(space->top(), space_info[id].new_top()));
++                               MAX2(space->top(), space_info[id].new_top()));
+   }
+ }
+ 
+ void
+ print_initial_summary_chunk(size_t i,
+-			    const ParallelCompactData::ChunkData* c,
+-			    bool newline = true)
++                            const ParallelCompactData::ChunkData* c,
++                            bool newline = true)
+ {
+   tty->print(SIZE_FORMAT_W("5") " " PTR_FORMAT " "
+-	     SIZE_FORMAT_W("5") " " SIZE_FORMAT_W("5") " "
+-	     SIZE_FORMAT_W("5") " " SIZE_FORMAT_W("5") " %d",
+-	     i, c->destination(),
+-	     c->partial_obj_size(), c->live_obj_size(),
+-	     c->data_size(), c->source_chunk(), c->destination_count());
++             SIZE_FORMAT_W("5") " " SIZE_FORMAT_W("5") " "
++             SIZE_FORMAT_W("5") " " SIZE_FORMAT_W("5") " %d",
++             i, c->destination(),
++             c->partial_obj_size(), c->live_obj_size(),
++             c->data_size(), c->source_chunk(), c->destination_count());
+   if (newline) tty->cr();
+ }
+ 
+ void
+ print_initial_summary_data(ParallelCompactData& summary_data,
+-			   const MutableSpace* space) {
++                           const MutableSpace* space) {
+   if (space->top() == space->bottom()) {
+     return;
+   }
+@@ -310,15 +319,15 @@
+     const double reclaimed_ratio = double(dead_to_right) / live_to_right;
+ 
+     if (reclaimed_ratio > max_reclaimed_ratio) {
+-	    max_reclaimed_ratio = reclaimed_ratio;
+-	    max_reclaimed_ratio_chunk = i;
+-	    max_dead_to_right = dead_to_right;
+-	    max_live_to_right = live_to_right;
++            max_reclaimed_ratio = reclaimed_ratio;
++            max_reclaimed_ratio_chunk = i;
++            max_dead_to_right = dead_to_right;
++            max_live_to_right = live_to_right;
+     }
+ 
+     print_initial_summary_chunk(i, c, false);
+     tty->print_cr(" %12.10f " SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10"),
+-		  reclaimed_ratio, dead_to_right, live_to_right);
++                  reclaimed_ratio, dead_to_right, live_to_right);
+ 
+     live_to_right -= c->data_size();
+     ++i;
+@@ -330,14 +339,14 @@
+   }
+ 
+   tty->print_cr("max:  " SIZE_FORMAT_W("4") " d2r=" SIZE_FORMAT_W("10") " "
+-		"l2r=" SIZE_FORMAT_W("10") " max_ratio=%14.12f",
+-		max_reclaimed_ratio_chunk, max_dead_to_right,
+-		max_live_to_right, max_reclaimed_ratio);
++                "l2r=" SIZE_FORMAT_W("10") " max_ratio=%14.12f",
++                max_reclaimed_ratio_chunk, max_dead_to_right,
++                max_live_to_right, max_reclaimed_ratio);
+ }
+ 
+ void
+ print_initial_summary_data(ParallelCompactData& summary_data,
+-			   SpaceInfo* space_info) {
++                           SpaceInfo* space_info) {
+   unsigned int id = PSParallelCompact::perm_space_id;
+   const MutableSpace* space;
+   do {
+@@ -350,14 +359,14 @@
+     print_generic_summary_data(summary_data, space->bottom(), space->top());
+   } while (++id < PSParallelCompact::last_space_id);
+ }
+-#endif	// #ifndef PRODUCT
++#endif  // #ifndef PRODUCT
+ 
+-#ifdef	ASSERT
++#ifdef  ASSERT
+ size_t add_obj_count;
+ size_t add_obj_size;
+ size_t mark_bitmap_count;
+ size_t mark_bitmap_size;
+-#endif	// #ifdef ASSERT
++#endif  // #ifdef ASSERT
+ 
+ ParallelCompactData::ParallelCompactData()
+ {
+@@ -379,9 +388,9 @@
+   DEBUG_ONLY(_region_end = _region_start + region_size;)
+ 
+   assert(chunk_align_down(_region_start) == _region_start,
+-	 "region start not aligned");
++         "region start not aligned");
+   assert((region_size & ChunkSizeOffsetMask) == 0,
+-	 "region size not a multiple of ChunkSize");
++         "region size not a multiple of ChunkSize");
+ 
+   bool result = initialize_chunk_data(region_size);
+ 
+@@ -397,11 +406,17 @@
+ PSVirtualSpace*
+ ParallelCompactData::create_vspace(size_t count, size_t element_size)
+ {
++  const size_t raw_bytes = count * element_size;
++  const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
+   const size_t granularity = os::vm_allocation_granularity();
+-  const size_t bytes = align_size_up(count * element_size, granularity);
++  const size_t bytes = align_size_up(raw_bytes, MAX2(page_sz, granularity));
+ 
+-  ReservedSpace rs(bytes);
+-  PSVirtualSpace* vspace = new PSVirtualSpace(rs, os::vm_page_size());
++  const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
++    MAX2(page_sz, granularity);
++  ReservedSpace rs(bytes, rs_align, false);
++  os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(),
++                       rs.size());
++  PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
+   if (vspace != 0) {
+     if (vspace->expand_by(bytes)) {
+       return vspace;
+@@ -517,14 +532,13 @@
+   HeapWord* addr = beg;
+   while (cur_chunk < end_chunk) {
+     _chunk_data[cur_chunk].set_destination(addr);
+-    _chunk_data[cur_chunk].set_destination_count(ChunkData::Available);
++    _chunk_data[cur_chunk].set_destination_count(0);
+     _chunk_data[cur_chunk].set_source_chunk(cur_chunk);
+     _chunk_data[cur_chunk].set_data_location(addr);
+ 
+     // Update live_obj_size so the chunk appears completely full.
+     size_t live_size = ChunkSize - _chunk_data[cur_chunk].partial_obj_size();
+     _chunk_data[cur_chunk].set_live_obj_size(live_size);
+-    _chunk_data[cur_chunk].set_obj_not_updated(NULL);
+ 
+     ++cur_chunk;
+     addr += ChunkSize;
+@@ -532,20 +546,20 @@
+ }
+ 
+ bool ParallelCompactData::summarize(HeapWord* target_beg, HeapWord* target_end,
+-				    HeapWord* source_beg, HeapWord* source_end,
+-				    HeapWord** target_next,
+-				    HeapWord** source_next) {
++                                    HeapWord* source_beg, HeapWord* source_end,
++                                    HeapWord** target_next,
++                                    HeapWord** source_next) {
+   // This is too strict.
+   // assert(chunk_offset(source_beg) == 0, "not ChunkSize aligned");
+ 
+   if (TraceParallelOldGCSummaryPhase) {
+     tty->print_cr("tb=" PTR_FORMAT " te=" PTR_FORMAT " "
+-		  "sb=" PTR_FORMAT " se=" PTR_FORMAT " "
+-		  "tn=" PTR_FORMAT " sn=" PTR_FORMAT,
+-		  target_beg, target_end,
+-		  source_beg, source_end,
+-		  target_next != 0 ? *target_next : (HeapWord*) 0,
+-		  source_next != 0 ? *source_next : (HeapWord*) 0);
++                  "sb=" PTR_FORMAT " se=" PTR_FORMAT " "
++                  "tn=" PTR_FORMAT " sn=" PTR_FORMAT,
++                  target_beg, target_end,
++                  source_beg, source_end,
++                  target_next != 0 ? *target_next : (HeapWord*) 0,
++                  source_next != 0 ? *source_next : (HeapWord*) 0);
+   }
+ 
+   size_t cur_chunk = addr_to_chunk_idx(source_beg);
+@@ -555,9 +569,9 @@
+   while (cur_chunk < end_chunk) {
+     size_t words = _chunk_data[cur_chunk].data_size();
+ 
+-#if	1
++#if     1
+     assert(pointer_delta(target_end, dest_addr) >= words,
+-	   "source region does not fit into target region");
++           "source region does not fit into target region");
+ #else
+     // XXX - need some work on the corner cases here.  If the chunk does not
+     // fit, then must either make sure any partial_obj from the chunk fits, or
+@@ -568,7 +582,7 @@
+       *source_next = chunk_to_addr(cur_chunk);
+       return false;
+     }
+-#endif	// #if 1
++#endif  // #if 1
+ 
+     _chunk_data[cur_chunk].set_destination(dest_addr);
+ 
+@@ -585,41 +599,41 @@
+       HeapWord* const last_addr = dest_addr + words - 1;
+       const size_t dest_chunk_1 = addr_to_chunk_idx(dest_addr);
+       const size_t dest_chunk_2 = addr_to_chunk_idx(last_addr);
+-#if	0
++#if     0
+       // Initially assume that the destination chunks will be the same and
+       // adjust the value below if necessary.  Under this assumption, if
+       // cur_chunk == dest_chunk_2, then cur_chunk will be compacted completely
+       // into itself.
+-      size_t destination_count = cur_chunk == dest_chunk_2 ? 0 : 1;
++      uint destination_count = cur_chunk == dest_chunk_2 ? 0 : 1;
+       if (dest_chunk_1 != dest_chunk_2) {
+-	// Destination chunks differ; adjust destination_count.
+-	destination_count += 1;
+-	// Data from cur_chunk will be copied to the start of dest_chunk_2.
+-	_chunk_data[dest_chunk_2].set_source_chunk(cur_chunk);
++        // Destination chunks differ; adjust destination_count.
++        destination_count += 1;
++        // Data from cur_chunk will be copied to the start of dest_chunk_2.
++        _chunk_data[dest_chunk_2].set_source_chunk(cur_chunk);
+       } else if (chunk_offset(dest_addr) == 0) {
+-	// Data from cur_chunk will be copied to the start of the destination
+-	// chunk.
+-	_chunk_data[dest_chunk_1].set_source_chunk(cur_chunk);
++        // Data from cur_chunk will be copied to the start of the destination
++        // chunk.
++        _chunk_data[dest_chunk_1].set_source_chunk(cur_chunk);
+       }
+ #else
+       // Initially assume that the destination chunks will be different and
+       // adjust the value below if necessary.  Under this assumption, if
+       // cur_chunk == dest_chunk2, then cur_chunk will be compacted partially
+       // into dest_chunk_1 and partially into itself.
+-      size_t destination_count = cur_chunk == dest_chunk_2 ? 1 : 2;
++      uint destination_count = cur_chunk == dest_chunk_2 ? 1 : 2;
+       if (dest_chunk_1 != dest_chunk_2) {
+-	// Data from cur_chunk will be copied to the start of dest_chunk_2.
+-	_chunk_data[dest_chunk_2].set_source_chunk(cur_chunk);
++        // Data from cur_chunk will be copied to the start of dest_chunk_2.
++        _chunk_data[dest_chunk_2].set_source_chunk(cur_chunk);
+       } else {
+-	// Destination chunks are the same; adjust destination_count.
+-	destination_count -= 1;
+-	if (chunk_offset(dest_addr) == 0) {
+-	  // Data from cur_chunk will be copied to the start of the destination
+-	  // chunk.
+-	  _chunk_data[dest_chunk_1].set_source_chunk(cur_chunk);
+-	}
++        // Destination chunks are the same; adjust destination_count.
++        destination_count -= 1;
++        if (chunk_offset(dest_addr) == 0) {
++          // Data from cur_chunk will be copied to the start of the destination
++          // chunk.
++          _chunk_data[dest_chunk_1].set_source_chunk(cur_chunk);
++        }
+       }
+-#endif	// #if 0
++#endif  // #if 0
+ 
+       _chunk_data[cur_chunk].set_destination_count(destination_count);
+       _chunk_data[cur_chunk].set_data_location(chunk_to_addr(cur_chunk));
+@@ -633,11 +647,6 @@
+   return true;
+ }
+ 
+-void ParallelCompactData::set_obj_not_updated(HeapWord* moved_obj) {
+-  size_t chunk_index = addr_to_chunk_idx(moved_obj);
+-  chunk(chunk_index)->set_obj_not_updated(moved_obj);
+-}
+-
+ bool ParallelCompactData::partial_obj_ends_in_block(size_t block_index) {
+   HeapWord* block_addr = block_to_addr(block_index);
+   HeapWord* block_end_addr = block_addr + BlockSize;
+@@ -655,39 +664,6 @@
+   return false;
+ }
+ 
+-HeapWord*
+-ParallelCompactData::first_live_or_end_in_chunk_range(size_t chunk_index_start,
+-						      size_t chunk_index_end) {
+-  HeapWord* const end_addr = chunk_to_addr(chunk_index_end);
+-
+-  // A live object may entend into the chunk; skip over it.
+-  HeapWord* poe_addr = partial_obj_end(chunk_index_start);
+-  if (poe_addr >= end_addr) {
+-    return end_addr;
+-  }
+-
+-  // Search the bitmap for the next object.
+-  typedef ParMarkBitMap::idx_t idx_t;
+-  ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
+-  const idx_t beg_bit = bitmap->addr_to_bit(poe_addr);
+-  const idx_t end_bit = bitmap->addr_to_bit(end_addr);
+-  const idx_t res_bit = bitmap->find_obj_beg(beg_bit, end_bit);
+-  return res_bit < end_bit ? bitmap->bit_to_addr(res_bit) : end_addr;
+-}
+-
+-HeapWord* ParallelCompactData::first_live_or_end_in_chunk(size_t chunk_index) {
+-  // Has the first live for the chunk already been found once?
+-  HeapWord* result = NULL;
+-  HeapWord* first_addr = chunk_to_addr(chunk_index);
+-  if (chunk(chunk_index)->first_live_obj() >= first_addr) {
+-    result = chunk(chunk_index)->first_live_obj();
+-  } else {
+-    result = first_live_or_end_in_chunk_range(chunk_index, chunk_index + 1);
+-    chunk(chunk_index)->set_first_live_obj(result);
+-  }
+-  return result;
+-}
+-
+ HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
+   HeapWord* result = NULL;
+   if (UseParallelOldGCChunkPointerCalc) {
+@@ -733,7 +709,7 @@
+   }
+ 
+   // The new location of the object is
+-  //	chunk destination +
++  //    chunk destination +
+   //    size of the partial object extending onto the chunk +
+   //    sizes of the live objects in the Chunk that are to the left of addr
+   const size_t partial_obj_size = chunk_ptr->partial_obj_size();
+@@ -776,7 +752,7 @@
+   }
+ 
+   // The new location of the object is
+-  //	chunk destination +
++  //    chunk destination +
+   //    block offset +
+   //    sizes of the live objects in the Block that are to the left of addr
+   const size_t block_offset = addr_to_block_ptr(addr)->offset();
+@@ -802,7 +778,7 @@
+   return updated_klass;
+ }
+ 
+-#ifdef	ASSERT
++#ifdef  ASSERT
+ void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
+ {
+   const size_t* const beg = (const size_t*)vspace->committed_low_addr();
+@@ -817,7 +793,7 @@
+   verify_clear(_chunk_vspace);
+   verify_clear(_block_vspace);
+ }
+-#endif	// #ifdef ASSERT
++#endif  // #ifdef ASSERT
+ 
+ #ifdef NOT_PRODUCT
+ ParallelCompactData::ChunkData* debug_chunk(size_t chunk_index) {
+@@ -852,7 +828,7 @@
+ }
+ 
+ void PSParallelCompact::mark_and_follow(ParCompactionManager* cm,
+-					oop* p) {
++                                        oop* p) {
+   assert(Universe::heap()->is_in_reserved(p),
+          "we should only be traversing objects here");
+   oop m = *p;
+@@ -866,7 +842,7 @@
+ // Anything associated with this variable is temporary.
+ 
+ void PSParallelCompact::mark_and_push_internal(ParCompactionManager* cm,
+-					       oop* p) {
++                                               oop* p) {
+   // Push marked object, contents will be followed later
+   oop m = *p;
+   if (mark_obj(m)) {
+@@ -942,7 +918,7 @@
+   _space_info[perm_space_id].set_min_dense_prefix(perm_space->top());
+   if (TraceParallelOldGCDensePrefix) {
+     tty->print_cr("perm min_dense_prefix=" PTR_FORMAT,
+-		  _space_info[perm_space_id].min_dense_prefix());
++                  _space_info[perm_space_id].min_dense_prefix());
+   }
+ }
+ 
+@@ -984,7 +960,7 @@
+ 
+ void
+ PSParallelCompact::clear_data_covering_space(SpaceId id)
+-{ 
++{
+   // At this point, top is the value before GC, new_top() is the value that will
+   // be set at the end of GC.  The marking bitmap is cleared to top; nothing
+   // should be marked above top.  The summary data is cleared to the larger of
+@@ -1071,7 +1047,7 @@
+   bool eden_empty = eden_space->is_empty();
+   if (!eden_empty) {
+     eden_empty = absorb_live_data_from_eden(heap->size_policy(),
+-					    heap->young_gen(), heap->old_gen());
++                                            heap->young_gen(), heap->old_gen());
+   }
+ 
+   // Update heap occupancy information which is used as input to the soft ref
+@@ -1108,7 +1084,7 @@
+ 
+ HeapWord*
+ PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
+-						    bool maximum_compaction)
++                                                    bool maximum_compaction)
+ {
+   const size_t chunk_size = ParallelCompactData::ChunkSize;
+   const ParallelCompactData& sd = summary_data();
+@@ -1146,11 +1122,11 @@
+ 
+   if (TraceParallelOldGCDensePrefix) {
+     tty->print_cr("cur_dens=%5.3f dw_dens=%5.3f dw_goal=" SIZE_FORMAT,
+-		  cur_density, deadwood_density, deadwood_goal);
++                  cur_density, deadwood_density, deadwood_goal);
+     tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
+-		  "space_cap=" SIZE_FORMAT,
+-		  space_live, space_used,
+-		  space_capacity);
++                  "space_cap=" SIZE_FORMAT,
++                  space_live, space_used,
++                  space_capacity);
+   }
+ 
+   // XXX - Use binary search?
+@@ -1162,9 +1138,9 @@
+     const size_t cur_deadwood = pointer_delta(dense_prefix, chunk_destination);
+     if (TraceParallelOldGCDensePrefix && Verbose) {
+       tty->print_cr("c#=" SIZE_FORMAT_W("04") " dst=" PTR_FORMAT " "
+-		    "dp=" SIZE_FORMAT_W("08") " " "cdw=" SIZE_FORMAT_W("08"),
+-		    sd.chunk(cp), chunk_destination,
+-		    dense_prefix, cur_deadwood);
++                    "dp=" SIZE_FORMAT_W("08") " " "cdw=" SIZE_FORMAT_W("08"),
++                    sd.chunk(cp), chunk_destination,
++                    dense_prefix, cur_deadwood);
+     }
+ 
+     if (cur_deadwood >= deadwood_goal) {
+@@ -1178,23 +1154,23 @@
+       size_t live_to_right = space_live - live_to_left;
+       double density_to_right = double(live_to_right) / space_to_right;
+       while (cp > full_cp) {
+-	--cp;
+-	const size_t prev_chunk_live_to_right = live_to_right - cp->data_size();
+-	const size_t prev_chunk_space_to_right = space_to_right + chunk_size;
+-	double prev_chunk_density_to_right =
+-	  double(prev_chunk_live_to_right) / prev_chunk_space_to_right;
+-	if (density_to_right <= prev_chunk_density_to_right) {
+-	  return dense_prefix;
+-	}
+-	if (TraceParallelOldGCDensePrefix && Verbose) {
+-	  tty->print_cr("backing up from c=" SIZE_FORMAT_W("4") " d2r=%10.8f "
+-			"pc_d2r=%10.8f", sd.chunk(cp), density_to_right,
+-			prev_chunk_density_to_right);
+-	}
+-	dense_prefix -= chunk_size;
+-	live_to_right = prev_chunk_live_to_right;
+-	space_to_right = prev_chunk_space_to_right;
+-	density_to_right = prev_chunk_density_to_right;
++        --cp;
++        const size_t prev_chunk_live_to_right = live_to_right - cp->data_size();
++        const size_t prev_chunk_space_to_right = space_to_right + chunk_size;
++        double prev_chunk_density_to_right =
++          double(prev_chunk_live_to_right) / prev_chunk_space_to_right;
++        if (density_to_right <= prev_chunk_density_to_right) {
++          return dense_prefix;
++        }
++        if (TraceParallelOldGCDensePrefix && Verbose) {
++          tty->print_cr("backing up from c=" SIZE_FORMAT_W("4") " d2r=%10.8f "
++                        "pc_d2r=%10.8f", sd.chunk(cp), density_to_right,
++                        prev_chunk_density_to_right);
++        }
++        dense_prefix -= chunk_size;
++        live_to_right = prev_chunk_live_to_right;
++        space_to_right = prev_chunk_space_to_right;
++        density_to_right = prev_chunk_density_to_right;
+       }
+       return dense_prefix;
+     }
+@@ -1206,11 +1182,11 @@
+   return dense_prefix;
+ }
+ 
+-#ifndef	PRODUCT
++#ifndef PRODUCT
+ void PSParallelCompact::print_dense_prefix_stats(const char* const algorithm,
+-						 const SpaceId id,
+-						 const bool maximum_compaction,
+-						 HeapWord* const addr)
++                                                 const SpaceId id,
++                                                 const bool maximum_compaction,
++                                                 HeapWord* const addr)
+ {
+   const size_t chunk_idx = summary_data().addr_to_chunk_idx(addr);
+   ChunkData* const cp = summary_data().chunk(chunk_idx);
+@@ -1225,28 +1201,28 @@
+   const size_t dead_to_right = space->top() - addr - live_to_right;
+ 
+   tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W("05") " "
+-		"spl=" SIZE_FORMAT " "
+-		"d2l=" SIZE_FORMAT " d2l%%=%6.4f "
+-		"d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT
+-		" ratio=%10.8f",
+-		algorithm, addr, chunk_idx,
+-		space_live,
+-		dead_to_left, dead_to_left_pct,
+-		dead_to_right, live_to_right,
+-		double(dead_to_right) / live_to_right);
++                "spl=" SIZE_FORMAT " "
++                "d2l=" SIZE_FORMAT " d2l%%=%6.4f "
++                "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT
++                " ratio=%10.8f",
++                algorithm, addr, chunk_idx,
++                space_live,
++                dead_to_left, dead_to_left_pct,
++                dead_to_right, live_to_right,
++                double(dead_to_right) / live_to_right);
+ }
+-#endif	// #ifndef PRODUCT
++#endif  // #ifndef PRODUCT
+ 
+ // Return a fraction indicating how much of the generation can be treated as
+ // "dead wood" (i.e., not reclaimed).  The function uses a normal distribution
+ // based on the density of live objects in the generation to determine a limit,
+ // which is then adjusted so the return value is min_percent when the density is
+ // 1.
+-// 
++//
+ // The following table shows some return values for a different values of the
+ // standard deviation (ParallelOldDeadWoodLimiterStdDev); the mean is 0.5 and
+ // min_percent is 1.
+-// 
++//
+ //                          fraction allowed as dead wood
+ //         -----------------------------------------------------------------
+ // density std_dev=70 std_dev=75 std_dev=80 std_dev=85 std_dev=90 std_dev=95
+@@ -1281,7 +1257,7 @@
+   const double raw_limit = normal_distribution(density);
+ 
+   // Adjust the raw limit so it becomes the minimum when the density is 1.
+-  // 
++  //
+   // First subtract the adjustment value (which is simply the precomputed value
+   // normal_distribution(1.0)); this yields a value of 0 when the density is 1.
+   // Then add the minimum value, so the minimum is returned when the density is
+@@ -1293,7 +1269,7 @@
+ 
+ ParallelCompactData::ChunkData*
+ PSParallelCompact::first_dead_space_chunk(const ChunkData* beg,
+-					  const ChunkData* end)
++                                          const ChunkData* end)
+ {
+   const size_t chunk_size = ParallelCompactData::ChunkSize;
+   ParallelCompactData& sd = summary_data();
+@@ -1323,8 +1299,8 @@
+ 
+ ParallelCompactData::ChunkData*
+ PSParallelCompact::dead_wood_limit_chunk(const ChunkData* beg,
+-					 const ChunkData* end,
+-					 size_t dead_words)
++                                         const ChunkData* end,
++                                         size_t dead_words)
+ {
+   ParallelCompactData& sd = summary_data();
+   size_t left = sd.chunk(beg);
+@@ -1356,9 +1332,9 @@
+ // of each space into itself, and before final summarization.
+ inline double
+ PSParallelCompact::reclaimed_ratio(const ChunkData* const cp,
+-				   HeapWord* const bottom,
+-				   HeapWord* const top,
+-				   HeapWord* const new_top)
++                                   HeapWord* const bottom,
++                                   HeapWord* const top,
++                                   HeapWord* const new_top)
+ {
+   ParallelCompactData& sd = summary_data();
+ 
+@@ -1383,7 +1359,7 @@
+ 
+ // Return the address of the end of the dense prefix, a.k.a. the start of the
+ // compacted region.  The address is always on a chunk boundary.
+-// 
++//
+ // Completely full chunks at the left are skipped, since no compaction can occur
+ // in those chunks.  Then the maximum amount of dead wood to allow is computed,
+ // based on the density (amount live / capacity) of the generation; the chunk
+@@ -1393,7 +1369,7 @@
+ // is selected.
+ HeapWord*
+ PSParallelCompact::compute_dense_prefix(const SpaceId id,
+-					bool maximum_compaction)
++                                        bool maximum_compaction)
+ {
+   const size_t chunk_size = ParallelCompactData::ChunkSize;
+   const ParallelCompactData& sd = summary_data();
+@@ -1412,9 +1388,9 @@
+   // of the dense prefix.
+   const ChunkData* const full_cp = first_dead_space_chunk(beg_cp, new_top_cp);
+   assert(full_cp->destination() == sd.chunk_to_addr(full_cp) ||
+-	 space->is_empty(), "no dead space allowed to the left");
++         space->is_empty(), "no dead space allowed to the left");
+   assert(full_cp->data_size() < chunk_size || full_cp == new_top_cp - 1,
+-	 "chunk must have dead space");
++         "chunk must have dead space");
+ 
+   // The gc number is saved whenever a maximum compaction is done, and used to
+   // determine when the maximum compaction interval has expired.  This avoids
+@@ -1434,21 +1410,21 @@
+ 
+   const double density = double(space_live) / double(space_capacity);
+   const size_t min_percent_free =
+-	  id == perm_space_id ? PermMarkSweepDeadRatio : MarkSweepDeadRatio;
++          id == perm_space_id ? PermMarkSweepDeadRatio : MarkSweepDeadRatio;
+   const double limiter = dead_wood_limiter(density, min_percent_free);
+   const size_t dead_wood_max = space_used - space_live;
+   const size_t dead_wood_limit = MIN2(size_t(space_capacity * limiter),
+-				      dead_wood_max);
++                                      dead_wood_max);
+ 
+   if (TraceParallelOldGCDensePrefix) {
+     tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
+-		  "space_cap=" SIZE_FORMAT,
+-		  space_live, space_used,
+-		  space_capacity);
++                  "space_cap=" SIZE_FORMAT,
++                  space_live, space_used,
++                  space_capacity);
+     tty->print_cr("dead_wood_limiter(%6.4f, %d)=%6.4f "
+-		  "dead_wood_max=" SIZE_FORMAT " dead_wood_limit=" SIZE_FORMAT,
+-		  density, min_percent_free, limiter,
+-		  dead_wood_max, dead_wood_limit);
++                  "dead_wood_max=" SIZE_FORMAT " dead_wood_limit=" SIZE_FORMAT,
++                  density, min_percent_free, limiter,
++                  dead_wood_max, dead_wood_limit);
+   }
+ 
+   // Locate the chunk with the desired amount of dead space to the left.
+@@ -1467,7 +1443,7 @@
+     }
+   }
+ 
+-#if	0
++#if     0
+   // Something to consider:  if the chunk with the best ratio is 'close to' the
+   // first chunk w/free space, choose the first chunk with free space
+   // ("first-free").  The first-free chunk is usually near the start of the
+@@ -1477,7 +1453,7 @@
+     _maximum_compaction_gc_num = total_invocations();
+     best_cp = full_cp;
+   }
+-#endif	// #if 0
++#endif  // #if 0
+ 
+   return sd.chunk_to_addr(best_cp);
+ }
+@@ -1487,8 +1463,8 @@
+   for (unsigned int i = 0; i < last_space_id; ++i) {
+     const MutableSpace* space = _space_info[i].space();
+     bool result = _summary_data.summarize(space->bottom(), space->end(),
+-					  space->bottom(), space->top(),
+-					  _space_info[i].new_top_addr());
++                                          space->bottom(), space->top(),
++                                          _space_info[i].new_top_addr());
+     assert(result, "should never fail");
+     _space_info[i].set_dense_prefix(space->bottom());
+   }
+@@ -1503,7 +1479,7 @@
+     // Only enough dead space is filled so that any remaining dead space to the
+     // left is larger than the minimum filler object.  (The remainder is filled
+     // during the copy/update phase.)
+-    // 
++    //
+     // The size of the dead space to the right of the boundary is not a
+     // concern, since compaction will be able to use whatever space is
+     // available.
+@@ -1515,41 +1491,41 @@
+     //                              +---+
+     // a) beg_bits:  ...  x   x   x | 0 | ||   0   x  x  ...
+     //    end_bits:  ...  x   x   x | 0 | ||   0   x  x  ...
+-    // 				    +---+
++    //                              +---+
+     //
+     // In the 64-bit VM, each bit represents one 64-bit word:
+     //                              +------------+
+     // b) beg_bits:  ...  x   x   x | 0   ||   0 | x  x  ...
+     //    end_bits:  ...  x   x   1 | 0   ||   0 | x  x  ...
+-    // 				    +------------+
++    //                              +------------+
+     //                          +-------+
+     // c) beg_bits:  ...  x   x | 0   0 | ||   0   x  x  ...
+     //    end_bits:  ...  x   1 | 0   0 | ||   0   x  x  ...
+-    //				+-------+
++    //                          +-------+
+     //                      +-----------+
+     // d) beg_bits:  ...  x | 0   0   0 | ||   0   x  x  ...
+     //    end_bits:  ...  1 | 0   0   0 | ||   0   x  x  ...
+-    //			    +-----------+
+-    // 				+-------+
++    //                      +-----------+
++    //                          +-------+
+     // e) beg_bits:  ...  0   0 | 0   0 | ||   0   x  x  ...
+     //    end_bits:  ...  0   0 | 0   0 | ||   0   x  x  ...
+-    //				+-------+
++    //                          +-------+
+ 
+     // Initially assume case a, c or e will apply.
+     size_t obj_len = (size_t)oopDesc::header_size();
+     HeapWord* obj_beg = dense_prefix_end - obj_len;
+ 
+-#ifdef	_LP64
++#ifdef  _LP64
+     if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
+       // Case b above.
+       obj_beg = dense_prefix_end - 1;
+     } else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) &&
+-	       _mark_bitmap.is_obj_end(dense_prefix_bit - 4)) {
++               _mark_bitmap.is_obj_end(dense_prefix_bit - 4)) {
+       // Case d above.
+       obj_beg = dense_prefix_end - 3;
+       obj_len = 3;
+     }
+-#endif	// #ifdef _LP64
++#endif  // #ifdef _LP64
+ 
+     MemRegion region(obj_beg, obj_len);
+     SharedHeap::fill_region_with_object(region);
+@@ -1571,13 +1547,13 @@
+   HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
+   _space_info[id].set_dense_prefix(dense_prefix_end);
+ 
+-#ifndef	PRODUCT
++#ifndef PRODUCT
+   if (TraceParallelOldGCDensePrefix) {
+     print_dense_prefix_stats("ratio", id, maximum_compaction, dense_prefix_end);
+     HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
+     print_dense_prefix_stats("density", id, maximum_compaction, addr);
+   }
+-#endif	// #ifndef PRODUCT
++#endif  // #ifndef PRODUCT
+ 
+   // If dead space crosses the dense prefix boundary, it is (at least partially)
+   // filled with a dummy object, marked live and added to the summary data.
+@@ -1591,8 +1567,8 @@
+   // Compute the destination of each Chunk, and thus each object.
+   _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
+   _summary_data.summarize(dense_prefix_end, space->end(),
+-			  dense_prefix_end, space->top(),
+-			  new_top_addr);
++                          dense_prefix_end, space->top(),
++                          new_top_addr);
+ 
+   if (TraceParallelOldGCSummaryPhase) {
+     const size_t chunk_size = ParallelCompactData::ChunkSize;
+@@ -1601,22 +1577,22 @@
+     const HeapWord* nt_aligned_up = _summary_data.chunk_align_up(*new_top_addr);
+     const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
+     tty->print_cr("id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
+-		  "dp_chunk=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
+-		  "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
+-		  id, space->capacity_in_words(), dense_prefix_end,
+-		  dp_chunk, dp_words / chunk_size,
+-		  cr_words / chunk_size, *new_top_addr);
++                  "dp_chunk=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
++                  "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
++                  id, space->capacity_in_words(), dense_prefix_end,
++                  dp_chunk, dp_words / chunk_size,
++                  cr_words / chunk_size, *new_top_addr);
+   }
+ }
+ 
+ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
+-				      bool maximum_compaction)
++                                      bool maximum_compaction)
+ {
+   EventMark m("2 summarize");
+   TraceTime tm("summary phase", print_phases(), true, gclog_or_tty);
+   // trace("2");
+ 
+-#ifdef	ASSERT
++#ifdef  ASSERT
+   if (VerifyParallelOldWithMarkSweep  &&
+       (PSParallelCompact::total_invocations() %
+          VerifyParallelOldWithMarkSweepInterval) == 0) {
+@@ -1624,13 +1600,13 @@
+   }
+   if (TraceParallelOldGCMarkingPhase) {
+     tty->print_cr("add_obj_count=" SIZE_FORMAT " "
+-		  "add_obj_bytes=" SIZE_FORMAT,
+-		  add_obj_count, add_obj_size * HeapWordSize);
++                  "add_obj_bytes=" SIZE_FORMAT,
++                  add_obj_count, add_obj_size * HeapWordSize);
+     tty->print_cr("mark_bitmap_count=" SIZE_FORMAT " "
+-		  "mark_bitmap_bytes=" SIZE_FORMAT,
+-		  mark_bitmap_count, mark_bitmap_size * HeapWordSize);
++                  "mark_bitmap_bytes=" SIZE_FORMAT,
++                  mark_bitmap_count, mark_bitmap_size * HeapWordSize);
+   }
+-#endif	// #ifdef ASSERT
++#endif  // #ifdef ASSERT
+ 
+   // Quick summarization of each space into itself, to see how much is live.
+   summarize_spaces_quick();
+@@ -1649,7 +1625,7 @@
+   unsigned int id;
+   for (id = old_space_id; id < last_space_id; ++id) {
+     old_space_total_live += pointer_delta(_space_info[id].new_top(),
+-					  _space_info[id].space()->bottom());
++                                          _space_info[id].space()->bottom());
+   }
+ 
+   const MutableSpace* old_space = _space_info[old_space_id].space();
+@@ -1672,17 +1648,17 @@
+   for (id = eden_space_id; id < last_space_id; ++id) {
+     const MutableSpace* space = _space_info[id].space();
+     const size_t live = pointer_delta(_space_info[id].new_top(),
+-				      space->bottom());
++                                      space->bottom());
+     const size_t available = pointer_delta(target_space_end, *new_top_addr);
+     if (live <= available) {
+       // All the live data will fit.
+       if (TraceParallelOldGCSummaryPhase) {
+-	tty->print_cr("summarizing %d into old_space @ " PTR_FORMAT,
+-		      id, *new_top_addr);
++        tty->print_cr("summarizing %d into old_space @ " PTR_FORMAT,
++                      id, *new_top_addr);
+       }
+       _summary_data.summarize(*new_top_addr, target_space_end,
+-			      space->bottom(), space->top(),
+-			      new_top_addr);
++                              space->bottom(), space->top(),
++                              new_top_addr);
+ 
+       // Reset the new_top value for the space.
+       _space_info[id].set_new_top(space->bottom());
+@@ -1691,15 +1667,15 @@
+       ChunkData* beg_chunk = _summary_data.addr_to_chunk_ptr(space->bottom());
+       ChunkData* end_chunk = _summary_data.addr_to_chunk_ptr(space->top() - 1);
+       while (beg_chunk <= end_chunk) {
+-	beg_chunk->set_source_chunk(0);
+-	++beg_chunk;
++        beg_chunk->set_source_chunk(0);
++        ++beg_chunk;
+       }
+     }
+   }
+ 
+   // Fill in the block data after any changes to the chunks have
+   // been made.
+-#ifdef	ASSERT
++#ifdef  ASSERT
+   summarize_blocks(cm, perm_space_id);
+   summarize_blocks(cm, old_space_id);
+ #else
+@@ -1724,8 +1700,8 @@
+ // the chunks and fill in the BlockData for each chunk.
+ 
+ void PSParallelCompact::summarize_blocks(ParCompactionManager* cm,
+-					 SpaceId first_compaction_space_id) {
+-#if	0
++                                         SpaceId first_compaction_space_id) {
++#if     0
+   DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(1);)
+   for (SpaceId cur_space_id = first_compaction_space_id;
+        cur_space_id != last_space_id;
+@@ -1734,13 +1710,13 @@
+     size_t start_chunk_index =
+       _summary_data.addr_to_chunk_idx(space(cur_space_id)->bottom());
+     BitBlockUpdateClosure bbu(mark_bitmap(),
+-			      cm,
+-			      start_chunk_index);
++                              cm,
++                              start_chunk_index);
+     // Iterate over blocks.
+     for (size_t chunk_index =  start_chunk_index;
+-	 chunk_index < _summary_data.chunk_count() &&
+-	 _summary_data.chunk_to_addr(chunk_index) < space(cur_space_id)->top();
+-	 chunk_index++) {
++         chunk_index < _summary_data.chunk_count() &&
++         _summary_data.chunk_to_addr(chunk_index) < space(cur_space_id)->top();
++         chunk_index++) {
+ 
+       // Reset the closure for the new chunk.  Note that the closure
+       // maintains some data that does not get reset for each chunk
+@@ -1751,22 +1727,22 @@
+       // may return the end of the chunk.  That is acceptable since
+       // it will properly limit the iterations.
+       ParMarkBitMap::idx_t left_offset = mark_bitmap()->addr_to_bit(
+-	_summary_data.first_live_or_end_in_chunk(chunk_index));
++        _summary_data.first_live_or_end_in_chunk(chunk_index));
+ 
+       // End the iteration at the end of the chunk.
+       HeapWord* chunk_addr = _summary_data.chunk_to_addr(chunk_index);
+       HeapWord* chunk_end = chunk_addr + ParallelCompactData::ChunkSize;
+       ParMarkBitMap::idx_t right_offset =
+-	mark_bitmap()->addr_to_bit(chunk_end);
++        mark_bitmap()->addr_to_bit(chunk_end);
+ 
+       // Blocks that have not objects starting in them can be
+       // skipped because their data will never be used.
+       if (left_offset < right_offset) {
+ 
+         // Iterate through the objects in the chunk.
+-        ParMarkBitMap::idx_t last_offset = 
+-  	  mark_bitmap()->pair_iterate(&bbu, left_offset, right_offset);
+-  
++        ParMarkBitMap::idx_t last_offset =
++          mark_bitmap()->pair_iterate(&bbu, left_offset, right_offset);
++
+         // If last_offset is less than right_offset, then the iterations
+         // terminated while it was looking for an end bit.  "last_offset"
+         // is then the offset for the last start bit.  In this situation
+@@ -1776,8 +1752,8 @@
+ 
+         size_t cur_block_plus_1 = bbu.cur_block() + 1;
+         HeapWord* cur_block_plus_1_addr =
+-  	_summary_data.block_to_addr(bbu.cur_block()) +
+-  	ParallelCompactData::BlockSize;
++        _summary_data.block_to_addr(bbu.cur_block()) +
++        ParallelCompactData::BlockSize;
+         HeapWord* last_offset_addr = mark_bitmap()->bit_to_addr(last_offset);
+  #if 1  // This code works.  The else doesn't but should.  Why does it?
+         // The current block (cur_block()) has already been updated.
+@@ -1786,152 +1762,152 @@
+         // last object starts (which can be greater than the
+         // next block if there were no objects found in intervening
+         // blocks).
+-        size_t last_block = 
+-  	  MAX2(bbu.cur_block() + 1,
+-  	       _summary_data.addr_to_block_idx(last_offset_addr));
++        size_t last_block =
++          MAX2(bbu.cur_block() + 1,
++               _summary_data.addr_to_block_idx(last_offset_addr));
+  #else
+-	// The current block has already been updated.  The only block
+-	// that remains to be updated is the block where the last
+-	// object in the chunk starts.
+-	size_t last_block = _summary_data.addr_to_block_idx(last_offset_addr);
++        // The current block has already been updated.  The only block
++        // that remains to be updated is the block where the last
++        // object in the chunk starts.
++        size_t last_block = _summary_data.addr_to_block_idx(last_offset_addr);
+  #endif
+-	assert_bit_is_start(last_offset);
++        assert_bit_is_start(last_offset);
+         assert((last_block == _summary_data.block_count()) ||
+-  	     (_summary_data.block(last_block)->raw_offset() == 0),
++             (_summary_data.block(last_block)->raw_offset() == 0),
+           "Should not have been set");
+         // Is the last block still in the current chunk?  If still
+         // in this chunk, update the last block (the counting that
+         // included the current block is meant for the offset of the last
+         // block).  If not in this chunk, do nothing.  Should not
+         // update a block in the next chunk.
+-	if (ParallelCompactData::chunk_contains_block(bbu.chunk_index(), 
+-						      last_block)) {
++        if (ParallelCompactData::chunk_contains_block(bbu.chunk_index(),
++                                                      last_block)) {
+           if (last_offset < right_offset) {
+-	    // The last object started in this chunk but ends beyond
+-	    // this chunk.  Update the block for this last object.
++            // The last object started in this chunk but ends beyond
++            // this chunk.  Update the block for this last object.
+             assert(mark_bitmap()->is_marked(last_offset), "Should be marked");
+-    	    // No end bit was found.  The closure takes care of
+-    	    // the cases where
+-    	    //   an objects crosses over into the next block
+-    	    //   an objects starts and ends in the next block
+-    	    // It does not handle the case where an object is
+-    	    // the first object in a later block and extends
+-    	    // past the end of the chunk (i.e., the closure
+-    	    // only handles complete objects that are in the range
+-    	    // it is given).  That object is handed back here
+-    	    // for any special consideration necessary.
+-    	    //
+-    	    // Is the first bit in the last block a start or end bit?
+-    	    //
+-    	    // If the partial object ends in the last block L,
+-    	    // then the 1st bit in L may be an end bit.
+-    	    //
+-    	    // Else does the last object start in a block after the current 
+-	    // block? A block AA will already have been updated if an
+-    	    // object ends in the next block AA+1.  An object found to end in 
+-    	    // the AA+1 is the trigger that updates AA.  Objects are being 
+-	    // counted in the current block for updaing a following
+-    	    // block.  An object may start in later block
+-    	    // block but may extend beyond the last block in the chunk.
+-    	    // Updates are only done when the end of an object has been
+-    	    // found. If the last object (covered by block L) starts 
+-    	    // beyond the current block, then no object ends in L (otherwise
+-    	    // L would be the current block).  So the first bit in L is
+-    	    // a start bit.  
+-	    //
+-	    // Else the last objects start in the current block and ends
+-	    // beyond the chunk.  The current block has already been
+-	    // updated and there is no later block (with an object
+-	    // starting in it) that needs to be updated.
+-    	    //
+-    	    if (_summary_data.partial_obj_ends_in_block(last_block)) {
++            // No end bit was found.  The closure takes care of
++            // the cases where
++            //   an objects crosses over into the next block
++            //   an objects starts and ends in the next block
++            // It does not handle the case where an object is
++            // the first object in a later block and extends
++            // past the end of the chunk (i.e., the closure
++            // only handles complete objects that are in the range
++            // it is given).  That object is handed back here
++            // for any special consideration necessary.
++            //
++            // Is the first bit in the last block a start or end bit?
++            //
++            // If the partial object ends in the last block L,
++            // then the 1st bit in L may be an end bit.
++            //
++            // Else does the last object start in a block after the current
++            // block? A block AA will already have been updated if an
++            // object ends in the next block AA+1.  An object found to end in
++            // the AA+1 is the trigger that updates AA.  Objects are being
++            // counted in the current block for updaing a following
++            // block.  An object may start in later block
++            // block but may extend beyond the last block in the chunk.
++            // Updates are only done when the end of an object has been
++            // found. If the last object (covered by block L) starts
++            // beyond the current block, then no object ends in L (otherwise
++            // L would be the current block).  So the first bit in L is
++            // a start bit.
++            //
++            // Else the last objects start in the current block and ends
++            // beyond the chunk.  The current block has already been
++            // updated and there is no later block (with an object
++            // starting in it) that needs to be updated.
++            //
++            if (_summary_data.partial_obj_ends_in_block(last_block)) {
+               _summary_data.block(last_block)->set_end_bit_offset(
+-    	        bbu.live_data_left());
+-    	    } else if (last_offset_addr >= cur_block_plus_1_addr) {
+-	      //   The start of the object is on a later block
+-	      // (to the right of the current block and there are no
+-	      // complete live objects to the left of this last object
+-	      // within the chunk.
+-    	      //   The first bit in the block is for the start of the
+-    	      // last object.
++                bbu.live_data_left());
++            } else if (last_offset_addr >= cur_block_plus_1_addr) {
++              //   The start of the object is on a later block
++              // (to the right of the current block and there are no
++              // complete live objects to the left of this last object
++              // within the chunk.
++              //   The first bit in the block is for the start of the
++              // last object.
+               _summary_data.block(last_block)->set_start_bit_offset(
+-    	        bbu.live_data_left());
+-    	    } else {
+-	      //   The start of the last object was found in 
+-	      // the current chunk (which has already 
+-	      // been updated).
+-  	      assert(bbu.cur_block() == 
+-		      _summary_data.addr_to_block_idx(last_offset_addr),
+-  	        "Should be a block already processed");
+-    	    }
++                bbu.live_data_left());
++            } else {
++              //   The start of the last object was found in
++              // the current chunk (which has already
++              // been updated).
++              assert(bbu.cur_block() ==
++                      _summary_data.addr_to_block_idx(last_offset_addr),
++                "Should be a block already processed");
++            }
+ #ifdef ASSERT
+-    	    // Is there enough block information to find this object?
+-    	    // The destination of the chunk has not been set so the
+-    	    // values returned by calc_new_pointer() and
+-    	    // block_calc_new_pointer() will only be
+-    	    // offsets.  But they should agree.
+-    	    HeapWord* moved_obj_with_chunks = 
+-    	      _summary_data.chunk_calc_new_pointer(last_offset_addr);
+-    	    HeapWord* moved_obj_with_blocks = 
+-    	      _summary_data.calc_new_pointer(last_offset_addr);
+-            assert(moved_obj_with_chunks == moved_obj_with_blocks, 
+-    	      "Block calculation is wrong");
+-#endif 
++            // Is there enough block information to find this object?
++            // The destination of the chunk has not been set so the
++            // values returned by calc_new_pointer() and
++            // block_calc_new_pointer() will only be
++            // offsets.  But they should agree.
++            HeapWord* moved_obj_with_chunks =
++              _summary_data.chunk_calc_new_pointer(last_offset_addr);
++            HeapWord* moved_obj_with_blocks =
++              _summary_data.calc_new_pointer(last_offset_addr);
++            assert(moved_obj_with_chunks == moved_obj_with_blocks,
++              "Block calculation is wrong");
++#endif
+           } else if (last_block < _summary_data.block_count()) {
+-  	    // Iterations ended looking for a start bit (but
+-  	    // did not run off the end of the block table).
++            // Iterations ended looking for a start bit (but
++            // did not run off the end of the block table).
+             _summary_data.block(last_block)->set_start_bit_offset(
+               bbu.live_data_left());
+           }
+-	}
++        }
+ #ifdef ASSERT
+-  	// Is there enough block information to find this object?
++        // Is there enough block information to find this object?
+           HeapWord* left_offset_addr = mark_bitmap()->bit_to_addr(left_offset);
+-  	HeapWord* moved_obj_with_chunks =
+-  	  _summary_data.calc_new_pointer(left_offset_addr);
+-  	HeapWord* moved_obj_with_blocks =
+-  	  _summary_data.calc_new_pointer(left_offset_addr);
++        HeapWord* moved_obj_with_chunks =
++          _summary_data.chunk_calc_new_pointer(left_offset_addr);
++        HeapWord* moved_obj_with_blocks =
++          _summary_data.calc_new_pointer(left_offset_addr);
+           assert(moved_obj_with_chunks == moved_obj_with_blocks,
+-  	  "Block calculation is wrong");
++          "Block calculation is wrong");
+ #endif
+ 
+         // Is there another block after the end of this chunk?
+ #ifdef ASSERT
+         if (last_block < _summary_data.block_count()) {
+-  	// No object may have been found in a block.  If that
+-  	// block is at the end of the chunk, the iteration will
+-  	// terminate without incrementing the current block so
+-  	// that the current block is not the last block in the
+-  	// chunk.  That situation precludes asserting that the
+-  	// current block is the last block in the chunk.  Assert
+-  	// the lesser condition that the current block does not
+-  	// exceed the chunk.
++        // No object may have been found in a block.  If that
++        // block is at the end of the chunk, the iteration will
++        // terminate without incrementing the current block so
++        // that the current block is not the last block in the
++        // chunk.  That situation precludes asserting that the
++        // current block is the last block in the chunk.  Assert
++        // the lesser condition that the current block does not
++        // exceed the chunk.
+           assert(_summary_data.block_to_addr(last_block) <=
+-  	       (_summary_data.chunk_to_addr(chunk_index) +
+-  	         ParallelCompactData::ChunkSize),
+-  	      "Chunk and block inconsistency");
+-  	  assert(last_offset <= right_offset, "Iteration over ran end");
++               (_summary_data.chunk_to_addr(chunk_index) +
++                 ParallelCompactData::ChunkSize),
++              "Chunk and block inconsistency");
++          assert(last_offset <= right_offset, "Iteration overran end");
+         }
+ #endif
+       }
+ #ifdef ASSERT
+       if (PrintGCDetails && Verbose) {
+         if (_summary_data.chunk(chunk_index)->partial_obj_size() == 1) {
+-          size_t first_block = 
+-	    chunk_index / ParallelCompactData::BlocksPerChunk;
+-	  gclog_or_tty->print_cr("first_block " PTR_FORMAT 
+-	    " _offset " PTR_FORMAT 
+-	    "_first_is_start_bit %d",
+-            first_block, 
+-	    _summary_data.block(first_block)->raw_offset(), 
+-	    _summary_data.block(first_block)->first_is_start_bit());
++          size_t first_block =
++            chunk_index / ParallelCompactData::BlocksPerChunk;
++          gclog_or_tty->print_cr("first_block " PTR_FORMAT
++            " _offset " PTR_FORMAT
++            "_first_is_start_bit %d",
++            first_block,
++            _summary_data.block(first_block)->raw_offset(),
++            _summary_data.block(first_block)->first_is_start_bit());
+         }
+       }
+ #endif
+     }
+   }
+   DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(16);)
+-#endif	// #if 0
++#endif  // #if 0
+ }
+ 
+ // This method should contain all heap-specific policy for invoking a full
+@@ -1944,7 +1920,7 @@
+ void PSParallelCompact::invoke(bool maximum_heap_compaction) {
+   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
+   assert(Thread::current() == (Thread*)VMThread::vm_thread(),
+-	 "should be in vm thread");
++         "should be in vm thread");
+   ParallelScavengeHeap* heap = gc_heap();
+   GCCause::Cause gc_cause = heap->gc_cause();
+   assert(!heap->is_gc_active(), "not reentrant");
+@@ -1974,12 +1950,12 @@
+ }
+ 
+ bool ParallelCompactData::chunk_contains_block(size_t chunk_index,
+-					       size_t block_index) {
++                                               size_t block_index) {
+   size_t first_block_in_chunk = chunk_index * BlocksPerChunk;
+   size_t last_block_in_chunk = (chunk_index + 1) * BlocksPerChunk - 1;
+ 
+   return (first_block_in_chunk <= block_index) &&
+-	 (block_index <= last_block_in_chunk);
++         (block_index <= last_block_in_chunk);
+ }
+ 
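For reference, a standalone usage sketch of the containment test above. The BlocksPerChunk value below is an assumption for illustration only; the real constant is a member of ParallelCompactData and is not shown in this hunk.

    #include <cassert>
    #include <cstddef>

    const size_t BlocksPerChunk = 16;  // illustrative value, not the real constant

    // Chunk c covers blocks [c * BlocksPerChunk, (c + 1) * BlocksPerChunk - 1].
    bool chunk_contains_block(size_t chunk_index, size_t block_index) {
      size_t first = chunk_index * BlocksPerChunk;
      size_t last  = (chunk_index + 1) * BlocksPerChunk - 1;
      return first <= block_index && block_index <= last;
    }

    int main() {
      assert(chunk_contains_block(2, 32));   // 2 * 16 == 32, first block of chunk 2
      assert(chunk_contains_block(2, 47));   // 3 * 16 - 1 == 47, last block of chunk 2
      assert(!chunk_contains_block(2, 48));  // block 48 belongs to chunk 3
      return 0;
    }
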
+ // This method contains no policy. You should probably
+@@ -2060,16 +2036,16 @@
+ #ifndef PRODUCT
+     if (TraceParallelOldGCMarkingPhase) {
+       gclog_or_tty->print_cr("marking_phase: cas_tries %d  cas_retries %d "
+-	"cas_by_another %d",
+-	mark_bitmap()->cas_tries(), mark_bitmap()->cas_retries(),
+-	mark_bitmap()->cas_by_another());
++        "cas_by_another %d",
++        mark_bitmap()->cas_tries(), mark_bitmap()->cas_retries(),
++        mark_bitmap()->cas_by_another());
+     }
+-#endif	// #ifndef PRODUCT
++#endif  // #ifndef PRODUCT
+ 
+ #ifdef ASSERT
+     if (VerifyParallelOldWithMarkSweep &&
+-	(PSParallelCompact::total_invocations() %
+-	   VerifyParallelOldWithMarkSweepInterval) == 0) {
++        (PSParallelCompact::total_invocations() %
++           VerifyParallelOldWithMarkSweepInterval) == 0) {
+       gclog_or_tty->print_cr("Verify marking with mark_sweep_phase1()");
+       if (PrintGCDetails && Verbose) {
+         gclog_or_tty->print_cr("mark_sweep_phase1:");
+@@ -2086,12 +2062,12 @@
+ #endif
+ 
+     bool max_on_system_gc = UseMaximumCompactionOnSystemGC && is_system_gc;
+-    summary_phase(serial_CM, maximum_heap_compaction || max_on_system_gc);    
++    summary_phase(serial_CM, maximum_heap_compaction || max_on_system_gc);
+ 
+ #ifdef ASSERT
+     if (VerifyParallelOldWithMarkSweep &&
+-	(PSParallelCompact::total_invocations() %
+-	   VerifyParallelOldWithMarkSweepInterval) == 0) {
++        (PSParallelCompact::total_invocations() %
++           VerifyParallelOldWithMarkSweepInterval) == 0) {
+       if (PrintGCDetails && Verbose) {
+         gclog_or_tty->print_cr("mark_sweep_phase2:");
+       }
+@@ -2108,8 +2084,8 @@
+ 
+ #ifdef ASSERT
+     if (VerifyParallelOldWithMarkSweep &&
+-	(PSParallelCompact::total_invocations() %
+-	   VerifyParallelOldWithMarkSweepInterval) == 0) {
++        (PSParallelCompact::total_invocations() %
++           VerifyParallelOldWithMarkSweepInterval) == 0) {
+       // Do a separate verify phase so that the verify
+       // code can use the forwarding pointers to
+       // check the new pointer calculation.  The restore_marks()
+@@ -2154,12 +2130,12 @@
+         gclog_or_tty->stamp();
+         gclog_or_tty->print_cr(" collection: %d ",
+                        heap->total_collections());
+-	if (Verbose) {
+-	  gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
+-	    " perm_gen_capacity: %d ",
+-	    old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
+-	    perm_gen->capacity_in_bytes());
+-	}
++        if (Verbose) {
++          gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
++            " perm_gen_capacity: %d ",
++            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
++            perm_gen->capacity_in_bytes());
++        }
+       }
+ 
+       // Don't check if the size_policy is ready here.  Let
+@@ -2168,22 +2144,22 @@
+           ((gc_cause != GCCause::_java_lang_system_gc) ||
+             UseAdaptiveSizePolicyWithSystemGC)) {
+         // Calculate optimal free space amounts
+-	assert(young_gen->max_size() >
+-	  young_gen->from_space()->capacity_in_bytes() +
+-	  young_gen->to_space()->capacity_in_bytes(),
+-	  "Sizes of space in young gen are out-of-bounds");
+-	size_t max_eden_size = young_gen->max_size() -
+-	  young_gen->from_space()->capacity_in_bytes() -
+-	  young_gen->to_space()->capacity_in_bytes();
++        assert(young_gen->max_size() >
++          young_gen->from_space()->capacity_in_bytes() +
++          young_gen->to_space()->capacity_in_bytes(),
++          "Sizes of space in young gen are out-of-bounds");
++        size_t max_eden_size = young_gen->max_size() -
++          young_gen->from_space()->capacity_in_bytes() -
++          young_gen->to_space()->capacity_in_bytes();
+         size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
+-				 young_gen->eden_space()->used_in_bytes(),
++                                 young_gen->eden_space()->used_in_bytes(),
+                                  old_gen->used_in_bytes(),
+                                  perm_gen->used_in_bytes(),
+-				 young_gen->eden_space()->capacity_in_bytes(),
++                                 young_gen->eden_space()->capacity_in_bytes(),
+                                  old_gen->max_gen_size(),
+                                  max_eden_size,
+                                  true /* full gc*/,
+-				 gc_cause);
++                                 gc_cause);
+ 
+         heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());
+ 
+@@ -2219,11 +2195,11 @@
+         // No GC timestamp here.  This is after GC so it would be confusing.
+         young_gen->print_used_change(pre_gc_values.young_gen_used());
+         old_gen->print_used_change(pre_gc_values.old_gen_used());
+-	heap->print_heap_change(pre_gc_values.heap_used());
+-	// Print perm gen last (print_heap_change() excludes the perm gen).
++        heap->print_heap_change(pre_gc_values.heap_used());
++        // Print perm gen last (print_heap_change() excludes the perm gen).
+         perm_gen->print_used_change(pre_gc_values.perm_gen_used());
+       } else {
+-	heap->print_heap_change(pre_gc_values.heap_used());
++        heap->print_heap_change(pre_gc_values.heap_used());
+       }
+     }
+ 
+@@ -2234,12 +2210,12 @@
+     if (PrintGCDetails) {
+       if (size_policy->print_gc_time_limit_would_be_exceeded()) {
+         if (size_policy->gc_time_limit_exceeded()) {
+-          gclog_or_tty->print_cr("	GC time is exceeding GCTimeLimit "
+-	    "of %d%%", GCTimeLimit);
++          gclog_or_tty->print_cr("      GC time is exceeding GCTimeLimit "
++            "of %d%%", GCTimeLimit);
+         } else {
+-          gclog_or_tty->print_cr("	GC time would exceed GCTimeLimit "
+-	    "of %d%%", GCTimeLimit);
+-	}
++          gclog_or_tty->print_cr("      GC time would exceed GCTimeLimit "
++            "of %d%%", GCTimeLimit);
++        }
+       }
+       size_policy->set_print_gc_time_limit_would_be_exceeded(false);
+     }
+@@ -2267,20 +2243,20 @@
+   }
+   if (PrintGCTaskTimeStamps) {
+     gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " "
+-			   INT64_FORMAT,
+-			   marking_start.ticks(), compaction_start.ticks(),
+-			   collection_exit.ticks());
++                           INT64_FORMAT,
++                           marking_start.ticks(), compaction_start.ticks(),
++                           collection_exit.ticks());
+     gc_task_manager()->print_task_time_stamps();
+   }
+ }
+ 
+ bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
+-					     PSYoungGen* young_gen,
+-					     PSOldGen* old_gen) {
++                                             PSYoungGen* young_gen,
++                                             PSOldGen* old_gen) {
+   MutableSpace* const eden_space = young_gen->eden_space();
+   assert(!eden_space->is_empty(), "eden must be non-empty");
+   assert(young_gen->virtual_space()->alignment() ==
+-	 old_gen->virtual_space()->alignment(), "alignments do not match");
++         old_gen->virtual_space()->alignment(), "alignments do not match");
+ 
+   if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
+     return false;
+@@ -2314,14 +2290,14 @@
+ 
+   if (TraceAdaptiveGCBoundary && Verbose) {
+     gclog_or_tty->print(" absorbing " SIZE_FORMAT "K:  "
+-			"eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
+-			"from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
+-			"young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
+-			absorb_size / K,
+-			eden_capacity / K, (eden_capacity - absorb_size) / K,
+-			young_gen->from_space()->used_in_bytes() / K,
+-			young_gen->to_space()->used_in_bytes() / K,
+-			young_gen->capacity_in_bytes() / K, new_young_size / K);
++                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
++                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
++                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
++                        absorb_size / K,
++                        eden_capacity / K, (eden_capacity - absorb_size) / K,
++                        young_gen->from_space()->used_in_bytes() / K,
++                        young_gen->to_space()->used_in_bytes() / K,
++                        young_gen->capacity_in_bytes() / K, new_young_size / K);
+   }
+ 
+   // Fill the unused part of the old gen.
+@@ -2336,7 +2312,7 @@
+   // from end to virtual_space->high() in debug builds).
+   HeapWord* const new_top = eden_space->top();
+   old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
+-					absorb_size);
++                                        absorb_size);
+   young_gen->reset_after_change();
+   old_space->set_top(new_top);
+   old_space->set_end(new_top);
+@@ -2365,14 +2341,14 @@
+ }
+ 
+ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
+-			              bool maximum_heap_compaction) {
++                                      bool maximum_heap_compaction) {
+   // Recursively traverse all live objects and mark them
+   EventMark m("1 mark object");
+   TraceTime tm("marking phase", print_phases(), true, gclog_or_tty);
+ 
+   ParallelScavengeHeap* heap = gc_heap();
+   uint parallel_gc_threads = heap->gc_task_manager()->workers();
+-  GenTaskQueueSet* qset = ParCompactionManager::chunk_array()->task_queue_set();
++  TaskQueueSetSuper* qset = ParCompactionManager::chunk_array();
+   ParallelTaskTerminator terminator(parallel_gc_threads, qset);
+ 
+   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
+@@ -2396,7 +2372,7 @@
+ 
+     if (parallel_gc_threads > 1) {
+       for (uint j = 0; j < parallel_gc_threads; j++) {
+-	q->enqueue(new StealMarkingTask(&terminator));
++        q->enqueue(new StealMarkingTask(&terminator));
+       }
+     }
+ 
+@@ -2443,7 +2419,7 @@
+ 
+   // Follow code cache roots.
+   CodeCache::do_unloading(is_alive_closure(), &mark_and_push_closure,
+-			  purged_class);
++                          purged_class);
+   follow_stack(cm); // Flush marking stack.
+ 
+   // Update subklass/sibling/implementor links of live klasses
+@@ -2498,7 +2474,7 @@
+   // Should the reference processor have a span that excludes
+   // young gen objects?
+   PSScavenge::reference_processor()->weak_oops_do(
+-					      adjust_root_pointer_closure());
++                                              adjust_root_pointer_closure());
+ }
+ 
+ void PSParallelCompact::compact_perm(ParCompactionManager* cm) {
+@@ -2525,8 +2501,8 @@
+ 
+   const ParallelCompactData& sd = PSParallelCompact::summary_data();
+ 
+-  size_t fillable_chunks = 0;	// A count for diagnostic purposes.
+-  unsigned int which = 0;	// The worker thread number.
++  size_t fillable_chunks = 0;   // A count for diagnostic purposes.
++  unsigned int which = 0;       // The worker thread number.
+ 
+   for (unsigned int id = to_space_id; id > perm_space_id; --id) {
+     SpaceInfo* const space_info = _space_info + id;
+@@ -2542,19 +2518,19 @@
+         ParCompactionManager* cm = ParCompactionManager::manager_array(which);
+         cm->save_for_processing(cur);
+ 
+-	if (TraceParallelOldGCCompactionPhase && Verbose) {
+-	  const size_t count_mod_8 = fillable_chunks & 7;
+-	  if (count_mod_8 == 0) gclog_or_tty->print("fillable: ");
+-	  gclog_or_tty->print(" " SIZE_FORMAT_W("7"), cur);
+-	  if (count_mod_8 == 7) gclog_or_tty->cr();
+-	}
++        if (TraceParallelOldGCCompactionPhase && Verbose) {
++          const size_t count_mod_8 = fillable_chunks & 7;
++          if (count_mod_8 == 0) gclog_or_tty->print("fillable: ");
++          gclog_or_tty->print(" " SIZE_FORMAT_W("7"), cur);
++          if (count_mod_8 == 7) gclog_or_tty->cr();
++        }
+ 
+         NOT_PRODUCT(++fillable_chunks;)
+ 
+         // Assign chunks to threads in round-robin fashion.
+         if (++which == task_count) {
+-	  which = 0;
+-	}
++          which = 0;
++        }
+       }
+     }
+   }
+@@ -2590,12 +2566,12 @@
+ 
+     // The dense prefix is before this chunk.
+     size_t chunk_index_end_dense_prefix =
+-	sd.addr_to_chunk_idx(dense_prefix_end);
++        sd.addr_to_chunk_idx(dense_prefix_end);
+     ChunkData* const dense_prefix_cp = sd.chunk(chunk_index_end_dense_prefix);
+     assert(dense_prefix_end == space->end() ||
+-	   dense_prefix_cp->available() ||
+-	   dense_prefix_cp->claimed(),
+-	   "The chunk after the dense prefix should always be ready to fill");
++           dense_prefix_cp->available() ||
++           dense_prefix_cp->claimed(),
++           "The chunk after the dense prefix should always be ready to fill");
+ 
+     size_t chunk_index_start = sd.addr_to_chunk_idx(space->bottom());
+ 
+@@ -2607,17 +2583,17 @@
+     if (total_dense_prefix_chunks > 0) {
+       uint tasks_for_dense_prefix = 1;
+       if (UseParallelDensePrefixUpdate) {
+-	if (total_dense_prefix_chunks <=
+-	    (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
+-	  // Don't over partition.  This assumes that
+-	  // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
+-	  // so there are not many chunks to process.
+-	  tasks_for_dense_prefix = parallel_gc_threads;
+-	} else {
+-	  // Over partition
+-	  tasks_for_dense_prefix = parallel_gc_threads *
+-	    PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
+-	}
++        if (total_dense_prefix_chunks <=
++            (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
++          // Don't over partition.  This assumes that
++          // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
++          // so there are not many chunks to process.
++          tasks_for_dense_prefix = parallel_gc_threads;
++        } else {
++          // Over partition
++          tasks_for_dense_prefix = parallel_gc_threads *
++            PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
++        }
+       }
+       size_t chunks_per_thread = total_dense_prefix_chunks /
+         tasks_for_dense_prefix;
+@@ -2630,13 +2606,13 @@
+         if (chunk_index_start >= chunk_index_end_dense_prefix) {
+           break;
+         }
+-	// chunk_index_end is not processed
++        // chunk_index_end is not processed
+         size_t chunk_index_end = MIN2(chunk_index_start + chunks_per_thread,
+-				      chunk_index_end_dense_prefix);
++                                      chunk_index_end_dense_prefix);
+         q->enqueue(new UpdateDensePrefixTask(
+-				 space_id,
+-				 chunk_index_start,
+-				 chunk_index_end));
++                                 space_id,
++                                 chunk_index_start,
++                                 chunk_index_end));
+         chunk_index_start = chunk_index_end;
+       }
+     }
+@@ -2644,17 +2620,17 @@
+     // fit evenly.
+     if (chunk_index_start < chunk_index_end_dense_prefix) {
+       q->enqueue(new UpdateDensePrefixTask(
+-				 space_id,
+-				 chunk_index_start,
+-				 chunk_index_end_dense_prefix));
++                                 space_id,
++                                 chunk_index_start,
++                                 chunk_index_end_dense_prefix));
+     }
+     space_id = next_compaction_space_id(space_id);
+   }  // End tasks for dense prefix
+ }
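To make the over-partitioning above concrete, a minimal standalone sketch of just the arithmetic. PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is the real constant from the code above; the numeric inputs are invented for illustration.

    #include <cstdio>

    int main() {
      const unsigned parallel_gc_threads = 4;
      const unsigned over_partitioning   = 4;    // stands in for PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING
      const size_t   total_dense_prefix_chunks = 100;  // assumed workload

      unsigned tasks_for_dense_prefix;
      if (total_dense_prefix_chunks <= parallel_gc_threads * over_partitioning) {
        tasks_for_dense_prefix = parallel_gc_threads;            // few chunks: don't over partition
      } else {
        tasks_for_dense_prefix = parallel_gc_threads * over_partitioning;  // over partition
      }
      const size_t chunks_per_thread =
        total_dense_prefix_chunks / tasks_for_dense_prefix;      // 100 / 16 == 6

      // 16 tasks of 6 chunks cover 96; the 4 left over go to the final task
      // enqueued after the loop ("if they don't fit evenly").
      std::printf("%u tasks x %zu chunks, %zu left over\n",
                  tasks_for_dense_prefix, chunks_per_thread,
                  total_dense_prefix_chunks -
                    chunks_per_thread * tasks_for_dense_prefix);
      return 0;
    }
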
+ 
+ void PSParallelCompact::enqueue_chunk_stealing_tasks(
+-				     GCTaskQueue* q,
+-				     ParallelTaskTerminator* terminator_ptr,
++                                     GCTaskQueue* q,
++                                     ParallelTaskTerminator* terminator_ptr,
+                                      uint parallel_gc_threads) {
+   TraceTime tm("steal task setup", print_phases(), true, gclog_or_tty);
+ 
+@@ -2677,7 +2653,7 @@
+   PSOldGen* old_gen = heap->old_gen();
+   old_gen->start_array()->reset();
+   uint parallel_gc_threads = heap->gc_task_manager()->workers();
+-  GenTaskQueueSet* qset = ParCompactionManager::chunk_array()->task_queue_set();
++  TaskQueueSetSuper* qset = ParCompactionManager::chunk_array();
+   ParallelTaskTerminator terminator(parallel_gc_threads, qset);
+ 
+   GCTaskQueue* q = GCTaskQueue::create();
+@@ -2698,7 +2674,7 @@
+     // We have to release the barrier tasks!
+     WaitForBarrierGCTask::destroy(fin);
+ 
+-#ifdef	ASSERT
++#ifdef  ASSERT
+     // Verify that all chunks have been processed before the deferred updates.
+     // Note that perm_space_id is skipped; this type of verification is not
+     // valid until the perm gen is compacted by chunks.
+@@ -2718,7 +2694,7 @@
+   }
+ }
+ 
+-#ifdef	ASSERT
++#ifdef  ASSERT
+ void PSParallelCompact::verify_complete(SpaceId space_id) {
+   // All Chunks between space bottom() to new_top() should be marked as filled
+   // and all Chunks between new_top() and top() should be available (i.e.,
+@@ -2738,8 +2714,8 @@
+     const ChunkData* const c = sd.chunk(cur_chunk);
+     if (!c->completed()) {
+       warning("chunk " SIZE_FORMAT " not filled:  "
+-	      "destination_count=" SIZE_FORMAT,
+-	      cur_chunk, c->destination_count());
++              "destination_count=" SIZE_FORMAT,
++              cur_chunk, c->destination_count());
+       issued_a_warning = true;
+     }
+   }
+@@ -2748,8 +2724,8 @@
+     const ChunkData* const c = sd.chunk(cur_chunk);
+     if (!c->available()) {
+       warning("chunk " SIZE_FORMAT " not empty:   "
+-	      "destination_count=" SIZE_FORMAT,
+-	      cur_chunk, c->destination_count());
++              "destination_count=" SIZE_FORMAT,
++              cur_chunk, c->destination_count());
+       issued_a_warning = true;
+     }
+   }
+@@ -2758,7 +2734,7 @@
+     print_chunk_ranges();
+   }
+ }
+-#endif	// #ifdef ASSERT
++#endif  // #ifdef ASSERT
+ 
+ void PSParallelCompact::compact_serial(ParCompactionManager* cm) {
+   EventMark m("5 compact serial");
+@@ -2845,11 +2821,11 @@
+     if (index != -1) {
+       int l = _root_refs_stack->length();
+       if (l > 0 && l - 1 != index) {
+-	oop* last = _root_refs_stack->pop();
+-	assert(last != p, "should be different");
+-	_root_refs_stack->at_put(index, last);
++        oop* last = _root_refs_stack->pop();
++        assert(last != p, "should be different");
++        _root_refs_stack->at_put(index, last);
+       } else {
+-	_root_refs_stack->remove(p);
++        _root_refs_stack->remove(p);
+       }
+     }
+   }
+@@ -2913,9 +2889,9 @@
+ }
+ 
+ void PSParallelCompact::live_oop_moved_to(HeapWord* q, size_t size,
+-				  HeapWord* compaction_top) {
++                                  HeapWord* compaction_top) {
+   assert(oop(q)->forwardee() == NULL || oop(q)->forwardee() == oop(compaction_top),
+-	 "should be moved to forwarded location");
++         "should be moved to forwarded location");
+   if (ValidateMarkSweep) {
+     PSParallelCompact::validate_live_oop(oop(q), size);
+     _live_oops_moved_to->push(oop(compaction_top));
+@@ -2994,9 +2970,9 @@
+ // Update interior oops in the ranges of chunks [beg_chunk, end_chunk).
+ void
+ PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
+-						       SpaceId space_id,
+-						       size_t beg_chunk,
+-						       size_t end_chunk) {
++                                                       SpaceId space_id,
++                                                       size_t beg_chunk,
++                                                       size_t end_chunk) {
+   ParallelCompactData& sd = summary_data();
+   ParMarkBitMap* const mbm = mark_bitmap();
+ 
+@@ -3005,13 +2981,13 @@
+   assert(beg_chunk <= end_chunk, "bad chunk range");
+   assert(end_addr <= dense_prefix(space_id), "not in the dense prefix");
+ 
+-#ifdef	ASSERT
++#ifdef  ASSERT
+   // Claim the chunks to avoid triggering an assert when they are marked as
+   // filled.
+   for (size_t claim_chunk = beg_chunk; claim_chunk < end_chunk; ++claim_chunk) {
+     assert(sd.chunk(claim_chunk)->claim_unsafe(), "claim() failed");
+   }
+-#endif	// #ifdef ASSERT
++#endif  // #ifdef ASSERT
+ 
+   if (beg_addr != space(space_id)->bottom()) {
+     // Find the first live object or block of dead space that *starts* in this
+@@ -3038,15 +3014,17 @@
+     FillClosure fill_closure(cm, space_id);
+     ParMarkBitMap::IterationStatus status;
+     status = mbm->iterate(&update_closure, &fill_closure, beg_addr, end_addr,
+-			  dense_prefix_end);
++                          dense_prefix_end);
+     if (status == ParMarkBitMap::incomplete) {
+       update_closure.do_addr(update_closure.source());
+     }
+   }
+ 
+   // Mark the chunks as filled.
+-  for (size_t done_chunk = beg_chunk; done_chunk < end_chunk; ++done_chunk) {
+-    sd.chunk(done_chunk)->set_completed();
++  ChunkData* const beg_cp = sd.chunk(beg_chunk);
++  ChunkData* const end_cp = sd.chunk(end_chunk);
++  for (ChunkData* cp = beg_cp; cp < end_cp; ++cp) {
++    cp->set_completed();
+   }
+ }
+ 
+@@ -3067,7 +3045,7 @@
+ }
+ 
+ void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm,
+-						SpaceId id) {
++                                                SpaceId id) {
+   assert(id < last_space_id, "bad space id");
+ 
+   ParallelCompactData& sd = summary_data();
+@@ -3083,10 +3061,10 @@
+   const ChunkData* const end_chunk = sd.addr_to_chunk_ptr(end_addr);
+   const ChunkData* cur_chunk;
+   for (cur_chunk = beg_chunk; cur_chunk < end_chunk; ++cur_chunk) {
+-    HeapWord* const addr = cur_chunk->obj_not_updated();
++    HeapWord* const addr = cur_chunk->deferred_obj_addr();
+     if (addr != NULL) {
+       if (start_array != NULL) {
+-	start_array->allocate_block(addr);
++        start_array->allocate_block(addr);
+       }
+       oop(addr)->update_contents(cm);
+       assert(oop(addr)->is_oop_or_null(), "should be an oop now");
+@@ -3129,7 +3107,7 @@
+ 
+ HeapWord*
+ PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
+-				 size_t src_chunk_idx)
++                                 size_t src_chunk_idx)
+ {
+   ParMarkBitMap* const bitmap = mark_bitmap();
+   const ParallelCompactData& sd = summary_data();
+@@ -3185,8 +3163,8 @@
+ }
+ 
+ void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
+-						     size_t beg_chunk,
+-						     HeapWord* end_addr)
++                                                     size_t beg_chunk,
++                                                     HeapWord* end_addr)
+ {
+   ParallelCompactData& sd = summary_data();
+   ChunkData* const beg = sd.chunk(beg_chunk);
+@@ -3203,9 +3181,9 @@
+ }
+ 
+ size_t PSParallelCompact::next_src_chunk(MoveAndUpdateClosure& closure,
+-					 SpaceId& src_space_id,
+-					 HeapWord*& src_space_top,
+-					 HeapWord* end_addr)
++                                         SpaceId& src_space_id,
++                                         HeapWord*& src_space_top,
++                                         HeapWord* end_addr)
+ {
+   typedef ParallelCompactData::ChunkData ChunkData;
+ 
+@@ -3251,21 +3229,21 @@
+       const ChunkData* const top_cp = sd.addr_to_chunk_ptr(top_aligned_up);
+ 
+       for (const ChunkData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
+-	if (src_cp->live_obj_size() > 0) {
+-	  // Found it.
+-	  assert(src_cp->destination() == destination,
+-		 "first live obj in the space must match the destination");
+-	  assert(src_cp->partial_obj_size() == 0,
+-		 "a space cannot begin with a partial obj");
+-
+-	  src_space_id = SpaceId(space_id);
+-	  src_space_top = space->top();
+-	  const size_t src_chunk_idx = sd.chunk(src_cp);
+-	  closure.set_source(sd.chunk_to_addr(src_chunk_idx));
+-	  return src_chunk_idx;
+-	} else {
+-	  assert(src_cp->data_size() == 0, "sanity");
+-	}
++        if (src_cp->live_obj_size() > 0) {
++          // Found it.
++          assert(src_cp->destination() == destination,
++                 "first live obj in the space must match the destination");
++          assert(src_cp->partial_obj_size() == 0,
++                 "a space cannot begin with a partial obj");
++
++          src_space_id = SpaceId(space_id);
++          src_space_top = space->top();
++          const size_t src_chunk_idx = sd.chunk(src_cp);
++          closure.set_source(sd.chunk_to_addr(src_chunk_idx));
++          return src_chunk_idx;
++        } else {
++          assert(src_cp->data_size() == 0, "sanity");
++        }
+       }
+     }
+   } while (++space_id < last_space_id);
+@@ -3312,6 +3290,7 @@
+     closure.copy_partial_obj();
+     if (closure.is_full()) {
+       decrement_destination_counts(cm, src_chunk_idx, closure.source());
++      chunk_ptr->set_deferred_obj_addr(NULL);
+       chunk_ptr->set_completed();
+       return;
+     }
+@@ -3324,14 +3303,14 @@
+       // Move to the next source chunk, possibly switching spaces as well.  All
+       // args except end_addr may be modified.
+       src_chunk_idx = next_src_chunk(closure, src_space_id, src_space_top,
+-				     end_addr);
++                                     end_addr);
+     }
+   }
+ 
+   do {
+     HeapWord* const cur_addr = closure.source();
+     HeapWord* const end_addr = MIN2(sd.chunk_align_up(cur_addr + 1),
+-				    src_space_top);
++                                    src_space_top);
+     IterationStatus status = bitmap->iterate(&closure, cur_addr, end_addr);
+ 
+     if (status == ParMarkBitMap::incomplete) {
+@@ -3339,28 +3318,33 @@
+       assert(closure.source() < end_addr, "sanity")
+       HeapWord* const obj_beg = closure.source();
+       HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(),
+-				       src_space_top);
++                                       src_space_top);
+       HeapWord* const obj_end = bitmap->find_obj_end(obj_beg, range_end);
+       if (obj_end < range_end) {
+-	// The end was found; the entire object will fit.
+-	status = closure.do_addr(obj_beg, bitmap->obj_size(obj_beg, obj_end));
+-	assert(status != ParMarkBitMap::would_overflow, "sanity");
++        // The end was found; the entire object will fit.
++        status = closure.do_addr(obj_beg, bitmap->obj_size(obj_beg, obj_end));
++        assert(status != ParMarkBitMap::would_overflow, "sanity");
+       } else {
+-	// The end was not found; the object will not fit.
+-	assert(range_end < src_space_top, "obj cannot cross space boundary");
+-	status = ParMarkBitMap::would_overflow;
++        // The end was not found; the object will not fit.
++        assert(range_end < src_space_top, "obj cannot cross space boundary");
++        status = ParMarkBitMap::would_overflow;
+       }
+     }
+ 
+     if (status == ParMarkBitMap::would_overflow) {
+       // The last object did not fit.  Note that interior oop updates were
+       // deferred, then copy enough of the object to fill the chunk.
+-      chunk_ptr->set_obj_not_updated(closure.destination());
++      chunk_ptr->set_deferred_obj_addr(closure.destination());
+       status = closure.copy_until_full(); // copies from closure.source()
++
++      decrement_destination_counts(cm, src_chunk_idx, closure.source());
++      chunk_ptr->set_completed();
++      return;
+     }
+ 
+     if (status == ParMarkBitMap::full) {
+       decrement_destination_counts(cm, src_chunk_idx, closure.source());
++      chunk_ptr->set_deferred_obj_addr(NULL);
+       chunk_ptr->set_completed();
+       return;
+     }
+@@ -3370,7 +3354,7 @@
+     // Move to the next source chunk, possibly switching spaces as well.  All
+     // args except end_addr may be modified.
+     src_chunk_idx = next_src_chunk(closure, src_space_id, src_space_top,
+-				   end_addr);
++                                   end_addr);
+   } while (true);
+ }
+ 
+@@ -3388,7 +3372,7 @@
+   HeapWord* end_addr = sp->top();
+ 
+ #ifdef ASSERT
+-  assert(beg_addr <= dp_addr &&	dp_addr <= end_addr, "bad dense prefix");
++  assert(beg_addr <= dp_addr && dp_addr <= end_addr, "bad dense prefix");
+   if (cm->should_verify_only()) {
+     VerifyUpdateClosure verify_update(cm, sp);
+     bitmap->iterate(&verify_update, beg_addr, end_addr);
+@@ -3423,7 +3407,7 @@
+     status = bitmap->iterate(&closure, dest_addr, end_addr);
+     assert(status == ParMarkBitMap::full, "iteration not complete");
+     assert(bitmap->find_obj_beg(closure.source(), end_addr) == end_addr,
+-	   "live objects skipped because closure is full");
++           "live objects skipped because closure is full");
+   }
+ }
+ 
+@@ -3478,7 +3462,7 @@
+ 
+   _source = addr;
+   assert(PSParallelCompact::summary_data().calc_new_pointer(source()) ==
+-	 destination(), "wrong destination");
++         destination(), "wrong destination");
+ 
+   if (words > words_remaining()) {
+     return ParMarkBitMap::would_overflow;
+@@ -3504,8 +3488,8 @@
+ }
+ 
+ UpdateOnlyClosure::UpdateOnlyClosure(ParMarkBitMap* mbm,
+-				     ParCompactionManager* cm,
+-				     PSParallelCompact::SpaceId space_id) :
++                                     ParCompactionManager* cm,
++                                     PSParallelCompact::SpaceId space_id) :
+   ParMarkBitMapClosure(mbm, cm),
+   _space_id(space_id),
+   _start_array(PSParallelCompact::start_array(space_id))
+@@ -3520,16 +3504,16 @@
+ }
+ 
+ BitBlockUpdateClosure::BitBlockUpdateClosure(ParMarkBitMap* mbm,
+-			ParCompactionManager* cm,
+-			size_t chunk_index) :
+-		        ParMarkBitMapClosure(mbm, cm), 
+-			_live_data_left(0), 
+-			_cur_block(0) {
+-  _chunk_start = 
++                        ParCompactionManager* cm,
++                        size_t chunk_index) :
++                        ParMarkBitMapClosure(mbm, cm),
++                        _live_data_left(0),
++                        _cur_block(0) {
++  _chunk_start =
+     PSParallelCompact::summary_data().chunk_to_addr(chunk_index);
+   _chunk_end =
+     PSParallelCompact::summary_data().chunk_to_addr(chunk_index) +
+-		 ParallelCompactData::ChunkSize;
++                 ParallelCompactData::ChunkSize;
+   _chunk_index = chunk_index;
+   _cur_block =
+     PSParallelCompact::summary_data().addr_to_block_idx(_chunk_start);
+@@ -3584,8 +3568,8 @@
+     gclog_or_tty->print_cr("first_block " PTR_FORMAT
+       " _offset " PTR_FORMAT
+       " _first_is_start_bit %d",
+-      first_block, 
+-      sd.block(first_block)->raw_offset(), 
++      first_block,
++      sd.block(first_block)->raw_offset(),
+       sd.block(first_block)->first_is_start_bit());
+     }
+   }
+@@ -3637,8 +3621,8 @@
+     // Does this object pass beyond its block?
+     if (block_of_obj < block_of_obj_last) {
+       // Object crosses block boundary.  Two blocks need to be updated:
+-      // 	the current block where the object started
+-      //	the block where the object ends
++      //        the current block where the object started
++      //        the block where the object ends
+       //
+       // The offset for blocks with no objects starting in them
+       // (e.g., blocks between _cur_block and  block_of_obj_last)
+@@ -3697,13 +3681,13 @@
+   if (forwarding_ptr == NULL) {
+     // The object is dead or not moving.
+     assert(bitmap()->is_unmarked(obj) || (new_pointer == (HeapWord*) obj),
+-	   "Object liveness is wrong.");
++           "Object liveness is wrong.");
+     return ParMarkBitMap::incomplete;
+   }
+   assert(UseParallelOldGCDensePrefix ||
+-	 (HeapMaximumCompactionInterval > 1) ||
+-	 (MarkSweepAlwaysCompactCount > 1) ||
+-	 (forwarding_ptr == new_pointer),
++         (HeapMaximumCompactionInterval > 1) ||
++         (MarkSweepAlwaysCompactCount > 1) ||
++         (forwarding_ptr == new_pointer),
+     "Calculation of new location is incorrect");
+   return ParMarkBitMap::incomplete;
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)psParallelCompact.hpp	1.47 07/05/05 17:05:30 JVM"
+-#endif
+ /*
+  * Copyright 2005-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class ParallelScavengeHeap;
+@@ -100,28 +97,33 @@
+   class ChunkData
+   {
+   public:
+-    static const size_t Available;
+-    static const size_t Claimed;
+-    static const size_t Completed;
++    // Destination address of the chunk.
++    HeapWord* destination() const { return _destination; }
+ 
+-  public:
+-    // Size of the partial object extending onto the chunk (words).
+-    size_t partial_obj_size() const { return _partial_obj_size; }
++    // The first chunk containing data destined for this chunk.
++    size_t source_chunk() const { return _source_chunk; }
++
++    // The object (if any) starting in this chunk and ending in a different
++    // chunk that could not be updated during the main (parallel) compaction
++    // phase.  This is different from _partial_obj_addr, which is an object that
++    // extends onto a source chunk.  However, the two uses do not overlap in
++    // time, so the same field is used to save space.
++    HeapWord* deferred_obj_addr() const { return _partial_obj_addr; }
+ 
+-    // The starting address of the partial object.
++    // The starting address of the partial object extending onto the chunk.
+     HeapWord* partial_obj_addr() const { return _partial_obj_addr; }
+ 
++    // Size of the partial object extending onto the chunk (words).
++    size_t partial_obj_size() const { return _partial_obj_size; }
++
+     // Size of live data that lies within this chunk due to objects that start
+     // in this chunk (words).  This does not include the partial object
+     // extending onto the chunk (if any), or the part of an object that extends
+     // onto the next chunk (if any).
+-    size_t live_obj_size() const { return _live_obj_size; }
++    size_t live_obj_size() const { return _dc_and_los & los_mask; }
+ 
+     // Total live data that lies within the chunk (words).
+-    size_t data_size() const { return _partial_obj_size + _live_obj_size; }
+-
+-    // Destination address of the chunk.
+-    HeapWord* destination() const { return _destination; }
++    size_t data_size() const { return partial_obj_size() + live_obj_size(); }
+ 
+     // The destination_count is the number of other chunks to which data from
+     // this chunk will be copied.  At the end of the summary phase, the valid
+@@ -136,96 +138,80 @@
+     // During compaction as chunks are emptied, the destination_count is
+     // decremented (atomically) and when it reaches 0, it can be claimed and
+     // then filled.
+-    // 
++    //
+     // A chunk is claimed for processing by atomically changing the
+-    // destination_count from Available to Claimed.  After a chunk has been
+-    // filled, the destination_count should be set to ChunkData::Completed.
+-    size_t destination_count() const { return _destination_count; }
+-
+-    // Cached address of the first live object starting in the chunk.  If the
+-    // value has not been cached yet, returns NULL.  If there is no live object
+-    // starting in the chunk, returns the address one past the end of the chunk.
+-    HeapWord* first_live_obj() { return _first_live_obj; }
+-
+-    // Object that crosses this chunk's right (upper) boundary and whose
+-    // starting address is in this chunk.  This is set during
+-    // compaction when an object extends over the right (upper) boundary of
+-    // a destination chunk.  This is different than _partial_obj_addr
+-    // which extends over the left (lower) boundary of a source chunk.
+-    // This object is unique to a chunk.
+-    HeapWord* obj_not_updated() const { return _obj_not_updated; }
+-
+-    // The first chunk containing data destined for this chunk.
+-    size_t source_chunk() const { return _source_chunk; }
++    // destination_count to the claimed value (dc_claimed).  After a chunk has
++    // been filled, the destination_count should be set to the completed value
++    // (dc_completed).
++    inline uint destination_count() const;
++    inline uint destination_count_raw() const;
+ 
+     // The location of the java heap data that corresponds to this chunk.
+-    HeapWord* data_location() const { return _data_location; }
++    inline HeapWord* data_location() const;
+ 
+     // The highest address referenced by objects in this chunk.
+-    HeapWord* highest_ref() const { return _highest_ref; }
++    inline HeapWord* highest_ref() const;
+ 
+     // Whether this chunk is available to be claimed, has been claimed, or has
+     // been completed.
+-    bool available() const { return _destination_count == Available; }
+-    bool claimed() const   { return _destination_count == Claimed; }
+-    bool completed() const { return _destination_count == Completed; }
++    //
++    // Minor subtlety:  claimed() returns true if the chunk is marked
++    // completed(), which is desirable since a chunk must be claimed before it
++    // can be completed.
++    bool available() const { return _dc_and_los < dc_one; }
++    bool claimed() const   { return _dc_and_los >= dc_claimed; }
++    bool completed() const { return _dc_and_los >= dc_completed; }
+ 
+     // These are not atomic.
+-    void set_partial_obj_size(size_t words)  { _partial_obj_size = words; }
+-    void set_partial_obj_addr(HeapWord* k)   { _partial_obj_addr = k; }
+-    void set_live_obj_size(size_t words)     { _live_obj_size = words; }
+-    void set_destination(HeapWord* addr)     { _destination = addr; }
+-    void set_destination_count(size_t count) { _destination_count = count; }
+-    void set_first_live_obj(HeapWord* v)     { _first_live_obj = v; }
+-    void set_obj_not_updated(HeapWord* addr) { _obj_not_updated = addr; }
+-    void set_source_chunk(size_t chunk)      { _source_chunk = chunk; }
+-    void set_data_location(HeapWord* addr)   { _data_location = addr; }
++    void set_destination(HeapWord* addr)       { _destination = addr; }
++    void set_source_chunk(size_t chunk)        { _source_chunk = chunk; }
++    void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
++    void set_partial_obj_addr(HeapWord* addr)  { _partial_obj_addr = addr; }
++    void set_partial_obj_size(size_t words)    {
++      _partial_obj_size = (chunk_sz_t) words;
++    }
++
++    inline void set_destination_count(uint count);
++    inline void set_live_obj_size(size_t words);
++    inline void set_data_location(HeapWord* addr);
+     inline void set_completed();
+     inline bool claim_unsafe();
+ 
+     // These are atomic.
+-    void add_live_obj(size_t words)          { add_live_obj((intptr_t)words); }
+-
+-    void set_highest_ref(HeapWord* addr) {
+-      HeapWord* tmp = _highest_ref;
+-      while (addr > tmp) {
+-	tmp = (HeapWord*)Atomic::cmpxchg_ptr(addr, &_highest_ref, tmp);
+-      }
+-    }
+-
+-    void decrement_destination_count() {
+-      assert(_destination_count < Claimed, "Chunk already claimed");
+-      assert(_destination_count > 0, "count must not go negative");
+-      Atomic::add_ptr(-1, &_destination_count);
+-    }
+-
+-    bool claim() {
+-      size_t old_val = cmpxchg_size_t(Claimed, &_destination_count, Available);
+-      return old_val == Available;
+-    }
++    inline void add_live_obj(size_t words);
++    inline void set_highest_ref(HeapWord* addr);
++    inline void decrement_destination_count();
++    inline bool claim();
+ 
+   private:
+-    static size_t cmpxchg_size_t(size_t new_val, volatile size_t* addr,
+-				 size_t old_val) {
+-      return size_t(Atomic::cmpxchg_ptr((void*)new_val, addr, (void*)old_val));
+-    }
+-    void add_live_obj(intptr_t sz) { Atomic::add_ptr(sz, &_live_obj_size); }
++    // The type used to represent object sizes within a chunk.
++    typedef uint chunk_sz_t;
+ 
+-  private:
+-    size_t          _partial_obj_size;
+-    HeapWord*       _partial_obj_addr;
+-    volatile size_t _live_obj_size;
+-    HeapWord*       _destination;
+-    HeapWord*       _first_live_obj;
+-    HeapWord*	    _obj_not_updated;
+-    size_t          _source_chunk;
+-    HeapWord*       _data_location;
+-    HeapWord*       _highest_ref;
+-    volatile size_t _destination_count;
++    // Constants for manipulating the _dc_and_los field, which holds both the
++    // destination count and live obj size.  The live obj size lives at the
++    // least significant end so no masking is necessary when adding.
++    static const chunk_sz_t dc_shift;           // Shift amount.
++    static const chunk_sz_t dc_mask;            // Mask for destination count.
++    static const chunk_sz_t dc_one;             // 1, shifted appropriately.
++    static const chunk_sz_t dc_claimed;         // Chunk has been claimed.
++    static const chunk_sz_t dc_completed;       // Chunk has been completed.
++    static const chunk_sz_t los_mask;           // Mask for live obj size.
++
++    HeapWord*           _destination;
++    size_t              _source_chunk;
++    HeapWord*           _partial_obj_addr;
++    chunk_sz_t          _partial_obj_size;
++    chunk_sz_t volatile _dc_and_los;
++#ifdef ASSERT
++    // These enable optimizations that are only partially implemented.  Use
++    // debug builds to prevent the code fragments from breaking.
++    HeapWord*           _data_location;
++    HeapWord*           _highest_ref;
++#endif  // #ifdef ASSERT
+ 
+ #ifdef ASSERT
+    public:
+-    uint	    _pushed;	// 0 until chunk is pushed onto a worker's stack
++    uint            _pushed;    // 0 until chunk is pushed onto a worker's stack
+    private:
+ #endif
+   };
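To make the new packed representation concrete, a small standalone sketch. The hunk above only declares dc_shift, dc_mask, dc_one, dc_claimed, dc_completed and los_mask; their definitions live in the .cpp side and are not shown here, so the values below are assumptions chosen to be consistent with the threshold tests in available()/claimed()/completed().

    #include <cassert>
    #include <cstdint>

    typedef uint32_t chunk_sz_t;
    // Assumed layout: live-obj size in the low 27 bits, count in the bits above.
    const chunk_sz_t dc_shift     = 27;
    const chunk_sz_t dc_mask      = ~chunk_sz_t(0) << dc_shift;
    const chunk_sz_t dc_one       = chunk_sz_t(1) << dc_shift;
    const chunk_sz_t dc_claimed   = chunk_sz_t(8) << dc_shift;
    const chunk_sz_t dc_completed = chunk_sz_t(0xc) << dc_shift;
    const chunk_sz_t los_mask     = ~dc_mask;

    int main() {
      // A chunk with destination count 3 and 1000 live words:
      chunk_sz_t dc_and_los = (3 * dc_one) | 1000;

      assert((dc_and_los & los_mask) == 1000);            // live_obj_size()
      assert(((dc_and_los & dc_mask) >> dc_shift) == 3);  // destination_count()

      // The threshold tests from the class above:
      assert(!(dc_and_los < dc_one));         // not available(): count != 0
      assert(!(dc_and_los >= dc_claimed));    // not claimed()
      assert(!(dc_and_los >= dc_completed));  // not completed()

      // Marking the chunk completed keeps the live size, and because
      // dc_completed >= dc_claimed it also satisfies claimed() -- the
      // "minor subtlety" noted in the comment above.
      dc_and_los = dc_completed | (dc_and_los & los_mask);
      assert(dc_and_los >= dc_claimed && dc_and_los >= dc_completed);
      return 0;
    }
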
+@@ -233,7 +219,7 @@
+   // 'Blocks' allow shorter sections of the bitmap to be searched.  Each Block
+   // holds an offset, which is the amount of live data in the Chunk to the left
+   // of the first live object in the Block.  This amount of live data will
+-  // include any object extending into the block. The first block in 
++  // include any object extending into the block. The first block in
+   // a chunk does not include any partial object extending into
+   // the chunk.
+   //
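A worked example of the offset rule, with invented sizes (the block size and word counts here are illustrative only):

    Blocks B0 and B1 in one chunk; a 96-word object X starts in B0 and its
    last 8 words extend into B1; an object Y starts in B1 after X ends.

      offset(B0) = 0    X is B0's first live object and no live words in the
                        chunk lie to its left (a partial object reaching into
                        the chunk would also be excluded, per the first-block
                        rule above).
      offset(B1) = 96   all of X lies left of Y, B1's first live object,
                        including the 8 words that extend into B1.
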
+@@ -251,7 +237,7 @@
+     void set_first_is_start_bit(bool v) { _first_is_start_bit = v; }
+ 
+ #if 0
+-    // The need for this method was anticipated but it is 
++    // The need for this method was anticipated but it is
+     // never actually used.  Do not include it for now.  If
+     // it is needed, consider the problem of what is passed
+     // as "v".  To avoid warning errors the method set_start_bit_offset()
+@@ -274,15 +260,15 @@
+       _offset = - _offset;
+       _first_is_start_bit = false;
+     }
+-    bool first_is_start_bit() { 
++    bool first_is_start_bit() {
+       assert(_set_phase > 0, "Not initialized");
+-      return _first_is_start_bit; 
++      return _first_is_start_bit;
+     }
+-    bool first_is_end_bit() { 
++    bool first_is_end_bit() {
+       assert(_set_phase > 0, "Not initialized");
+-      return !_first_is_start_bit; 
++      return !_first_is_start_bit;
+     }
+-    
++
+   private:
+     blk_ofs_t _offset;
+     // This is temporary until the mark_bitmap is separated into
+@@ -322,10 +308,10 @@
+   // destination of chunk n is simply the start of chunk n.  The argument beg
+   // must be chunk-aligned; end need not be.
+   void summarize_dense_prefix(HeapWord* beg, HeapWord* end);
+-  
++
+   bool summarize(HeapWord* target_beg, HeapWord* target_end,
+-		 HeapWord* source_beg, HeapWord* source_end,
+-		 HeapWord** target_next, HeapWord** source_next = 0);
++                 HeapWord* source_beg, HeapWord* source_end,
++                 HeapWord** target_next, HeapWord** source_next = 0);
+ 
+   void clear();
+   void clear_range(size_t beg_chunk, size_t end_chunk);
+@@ -351,16 +337,12 @@
+   // Analogous to chunk_offset() for blocks.
+   size_t     block_offset(const HeapWord* addr) const;
+   size_t     addr_to_block_idx(const HeapWord* addr) const;
+-  size_t     addr_to_block_idx(const oop obj) const { 
+-    return addr_to_block_idx((HeapWord*) obj); 
++  size_t     addr_to_block_idx(const oop obj) const {
++    return addr_to_block_idx((HeapWord*) obj);
+   }
+   inline BlockData* addr_to_block_ptr(const HeapWord* addr) const;
+   inline HeapWord*  block_to_addr(size_t block) const;
+ 
+-  // The given object (new location) was not updated.
+-  // Set the _obj_not_updated field in the appropriate chunk.
+-  void set_obj_not_updated(HeapWord* moved_obj);
+-
+   // Return the address one past the end of the partial object.
+   HeapWord* partial_obj_end(size_t chunk_idx) const;
+ 
+@@ -386,23 +368,13 @@
+   // If there is no partial object, returns false.
+   bool partial_obj_ends_in_block(size_t block_index);
+ 
+-  // Returns the address of the first live object starting in the chunk.
+-  HeapWord* first_live_or_end_in_chunk(size_t chunk_index);
+-
+-  // Returns the address of the first live object starting in the chunk.
+-  HeapWord* first_live_or_end_in_chunk_range(size_t chunk_index_start,
+-					     size_t chunk_index_end);
+-
+-  // Returns the address of the first live object starting in the block.
+-  HeapWord* first_live_object_in_block(size_t block_index);
+-
+   // Returns the block index for the block
+   static size_t block_idx(BlockData* block);
+ 
+-#ifdef	ASSERT
++#ifdef  ASSERT
+   void verify_clear(const PSVirtualSpace* vspace);
+   void verify_clear();
+-#endif	// #ifdef ASSERT
++#endif  // #ifdef ASSERT
+ 
+ private:
+   bool initialize_block_data(size_t region_size);
+@@ -411,9 +383,9 @@
+ 
+ private:
+   HeapWord*       _region_start;
+-#ifdef	ASSERT
++#ifdef  ASSERT
+   HeapWord*       _region_end;
+-#endif	// #ifdef ASSERT
++#endif  // #ifdef ASSERT
+ 
+   PSVirtualSpace* _chunk_vspace;
+   ChunkData*      _chunk_data;
+@@ -424,23 +396,97 @@
+   size_t          _block_count;
+ };
+ 
++inline uint
++ParallelCompactData::ChunkData::destination_count_raw() const
++{
++  return _dc_and_los & dc_mask;
++}
++
++inline uint
++ParallelCompactData::ChunkData::destination_count() const
++{
++  return destination_count_raw() >> dc_shift;
++}
++
++inline void
++ParallelCompactData::ChunkData::set_destination_count(uint count)
++{
++  assert(count <= (dc_completed >> dc_shift), "count too large");
++  const chunk_sz_t live_sz = (chunk_sz_t) live_obj_size();
++  _dc_and_los = (count << dc_shift) | live_sz;
++}
++
++inline void ParallelCompactData::ChunkData::set_live_obj_size(size_t words)
++{
++  assert(words <= los_mask, "would overflow");
++  _dc_and_los = destination_count_raw() | (chunk_sz_t)words;
++}
++
++inline void ParallelCompactData::ChunkData::decrement_destination_count()
++{
++  assert(_dc_and_los < dc_claimed, "already claimed");
++  assert(_dc_and_los >= dc_one, "count would go negative");
++  Atomic::add((int)dc_mask, (volatile int*)&_dc_and_los);
++}
++
++inline HeapWord* ParallelCompactData::ChunkData::data_location() const
++{
++  DEBUG_ONLY(return _data_location;)
++  NOT_DEBUG(return NULL;)
++}
++
++inline HeapWord* ParallelCompactData::ChunkData::highest_ref() const
++{
++  DEBUG_ONLY(return _highest_ref;)
++  NOT_DEBUG(return NULL;)
++}
++
++inline void ParallelCompactData::ChunkData::set_data_location(HeapWord* addr)
++{
++  DEBUG_ONLY(_data_location = addr;)
++}
++
+ inline void ParallelCompactData::ChunkData::set_completed()
+ {
+   assert(claimed(), "must be claimed first");
+-  set_destination_count(Completed);
++  _dc_and_los = dc_completed | (chunk_sz_t) live_obj_size();
+ }
+ 
+ // MT-unsafe claiming of a chunk.  Should only be used during single threaded
+ // execution.
+ inline bool ParallelCompactData::ChunkData::claim_unsafe()
+ {
+-  if (destination_count() == Available) {
+-    set_destination_count(Claimed);
++  if (available()) {
++    _dc_and_los |= dc_claimed;
+     return true;
+   }
+   return false;
+ }
+ 
++inline void ParallelCompactData::ChunkData::add_live_obj(size_t words)
++{
++  assert(words <= (size_t)los_mask - live_obj_size(), "overflow");
++  Atomic::add((int) words, (volatile int*) &_dc_and_los);
++}
++
++inline void ParallelCompactData::ChunkData::set_highest_ref(HeapWord* addr)
++{
++#ifdef ASSERT
++  HeapWord* tmp = _highest_ref;
++  while (addr > tmp) {
++    tmp = (HeapWord*)Atomic::cmpxchg_ptr(addr, &_highest_ref, tmp);
++  }
++#endif  // #ifdef ASSERT
++}
++
++inline bool ParallelCompactData::ChunkData::claim()
++{
++  const int los = (int) live_obj_size();
++  const int old = Atomic::cmpxchg(dc_claimed | los,
++                                  (volatile int*) &_dc_and_los, los);
++  return old == los;
++}
++
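The two atomic operations above lean on the same packing. Below is a portable sketch using std::atomic in place of HotSpot's Atomic class, with the constant values assumed as in the earlier sketch: claim() succeeds only when the count field is already zero, and decrement_destination_count() exploits the fact that adding dc_mask is modular addition of -dc_one, so the count drops by one without disturbing the live-obj size and without a compare-and-swap loop.

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    const uint32_t dc_shift   = 27;               // assumed value, as above
    const uint32_t dc_mask    = ~0u << dc_shift;  // == (uint32_t)(0u - (1u << dc_shift))
    const uint32_t dc_one     = 1u << dc_shift;
    const uint32_t dc_claimed = 8u << dc_shift;
    const uint32_t los_mask   = ~dc_mask;

    // Emptying a source chunk: lock-free decrement of the count field.
    void decrement_destination_count(std::atomic<uint32_t>& dc_and_los) {
      dc_and_los.fetch_add(dc_mask);              // adds -dc_one modulo 2^32
    }

    // Claiming: CAS from "count == 0, bare los" to "claimed | los".
    bool claim(std::atomic<uint32_t>& dc_and_los) {
      uint32_t los = dc_and_los.load() & los_mask;
      uint32_t expected = los;                    // only claimable at count 0
      return dc_and_los.compare_exchange_strong(expected, dc_claimed | los);
    }

    int main() {
      std::atomic<uint32_t> word((1 * dc_one) | 500);  // count 1, 500 live words
      assert(!claim(word));                       // count still nonzero
      decrement_destination_count(word);          // count 1 -> 0
      assert(claim(word));                        // now claimable, exactly once
      assert(!claim(word));                       // a second claim fails
      return 0;
    }
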
+ inline ParallelCompactData::ChunkData*
+ ParallelCompactData::chunk(size_t chunk_idx) const
+ {
+@@ -558,7 +604,7 @@
+ 
+ // Abstract closure for use with ParMarkBitMap::iterate(), which will invoke the
+ // do_addr() method.
+-// 
++//
+ // The closure is initialized with the number of heap words to process
+ // (words_remaining()), and becomes 'full' when it reaches 0.  The do_addr()
+ // methods in subclasses should update the total as words are processed.  Since
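A minimal sketch of the do_addr() contract described in the comment above, using simplified stand-in types (the real closure also carries a bitmap and compaction manager, omitted here); the status names mirror the ParMarkBitMap values used elsewhere in this patch.

    #include <cstddef>

    enum IterationStatus { incomplete, full, would_overflow };

    class SketchClosure {
      size_t _words_remaining;                    // heap words left to process
     public:
      explicit SketchClosure(size_t words) : _words_remaining(words) {}
      size_t words_remaining() const { return _words_remaining; }
      bool   is_full()         const { return _words_remaining == 0; }

      // Subclass hook: debit the budget and report when the closure fills up.
      IterationStatus do_addr(void* addr, size_t words) {
        if (words > _words_remaining) {
          return would_overflow;                  // object doesn't fit; caller defers it
        }
        (void)addr;  // a real closure would copy/update 'words' heap words here
        _words_remaining -= words;
        return is_full() ? full : incomplete;
      }
    };
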
+@@ -574,7 +620,7 @@
+ 
+  public:
+   inline ParMarkBitMapClosure(ParMarkBitMap* mbm, ParCompactionManager* cm,
+-			      size_t words = max_uintx);
++                              size_t words = max_uintx);
+ 
+   inline ParCompactionManager* compaction_manager() const;
+   inline ParMarkBitMap*        bitmap() const;
+@@ -593,18 +639,18 @@
+   ParMarkBitMap* const        _bitmap;
+   ParCompactionManager* const _compaction_manager;
+   DEBUG_ONLY(const size_t     _initial_words_remaining;) // Useful in debugger.
+-  size_t                      _words_remaining;	// Words left to copy.
++  size_t                      _words_remaining; // Words left to copy.
+ 
+  protected:
+-  HeapWord*                   _source;		// Next addr that would be read.
++  HeapWord*                   _source;          // Next addr that would be read.
+ };
+ 
+ inline
+ ParMarkBitMapClosure::ParMarkBitMapClosure(ParMarkBitMap* bitmap,
+-					   ParCompactionManager* cm,
+-					   size_t words):
++                                           ParCompactionManager* cm,
++                                           size_t words):
+   _bitmap(bitmap), _compaction_manager(cm)
+-#ifdef	ASSERT
++#ifdef  ASSERT
+   , _initial_words_remaining(words)
+ #endif
+ {
+@@ -651,9 +697,9 @@
+   size_t    _chunk_index;
+ 
+  public:
+-  BitBlockUpdateClosure(ParMarkBitMap* mbm, 
+-			ParCompactionManager* cm, 
+-			size_t chunk_index);
++  BitBlockUpdateClosure(ParMarkBitMap* mbm,
++                        ParCompactionManager* cm,
++                        size_t chunk_index);
+ 
+   size_t cur_block() { return _cur_block; }
+   size_t chunk_index() { return _chunk_index; }
+@@ -727,10 +773,10 @@
+     void do_oop(oop* p) { adjust_pointer(p, _is_root); }
+   };
+ 
+-  // Closure for verifying update of pointers.  Does not 
++  // Closure for verifying update of pointers.  Does not
+   // have any side effects.
+   class VerifyUpdateClosure: public ParMarkBitMapClosure {
+-    const MutableSpace* _space;	// Is this ever used?
++    const MutableSpace* _space; // Is this ever used?
+ 
+    public:
+     VerifyUpdateClosure(ParCompactionManager* cm, const MutableSpace* sp) :
+@@ -769,7 +815,7 @@
+   static CollectorCounters*   _counters;
+   static ParMarkBitMap        _mark_bitmap;
+   static ParallelCompactData  _summary_data;
+-  static IsAliveClosure	      _is_alive_closure;
++  static IsAliveClosure       _is_alive_closure;
+   static SpaceInfo            _space_info[last_space_id];
+   static bool                 _print_phases;
+   static AdjustPointerClosure _adjust_root_pointer_closure;
+@@ -786,9 +832,9 @@
+   static double _dwl_std_dev;
+   static double _dwl_first_term;
+   static double _dwl_adjustment;
+-#ifdef	ASSERT
++#ifdef  ASSERT
+   static bool   _dwl_initialized;
+-#endif	// #ifdef ASSERT
++#endif  // #ifdef ASSERT
+ 
+  private:
+   // Closure accessors
+@@ -809,7 +855,7 @@
+ 
+   // Mark live objects
+   static void marking_phase(ParCompactionManager* cm,
+-			    bool maximum_heap_compaction);
++                            bool maximum_heap_compaction);
+   static void follow_stack(ParCompactionManager* cm);
+   static void follow_weak_klass_links(ParCompactionManager* cm);
+ 
+@@ -821,7 +867,7 @@
+   // Compute the dense prefix for the designated space.  This is an experimental
+   // implementation currently not used in production.
+   static HeapWord* compute_dense_prefix_via_density(const SpaceId id,
+-						    bool maximum_compaction);
++                                                    bool maximum_compaction);
+ 
+   // Methods used to compute the dense prefix.
+ 
+@@ -840,34 +886,34 @@
+   // dead_words of dead space to the left.  The argument beg must be the first
+   // chunk in the space that is not completely live.
+   static ChunkData* dead_wood_limit_chunk(const ChunkData* beg,
+-					  const ChunkData* end,
+-					  size_t dead_words);
++                                          const ChunkData* end,
++                                          size_t dead_words);
+ 
+   // Return a pointer to the first chunk in the range [beg, end) that is not
+   // completely full.
+   static ChunkData* first_dead_space_chunk(const ChunkData* beg,
+-					   const ChunkData* end);
++                                           const ChunkData* end);
+ 
+   // Return a value indicating the benefit or 'yield' if the compacted region
+   // were to start (or equivalently if the dense prefix were to end) at the
+   // candidate chunk.  Higher values are better.
+-  // 
++  //
+   // The value is based on the amount of space reclaimed vs. the costs of (a)
+   // updating references in the dense prefix plus (b) copying objects and
+   // updating references in the compacted region.
+   static inline double reclaimed_ratio(const ChunkData* const candidate,
+-				       HeapWord* const bottom,
+-				       HeapWord* const top,
+-				       HeapWord* const new_top);
++                                       HeapWord* const bottom,
++                                       HeapWord* const top,
++                                       HeapWord* const new_top);
+ 
+   // Compute the dense prefix for the designated space.
+   static HeapWord* compute_dense_prefix(const SpaceId id,
+-					bool maximum_compaction);
++                                        bool maximum_compaction);
+ 
+   // Return true if dead space crosses onto the specified Chunk; bit must be the
+   // bit index corresponding to the first word of the Chunk.
+   static inline bool dead_space_crosses_boundary(const ChunkData* chunk,
+-						 idx_t bit);
++                                                 idx_t bit);
+ 
+   // Summary phase utility routine to fill dead space (if any) at the dense
+   // prefix boundary.  Should only be called if the dense prefix is
+@@ -881,8 +927,8 @@
+   static bool block_first_offset(size_t block_index, idx_t* block_offset_ptr);
+ 
+   // Fill in the BlockData
+-  static void summarize_blocks(ParCompactionManager* cm, 
+-			       SpaceId first_compaction_space_id);
++  static void summarize_blocks(ParCompactionManager* cm,
++                               SpaceId first_compaction_space_id);
+ 
+   // The space that is compacted after space_id.
+   static SpaceId next_compaction_space_id(SpaceId space_id);
+@@ -899,17 +945,17 @@
+ 
+   // Add available chunks to the stack and draining tasks to the task queue.
+   static void enqueue_chunk_draining_tasks(GCTaskQueue* q,
+-					   uint parallel_gc_threads);
++                                           uint parallel_gc_threads);
+ 
+   // Add dense prefix update tasks to the task queue.
+   static void enqueue_dense_prefix_tasks(GCTaskQueue* q,
+-					 uint parallel_gc_threads);
++                                         uint parallel_gc_threads);
+ 
+   // Add chunk stealing tasks to the task queue.
+   static void enqueue_chunk_stealing_tasks(
+-				       GCTaskQueue* q,
+-				       ParallelTaskTerminator* terminator_ptr,
+-				       uint parallel_gc_threads);
++                                       GCTaskQueue* q,
++                                       ParallelTaskTerminator* terminator_ptr,
++                                       uint parallel_gc_threads);
+ 
+   // For debugging only - compacts the old gen serially
+   static void compact_serial(ParCompactionManager* cm);
+@@ -917,8 +963,8 @@
+   // If objects are left in eden after a collection, try to move the boundary
+   // and absorb them into the old gen.  Returns true if eden was emptied.
+   static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
+-					 PSYoungGen* young_gen,
+-					 PSOldGen* old_gen);
++                                         PSYoungGen* young_gen,
++                                         PSOldGen* old_gen);
+ 
+   // Reset time since last full gc
+   static void reset_millis_since_last_gc();
+@@ -986,21 +1032,21 @@
+ 
+   // Used to add tasks
+   static GCTaskManager* const gc_task_manager();
+-  static klassOop updated_int_array_klass_obj() { 
+-    return _updated_int_array_klass_obj; 
++  static klassOop updated_int_array_klass_obj() {
++    return _updated_int_array_klass_obj;
+   }
+-  
++
+   // Marking support
+   static inline bool mark_obj(oop obj);
+-  static bool mark_obj(oop* p)  { 
++  static bool mark_obj(oop* p)  {
+     if (*p != NULL) {
+-      return mark_obj(*p); 
++      return mark_obj(*p);
+     } else {
+       return false;
+     }
+   }
+-  static void mark_and_push(ParCompactionManager* cm, oop* p) {     
+-					  // Check mark and maybe push on
++  static void mark_and_push(ParCompactionManager* cm, oop* p) {
++                                          // Check mark and maybe push on
+                                           // marking stack
+     oop m = *p;
+     if (m != NULL && mark_bitmap()->is_unmarked(m)) {
+@@ -1020,7 +1066,7 @@
+   static inline ObjectStartArray* start_array(SpaceId space_id);
+ 
+   // Return true if the klass should be updated.
+-  static inline bool should_update_klass(klassOop k); 
++  static inline bool should_update_klass(klassOop k);
+ 
+   // Move and update the live objects in the specified space.
+   static void move_and_update(ParCompactionManager* cm, SpaceId space_id);
+@@ -1028,11 +1074,11 @@
+   // Process the end of the given chunk range in the dense prefix.
+   // This includes saving any object not updated.
+   static void dense_prefix_chunks_epilogue(ParCompactionManager* cm,
+-					   size_t chunk_start_index,
+-					   size_t chunk_end_index,
+-					   idx_t exiting_object_offset,
+-					   idx_t chunk_offset_start,
+-					   idx_t chunk_offset_end);
++                                           size_t chunk_start_index,
++                                           size_t chunk_end_index,
++                                           idx_t exiting_object_offset,
++                                           idx_t chunk_offset_start,
++                                           idx_t chunk_offset_end);
+ 
+   // Update a chunk in the dense prefix.  For each live object
+   // in the chunk, update its interior references.  For each
+@@ -1043,9 +1089,9 @@
+   // (holds only dead objects that don't need any processing), so
+   // dead space can be filled in any order.
+   static void update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
+-						  SpaceId space_id,
+-					          size_t chunk_index_start,
+-						  size_t chunk_index_end);
++                                                  SpaceId space_id,
++                                                  size_t chunk_index_start,
++                                                  size_t chunk_index_end);
+ 
+   // Return the address of the count + 1st live word in the range [beg, end).
+   static HeapWord* skip_live_words(HeapWord* beg, HeapWord* end, size_t count);
+@@ -1053,7 +1099,7 @@
+   // Return the address of the word to be copied to dest_addr, which must be
+   // aligned to a chunk boundary.
+   static HeapWord* first_src_addr(HeapWord* const dest_addr,
+-				  size_t src_chunk_idx);
++                                  size_t src_chunk_idx);
+ 
+   // Determine the next source chunk, set closure.source() to the start of the
+   // new chunk and return the chunk index.  Parameter end_addr is the address one
+@@ -1061,15 +1107,15 @@
+   // new source space and set src_space_id (in-out parameter) and src_space_top
+   // (out parameter) accordingly.
+   static size_t next_src_chunk(MoveAndUpdateClosure& closure,
+-			       SpaceId& src_space_id,
+-			       HeapWord*& src_space_top,
+-			       HeapWord* end_addr);
++                               SpaceId& src_space_id,
++                               HeapWord*& src_space_top,
++                               HeapWord* end_addr);
+ 
+   // Decrement the destination count for each non-empty source chunk in the
+   // range [beg_chunk, chunk(chunk_align_up(end_addr))).
+   static void decrement_destination_counts(ParCompactionManager* cm,
+-					   size_t beg_chunk,
+-					   HeapWord* end_addr);
++                                           size_t beg_chunk,
++                                           HeapWord* end_addr);
+ 
+   // Fill a chunk, copying objects from one or more source chunks.
+   static void fill_chunk(ParCompactionManager* cm, size_t chunk_idx);
+@@ -1081,15 +1127,15 @@
+   static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);
+ 
+   // Mark pointer and follow contents.
+-  static void mark_and_follow(ParCompactionManager* cm, oop* p);    
++  static void mark_and_follow(ParCompactionManager* cm, oop* p);
+ 
+   static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
+   static ParallelCompactData& summary_data() { return _summary_data; }
+ 
+   static inline void adjust_pointer(oop* p) { adjust_pointer(p, false); }
+   static inline void adjust_pointer(oop* p,
+-				    HeapWord* beg_addr,
+-				    HeapWord* end_addr);
++                                    HeapWord* beg_addr,
++                                    HeapWord* end_addr);
+ 
+   // Reference Processing
+   static ReferenceProcessor* const ref_processor() { return _ref_processor; }
+@@ -1117,26 +1163,26 @@
+   // within an oop that was live during the last GC. Helpful for
+   // tracking down heap stomps.
+   static void print_new_location_of_heap_address(HeapWord* q);
+-#endif	// #ifdef VALIDATE_MARK_SWEEP
++#endif  // #ifdef VALIDATE_MARK_SWEEP
+ 
+   // Call backs for class unloading
+   // Update subklass/sibling/implementor links at end of marking.
+-  static void revisit_weak_klass_link(ParCompactionManager* cm, Klass* k); 
++  static void revisit_weak_klass_link(ParCompactionManager* cm, Klass* k);
+ 
+-#ifndef	PRODUCT
++#ifndef PRODUCT
+   // Debugging support.
+   static const char* space_names[last_space_id];
+   static void print_chunk_ranges();
+   static void print_dense_prefix_stats(const char* const algorithm,
+-				       const SpaceId id,
+-				       const bool maximum_compaction,
+-				       HeapWord* const addr);
+-#endif	// #ifndef PRODUCT
++                                       const SpaceId id,
++                                       const bool maximum_compaction,
++                                       HeapWord* const addr);
++#endif  // #ifndef PRODUCT
+ 
+-#ifdef	ASSERT
++#ifdef  ASSERT
+   // Verify that all the chunks have been emptied.
+   static void verify_complete(SpaceId space_id);
+-#endif	// #ifdef ASSERT
++#endif  // #ifdef ASSERT
+ };
+ 
+ bool PSParallelCompact::mark_obj(oop obj) {
+@@ -1163,11 +1209,11 @@
+ 
+ inline bool
+ PSParallelCompact::dead_space_crosses_boundary(const ChunkData* chunk,
+-					       idx_t bit)
++                                               idx_t bit)
+ {
+   assert(bit > 0, "cannot call this for the first bit/chunk");
+   assert(_summary_data.chunk_to_addr(chunk) == _mark_bitmap.bit_to_addr(bit),
+-	 "sanity check");
++         "sanity check");
+ 
+   // Dead space crosses the boundary if (1) a partial object does not extend
+   // onto the chunk, (2) an object does not start at the beginning of the chunk,
+@@ -1212,8 +1258,8 @@
+ }
+ 
+ inline void PSParallelCompact::adjust_pointer(oop* p,
+-					      HeapWord* beg_addr,
+-					      HeapWord* end_addr) {
++                                              HeapWord* beg_addr,
++                                              HeapWord* end_addr) {
+   if (is_in(p, beg_addr, end_addr)) {
+     adjust_pointer(p);
+   }
+@@ -1222,8 +1268,8 @@
+ class MoveAndUpdateClosure: public ParMarkBitMapClosure {
+  public:
+   inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
+-			      ObjectStartArray* start_array,
+-			      HeapWord* destination, size_t words);
++                              ObjectStartArray* start_array,
++                              HeapWord* destination, size_t words);
+ 
+   // Accessors.
+   HeapWord* destination() const         { return _destination; }
+@@ -1249,15 +1295,15 @@
+ 
+  protected:
+   ObjectStartArray* const _start_array;
+-  HeapWord*               _destination;		// Next addr to be written.
++  HeapWord*               _destination;         // Next addr to be written.
+ };
+ 
+ inline
+ MoveAndUpdateClosure::MoveAndUpdateClosure(ParMarkBitMap* bitmap,
+-					   ParCompactionManager* cm,
+-					   ObjectStartArray* start_array,
+-					   HeapWord* destination,
+-					   size_t words) :
++                                           ParCompactionManager* cm,
++                                           ObjectStartArray* start_array,
++                                           HeapWord* destination,
++                                           size_t words) :
+   ParMarkBitMapClosure(bitmap, cm, words), _start_array(start_array)
+ {
+   _destination = destination;
+@@ -1276,9 +1322,9 @@
+   ObjectStartArray* const          _start_array;
+ 
+  public:
+-  UpdateOnlyClosure(ParMarkBitMap* mbm, 
+-		    ParCompactionManager* cm,
+-		    PSParallelCompact::SpaceId space_id);
++  UpdateOnlyClosure(ParMarkBitMap* mbm,
++                    ParCompactionManager* cm,
++                    PSParallelCompact::SpaceId space_id);
+ 
+   // Update the object.
+   virtual IterationStatus do_addr(HeapWord* addr, size_t words);
+@@ -1299,8 +1345,8 @@
+     _start_array(PSParallelCompact::start_array(space_id))
+   {
+     assert(_space_id == PSParallelCompact::perm_space_id ||
+-	   _space_id == PSParallelCompact::old_space_id,
+-	   "cannot use FillClosure in the young gen");
++           _space_id == PSParallelCompact::old_space_id,
++           "cannot use FillClosure in the young gen");
+     assert(bitmap() != NULL, "need a bitmap");
+     assert(_start_array != NULL, "need a start array");
+   }
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)psPermGen.cpp	1.28 07/05/05 17:05:30 JVM"
+-#endif
+ /*
+  * Copyright 2001-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,20 +19,20 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_psPermGen.cpp.incl"
+ 
+-PSPermGen::PSPermGen(ReservedSpace rs, size_t alignment, 
+-		     size_t initial_size, size_t min_size, size_t max_size,
++PSPermGen::PSPermGen(ReservedSpace rs, size_t alignment,
++                     size_t initial_size, size_t min_size, size_t max_size,
+                      const char* gen_name, int level) :
+   PSOldGen(rs, alignment, initial_size, min_size, max_size, gen_name, level),
+   _last_used(0)
+ {
+   assert(object_mark_sweep() != NULL, "Sanity");
+-  
++
+   object_mark_sweep()->set_allowed_dead_ratio(PermMarkSweepDeadRatio);
+   _avg_size = new AdaptivePaddedAverage(AdaptivePermSizeWeight,
+                                         PermGenPadding);
+@@ -55,7 +52,7 @@
+ void PSPermGen::compute_new_size(size_t used_before_collection) {
+   // Update our padded average of objects allocated in perm
+   // gen between collections.
+-  assert(used_before_collection >= _last_used, 
++  assert(used_before_collection >= _last_used,
+                                 "negative allocation amount since last GC?");
+ 
+   const size_t alloc_since_last_gc = used_before_collection - _last_used;
+@@ -67,7 +64,7 @@
+ 
+   // We have different alignment constraints than the rest of the heap.
+   const size_t alignment = MAX2(MinPermHeapExpansion,
+-				virtual_space()->alignment());
++                                virtual_space()->alignment());
+ 
+   // Compute the desired size:
+   //  The free space is the newly computed padded average,
+@@ -92,13 +89,13 @@
+     MutexLocker x(ExpandHeap_lock);
+     if (desired_size > size_before) {
+       const size_t change_bytes = desired_size - size_before;
+-      const size_t aligned_change_bytes = 
+-	align_size_up(change_bytes, alignment);
++      const size_t aligned_change_bytes =
++        align_size_up(change_bytes, alignment);
+       expand_by(aligned_change_bytes);
+     } else {
+       // Shrinking
+-      const size_t change_bytes = 
+-	size_before - desired_size;
++      const size_t change_bytes =
++        size_before - desired_size;
+       const size_t aligned_change_bytes = align_size_down(change_bytes, alignment);
+       shrink(aligned_change_bytes);
+     }
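Both branches above lean on the usual power-of-two alignment arithmetic: the byte delta is rounded up when expanding and down when shrinking, so the resize never undershoots what is needed or overshoots what is allowed. A sketch of what those helpers compute (assuming, as HotSpot does, that the alignment is a power of two):

#include <cstddef>

inline size_t align_size_up(size_t bytes, size_t alignment) {
  return (bytes + alignment - 1) & ~(alignment - 1);
}
inline size_t align_size_down(size_t bytes, size_t alignment) {
  return bytes & ~(alignment - 1);
}
// e.g. align_size_up(5000, 4096) == 8192; align_size_down(5000, 4096) == 4096.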
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPermGen.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPermGen.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPermGen.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPermGen.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)psPermGen.hpp	1.18 07/05/05 17:05:30 JVM"
+-#endif
+ /*
+  * Copyright 2001-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class AdaptivePaddedAverage;
+@@ -36,14 +33,14 @@
+  public:
+   // Initialize the generation.
+   PSPermGen(ReservedSpace rs, size_t alignment, size_t initial_byte_size,
+-	    size_t minimum_byte_size, size_t maximum_byte_size,
++            size_t minimum_byte_size, size_t maximum_byte_size,
+             const char* gen_name, int level);
+ 
+   // Permanent Gen special allocation. Uses the OldGen allocation
+   // routines, which should not be directly called on this generation.
+   HeapWord* allocate_permanent(size_t word_size);
+ 
+-  // Size calculation. 
++  // Size calculation.
+   void compute_new_size(size_t used_before_collection);
+ 
+   // MarkSweep code
+@@ -54,4 +51,3 @@
+ 
+   virtual const char* name() const { return "PSPermGen"; }
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)psPromotionLAB.cpp	1.17 07/05/05 17:05:30 JVM"
+-#endif
+ /*
+  * Copyright 2002-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -33,7 +30,7 @@
+ // This is the shared initialization code. It sets up the basic pointers,
+ // and allows enough extra space for a filler object. We call a virtual
+ // method, "lab_is_valid()", to handle the different asserts the old/young
+-// labs require. 
++// labs require.
+ void PSPromotionLAB::initialize(MemRegion lab) {
+   assert(lab_is_valid(lab), "Sanity");
+ 
+@@ -49,7 +46,7 @@
+     if (ZapUnusedHeapArea) {
+       debug_only(Copy::fill_to_words(top(), free()/HeapWordSize, badHeapWord));
+     }
+-    
++
+     // NOTE! We need to allow space for a filler object.
+     assert(lab.word_size() >= filler_header_size, "lab is too small");
+     end = end - filler_header_size;
+@@ -68,13 +65,13 @@
+ void PSPromotionLAB::flush() {
+   assert(_state != flushed, "Attempt to flush PLAB twice");
+   assert(top() <= end(), "pointers out of order");
+-  
++
+   // If we were initialized to a zero sized lab, there is
+   // nothing to flush
+   if (_state == zero_size)
+     return;
+ 
+-  // PLAB's never allocate the last aligned_header_size 
++  // PLABs never allocate the last aligned_header_size
+   // so they can always fill with an array.
+   HeapWord* tlab_end = end() + filler_header_size;
+   typeArrayOop filler_oop = (typeArrayOop) top();
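The trick the comment describes: because the last filler_header_size words are never handed out, flush() can always stamp an int-array header onto the unused tail, leaving a well-formed dead object that keeps the heap walkable. In miniature (names and parameters are illustrative):

#include <cstddef>

// Given a leftover tail of 'tail_words' heap words and an int-array header
// of 'header_words', the filler's element count is chosen so that
// header + elements exactly cover the tail.
inline size_t filler_length(size_t tail_words, size_t header_words,
                            size_t ints_per_word) {
  return (tail_words - header_words) * ints_per_word;
}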
+@@ -90,7 +87,7 @@
+   HeapWord* elt_words = ((HeapWord*)filler_oop) + typeArrayOopDesc::header_size(T_INT);
+   Copy::fill_to_words(elt_words, array_length, 0xDEAABABE);
+ #endif
+-  
++
+   set_bottom(NULL);
+   set_end(NULL);
+   set_top(NULL);
+@@ -100,7 +97,7 @@
+ 
+ bool PSPromotionLAB::unallocate_object(oop obj) {
+   assert(Universe::heap()->is_in(obj), "Object outside heap");
+-  
++
+   if (contains(obj)) {
+     HeapWord* object_end = (HeapWord*)obj + obj->size();
+     assert(object_end <= top(), "Object crosses promotion LAB boundary");
+@@ -119,7 +116,7 @@
+ void PSOldPromotionLAB::flush() {
+   assert(_state != flushed, "Attempt to flush PLAB twice");
+   assert(top() <= end(), "pointers out of order");
+-  
++
+   if (_state == zero_size)
+     return;
+ 
+@@ -150,11 +147,11 @@
+ bool PSOldPromotionLAB::lab_is_valid(MemRegion lab) {
+   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+-  assert(_start_array->covered_region().contains(lab), "Sanity");    
++  assert(_start_array->covered_region().contains(lab), "Sanity");
+ 
+   PSOldGen* old_gen = heap->old_gen();
+   MemRegion used = old_gen->object_space()->used_region();
+-  
++
+   if (used.contains(lab)) {
+     return true;
+   }
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)psPromotionLAB.hpp	1.13 07/05/05 17:05:30 JVM"
+-#endif
+ /*
+  * Copyright 2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+@@ -143,5 +140,3 @@
+ 
+   debug_only(virtual bool lab_is_valid(MemRegion lab));
+ };
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)psPromotionManager.cpp	1.29 07/05/05 17:05:30 JVM"
+-#endif
+ /*
+  * Copyright 2002-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,17 +19,17 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+ #include "incls/_psPromotionManager.cpp.incl"
+ 
+-PSPromotionManager** PSPromotionManager::_manager_array = NULL;
+-OopStarTaskQueueSet* PSPromotionManager::_stack_array_depth = NULL;
+-OopTaskQueueSet*     PSPromotionManager::_stack_array_breadth = NULL;
+-PSOldGen*            PSPromotionManager::_old_gen = NULL;
+-MutableSpace*        PSPromotionManager::_young_space = NULL;
++PSPromotionManager**         PSPromotionManager::_manager_array = NULL;
++OopStarTaskQueueSet*         PSPromotionManager::_stack_array_depth = NULL;
++OopTaskQueueSet*             PSPromotionManager::_stack_array_breadth = NULL;
++PSOldGen*                    PSPromotionManager::_old_gen = NULL;
++MutableSpace*                PSPromotionManager::_young_space = NULL;
+ 
+ void PSPromotionManager::initialize() {
+   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+@@ -44,7 +41,7 @@
+   assert(_manager_array == NULL, "Attempt to initialize twice");
+   _manager_array = NEW_C_HEAP_ARRAY(PSPromotionManager*, ParallelGCThreads+1 );
+   guarantee(_manager_array != NULL, "Could not initialize promotion manager");
+-  
++
+   if (UseDepthFirstScavengeOrder) {
+     _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
+     guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager");
+@@ -65,10 +62,9 @@
+   }
+ 
+   // The VMThread gets its own PSPromotionManager, which is not available
+-  // for work stealing. 
++  // for work stealing.
+   _manager_array[ParallelGCThreads] = new PSPromotionManager();
+   guarantee(_manager_array[ParallelGCThreads] != NULL, "Could not create PSPromotionManager");
+-
+ }
+ 
+ PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(int index) {
+@@ -94,6 +90,10 @@
+ }
+ 
+ void PSPromotionManager::post_scavenge() {
++#if PS_PM_STATS
++  print_stats();
++#endif // PS_PM_STATS
++
+   for(uint i=0; i<ParallelGCThreads+1; i++) {
+     PSPromotionManager* manager = manager_array(i);
+ 
+@@ -137,6 +137,38 @@
+   }
+ }
+ 
++#if PS_PM_STATS
++
++void
++PSPromotionManager::print_stats(uint i) {
++  tty->print_cr("---- GC Worker %2d Stats", i);
++  tty->print_cr("    total pushes            %8d", _total_pushes);
++  tty->print_cr("    masked pushes           %8d", _masked_pushes);
++  tty->print_cr("    overflow pushes         %8d", _overflow_pushes);
++  tty->print_cr("    max overflow length     %8d", _max_overflow_length);
++  tty->print_cr("");
++  tty->print_cr("    arrays chunked          %8d", _arrays_chunked);
++  tty->print_cr("    array chunks processed  %8d", _array_chunks_processed);
++  tty->print_cr("");
++  tty->print_cr("    total steals            %8d", _total_steals);
++  tty->print_cr("    masked steals           %8d", _masked_steals);
++  tty->print_cr("");
++}
++
++void
++PSPromotionManager::print_stats() {
++  tty->print_cr("== GC Tasks Stats (%s), GC %3d",
++                (UseDepthFirstScavengeOrder) ? "Depth-First" : "Breadth-First",
++                Universe::heap()->total_collections());
++
++  for (uint i = 0; i < ParallelGCThreads+1; ++i) {
++    PSPromotionManager* manager = manager_array(i);
++    manager->print_stats(i);
++  }
++}
++
++#endif // PS_PM_STATS
++
+ PSPromotionManager::PSPromotionManager() {
+   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+@@ -169,6 +201,10 @@
+                                      (uint) (queue_size / 4));
+   }
+ 
++  _array_chunk_size = ParGCArrayScanChunk;
++  // let's choose 1.5x the chunk size
++  _min_array_size_for_chunking = 3 * _array_chunk_size / 2;
++
+   reset();
+ }
+ 
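A quick sanity check on the "1.5x" choice, assuming the usual ParGCArrayScanChunk default of 50 (an assumption; the flag is tunable):

//   _array_chunk_size            = ParGCArrayScanChunk = 50
//   _min_array_size_for_chunking = 3 * 50 / 2           = 75
// Arrays are chunked only while more than 75 entries remain, and each pass
// peels off exactly 50, so the final leftover slice always falls in
// (25, 75]: no pass ever scans a degenerately small chunk.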
+@@ -189,11 +225,23 @@
+   lab_base = old_gen()->object_space()->top();
+   _old_lab.initialize(MemRegion(lab_base, (size_t)0));
+   _old_gen_is_full = false;
+-  
++
+   _prefetch_queue.clear();
++
++#if PS_PM_STATS
++  _total_pushes = 0;
++  _masked_pushes = 0;
++  _overflow_pushes = 0;
++  _max_overflow_length = 0;
++  _arrays_chunked = 0;
++  _array_chunks_processed = 0;
++  _total_steals = 0;
++  _masked_steals = 0;
++#endif // PS_PM_STATS
+ }
+ 
+ void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
++  assert(depth_first(), "invariant");
+   assert(overflow_stack_depth() != NULL, "invariant");
+   totally_drain = totally_drain || _totally_drain;
+ 
+@@ -212,21 +260,21 @@
+     // claimed stack while we work.
+     while(!overflow_stack_depth()->is_empty()) {
+       p = overflow_stack_depth()->pop();
+-      PSScavenge::copy_and_push_safe_barrier(this, p);
++      process_popped_location_depth(p);
+     }
+ 
+     if (totally_drain) {
+       while (claimed_stack_depth()->pop_local(p)) {
+-	PSScavenge::copy_and_push_safe_barrier(this, p);
++        process_popped_location_depth(p);
+       }
+     } else {
+       while (claimed_stack_depth()->size() > _target_stack_size &&
+-	     claimed_stack_depth()->pop_local(p)) {
+-	PSScavenge::copy_and_push_safe_barrier(this, p);
++             claimed_stack_depth()->pop_local(p)) {
++        process_popped_location_depth(p);
+       }
+     }
+   } while( (totally_drain && claimed_stack_depth()->size() > 0) ||
+-	   (overflow_stack_depth()->length() > 0) );
++           (overflow_stack_depth()->length() > 0) );
+ 
+   assert(!totally_drain || claimed_stack_empty(), "Sanity");
+   assert(totally_drain ||
+@@ -236,6 +284,7 @@
+ }
+ 
+ void PSPromotionManager::drain_stacks_breadth(bool totally_drain) {
++  assert(!depth_first(), "invariant");
+   assert(overflow_stack_breadth() != NULL, "invariant");
+   totally_drain = totally_drain || _totally_drain;
+ 
+@@ -260,17 +309,17 @@
+     if (totally_drain) {
+       // obj is a reference!!!
+       while (claimed_stack_breadth()->pop_local(obj)) {
+-	// It would be nice to assert about the type of objects we might
+-	// pop, but they can come from anywhere, unfortunately.
+-	obj->copy_contents(this);
++        // It would be nice to assert about the type of objects we might
++        // pop, but they can come from anywhere, unfortunately.
++        obj->copy_contents(this);
+       }
+     } else {
+       // obj is a reference!!!
+       while (claimed_stack_breadth()->size() > _target_stack_size &&
+-	     claimed_stack_breadth()->pop_local(obj)) {
+-	// It would be nice to assert about the type of objects we might
+-	// pop, but they can come from anywhere, unfortunately.
+-	obj->copy_contents(this);
++             claimed_stack_breadth()->pop_local(obj)) {
++        // It would be nice to assert about the type of objects we might
++        // pop, but they can come from anywhere, unfortunately.
++        obj->copy_contents(this);
+       }
+     }
+ 
+@@ -280,7 +329,7 @@
+       flush_prefetch_queue();
+     }
+   } while((totally_drain && claimed_stack_breadth()->size() > 0) ||
+-	  (overflow_stack_breadth()->length() > 0));
++          (overflow_stack_breadth()->length() > 0));
+ 
+   assert(!totally_drain || claimed_stack_empty(), "Sanity");
+   assert(totally_drain ||
+@@ -298,7 +347,7 @@
+   assert(!_young_lab.is_flushed() || _young_gen_is_full, "Sanity");
+   if (!_young_lab.is_flushed())
+     _young_lab.flush();
+- 
++
+   assert(!_old_lab.is_flushed() || _old_gen_is_full, "Sanity");
+   if (!_old_lab.is_flushed())
+     _old_lab.flush();
+@@ -319,7 +368,7 @@
+   assert(PSScavenge::should_scavenge(o), "Sanity");
+ 
+   oop new_obj = NULL;
+-  
++
+   // NOTE! We must be very careful with any methods that access the mark
+   // in o. There may be multiple threads racing on it, and it may be forwarded
+   // at any time. Do not use oop methods for accessing the mark!
+@@ -345,7 +394,7 @@
+         } else {
+           // Flush and fill
+           _young_lab.flush();
+-          
++
+           HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
+           if (lab_base != NULL) {
+             _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
+@@ -357,14 +406,14 @@
+         }
+       }
+     }
+-    
++
+     // Otherwise try allocating obj tenured
+     if (new_obj == NULL) {
+-#ifndef	PRODUCT
++#ifndef PRODUCT
+       if (Universe::heap()->promotion_should_fail()) {
+-	return oop_promotion_failed(o, test_mark);
++        return oop_promotion_failed(o, test_mark);
+       }
+-#endif	// #ifndef PRODUCT
++#endif  // #ifndef PRODUCT
+ 
+       new_obj = (oop) _old_lab.allocate(new_obj_size);
+       new_obj_is_tenured = true;
+@@ -378,7 +427,7 @@
+           } else {
+             // Flush and fill
+             _old_lab.flush();
+-            
++
+             HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
+             if(lab_base != NULL) {
+               _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
+@@ -405,28 +454,43 @@
+ 
+     // Copy obj
+     Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);
+-    
++
+     // Now we have to CAS in the header.
+     if (o->cas_forward_to(new_obj, test_mark)) {
+       // We won any races, we "own" this object.
+       assert(new_obj == o->forwardee(), "Sanity");
+-      
++
+       // Increment age if obj still in new generation. Now that
+       // we're dealing with a markOop that cannot change, it is
+       // okay to use the non mt safe oop methods.
+-      if (!new_obj_is_tenured) { 
++      if (!new_obj_is_tenured) {
+         new_obj->incr_age();
+         assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
+       }
+ 
+       if (depth_first) {
+-	new_obj->push_contents(this);
++        // Do the size comparison first with new_obj_size, which we
++        // already have. Hopefully, only a few objects are larger than
++        // _min_array_size_for_chunking, and most of them will be arrays.
++        // So, the is_objArray() test would be very infrequent.
++        if (new_obj_size > _min_array_size_for_chunking &&
++            new_obj->is_objArray() &&
++            PSChunkLargeArrays) {
++          // we'll chunk it
++#if PS_PM_STATS
++          ++_arrays_chunked;
++#endif // PS_PM_STATS
++          oop* const masked_o = mask_chunked_array_oop(o);
++          push_depth(masked_o);
++#if PS_PM_STATS
++          ++_masked_pushes;
++#endif // PS_PM_STATS
++        } else {
++          // we'll just push its contents
++          new_obj->push_contents(this);
++        }
+       } else {
+-	// If we loop too many times, handle_stack_overflow will assert.
+-	// It may be worth adding loop count asserts anyway.
+-	if (!claimed_stack_breadth()->push(new_obj)) {
+-	  overflow_stack_breadth()->push(new_obj);
+-	}
++        push_breadth(new_obj);
+       }
+     }  else {
+       // We lost, someone else "owns" this object
+@@ -438,7 +502,7 @@
+         if (!_old_lab.unallocate_object(new_obj)) {
+           // The promotion lab failed to unallocate the object.
+           // We need to overwrite the object with a filler that
+-          // contains no interior pointers. 
++          // contains no interior pointers.
+           MemRegion mr((HeapWord*)new_obj, new_obj_size);
+           // Clean this up and move to oopFactory (see bug 4718422)
+           SharedHeap::fill_region_with_object(mr);
+@@ -447,7 +511,7 @@
+         if (!_young_lab.unallocate_object(new_obj)) {
+           // The promotion lab failed to unallocate the object.
+           // We need to overwrite the object with a filler that
+-          // contains no interior pointers. 
++          // contains no interior pointers.
+           MemRegion mr((HeapWord*)new_obj, new_obj_size);
+           // Clean this up and move to oopFactory (see bug 4718422)
+           SharedHeap::fill_region_with_object(mr);
+@@ -466,8 +530,8 @@
+   // This code must come after the CAS test, or it will print incorrect
+   // information.
+   if (TraceScavenge) {
+-    gclog_or_tty->print_cr("{%s %s 0x%x -> 0x%x (%d)}", 
+-       PSScavenge::should_scavenge(new_obj) ? "copying" : "tenuring", 
++    gclog_or_tty->print_cr("{%s %s 0x%x -> 0x%x (%d)}",
++       PSScavenge::should_scavenge(new_obj) ? "copying" : "tenuring",
+        new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());
+ 
+   }
+@@ -476,6 +540,47 @@
+   return new_obj;
+ }
+ 
++void PSPromotionManager::process_array_chunk(oop old) {
++  assert(PSChunkLargeArrays, "invariant");
++  assert(old->is_objArray(), "invariant");
++  assert(old->is_forwarded(), "invariant");
++
++#if PS_PM_STATS
++  ++_array_chunks_processed;
++#endif // PS_PM_STATS
++
++  oop const obj = old->forwardee();
++
++  int start;
++  int const end = arrayOop(old)->length();
++  if (end > (int) _min_array_size_for_chunking) {
++    // we'll chunk more
++    start = end - _array_chunk_size;
++    assert(start > 0, "invariant");
++    arrayOop(old)->set_length(start);
++    push_depth(mask_chunked_array_oop(old));
++#if PS_PM_STATS
++    ++_masked_pushes;
++#endif // PS_PM_STATS
++  } else {
++    // this is the final chunk for this array
++    start = 0;
++    int const actual_length = arrayOop(obj)->length();
++    arrayOop(old)->set_length(actual_length);
++  }
++
++  assert(start < end, "invariant");
++  oop* const base      = objArrayOop(obj)->base();
++  oop* p               = base + start;
++  oop* const chunk_end = base + end;
++  while (p < chunk_end) {
++    if (PSScavenge::should_scavenge(*p)) {
++      claim_or_forward_depth(p);
++    }
++    ++p;
++  }
++}
++
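The key move above is reusing the stale from-space copy's length field as a scan cursor: each pass shortens it by one chunk and re-queues the array until only the final slice remains, at which point the true length is restored from the forwarded copy. A plain-C++ sketch of that protocol (all names are stand-ins, not HotSpot code):

struct OldCopy { int length; };   // stale from-space image; length = cursor

template <typename Requeue, typename Scan>
void process_chunk(OldCopy* old_copy, int true_length, int chunk_size,
                   int min_for_chunking, Requeue requeue, Scan scan) {
  const int end = old_copy->length;      // elements still to scan
  int start;
  if (end > min_for_chunking) {
    start = end - chunk_size;            // peel one chunk off the tail
    old_copy->length = start;            // advance the cursor
    requeue(old_copy);                   // push the remainder back (masked)
  } else {
    start = 0;                           // final slice
    old_copy->length = true_length;      // restore the real length
  }
  scan(start, end);                      // scan [start, end) of the new copy
}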
+ oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
+   assert(_old_gen_is_full || PromotionFailureALot, "Sanity");
+ 
+@@ -493,9 +598,7 @@
+     } else {
+       // Don't bother incrementing the age, just push
+       // onto the claimed_stack..
+-      if(!claimed_stack_breadth()->push(obj)) {
+-	overflow_stack_breadth()->push(obj);
+-      }
++      push_breadth(obj);
+     }
+ 
+     // Save the mark if needed
+@@ -503,18 +606,18 @@
+   }  else {
+     // We lost, someone else "owns" this object
+     guarantee(obj->is_forwarded(), "Object must be forwarded if the cas failed.");
+-    
++
+     // No unallocation to worry about.
+     obj = obj->forwardee();
+   }
+-  
++
+ #ifdef DEBUG
+   if (TraceScavenge) {
+-    gclog_or_tty->print_cr("{%s %s 0x%x (%d)}", 
+-                           "promotion-failure", 
++    gclog_or_tty->print_cr("{%s %s 0x%x (%d)}",
++                           "promotion-failure",
+                            obj->blueprint()->internal_name(),
+                            obj, obj->size());
+-    
++
+   }
+ #endif
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)psPromotionManager.hpp	1.19 07/05/05 17:05:30 JVM"
+-#endif
+ /*
+  * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+@@ -41,47 +38,142 @@
+ #define HAS_BEEN_MOVED 0x1501d01d
+ // End move to some global location
+ 
+-
+ class MutableSpace;
+ class PSOldGen;
+ class ParCompactionManager;
+ 
++#define PS_CHUNKED_ARRAY_OOP_MASK  1
++
++#define PS_PM_STATS         0
++
+ class PSPromotionManager : public CHeapObj {
+   friend class PSScavenge;
+   friend class PSRefProcTaskExecutor;
+  private:
+-  static PSPromotionManager**  _manager_array;
+-  static OopStarTaskQueueSet*  _stack_array_depth;
+-  static OopTaskQueueSet*      _stack_array_breadth;
+-  static PSOldGen*             _old_gen;
+-  static MutableSpace*         _young_space;
+-  
+-  PSYoungPromotionLAB          _young_lab;
+-  PSOldPromotionLAB            _old_lab;
+-  bool                         _young_gen_is_full;
+-  bool                         _old_gen_is_full;
+-  PrefetchQueue                _prefetch_queue;
+-
+-  OopStarTaskQueue             _claimed_stack_depth;
+-  GrowableArray<oop*>*         _overflow_stack_depth;
+-  OopTaskQueue                 _claimed_stack_breadth;
+-  GrowableArray<oop>*          _overflow_stack_breadth;
+-
+-  bool                         _depth_first;
+-  bool                         _totally_drain;
+-  uint                         _target_stack_size;
++  static PSPromotionManager**         _manager_array;
++  static OopStarTaskQueueSet*         _stack_array_depth;
++  static OopTaskQueueSet*             _stack_array_breadth;
++  static PSOldGen*                    _old_gen;
++  static MutableSpace*                _young_space;
++
++#if PS_PM_STATS
++  uint                                _total_pushes;
++  uint                                _masked_pushes;
++
++  uint                                _overflow_pushes;
++  uint                                _max_overflow_length;
++
++  uint                                _arrays_chunked;
++  uint                                _array_chunks_processed;
++
++  uint                                _total_steals;
++  uint                                _masked_steals;
++
++  void print_stats(uint i);
++  static void print_stats();
++#endif // PS_PM_STATS
++
++  PSYoungPromotionLAB                 _young_lab;
++  PSOldPromotionLAB                   _old_lab;
++  bool                                _young_gen_is_full;
++  bool                                _old_gen_is_full;
++  PrefetchQueue                       _prefetch_queue;
++
++  OopStarTaskQueue                    _claimed_stack_depth;
++  GrowableArray<oop*>*                _overflow_stack_depth;
++  OopTaskQueue                        _claimed_stack_breadth;
++  GrowableArray<oop>*                 _overflow_stack_breadth;
++
++  bool                                _depth_first;
++  bool                                _totally_drain;
++  uint                                _target_stack_size;
++
++  uint                                _array_chunk_size;
++  uint                                _min_array_size_for_chunking;
+ 
+   // Accessors
+   static PSOldGen* old_gen()              { return _old_gen; }
+   static MutableSpace* young_space()      { return _young_space; }
+ 
+   inline static PSPromotionManager* manager_array(int index);
+-  inline void claim_or_forward_internal_depth(oop* p);
+-  inline void claim_or_forward_internal_breadth(oop* p);
+ 
+   GrowableArray<oop*>* overflow_stack_depth()  { return _overflow_stack_depth; }
+   GrowableArray<oop>* overflow_stack_breadth()   { return _overflow_stack_breadth; }
+ 
++  // On the task queues we push reference locations as well as
++  // partially-scanned arrays (in the latter case, we push an oop to
++  // the from-space image of the array and the length on the
++  // from-space image indicates how many entries on the array we still
++  // need to scan; this is basically how ParNew does partial array
++  // scanning too). To be able to distinguish between reference
++  // locations and partially-scanned array oops we simply mask the
++  // latter oops with 0x01. The next three methods do the masking,
++  // unmasking, and checking whether the oop is masked or not. Notice
++  // that the signature of the mask and unmask methods looks a bit
++  // strange, as they accept and return different types (oop and
++  // oop*). This is because of the difference in types between what
++  // the task queue holds (oop*) and oops to partially-scanned arrays
++  // (oop). We do all the necessary casting in the mask / unmask
++  // methods to avoid sprinkling the rest of the code with more casts.
++
++  bool is_oop_masked(oop* p) {
++    return ((intptr_t) p & PS_CHUNKED_ARRAY_OOP_MASK) == PS_CHUNKED_ARRAY_OOP_MASK;
++  }
++
++  oop* mask_chunked_array_oop(oop obj) {
++    assert(!is_oop_masked((oop*) obj), "invariant");
++    oop* ret = (oop*) ((intptr_t) obj  | PS_CHUNKED_ARRAY_OOP_MASK);
++    assert(is_oop_masked(ret), "invariant");
++    return ret;
++  }
++
++  oop unmask_chunked_array_oop(oop* p) {
++    assert(is_oop_masked(p), "invariant");
++    oop ret = oop((intptr_t) p & ~PS_CHUNKED_ARRAY_OOP_MASK);
++    assert(!is_oop_masked((oop*) ret), "invariant");
++    return ret;
++  }
++
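A standalone rendering of the tagging scheme the comment describes, assuming (as a mask of 1 requires) that object addresses are at least 2-byte aligned so bit 0 is always free; the names are illustrative:

#include <cassert>
#include <cstdint>

constexpr uintptr_t kChunkedArrayMask = 1;

inline bool is_masked(const void* p) {
  return ((uintptr_t)p & kChunkedArrayMask) != 0;
}
inline void* mask_array(void* obj) {          // tag: partially scanned array
  assert(!is_masked(obj) && "already tagged");
  return (void*)((uintptr_t)obj | kChunkedArrayMask);
}
inline void* unmask_array(void* p) {          // untag back to a real pointer
  assert(is_masked(p) && "not a tagged array");
  return (void*)((uintptr_t)p & ~kChunkedArrayMask);
}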
++  void process_array_chunk(oop old);
++
++  void push_depth(oop* p) {
++    assert(depth_first(), "pre-condition");
++
++#if PS_PM_STATS
++    ++_total_pushes;
++#endif // PS_PM_STATS
++
++    if (!claimed_stack_depth()->push(p)) {
++      overflow_stack_depth()->push(p);
++#if PS_PM_STATS
++      ++_overflow_pushes;
++      uint stack_length = (uint) overflow_stack_depth()->length();
++      if (stack_length > _max_overflow_length) {
++        _max_overflow_length = stack_length;
++      }
++#endif // PS_PM_STATS
++    }
++  }
++
++  void push_breadth(oop o) {
++    assert(!depth_first(), "pre-condition");
++
++#if PS_PM_STATS
++    ++_total_pushes;
++#endif // PS_PM_STATS
++
++    if(!claimed_stack_breadth()->push(o)) {
++      overflow_stack_breadth()->push(o);
++#if PS_PM_STATS
++      ++_overflow_pushes;
++      uint stack_length = (uint) overflow_stack_breadth()->length();
++      if (stack_length > _max_overflow_length) {
++        _max_overflow_length = stack_length;
++      }
++#endif // PS_PM_STATS
++    }
++  }
++
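Both push paths share one shape: try the bounded, stealable task queue first and spill to a private, unbounded overflow list only on failure; the drain loops above then prefer the overflow list so spilled work is not stranded. As a generic sketch (the queue type is a stand-in with a bool-returning push, like HotSpot's task queues):

#include <vector>

template <typename T, typename BoundedQueue>
inline void push_with_overflow(BoundedQueue& queue,
                               std::vector<T>& overflow, T task) {
  if (!queue.push(task)) {     // bounded, work-stealing queue is full
    overflow.push_back(task);  // private spill list, drained locally first
  }
}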
+  protected:
+   static OopStarTaskQueueSet* stack_array_depth() { return _stack_array_depth; }
+   static OopTaskQueueSet*     stack_array_breadth() { return _stack_array_breadth; }
+@@ -124,7 +216,6 @@
+   // Promotion methods
+   oop copy_to_survivor_space(oop o, bool depth_first);
+   oop oop_promotion_failed(oop obj, markOop obj_mark);
+-  void handle_stack_overflow();
+ 
+   void reset();
+ 
+@@ -136,6 +227,11 @@
+       drain_stacks_breadth(totally_drain);
+     }
+   }
++  void drain_stacks_cond_depth() {
++    if (claimed_stack_depth()->size() > _target_stack_size) {
++      drain_stacks_depth(false);
++    }
++  }
+   void drain_stacks_depth(bool totally_drain);
+   void drain_stacks_breadth(bool totally_drain);
+ 
+@@ -160,7 +256,22 @@
+     return _depth_first;
+   }
+ 
++  inline void process_popped_location_depth(oop* p);
++
+   inline void flush_prefetch_queue();
++
+   inline void claim_or_forward_depth(oop* p);
++  inline void claim_or_forward_internal_depth(oop* p);
++
+   inline void claim_or_forward_breadth(oop* p);
++  inline void claim_or_forward_internal_breadth(oop* p);
++
++#if PS_PM_STATS
++  void increment_steals(oop* p = NULL) {
++    _total_steals += 1;
++    if (p != NULL && is_oop_masked(p)) {
++      _masked_steals += 1;
++    }
++  }
++#endif // PS_PM_STATS
+ };
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)psPromotionManager.inline.hpp	1.18 07/05/05 17:05:30 JVM"
+-#endif
+ /*
+  * Copyright 2002-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
+@@ -39,13 +36,11 @@
+ 
+       // Card mark
+       if (PSScavenge::is_obj_in_young((HeapWord*) o)) {
+-	PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
++        PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
+       }
+       *p = o;
+     } else {
+-      if (!claimed_stack_depth()->push(p)) {
+-	overflow_stack_depth()->push(p);
+-      }
++      push_depth(p);
+     }
+   }
+ }
+@@ -58,7 +53,7 @@
+     } else {
+       o = copy_to_survivor_space(o, false);
+     }
+-  
++
+     // Card mark
+     if (PSScavenge::is_obj_in_young((HeapWord*) o)) {
+       PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
+@@ -110,3 +105,13 @@
+     claim_or_forward_internal_breadth(p);
+   }
+ }
++
++inline void PSPromotionManager::process_popped_location_depth(oop* p) {
++  if (is_oop_masked(p)) {
++    assert(PSChunkLargeArrays, "invariant");
++    oop const old = unmask_chunked_array_oop(p);
++    process_array_chunk(old);
++  } else {
++    PSScavenge::copy_and_push_safe_barrier(this, p);
++  }
++}
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)psScavenge.cpp	1.98 07/06/08 23:12:37 JVM"
+-#endif
+ /*
+  * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ 
+@@ -30,7 +27,7 @@
+ # include "incls/_psScavenge.cpp.incl"
+ 
+ HeapWord*                  PSScavenge::_to_space_top_before_gc = NULL;
+-int			   PSScavenge::_consecutive_skipped_scavenges = 0;
++int                        PSScavenge::_consecutive_skipped_scavenges = 0;
+ ReferenceProcessor*        PSScavenge::_ref_processor = NULL;
+ CardTableExtension*        PSScavenge::_card_table = NULL;
+ bool                       PSScavenge::_survivor_overflow = false;
+@@ -64,14 +61,14 @@
+     ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+     assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+     _to_space = heap->young_gen()->to_space();
+-    
++
+     assert(_promotion_manager != NULL, "Sanity");
+   }
+ 
+   void do_oop(oop* p) {
+     assert (*p != NULL, "expected non-null ref");
+     assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
+-  
++
+     oop obj = oop(*p);
+     // Weak refs may be visited more than once.
+     if (PSScavenge::should_scavenge(obj, _to_space)) {
+@@ -111,15 +108,15 @@
+     : _rp_task(rp_task),
+       _work_id(work_id)
+   { }
+-  
++
+ private:
+   virtual char* name() { return (char *)"Process referents by policy in parallel"; }
+   virtual void do_it(GCTaskManager* manager, uint which);
+ };
+ 
+ void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
+-{  
+-  PSPromotionManager* promotion_manager = 
++{
++  PSPromotionManager* promotion_manager =
+     PSPromotionManager::gc_thread_promotion_manager(which);
+   assert(promotion_manager != NULL, "sanity check");
+   PSKeepAliveClosure keep_alive(promotion_manager);
+@@ -210,23 +207,23 @@
+     PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
+     if (UsePerfData)
+       counters->update_full_follows_scavenge(0);
+-    if (!scavenge_was_done || 
+-	policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
++    if (!scavenge_was_done ||
++        policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
+       if (UsePerfData)
+         counters->update_full_follows_scavenge(full_follows_scavenge);
+ 
+       GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
+       if (UseParallelOldGC) {
+-	PSParallelCompact::invoke_no_policy(false);
++        PSParallelCompact::invoke_no_policy(false);
+       } else {
+-	PSMarkSweep::invoke_no_policy(false);
++        PSMarkSweep::invoke_no_policy(false);
+       }
+     }
+   }
+ }
+ 
+ // This method contains no policy. You should probably
+-// be calling invoke() instead. 
++// be calling invoke() instead.
+ bool PSScavenge::invoke_no_policy() {
+   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
+   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
+@@ -300,10 +297,10 @@
+ 
+     // Let the size policy know we're starting
+     size_policy->minor_collection_begin();
+-    
++
+     // Verify the object start arrays.
+     if (VerifyObjectStartArray &&
+-	VerifyBeforeGC) {
++        VerifyBeforeGC) {
+       old_gen->verify_object_start_array();
+       perm_gen->verify_object_start_array();
+     }
+@@ -312,10 +309,10 @@
+     if (VerifyRememberedSets) {
+       CardTableExtension::verify_all_young_refs_imprecise();
+     }
+-    
++
+     if (!ScavengeWithObjectsInToSpace) {
+       assert(young_gen->to_space()->is_empty(),
+-	     "Attempt to scavenge with live objects in to_space");
++             "Attempt to scavenge with live objects in to_space");
+       young_gen->to_space()->clear();
+     } else if (ZapUnusedHeapArea) {
+       young_gen->to_space()->mangle_unused_area();
+@@ -326,7 +323,7 @@
+     COMPILER2_PRESENT(DerivedPointerTable::clear());
+ 
+     reference_processor()->enable_discovery();
+-    
++
+     // We track how much was promoted to the next generation for
+     // the AdaptiveSizePolicy.
+     size_t old_gen_used_before = old_gen->used_in_bytes();
+@@ -336,7 +333,7 @@
+ 
+     // Reset our survivor overflow.
+     set_survivor_overflow(false);
+-    
++
+     // We need to save the old/perm top values before
+     // creating the promotion_manager. We pass the top
+     // values to the card_table, to prevent it from
+@@ -353,9 +350,9 @@
+     PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
+     {
+       // TraceTime("Roots");
+-      
++
+       GCTaskQueue* q = GCTaskQueue::create();
+-      
++
+       for(uint i=0; i<ParallelGCThreads; i++) {
+         q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i));
+       }
+@@ -373,10 +370,10 @@
+       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
+ 
+       ParallelTaskTerminator terminator(
+-	gc_task_manager()->workers(),
+-	promotion_manager->depth_first() ?
+-	    (TaskQueueSetSuper*) promotion_manager->stack_array_depth()
+-	  : (TaskQueueSetSuper*) promotion_manager->stack_array_breadth());
++        gc_task_manager()->workers(),
++        promotion_manager->depth_first() ?
++            (TaskQueueSetSuper*) promotion_manager->stack_array_depth()
++          : (TaskQueueSetSuper*) promotion_manager->stack_array_breadth());
+       if (ParallelGCThreads>1) {
+         for (uint j=0; j<ParallelGCThreads; j++) {
+           q->enqueue(new StealTask(&terminator));
+@@ -395,14 +392,14 @@
+ #else
+       ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy();
+ #endif // COMPILER2
+-    
++
+       PSKeepAliveClosure keep_alive(promotion_manager);
+       PSEvacuateFollowersClosure evac_followers(promotion_manager);
+       assert(soft_ref_policy != NULL,"No soft reference policy");
+       if (reference_processor()->processing_is_mt()) {
+         PSRefProcTaskExecutor task_executor;
+         reference_processor()->process_discovered_references(
+-          soft_ref_policy, &_is_alive_closure, &keep_alive, &evac_followers, 
++          soft_ref_policy, &_is_alive_closure, &keep_alive, &evac_followers,
+           &task_executor);
+       } else {
+         reference_processor()->process_discovered_references(
+@@ -410,7 +407,7 @@
+           NULL);
+       }
+     }
+-    
++
+     // Enqueue reference objects discovered during scavenge.
+     if (reference_processor()->processing_is_mt()) {
+       PSRefProcTaskExecutor task_executor;
+@@ -418,7 +415,7 @@
+     } else {
+       reference_processor()->enqueue_discovered_references(NULL);
+     }
+-    
++
+     // Finally, flush the promotion_manager's labs, and deallocate its stacks.
+     assert(promotion_manager->claimed_stack_empty(), "Sanity");
+     PSPromotionManager::post_scavenge();
+@@ -464,75 +461,75 @@
+         }
+ 
+ 
+-  	if (UsePerfData) {
+-	  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
+-  	  counters->update_old_eden_size(
+-	    size_policy->calculated_eden_size_in_bytes());
+-  	  counters->update_old_promo_size(
+-	    size_policy->calculated_promo_size_in_bytes());
++        if (UsePerfData) {
++          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
++          counters->update_old_eden_size(
++            size_policy->calculated_eden_size_in_bytes());
++          counters->update_old_promo_size(
++            size_policy->calculated_promo_size_in_bytes());
+           counters->update_old_capacity(old_gen->capacity_in_bytes());
+           counters->update_young_capacity(young_gen->capacity_in_bytes());
+-  	  counters->update_survived(survived);
+-  	  counters->update_promoted(promoted);
+-  	  counters->update_survivor_overflowed(_survivor_overflow);
+-  	}
+-
+-        size_t survivor_limit = 
+-	  size_policy->max_survivor_size(young_gen->max_size());
+-        _tenuring_threshold = 
+-	  size_policy->compute_survivor_space_size_and_threshold(
+-                                                           _survivor_overflow, 
++          counters->update_survived(survived);
++          counters->update_promoted(promoted);
++          counters->update_survivor_overflowed(_survivor_overflow);
++        }
++
++        size_t survivor_limit =
++          size_policy->max_survivor_size(young_gen->max_size());
++        _tenuring_threshold =
++          size_policy->compute_survivor_space_size_and_threshold(
++                                                           _survivor_overflow,
+                                                            _tenuring_threshold,
+                                                            survivor_limit);
+ 
+        if (PrintTenuringDistribution) {
+          gclog_or_tty->cr();
+          gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %d (max %d)",
+-                                size_policy->calculated_survivor_size_in_bytes(), 
++                                size_policy->calculated_survivor_size_in_bytes(),
+                                 _tenuring_threshold, MaxTenuringThreshold);
+        }
+-    
+-	if (UsePerfData) {
++
++        if (UsePerfData) {
+           PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
+           counters->update_tenuring_threshold(_tenuring_threshold);
+           counters->update_survivor_size_counters();
+-	}
++        }
+ 
+-	// Do call at minor collections?
+-	// Don't check if the size_policy is ready at this
+-	// level.  Let the size_policy check that internally.
+-	if (UseAdaptiveSizePolicy &&
+-	    UseAdaptiveGenerationSizePolicyAtMinorCollection &&
++        // Do call at minor collections?
++        // Don't check if the size_policy is ready at this
++        // level.  Let the size_policy check that internally.
++        if (UseAdaptiveSizePolicy &&
++            UseAdaptiveGenerationSizePolicyAtMinorCollection &&
+             ((gc_cause != GCCause::_java_lang_system_gc) ||
+               UseAdaptiveSizePolicyWithSystemGC)) {
+ 
+           // Calculate optimial free space amounts
+-          assert(young_gen->max_size() > 
+-            young_gen->from_space()->capacity_in_bytes() + 
+-            young_gen->to_space()->capacity_in_bytes(), 
++          assert(young_gen->max_size() >
++            young_gen->from_space()->capacity_in_bytes() +
++            young_gen->to_space()->capacity_in_bytes(),
+             "Sizes of space in young gen are out-of-bounds");
+-          size_t max_eden_size = young_gen->max_size() - 
+-            young_gen->from_space()->capacity_in_bytes() - 
++          size_t max_eden_size = young_gen->max_size() -
++            young_gen->from_space()->capacity_in_bytes() -
+             young_gen->to_space()->capacity_in_bytes();
+           size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
+-				   young_gen->eden_space()->used_in_bytes(),
++                                   young_gen->eden_space()->used_in_bytes(),
+                                    old_gen->used_in_bytes(),
+                                    perm_gen->used_in_bytes(),
+-				   young_gen->eden_space()->capacity_in_bytes(),
++                                   young_gen->eden_space()->capacity_in_bytes(),
+                                    old_gen->max_gen_size(),
+                                    max_eden_size,
+                                    false  /* full gc*/,
+-				   gc_cause);
+-        
+-	}
++                                   gc_cause);
++
++        }
+         // Resize the young generation at every collection
+-	// even if new sizes have not been calculated.  This is
+-	// to allow resizes that may have been inhibited by the
+-	// relative location of the "to" and "from" spaces.
+-        
+-	// Resizing the old gen at minor collects can cause increases
+-	// that don't feed back to the generation sizing policy until
+-	// a major collection.  Don't resize the old gen here.
++        // even if new sizes have not been calculated.  This is
++        // to allow resizes that may have been inhibited by the
++        // relative location of the "to" and "from" spaces.
++
++        // Resizing the old gen at minor collects can cause increases
++        // that don't feed back to the generation sizing policy until
++        // a major collection.  Don't resize the old gen here.
+ 
+         heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
+                         size_policy->calculated_survivor_size_in_bytes());
+@@ -559,10 +556,10 @@
+     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
+ 
+     NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
+-    
++
+     // Re-verify object start arrays
+     if (VerifyObjectStartArray &&
+-	VerifyAfterGC) {
++        VerifyAfterGC) {
+       old_gen->verify_object_start_array();
+       perm_gen->verify_object_start_array();
+     }
+@@ -576,12 +573,12 @@
+     }
+ 
+     if (TraceGen0Time) accumulated_time()->stop();
+-    
++
+     if (PrintGC) {
+       if (PrintGCDetails) {
+-	// Don't print a GC timestamp here.  This is after the GC so
+-	// would be confusing.
+-	young_gen->print_used_change(young_gen_used_before);
++        // Don't print a GC timestamp here.  This is after the GC so
++        // would be confusing.
++        young_gen->print_used_change(young_gen_used_before);
+       }
+       heap->print_heap_change(prev_used);
+     }
+@@ -606,7 +603,7 @@
+   if (PrintGCTaskTimeStamps) {
+     tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
+                   scavenge_entry.ticks(), scavenge_midpoint.ticks(),
+-		  scavenge_exit.ticks());
++                  scavenge_exit.ticks());
+     gc_task_manager()->print_task_time_stamps();
+   }
+ 
+@@ -631,7 +628,7 @@
+     young_gen->object_iterate(&unforward_closure);
+ 
+     if (PrintGC && Verbose) {
+-      gclog_or_tty->print_cr("Restoring %d marks", 
++      gclog_or_tty->print_cr("Restoring %d marks",
+                               _preserved_oop_stack->length());
+     }
+ 
+@@ -639,9 +636,9 @@
+     for (int i=0; i < _preserved_oop_stack->length(); i++) {
+       oop obj       = _preserved_oop_stack->at(i);
+       markOop mark  = _preserved_mark_stack->at(i);
+-      obj->set_mark(mark);      
++      obj->set_mark(mark);
+     }
+- 
++
+     // Deallocate the preserved mark and oop stacks.
+     // The stacks were allocated as CHeap objects, so
+     // we must call delete to prevent mem leaks.
+@@ -684,7 +681,7 @@
+   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+   PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
+-  
++
+   if (UsePerfData) {
+     counters->update_scavenge_skipped(not_skipped);
+   }
+@@ -697,7 +694,7 @@
+     if (!young_gen->to_space()->is_empty()) {
+       _consecutive_skipped_scavenges++;
+       if (UsePerfData) {
+-	counters->update_scavenge_skipped(to_space_not_empty);
++        counters->update_scavenge_skipped(to_space_not_empty);
+       }
+       return false;
+     }
+@@ -720,10 +717,10 @@
+       (size_t) policy->average_promoted_in_bytes(),
+       (size_t) policy->padded_average_promoted_in_bytes(),
+       old_gen->free_in_bytes());
+-    if (young_gen->used_in_bytes() < 
++    if (young_gen->used_in_bytes() <
+         (size_t) policy->padded_average_promoted_in_bytes()) {
+       gclog_or_tty->print_cr(" padded_promoted_average is greater"
+-	" than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
++        " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
+     }
+   }
+ 
+@@ -746,7 +743,7 @@
+ }
+ 
+ void PSScavenge::initialize() {
+-  // Arguments must have been parsed 
++  // Arguments must have been parsed
+ 
+   if (AlwaysTenure) {
+     _tenuring_threshold = 0;
+@@ -754,10 +751,10 @@
+     _tenuring_threshold = markOopDesc::max_age + 1;
+   } else {
+     // We want to smooth out our startup times for the AdaptiveSizePolicy
+-    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold : 
++    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
+                                                     MaxTenuringThreshold;
+   }
+-  
++
+   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+ 
+@@ -768,7 +765,7 @@
+   // Set boundary between young_gen and old_gen
+   assert(perm_gen->reserved().end() <= old_gen->object_space()->bottom(),
+          "perm above old");
+-  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(), 
++  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
+          "old above young");
+   _young_generation_boundary = young_gen->eden_space()->bottom();
+ 
+@@ -778,7 +775,7 @@
+     mr,                         // span
+     true,                       // atomic_discovery
+     true,                       // mt_discovery
+-    &_is_alive_closure,
++    NULL,                       // is_alive_non_header
+     ParallelGCThreads,
+     ParallelRefProcEnabled);
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)psScavenge.hpp	1.46 07/05/05 17:05:30 JVM"
+-#endif
+ /*
+  * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class GCTaskManager;
+@@ -50,7 +47,7 @@
+   static HeapWord* _to_space_top_before_gc;
+ 
+   // Number of consecutive attempts to scavenge that were skipped
+-  static int		    _consecutive_skipped_scavenges;
++  static int                _consecutive_skipped_scavenges;
+ 
+ 
+  protected:
+@@ -62,7 +59,7 @@
+   static int                 _tenuring_threshold;   // tenuring threshold for next scavenge
+   static elapsedTimer        _accumulated_time;     // total time spent on scavenge
+   static HeapWord*           _young_generation_boundary; // The lowest address possible for the young_gen.
+-                                                         // This is used to decide if an oop should be scavenged, 
++                                                         // This is used to decide if an oop should be scavenged,
+                                                          // cards should be marked, etc.
+   static GrowableArray<markOop>* _preserved_mark_stack; // List of marks to be restored after failed promotion
+   static GrowableArray<oop>*     _preserved_oop_stack;  // List of oops that need their mark restored.
+@@ -84,7 +81,7 @@
+   static elapsedTimer*    accumulated_time()    { return &_accumulated_time; }
+   static bool             promotion_failed()
+     { return _preserved_mark_stack != NULL; }
+-  static int		  consecutive_skipped_scavenges() 
++  static int              consecutive_skipped_scavenges()
+     { return _consecutive_skipped_scavenges; }
+ 
+   // Performance Counters
+@@ -92,14 +89,14 @@
+ 
+   // Used by scavenge_contents && psMarkSweep
+   static ReferenceProcessor* const reference_processor() {
+-    assert(_ref_processor != NULL, "Sanity"); 
++    assert(_ref_processor != NULL, "Sanity");
+     return _ref_processor;
+   }
+   // Used to add tasks
+   static GCTaskManager* const gc_task_manager();
+   // The promotion managers tell us if they encountered overflow
+   static void set_survivor_overflow(bool state) {
+-    _survivor_overflow = state; 
++    _survivor_overflow = state;
+   }
+   // Adaptive size policy support.  When the young generation/old generation
+   // boundary moves, _young_generation_boundary must be reset
+@@ -131,7 +128,7 @@
+   inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, oop* p);
+ 
+   // Is an object in the young generation
+-  // This assumes that the HeapWord argument is in the heap, 
++  // This assumes that the HeapWord argument is in the heap,
+   // so it only checks one side of the complete predicate.
+   inline static bool is_obj_in_young(HeapWord* o) {
+     const bool result = (o >= _young_generation_boundary);
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)psScavenge.inline.hpp	1.18 07/05/05 17:05:29 JVM"
+-#endif
+ /*
+  * Copyright 2002-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ 
+@@ -65,8 +62,8 @@
+   } else {
+     *p = pm->copy_to_survivor_space(o, pm->depth_first());
+   }
+-  
+-  // We cannot mark without test, as some code passes us pointers 
++
++  // We cannot mark without test, as some code passes us pointers
+   // that are outside the heap.
+   if ((!PSScavenge::is_obj_in_young((HeapWord*) p)) &&
+       Universe::heap()->is_in_reserved(p)) {
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)psTasks.cpp	1.28 07/05/05 17:05:27 JVM"
+-#endif
+ /*
+  * Copyright 2002-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -53,7 +50,7 @@
+ 
+   PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
+   PSScavengeRootsClosure roots_closure(pm);
+-  
++
+   switch (_root_type) {
+     case universe:
+       Universe::oops_do(&roots_closure);
+@@ -108,7 +105,7 @@
+ 
+   PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
+   PSScavengeRootsClosure roots_closure(pm);
+-  
++
+   if (_java_thread != NULL)
+     _java_thread->oops_do(&roots_closure);
+ 
+@@ -129,7 +126,7 @@
+ void StealTask::do_it(GCTaskManager* manager, uint which) {
+   assert(Universe::heap()->is_gc_active(), "called outside gc");
+ 
+-  PSPromotionManager* pm = 
++  PSPromotionManager* pm =
+     PSPromotionManager::gc_thread_promotion_manager(which);
+   pm->drain_stacks(true);
+   guarantee(pm->stacks_empty(),
+@@ -140,24 +137,30 @@
+     while(true) {
+       oop* p;
+       if (PSPromotionManager::steal_depth(which, &random_seed, p)) {
+-	PSScavenge::copy_and_push_safe_barrier(pm, p);
+-	pm->drain_stacks_depth(true);
++#if PS_PM_STATS
++        pm->increment_steals(p);
++#endif // PS_PM_STATS
++        pm->process_popped_location_depth(p);
++        pm->drain_stacks_depth(true);
+       } else {
+-	if (terminator()->offer_termination()) {
+-	  break;
+-	}
++        if (terminator()->offer_termination()) {
++          break;
++        }
+       }
+     }
+   } else {
+     while(true) {
+       oop obj;
+       if (PSPromotionManager::steal_breadth(which, &random_seed, obj)) {
+-	obj->copy_contents(pm);
+-	pm->drain_stacks_breadth(true);
++#if PS_PM_STATS
++        pm->increment_steals();
++#endif // PS_PM_STATS
++        obj->copy_contents(pm);
++        pm->drain_stacks_breadth(true);
+       } else {
+-	if (terminator()->offer_termination()) {
+-	  break;
+-	}
++        if (terminator()->offer_termination()) {
++          break;
++        }
+       }
+     }
+   }
+@@ -172,14 +175,14 @@
+ void SerialOldToYoungRootsTask::do_it(GCTaskManager* manager, uint which) {
+   assert(_gen != NULL, "Sanity");
+   assert(_gen->object_space()->contains(_gen_top) || _gen_top == _gen->object_space()->top(), "Sanity");
+-  
+-  { 
++
++  {
+     PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
+-    
++
+     assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+     CardTableExtension* card_table = (CardTableExtension *)Universe::heap()->barrier_set();
+     // FIX ME! Assert that card_table is the type we believe it to be.
+-    
++
+     card_table->scavenge_contents(_gen->start_array(),
+                                   _gen->object_space(),
+                                   _gen_top,
+@@ -199,22 +202,20 @@
+   assert(_gen->object_space()->contains(_gen_top) || _gen_top == _gen->object_space()->top(), "Sanity");
+   assert(_stripe_number < ParallelGCThreads, "Sanity");
+ 
+-  { 
++  {
+     PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
+-    
++
+     assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+     CardTableExtension* card_table = (CardTableExtension *)Universe::heap()->barrier_set();
+     // FIX ME! Assert that card_table is the type we believe it to be.
+-    
++
+     card_table->scavenge_contents_parallel(_gen->start_array(),
+                                            _gen->object_space(),
+                                            _gen_top,
+                                            pm,
+                                            _stripe_number);
+-    
++
+     // Do the real work
+     pm->drain_stacks(false);
+   }
+ }
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)psTasks.hpp	1.21 07/05/05 17:05:29 JVM"
+-#endif
+ /*
+  * Copyright 2002-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+@@ -45,7 +42,7 @@
+ //
+ // This task scans all the roots of a given type.
+ //
+-// 
++//
+ 
+ class ScavengeRootsTask : public GCTask {
+  public:
+@@ -57,13 +54,13 @@
+     flat_profiler         = 5,
+     system_dictionary     = 6,
+     management            = 7,
+-    jvmti		  = 8
+-  }; 
++    jvmti                 = 8
++  };
+  private:
+   RootType _root_type;
+  public:
+   ScavengeRootsTask(RootType value) : _root_type(value) {}
+-  
++
+   char* name() { return (char *)"scavenge-roots-task"; }
+ 
+   virtual void do_it(GCTaskManager* manager, uint which);
+@@ -74,7 +71,7 @@
+ //
+ // This task scans the roots of a single thread. This task
+ // enables scanning of thread roots in parallel.
+-// 
++//
+ 
+ class ThreadRootsTask : public GCTask {
+  private:
+@@ -83,7 +80,7 @@
+  public:
+   ThreadRootsTask(JavaThread* root) : _java_thread(root), _vm_thread(NULL) {}
+   ThreadRootsTask(VMThread* root) : _java_thread(NULL), _vm_thread(root) {}
+-  
++
+   char* name() { return (char *)"thread-roots-task"; }
+ 
+   virtual void do_it(GCTaskManager* manager, uint which);
+@@ -93,7 +90,7 @@
+ // StealTask
+ //
+ // This task is used to distribute work to idle threads.
+-// 
++//
+ 
+ class StealTask : public GCTask {
+  private:
+@@ -117,11 +114,11 @@
+  private:
+   PSOldGen* _gen;
+   HeapWord* _gen_top;
+-  
++
+  public:
+   SerialOldToYoungRootsTask(PSOldGen *gen, HeapWord* gen_top) :
+-    _gen(gen), _gen_top(gen_top) { } 
+-  
++    _gen(gen), _gen_top(gen_top) { }
++
+   char* name() { return (char *)"serial-old-to-young-roots-task"; }
+ 
+   virtual void do_it(GCTaskManager* manager, uint which);
+@@ -137,13 +134,12 @@
+   PSOldGen* _gen;
+   HeapWord* _gen_top;
+   uint _stripe_number;
+-  
++
+  public:
+   OldToYoungRootsTask(PSOldGen *gen, HeapWord* gen_top, uint stripe_number) :
+-    _gen(gen), _gen_top(gen_top), _stripe_number(stripe_number) { } 
++    _gen(gen), _gen_top(gen_top), _stripe_number(stripe_number) { }
+ 
+   char* name() { return (char *)"old-to-young-roots-task"; }
+ 
+   virtual void do_it(GCTaskManager* manager, uint which);
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)psVirtualspace.cpp	1.16 07/05/05 17:05:31 JVM"
+-#endif
+ /*
+  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -52,7 +49,7 @@
+ 
+ // Deprecated.
+ bool PSVirtualSpace::initialize(ReservedSpace rs,
+-				size_t commit_size) {
++                                size_t commit_size) {
+   set_reserved(rs);
+   set_committed(reserved_low_addr(), reserved_low_addr());
+ 
+@@ -63,7 +60,7 @@
+   return result;
+ }
+ 
+-PSVirtualSpace::~PSVirtualSpace() { 
++PSVirtualSpace::~PSVirtualSpace() {
+   release();
+ }
+ 
+@@ -102,13 +99,13 @@
+ 
+   if (pre_touch || AlwaysPreTouch) {
+     for (char* curr = base_addr;
+- 	 curr < _committed_high_addr;
+- 	 curr += os::vm_page_size()) {
++         curr < _committed_high_addr;
++         curr += os::vm_page_size()) {
+       char tmp = *curr;
+       *curr = 0;
+     }
+   }
+-  
++
+   return result;
+ }
+ 
+@@ -135,7 +132,7 @@
+   assert(grows_up(), "this space must grow up");
+   assert(other_space->grows_down(), "other space must grow down");
+   assert(reserved_high_addr() == other_space->reserved_low_addr(),
+-	 "spaces not contiguous");
++         "spaces not contiguous");
+   assert(special() == other_space->special(), "one space is special, the other is not");
+   DEBUG_ONLY(PSVirtualSpaceVerifier this_verifier(this));
+   DEBUG_ONLY(PSVirtualSpaceVerifier other_verifier(other_space));
+@@ -160,7 +157,7 @@
+         os::commit_memory(commit_base, tmp_bytes, alignment())) {
+       // Reduce the reserved region in the other space.
+       other_space->set_reserved(other_space->reserved_low_addr() + tmp_bytes,
+-				other_space->reserved_high_addr(),
++                                other_space->reserved_high_addr(),
+                                 other_space->special());
+ 
+       // Grow both reserved and committed in this space.
+@@ -177,9 +174,9 @@
+   if (tmp_bytes > 0) {
+     // Reduce both committed and reserved in the other space.
+     other_space->set_committed(other_space->committed_low_addr() + tmp_bytes,
+-			       other_space->committed_high_addr());
++                               other_space->committed_high_addr());
+     other_space->set_reserved(other_space->reserved_low_addr() + tmp_bytes,
+-			      other_space->reserved_high_addr(),
++                              other_space->reserved_high_addr(),
+                               other_space->special());
+ 
+     // Grow both reserved and committed in this space.
+@@ -214,8 +211,8 @@
+ 
+   // Reserved region must be non-empty or both addrs must be 0.
+   assert(reserved_low_addr() < reserved_high_addr() ||
+-	 reserved_low_addr() == NULL && reserved_high_addr() == NULL,
+-	 "bad reserved addrs");
++         reserved_low_addr() == NULL && reserved_high_addr() == NULL,
++         "bad reserved addrs");
+   assert(committed_low_addr() <= committed_high_addr(), "bad committed addrs");
+ 
+   if (grows_up()) {
+@@ -229,27 +226,27 @@
+ 
+ void PSVirtualSpace::print() const {
+   gclog_or_tty->print_cr("virtual space [" PTR_FORMAT "]:  alignment="
+-			 SIZE_FORMAT "K grows %s%s",
+-			 this, alignment() / K, grows_up() ? "up" : "down",
++                         SIZE_FORMAT "K grows %s%s",
++                         this, alignment() / K, grows_up() ? "up" : "down",
+                          special() ? " (pinned in memory)" : "");
+   gclog_or_tty->print_cr("    reserved=" SIZE_FORMAT "K"
+-			 " [" PTR_FORMAT "," PTR_FORMAT "]"
+-			 " committed=" SIZE_FORMAT "K"
+-			 " [" PTR_FORMAT "," PTR_FORMAT "]",
+-			 reserved_size() / K,
+-			 reserved_low_addr(), reserved_high_addr(),
+-			 committed_size() / K,
+-			 committed_low_addr(), committed_high_addr());
++                         " [" PTR_FORMAT "," PTR_FORMAT "]"
++                         " committed=" SIZE_FORMAT "K"
++                         " [" PTR_FORMAT "," PTR_FORMAT "]",
++                         reserved_size() / K,
++                         reserved_low_addr(), reserved_high_addr(),
++                         committed_size() / K,
++                         committed_low_addr(), committed_high_addr());
+ }
+ #endif // #ifndef PRODUCT
+ 
+ void PSVirtualSpace::print_space_boundaries_on(outputStream* st) const {
+   st->print_cr(" [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ")",
+-	       low_boundary(), high(), high_boundary());
++               low_boundary(), high(), high_boundary());
+ }
+ 
+ PSVirtualSpaceHighToLow::PSVirtualSpaceHighToLow(ReservedSpace rs,
+-						 size_t alignment) :
++                                                 size_t alignment) :
+   PSVirtualSpace(alignment)
+ {
+   set_reserved(rs);
+@@ -279,13 +276,13 @@
+ 
+   if (pre_touch || AlwaysPreTouch) {
+     for (char* curr = base_addr;
+- 	 curr < _committed_high_addr;
+- 	 curr += os::vm_page_size()) {
++         curr < _committed_high_addr;
++         curr += os::vm_page_size()) {
+       char tmp = *curr;
+       *curr = 0;
+     }
+   }
+-  
++
+   return result;
+ }
+ 
+@@ -307,12 +304,12 @@
+ }
+ 
+ size_t PSVirtualSpaceHighToLow::expand_into(PSVirtualSpace* other_space,
+-					    size_t bytes) {
++                                            size_t bytes) {
+   assert(is_aligned(bytes), "arg not aligned");
+   assert(grows_down(), "this space must grow down");
+   assert(other_space->grows_up(), "other space must grow up");
+   assert(reserved_low_addr() == other_space->reserved_high_addr(),
+-	 "spaces not contiguous");
++         "spaces not contiguous");
+   assert(special() == other_space->special(), "one space is special in memory, the other is not");
+   DEBUG_ONLY(PSVirtualSpaceVerifier this_verifier(this));
+   DEBUG_ONLY(PSVirtualSpaceVerifier other_verifier(other_space));
+@@ -337,7 +334,7 @@
+         os::commit_memory(commit_base, tmp_bytes, alignment())) {
+       // Reduce the reserved region in the other space.
+       other_space->set_reserved(other_space->reserved_low_addr(),
+-				other_space->reserved_high_addr() - tmp_bytes,
++                                other_space->reserved_high_addr() - tmp_bytes,
+                                 other_space->special());
+ 
+       // Grow both reserved and committed in this space.
+@@ -354,9 +351,9 @@
+   if (tmp_bytes > 0) {
+     // Reduce both committed and reserved in the other space.
+     other_space->set_committed(other_space->committed_low_addr(),
+-			       other_space->committed_high_addr() - tmp_bytes);
++                               other_space->committed_high_addr() - tmp_bytes);
+     other_space->set_reserved(other_space->reserved_low_addr(),
+-			      other_space->reserved_high_addr() - tmp_bytes,
++                              other_space->reserved_high_addr() - tmp_bytes,
+                               other_space->special());
+ 
+     // Grow both reserved and committed in this space.
+@@ -370,5 +367,5 @@
+ void
+ PSVirtualSpaceHighToLow::print_space_boundaries_on(outputStream* st) const {
+   st->print_cr(" (" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT "]",
+-	       high_boundary(), low(), low_boundary());
++               high_boundary(), low(), low_boundary());
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)psVirtualspace.hpp	1.15 07/05/05 17:05:31 JVM"
+-#endif
+ /*
+  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,11 +19,11 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // VirtualSpace for the parallel scavenge collector.
+-// 
++//
+ // VirtualSpace is data structure for committing a previously reserved address
+ // range in smaller chunks.
+ 
+@@ -39,7 +36,7 @@
+ 
+   // Reserved area
+   char* _reserved_low_addr;
+-  char* _reserved_high_addr; 
++  char* _reserved_high_addr;
+ 
+   // Committed area
+   char* _committed_low_addr;
+@@ -142,9 +139,9 @@
+ #endif
+ };
+ 
+-// 
++//
+ // PSVirtualSpace inlines.
+-// 
++//
+ inline size_t
+ PSVirtualSpace::pointer_delta(const char* left, const char* right) {
+   return ::pointer_delta((void *)left, (void*)right, sizeof(char));
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)psYoungGen.cpp	1.67 07/05/05 17:05:31 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,16 +19,16 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_psYoungGen.cpp.incl"
+ 
+ PSYoungGen::PSYoungGen(size_t        initial_size,
+-                       size_t        min_size, 
++                       size_t        min_size,
+                        size_t        max_size) :
+-  _init_gen_size(initial_size), 
++  _init_gen_size(initial_size),
+   _min_gen_size(min_size),
+   _max_gen_size(max_size)
+ {}
+@@ -41,7 +38,7 @@
+   _virtual_space = new PSVirtualSpace(rs, alignment);
+   if (!_virtual_space->expand_by(_init_gen_size)) {
+     vm_exit_during_initialization("Could not reserve enough space for "
+-				  "object heap");
++                                  "object heap");
+   }
+ }
+ 
+@@ -56,7 +53,7 @@
+                         (HeapWord*)_virtual_space->high_boundary());
+ 
+   MemRegion cmr((HeapWord*)_virtual_space->low(),
+-		(HeapWord*)_virtual_space->high());
++                (HeapWord*)_virtual_space->high());
+   Universe::heap()->barrier_set()->resize_covered_region(cmr);
+ 
+   if (UseNUMA) {
+@@ -204,7 +201,7 @@
+ 
+   // Relationship of spaces to each other
+   char* eden_start = (char*)eden_space()->bottom();
+-  char* eden_end   = (char*)eden_space()->end();   
++  char* eden_end   = (char*)eden_space()->end();
+   char* from_start = (char*)from_space()->bottom();
+   char* from_end   = (char*)from_space()->end();
+   char* to_start   = (char*)to_space()->bottom();
+@@ -282,9 +279,9 @@
+ 
+   // Adjust new generation size
+   const size_t eden_plus_survivors =
+-	  align_size_up(eden_size + 2 * survivor_size, alignment);
+-  size_t desired_size = MAX2(MIN2(eden_plus_survivors, max_size()), 
+-			     min_gen_size());
++          align_size_up(eden_size + 2 * survivor_size, alignment);
++  size_t desired_size = MAX2(MIN2(eden_plus_survivors, max_size()),
++                             min_gen_size());
+   assert(desired_size <= max_size(), "just checking");
+ 
+   if (desired_size > orig_size) {
+@@ -326,23 +323,23 @@
+     if (Verbose && PrintGC) {
+       size_t current_size  = _virtual_space->committed_size();
+       gclog_or_tty->print_cr("PSYoung generation size changed: "
+-			     SIZE_FORMAT "K->" SIZE_FORMAT "K",
+-			     orig_size/K, current_size/K);
++                             SIZE_FORMAT "K->" SIZE_FORMAT "K",
++                             orig_size/K, current_size/K);
+     }
+   }
+ 
+   guarantee(eden_plus_survivors <= _virtual_space->committed_size() ||
+-	    _virtual_space->committed_size() == max_size(), "Sanity");
++            _virtual_space->committed_size() == max_size(), "Sanity");
+ 
+   return true;
+ }
+ 
+ 
+-void PSYoungGen::resize_spaces(size_t requested_eden_size, 
+-			       size_t requested_survivor_size) {
++void PSYoungGen::resize_spaces(size_t requested_eden_size,
++                               size_t requested_survivor_size) {
+   assert(UseAdaptiveSizePolicy, "sanity check");
+-  assert(requested_eden_size > 0  && requested_survivor_size > 0, 
+-	 "just checking");
++  assert(requested_eden_size > 0  && requested_survivor_size > 0,
++         "just checking");
+ 
+   // We require eden and to space to be empty
+   if ((!eden_space()->is_empty()) || (!to_space()->is_empty())) {
+@@ -350,35 +347,35 @@
+   }
+ 
+   if (PrintAdaptiveSizePolicy && Verbose) {
+-    gclog_or_tty->print_cr("PSYoungGen::resize_spaces(requested_eden_size: " 
+-                  SIZE_FORMAT 
++    gclog_or_tty->print_cr("PSYoungGen::resize_spaces(requested_eden_size: "
++                  SIZE_FORMAT
+                   ", requested_survivor_size: " SIZE_FORMAT ")",
+                   requested_eden_size, requested_survivor_size);
+-    gclog_or_tty->print_cr("    eden: [" PTR_FORMAT ".." PTR_FORMAT ") " 
+-                  SIZE_FORMAT, 
+-                  eden_space()->bottom(), 
+-                  eden_space()->end(), 
++    gclog_or_tty->print_cr("    eden: [" PTR_FORMAT ".." PTR_FORMAT ") "
++                  SIZE_FORMAT,
++                  eden_space()->bottom(),
++                  eden_space()->end(),
+                   pointer_delta(eden_space()->end(),
+                                 eden_space()->bottom(),
+                                 sizeof(char)));
+-    gclog_or_tty->print_cr("    from: [" PTR_FORMAT ".." PTR_FORMAT ") " 
+-		  SIZE_FORMAT, 
+-                  from_space()->bottom(), 
+-                  from_space()->end(), 
++    gclog_or_tty->print_cr("    from: [" PTR_FORMAT ".." PTR_FORMAT ") "
++                  SIZE_FORMAT,
++                  from_space()->bottom(),
++                  from_space()->end(),
+                   pointer_delta(from_space()->end(),
+                                 from_space()->bottom(),
+                                 sizeof(char)));
+-    gclog_or_tty->print_cr("      to: [" PTR_FORMAT ".." PTR_FORMAT ") " 
+-		  SIZE_FORMAT, 
+-                  to_space()->bottom(),   
+-                  to_space()->end(), 
++    gclog_or_tty->print_cr("      to: [" PTR_FORMAT ".." PTR_FORMAT ") "
++                  SIZE_FORMAT,
++                  to_space()->bottom(),
++                  to_space()->end(),
+                   pointer_delta(  to_space()->end(),
+                                   to_space()->bottom(),
+                                   sizeof(char)));
+   }
+ 
+   // There's nothing to do if the new sizes are the same as the current
+-  if (requested_survivor_size == to_space()->capacity_in_bytes() && 
++  if (requested_survivor_size == to_space()->capacity_in_bytes() &&
+       requested_survivor_size == from_space()->capacity_in_bytes() &&
+       requested_eden_size == eden_space()->capacity_in_bytes()) {
+     if (PrintAdaptiveSizePolicy && Verbose) {
+@@ -386,9 +383,9 @@
+     }
+     return;
+   }
+-  
++
+   char* eden_start = (char*)eden_space()->bottom();
+-  char* eden_end   = (char*)eden_space()->end();   
++  char* eden_end   = (char*)eden_space()->end();
+   char* from_start = (char*)from_space()->bottom();
+   char* from_end   = (char*)from_space()->end();
+   char* to_start   = (char*)to_space()->bottom();
+@@ -396,7 +393,7 @@
+ 
+   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+   const size_t alignment = heap->intra_generation_alignment();
+-  const bool maintain_minimum = 
++  const bool maintain_minimum =
+     (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
+ 
+   // Check whether from space is below to space
+@@ -422,9 +419,9 @@
+       // This could be done in general but policy at a higher
+       // level is determining a requested size for eden and that
+       // should be honored unless there is a fundamental reason.
+-      eden_size = pointer_delta(from_start, 
+-				eden_start, 
+-				sizeof(char));
++      eden_size = pointer_delta(from_start,
++                                eden_start,
++                                sizeof(char));
+     } else {
+       eden_size = MIN2(requested_eden_size,
+                        pointer_delta(from_start, eden_start, sizeof(char)));
+@@ -439,13 +436,13 @@
+ 
+     // First calculate an optimal to-space
+     to_end   = (char*)_virtual_space->high();
+-    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size, 
+-				    sizeof(char));
++    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
++                                    sizeof(char));
+ 
+     // Does the optimal to-space overlap from-space?
+     if (to_start < (char*)from_space()->end()) {
+       assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+-      
++
+       // Calculate the minimum offset possible for from_end
+       size_t from_size = pointer_delta(from_space()->top(), from_start, sizeof(char));
+ 
+@@ -466,22 +463,22 @@
+     }
+ 
+     guarantee(to_start != to_end, "to space is zero sized");
+-      
++
+     if (PrintAdaptiveSizePolicy && Verbose) {
+       gclog_or_tty->print_cr("    [eden_start .. eden_end): "
+-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
+-                    eden_start, 
+-                    eden_end, 
++                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
++                    eden_start,
++                    eden_end,
+                     pointer_delta(eden_end, eden_start, sizeof(char)));
+       gclog_or_tty->print_cr("    [from_start .. from_end): "
+-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
+-                    from_start, 
+-                    from_end, 
++                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
++                    from_start,
++                    from_end,
+                     pointer_delta(from_end, from_start, sizeof(char)));
+       gclog_or_tty->print_cr("    [  to_start ..   to_end): "
+-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
+-                    to_start,   
+-                    to_end, 
++                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
++                    to_start,
++                    to_end,
+                     pointer_delta(  to_end,   to_start, sizeof(char)));
+     }
+   } else {
+@@ -494,17 +491,17 @@
+     // to space as if we were able to resize from space, even though from
+     // space is not modified.
+     // Giving eden priority was tried and gave poorer performance.
+-    to_end   = (char*)pointer_delta(_virtual_space->high(), 
+-                                    (char*)requested_survivor_size, 
+-				    sizeof(char));
++    to_end   = (char*)pointer_delta(_virtual_space->high(),
++                                    (char*)requested_survivor_size,
++                                    sizeof(char));
+     to_end   = MIN2(to_end, from_start);
+-    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size, 
+-				    sizeof(char));
++    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
++                                    sizeof(char));
+     // if the space sizes are to be increased by several times then
+     // 'to_start' will point beyond the young generation. In this case
+     // 'to_start' should be adjusted.
+     to_start = MAX2(to_start, eden_start + alignment);
+-    
++
+     // Compute how big eden can be, then adjust end.
+     // See  comments above on calculating eden_end.
+     size_t eden_size;
+@@ -526,25 +523,25 @@
+ 
+     if (PrintAdaptiveSizePolicy && Verbose) {
+       gclog_or_tty->print_cr("    [eden_start .. eden_end): "
+-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
+-                    eden_start, 
+-                    eden_end, 
++                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
++                    eden_start,
++                    eden_end,
+                     pointer_delta(eden_end, eden_start, sizeof(char)));
+-      gclog_or_tty->print_cr("    [  to_start ..   to_end): " 
+-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
+-                    to_start,   
+-                    to_end, 
++      gclog_or_tty->print_cr("    [  to_start ..   to_end): "
++                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
++                    to_start,
++                    to_end,
+                     pointer_delta(  to_end,   to_start, sizeof(char)));
+-      gclog_or_tty->print_cr("    [from_start .. from_end): " 
+-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
+-                    from_start, 
+-                    from_end, 
++      gclog_or_tty->print_cr("    [from_start .. from_end): "
++                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
++                    from_start,
++                    from_end,
+                     pointer_delta(from_end, from_start, sizeof(char)));
+     }
+   }
+-  
+ 
+-  guarantee((HeapWord*)from_start <= from_space()->bottom(), 
++
++  guarantee((HeapWord*)from_start <= from_space()->bottom(),
+             "from start moved to the right");
+   guarantee((HeapWord*)from_end >= from_space()->top(),
+             "from end moved into live data");
+@@ -712,7 +709,7 @@
+   size_t delta_in_survivor = 0;
+   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+   const size_t space_alignment = heap->intra_generation_alignment();
+-  const size_t gen_alignment = heap->generation_alignment();
++  const size_t gen_alignment = heap->young_gen_alignment();
+ 
+   MutableSpace* space_shrinking = NULL;
+   if (from_space()->end() > to_space()->end()) {
+@@ -726,17 +723,17 @@
+   assert(((HeapWord*)virtual_space()->high()) >= space_shrinking->end(),
+     "Survivor space beyond high end");
+   size_t unused_committed = pointer_delta(virtual_space()->high(),
+-    space_shrinking->end(), sizeof(char));   
++    space_shrinking->end(), sizeof(char));
+ 
+   if (space_shrinking->is_empty()) {
+     // Don't let the space shrink to 0
+-    assert(space_shrinking->capacity_in_bytes() >= space_alignment, 
++    assert(space_shrinking->capacity_in_bytes() >= space_alignment,
+       "Space is too small");
+     delta_in_survivor = space_shrinking->capacity_in_bytes() - space_alignment;
+   } else {
+-    delta_in_survivor = pointer_delta(space_shrinking->end(), 
+-				      space_shrinking->top(),
+-				      sizeof(char));
++    delta_in_survivor = pointer_delta(space_shrinking->end(),
++                                      space_shrinking->top(),
++                                      sizeof(char));
+   }
+ 
+   size_t delta_in_bytes = unused_committed + delta_in_survivor;
+@@ -746,9 +743,9 @@
+ 
+ // Return the number of bytes available for resizing down the young
+ // generation.  This is the minimum of
+-// 	input "bytes"
+-//	bytes to the minimum young gen size
+-//	bytes to the size currently being used + some small extra
++//      input "bytes"
++//      bytes to the minimum young gen size
++//      bytes to the size currently being used + some small extra
+ size_t PSYoungGen::limit_gen_shrink(size_t bytes) {
+   // Allow shrinkage into the current eden but keep eden large enough
+   // to maintain the minimum young gen size
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)psYoungGen.hpp	1.48 07/05/05 17:05:31 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class PSMarkSweepDecorator;
+@@ -58,7 +55,7 @@
+   SpaceCounters*            _from_counters;
+   SpaceCounters*            _to_counters;
+ 
+-  // Initialize the space boundaries 
++  // Initialize the space boundaries
+   void compute_initial_space_boundaries();
+ 
+   // Space boundary helper
+@@ -72,7 +69,7 @@
+ 
+   // Return number of bytes that the generation can change.
+   // These should not be used by PSYoungGen
+-  virtual size_t available_for_expansion(); 
++  virtual size_t available_for_expansion();
+   virtual size_t available_for_contraction();
+ 
+   // Given a desired shrinkage in the size of the young generation,
+@@ -87,7 +84,7 @@
+ 
+  public:
+   // Initialize the generation.
+-  PSYoungGen(size_t        initial_byte_size, 
++  PSYoungGen(size_t        initial_byte_size,
+              size_t        minimum_byte_size,
+              size_t        maximum_byte_size);
+   void initialize_work();
+@@ -177,7 +174,7 @@
+   void print_on(outputStream* st) const;
+   void print_used_change(size_t prev_used) const;
+   virtual const char* name() const { return "PSYoungGen"; }
+- 
++
+   void verify(bool allow_dirty);
+ 
+   // Space boundary invariant checker
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,28 +1,25 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)vmPSOperations.cpp	1.1 07/05/14 11:57:11 JVM"
+-#endif
+ /*
+  * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+- *   
++ *
+  * This code is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License version 2 only, as
+  * published by the Free Software Foundation.
+- *   
++ *
+  * This code is distributed in the hope that it will be useful, but WITHOUT
+  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+  * version 2 for more details (a copy is included in the LICENSE file that
+  * accompanied this code).
+- *  
++ *
+  * You should have received a copy of the GNU General Public License version
+  * 2 along with this work; if not, write to the Free Software Foundation,
+  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+- *   
++ *
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -47,7 +44,7 @@
+ 
+   GCCauseSetter gccs(heap, _gc_cause);
+   _result = heap->failed_mem_allocate(_size, _is_tlab);
+- 
++
+   if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
+     set_gc_locked();
+   }
+@@ -89,11 +86,11 @@
+   notify_gc_begin(true);
+ 
+   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, 
++  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
+     "must be a ParallelScavengeHeap");
+ 
+   GCCauseSetter gccs(heap, _gc_cause);
+-  if (_gc_cause == GCCause::_gc_locker 
++  if (_gc_cause == GCCause::_gc_locker
+       DEBUG_ONLY(|| _gc_cause == GCCause::_scavenge_alot)) {
+     // If (and only if) the scavenge fails, this will invoke a full gc.
+     heap->invoke_scavenge();
+@@ -102,4 +99,3 @@
+   }
+   notify_gc_end();
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,28 +1,25 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)vmPSOperations.hpp	1.2 07/05/16 16:53:01 JVM"
+-#endif
+ /*
+  * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+- *   
++ *
+  * This code is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License version 2 only, as
+  * published by the Free Software Foundation.
+- *   
++ *
+  * This code is distributed in the hope that it will be useful, but WITHOUT
+  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+  * version 2 for more details (a copy is included in the LICENSE file that
+  * accompanied this code).
+- *  
++ *
+  * You should have received a copy of the GNU General Public License version
+  * 2 along with this work; if not, write to the Free Software Foundation,
+  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+- *   
++ *
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class VM_ParallelGCFailedAllocation: public VM_GC_Operation {
+@@ -39,7 +36,7 @@
+     return VMOp_ParallelGCFailedAllocation;
+   }
+   virtual void doit();
+-  
++
+   HeapWord* result() const       { return _result; }
+ };
+ 
+@@ -52,8 +49,8 @@
+   VM_ParallelGCFailedPermanentAllocation(size_t size,
+                                          unsigned int gc_count,
+                                          unsigned int full_gc_count);
+-  virtual VMOp_Type type() const { 
+-    return VMOp_ParallelGCFailedPermanentAllocation; 
++  virtual VMOp_Type type() const {
++    return VMOp_ParallelGCFailedPermanentAllocation;
+   }
+   virtual void doit();
+   HeapWord* result() const       { return _result; }
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmStructs_parallelgc.hpp openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmStructs_parallelgc.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmStructs_parallelgc.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmStructs_parallelgc.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,28 +1,25 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)vmStructs_parallelgc.hpp	1.2 07/05/01 19:01:30 JVM"
+-#endif
+ /*
+  * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+- *   
++ *
+  * This code is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License version 2 only, as
+  * published by the Free Software Foundation.
+- *   
++ *
+  * This code is distributed in the hope that it will be useful, but WITHOUT
+  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+  * version 2 for more details (a copy is included in the LICENSE file that
+  * accompanied this code).
+- *  
++ *
+  * You should have received a copy of the GNU General Public License version
+  * 2 along with this work; if not, write to the Free Software Foundation,
+  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+- *   
++ *
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #define VM_STRUCTS_PARALLELGC(nonstatic_field, \
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp openjdk/hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)asParNewGeneration.cpp	1.11 07/05/05 17:05:25 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,17 +19,17 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_asParNewGeneration.cpp.incl"
+ 
+-ASParNewGeneration::ASParNewGeneration(ReservedSpace rs, 
+-				       size_t initial_byte_size, 
+-				       size_t min_byte_size,
+-				       int level) :
+-  ParNewGeneration(rs, initial_byte_size, level), 
++ASParNewGeneration::ASParNewGeneration(ReservedSpace rs,
++                                       size_t initial_byte_size,
++                                       size_t min_byte_size,
++                                       int level) :
++  ParNewGeneration(rs, initial_byte_size, level),
+   _min_gen_size(min_byte_size) {}
+ 
+ const char* ASParNewGeneration::name() const {
+@@ -40,7 +37,7 @@
+ }
+ 
+ void ASParNewGeneration::adjust_desired_tenuring_threshold() {
+-  assert(UseAdaptiveSizePolicy, 
++  assert(UseAdaptiveSizePolicy,
+     "Should only be used with UseAdaptiveSizePolicy");
+ }
+ 
+@@ -92,17 +89,17 @@
+   assert(((HeapWord*)virtual_space()->high()) >= space_shrinking->end(),
+     "Survivor space beyond high end");
+   size_t unused_committed = pointer_delta(virtual_space()->high(),
+-    space_shrinking->end(), sizeof(char));   
++    space_shrinking->end(), sizeof(char));
+ 
+   if (space_shrinking->is_empty()) {
+     // Don't let the space shrink to 0
+-    assert(space_shrinking->capacity_in_bytes() >= space_alignment, 
++    assert(space_shrinking->capacity_in_bytes() >= space_alignment,
+       "Space is too small");
+     delta_in_survivor = space_shrinking->capacity_in_bytes() - space_alignment;
+   } else {
+-    delta_in_survivor = pointer_delta(space_shrinking->end(), 
+-				      space_shrinking->top(),
+-				      sizeof(char));
++    delta_in_survivor = pointer_delta(space_shrinking->end(),
++                                      space_shrinking->top(),
++                                      sizeof(char));
+   }
+ 
+   size_t delta_in_bytes = unused_committed + delta_in_survivor;
+@@ -126,9 +123,9 @@
+ 
+ // Return the number of bytes available for resizing down the young
+ // generation.  This is the minimum of
+-// 	input "bytes"
+-//	bytes to the minimum young gen size
+-//	bytes to the size currently being used + some small extra
++//      input "bytes"
++//      bytes to the minimum young gen size
++//      bytes to the size currently being used + some small extra
+ size_t ASParNewGeneration::limit_gen_shrink (size_t bytes) {
+   // Allow shrinkage into the current eden but keep eden large enough
+   // to maintain the minimum young gen size
+@@ -139,8 +136,8 @@
+ // Note that the the alignment used is the OS page size as
+ // opposed to an alignment associated with the virtual space
+ // (as is done in the ASPSYoungGen/ASPSOldGen)
+-bool ASParNewGeneration::resize_generation(size_t eden_size, 
+-					   size_t survivor_size) {
++bool ASParNewGeneration::resize_generation(size_t eden_size,
++                                           size_t survivor_size) {
+   const size_t alignment = os::vm_page_size();
+   size_t orig_size = virtual_space()->committed_size();
+   bool size_changed = false;
+@@ -151,14 +148,14 @@
+   // size and disired survivor sizes are desired goals and may
+   // exceed the total generation size.
+ 
+-  assert(min_gen_size() <= orig_size && orig_size <= max_gen_size(), 
++  assert(min_gen_size() <= orig_size && orig_size <= max_gen_size(),
+     "just checking");
+ 
+   // Adjust new generation size
+   const size_t eden_plus_survivors =
+-	  align_size_up(eden_size + 2 * survivor_size, alignment);
+-  size_t desired_size = MAX2(MIN2(eden_plus_survivors, max_gen_size()), 
+-			     min_gen_size());
++          align_size_up(eden_size + 2 * survivor_size, alignment);
++  size_t desired_size = MAX2(MIN2(eden_plus_survivors, max_gen_size()),
++                             min_gen_size());
+   assert(desired_size <= max_gen_size(), "just checking");
+ 
+   if (desired_size > orig_size) {
+@@ -202,13 +199,13 @@
+     if (Verbose && PrintGC) {
+       size_t current_size  = virtual_space()->committed_size();
+       gclog_or_tty->print_cr("ASParNew generation size changed: "
+-			     SIZE_FORMAT "K->" SIZE_FORMAT "K",
+-			     orig_size/K, current_size/K);
++                             SIZE_FORMAT "K->" SIZE_FORMAT "K",
++                             orig_size/K, current_size/K);
+     }
+   }
+ 
+   guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
+-	    virtual_space()->committed_size() == max_gen_size(), "Sanity");
++            virtual_space()->committed_size() == max_gen_size(), "Sanity");
+ 
+   return true;
+ }
+@@ -217,7 +214,7 @@
+ 
+   GenCollectedHeap* gch = GenCollectedHeap::heap();
+   HeapWord* new_end = (HeapWord*)virtual_space()->high();
+-  
++
+   if (from()->end() > to()->end()) {
+     assert(new_end >= from()->end(), "Shrinking past from-space");
+   } else {
+@@ -229,14 +226,14 @@
+     }
+   }
+ }
+-void ASParNewGeneration::resize_spaces(size_t requested_eden_size, 
+-			               size_t requested_survivor_size) {
++void ASParNewGeneration::resize_spaces(size_t requested_eden_size,
++                                       size_t requested_survivor_size) {
+   assert(UseAdaptiveSizePolicy, "sanity check");
+-  assert(requested_eden_size > 0  && requested_survivor_size > 0, 
+-	 "just checking");
++  assert(requested_eden_size > 0  && requested_survivor_size > 0,
++         "just checking");
+   CollectedHeap* heap = Universe::heap();
+   assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Sanity");
+-      
++
+ 
+   // We require eden and to space to be empty
+   if ((!eden()->is_empty()) || (!to()->is_empty())) {
+@@ -246,35 +243,35 @@
+   size_t cur_eden_size = eden()->capacity();
+ 
+   if (PrintAdaptiveSizePolicy && Verbose) {
+-    gclog_or_tty->print_cr("ASParNew::resize_spaces(requested_eden_size: " 
+-                  SIZE_FORMAT 
++    gclog_or_tty->print_cr("ASParNew::resize_spaces(requested_eden_size: "
++                  SIZE_FORMAT
+                   ", requested_survivor_size: " SIZE_FORMAT ")",
+                   requested_eden_size, requested_survivor_size);
+-    gclog_or_tty->print_cr("    eden: [" PTR_FORMAT ".." PTR_FORMAT ") " 
+-                  SIZE_FORMAT, 
+-                  eden()->bottom(), 
+-                  eden()->end(), 
++    gclog_or_tty->print_cr("    eden: [" PTR_FORMAT ".." PTR_FORMAT ") "
++                  SIZE_FORMAT,
++                  eden()->bottom(),
++                  eden()->end(),
+                   pointer_delta(eden()->end(),
+                                 eden()->bottom(),
+                                 sizeof(char)));
+-    gclog_or_tty->print_cr("    from: [" PTR_FORMAT ".." PTR_FORMAT ") " 
+-		  SIZE_FORMAT, 
+-                  from()->bottom(), 
+-                  from()->end(), 
++    gclog_or_tty->print_cr("    from: [" PTR_FORMAT ".." PTR_FORMAT ") "
++                  SIZE_FORMAT,
++                  from()->bottom(),
++                  from()->end(),
+                   pointer_delta(from()->end(),
+                                 from()->bottom(),
+                                 sizeof(char)));
+-    gclog_or_tty->print_cr("      to: [" PTR_FORMAT ".." PTR_FORMAT ") " 
+-		  SIZE_FORMAT, 
+-                  to()->bottom(),   
+-                  to()->end(), 
++    gclog_or_tty->print_cr("      to: [" PTR_FORMAT ".." PTR_FORMAT ") "
++                  SIZE_FORMAT,
++                  to()->bottom(),
++                  to()->end(),
+                   pointer_delta(  to()->end(),
+                                   to()->bottom(),
+                                   sizeof(char)));
+   }
+ 
+   // There's nothing to do if the new sizes are the same as the current
+-  if (requested_survivor_size == to()->capacity() && 
++  if (requested_survivor_size == to()->capacity() &&
+       requested_survivor_size == from()->capacity() &&
+       requested_eden_size == eden()->capacity()) {
+     if (PrintAdaptiveSizePolicy && Verbose) {
+@@ -282,16 +279,16 @@
+     }
+     return;
+   }
+-  
++
+   char* eden_start = (char*)eden()->bottom();
+-  char* eden_end   = (char*)eden()->end();   
++  char* eden_end   = (char*)eden()->end();
+   char* from_start = (char*)from()->bottom();
+   char* from_end   = (char*)from()->end();
+   char* to_start   = (char*)to()->bottom();
+   char* to_end     = (char*)to()->end();
+ 
+   const size_t alignment = os::vm_page_size();
+-  const bool maintain_minimum = 
++  const bool maintain_minimum =
+     (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
+ 
+   // Check whether from space is below to space
+@@ -317,9 +314,9 @@
+       // This could be done in general but policy at a higher
+       // level is determining a requested size for eden and that
+       // should be honored unless there is a fundamental reason.
+-      eden_size = pointer_delta(from_start, 
+-				eden_start, 
+-				sizeof(char));
++      eden_size = pointer_delta(from_start,
++                                eden_start,
++                                sizeof(char));
+     } else {
+       eden_size = MIN2(requested_eden_size,
+                        pointer_delta(from_start, eden_start, sizeof(char)));
+@@ -337,8 +334,8 @@
+ 
+     // First calculate an optimal to-space
+     to_end   = (char*)virtual_space()->high();
+-    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size, 
+-				    sizeof(char));
++    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
++                                    sizeof(char));
+ 
+     // Does the optimal to-space overlap from-space?
+     if (to_start < (char*)from()->end()) {
+@@ -366,29 +363,29 @@
+       if (requested_eden_size <= cur_eden_size) {
+         to_start = from_end;
+         if (to_start + requested_survivor_size > to_start) {
+-	  to_end = to_start + requested_survivor_size;
++          to_end = to_start + requested_survivor_size;
+         }
+       }
+       // else leave to_end pointing to the high end of the virtual space.
+     }
+ 
+     guarantee(to_start != to_end, "to space is zero sized");
+-      
++
+     if (PrintAdaptiveSizePolicy && Verbose) {
+       gclog_or_tty->print_cr("    [eden_start .. eden_end): "
+-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
+-                    eden_start, 
+-                    eden_end, 
++                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
++                    eden_start,
++                    eden_end,
+                     pointer_delta(eden_end, eden_start, sizeof(char)));
+       gclog_or_tty->print_cr("    [from_start .. from_end): "
+-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
+-                    from_start, 
+-                    from_end, 
++                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
++                    from_start,
++                    from_end,
+                     pointer_delta(from_end, from_start, sizeof(char)));
+       gclog_or_tty->print_cr("    [  to_start ..   to_end): "
+-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
+-                    to_start,   
+-                    to_end, 
++                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
++                    to_start,
++                    to_end,
+                     pointer_delta(  to_end,   to_start, sizeof(char)));
+     }
+   } else {
+@@ -400,12 +397,12 @@
+     // Calculate the to-space boundaries based on
+     // the start of from-space.
+     to_end = from_start;
+-    to_start = (char*)pointer_delta(from_start, 
+-                                    (char*)requested_survivor_size, 
+-				    sizeof(char));
++    to_start = (char*)pointer_delta(from_start,
++                                    (char*)requested_survivor_size,
++                                    sizeof(char));
+     // Calculate the ideal eden boundaries.
+     // eden_end is already at the bottom of the generation
+-    assert(eden_start == virtual_space()->low(), 
++    assert(eden_start == virtual_space()->low(),
+       "Eden is not starting at the low end of the virtual space");
+     if (eden_start + requested_eden_size >= eden_start) {
+       eden_end = eden_start + requested_eden_size;
+@@ -434,7 +431,7 @@
+       eden_size = pointer_delta(eden_end, eden_start, sizeof(char));
+     }
+     eden_size = align_size_down(eden_size, alignment);
+-    assert(maintain_minimum || eden_size <= requested_eden_size, 
++    assert(maintain_minimum || eden_size <= requested_eden_size,
+       "Eden size is too large");
+     assert(eden_size >= alignment, "Eden size is too small");
+     eden_end = eden_start + eden_size;
+@@ -460,32 +457,32 @@
+         from_end = from_start + requested_survivor_size;
+       }
+       if (from_end > virtual_space()->high()) {
+-	from_end = virtual_space()->high();
++        from_end = virtual_space()->high();
+       }
+     }
+ 
+     assert(to_start >= eden_end, "to-space should be above eden");
+     if (PrintAdaptiveSizePolicy && Verbose) {
+       gclog_or_tty->print_cr("    [eden_start .. eden_end): "
+-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
+-                    eden_start, 
+-                    eden_end, 
++                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
++                    eden_start,
++                    eden_end,
+                     pointer_delta(eden_end, eden_start, sizeof(char)));
+-      gclog_or_tty->print_cr("    [  to_start ..   to_end): " 
+-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
+-                    to_start,   
+-                    to_end, 
++      gclog_or_tty->print_cr("    [  to_start ..   to_end): "
++                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
++                    to_start,
++                    to_end,
+                     pointer_delta(  to_end,   to_start, sizeof(char)));
+-      gclog_or_tty->print_cr("    [from_start .. from_end): " 
+-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
+-                    from_start, 
+-                    from_end, 
++      gclog_or_tty->print_cr("    [from_start .. from_end): "
++                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
++                    from_start,
++                    from_end,
+                     pointer_delta(from_end, from_start, sizeof(char)));
+     }
+   }
+-  
+ 
+-  guarantee((HeapWord*)from_start <= from()->bottom(), 
++
++  guarantee((HeapWord*)from_start <= from()->bottom(),
+             "from start moved to the right");
+   guarantee((HeapWord*)from_end >= from()->top(),
+             "from end moved into live data");
+@@ -534,7 +531,7 @@
+     "not a CMS generational heap");
+ 
+ 
+-  CMSAdaptiveSizePolicy* size_policy = 
++  CMSAdaptiveSizePolicy* size_policy =
+     (CMSAdaptiveSizePolicy*)gch->gen_policy()->size_policy();
+   assert(size_policy->is_gc_cms_adaptive_size_policy(),
+     "Wrong type of size policy");
+@@ -544,7 +541,7 @@
+     // Keep running averages on how much survived
+     size_policy->avg_survived()->sample(survived);
+   } else {
+-    size_t promoted = 
++    size_t promoted =
+       (size_t) next_gen()->gc_stats()->avg_promoted()->last_sample();
+     assert(promoted < gch->capacity(), "Conversion problem?");
+     size_t survived_guess = survived + promoted;
+@@ -563,14 +560,14 @@
+   size_policy->compute_young_generation_free_space(eden()->capacity(),
+                                                    max_gen_size());
+ 
+-  resize(size_policy->calculated_eden_size_in_bytes(), 
+-	 size_policy->calculated_survivor_size_in_bytes());
++  resize(size_policy->calculated_eden_size_in_bytes(),
++         size_policy->calculated_survivor_size_in_bytes());
+ 
+   if (UsePerfData) {
+-    CMSGCAdaptivePolicyCounters* counters = 
++    CMSGCAdaptivePolicyCounters* counters =
+       (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
+-    assert(counters->kind() == 
+-	   GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
++    assert(counters->kind() ==
++           GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
+       "Wrong kind of counters");
+     counters->update_tenuring_threshold(_tenuring_threshold);
+     counters->update_survivor_overflowed(_survivor_overflow);
+@@ -581,7 +578,7 @@
+ 
+ #ifndef PRODUCT
+ // Changes from PSYoungGen version
+-//	value of "alignment"
++//      value of "alignment"
+ void ASParNewGeneration::space_invariants() {
+   const size_t alignment = os::vm_page_size();
+ 
+@@ -592,7 +589,7 @@
+ 
+   // Relationship of spaces to each other
+   char* eden_start = (char*)eden()->bottom();
+-  char* eden_end   = (char*)eden()->end();   
++  char* eden_end   = (char*)eden()->end();
+   char* from_start = (char*)from()->bottom();
+   char* from_end   = (char*)from()->end();
+   char* to_start   = (char*)to()->bottom();
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.hpp openjdk/hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)asParNewGeneration.hpp	1.8 07/05/05 17:05:25 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A Generation that does parallel young-gen collection extended
+@@ -30,23 +27,23 @@
+ 
+ // Division of generation into spaces
+ // done by DefNewGeneration::compute_space_boundaries()
+-//	+---------------+
+-//	| uncommitted	|
+-//	|---------------|
+-//	| ss0		|
+-//	|---------------|
+-//	| ss1		|
+-//	|---------------|
+-//	|		|
+-//	| eden		|
+-//	|		|
+-//	+---------------+	<-- low end of VirtualSpace
++//      +---------------+
++//      | uncommitted   |
++//      |---------------|
++//      | ss0           |
++//      |---------------|
++//      | ss1           |
++//      |---------------|
++//      |               |
++//      | eden          |
++//      |               |
++//      +---------------+       <-- low end of VirtualSpace
+ //
+ class ASParNewGeneration: public ParNewGeneration {
+ 
+   size_t _min_gen_size;
+ 
+-  // Resize the generation based on the desired sizes of 
++  // Resize the generation based on the desired sizes of
+   // the constituent spaces.
+   bool resize_generation(size_t eden_size, size_t survivor_size);
+   // Resize the spaces based on their desired sizes but
+@@ -68,10 +65,10 @@
+ 
+  public:
+ 
+-  ASParNewGeneration(ReservedSpace rs, 
+-		     size_t initial_byte_size, 
+-		     size_t min_byte_size,
+-		     int level);
++  ASParNewGeneration(ReservedSpace rs,
++                     size_t initial_byte_size,
++                     size_t min_byte_size,
++                     int level);
+ 
+   virtual const char* short_name() const { return "ASParNew"; }
+   virtual const char* name() const;
+@@ -80,13 +77,13 @@
+   // Change the sizes of eden and the survivor spaces in
+   // the generation.  The parameters are desired sizes
+   // and are not guaranteed to be met.  For example, if
+-  // the total is larger than the generation. 
++  // the total is larger than the generation.
+   void resize(size_t eden_size, size_t survivor_size);
+ 
+   virtual void compute_new_size();
+ 
+   size_t max_gen_size()                 { return _reserved.byte_size(); }
+-  size_t min_gen_size() const		{ return _min_gen_size; }
++  size_t min_gen_size() const           { return _min_gen_size; }
+ 
+   // Space boundary invariant checker
+   void space_invariants() PRODUCT_RETURN;
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp openjdk/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,28 +1,25 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)parCardTableModRefBS.cpp	1.1 07/05/16 19:06:21 JVM"
+-#endif
+ /*
+  * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+- *   
++ *
+  * This code is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License version 2 only, as
+  * published by the Free Software Foundation.
+- *   
++ *
+  * This code is distributed in the hope that it will be useful, but WITHOUT
+  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+  * version 2 for more details (a copy is included in the LICENSE file that
+  * accompanied this code).
+- *  
++ *
+  * You should have received a copy of the GNU General Public License version
+  * 2 along with this work; if not, write to the Free Software Foundation,
+  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+- *   
++ *
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -35,7 +32,7 @@
+                                                         int n_threads) {
+   if (n_threads > 0) {
+     assert(n_threads == (int)ParallelGCThreads, "# worker threads != # requested!");
+-      
++
+       // Make sure the LNC array is valid for the space.
+     jbyte**   lowest_non_clean;
+     uintptr_t lowest_non_clean_base_chunk_index;
+@@ -52,7 +49,7 @@
+     int stride = 0;
+     while (!pst->is_task_claimed(/* reference */ stride)) {
+       process_stride(sp, mr, stride, n_strides, dcto_cl, cl, clear,
+-                     lowest_non_clean, 
++                     lowest_non_clean,
+                      lowest_non_clean_base_chunk_index,
+                      lowest_non_clean_chunk_size);
+     }
+@@ -73,14 +70,14 @@
+ void
+ CardTableModRefBS::
+ process_stride(Space* sp,
+-	       MemRegion used,
+-	       jint stride, int n_strides,
+-	       DirtyCardToOopClosure* dcto_cl,
+-	       MemRegionClosure* cl,
+-	       bool clear,
+-	       jbyte** lowest_non_clean,
+-	       uintptr_t lowest_non_clean_base_chunk_index,
+-	       size_t    lowest_non_clean_chunk_size) {
++               MemRegion used,
++               jint stride, int n_strides,
++               DirtyCardToOopClosure* dcto_cl,
++               MemRegionClosure* cl,
++               bool clear,
++               jbyte** lowest_non_clean,
++               uintptr_t lowest_non_clean_base_chunk_index,
++               size_t    lowest_non_clean_chunk_size) {
+   // We don't have to go downwards here; it wouldn't help anyway,
+   // because of parallelism.
+ 
+@@ -94,13 +91,13 @@
+ 
+   if ((uintptr_t)stride >= start_chunk_stride_num) {
+     chunk_card_start = (jbyte*)(start_card +
+-				(stride - start_chunk_stride_num) *
+-				CardsPerStrideChunk);
++                                (stride - start_chunk_stride_num) *
++                                CardsPerStrideChunk);
+   } else {
+     // Go ahead to the next chunk group boundary, then to the requested stride.
+     chunk_card_start = (jbyte*)(start_card +
+-				(n_strides - start_chunk_stride_num + stride) *
+-				CardsPerStrideChunk);
++                                (n_strides - start_chunk_stride_num + stride) *
++                                CardsPerStrideChunk);
+   }
+ 
+   while (chunk_card_start < end_card) {
+@@ -116,12 +113,12 @@
+ 
+     // Process the chunk.
+     process_chunk_boundaries(sp,
+-			     dcto_cl,
+-			     chunk_mr,
+-			     used,
+-			     lowest_non_clean,
+-			     lowest_non_clean_base_chunk_index,
+-			     lowest_non_clean_chunk_size);
++                             dcto_cl,
++                             chunk_mr,
++                             used,
++                             lowest_non_clean,
++                             lowest_non_clean_base_chunk_index,
++                             lowest_non_clean_chunk_size);
+ 
+     non_clean_card_iterate_work(chunk_mr, cl, clear);
+ 
+@@ -133,12 +130,12 @@
+ void
+ CardTableModRefBS::
+ process_chunk_boundaries(Space* sp,
+-			 DirtyCardToOopClosure* dcto_cl,
+-			 MemRegion chunk_mr,
+-			 MemRegion used,
+-			 jbyte** lowest_non_clean,
+-			 uintptr_t lowest_non_clean_base_chunk_index,
+-			 size_t    lowest_non_clean_chunk_size)
++                         DirtyCardToOopClosure* dcto_cl,
++                         MemRegion chunk_mr,
++                         MemRegion used,
++                         jbyte** lowest_non_clean,
++                         uintptr_t lowest_non_clean_base_chunk_index,
++                         size_t    lowest_non_clean_chunk_size)
+ {
+   // We must worry about the chunk boundaries.
+ 
+@@ -153,7 +150,7 @@
+     HeapWord* last_block = sp->block_start(chunk_mr.end());
+     assert(last_block <= chunk_mr.end(), "In case this property changes.");
+     if (last_block == chunk_mr.end()
+-	|| !sp->block_is_obj(last_block)) {
++        || !sp->block_is_obj(last_block)) {
+       max_to_do = chunk_mr.end();
+ 
+     } else {
+@@ -163,57 +160,57 @@
+       // the chunk.
+       jbyte* last_obj_card = byte_for(last_block);
+       if (!card_may_have_been_dirty(*last_obj_card)) {
+-	// The card containing the head is not dirty.  Any marks in
+-	// subsequent cards still in this chunk must have been made
+-	// precisely; we can cap processing at the end.
+-	max_to_do = chunk_mr.end();
++        // The card containing the head is not dirty.  Any marks in
++        // subsequent cards still in this chunk must have been made
++        // precisely; we can cap processing at the end.
++        max_to_do = chunk_mr.end();
+       } else {
+-	// The last object must be considered dirty, and extends onto the
+-	// following chunk.  Look for a dirty card in that chunk that will
+-	// bound our processing.
+-	jbyte* limit_card = NULL;
+-	size_t last_block_size = sp->block_size(last_block);
+-	jbyte* last_card_of_last_obj =
+-	  byte_for(last_block + last_block_size - 1);
+-	jbyte* first_card_of_next_chunk = byte_for(chunk_mr.end());
+-	// This search potentially goes a long distance looking
+-	// for the next card that will be scanned.  For example,
+-	// an object that is an array of primitives will not
+-	// have any cards covering regions interior to the array
+-	// that will need to be scanned. The scan can be terminated
+-	// at the last card of the next chunk.  That would leave
+-	// limit_card as NULL and would result in "max_to_do"
+-	// being set with the LNC value or with the end
+-	// of the last block.
+-	jbyte* last_card_of_next_chunk = first_card_of_next_chunk + 
+-	  CardsPerStrideChunk; 
+-	assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start())
+-	  == CardsPerStrideChunk, "last card of next chunk may be wrong");
+-	jbyte* last_card_to_check = (jbyte*) MIN2(last_card_of_last_obj,
+-					          last_card_of_next_chunk);
+-	for (jbyte* cur = first_card_of_next_chunk;
+-	     cur <= last_card_to_check; cur++) {
+-	  if (card_will_be_scanned(*cur)) {
+-	    limit_card = cur; break;
+-	  }
+-	}
++        // The last object must be considered dirty, and extends onto the
++        // following chunk.  Look for a dirty card in that chunk that will
++        // bound our processing.
++        jbyte* limit_card = NULL;
++        size_t last_block_size = sp->block_size(last_block);
++        jbyte* last_card_of_last_obj =
++          byte_for(last_block + last_block_size - 1);
++        jbyte* first_card_of_next_chunk = byte_for(chunk_mr.end());
++        // This search potentially goes a long distance looking
++        // for the next card that will be scanned.  For example,
++        // an object that is an array of primitives will not
++        // have any cards covering regions interior to the array
++        // that will need to be scanned. The scan can be terminated
++        // at the last card of the next chunk.  That would leave
++        // limit_card as NULL and would result in "max_to_do"
++        // being set with the LNC value or with the end
++        // of the last block.
++        jbyte* last_card_of_next_chunk = first_card_of_next_chunk +
++          CardsPerStrideChunk;
++        assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start())
++          == CardsPerStrideChunk, "last card of next chunk may be wrong");
++        jbyte* last_card_to_check = (jbyte*) MIN2(last_card_of_last_obj,
++                                                  last_card_of_next_chunk);
++        for (jbyte* cur = first_card_of_next_chunk;
++             cur <= last_card_to_check; cur++) {
++          if (card_will_be_scanned(*cur)) {
++            limit_card = cur; break;
++          }
++        }
+         assert(0 <= cur_chunk_index+1 &&
+                cur_chunk_index+1 < lowest_non_clean_chunk_size,
+                "Bounds error.");
+-	// LNC for the next chunk
++        // LNC for the next chunk
+         jbyte* lnc_card = lowest_non_clean[cur_chunk_index+1];
+-	if (limit_card == NULL) {
+-	  limit_card = lnc_card;
+-	}
+-	if (limit_card != NULL) {
+-	  if (lnc_card != NULL) {
+-   	    limit_card = (jbyte*)MIN2((intptr_t)limit_card,
+-				      (intptr_t)lnc_card);
++        if (limit_card == NULL) {
++          limit_card = lnc_card;
++        }
++        if (limit_card != NULL) {
++          if (lnc_card != NULL) {
++            limit_card = (jbyte*)MIN2((intptr_t)limit_card,
++                                      (intptr_t)lnc_card);
+           }
+-	  max_to_do = addr_for(limit_card);
+-	} else {
+-	  max_to_do = last_block + last_block_size;
+-	}
++          max_to_do = addr_for(limit_card);
++        } else {
++          max_to_do = last_block + last_block_size;
++        }
+       }
+     }
+     assert(max_to_do != NULL, "OOPS!");
+@@ -234,28 +231,28 @@
+     // first_block is the block possibly spanning the chunk start
+     HeapWord* first_block = sp->block_start(chunk_mr.start());
+     // Does the block span the start of the chunk and is it
+-    // an object?  
++    // an object?
+     if (first_block < chunk_mr.start() &&
+-	sp->block_is_obj(first_block)) {
++        sp->block_is_obj(first_block)) {
+       jbyte* first_dirty_card = NULL;
+       jbyte* last_card_of_first_obj =
+-	  byte_for(first_block + sp->block_size(first_block) - 1);
++          byte_for(first_block + sp->block_size(first_block) - 1);
+       jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
+       jbyte* last_card_of_cur_chunk = byte_for(chunk_mr.last());
+-      jbyte* last_card_to_check = 
+-	(jbyte*) MIN2((intptr_t) last_card_of_cur_chunk,
+-		      (intptr_t) last_card_of_first_obj);
++      jbyte* last_card_to_check =
++        (jbyte*) MIN2((intptr_t) last_card_of_cur_chunk,
++                      (intptr_t) last_card_of_first_obj);
+       for (jbyte* cur = first_card_of_cur_chunk;
+-	   cur <= last_card_to_check; cur++) {
+-	if (card_will_be_scanned(*cur)) {
+-	  first_dirty_card = cur; break;
+-	}
++           cur <= last_card_to_check; cur++) {
++        if (card_will_be_scanned(*cur)) {
++          first_dirty_card = cur; break;
++        }
+       }
+       if (first_dirty_card != NULL) {
+-	assert(0 <= cur_chunk_index &&
+-		 cur_chunk_index < lowest_non_clean_chunk_size,
+-	       "Bounds error.");
+-	lowest_non_clean[cur_chunk_index] = first_dirty_card;
++        assert(0 <= cur_chunk_index &&
++                 cur_chunk_index < lowest_non_clean_chunk_size,
++               "Bounds error.");
++        lowest_non_clean[cur_chunk_index] = first_dirty_card;
+       }
+     }
+   }
+@@ -264,19 +261,19 @@
+ void
+ CardTableModRefBS::
+ get_LNC_array_for_space(Space* sp,
+-			jbyte**& lowest_non_clean,
+-			uintptr_t& lowest_non_clean_base_chunk_index,
+-			size_t& lowest_non_clean_chunk_size) {
++                        jbyte**& lowest_non_clean,
++                        uintptr_t& lowest_non_clean_base_chunk_index,
++                        size_t& lowest_non_clean_chunk_size) {
+ 
+   int       i        = find_covering_region_containing(sp->bottom());
+   MemRegion covered  = _covered[i];
+   size_t    n_chunks = chunks_to_cover(covered);
+-  
++
+   // Only the first thread to obtain the lock will resize the
+   // LNC array for the covered region.  Any later expansion can't affect
+   // the used_at_save_marks region.
+   // (I observed a bug in which the first thread to execute this would
+-  // resize, and then it would cause "expand_and_allocates" that would 
++  // resize, and then it would cause "expand_and_allocates" that would
+   // Increase the number of chunks in the covered region.  Then a second
+   // thread would come and execute this, see that the size didn't match,
+   // and free and allocate again.  So the first thread would be using a
+@@ -290,23 +287,23 @@
+     MutexLocker x(ParGCRareEvent_lock);
+     if (_last_LNC_resizing_collection[i] != cur_collection) {
+       if (_lowest_non_clean[i] == NULL ||
+-	  n_chunks != _lowest_non_clean_chunk_size[i]) {
+-	
+-	// Should we delete the old?
+-	if (_lowest_non_clean[i] != NULL) {
+-	  assert(n_chunks != _lowest_non_clean_chunk_size[i],
+-		 "logical consequence");
+-	  FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i]);
+-	  _lowest_non_clean[i] = NULL;
+-	}
+-	// Now allocate a new one if necessary.
+-	if (_lowest_non_clean[i] == NULL) {
+-	  _lowest_non_clean[i]                  = NEW_C_HEAP_ARRAY(CardPtr, n_chunks);
+-	  _lowest_non_clean_chunk_size[i]       = n_chunks;
+-	  _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start());
+-	  for (int j = 0; j < (int)n_chunks; j++)
+-	    _lowest_non_clean[i][j] = NULL;
+-	}
++          n_chunks != _lowest_non_clean_chunk_size[i]) {
++
++        // Should we delete the old?
++        if (_lowest_non_clean[i] != NULL) {
++          assert(n_chunks != _lowest_non_clean_chunk_size[i],
++                 "logical consequence");
++          FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i]);
++          _lowest_non_clean[i] = NULL;
++        }
++        // Now allocate a new one if necessary.
++        if (_lowest_non_clean[i] == NULL) {
++          _lowest_non_clean[i]                  = NEW_C_HEAP_ARRAY(CardPtr, n_chunks);
++          _lowest_non_clean_chunk_size[i]       = n_chunks;
++          _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start());
++          for (int j = 0; j < (int)n_chunks; j++)
++            _lowest_non_clean[i][j] = NULL;
++        }
+       }
+       _last_LNC_resizing_collection[i] = cur_collection;
+     }
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp openjdk/hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)parGCAllocBuffer.cpp	1.28 07/05/29 09:44:12 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -66,9 +63,9 @@
+     } else {
+       // Is there wasted space we'd like to retain for the next GC?
+       if (pointer_delta(_end, _top) > FillerHeaderSize) {
+-	_retained = true;
+-	_retained_filler = MemRegion(_top, FillerHeaderSize);
+-	_top = _top + FillerHeaderSize;
++        _retained = true;
++        _retained_filler = MemRegion(_top, FillerHeaderSize);
++        _top = _top + FillerHeaderSize;
+       } else {
+         invalidate();
+       }
+@@ -137,7 +134,7 @@
+      (size_t)Generation::GenGrain);
+ 
+ ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
+-						 BlockOffsetSharedArray* bsa) :
++                                                 BlockOffsetSharedArray* bsa) :
+   ParGCAllocBuffer(word_sz),
+   _bsa(bsa),
+   _bt(bsa, MemRegion(_bottom, _hard_end)),
+@@ -156,7 +153,7 @@
+ // parameter below to directly manipulate the shared array without
+ // modifying the _next_threshold state in the BOT.
+ void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,
+-						     bool contig) {
++                                                     bool contig) {
+   SharedHeap::fill_region_with_object(mr);
+   if (contig) {
+     _bt.alloc_block(mr.start(), mr.end());
+@@ -169,8 +166,8 @@
+   HeapWord* res = NULL;
+   if (_true_end > _hard_end) {
+     assert((HeapWord*)align_size_down(intptr_t(_hard_end),
+-				      ChunkSizeInBytes) == _hard_end,
+-	   "or else _true_end should be equal to _hard_end");
++                                      ChunkSizeInBytes) == _hard_end,
++           "or else _true_end should be equal to _hard_end");
+     assert(_retained, "or else _true_end should be equal to _hard_end");
+     assert(_retained_filler.end() <= _top, "INVARIANT");
+     SharedHeap::fill_region_with_object(_retained_filler);
+@@ -204,7 +201,7 @@
+   if (_retained) {
+     // We're about to make the retained_filler into a block.
+     _bt.BlockOffsetArray::alloc_block(_retained_filler.start(),
+-				      _retained_filler.end());
++                                      _retained_filler.end());
+   }
+   // Reset _hard_end to _true_end (and update _end)
+   if (retain && _hard_end != NULL) {
+@@ -231,7 +228,7 @@
+     HeapWord* first_card_start = _bsa->address_for_index(first_card_index);
+     if (first_card_start < pre_top) {
+       HeapWord* second_card_start =
+-	_bsa->address_for_index(first_card_index + 1);
++        _bsa->address_for_index(first_card_index + 1);
+ 
+       // Ensure enough room to fill with the smallest block
+       second_card_start = MAX2(second_card_start, pre_top + AlignmentReserve);
+@@ -243,8 +240,8 @@
+         second_card_start = _hard_end;
+       }
+       if (pre_top < second_card_start) {
+-	MemRegion first_card_suffix(pre_top, second_card_start);
+-	fill_region_with_block(first_card_suffix, true);
++        MemRegion first_card_suffix(pre_top, second_card_start);
++        fill_region_with_block(first_card_suffix, true);
+       }
+       pre_top = second_card_start;
+       _top = pre_top;
+@@ -269,8 +266,8 @@
+         last_card_start = _top;
+       }
+       if (last_card_start < _hard_end) {
+-	MemRegion last_card_prefix(last_card_start, _hard_end);
+-	fill_region_with_block(last_card_prefix, false);
++        MemRegion last_card_prefix(last_card_start, _hard_end);
++        fill_region_with_block(last_card_prefix, false);
+       }
+       _hard_end = last_card_start;
+       _end      = MAX2(_top, _hard_end - AlignmentReserve);
+@@ -288,10 +285,10 @@
+       _top = pre_top + ParGCAllocBuffer::FillerHeaderSize;
+       // If there's no space left, don't retain.
+       if (_top >= _end) {
+-	_retained = false;
++        _retained = false;
+         invalidate();
+-	return;
+-      } 
++        return;
++      }
+       _retained_filler = MemRegion(pre_top, _top);
+       _bt.set_region(MemRegion(_top, _hard_end));
+       _bt.initialize_threshold();
+@@ -307,30 +304,30 @@
+ 
+       // "chunk_boundary" is the address of the first chunk boundary less
+       // than "hard_end".
+-      HeapWord* chunk_boundary = 
+-        (HeapWord*)align_size_down(intptr_t(_hard_end-1), ChunkSizeInBytes); 
+-      assert(chunk_boundary < _hard_end, "Or else above did not work."); 
++      HeapWord* chunk_boundary =
++        (HeapWord*)align_size_down(intptr_t(_hard_end-1), ChunkSizeInBytes);
++      assert(chunk_boundary < _hard_end, "Or else above did not work.");
+       assert(pointer_delta(_true_end, chunk_boundary) >= AlignmentReserve,
+              "Consequence of last card handling above.");
+- 
+-      if (_top <= chunk_boundary) { 
+-	assert(_true_end == _hard_end, "Invariant.");
+-	while (_top <= chunk_boundary) {
++
++      if (_top <= chunk_boundary) {
++        assert(_true_end == _hard_end, "Invariant.");
++        while (_top <= chunk_boundary) {
+           assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve,
+                  "Consequence of last card handling above.");
+-	  MemRegion chunk_portion(chunk_boundary, _hard_end);
+-	  _bt.BlockOffsetArray::alloc_block(chunk_portion.start(),
++          MemRegion chunk_portion(chunk_boundary, _hard_end);
++          _bt.BlockOffsetArray::alloc_block(chunk_portion.start(),
+                                             chunk_portion.end());
+-	  SharedHeap::fill_region_with_object(chunk_portion);
+-	  _hard_end = chunk_portion.start();
+-	  chunk_boundary -= ChunkSizeInWords;
+-	}
++          SharedHeap::fill_region_with_object(chunk_portion);
++          _hard_end = chunk_portion.start();
++          chunk_boundary -= ChunkSizeInWords;
++        }
+         _end = _hard_end - AlignmentReserve;
+         assert(_top <= _end, "Invariant.");
+-	// Now reset the initial filler chunk so it doesn't overlap with
+-	// the one(s) inserted above.
+-	MemRegion new_filler(pre_top, _hard_end);
+-	fill_region_with_block(new_filler, false);
++        // Now reset the initial filler chunk so it doesn't overlap with
++        // the one(s) inserted above.
++        MemRegion new_filler(pre_top, _hard_end);
++        fill_region_with_block(new_filler, false);
+       }
+     } else {
+       _retained = false;
+@@ -338,7 +335,7 @@
+     }
+   } else {
+     assert(!end_of_gc ||
+-	   (!_retained && _true_end == _hard_end), "Checking.");
++           (!_retained && _true_end == _hard_end), "Checking.");
+   }
+   assert(_end <= _hard_end, "Invariant.");
+   assert(_top < _end || _top == _hard_end, "Invariant");
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp openjdk/hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)parGCAllocBuffer.hpp	1.30 07/05/29 09:44:13 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Forward decl.
+@@ -75,12 +72,12 @@
+     }
+   }
+ 
+-  // Undo the last allocation in the buffer, which is required to be of the 
++  // Undo the last allocation in the buffer, which is required to be of the
+   // "obj" of the given "word_sz".
+   void undo_allocation(HeapWord* obj, size_t word_sz) {
+     assert(_top - word_sz >= _bottom
+-	   && _top - word_sz == obj,
+-	   "Bad undo_allocation");
++           && _top - word_sz == obj,
++           "Bad undo_allocation");
+     _top = _top - word_sz;
+   }
+ 
+@@ -155,7 +152,7 @@
+   size_t _desired_plab_sz;// output of filter (below), suitably trimmed and quantized
+   AdaptiveWeightedAverage
+          _filter;         // integrator with decay
+-  
++
+  public:
+   PLABStats(size_t desired_plab_sz_, unsigned wt) :
+     _allocated(0),
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp openjdk/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)parNewGeneration.cpp	1.101 07/05/22 17:23:45 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -34,9 +31,9 @@
+ #endif
+ ParScanThreadState::ParScanThreadState(Space* to_space_,
+                                        ParNewGeneration* gen_,
+-				       Generation* old_gen_,
+-				       int thread_num_,
+-				       ObjToScanQueueSet* work_queue_set_,
++                                       Generation* old_gen_,
++                                       int thread_num_,
++                                       ObjToScanQueueSet* work_queue_set_,
+                                        size_t desired_plab_sz_,
+                                        ParallelTaskTerminator& term_) :
+   _to_space(to_space_), _old_gen(old_gen_), _thread_num(thread_num_),
+@@ -123,7 +120,7 @@
+ 
+ void ParScanThreadState::trim_queues(int max_size) {
+   ObjToScanQueue* queue = work_queue();
+-  while (queue->size() > (juint)max_size) { 
++  while (queue->size() > (juint)max_size) {
+     oop obj_to_scan;
+     if (queue->pop_local(obj_to_scan)) {
+       note_pop();
+@@ -154,7 +151,7 @@
+     ParGCAllocBuffer* const plab = to_space_alloc_buffer();
+     Space*            const sp   = to_space();
+     if (word_sz * 100 <
+-	ParallelGCBufferWastePct * plab->word_sz()) {
++        ParallelGCBufferWastePct * plab->word_sz()) {
+       // Is small enough; abandon this buffer and start a new one.
+       plab->retire(false, false);
+       size_t buf_size = plab->word_sz();
+@@ -167,25 +164,25 @@
+           buf_size = free_bytes >> LogHeapWordSize;
+           assert(buf_size == (size_t)align_object_size(buf_size),
+                  "Invariant");
+-	  buf_space  = sp->par_allocate(buf_size);
++          buf_space  = sp->par_allocate(buf_size);
+           free_bytes = sp->free();
+         }
+       }
+       if (buf_space != NULL) {
+-	plab->set_word_size(buf_size);
+-	plab->set_buf(buf_space);
++        plab->set_word_size(buf_size);
++        plab->set_buf(buf_space);
+         record_survivor_plab(buf_space, buf_size);
+-	obj = plab->allocate(word_sz);
++        obj = plab->allocate(word_sz);
+         // Note that we cannot compare buf_size < word_sz below
+         // because of AlignmentReserve (see ParGCAllocBuffer::allocate()).
+-	assert(obj != NULL || plab->words_remaining() < word_sz,
++        assert(obj != NULL || plab->words_remaining() < word_sz,
+                "Else should have been able to allocate");
+         // It's conceivable that we may be able to use the
+         // buffer we just grabbed for subsequent small requests
+         // even if not for this one.
+       } else {
+-	// We're used up.
+-	_to_space_full = true;
++        // We're used up.
++        _to_space_full = true;
+       }
+ 
+     } else {
+@@ -198,11 +195,11 @@
+ 
+ 
+ void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
+-						size_t word_sz) {
++                                                size_t word_sz) {
+   // Is the alloc in the current alloc buffer?
+   if (to_space_alloc_buffer()->contains(obj)) {
+     assert(to_space_alloc_buffer()->contains(obj + word_sz - 1),
+-	   "Should contain whole object.");
++           "Should contain whole object.");
+     to_space_alloc_buffer()->undo_allocation(obj, word_sz);
+   } else {
+     SharedHeap::fill_region_with_object(MemRegion(obj, word_sz));
+@@ -212,11 +209,11 @@
+ class ParScanThreadStateSet: private ResourceArray {
+ public:
+   // Initializes states for the specified number of threads;
+-  ParScanThreadStateSet(int                     num_threads, 
+-                        Space&                  to_space, 
++  ParScanThreadStateSet(int                     num_threads,
++                        Space&                  to_space,
+                         ParNewGeneration&       gen,
+-                        Generation&             old_gen, 
+-                        ObjToScanQueueSet&      queue_set, 
++                        Generation&             old_gen,
++                        ObjToScanQueueSet&      queue_set,
+                         size_t                  desired_plab_sz,
+                         ParallelTaskTerminator& term);
+   inline ParScanThreadState& thread_sate(int i);
+@@ -238,7 +235,7 @@
+ 
+ ParScanThreadStateSet::ParScanThreadStateSet(
+   int num_threads, Space& to_space, ParNewGeneration& gen,
+-  Generation& old_gen, ObjToScanQueueSet& queue_set, 
++  Generation& old_gen, ObjToScanQueueSet& queue_set,
+   size_t desired_plab_sz, ParallelTaskTerminator& term)
+   : ResourceArray(sizeof(ParScanThreadState), num_threads),
+     _gen(gen), _next_gen(old_gen), _term(term),
+@@ -247,7 +244,7 @@
+   assert(num_threads > 0, "sanity check!");
+   // Initialize states.
+   for (int i = 0; i < num_threads; ++i) {
+-    new ((ParScanThreadState*)_data + i) 
++    new ((ParScanThreadState*)_data + i)
+         ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
+                            desired_plab_sz, term);
+   }
+@@ -269,7 +266,7 @@
+ {
+   for (int i = 0; i < length(); ++i) {
+     ParScanThreadState& par_scan_state = thread_sate(i);
+-  
++
+     // Flush stats related to To-space PLAB activity and
+     // retire the last buffer.
+     par_scan_state.to_space_alloc_buffer()->
+@@ -324,7 +321,7 @@
+ 
+ 
+ ParScanClosure::ParScanClosure(ParNewGeneration* g,
+-			       ParScanThreadState* par_scan_state) :
++                               ParScanThreadState* par_scan_state) :
+   OopsInGenClosure(g), _par_scan_state(par_scan_state), _g(g)
+ {
+   assert(_g->level() == 0, "Optimized for youngest generation");
+@@ -400,7 +397,7 @@
+ }
+ 
+ ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,
+-		HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
++                HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
+     AbstractGangTask("ParNewGeneration collection"),
+     _gen(gen), _next_gen(next_gen),
+     _young_old_boundary(young_old_boundary),
+@@ -421,7 +418,7 @@
+ 
+   ParScanThreadState& par_scan_state = _state_set->thread_sate(i);
+   par_scan_state.set_young_old_boundary(_young_old_boundary);
+-  
++
+   par_scan_state.start_strong_roots();
+   gch->gen_process_strong_roots(_gen->level(),
+                                 true, // Process younger gens, if any,
+@@ -520,14 +517,14 @@
+       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
+       if (obj->is_forwarded()) {
+         *p = obj->forwardee();
+-      } else {        
++      } else {
+         *p = _g->DefNewGeneration::copy_to_survivor_space(obj, p);
+       }
+     }
+     if (_gc_barrier) {
+       // If p points to a younger generation, mark the card.
+       if ((HeapWord*)obj < _gen_boundary) {
+-	_rs->write_ref_field_gc_par(p, obj);
++        _rs->write_ref_field_gc_par(p, obj);
+       }
+     }
+   }
+@@ -536,14 +533,14 @@
+ class ParNewRefProcTaskProxy: public AbstractGangTask {
+   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
+ public:
+-  ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen,		
++  ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen,
+                          Generation& next_gen,
+                          HeapWord* young_old_boundary,
+                          ParScanThreadStateSet& state_set);
+ 
+ private:
+   virtual void work(int i);
+-  
++
+ private:
+   ParNewGeneration&      _gen;
+   ProcessTask&           _task;
+@@ -553,14 +550,14 @@
+ };
+ 
+ ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(
+-    ProcessTask& task, ParNewGeneration& gen,		
+-    Generation& next_gen, 
++    ProcessTask& task, ParNewGeneration& gen,
++    Generation& next_gen,
+     HeapWord* young_old_boundary,
+     ParScanThreadStateSet& state_set)
+   : AbstractGangTask("ParNewGeneration parallel reference processing"),
+     _gen(gen),
+     _task(task),
+-    _next_gen(next_gen), 
++    _next_gen(next_gen),
+     _young_old_boundary(young_old_boundary),
+     _state_set(state_set)
+ {
+@@ -572,8 +569,8 @@
+   HandleMark hm;
+   ParScanThreadState& par_scan_state = _state_set.thread_sate(i);
+   par_scan_state.set_young_old_boundary(_young_old_boundary);
+-  _task.work(i, par_scan_state.is_alive_closure(), 
+-             par_scan_state.keep_alive_closure(), 
++  _task.work(i, par_scan_state.is_alive_closure(),
++             par_scan_state.keep_alive_closure(),
+              par_scan_state.evacuate_followers_closure());
+ }
+ 
+@@ -616,9 +613,9 @@
+   workers->run_task(&enq_task);
+ }
+ 
+-void ParNewRefProcTaskExecutor::set_single_threaded_mode() 
+-{ 
+-  _state_set.flush(); 
++void ParNewRefProcTaskExecutor::set_single_threaded_mode()
++{
++  _state_set.flush();
+   GenCollectedHeap* gch = GenCollectedHeap::heap();
+   gch->set_par_threads(0);  // 0 ==> non-parallel.
+   gch->save_marks();
+@@ -630,8 +627,8 @@
+ 
+ EvacuateFollowersClosureGeneral::
+ EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
+-				OopsInGenClosure* cur,
+-				OopsInGenClosure* older) :
++                                OopsInGenClosure* cur,
++                                OopsInGenClosure* older) :
+   _gch(gch), _level(level),
+   _scan_cur_or_nonheap(cur), _scan_older(older)
+ {}
+@@ -641,8 +638,8 @@
+     // Beware: this call will lead to closure applications via virtual
+     // calls.
+     _gch->oop_since_save_marks_iterate(_level,
+-				       _scan_cur_or_nonheap,
+-				       _scan_older);
++                                       _scan_cur_or_nonheap,
++                                       _scan_older);
+   } while (!_gch->no_allocs_since_save_marks(_level));
+ }
+ 
+@@ -659,7 +656,7 @@
+ 
+ void ParNewGeneration::collect(bool   full,
+                                bool   clear_all_soft_refs,
+-			       size_t size,
++                               size_t size,
+                                bool   is_tlab) {
+   assert(full || size > 0, "otherwise we don't want to collect");
+   GenCollectedHeap* gch = GenCollectedHeap::heap();
+@@ -668,10 +665,10 @@
+   AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
+   WorkGang* workers = gch->workers();
+   _next_gen = gch->next_gen(this);
+-  assert(_next_gen != NULL, 
++  assert(_next_gen != NULL,
+     "This must be the youngest gen, and not the only gen");
+   assert(gch->n_gens() == 2,
+-	 "Par collection currently only works with single older gen.");
++         "Par collection currently only works with single older gen.");
+   // Do we have to avoid promotion_undo?
+   if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
+     set_avoid_promotion_undo(true);
+@@ -706,7 +703,7 @@
+   assert(workers != NULL, "Need parallel worker threads.");
+   ParallelTaskTerminator _term(workers->total_workers(), task_queues());
+   ParScanThreadStateSet thread_state_set(workers->total_workers(),
+-                                         *to(), *this, *_next_gen, *task_queues(), 
++                                         *to(), *this, *_next_gen, *task_queues(),
+                                          desired_plab_sz(), _term);
+ 
+   ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
+@@ -715,7 +712,7 @@
+   gch->change_strong_roots_parity();
+   gch->rem_set()->prepare_for_younger_refs_iterate(true);
+   // It turns out that even when we're using 1 thread, doing the work in a
+-  // separate thread causes wide variance in run times.  We can't help this 
++  // separate thread causes wide variance in run times.  We can't help this
+   // in the multi-threaded case, but we special-case n=1 here to get
+   // repeatable measurements of the 1-thread overhead of the parallel code.
+   if (n_workers > 1) {
+@@ -727,13 +724,13 @@
+ 
+   if (PAR_STATS_ENABLED && ParallelGCVerbose) {
+     gclog_or_tty->print("Thread totals:\n"
+-	       "  Pushes: %7d    Pops: %7d    Steals %7d (sum = %7d).\n",
+-	       thread_state_set.pushes(), thread_state_set.pops(), 
++               "  Pushes: %7d    Pops: %7d    Steals %7d (sum = %7d).\n",
++               thread_state_set.pushes(), thread_state_set.pops(),
+                thread_state_set.steals(),
+-	       thread_state_set.pops()+thread_state_set.steals());
++               thread_state_set.pops()+thread_state_set.steals());
+   }
+   assert(thread_state_set.pushes() == thread_state_set.pops() + thread_state_set.steals(),
+-	 "Or else the queues are leaky.");
++         "Or else the queues are leaky.");
+ 
+   // For now, process discovered weak refs sequentially.
+ #ifdef COMPILER2
+@@ -741,7 +738,7 @@
+ #else
+   ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy();
+ #endif // COMPILER2
+- 
++
+   // Process (weak) reference objects found during scavenge.
+   IsAliveClosure is_alive(this);
+   ScanWeakRefClosure scan_weak_ref(this);
+@@ -749,12 +746,12 @@
+   ScanClosure               scan_without_gc_barrier(this, false);
+   ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
+   set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
+-  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level, 
++  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
+     &scan_without_gc_barrier, &scan_with_gc_barrier);
+   if (ref_processor()->processing_is_mt()) {
+     ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
+     ref_processor()->process_discovered_references(
+-        soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers, 
++        soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers,
+         &task_executor);
+   } else {
+     thread_state_set.flush();
+@@ -769,10 +766,10 @@
+     eden()->clear();
+     from()->clear();
+     swap_spaces();
+-  
++
+     assert(to()->is_empty(), "to space should be empty now");
+   } else {
+-    assert(HandlePromotionFailure, 
++    assert(HandlePromotionFailure,
+       "Should only be here if promotion failure handling is on");
+     if (_promo_failure_scan_stack != NULL) {
+       // Can be non-null because of reference processing.
+@@ -810,7 +807,7 @@
+   update_time_of_last_gc(os::javaTimeMillis());
+ 
+   SpecializationStats::print();
+-  
++
+   ref_processor()->set_enqueuing_is_done(true);
+   if (ref_processor()->processing_is_mt()) {
+     ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
+@@ -878,9 +875,9 @@
+ // to install the forwarding pointer before it copies the object,
+ // thus avoiding the need to undo the copy as in
+ // copy_to_survivor_space_avoiding_with_undo.
+- 
++
+ oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
+-	ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
++        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
+   // In the sequential version, this assert also says that the object is
+   // not forwarded.  That might not be the case here.  It is the case that
+   // the caller observed it to be not forwarded at some time in the past.
+@@ -893,8 +890,8 @@
+   oopDesc dummyOld;
+   dummyOld.set_mark(m);
+   assert(!dummyOld.is_forwarded(),
+-	 "should not be called with forwarding pointer mark word.");
+-  
++         "should not be called with forwarding pointer mark word.");
++
+   oop new_obj = NULL;
+   oop forward_ptr;
+ 
+@@ -915,11 +912,11 @@
+     forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
+     if (forward_ptr != NULL) {
+       // someone else beat us to it.
+-	return real_forwardee(old);
++        return real_forwardee(old);
+     }
+ 
+     new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
+-				       old, m, sz);
++                                       old, m, sz);
+ 
+     if (new_obj == NULL) {
+       if (!HandlePromotionFailure) {
+@@ -970,7 +967,7 @@
+     par_scan_state->note_push();
+ 
+     return new_obj;
+-  } 
++  }
+ 
+   // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
+   // allocate it?
+@@ -993,7 +990,7 @@
+ // the forwarding pointer.  The other threads have to undo their copy.
+ 
+ oop ParNewGeneration::copy_to_survivor_space_with_undo(
+-	ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
++        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
+ 
+   // In the sequential version, this assert also says that the object is
+   // not forwarded.  That might not be the case here.  It is the case that
+@@ -1007,8 +1004,8 @@
+   oopDesc dummyOld;
+   dummyOld.set_mark(m);
+   assert(!dummyOld.is_forwarded(),
+-	 "should not be called with forwarding pointer mark word.");
+-  
++         "should not be called with forwarding pointer mark word.");
++
+   bool failed_to_promote = false;
+   oop new_obj = NULL;
+   oop forward_ptr;
+@@ -1025,7 +1022,7 @@
+     // Either to-space is full or we decided to promote
+     // try allocating obj tenured
+     new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
+-				       old, m, sz);
++                                       old, m, sz);
+ 
+     if (new_obj == NULL) {
+       if (!HandlePromotionFailure) {
+@@ -1084,7 +1081,7 @@
+     par_scan_state->note_push();
+ 
+     return new_obj;
+-  } 
++  }
+ 
+   // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
+   // allocate it?
+@@ -1126,7 +1123,7 @@
+   ObjToScanQueue* work_q = par_scan_state->work_queue();
+   // How many to take?
+   int objsFromOverflow = MIN2(work_q->max_elems()/4,
+-			      (juint)ParGCDesiredObjsFromOverflowList);
++                              (juint)ParGCDesiredObjsFromOverflowList);
+ 
+   if (_overflow_list == NULL) return false;
+ 
+@@ -1193,7 +1190,7 @@
+         _reserved,                  // span
+         refs_discovery_is_atomic(), // atomic_discovery
+         refs_discovery_is_mt(),     // mt_discovery
+-        &_is_alive_closure,
++        NULL,                       // is_alive_non_header
+         ParallelGCThreads,
+         ParallelRefProcEnabled);
+   }
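+
(The hunks above reformat ParNewGeneration's copy_to_survivor_space_avoiding_promotion_undo and its _with_undo sibling. As a minimal standalone sketch of the claim-then-copy protocol they implement — illustrative std::atomic types, not HotSpot's markOop machinery — one thread wins a CAS on a sentinel forwarding value, copies, and publishes; losers wait for the real forwardee:)

    #include <atomic>
    #include <cstddef>
    #include <cstring>

    struct Obj {
      std::atomic<Obj*> forwardee{nullptr};  // stands in for the mark word
      std::size_t size = 0;                  // assumed <= sizeof payload
      unsigned char payload[64];
    };

    Obj* const CLAIMED = reinterpret_cast<Obj*>(1);  // ClaimedForwardPtr analogue

    // Exactly one GC worker wins the CAS, copies the object, and publishes
    // the real forwardee; losers spin, as real_forwardee_slow() does above.
    Obj* copy_to_survivor_space(Obj* old_obj, Obj* (*alloc)(std::size_t)) {
      Obj* expected = nullptr;
      if (old_obj->forwardee.compare_exchange_strong(expected, CLAIMED)) {
        Obj* copy = alloc(old_obj->size);          // to-space or promotion
        copy->size = old_obj->size;
        std::memcpy(copy->payload, old_obj->payload, old_obj->size);
        old_obj->forwardee.store(copy);            // publish real forwardee
        return copy;
      }
      Obj* fwd;
      while ((fwd = old_obj->forwardee.load()) == CLAIMED) { /* spin */ }
      return fwd;
    }
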
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp openjdk/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)parNewGeneration.hpp	1.48 07/05/17 15:52:44 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class ChunkArray;
+@@ -33,7 +30,7 @@
+ class ParEvacuateFollowersClosure;
+ 
+ // It would be better if these types could be kept local to the .cpp file,
+-// but they must be here to allow ParScanClosure::do_oop_work to be defined 
++// but they must be here to allow ParScanClosure::do_oop_work to be defined
+ // in genOopClosures.inline.hpp.
+ 
+ 
+@@ -73,7 +70,7 @@
+   DefNewGeneration::IsAliveClosure     _is_alive_closure;
+   ParScanWeakRefClosure                _scan_weak_ref_closure;
+   ParKeepAliveClosure                  _keep_alive_closure;
+-  
++
+ 
+   Space* _to_space;
+   Space* to_space() { return _to_space; }
+@@ -109,20 +106,20 @@
+ 
+   void record_survivor_plab(HeapWord* plab_start, size_t plab_word_size);
+ 
+-  ParScanThreadState(Space* to_space_, ParNewGeneration* gen_, 
++  ParScanThreadState(Space* to_space_, ParNewGeneration* gen_,
+                      Generation* old_gen_, int thread_num_,
+                      ObjToScanQueueSet* work_queue_set_, size_t desired_plab_sz_,
+                      ParallelTaskTerminator& term_);
+ 
+ public:
+   ageTable* age_table() {return &_ageTable;}
+-  
++
+   ObjToScanQueue* work_queue() { return _work_queue; }
+ 
+   ParGCAllocBuffer* to_space_alloc_buffer() {
+     return &_to_space_alloc_buffer;
+   }
+-  
++
+   ParEvacuateFollowersClosure&      evacuate_followers_closure() { return _evacuate_followers; }
+   DefNewGeneration::IsAliveClosure& is_alive_closure() { return _is_alive_closure; }
+   ParScanWeakRefClosure&            scan_weak_ref_closure() { return _scan_weak_ref_closure; }
+@@ -208,9 +205,9 @@
+   class ParScanThreadStateSet* _state_set;
+ 
+ public:
+-  ParNewGenTask(ParNewGeneration*      gen, 
++  ParNewGenTask(ParNewGeneration*      gen,
+                 Generation*            next_gen,
+-		HeapWord*              young_old_boundary, 
++                HeapWord*              young_old_boundary,
+                 ParScanThreadStateSet* state_set);
+ 
+   HeapWord* young_old_boundary() { return _young_old_boundary; }
+@@ -252,8 +249,8 @@
+                             ParScanThreadStateSet& state_set)
+     : _generation(generation), _state_set(state_set)
+   { }
+-  
+-  // Executes a task using worker threads.  
++
++  // Executes a task using worker threads.
+   virtual void execute(ProcessTask& task);
+   virtual void execute(EnqueueTask& task);
+   // Switch to single threaded mode.
+@@ -284,14 +281,14 @@
+   // Desired size of survivor space plab's
+   PLABStats _plab_stats;
+ 
+-  // A list of from-space images of to-be-scanned objects, threaded through 
++  // A list of from-space images of to-be-scanned objects, threaded through
+   // klass-pointers (klass information already copied to the forwarded
+   // image.)  Manipulated with CAS.
+   oop _overflow_list;
+ 
+   // If true, older generation does not support promotion undo, so avoid.
+   static bool _avoid_promotion_undo;
+-  
++
+   // This closure is used by the reference processor to filter out
+   // references to live referent.
+   DefNewGeneration::IsAliveClosure _is_alive_closure;
+@@ -299,8 +296,8 @@
+   static oop real_forwardee_slow(oop obj);
+   static void waste_some_time();
+ 
+-  // Preserve the mark of "obj", if necessary, in preparation for its mark 
+-  // word being overwritten with a self-forwarding-pointer. 
++  // Preserve the mark of "obj", if necessary, in preparation for its mark
++  // word being overwritten with a self-forwarding-pointer.
+   void preserve_mark_if_necessary(oop obj, markOop m);
+ 
+  protected:
+@@ -341,7 +338,7 @@
+   // Make the collection virtual.
+   virtual void collect(bool   full,
+                        bool   clear_all_soft_refs,
+-                       size_t size, 
++                       size_t size,
+                        bool   is_tlab);
+ 
+   // This needs to be visible to the closure function.
+@@ -349,20 +346,20 @@
+   // that must not contain a forwarding pointer (though one might be
+   // inserted in "obj"s mark word by a parallel thread).
+   inline oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
+-			     oop obj, size_t obj_sz, markOop m) {
++                             oop obj, size_t obj_sz, markOop m) {
+     if (_avoid_promotion_undo) {
+        return copy_to_survivor_space_avoiding_promotion_undo(par_scan_state,
+-                                         		     obj, obj_sz, m);
++                                                             obj, obj_sz, m);
+     }
+ 
+     return copy_to_survivor_space_with_undo(par_scan_state, obj, obj_sz, m);
+   }
+ 
+   oop copy_to_survivor_space_avoiding_promotion_undo(ParScanThreadState* par_scan_state,
+-			     oop obj, size_t obj_sz, markOop m);
++                             oop obj, size_t obj_sz, markOop m);
+ 
+   oop copy_to_survivor_space_with_undo(ParScanThreadState* par_scan_state,
+-			     oop obj, size_t obj_sz, markOop m);
++                             oop obj, size_t obj_sz, markOop m);
+ 
+   // Push the given (from-space) object on the global overflow list.
+   void push_on_overflow_list(oop from_space_obj);
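+
(push_on_overflow_list() and the _overflow_list comment above describe a global list threaded through spare header words and manipulated with CAS. A sketch of that push under the usual Treiber-stack pattern — the Node type is a simplified stand-in; the real code threads the list through klass pointers:)

    #include <atomic>

    struct Node { Node* next = nullptr; };

    std::atomic<Node*> overflow_list{nullptr};

    // Lock-free push: re-link our node to the observed head until the CAS
    // installs it.  take_from_overflow_list() would detach a bounded batch
    // (cf. ParGCDesiredObjsFromOverflowList) in the mirror-image way.
    void push_on_overflow_list(Node* n) {
      Node* head = overflow_list.load(std::memory_order_relaxed);
      do {
        n->next = head;
      } while (!overflow_list.compare_exchange_weak(
                   head, n, std::memory_order_release, std::memory_order_relaxed));
    }
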
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.hpp openjdk/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,35 +1,33 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)parOopClosures.hpp	1.1 07/05/16 10:51:44 JVM"
+-#endif
+ /*
+  * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+- *   
++ *
+  * This code is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License version 2 only, as
+  * published by the Free Software Foundation.
+- *   
++ *
+  * This code is distributed in the hope that it will be useful, but WITHOUT
+  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+  * version 2 for more details (a copy is included in the LICENSE file that
+  * accompanied this code).
+- *  
++ *
+  * You should have received a copy of the GNU General Public License version
+  * 2 along with this work; if not, write to the Free Software Foundation,
+  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+- *   
++ *
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Closures for ParNewGeneration
+ 
+ class ParScanThreadState;
+ class ParNewGeneration;
+-typedef class OopTaskQueueSet ObjToScanQueueSet;
++template<class E> class GenericTaskQueueSet;
++typedef GenericTaskQueueSet<oop> ObjToScanQueueSet;
+ class ParallelTaskTerminator;
+ 
+ class ParScanClosure: public OopsInGenClosure {
+@@ -38,8 +36,8 @@
+   ParNewGeneration* _g;
+   HeapWord* _boundary;
+   void do_oop_work(oop* p,
+-			  bool gc_barrier,
+-			  bool root_scan);
++                          bool gc_barrier,
++                          bool root_scan);
+ 
+   void par_do_barrier(oop* p);
+ 
+@@ -52,14 +50,14 @@
+   void do_oop(oop* p)    { do_oop_work(p, true, false); }
+   void do_oop_nv(oop* p) { do_oop_work(p, true, false); }
+   ParScanWithBarrierClosure(ParNewGeneration* g,
+-			    ParScanThreadState* par_scan_state) :
++                            ParScanThreadState* par_scan_state) :
+     ParScanClosure(g, par_scan_state) {}
+ };
+ 
+ class ParScanWithoutBarrierClosure: public ParScanClosure {
+ public:
+   ParScanWithoutBarrierClosure(ParNewGeneration* g,
+-			       ParScanThreadState* par_scan_state) :
++                               ParScanThreadState* par_scan_state) :
+     ParScanClosure(g, par_scan_state) {}
+   void do_oop(oop* p)    { do_oop_work(p, false, false); }
+   void do_oop_nv(oop* p) { do_oop_work(p, false, false); }
+@@ -68,7 +66,7 @@
+ class ParRootScanWithBarrierTwoGensClosure: public ParScanClosure {
+ public:
+   ParRootScanWithBarrierTwoGensClosure(ParNewGeneration* g,
+-				       ParScanThreadState* par_scan_state) :
++                                       ParScanThreadState* par_scan_state) :
+     ParScanClosure(g, par_scan_state) {}
+   void do_oop(oop* p) { do_oop_work(p, true, true); }
+ };
+@@ -76,7 +74,7 @@
+ class ParRootScanWithoutBarrierClosure: public ParScanClosure {
+ public:
+   ParRootScanWithoutBarrierClosure(ParNewGeneration* g,
+-				   ParScanThreadState* par_scan_state) :
++                                   ParScanThreadState* par_scan_state) :
+     ParScanClosure(g, par_scan_state) {}
+   void do_oop(oop* p) { do_oop_work(p, false, true); }
+ };
+@@ -95,7 +93,7 @@
+   ParScanThreadState* _par_scan_state;
+   ParScanThreadState* par_scan_state() { return _par_scan_state; }
+ 
+-  // We want to preserve the specific types here (rather than "OopClosure") 
++  // We want to preserve the specific types here (rather than "OopClosure")
+   // for later de-virtualization of do_oop calls.
+   ParScanWithoutBarrierClosure* _to_space_closure;
+   ParScanWithoutBarrierClosure* to_space_closure() {
+@@ -117,7 +115,7 @@
+ 
+   ParNewGeneration* _par_gen;
+   ParNewGeneration* par_gen() { return _par_gen; }
+-  
++
+   ObjToScanQueueSet*  _task_queues;
+   ObjToScanQueueSet*  task_queues() { return _task_queues; }
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp openjdk/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,28 +1,25 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)parOopClosures.inline.hpp	1.1 07/05/16 10:51:44 JVM"
+-#endif
+ /*
+  * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+- *   
++ *
+  * This code is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License version 2 only, as
+  * published by the Free Software Foundation.
+- *   
++ *
+  * This code is distributed in the hope that it will be useful, but WITHOUT
+  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+  * version 2 for more details (a copy is included in the LICENSE file that
+  * accompanied this code).
+- *  
++ *
+  * You should have received a copy of the GNU General Public License version
+  * 2 along with this work; if not, write to the Free Software Foundation,
+  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+- *   
++ *
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ inline void ParScanWeakRefClosure::do_oop(oop* p)
+@@ -32,14 +29,14 @@
+   // weak references are sometimes scanned twice; must check
+   // that to-space doesn't already contain this object
+   if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) {
+-    // we need to ensure that it is copied (see comment in 
++    // we need to ensure that it is copied (see comment in
+     // ParScanClosure::do_oop_work).
+     klassOop objK = obj->klass();
+     markOop m = obj->mark();
+     if (m->is_marked()) { // Contains forwarding pointer.
+       *p = ParNewGeneration::real_forwardee(obj);
+     } else {
+-      size_t obj_sz = obj->size_given_klass(objK->klass_part()); 
++      size_t obj_sz = obj->size_given_klass(objK->klass_part());
+       *p = ((ParNewGeneration*)_g)->copy_to_survivor_space(_par_scan_state,
+                                                            obj, obj_sz, m);
+     }
+@@ -66,15 +63,15 @@
+                                         bool root_scan) {
+   oop obj = *p;
+   assert((!Universe::heap()->is_in_reserved(p) ||
+-	  generation()->is_in_reserved(p))
+-	 && (generation()->level() == 0 || gc_barrier),
+-	 "The gen must be right, and we must be doing the barrier "
+-	 "in older generations.");
++          generation()->is_in_reserved(p))
++         && (generation()->level() == 0 || gc_barrier),
++         "The gen must be right, and we must be doing the barrier "
++         "in older generations.");
+   if (obj != NULL) {
+     if ((HeapWord*)obj < _boundary) {
+       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
+       // OK, we need to ensure that it is copied.
+-      // We read the klass and mark in this order, so that we can reliably 
++      // We read the klass and mark in this order, so that we can reliably
+       // get the size of the object: if the mark we read is not a
+       // forwarding pointer, then the klass is valid: the klass is only
+       // overwritten with an overflow next pointer after the object is
+@@ -82,22 +79,21 @@
+       klassOop objK = obj->klass();
+       markOop m = obj->mark();
+       if (m->is_marked()) { // Contains forwarding pointer.
+-	*p = ParNewGeneration::real_forwardee(obj);
++        *p = ParNewGeneration::real_forwardee(obj);
+       } else {
+-        size_t obj_sz = obj->size_given_klass(objK->klass_part()); 
++        size_t obj_sz = obj->size_given_klass(objK->klass_part());
+         *p = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m);
+-	if (root_scan) {
+-	  // This may have pushed an object.  If we have a root
+-	  // category with a lot of roots, can't let the queue get too
+-	  // full:
+-	  (void)_par_scan_state->trim_queues(10 * ParallelGCThreads);
+-	}
++        if (root_scan) {
++          // This may have pushed an object.  If we have a root
++          // category with a lot of roots, can't let the queue get too
++          // full:
++          (void)_par_scan_state->trim_queues(10 * ParallelGCThreads);
++        }
+       }
+       if (gc_barrier) {
+-	// Now call parent closure
+-	par_do_barrier(p);
++        // Now call parent closure
++        par_do_barrier(p);
+       }
+     }
+   }
+ }
+-
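+
(The do_oop_work comment above — "we read the klass and mark in this order" — is a load-ordering argument: the klass word is only clobbered after the object is forwarded, so reading klass first and mark second means an unforwarded mark vouches for the klass already read. A standalone model, assuming sequentially consistent atomics in place of HotSpot's raw word accesses:)

    #include <atomic>
    #include <cstdint>

    struct ModelOop {
      std::atomic<void*>         klass;  // later clobbered with 'next' pointer
      std::atomic<std::intptr_t> mark;   // set to a forwarding value on copy
    };

    const std::intptr_t FORWARDED = 1;

    // Writer order: install forwarding mark, then clobber klass.
    // Reader order: klass first, mark second.  If the mark read second is
    // still unforwarded, the klass read first preceded any clobber and can
    // safely be used to size the object.
    void* klass_if_safe_to_size(ModelOop* obj) {
      void* k = obj->klass.load();          // 1st (seq_cst)
      std::intptr_t m = obj->mark.load();   // 2nd (seq_cst)
      return (m & FORWARDED) ? nullptr : k;
    }
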
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/parNew/vmStructs_parNew.hpp openjdk/hotspot/src/share/vm/gc_implementation/parNew/vmStructs_parNew.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/parNew/vmStructs_parNew.hpp	2008-02-28 05:02:35.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/parNew/vmStructs_parNew.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,11 +1,25 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)vmStructs_parNew.hpp	1.1 07/05/01 16:48:02 JVM"
+-#endif
+ /*
+- * @(#)vmStructs_parNew.hpp	1.1 07/05/01
+- * 
+- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+- * SUN PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
++ * Copyright 2006-2007 Sun Microsystems, Inc.  All Rights Reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
++ * CA 95054 USA or visit www.sun.com if you need additional information or
++ * have any questions.
++ *
+  */
+ 
+ #define VM_TYPES_PARNEW(declare_type)                                     \
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp openjdk/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)adaptiveSizePolicy.cpp	1.13 07/05/05 17:05:33 JVM"
+-#endif
+ /*
+  * Copyright 2004-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ #include "incls/_precompiled.incl"
+ #include "incls/_adaptiveSizePolicy.cpp.incl"
+@@ -30,18 +27,18 @@
+ elapsedTimer AdaptiveSizePolicy::_minor_timer;
+ elapsedTimer AdaptiveSizePolicy::_major_timer;
+ 
+-// The throughput goal is implemented as 
+-//	_throughput_goal = 1 - ( 1 / (1 + gc_cost_ratio))
++// The throughput goal is implemented as
++//      _throughput_goal = 1 - ( 1 / (1 + gc_cost_ratio))
+ // gc_cost_ratio is the ratio
+-//	application cost / gc cost
++//      application cost / gc cost
+ // For example a gc_cost_ratio of 4 translates into a
+ // throughput goal of .80
+ 
+ AdaptiveSizePolicy::AdaptiveSizePolicy(size_t init_eden_size,
+                                        size_t init_promo_size,
+-				       size_t init_survivor_size,
+-				       double gc_pause_goal_sec,
+-				       uint gc_cost_ratio) :
++                                       size_t init_survivor_size,
++                                       double gc_pause_goal_sec,
++                                       uint gc_cost_ratio) :
+     _eden_size(init_eden_size),
+     _promo_size(init_promo_size),
+     _survivor_size(init_survivor_size),
+@@ -54,7 +51,7 @@
+     _threshold_tolerance_percent(1.0 + ThresholdTolerance/100.0),
+     _young_gen_change_for_minor_throughput(0),
+     _old_gen_change_for_major_throughput(0) {
+-  _avg_minor_pause    = 
++  _avg_minor_pause    =
+     new AdaptivePaddedAverage(AdaptiveTimeWeight, PausePadding);
+   _avg_minor_interval = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
+   _avg_minor_gc_cost  = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
+@@ -115,42 +112,42 @@
+       UseAdaptiveSizePolicyWithSystemGC) {
+     double minor_pause_in_seconds = _minor_timer.seconds();
+     double minor_pause_in_ms = minor_pause_in_seconds * MILLIUNITS;
+-  
++
+     // Sample for performance counter
+     _avg_minor_pause->sample(minor_pause_in_seconds);
+-  
++
+     // Cost of collection (unit-less)
+     double collection_cost = 0.0;
+     if ((_latest_minor_mutator_interval_seconds > 0.0) &&
+         (minor_pause_in_seconds > 0.0)) {
+       double interval_in_seconds =
+         _latest_minor_mutator_interval_seconds + minor_pause_in_seconds;
+-      collection_cost = 
++      collection_cost =
+         minor_pause_in_seconds / interval_in_seconds;
+       _avg_minor_gc_cost->sample(collection_cost);
+       // Sample for performance counter
+       _avg_minor_interval->sample(interval_in_seconds);
+     }
+-  
+-    // The policy does not have enough data until at least some 
++
++    // The policy does not have enough data until at least some
+     // minor collections have been done.
+-    _young_gen_policy_is_ready = 
++    _young_gen_policy_is_ready =
+       (_avg_minor_gc_cost->count() >= AdaptiveSizePolicyReadyThreshold);
+-  
++
+     // Calculate variables used to estimate pause time vs. gen sizes
+     double eden_size_in_mbytes = ((double)_eden_size)/((double)M);
+     update_minor_pause_young_estimator(minor_pause_in_ms);
+     update_minor_pause_old_estimator(minor_pause_in_ms);
+-  
++
+     if (PrintAdaptiveSizePolicy && Verbose) {
+       gclog_or_tty->print("AdaptiveSizePolicy::minor_collection_end: "
+         "minor gc cost: %f  average: %f", collection_cost,
+         _avg_minor_gc_cost->average());
+-      gclog_or_tty->print_cr("  minor pause: %f minor period %f", 
++      gclog_or_tty->print_cr("  minor pause: %f minor period %f",
+         minor_pause_in_ms,
+         _latest_minor_mutator_interval_seconds * MILLIUNITS);
+     }
+-  
++
+     // Calculate variable used to estimate collection cost vs. gen sizes
+     assert(collection_cost >= 0.0, "Expected to be non-negative");
+     _minor_collection_estimator->update(eden_size_in_mbytes, collection_cost);
+@@ -203,7 +200,7 @@
+   return result;
+ }
+ 
+-// Linear decay of major gc cost 
++// Linear decay of major gc cost
+ double AdaptiveSizePolicy::decaying_major_gc_cost() const {
+   double major_interval = major_gc_interval_average_for_decay();
+   double major_gc_cost_average = major_gc_cost();
+@@ -224,14 +221,14 @@
+ // Use a value of the major gc cost that has been decayed
+ // by the factor
+ //
+-//	average-interval-between-major-gc * AdaptiveSizeMajorGCDecayTimeScale /
+-//	  time-since-last-major-gc
++//      average-interval-between-major-gc * AdaptiveSizeMajorGCDecayTimeScale /
++//        time-since-last-major-gc
+ //
+ // if the average-interval-between-major-gc * AdaptiveSizeMajorGCDecayTimeScale
+ // is less than time-since-last-major-gc.
+ //
+-// In cases where there are initial major gc's that 
+-// are of a relatively high cost but no later major 
++// In cases where there are initial major gc's that
++// are of a relatively high cost but no later major
+ // gc's, the total gc cost can remain high because
+ // the major gc cost remains unchanged (since there are no major
+ // gc's).  In such a situation the value of the unchanging
+@@ -240,7 +237,7 @@
+ // small.  Use the decaying gc cost only to decide whether to
+ // adjust for throughput.  Using it also to determine the adjustment
+ // to be made for throughput also seems reasonable but there is
+-// no test case to use to decide if it is the right thing to do 
++// no test case to use to decide if it is the right thing to do
+ // don't do it yet.
+ 
+ double AdaptiveSizePolicy::decaying_gc_cost() const {
+@@ -251,7 +248,7 @@
+       (avg_major_interval > 0.00)) {
+     double time_since_last_major_gc = time_since_major_gc();
+ 
+-    // Decay the major gc cost?  
++    // Decay the major gc cost?
+     if (time_since_last_major_gc >
+         ((double) AdaptiveSizeMajorGCDecayTimeScale) * avg_major_interval) {
+ 
+@@ -259,8 +256,8 @@
+       decayed_major_gc_cost = decaying_major_gc_cost();
+       if (PrintGCDetails && Verbose) {
+         gclog_or_tty->print_cr("\ndecaying_gc_cost: major interval average:"
+-	  " %f  time since last major gc: %f", 
+-	  avg_major_interval, time_since_last_major_gc);
++          " %f  time since last major gc: %f",
++          avg_major_interval, time_since_last_major_gc);
+         gclog_or_tty->print_cr("  major gc cost: %f  decayed major gc cost: %f",
+           major_gc_cost(), decayed_major_gc_cost);
+       }
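+
(Per the comments above, decaying_gc_cost() only discounts the remembered major GC cost once the time since the last major GC exceeds AdaptiveSizeMajorGCDecayTimeScale times the average major interval. A hedged sketch of that scaling — parameter names are illustrative:)

    // decayed = cost * (decay_time_scale * avg_interval) / time_since_last,
    // applied only once the last major collection has gone stale.
    double decayed_major_gc_cost(double major_gc_cost,
                                 double avg_major_interval,
                                 double time_since_last_major_gc,
                                 double decay_time_scale) {
      double horizon = decay_time_scale * avg_major_interval;
      if (horizon <= 0.0 || time_since_last_major_gc <= horizon)
        return major_gc_cost;        // recent enough: use the cost as-is
      return major_gc_cost * (horizon / time_since_last_major_gc);
    }
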
+@@ -292,16 +289,16 @@
+   // Print goal for which action is needed.
+   char* action = NULL;
+   bool change_for_pause = false;
+-  if ((change_old_gen_for_maj_pauses() == 
+-	 decrease_old_gen_for_maj_pauses_true) ||
+-      (change_young_gen_for_min_pauses() == 
+-	 decrease_young_gen_for_min_pauses_true)) {
++  if ((change_old_gen_for_maj_pauses() ==
++         decrease_old_gen_for_maj_pauses_true) ||
++      (change_young_gen_for_min_pauses() ==
++         decrease_young_gen_for_min_pauses_true)) {
+     action = (char*) " *** pause time goal ***";
+     change_for_pause = true;
+   } else if ((change_old_gen_for_throughput() ==
+-	       increase_old_gen_for_throughput_true) || 
++               increase_old_gen_for_throughput_true) ||
+             (change_young_gen_for_throughput() ==
+-	       increase_young_gen_for_througput_true)) {
++               increase_young_gen_for_througput_true)) {
+     action = (char*) " *** throughput goal ***";
+   } else if (decrease_for_footprint()) {
+     action = (char*) " *** reduced footprint ***";
+@@ -333,16 +330,16 @@
+   } else if (change_for_pause) {
+     tenured_gen_action = no_change_msg;
+   }
+-    
++
+   // Throughput
+   if (change_old_gen_for_throughput() == increase_old_gen_for_throughput_true) {
+     assert(change_young_gen_for_throughput() ==
+-	   increase_young_gen_for_througput_true, 
+-	   "Both generations should be growing");
++           increase_young_gen_for_througput_true,
++           "Both generations should be growing");
+     young_gen_action = grow_msg;
+     tenured_gen_action = grow_msg;
+-  } else if (change_young_gen_for_throughput() == 
+-	     increase_young_gen_for_througput_true) {
++  } else if (change_young_gen_for_throughput() ==
++             increase_young_gen_for_througput_true) {
+     // Only the young generation may grow at start up (before
+     // enough full collections have been done to grow the old generation).
+     young_gen_action = grow_msg;
+@@ -350,7 +347,7 @@
+   }
+ 
+   // Minimum footprint
+-  if (decrease_for_footprint() != 0) { 
++  if (decrease_for_footprint() != 0) {
+     young_gen_action = shrink_msg;
+     tenured_gen_action = shrink_msg;
+   }
+@@ -367,8 +364,8 @@
+ }
+ 
+ bool AdaptiveSizePolicy::print_adaptive_size_policy_on(
+-                                            outputStream* st, 
+-				            int tenuring_threshold_arg) const {
++                                            outputStream* st,
++                                            int tenuring_threshold_arg) const {
+   if (!AdaptiveSizePolicy::print_adaptive_size_policy_on(st)) {
+     return false;
+   }
+@@ -377,13 +374,13 @@
+   bool tenuring_threshold_changed = true;
+   if (decrement_tenuring_threshold_for_survivor_limit()) {
+     st->print("    Tenuring threshold:    (attempted to decrease to avoid"
+-	      " survivor space overflow) = ");
++              " survivor space overflow) = ");
+   } else if (decrement_tenuring_threshold_for_gc_cost()) {
+     st->print("    Tenuring threshold:    (attempted to decrease to balance"
+-	      " GC costs) = ");
++              " GC costs) = ");
+   } else if (increment_tenuring_threshold_for_gc_cost()) {
+     st->print("    Tenuring threshold:    (attempted to increase to balance"
+-	      " GC costs) = ");
++              " GC costs) = ");
+   } else {
+     tenuring_threshold_changed = false;
+     assert(!tenuring_threshold_change(), "(no change was attempted)");
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp openjdk/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)adaptiveSizePolicy.hpp	1.15 07/05/05 17:05:32 JVM"
+-#endif
+ /*
+  * Copyright 2004-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This class keeps statistical information and computes the
+@@ -250,8 +247,8 @@
+ 
+   // Return the mutator cost using the decayed
+   // GC cost.
+-  double adjusted_mutator_cost() const { 
+-    double result = 1.0 - decaying_gc_cost(); 
++  double adjusted_mutator_cost() const {
++    double result = 1.0 - decaying_gc_cost();
+     assert(result >= 0.0, "adjusted mutator cost calculation is incorrect");
+     return result;
+   }
+@@ -323,28 +320,28 @@
+ 
+  public:
+   AdaptiveSizePolicy(size_t init_eden_size,
+-		     size_t init_promo_size,
+-		     size_t init_survivor_size,
+-		     double gc_pause_goal_sec,
+-		     uint gc_cost_ratio);
++                     size_t init_promo_size,
++                     size_t init_survivor_size,
++                     double gc_pause_goal_sec,
++                     uint gc_cost_ratio);
+ 
+-  bool is_gc_cms_adaptive_size_policy() { 
+-    return kind() == _gc_cms_adaptive_size_policy; 
++  bool is_gc_cms_adaptive_size_policy() {
++    return kind() == _gc_cms_adaptive_size_policy;
+   }
+-  bool is_gc_ps_adaptive_size_policy() { 
+-    return kind() == _gc_ps_adaptive_size_policy; 
++  bool is_gc_ps_adaptive_size_policy() {
++    return kind() == _gc_ps_adaptive_size_policy;
+   }
+ 
+   AdaptivePaddedAverage*   avg_minor_pause() const { return _avg_minor_pause; }
+-  AdaptiveWeightedAverage* avg_minor_interval() const { 
+-    return _avg_minor_interval; 
++  AdaptiveWeightedAverage* avg_minor_interval() const {
++    return _avg_minor_interval;
+   }
+-  AdaptiveWeightedAverage* avg_minor_gc_cost() const { 
+-    return _avg_minor_gc_cost; 
++  AdaptiveWeightedAverage* avg_minor_gc_cost() const {
++    return _avg_minor_gc_cost;
+   }
+ 
+-  AdaptiveWeightedAverage* avg_major_gc_cost() const { 
+-    return _avg_major_gc_cost; 
++  AdaptiveWeightedAverage* avg_major_gc_cost() const {
++    return _avg_major_gc_cost;
+   }
+ 
+   AdaptiveWeightedAverage* avg_young_live() const { return _avg_young_live; }
+@@ -441,13 +438,13 @@
+ 
+   // Printing support
+   virtual bool print_adaptive_size_policy_on(outputStream* st) const;
+-  bool print_adaptive_size_policy_on(outputStream* st, int 
+-				  tenuring_threshold) const;
++  bool print_adaptive_size_policy_on(outputStream* st, int
++                                  tenuring_threshold) const;
+ };
+ 
+ // Class that can be used to print information about the
+-// adaptive size policy at intervals specified by 
+-// AdaptiveSizePolicyOutputInterval.  Only print information 
++// adaptive size policy at intervals specified by
++// AdaptiveSizePolicyOutputInterval.  Only print information
+ // if an adaptive size policy is in use.
+ class AdaptiveSizePolicyOutput : StackObj {
+   AdaptiveSizePolicy* _size_policy;
+@@ -457,11 +454,11 @@
+     // interval test should be ignored.  An interval is of zero is
+     // a special value that indicates that the interval test should
+     // always fail (never do the print based on the interval test).
+-    return PrintGCDetails && 
+-	   UseAdaptiveSizePolicy &&
+-	   (UseParallelGC || UseConcMarkSweepGC) &&
++    return PrintGCDetails &&
++           UseAdaptiveSizePolicy &&
++           (UseParallelGC || UseConcMarkSweepGC) &&
+            (AdaptiveSizePolicyOutputInterval > 0) &&
+-	   ((count == 0) ||
++           ((count == 0) ||
+              ((count % AdaptiveSizePolicyOutputInterval) == 0));
+   }
+  public:
+@@ -476,16 +473,16 @@
+       _size_policy = NULL;
+       _do_print = false;
+     }
+-  } 
+-  AdaptiveSizePolicyOutput(AdaptiveSizePolicy* size_policy, 
+-			   uint count) :
++  }
++  AdaptiveSizePolicyOutput(AdaptiveSizePolicy* size_policy,
++                           uint count) :
+     _size_policy(size_policy) {
+     if (UseAdaptiveSizePolicy && (AdaptiveSizePolicyOutputInterval > 0)) {
+       _do_print = print_test(count);
+     } else {
+       _do_print = false;
+     }
+-  } 
++  }
+   ~AdaptiveSizePolicyOutput() {
+     if (_do_print) {
+       assert(UseAdaptiveSizePolicy, "Should not be in use");
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/ageTable.cpp openjdk/hotspot/src/share/vm/gc_implementation/shared/ageTable.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/ageTable.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/ageTable.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ageTable.cpp	1.36 07/05/05 17:05:33 JVM"
+-#endif
+ /*
+  * Copyright 1997-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ /* Copyright 1992 Sun Microsystems, Inc. and Stanford University.
+@@ -113,7 +110,7 @@
+       GCPolicyCounters* gc_counters = policy->counters();
+       gc_counters->tenuring_threshold()->set_value(result);
+       gc_counters->desired_survivor_size()->set_value(
+-	desired_survivor_size*oopSize);
++        desired_survivor_size*oopSize);
+     }
+   }
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/ageTable.hpp openjdk/hotspot/src/share/vm/gc_implementation/shared/ageTable.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/ageTable.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/ageTable.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ageTable.hpp	1.29 07/05/05 17:05:32 JVM"
+-#endif
+ /*
+  * Copyright 1997-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ /* Copyright 1992 Sun Microsystems, Inc. and Stanford University.
+@@ -53,7 +50,7 @@
+   void add(oop p, size_t oop_size) {
+     int age = p->age();
+     assert(age > 0 && age < table_size, "invalid age of object");
+-    sizes[age] += oop_size; 
++    sizes[age] += oop_size;
+   }
+ 
+   // Merge another age table with the current one.  Used
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/collectorCounters.cpp openjdk/hotspot/src/share/vm/gc_implementation/shared/collectorCounters.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/collectorCounters.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/collectorCounters.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)collectorCounters.cpp	1.10 07/05/05 17:05:32 JVM"
+-#endif
+ /*
+  * Copyright 2002-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/collectorCounters.hpp openjdk/hotspot/src/share/vm/gc_implementation/shared/collectorCounters.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/collectorCounters.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/collectorCounters.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)collectorCounters.hpp	1.10 07/05/05 17:05:33 JVM"
+-#endif
+ /*
+  * Copyright 2002-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // CollectorCounters is a holder class for performance counters
+@@ -50,7 +47,7 @@
+     ~CollectorCounters() {
+       if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
+     }
+-  
++
+     inline PerfCounter* invocation_counter() const  { return _invocations; }
+ 
+     inline PerfCounter* time_counter() const        { return _time; }
+@@ -81,4 +78,3 @@
+       if (UsePerfData) _c->last_exit_counter()->set_value(os::elapsed_counter());
+     }
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/cSpaceCounters.cpp openjdk/hotspot/src/share/vm/gc_implementation/shared/cSpaceCounters.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/cSpaceCounters.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/cSpaceCounters.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)cSpaceCounters.cpp	1.10 07/05/05 17:05:32 JVM"
+-#endif
+ /*
+  * Copyright 2002-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -31,11 +28,11 @@
+ CSpaceCounters::CSpaceCounters(const char* name, int ordinal, size_t max_size,
+                                ContiguousSpace* s, GenerationCounters* gc) :
+    _space(s) {
+-   
++
+   if (UsePerfData) {
+     EXCEPTION_MARK;
+     ResourceMark rm;
+- 
++
+     const char* cns = PerfDataManager::name_space(gc->name_space(), "space",
+                                                   ordinal);
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/cSpaceCounters.hpp openjdk/hotspot/src/share/vm/gc_implementation/shared/cSpaceCounters.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/cSpaceCounters.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/cSpaceCounters.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)cSpaceCounters.hpp	1.12 07/05/05 17:05:33 JVM"
+-#endif
+ /*
+  * Copyright 2002-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A CSpaceCounters is a holder class for performance counters
+@@ -50,7 +47,7 @@
+   ~CSpaceCounters() {
+       if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
+   }
+-  
++
+   inline void update_capacity() {
+     _capacity->set_value(_space->capacity());
+   }
+@@ -78,4 +75,3 @@
+       return _space->used();
+     }
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/gcAdaptivePolicyCounters.cpp openjdk/hotspot/src/share/vm/gc_implementation/shared/gcAdaptivePolicyCounters.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/gcAdaptivePolicyCounters.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/gcAdaptivePolicyCounters.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)gcAdaptivePolicyCounters.cpp	1.10 07/05/05 17:05:34 JVM"
+-#endif
+ /*
+  * Copyright 2004-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -31,12 +28,12 @@
+ // This class keeps statistical information and computes the
+ // size of the heap.
+ 
+-GCAdaptivePolicyCounters::GCAdaptivePolicyCounters(const char* name, 
+-					int collectors,
+-					int generations,
+-					AdaptiveSizePolicy* size_policy_arg)
+-	: GCPolicyCounters(name, collectors, generations),
+-	  _size_policy(size_policy_arg) {
++GCAdaptivePolicyCounters::GCAdaptivePolicyCounters(const char* name,
++                                        int collectors,
++                                        int generations,
++                                        AdaptiveSizePolicy* size_policy_arg)
++        : GCPolicyCounters(name, collectors, generations),
++          _size_policy(size_policy_arg) {
+   if (UsePerfData) {
+     EXCEPTION_MARK;
+     ResourceMark rm;
+@@ -47,11 +44,11 @@
+ 
+     cname = PerfDataManager::counter_name(name_space(), "promoSize");
+     _promo_size_counter = PerfDataManager::create_variable(SUN_GC, cname,
+-      PerfData::U_Bytes, size_policy()->calculated_promo_size_in_bytes(), 
++      PerfData::U_Bytes, size_policy()->calculated_promo_size_in_bytes(),
+       CHECK);
+ 
+     cname = PerfDataManager::counter_name(name_space(), "youngCapacity");
+-    size_t young_capacity_in_bytes = 
++    size_t young_capacity_in_bytes =
+       _size_policy->calculated_eden_size_in_bytes() +
+       _size_policy->calculated_survivor_size_in_bytes();
+     _young_capacity_counter = PerfDataManager::create_variable(SUN_GC, cname,
+@@ -77,28 +74,28 @@
+       CHECK);
+ 
+     cname = PerfDataManager::counter_name(name_space(), "avgMinorIntervalTime");
+-    _avg_minor_interval_counter = PerfDataManager::create_variable(SUN_GC, 
++    _avg_minor_interval_counter = PerfDataManager::create_variable(SUN_GC,
+       cname,
+-      PerfData::U_Ticks, 
+-      (jlong) _size_policy->_avg_minor_interval->average(), 
++      PerfData::U_Ticks,
++      (jlong) _size_policy->_avg_minor_interval->average(),
+       CHECK);
+ 
+-#ifdef NOT_PRODUCT 
++#ifdef NOT_PRODUCT
+       // This is a counter for the most recent minor pause time
+       // (the last sample, not the average).  It is useful for
+       // verifying the average pause time but not worth putting
+       // into the product.
+-      cname = PerfDataManager::counter_name(name_space(), "minorPauseTime"); 
++      cname = PerfDataManager::counter_name(name_space(), "minorPauseTime");
+       _minor_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
+       PerfData::U_Ticks, (jlong) _size_policy->_avg_minor_pause->last_sample(),
+       CHECK);
+ #endif
+ 
+     cname = PerfDataManager::counter_name(name_space(), "minorGcCost");
+-    _minor_gc_cost_counter = PerfDataManager::create_variable(SUN_GC, 
++    _minor_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
+       cname,
+-      PerfData::U_Ticks, 
+-      (jlong) _size_policy->minor_gc_cost(), 
++      PerfData::U_Ticks,
++      (jlong) _size_policy->minor_gc_cost(),
+       CHECK);
+ 
+     cname = PerfDataManager::counter_name(name_space(), "mutatorCost");
+@@ -115,12 +112,12 @@
+ 
+     cname = PerfDataManager::counter_name(name_space(), "avgYoungLive");
+     _avg_young_live_counter = PerfDataManager::create_variable(SUN_GC, cname,
+-      PerfData::U_Bytes, (jlong) size_policy()->avg_young_live()->average(), 
++      PerfData::U_Bytes, (jlong) size_policy()->avg_young_live()->average(),
+       CHECK);
+ 
+     cname = PerfDataManager::counter_name(name_space(), "avgOldLive");
+     _avg_old_live_counter = PerfDataManager::create_variable(SUN_GC, cname,
+-      PerfData::U_Bytes, (jlong) size_policy()->avg_old_live()->average(), 
++      PerfData::U_Bytes, (jlong) size_policy()->avg_old_live()->average(),
+       CHECK);
+ 
+     cname = PerfDataManager::counter_name(name_space(), "survivorOverflowed");
+@@ -179,17 +176,17 @@
+       PerfData::U_None, (jlong)0, CHECK);
+ 
+     cname = PerfDataManager::counter_name(name_space(), "minorPauseYoungSlope");
+-    _minor_pause_young_slope_counter = 
++    _minor_pause_young_slope_counter =
+       PerfDataManager::create_variable(SUN_GC, cname,
+       PerfData::U_None, (jlong) 0, CHECK);
+ 
+     cname = PerfDataManager::counter_name(name_space(), "majorCollectionSlope");
+-    _major_collection_slope_counter = 
++    _major_collection_slope_counter =
+       PerfDataManager::create_variable(SUN_GC, cname,
+       PerfData::U_None, (jlong) 0, CHECK);
+ 
+     cname = PerfDataManager::counter_name(name_space(), "minorCollectionSlope");
+-    _minor_collection_slope_counter = 
++    _minor_collection_slope_counter =
+       PerfDataManager::create_variable(SUN_GC, cname,
+       PerfData::U_None, (jlong) 0, CHECK);
+   }
+@@ -199,7 +196,7 @@
+   if (UsePerfData && (size_policy() != NULL)) {
+     update_avg_minor_pause_counter();
+     update_avg_minor_interval_counter();
+-#ifdef NOT_PRODUCT 
++#ifdef NOT_PRODUCT
+     update_minor_pause_counter();
+ #endif
+     update_minor_gc_cost_counter();
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/gcAdaptivePolicyCounters.hpp openjdk/hotspot/src/share/vm/gc_implementation/shared/gcAdaptivePolicyCounters.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/gcAdaptivePolicyCounters.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/gcAdaptivePolicyCounters.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)gcAdaptivePolicyCounters.hpp	1.11 07/05/05 17:05:34 JVM"
+-#endif
+ /*
+  * Copyright 2004-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This class keeps statistical information and computes the
+@@ -30,38 +27,38 @@
+ 
+ class GCAdaptivePolicyCounters : public GCPolicyCounters {
+  protected:
+-  PerfVariable* 	_eden_size_counter;
++  PerfVariable*         _eden_size_counter;
+   PerfVariable*         _promo_size_counter;
+ 
+-  PerfVariable* 	_young_capacity_counter;
++  PerfVariable*         _young_capacity_counter;
+ 
+-  PerfVariable*     	_minor_gc_cost_counter;
+-  PerfVariable* 	_major_gc_cost_counter;
+-  PerfVariable* 	_mutator_cost_counter;
++  PerfVariable*         _minor_gc_cost_counter;
++  PerfVariable*         _major_gc_cost_counter;
++  PerfVariable*         _mutator_cost_counter;
+ 
+   PerfVariable*         _avg_young_live_counter;
+   PerfVariable*         _avg_old_live_counter;
+ 
+-  PerfVariable* 	_avg_minor_pause_counter;
+-  PerfVariable* 	_avg_minor_interval_counter;
++  PerfVariable*         _avg_minor_pause_counter;
++  PerfVariable*         _avg_minor_interval_counter;
+ 
+-#ifdef NOT_PRODUCT 
++#ifdef NOT_PRODUCT
+   PerfVariable*         _minor_pause_counter;
+ #endif
+ 
+-  PerfVariable* 	_change_young_gen_for_min_pauses_counter;
+-  PerfVariable* 	_change_young_gen_for_throughput_counter;
+-  PerfVariable* 	_change_old_gen_for_maj_pauses_counter;
+-  PerfVariable* 	_change_old_gen_for_throughput_counter;
+-  PerfVariable* 	_decrease_for_footprint_counter;
++  PerfVariable*         _change_young_gen_for_min_pauses_counter;
++  PerfVariable*         _change_young_gen_for_throughput_counter;
++  PerfVariable*         _change_old_gen_for_maj_pauses_counter;
++  PerfVariable*         _change_old_gen_for_throughput_counter;
++  PerfVariable*         _decrease_for_footprint_counter;
+ 
+   PerfVariable*         _minor_pause_young_slope_counter;
+   PerfVariable*         _major_pause_old_slope_counter;
+ 
+   PerfVariable*         _decide_at_full_gc_counter;
+ 
+-  PerfVariable*     	_survived_counter;
+-  PerfVariable*     	_promoted_counter;
++  PerfVariable*         _survived_counter;
++  PerfVariable*         _promoted_counter;
+ 
+   PerfVariable*         _avg_survived_avg_counter;
+   PerfVariable*         _avg_survived_dev_counter;
+@@ -96,7 +93,7 @@
+       (size_policy()->avg_minor_interval()->average() * 1000.0));
+   }
+ 
+-#ifdef NOT_PRODUCT 
++#ifdef NOT_PRODUCT
+   inline void update_minor_pause_counter() {
+     _minor_pause_counter->set_value((jlong)
+       (size_policy()->avg_minor_pause()->last_sample() * 1000.0));
+@@ -159,9 +156,9 @@
+   virtual AdaptiveSizePolicy* size_policy() { return _size_policy; }
+ 
+  public:
+-  GCAdaptivePolicyCounters(const char* name, 
+-			   int collectors, 
+-			   int generations,
++  GCAdaptivePolicyCounters(const char* name,
++                           int collectors,
++                           int generations,
+                            AdaptiveSizePolicy* size_policy);
+ 
+   inline void update_survived(size_t survived) {
+@@ -221,7 +218,7 @@
+ 
+   void set_size_policy(AdaptiveSizePolicy* v) { _size_policy = v; }
+ 
+-  virtual GCPolicyCounters::Name kind() const { 
+-    return GCPolicyCounters::GCAdaptivePolicyCountersKind; 
++  virtual GCPolicyCounters::Name kind() const {
++    return GCPolicyCounters::GCAdaptivePolicyCountersKind;
+   }
+ };
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/gcPolicyCounters.cpp openjdk/hotspot/src/share/vm/gc_implementation/shared/gcPolicyCounters.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/gcPolicyCounters.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/gcPolicyCounters.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)gcPolicyCounters.cpp	1.13 07/05/05 17:05:34 JVM"
+-#endif
+ /*
+  * Copyright 2002-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/gcPolicyCounters.hpp openjdk/hotspot/src/share/vm/gc_implementation/shared/gcPolicyCounters.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/gcPolicyCounters.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/gcPolicyCounters.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)gcPolicyCounters.hpp	1.18 07/05/05 17:05:33 JVM"
+-#endif
+ /*
+  * Copyright 2002-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // GCPolicyCounters is a holder class for performance counters
+@@ -68,7 +65,7 @@
+ 
+     virtual void update_counters() {}
+ 
+-    virtual GCPolicyCounters::Name kind() const { 
+-      return GCPolicyCounters::GCPolicyCountersKind; 
++    virtual GCPolicyCounters::Name kind() const {
++      return GCPolicyCounters::GCPolicyCountersKind;
+     }
+ };
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/gcStats.cpp openjdk/hotspot/src/share/vm/gc_implementation/shared/gcStats.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/gcStats.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/gcStats.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)gcStats.cpp	1.9 07/05/05 17:05:33 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/gcStats.hpp openjdk/hotspot/src/share/vm/gc_implementation/shared/gcStats.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/gcStats.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/gcStats.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)gcStats.hpp	1.10 07/05/05 17:05:34 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class GCStats : public CHeapObj {
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/gcUtil.cpp openjdk/hotspot/src/share/vm/gc_implementation/shared/gcUtil.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/gcUtil.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/gcUtil.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)gcUtil.cpp	1.21 07/05/05 17:05:32 JVM"
+-#endif
+ /*
+  * Copyright 2002-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -30,7 +27,7 @@
+ 
+ // Catch-all file for utility classes
+ 
+-float AdaptiveWeightedAverage::compute_adaptive_average(float new_sample, 
++float AdaptiveWeightedAverage::compute_adaptive_average(float new_sample,
+                                                         float average) {
+   // We smooth the samples by not using weight() directly until we've
+   // had enough data to make it meaningful. We'd like the first weight
+@@ -99,7 +96,7 @@
+   if ( _mean_x.count() > 1 ) {
+     double slope_denominator;
+     slope_denominator = (_mean_x.count() * _sum_x_squared - _sum_x * _sum_x);
+-    // Some tolerance should be injected here.  A denominator that is 
++    // Some tolerance should be injected here.  A denominator that is
+     // nearly 0 should be avoided.
+ 
+     if (slope_denominator != 0.0) {
+@@ -119,7 +116,7 @@
+ 
+ double LinearLeastSquareFit::y(double x) {
+   double new_y;
+-  
++
+   if ( _mean_x.count() > 1 ) {
+     new_y = (_intercept + _slope * x);
+     return new_y;
+@@ -137,8 +134,8 @@
+ // since that untuitive expectation is not built into the complement.
+ bool LinearLeastSquareFit::decrement_will_decrease() {
+   return (_slope >= 0.00);
+-} 
++}
+ 
+ bool LinearLeastSquareFit::increment_will_decrease() {
+   return (_slope <= 0.00);
+-} 
++}
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp openjdk/hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)gcUtil.hpp	1.19 07/05/05 17:05:32 JVM"
+-#endif
+ /*
+  * Copyright 2002-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Catch-all file for utility classes
+@@ -31,14 +28,14 @@
+ // of some float value (templates would be handy here if we
+ // need different types).
+ //
+-// The average is adaptive in that we smooth it for the 
++// The average is adaptive in that we smooth it for the
+ // initial samples; we don't use the weight until we have
+ // enough samples for it to be meaningful.
+ //
+ // This serves as our best estimate of a future unknown.
+ //
+ class AdaptiveWeightedAverage : public CHeapObj {
+- private: 
++ private:
+   float            _average;        // The last computed average
+   unsigned         _sample_count;   // How often we've sampled this average
+   unsigned         _weight;         // The weight used to smooth the averages
+@@ -46,18 +43,18 @@
+                                     //   recent data.
+ 
+  protected:
+-  float		   _last_sample;    // The last value sampled.
++  float            _last_sample;    // The last value sampled.
+ 
+   void  increment_count()       { _sample_count++;       }
+   void  set_average(float avg)  { _average = avg;        }
+ 
+-  // Helper function, computes an adaptive weighted average 
++  // Helper function, computes an adaptive weighted average
+   // given a sample and the last average
+   float compute_adaptive_average(float new_sample, float average);
+ 
+  public:
+   // Input weight must be between 0 and 100
+-  AdaptiveWeightedAverage(unsigned weight) : 
++  AdaptiveWeightedAverage(unsigned weight) :
+     _average(0.0), _sample_count(0), _weight(weight), _last_sample(0.0) {
+   }
+ 
+@@ -65,7 +62,7 @@
+   float    average() const       { return _average;       }
+   unsigned weight()  const       { return _weight;        }
+   unsigned count()   const       { return _sample_count;  }
+-  float    last_sample() const	 { return _last_sample; }
++  float    last_sample() const   { return _last_sample; }
+ 
+   // Update data with a new sample.
+   void sample(float new_sample);
+@@ -90,7 +87,7 @@
+ // unknown.
+ class AdaptivePaddedAverage : public AdaptiveWeightedAverage {
+  private:
+-  float          _padded_avg;     // The last computed padded average 
++  float          _padded_avg;     // The last computed padded average
+   float          _deviation;      // Running deviation from the average
+   unsigned       _padding;        // A multiple which, added to the average,
+                                   // gives us an upper bound guess.
+@@ -105,14 +102,14 @@
+     _padded_avg(0.0), _deviation(0.0), _padding(0) {}
+ 
+   AdaptivePaddedAverage(unsigned weight, unsigned padding) :
+-    AdaptiveWeightedAverage(weight), 
++    AdaptiveWeightedAverage(weight),
+     _padded_avg(0.0), _deviation(0.0), _padding(padding) {}
+ 
+   // Placement support
+   void* operator new(size_t ignored, void* p) { return p; }
+   // Allocator
+   void* operator new(size_t size) { return CHeapObj::operator new(size); }
+-  
++
+   // Accessor
+   float padded_average() const         { return _padded_avg; }
+   float deviation()      const         { return _deviation;  }
+@@ -143,17 +140,17 @@
+ //              y = intercept + slope * x
+ 
+ class LinearLeastSquareFit : public CHeapObj {
+-  double _sum_x;  	// sum of all independent data points x
++  double _sum_x;        // sum of all independent data points x
+   double _sum_x_squared; // sum of all independent data points x**2
+-  double _sum_y;  	// sum of all dependent data points y
+-  double _sum_xy; 	// sum of all x * y.
++  double _sum_y;        // sum of all dependent data points y
++  double _sum_xy;       // sum of all x * y.
+   double _intercept;     // constant term
+-  double _slope;     	// slope
++  double _slope;        // slope
+   // The weighted averages are not currently used but perhaps should
+   // be used to get decaying averages.
+   AdaptiveWeightedAverage _mean_x; // weighted mean of independent variable
+   AdaptiveWeightedAverage _mean_y; // weighted mean of dependent variable
+-  
++
+  public:
+   LinearLeastSquareFit(unsigned weight);
+   void update(double x, double y);
+@@ -177,4 +174,3 @@
+     _timer->start();
+   }
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/generationCounters.cpp openjdk/hotspot/src/share/vm/gc_implementation/shared/generationCounters.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/generationCounters.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/generationCounters.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)generationCounters.cpp	1.10 07/05/05 17:05:34 JVM"
+-#endif
+ /*
+  * Copyright 2002-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/generationCounters.hpp openjdk/hotspot/src/share/vm/gc_implementation/shared/generationCounters.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/generationCounters.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/generationCounters.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)generationCounters.hpp	1.14 07/05/05 17:05:34 JVM"
+-#endif
+ /*
+  * Copyright 2002-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A GenerationCounter is a holder class for performance counters
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/gSpaceCounters.cpp openjdk/hotspot/src/share/vm/gc_implementation/shared/gSpaceCounters.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/gSpaceCounters.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/gSpaceCounters.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)gSpaceCounters.cpp	1.11 07/05/05 17:05:33 JVM"
+-#endif
+ /*
+  * Copyright 2002-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -32,7 +29,7 @@
+                                Generation* g, GenerationCounters* gc,
+                                bool sampled) :
+    _gen(g) {
+-   
++
+   if (UsePerfData) {
+     EXCEPTION_MARK;
+     ResourceMark rm;
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/gSpaceCounters.hpp openjdk/hotspot/src/share/vm/gc_implementation/shared/gSpaceCounters.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/gSpaceCounters.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/gSpaceCounters.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)gSpaceCounters.hpp	1.14 07/05/05 17:05:33 JVM"
+-#endif
+ /*
+  * Copyright 2002-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A GSpaceCounter is a holder class for performance counters
+@@ -50,7 +47,7 @@
+   ~GSpaceCounters() {
+     if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
+   }
+-  
++
+   inline void update_capacity() {
+     _capacity->set_value(_gen->capacity());
+   }
+@@ -75,7 +72,7 @@
+ 
+   debug_only(
+     // for security reasons, we do not allow arbitrary reads from
+-    // the counters as they may live in shared memory. 
++    // the counters as they may live in shared memory.
+     jlong used() {
+       return _used->get_value();
+     }
+@@ -103,4 +100,3 @@
+       return _gen->used();
+     }
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.cpp openjdk/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)immutableSpace.cpp	1.13 07/05/05 17:05:34 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -80,4 +77,3 @@
+   }
+   guarantee(p == end(), "end of last object must match end of space");
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.hpp openjdk/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)immutableSpace.hpp	1.14 07/05/05 17:05:33 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,14 +19,14 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // An ImmutableSpace is a viewport into a contiguous range
+ // (or subrange) of previously allocated objects.
+ 
+ // Invariant: bottom() and end() are on page_size boundaries and
+-// bottom() <= end() 
++// bottom() <= end()
+ 
+ class ImmutableSpace: public CHeapObj {
+   friend class VMStructs;
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/isGCActiveMark.hpp openjdk/hotspot/src/share/vm/gc_implementation/shared/isGCActiveMark.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/isGCActiveMark.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/isGCActiveMark.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)isGCActiveMark.hpp	1.9 07/05/05 17:05:34 JVM"
+-#endif
+ /*
+  * Copyright 2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This class provides a method for block structured setting of the
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/liveRange.hpp openjdk/hotspot/src/share/vm/gc_implementation/shared/liveRange.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/liveRange.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/liveRange.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)liveRange.hpp	1.11 07/05/05 17:05:34 JVM"
+-#endif
+ /*
+  * Copyright 2001-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This is a shared helper class used during phase 3 and 4 to move all the objects
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp openjdk/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)markSweep.cpp	1.196 07/05/05 17:05:35 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -33,8 +30,8 @@
+ 
+ GrowableArray<oop>*     MarkSweep::_preserved_oop_stack = NULL;
+ GrowableArray<markOop>* MarkSweep::_preserved_mark_stack= NULL;
+-size_t			MarkSweep::_preserved_count = 0;
+-size_t			MarkSweep::_preserved_count_max = 0;
++size_t                  MarkSweep::_preserved_count = 0;
++size_t                  MarkSweep::_preserved_count_max = 0;
+ PreservedMark*          MarkSweep::_preserved_marks = NULL;
+ ReferenceProcessor*     MarkSweep::_ref_processor   = NULL;
+ 
+@@ -47,8 +44,8 @@
+ size_t                  MarkSweep::_live_oops_index_at_perm = 0;
+ GrowableArray<oop*>*    MarkSweep::_other_refs_stack = NULL;
+ GrowableArray<oop*>*    MarkSweep::_adjusted_pointers = NULL;
+-bool 			MarkSweep::_pointer_tracking = false;
+-bool 			MarkSweep::_root_tracking = true;
++bool                    MarkSweep::_pointer_tracking = false;
++bool                    MarkSweep::_root_tracking = true;
+ 
+ GrowableArray<HeapWord*>* MarkSweep::_cur_gc_live_oops = NULL;
+ GrowableArray<HeapWord*>* MarkSweep::_cur_gc_live_oops_moved_to = NULL;
+@@ -75,7 +72,7 @@
+ 
+ void MarkSweep::mark_and_follow(oop* p) {
+   assert(Universe::heap()->is_in_reserved(p),
+-	 "we should only be traversing objects here");
++         "we should only be traversing objects here");
+   oop m = *p;
+   if (m != NULL && !m->mark()->is_marked()) {
+     mark_object(m);
+@@ -93,7 +90,7 @@
+ MarkSweep::MarkAndPushClosure MarkSweep::mark_and_push_closure;
+ 
+ void MarkSweep::follow_root(oop* p) {
+-  assert(!Universe::heap()->is_in_reserved(p), 
++  assert(!Universe::heap()->is_in_reserved(p),
+          "roots shouldn't be things within the heap");
+ #ifdef VALIDATE_MARK_SWEEP
+   if (ValidateMarkSweep) {
+@@ -147,8 +144,8 @@
+ 
+ void MarkSweep::adjust_marks() {
+   assert(_preserved_oop_stack == NULL ||
+-	 _preserved_oop_stack->length() == _preserved_mark_stack->length(),
+-	 "inconsistent preserved oop stacks");
++         _preserved_oop_stack->length() == _preserved_mark_stack->length(),
++         "inconsistent preserved oop stacks");
+ 
+   // adjust the oops we saved earlier
+   for (size_t i = 0; i < _preserved_count; i++) {
+@@ -166,11 +163,11 @@
+ 
+ void MarkSweep::restore_marks() {
+   assert(_preserved_oop_stack == NULL ||
+-	 _preserved_oop_stack->length() == _preserved_mark_stack->length(),
+-	 "inconsistent preserved oop stacks");
++         _preserved_oop_stack->length() == _preserved_mark_stack->length(),
++         "inconsistent preserved oop stacks");
+   if (PrintGC && Verbose) {
+     gclog_or_tty->print_cr("Restoring %d marks", _preserved_count +
+-		  (_preserved_oop_stack ? _preserved_oop_stack->length() : 0));
++                  (_preserved_oop_stack ? _preserved_oop_stack->length() : 0));
+   }
+ 
+   // restore the marks we saved earlier
+@@ -183,7 +180,7 @@
+     for (int i = 0; i < _preserved_oop_stack->length(); i++) {
+       oop obj       = _preserved_oop_stack->at(i);
+       markOop mark  = _preserved_mark_stack->at(i);
+-      obj->set_mark(mark);      
++      obj->set_mark(mark);
+     }
+   }
+ }
+@@ -204,11 +201,11 @@
+     if (index != -1) {
+       int l = _root_refs_stack->length();
+       if (l > 0 && l - 1 != index) {
+-	oop* last = _root_refs_stack->pop();
+-	assert(last != p, "should be different");
+-	_root_refs_stack->at_put(index, last);
++        oop* last = _root_refs_stack->pop();
++        assert(last != p, "should be different");
++        _root_refs_stack->at_put(index, last);
+       } else {
+-	_root_refs_stack->remove(p);
++        _root_refs_stack->remove(p);
+       }
+     }
+   }
+@@ -231,7 +228,7 @@
+   if (ValidateMarkSweep) {
+     _adjusted_pointers->clear();
+     _pointer_tracking = true;
+-    
++
+     AdjusterTracker checker;
+     obj->oop_iterate(&checker);
+   }
+@@ -272,9 +269,9 @@
+ }
+ 
+ void MarkSweep::live_oop_moved_to(HeapWord* q, size_t size,
+-				  HeapWord* compaction_top) {
++                                  HeapWord* compaction_top) {
+   assert(oop(q)->forwardee() == NULL || oop(q)->forwardee() == oop(compaction_top),
+-	 "should be moved to forwarded location");
++         "should be moved to forwarded location");
+   if (ValidateMarkSweep) {
+     MarkSweep::validate_live_oop(oop(q), size);
+     _live_oops_moved_to->push(oop(compaction_top));
+@@ -358,4 +355,3 @@
+ }
+ 
+ #endif
+-
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp openjdk/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)markSweep.hpp	1.67 07/05/17 15:52:55 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class ReferenceProcessor;
+@@ -58,7 +55,7 @@
+   // In line closure decls
+   //
+ 
+-  class FollowRootClosure: public OopsInGenClosure{ 
++  class FollowRootClosure: public OopsInGenClosure{
+    public:
+     void do_oop(oop* p) { follow_root(p); }
+     virtual const bool do_nmethods() const { return true; }
+@@ -108,17 +105,17 @@
+   //
+  protected:
+   // Traversal stack used during phase1
+-  static GrowableArray<oop>*             _marking_stack;   
++  static GrowableArray<oop>*             _marking_stack;
+   // Stack for live klasses to revisit at end of marking phase
+-  static GrowableArray<Klass*>*          _revisit_klass_stack;   
++  static GrowableArray<Klass*>*          _revisit_klass_stack;
+ 
+   // Space for storing/restoring mark word
+   static GrowableArray<markOop>*         _preserved_mark_stack;
+   static GrowableArray<oop>*             _preserved_oop_stack;
+-  static size_t			         _preserved_count;
+-  static size_t			         _preserved_count_max;
++  static size_t                          _preserved_count;
++  static size_t                          _preserved_count_max;
+   static PreservedMark*                  _preserved_marks;
+-  
++
+   // Reference processing (used in ...follow_contents)
+   static ReferenceProcessor*             _ref_processor;
+ 
+@@ -179,11 +176,11 @@
+ 
+   static void mark_and_follow(oop* p);    // Mark pointer and follow contents.
+   static void _mark_and_push(oop* p);     // Mark pointer and push obj on
+-					  // marking stack.
++                                          // marking stack.
++
+ 
+-  
+   static void mark_and_push(oop* p) {     // Check mark and maybe push on
+-					  // marking stack
++                                          // marking stack
+     // assert(Universe::is_reserved_heap((oop)p), "we should only be traversing objects here");
+     oop m = *p;
+     if (m != NULL && !m->mark()->is_marked()) {
+@@ -199,7 +196,7 @@
+   static void restore_marks();            // Restore the marks that we saved in preserve_mark
+ 
+   static void _adjust_pointer(oop* p, bool isroot);
+-  
++
+   static void adjust_root_pointer(oop* p) { _adjust_pointer(p, true); }
+   static void adjust_pointer(oop* p)      { _adjust_pointer(p, false); }
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp openjdk/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)markSweep.inline.hpp	1.17 07/05/29 09:44:12 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ inline void MarkSweep::_adjust_pointer(oop* p, bool isroot) {
+@@ -38,7 +35,7 @@
+     if (new_pointer != NULL) {
+       *p = new_pointer;
+       assert(Universe::heap()->is_in_reserved(new_pointer),
+-	     "should be in object space");
++             "should be in object space");
+       VALIDATE_MARK_SWEEP_ONLY(saved_new_pointer = new_pointer);
+     }
+   }
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp openjdk/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)mutableNUMASpace.cpp	1.8 07/05/05 17:05:35 JVM"
+-#endif
+ 
+ /*
+  * Copyright 2006-2007 Sun Microsystems, Inc.  All Rights Reserved.
+@@ -23,7 +20,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -79,7 +76,7 @@
+                                                      os::vm_page_size());
+         if (crossing_start != crossing_end) {
+           // If object header crossed a small page boundary we mark the area
+-	  // as invalid rounding it to a page_size().
++          // as invalid rounding it to a page_size().
+           HeapWord *start = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
+           HeapWord *end = MIN2((HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), page_size()),
+                                s->end());
+@@ -385,7 +382,7 @@
+         }
+       }
+     }
+-    *top_region = MemRegion(intersection.end(), new_region.end()); 
++    *top_region = MemRegion(intersection.end(), new_region.end());
+   } else {
+     *top_region = MemRegion();
+   }
+@@ -468,10 +465,10 @@
+   if (!old_region.equals(region())) {
+     new_region = MemRegion(rounded_bottom, rounded_end);
+     MemRegion intersection = new_region.intersection(old_region);
+-    if (intersection.start() == NULL || 
+-        intersection.end() == NULL   || 
+-	prev_page_size > page_size()) { // If the page size got smaller we have to change 
+-	                                // the page size preference for the whole space. 
++    if (intersection.start() == NULL ||
++        intersection.end() == NULL   ||
++        prev_page_size > page_size()) { // If the page size got smaller we have to change
++                                        // the page size preference for the whole space.
+       intersection = MemRegion(new_region.start(), new_region.start());
+     }
+     select_tails(new_region, intersection, &bottom_region, &top_region);
+@@ -568,7 +565,7 @@
+   }
+ }
+ 
+-// Set the top of the whole space. 
++// Set the top of the whole space.
+ // Mark the the holes in chunks below the top() as invalid.
+ void MutableNUMASpace::set_top(HeapWord* value) {
+   bool found_top = false;
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp openjdk/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)mutableNUMASpace.hpp	1.8 07/05/05 17:05:34 JVM"
+-#endif
+ /*
+  * Copyright 2006-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,19 +19,19 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ /*
+  *    The NUMA-aware allocator (MutableNUMASpace) is basically a modification
+  * of MutableSpace which preserves interfaces but implements different
+  * functionality. The space is split into chunks for each locality group
+- * (resizing for adaptive size policy is also supported). For each thread 
++ * (resizing for adaptive size policy is also supported). For each thread
+  * allocations are performed in the chunk corresponding to the home locality
+- * group of the thread. Whenever any chunk fills-in the young generation 
++ * group of the thread. Whenever any chunk fills-in the young generation
+  * collection occurs.
+  *   The chunks can be also be adaptively resized. The idea behind the adaptive
+- * sizing is to reduce the loss of the space in the eden due to fragmentation. 
++ * sizing is to reduce the loss of the space in the eden due to fragmentation.
+  * The main cause of fragmentation is uneven allocation rates of threads.
+  * The allocation rate difference between locality groups may be caused either by
+  * application specifics or by uneven LWP distribution by the OS. Besides,
+@@ -47,8 +44,8 @@
+  * bytes that can be moved during the adaptation phase.
+  *   Chunks may contain pages from a wrong locality group. The page-scanner has
+  * been introduced to address the problem. Remote pages typically appear due to
+- * the memory shortage in the target locality group. Besides Solaris would 
+- * allocate a large page from the remote locality group even if there are small 
++ * the memory shortage in the target locality group. Besides Solaris would
++ * allocate a large page from the remote locality group even if there are small
+  * local pages available. The page-scanner scans the pages right after the
+  * collection and frees remote pages in hope that subsequent reallocation would
+  * be more successful. This approach proved to be useful on systems with high
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.cpp openjdk/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)mutableSpace.cpp	1.22 07/05/05 17:05:35 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -56,7 +53,7 @@
+     HeapWord* new_top = obj + size;
+     set_top(new_top);
+     assert(is_object_aligned((intptr_t)obj) && is_object_aligned((intptr_t)new_top),
+-	   "checking alignment");
++           "checking alignment");
+     return obj;
+   } else {
+     return NULL;
+@@ -73,8 +70,8 @@
+       // result can be one of two:
+       //  the old top value: the exchange succeeded
+       //  otherwise: the new value of the top is returned.
+-      if (result != obj) {          
+-	continue; // another thread beat us to the allocation, try again
++      if (result != obj) {
++        continue; // another thread beat us to the allocation, try again
+       }
+       assert(is_object_aligned((intptr_t)obj) && is_object_aligned((intptr_t)new_top),
+              "checking alignment");
+@@ -110,14 +107,14 @@
+ 
+ void MutableSpace::print_short() const { print_short_on(tty); }
+ void MutableSpace::print_short_on( outputStream* st) const {
+-  st->print(" space " SIZE_FORMAT "K, %d%% used", capacity_in_bytes() / K, 
++  st->print(" space " SIZE_FORMAT "K, %d%% used", capacity_in_bytes() / K,
+             (int) ((double) used_in_bytes() * 100 / capacity_in_bytes()));
+ }
+ 
+ void MutableSpace::print() const { print_on(tty); }
+ void MutableSpace::print_on(outputStream* st) const {
+   MutableSpace::print_short_on(st);
+-  st->print_cr(" [" INTPTR_FORMAT "," INTPTR_FORMAT "," INTPTR_FORMAT ")", 
++  st->print_cr(" [" INTPTR_FORMAT "," INTPTR_FORMAT "," INTPTR_FORMAT ")",
+                  bottom(), top(), end());
+ }
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.hpp openjdk/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)mutableSpace.hpp	1.22 07/05/05 17:05:35 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A MutableSpace is a subtype of ImmutableSpace that supports the
+@@ -30,7 +27,7 @@
+ // be only partially full, and the querry methods that go with such
+ // an assumption.
+ //
+-// Invariant: (ImmutableSpace +) bottom() <= top() <= end() 
++// Invariant: (ImmutableSpace +) bottom() <= top() <= end()
+ // top() is inclusive and end() is exclusive.
+ 
+ class MutableSpace: public ImmutableSpace {
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/spaceCounters.cpp openjdk/hotspot/src/share/vm/gc_implementation/shared/spaceCounters.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/spaceCounters.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/spaceCounters.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)spaceCounters.cpp	1.10 07/05/05 17:05:35 JVM"
+-#endif
+ /*
+  * Copyright 2002-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -31,7 +28,7 @@
+ SpaceCounters::SpaceCounters(const char* name, int ordinal, size_t max_size,
+                              MutableSpace* m, GenerationCounters* gc) :
+    _object_space(m) {
+-   
++
+   if (UsePerfData) {
+     EXCEPTION_MARK;
+     ResourceMark rm;
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/spaceCounters.hpp openjdk/hotspot/src/share/vm/gc_implementation/shared/spaceCounters.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/spaceCounters.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/spaceCounters.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)spaceCounters.hpp	1.11 07/05/05 17:05:35 JVM"
+-#endif
+ /*
+  * Copyright 2002-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A SpaceCounter is a holder class for performance counters
+@@ -50,7 +47,7 @@
+   ~SpaceCounters() {
+     if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
+   }
+-  
++
+   inline void update_capacity() {
+     _capacity->set_value(_object_space->capacity_in_bytes());
+   }
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp openjdk/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)vmGCOperations.cpp	1.21 07/05/29 09:44:12 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ # include "incls/_precompiled.incl"
+ # include "incls/_vmGCOperations.cpp.incl"
+@@ -56,8 +53,8 @@
+ // resulting in multiple gc requests.  We only want to do one of them.
+ // In case a GC locker is active and the need for a GC is already signalled,
+ // we want to skip this GC attempt altogether, without doing a futile
+-// safepoint operation. 
+-bool VM_GC_Operation::skip_operation() const { 
++// safepoint operation.
++bool VM_GC_Operation::skip_operation() const {
+   bool skip = (_gc_count_before != Universe::heap()->total_collections());
+   if (_full && skip) {
+     skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
+@@ -136,7 +133,7 @@
+     set_gc_locked();
+   }
+   notify_gc_end();
+-} 			
++}
+ 
+ void VM_GenCollectFull::doit() {
+   JvmtiGCFullMarker jgcm;
+@@ -146,4 +143,4 @@
+   GCCauseSetter gccs(gch, _gc_cause);
+   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
+   notify_gc_end();
+-} 			
++}
+diff -ruN openjdk6/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp openjdk/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
+--- openjdk6/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)vmGCOperations.hpp	1.14 07/05/29 09:44:12 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,14 +19,14 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The following class hierarchy represents
+ // a set of operations (VM_Operation) related to GC.
+ //
+ //  VM_Operation
+-//      VM_GC_Operation 
++//      VM_GC_Operation
+ //          VM_GC_HeapInspection
+ //          VM_GenCollectForAllocation
+ //          VM_GenCollectFull
+@@ -40,7 +37,7 @@
+ //  VM_GC_Operation
+ //   - implements methods common to all classes in the hierarchy:
+ //     prevents multiple gc requests and manages lock on heap;
+-//     
++//
+ //  VM_GC_HeapInspection
+ //   - prints class histogram on SIGBREAK if PrintClassHistogram
+ //     is specified; and also the attach "inspectheap" operation
+@@ -51,13 +48,13 @@
+ //   - this operation is invoked when allocation is failed;
+ //     operation performs garbage collection and tries to
+ //     allocate afterwards;
+-//     
++//
+ //  VM_GenCollectFull
+ //  VM_GenCollectFullConcurrent
+ //  VM_ParallelGCSystemGC
+-//   - these operations preform full collection of heaps of 
++//   - these operations preform full collection of heaps of
+ //     different kind
+-//  
++//
+ 
+ class VM_GC_Operation: public VM_Operation {
+  protected:
+@@ -93,7 +90,7 @@
+     }
+   }
+   ~VM_GC_Operation() {}
+-  
++
+   // Acquire the reference synchronization lock
+   virtual bool doit_prologue();
+   // Do notifyAll (if needed) and release held lock
+@@ -146,7 +143,7 @@
+     _res = NULL;
+   }
+   ~VM_GenCollectForAllocation()  {}
+-  virtual VMOp_Type type() const { return VMOp_GenCollectForAllocation; }    
++  virtual VMOp_Type type() const { return VMOp_GenCollectForAllocation; }
+   virtual void doit();
+   HeapWord* result() const       { return _res; }
+ };
+@@ -161,7 +158,7 @@
+   VM_GenCollectFull(unsigned int gc_count_before,
+                     unsigned int full_gc_count_before,
+                     GCCause::Cause gc_cause,
+-                      int max_level) 
++                      int max_level)
+     : VM_GC_Operation(gc_count_before, full_gc_count_before, true /* full */),
+       _max_level(max_level)
+   { _gc_cause = gc_cause; }
+diff -ruN openjdk6/hotspot/src/share/vm/gc_interface/collectedHeap.cpp openjdk/hotspot/src/share/vm/gc_interface/collectedHeap.cpp
+--- openjdk6/hotspot/src/share/vm/gc_interface/collectedHeap.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_interface/collectedHeap.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)collectedHeap.cpp	1.23 07/05/05 17:05:40 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -121,10 +118,10 @@
+   if (obj == NULL) {
+     return NULL;
+   }
+-  if (ZeroTLAB) {		
++  if (ZeroTLAB) {
+     // ..and clear it.
+     Copy::zero_to_words(obj, new_tlab_size);
+-  } else {			
++  } else {
+     // ...and clear just the allocated object.
+     Copy::zero_to_words(obj, size);
+   }
+@@ -132,6 +129,23 @@
+   return obj;
+ }
+ 
++oop CollectedHeap::new_store_barrier(oop new_obj) {
++  // %%% This needs refactoring.  (It was imported from the server compiler.)
++  guarantee(can_elide_tlab_store_barriers(), "store barrier elision not supported");
++  BarrierSet* bs = this->barrier_set();
++  assert(bs->has_write_region_opt(), "Barrier set does not have write_region");
++  int new_size = new_obj->size();
++  bs->write_region(MemRegion((HeapWord*)new_obj, new_size));
++  return new_obj;
++}
++
++bool CollectedHeap::can_elide_permanent_oop_store_barriers() const {
++  // %%% This needs refactoring.  (It was gating logic from the server compiler.)
++  guarantee(kind() < CollectedHeap::G1CollectedHeap, "");
++  return !UseConcMarkSweepGC;
++}
++
++
+ HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
+   guarantee(false, "thread-local allocation buffers not supported");
+   return NULL;
+@@ -142,7 +156,7 @@
+   // See note in ensure_parsability() below.
+   assert(SafepointSynchronize::is_at_safepoint() ||
+          !is_init_completed(),
+-	 "should only fill tlabs at safepoint");
++         "should only fill tlabs at safepoint");
+   // The main thread starts allocating via a TLAB even before it
+   // has added itself to the threads list at vm boot-up.
+   assert(Threads::first() != NULL,
+@@ -163,7 +177,7 @@
+   // started allocating but are now a full-fledged JavaThread
+   // (and have thus made our TLAB's) available for filling.
+   assert(SafepointSynchronize::is_at_safepoint() ||
+-         !is_init_completed(), 
++         !is_init_completed(),
+          "Should only be called at a safepoint or at start-up"
+          " otherwise concurrent mutator activity may make heap "
+          " unparsable again");
+@@ -176,7 +190,7 @@
+   if (UseTLAB) {
+     assert(SafepointSynchronize::is_at_safepoint() ||
+          !is_init_completed(),
+-	 "should only accumulate statistics on tlabs at safepoint");
++         "should only accumulate statistics on tlabs at safepoint");
+ 
+     ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
+   }
+@@ -186,7 +200,7 @@
+   if (UseTLAB) {
+     assert(SafepointSynchronize::is_at_safepoint() ||
+          !is_init_completed(),
+-	 "should only resize tlabs at safepoint");
++         "should only resize tlabs at safepoint");
+ 
+     ThreadLocalAllocBuffer::resize_all_tlabs();
+   }
+diff -ruN openjdk6/hotspot/src/share/vm/gc_interface/collectedHeap.hpp openjdk/hotspot/src/share/vm/gc_interface/collectedHeap.hpp
+--- openjdk6/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)collectedHeap.hpp	1.55 07/05/17 15:52:57 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A "CollectedHeap" is an implementation of a java heap for HotSpot.  This
+@@ -108,17 +105,17 @@
+ 
+   // Helper functions for (VM) allocation.
+   inline static void post_allocation_setup_common(KlassHandle klass,
+-						  HeapWord* obj, size_t size); 
++                                                  HeapWord* obj, size_t size);
+   inline static void post_allocation_setup_no_klass_install(KlassHandle klass,
+-						            HeapWord* objPtr,
++                                                            HeapWord* objPtr,
+                                                             size_t size);
+ 
+   inline static void post_allocation_setup_obj(KlassHandle klass,
+-					       HeapWord* obj, size_t size);
++                                               HeapWord* obj, size_t size);
+ 
+   inline static void post_allocation_setup_array(KlassHandle klass,
+-						 HeapWord* obj, size_t size,
+-						 int length);
++                                                 HeapWord* obj, size_t size,
++                                                 int length);
+ 
+   // Clears an allocated object.
+   inline static void init_obj(HeapWord* obj, size_t size);
+@@ -141,7 +138,7 @@
+   virtual CollectedHeap::Name kind() const { return CollectedHeap::Abstract; }
+ 
+   /**
+-   * Returns JNI error code JNI_ENOMEM if memory could not be allocated, 
++   * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
+    * and JNI_OK on success.
+    */
+   virtual jint initialize() = 0;
+@@ -166,7 +163,7 @@
+   // objects has reached the maximal committed limit that it can
+   // reach, without a garbage collection.
+   virtual bool is_maximal_no_gc() const = 0;
+-  
++
+   virtual size_t permanent_capacity() const = 0;
+   virtual size_t permanent_used() const = 0;
+ 
+@@ -197,9 +194,9 @@
+   }
+ 
+   // Let's define some terms: a "closed" subset of a heap is one that
+-  // 
++  //
+   // 1) contains all currently-allocated objects, and
+-  // 
++  //
+   // 2) is closed under reference: no object in the closed subset
+   //    references one outside the closed subset.
+   //
+@@ -245,6 +242,11 @@
+     return p == NULL || is_in_permanent(p);
+   }
+ 
++  // Returns "TRUE" if "p" is a method oop in the
++  // current heap, with high probability. This predicate
++  // is not stable, in general.
++  bool is_valid_method(oop p) const;
++
+   void set_gc_cause(GCCause::Cause v) {
+      if (UsePerfData) {
+        _gc_lastcause = _gc_cause;
+@@ -269,28 +271,28 @@
+   // Some heaps may want to manage "permanent" data uniquely. These default
+   // to the general routines if the heap does not support such handling.
+   inline static oop permanent_obj_allocate(KlassHandle klass, int size, TRAPS);
+-  // permanent_obj_allocate_no_klass_install() does not do the installation of 
+-  // the klass pointer in the newly created object (as permanent_obj_allocate() 
++  // permanent_obj_allocate_no_klass_install() does not do the installation of
++  // the klass pointer in the newly created object (as permanent_obj_allocate()
+   // above does).  This allows for a delay in the installation of the klass
+   // pointer that is needed during the create of klassKlass's.  The
+   // method post_allocation_install_obj_klass() is used to install the
+   // klass pointer.
+   inline static oop permanent_obj_allocate_no_klass_install(KlassHandle klass,
+-							    int size, 
+-							    TRAPS);
+-  inline static void post_allocation_install_obj_klass(KlassHandle klass, 
+-					               oop obj,
+-						       int size);
++                                                            int size,
++                                                            TRAPS);
++  inline static void post_allocation_install_obj_klass(KlassHandle klass,
++                                                       oop obj,
++                                                       int size);
+   inline static oop permanent_array_allocate(KlassHandle klass, int size, int length, TRAPS);
+ 
+   // Raw memory allocation facilities
+   // The obj and array allocate methods are covers for these methods.
+   // The permanent allocation method should default to mem_allocate if
+   // permanent memory isn't supported.
+-  virtual HeapWord* mem_allocate(size_t size, 
+-				 bool is_noref, 
+-				 bool is_tlab, 
+-				 bool* gc_overhead_limit_was_exceeded) = 0;
++  virtual HeapWord* mem_allocate(size_t size,
++                                 bool is_noref,
++                                 bool is_tlab,
++                                 bool* gc_overhead_limit_was_exceeded) = 0;
+   virtual HeapWord* permanent_mem_allocate(size_t size) = 0;
+ 
+   // The boundary between a "large" and "small" array of primitives, in words.
+@@ -347,7 +349,7 @@
+   // If the heap supports thread-local allocation buffers, it should override
+   // the following methods:
+   // Returns "true" iff the heap supports thread-local allocation buffers.
+-  // The default is "no".  
++  // The default is "no".
+   virtual bool supports_tlab_allocation() const {
+     return false;
+   }
+@@ -363,7 +365,26 @@
+     guarantee(false, "thread-local allocation buffers not supported");
+     return 0;
+   }
+-  
++  // Can a compiler initialize a new object without store barriers?
++  // This permission only extends from the creation of a new object
++  // via a TLAB up to the first subsequent safepoint.
++  virtual bool can_elide_tlab_store_barriers() const {
++    guarantee(kind() < CollectedHeap::G1CollectedHeap, "else change or refactor this");
++    return true;
++  }
++  // If a compiler is eliding store barriers for TLAB-allocated objects,
++  // there is probably a corresponding slow path which can produce
++  // an object allocated anywhere.  The compiler's runtime support
++  // promises to call this function on such a slow-path-allocated
++  // object before performing initializations that have elided
++  // store barriers.  Returns new_obj, or maybe a safer copy thereof.
++  virtual oop new_store_barrier(oop new_obj);
++
++  // Can a compiler elide a store barrier when it writes
++  // a permanent oop into the heap?  Applies when the compiler
++  // is storing x to the heap, where x->is_perm() is true.
++  virtual bool can_elide_permanent_oop_store_barriers() const;
++
+   // Does this heap support heap inspection (+PrintClassHistogram?)
+   virtual bool supports_heap_inspection() const {
+     return false;   // Until RFE 5023697 is implemented
+@@ -386,7 +407,7 @@
+   // Returns "true" iff there is a stop-world GC in progress.  (I assume
+   // that it should answer "false" for the concurrent part of a concurrent
+   // collector -- dld).
+-  bool is_gc_active() { return _is_gc_active; }
++  bool is_gc_active() const { return _is_gc_active; }
+ 
+   // Total number of GC collections (started)
+   unsigned int total_collections() const { return _total_collections; }
+@@ -433,13 +454,13 @@
+   // possible to find its size, and thus to progress forward to the next
+   // block.  (Blocks may be of different sizes.)  Thus, blocks may
+   // represent Java objects, or they might be free blocks in a
+-  // free-list-based heap (or subheap), as long as the two kinds are 
++  // free-list-based heap (or subheap), as long as the two kinds are
+   // distinguishable and the size of each is determinable.
+ 
+   // Returns the address of the start of the "block" that contains the
+   // address "addr".  We say "blocks" instead of "object" since some heaps
+   // may not pack objects densely; a chunk may either be an object or a
+-  // non-object. 
++  // non-object.
+   virtual HeapWord* block_start(const void* addr) const = 0;
+ 
+   // Requires "addr" to be the start of a chunk, and returns its size.
+@@ -460,7 +481,7 @@
+ 
+   virtual void print() const = 0;
+   virtual void print_on(outputStream* st) const = 0;
+-  
++
+   // Print all GC threads (other than the VM thread)
+   // used by this heap.
+   virtual void print_gc_threads_on(outputStream* st) const = 0;
+@@ -487,7 +508,7 @@
+   // GC in which promotion failure occurred.
+   inline void reset_promotion_should_fail(volatile size_t* count);
+   inline void reset_promotion_should_fail();
+-#endif	// #ifndef PRODUCT
++#endif  // #ifndef PRODUCT
+ 
+ #ifdef ASSERT
+   static int fired_fake_oom() {
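The hunks above add a compiler/GC contract to CollectedHeap: a JIT may omit card-marking barriers for the stores that initialize a freshly TLAB-allocated object (valid only until the next safepoint), provided any slow-path allocation is first routed through new_store_barrier(). A minimal sketch of that contract (compiled_code_slow_alloc and its control flow are invented for illustration; only the CollectedHeap calls come from the patch):

    // Sketch only: the runtime side of the barrier-elision contract.
    oop compiled_code_slow_alloc(KlassHandle klass, int size, TRAPS) {
      oop obj = CollectedHeap::obj_allocate(klass, size, CHECK_NULL);
      CollectedHeap* heap = Universe::heap();
      if (heap->can_elide_tlab_store_barriers()) {
        // The JIT emitted the initializing stores with no card marks, so a
        // possibly-non-TLAB object must be vetted (or replaced) by the heap
        // before those stores execute.
        obj = heap->new_store_barrier(obj);
      }
      return obj;
    }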
+diff -ruN openjdk6/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp openjdk/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp
+--- openjdk6/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)collectedHeap.inline.hpp	1.46 07/05/17 15:52:59 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Inline allocation implementations.
+@@ -35,7 +32,7 @@
+ }
+ 
+ void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
+-						           HeapWord* objPtr,
++                                                           HeapWord* objPtr,
+                                                            size_t size) {
+ 
+   oop obj = (oop)objPtr;
+@@ -54,7 +51,7 @@
+ 
+ void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,
+                                                    oop obj,
+-						   int size) {
++                                                   int size) {
+   // These asserts are kind of complicated because of klassKlass
+   // and the beginning of the world.
+   assert(klass() != NULL || !Universe::is_fully_initialized(), "NULL klass");
+@@ -64,8 +61,8 @@
+   obj->set_klass(klass());
+   assert(!Universe::is_fully_initialized() || obj->blueprint() != NULL,
+          "missing blueprint");
+- 
+-  // support for JVMTI VMObjectAlloc event (no-op if not enabled) 
++
++  // support for JVMTI VMObjectAlloc event (no-op if not enabled)
+   JvmtiExport::vm_object_alloc_event_collector(obj);
+ 
+   if (DTraceAllocProbes) {
+@@ -82,13 +79,13 @@
+   post_allocation_setup_common(klass, obj, size);
+   assert(Universe::is_bootstrapping() ||
+          !((oop)obj)->blueprint()->oop_is_array(), "must not be an array");
+-} 
++}
+ 
+ void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
+                                                 HeapWord* obj,
+                                                 size_t size,
+                                                 int length) {
+-  // Set array length before posting jvmti object alloc event 
++  // Set array length before posting jvmti object alloc event
+   // in post_allocation_setup_common()
+   assert(length >= 0, "length should be non-negative");
+   ((arrayOop)obj)->set_length(length);
+@@ -101,7 +98,7 @@
+   // Clear unhandled oops for memory allocation.  Memory allocation might
+   // not take out a lock if from tlab, so clear here.
+   CHECK_UNHANDLED_OOPS_ONLY(THREAD->clear_unhandled_oops();)
+-  
++
+   if (HAS_PENDING_EXCEPTION) {
+     NOT_PRODUCT(guarantee(false, "Should not allocate with exception pending"));
+     return NULL;  // caller does a CHECK_0 too
+@@ -118,10 +115,10 @@
+     }
+   }
+   bool gc_overhead_limit_was_exceeded;
+-  result = Universe::heap()->mem_allocate(size, 
+-					  is_noref, 
+-					  false, 
+-					  &gc_overhead_limit_was_exceeded);
++  result = Universe::heap()->mem_allocate(size,
++                                          is_noref,
++                                          false,
++                                          &gc_overhead_limit_was_exceeded);
+   if (result != NULL) {
+     NOT_PRODUCT(Universe::heap()->
+       check_for_non_bad_heap_word_value(result, size));
+@@ -137,7 +134,7 @@
+ 
+     if (JvmtiExport::should_post_resource_exhausted()) {
+       JvmtiExport::post_resource_exhausted(
+-        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP, 
++        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
+         "Java heap space");
+     }
+ 
+@@ -192,7 +189,7 @@
+ 
+   if (JvmtiExport::should_post_resource_exhausted()) {
+     JvmtiExport::post_resource_exhausted(
+-        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR, 
++        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
+         "PermGen space");
+   }
+ 
+@@ -230,7 +227,7 @@
+   HeapWord* obj = common_mem_allocate_init(size, false, CHECK_NULL);
+   post_allocation_setup_obj(klass, obj, size);
+   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
+-  return (oop)obj;  
++  return (oop)obj;
+ }
+ 
+ oop CollectedHeap::array_allocate(KlassHandle klass,
+@@ -243,7 +240,7 @@
+   HeapWord* obj = common_mem_allocate_init(size, false, CHECK_NULL);
+   post_allocation_setup_array(klass, obj, size, length);
+   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
+-  return (oop)obj;  
++  return (oop)obj;
+ }
+ 
+ oop CollectedHeap::large_typearray_allocate(KlassHandle klass,
+@@ -256,27 +253,27 @@
+   HeapWord* obj = common_mem_allocate_init(size, true, CHECK_NULL);
+   post_allocation_setup_array(klass, obj, size, length);
+   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
+-  return (oop)obj;  
++  return (oop)obj;
+ }
+ 
+ oop CollectedHeap::permanent_obj_allocate(KlassHandle klass, int size, TRAPS) {
+   oop obj = permanent_obj_allocate_no_klass_install(klass, size, CHECK_NULL);
+   post_allocation_install_obj_klass(klass, obj, size);
+-  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value((HeapWord*) obj, 
+-							      size));
+-  return obj;  
++  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value((HeapWord*) obj,
++                                                              size));
++  return obj;
+ }
+ 
+ oop CollectedHeap::permanent_obj_allocate_no_klass_install(KlassHandle klass,
+-							   int size, 
+-							   TRAPS) {
++                                                           int size,
++                                                           TRAPS) {
+   debug_only(check_for_valid_allocation_state());
+   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
+   assert(size >= 0, "int won't convert to size_t");
+   HeapWord* obj = common_permanent_mem_allocate_init(size, CHECK_NULL);
+   post_allocation_setup_no_klass_install(klass, obj, size);
+   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
+-  return (oop)obj;  
++  return (oop)obj;
+ }
+ 
+ oop CollectedHeap::permanent_array_allocate(KlassHandle klass,
+@@ -289,10 +286,48 @@
+   HeapWord* obj = common_permanent_mem_allocate_init(size, CHECK_NULL);
+   post_allocation_setup_array(klass, obj, size, length);
+   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
+-  return (oop)obj;  
++  return (oop)obj;
++}
++
++// Returns "TRUE" if "p" is a method oop in the
++// current heap with high probability. NOTE: The main
++// current consumers of this interface are Forte::
++// and ThreadProfiler::. In these cases, the
++// interpreter frame from which "p" came, may be
++// under construction when sampled asynchronously, so
++// the clients want to check that it represents a
++// valid method before using it. Nonetheless since
++// the clients do not typically lock out GC, the
++// predicate is_valid_method() is not stable, so
++// it is possible that by the time "p" is used, it
++// is no longer valid.
++inline bool CollectedHeap::is_valid_method(oop p) const {
++  return
++    p != NULL &&
++
++    // Check whether it is aligned at a HeapWord boundary.
++    Space::is_aligned(p) &&
++
++    // Check whether "method" is in the allocated part of the
++    // permanent generation -- this needs to be checked before
++    // p->klass() below to avoid a SEGV (but see below
++    // for a potential window of vulnerability).
++    is_permanent((void*)p) &&
++
++    // See if GC is active; however, there is still an
++    // apparently unavoidable window after this call
++    // and before the client of this interface uses "p".
++    // If the client chooses not to lock out GC, then
++    // it's a risk the client must accept.
++    !is_gc_active() &&
++
++    // Check that p is a methodOop.
++    p->klass() == Universe::methodKlassObj();
+ }
+ 
+-#ifndef	PRODUCT
++
++#ifndef PRODUCT
++
+ inline bool
+ CollectedHeap::promotion_should_fail(volatile size_t* count) {
+   // Access to count is not atomic; the value does not have to be exact.
+@@ -302,8 +337,8 @@
+     if (elapsed_gcs >= PromotionFailureALotInterval) {
+       // Test for unsigned arithmetic wrap-around.
+       if (++*count >= PromotionFailureALotCount) {
+-	*count = 0;
+-	return true;
++        *count = 0;
++        return true;
+       }
+     }
+   }
+@@ -324,4 +359,4 @@
+ inline void CollectedHeap::reset_promotion_should_fail() {
+   reset_promotion_should_fail(&_promotion_failure_alot_count);
+ }
+-#endif	// #ifndef PRODUCT
++#endif  // #ifndef PRODUCT
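The is_valid_method() predicate defined above is deliberately only probabilistic: it exists so that asynchronous samplers, which may observe interpreter frames still under construction, can vet a candidate oop cheaply. A minimal sketch of the intended client pattern (sampled_method_or_null is invented; only is_valid_method() and Universe::heap() come from the patch):

    // Sketch only.  Even a "true" answer can go stale before the oop is
    // used unless the caller locks out GC, so consume the result at once.
    methodOop sampled_method_or_null(oop candidate) {
      if (Universe::heap()->is_valid_method(candidate)) {
        return (methodOop)candidate;
      }
      return NULL;
    }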
+diff -ruN openjdk6/hotspot/src/share/vm/gc_interface/gcCause.cpp openjdk/hotspot/src/share/vm/gc_interface/gcCause.cpp
+--- openjdk6/hotspot/src/share/vm/gc_interface/gcCause.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_interface/gcCause.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)gcCause.cpp	1.20 07/05/05 17:05:40 JVM"
+-#endif
+ /*
+  * Copyright 2002-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/gc_interface/gcCause.hpp openjdk/hotspot/src/share/vm/gc_interface/gcCause.hpp
+--- openjdk6/hotspot/src/share/vm/gc_interface/gcCause.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/gc_interface/gcCause.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)gcCause.hpp	1.24 07/05/05 17:05:40 JVM"
+-#endif
+ /*
+  * Copyright 2002-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+@@ -72,10 +69,10 @@
+             cause == GCCause::_jvmti_force_gc);
+   }
+   inline static bool is_serviceability_requested_gc(GCCause::Cause
+-						             cause) {
++                                                             cause) {
+     return (cause == GCCause::_jvmti_force_gc ||
+-	    cause == GCCause::_heap_inspection || 
+-	    cause == GCCause::_heap_dump);
++            cause == GCCause::_heap_inspection ||
++            cause == GCCause::_heap_dump);
+   }
+   // Return a string describing the GCCause.
+   static const char* to_string(GCCause::Cause cause);
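The reformatted predicate above groups the three causes that serviceability tooling can raise (JVMTI ForceGC, heap inspection, heap dump). A hedged usage sketch; log_gc_trigger is invented, while is_serviceability_requested_gc() and to_string() are declared in the hunk:

    // Sketch only: separate tool-requested GCs from allocation-driven ones.
    void log_gc_trigger(GCCause::Cause cause, outputStream* st) {
      if (GCCause::is_serviceability_requested_gc(cause)) {
        st->print_cr("GC requested by serviceability: %s",
                     GCCause::to_string(cause));
      }
    }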
+diff -ruN openjdk6/hotspot/src/share/vm/includeDB_compiler1 openjdk/hotspot/src/share/vm/includeDB_compiler1
+--- openjdk6/hotspot/src/share/vm/includeDB_compiler1	2008-02-28 05:02:31.000000000 -0500
++++ openjdk/hotspot/src/share/vm/includeDB_compiler1	2008-01-31 09:19:00.000000000 -0500
+@@ -281,7 +281,7 @@
+ c1_LinearScan_<arch>.hpp                generate_platform_dependent_include
+ 
+ c1_MacroAssembler.hpp                   assembler.hpp
+-c1_MacroAssembler.hpp                   assembler_<arch>.inline.hpp
++c1_MacroAssembler.hpp                   assembler_<arch_model>.inline.hpp
+ 
+ c1_MacroAssembler_<arch>.cpp            arrayOop.hpp
+ c1_MacroAssembler_<arch>.cpp            biasedLocking.hpp
+@@ -326,7 +326,7 @@
+ c1_Runtime1.cpp                         disassembler_<arch>.hpp
+ c1_Runtime1.cpp                         events.hpp
+ c1_Runtime1.cpp                         interfaceSupport.hpp
+-c1_Runtime1.cpp                         interpreter_<arch>.hpp
++c1_Runtime1.cpp                         interpreter.hpp
+ c1_Runtime1.cpp                         javaCalls.hpp
+ c1_Runtime1.cpp                         objArrayKlass.hpp
+ c1_Runtime1.cpp                         oop.inline.hpp
+@@ -352,7 +352,7 @@
+ c1_Runtime1_<arch>.cpp                  c1_MacroAssembler.hpp
+ c1_Runtime1_<arch>.cpp                  c1_Runtime1.hpp
+ c1_Runtime1_<arch>.cpp                  compiledICHolderOop.hpp
+-c1_Runtime1_<arch>.cpp                  interpreter_<arch>.hpp
++c1_Runtime1_<arch>.cpp                  interpreter.hpp
+ c1_Runtime1_<arch>.cpp                  jvmtiExport.hpp
+ c1_Runtime1_<arch>.cpp                  nativeInst_<arch>.hpp
+ c1_Runtime1_<arch>.cpp                  oop.inline.hpp
+@@ -395,8 +395,6 @@
+ 
+ compileBroker.cpp                       c1_Compiler.hpp
+ 
+-fprofiler.cpp                           c1_Compiler.hpp
+-
+ frame.hpp                               c1_Defs.hpp
+ 
+ frame_<arch>.cpp                        c1_Runtime1.hpp
+@@ -408,7 +406,7 @@
+ 
+ instanceKlass.cpp                       c1_Compiler.hpp
+ 
+-interpreter_<arch>.cpp                  c1_Runtime1.hpp
++interpreter_<arch_model>.cpp            c1_Runtime1.hpp
+ 
+ java.cpp                                c1_Compiler.hpp
+ java.cpp                                c1_Runtime1.hpp
+@@ -419,7 +417,7 @@
+ 
+ os_<os_family>.cpp                      c1_Runtime1.hpp
+ 
+-os_<os_family>_<arch>.cpp               c1_Runtime1.hpp
++os_<os_arch>.cpp                        c1_Runtime1.hpp
+ 
+ registerMap.hpp                         c1_Defs.hpp
+ 
+@@ -427,12 +425,11 @@
+ 
+ sharedRuntime.cpp                       c1_Runtime1.hpp
+ 
+-sharedRuntime_<arch>.cpp                c1_Runtime1.hpp
++sharedRuntime_<arch_model>.cpp          c1_Runtime1.hpp
+ 
+ thread.cpp                              c1_Compiler.hpp
+ 
+ top.hpp                                 c1_globals.hpp
+ 
+-vmStructs.cpp                           c1_Defs.hpp
+-vmStructs.cpp                           c1_Runtime1.hpp
++vmStructs.hpp                           c1_Runtime1.hpp
+ 
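For orientation: each includeDB line declares that the file in the first column depends on the header in the second, and the makedeps tool expands the bracketed macros per platform. Most of the churn above and below is the JDK7 split of <arch> (the CPU family) from <arch_model> (the concrete 32- or 64-bit variant). A plausible expansion on a Linux x86 32-bit build (the concrete file names are illustrative, not taken from the patch):

    //   ad_<arch_model>.cpp             ->  ad_x86_32.cpp
    //   os_<os_arch>.cpp                ->  os_linux_x86.cpp
    //   sharedRuntime_<arch_model>.cpp  ->  sharedRuntime_x86_32.cpp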
+diff -ruN openjdk6/hotspot/src/share/vm/includeDB_compiler2 openjdk/hotspot/src/share/vm/includeDB_compiler2
+--- openjdk6/hotspot/src/share/vm/includeDB_compiler2	2008-02-28 05:02:31.000000000 -0500
++++ openjdk/hotspot/src/share/vm/includeDB_compiler2	2008-01-31 09:19:00.000000000 -0500
+@@ -22,55 +22,55 @@
+ //  
+ //
+ 
+-ad_<arch>.cpp                           adGlobals_<arch>.hpp
+-ad_<arch>.cpp                           ad_<arch>.hpp
+-ad_<arch>.cpp                           allocation.inline.hpp
+-ad_<arch>.cpp                           assembler.hpp
+-ad_<arch>.cpp                           assembler_<arch>.inline.hpp
+-ad_<arch>.cpp                           biasedLocking.hpp
+-ad_<arch>.cpp                           cfgnode.hpp
+-ad_<arch>.cpp                           collectedHeap.inline.hpp
+-ad_<arch>.cpp                           compiledICHolderOop.hpp
+-ad_<arch>.cpp                           growableArray.hpp
+-ad_<arch>.cpp                           locknode.hpp
+-ad_<arch>.cpp                           markOop.hpp
+-ad_<arch>.cpp                           methodOop.hpp
+-ad_<arch>.cpp                           nativeInst_<arch>.hpp
+-ad_<arch>.cpp                           oop.inline.hpp
+-ad_<arch>.cpp                           oop.inline2.hpp
+-ad_<arch>.cpp                           opcodes.hpp
+-ad_<arch>.cpp                           regalloc.hpp
+-ad_<arch>.cpp                           regmask.hpp
+-ad_<arch>.cpp                           runtime.hpp
+-ad_<arch>.cpp                           sharedRuntime.hpp
+-ad_<arch>.cpp                           stubRoutines.hpp
+-ad_<arch>.cpp                           vmreg.hpp
+-ad_<arch>.cpp                           vmreg_<arch>.inline.hpp
+-
+-ad_<arch>.hpp                           addnode.hpp
+-ad_<arch>.hpp                           machnode.hpp
+-ad_<arch>.hpp                           matcher.hpp
+-ad_<arch>.hpp                           opcodes.hpp
+-ad_<arch>.hpp                           regalloc.hpp
+-ad_<arch>.hpp                           resourceArea.hpp
+-ad_<arch>.hpp                           subnode.hpp
+-ad_<arch>.hpp                           vectornode.hpp
+-
+-ad_<arch>_clone.cpp                     ad_<arch>.hpp
+-
+-ad_<arch>_expand.cpp                    ad_<arch>.hpp
+-
+-ad_<arch>_format.cpp                    ad_<arch>.hpp
+-
+-ad_<arch>_gen.cpp                       ad_<arch>.hpp
+-ad_<arch>_gen.cpp                       cfgnode.hpp
+-ad_<arch>_gen.cpp                       locknode.hpp
++ad_<arch_model>.cpp                     adGlobals_<arch_model>.hpp
++ad_<arch_model>.cpp                     ad_<arch_model>.hpp
++ad_<arch_model>.cpp                     allocation.inline.hpp
++ad_<arch_model>.cpp                     assembler.hpp
++ad_<arch_model>.cpp                     assembler_<arch_model>.inline.hpp
++ad_<arch_model>.cpp                     biasedLocking.hpp
++ad_<arch_model>.cpp                     cfgnode.hpp
++ad_<arch_model>.cpp                     collectedHeap.inline.hpp
++ad_<arch_model>.cpp                     compiledICHolderOop.hpp
++ad_<arch_model>.cpp                     growableArray.hpp
++ad_<arch_model>.cpp                     locknode.hpp
++ad_<arch_model>.cpp                     markOop.hpp
++ad_<arch_model>.cpp                     methodOop.hpp
++ad_<arch_model>.cpp                     nativeInst_<arch>.hpp
++ad_<arch_model>.cpp                     oop.inline.hpp
++ad_<arch_model>.cpp                     oop.inline2.hpp
++ad_<arch_model>.cpp                     opcodes.hpp
++ad_<arch_model>.cpp                     regalloc.hpp
++ad_<arch_model>.cpp                     regmask.hpp
++ad_<arch_model>.cpp                     runtime.hpp
++ad_<arch_model>.cpp                     sharedRuntime.hpp
++ad_<arch_model>.cpp                     stubRoutines.hpp
++ad_<arch_model>.cpp                     vmreg.hpp
++ad_<arch_model>.cpp                     vmreg_<arch>.inline.hpp
++
++ad_<arch_model>.hpp                     addnode.hpp
++ad_<arch_model>.hpp                     machnode.hpp
++ad_<arch_model>.hpp                     matcher.hpp
++ad_<arch_model>.hpp                     opcodes.hpp
++ad_<arch_model>.hpp                     regalloc.hpp
++ad_<arch_model>.hpp                     resourceArea.hpp
++ad_<arch_model>.hpp                     subnode.hpp
++ad_<arch_model>.hpp                     vectornode.hpp
++
++ad_<arch_model>_clone.cpp               ad_<arch_model>.hpp
++
++ad_<arch_model>_expand.cpp              ad_<arch_model>.hpp
++
++ad_<arch_model>_format.cpp              ad_<arch_model>.hpp
++
++ad_<arch_model>_gen.cpp                 ad_<arch_model>.hpp
++ad_<arch_model>_gen.cpp                 cfgnode.hpp
++ad_<arch_model>_gen.cpp                 locknode.hpp
+ 
+-ad_<arch>_misc.cpp                      ad_<arch>.hpp
++ad_<arch_model>_misc.cpp                ad_<arch_model>.hpp
+ 
+-ad_<arch>_peephole.cpp                  ad_<arch>.hpp
++ad_<arch_model>_peephole.cpp            ad_<arch_model>.hpp
+ 
+-ad_<arch>_pipeline.cpp                  ad_<arch>.hpp
++ad_<arch_model>_pipeline.cpp            ad_<arch_model>.hpp
+ 
+ addnode.cpp                             addnode.hpp
+ addnode.cpp                             allocation.inline.hpp
+@@ -125,6 +125,8 @@
+ bytecodeInfo.cpp                        systemDictionary.hpp
+ bytecodeInfo.cpp                        vmSymbols.hpp
+ 
++bytecodeInterpreter.hpp                 methodDataOop.hpp
++
+ c2_globals.cpp                          c2_globals.hpp
+ 
+ c2_globals.hpp                          c2_globals_<arch>.hpp
+@@ -139,14 +141,12 @@
+ 
+ c2_init_<arch>.cpp                      compile.hpp
+ 
+-c2compiler.cpp                          ad_<arch>.hpp
++c2compiler.cpp                          ad_<arch_model>.hpp
+ c2compiler.cpp                          c2compiler.hpp
+ c2compiler.cpp                          runtime.hpp
+ 
+ c2compiler.hpp                          abstractCompiler.hpp
+ 
+-cInterpreter.hpp                        methodDataOop.hpp
+-
+ callGenerator.cpp                       addnode.hpp
+ callGenerator.cpp                       callGenerator.hpp
+ callGenerator.cpp                       callnode.hpp
+@@ -287,7 +287,7 @@
+ 
+ coalesce.hpp                            phase.hpp
+ 
+-compile.cpp                             ad_<arch>.hpp
++compile.cpp                             ad_<arch_model>.hpp
+ compile.cpp                             addnode.hpp
+ compile.cpp                             arguments.hpp
+ compile.cpp                             assembler.hpp
+@@ -333,6 +333,7 @@
+ compile.hpp                             deoptimization.hpp
+ compile.hpp                             dict.hpp
+ compile.hpp                             exceptionHandlerTable.hpp
++compile.hpp                             idealGraphPrinter.hpp
+ compile.hpp                             phase.hpp
+ compile.hpp                             port.hpp
+ compile.hpp                             regmask.hpp
+@@ -358,11 +359,11 @@
+ connode.hpp                             opcodes.hpp
+ connode.hpp                             type.hpp
+ 
+-deoptimization.cpp                      ad_<arch>.hpp
++deoptimization.cpp                      ad_<arch_model>.hpp
+ 
+-dfa_<arch>.cpp                          ad_<arch>.hpp
+-dfa_<arch>.cpp                          matcher.hpp
+-dfa_<arch>.cpp                          opcodes.hpp
++dfa_<arch_model>.cpp                    ad_<arch_model>.hpp
++dfa_<arch_model>.cpp                    matcher.hpp
++dfa_<arch_model>.cpp                    opcodes.hpp
+ 
+ dict.cpp                                allocation.inline.hpp
+ dict.cpp                                dict.hpp
+@@ -389,7 +390,6 @@
+ doCall.cpp                              addnode.hpp
+ doCall.cpp                              callGenerator.hpp
+ doCall.cpp                              cfgnode.hpp
+-doCall.cpp                              cha.hpp
+ doCall.cpp                              compileLog.hpp
+ doCall.cpp                              linkResolver.hpp
+ doCall.cpp                              mulnode.hpp
+@@ -422,9 +422,9 @@
+ escape.hpp                              growableArray.hpp
+ escape.hpp                              node.hpp
+ 
+-frame.hpp                               adGlobals_<arch>.hpp
++frame.hpp                               adGlobals_<arch_model>.hpp
+ 
+-gcm.cpp                                 ad_<arch>.hpp
++gcm.cpp                                 ad_<arch_model>.hpp
+ gcm.cpp                                 allocation.inline.hpp
+ gcm.cpp                                 block.hpp
+ gcm.cpp                                 c2compiler.hpp
+@@ -481,9 +481,11 @@
+ graphKit.hpp                            type.hpp
+ 
+ idealKit.cpp                            addnode.hpp
++idealKit.cpp                            callnode.hpp
+ idealKit.cpp                            cfgnode.hpp
+ idealKit.cpp                            idealKit.hpp
+ 
++idealKit.hpp                            connode.hpp
+ idealKit.hpp                            mulnode.hpp
+ idealKit.hpp                            phaseX.hpp
+ idealKit.hpp                            subnode.hpp
+@@ -530,7 +532,7 @@
+ java.cpp                                methodLiveness.hpp
+ java.cpp                                runtime.hpp
+ 
+-lcm.cpp                                 ad_<arch>.hpp
++lcm.cpp                                 ad_<arch_model>.hpp
+ lcm.cpp                                 allocation.inline.hpp
+ lcm.cpp                                 block.hpp
+ lcm.cpp                                 c2compiler.hpp
+@@ -572,7 +574,7 @@
+ locknode.cpp                            rootnode.hpp
+ locknode.cpp                            runtime.hpp
+ 
+-locknode.hpp                            ad_<arch>.hpp
++locknode.hpp                            ad_<arch_model>.hpp
+ locknode.hpp                            node.hpp
+ locknode.hpp                            opcodes.hpp
+ locknode.hpp                            subnode.hpp
+@@ -647,7 +649,7 @@
+ macro.cpp                               vectset.hpp
+ macro.hpp                               phase.hpp
+ 
+-matcher.cpp                             ad_<arch>.hpp
++matcher.cpp                             ad_<arch_model>.hpp
+ matcher.cpp                             addnode.hpp
+ matcher.cpp                             allocation.inline.hpp
+ matcher.cpp                             atomic.hpp
+@@ -762,7 +764,7 @@
+ output.cpp                              type.hpp
+ output.cpp                              xmlstream.hpp
+ 
+-output.hpp                              ad_<arch>.hpp
++output.hpp                              ad_<arch_model>.hpp
+ output.hpp                              block.hpp
+ output.hpp                              node.hpp
+ 
+@@ -881,11 +883,11 @@
+ regalloc.hpp                            phase.hpp
+ regalloc.hpp                            vmreg.hpp
+ 
+-regmask.cpp                             ad_<arch>.hpp
++regmask.cpp                             ad_<arch_model>.hpp
+ regmask.cpp                             compile.hpp
+ regmask.cpp                             regmask.hpp
+ 
+-regmask.hpp                             adGlobals_<arch>.hpp
++regmask.hpp                             adGlobals_<arch_model>.hpp
+ regmask.hpp                             optoreg.hpp
+ regmask.hpp                             port.hpp
+ regmask.hpp                             vmreg.hpp
+@@ -901,7 +903,7 @@
+ 
+ rootnode.hpp                            loopnode.hpp
+ 
+-runtime.cpp                             ad_<arch>.hpp
++runtime.cpp                             ad_<arch_model>.hpp
+ runtime.cpp                             addnode.hpp
+ runtime.cpp                             barrierSet.hpp
+ runtime.cpp                             bytecode.hpp
+@@ -953,21 +955,21 @@
+ runtime.hpp                             type.hpp
+ runtime.hpp                             vframe.hpp
+ 
+-runtime_<arch>.cpp                      adGlobals_<arch>.hpp
+-runtime_<arch>.cpp                      ad_<arch>.hpp
+-runtime_<arch>.cpp                      assembler.hpp
+-runtime_<arch>.cpp                      assembler_<arch>.inline.hpp
+-runtime_<arch>.cpp                      globalDefinitions.hpp
+-runtime_<arch>.cpp                      interfaceSupport.hpp
+-runtime_<arch>.cpp                      interpreter_<arch>.hpp
+-runtime_<arch>.cpp                      nativeInst_<arch>.hpp
+-runtime_<arch>.cpp                      runtime.hpp
+-runtime_<arch>.cpp                      sharedRuntime.hpp
+-runtime_<arch>.cpp                      stubRoutines.hpp
+-runtime_<arch>.cpp                      systemDictionary.hpp
+-runtime_<arch>.cpp                      vframeArray.hpp
+-runtime_<arch>.cpp                      vmreg.hpp
+-runtime_<arch>.cpp                      vmreg_<arch>.inline.hpp
++runtime_<arch_model>.cpp                adGlobals_<arch_model>.hpp
++runtime_<arch_model>.cpp                ad_<arch_model>.hpp
++runtime_<arch_model>.cpp                assembler.hpp
++runtime_<arch_model>.cpp                assembler_<arch_model>.inline.hpp
++runtime_<arch_model>.cpp                globalDefinitions.hpp
++runtime_<arch_model>.cpp                interfaceSupport.hpp
++runtime_<arch_model>.cpp                interpreter.hpp
++runtime_<arch_model>.cpp                nativeInst_<arch>.hpp
++runtime_<arch_model>.cpp                runtime.hpp
++runtime_<arch_model>.cpp                sharedRuntime.hpp
++runtime_<arch_model>.cpp                stubRoutines.hpp
++runtime_<arch_model>.cpp                systemDictionary.hpp
++runtime_<arch_model>.cpp                vframeArray.hpp
++runtime_<arch_model>.cpp                vmreg.hpp
++runtime_<arch_model>.cpp                vmreg_<arch>.inline.hpp
+ 
+ set.cpp                                 allocation.inline.hpp
+ set.cpp                                 set.hpp
+@@ -975,16 +977,14 @@
+ set.hpp                                 allocation.hpp
+ set.hpp                                 port.hpp
+ 
+-sharedRuntime_<arch>.cpp                runtime.hpp
++sharedRuntime_<arch_model>.cpp          runtime.hpp
+ 
+ split_if.cpp                            allocation.inline.hpp
+ split_if.cpp                            callnode.hpp
+ split_if.cpp                            connode.hpp
+ split_if.cpp                            loopnode.hpp
+ 
+-stackValue.cpp                          debugInfo.hpp
+-
+-stubGenerator_<arch>.cpp                runtime.hpp
++stubGenerator_<arch_model>.cpp          runtime.hpp
+ 
+ stubRoutines.cpp                        runtime.hpp
+ 
+@@ -1025,8 +1025,6 @@
+ superword.hpp                           phaseX.hpp
+ superword.hpp                           vectornode.hpp
+ 
+-systemDictionary.cpp                    cha.hpp
+-
+ thread.cpp                              c2compiler.hpp
+ 
+ top.hpp                                 c2_globals.hpp
+@@ -1070,15 +1068,34 @@
+ 
+ vframe_hp.cpp                           matcher.hpp
+ 
+-vmStructs.cpp                           adGlobals_<arch>.hpp
++vmStructs.cpp                           adGlobals_<arch_model>.hpp
+ vmStructs.cpp                           matcher.hpp
+ 
+-vmreg.hpp                               adGlobals_<arch>.hpp
++vmreg.hpp                               adGlobals_<arch_model>.hpp
+ vmreg.hpp                               adlcVMDeps.hpp
+ vmreg.hpp                               ostream.hpp
+ 
+ vtableStubs.cpp                         matcher.hpp
+ 
+-vtableStubs_<arch>.cpp                  ad_<arch>.hpp
+-vtableStubs_<arch>.cpp                  runtime.hpp
++vtableStubs_<arch_model>.cpp            ad_<arch_model>.hpp
++vtableStubs_<arch_model>.cpp            runtime.hpp
+ 
++idealGraphPrinter.hpp                   dict.hpp
++idealGraphPrinter.hpp                   vectset.hpp
++idealGraphPrinter.hpp                   growableArray.hpp
++idealGraphPrinter.hpp                   ostream.hpp
++
++idealGraphPrinter.cpp                   idealGraphPrinter.hpp
++idealGraphPrinter.cpp                   chaitin.hpp
++idealGraphPrinter.cpp                   machnode.hpp
++idealGraphPrinter.cpp                   parse.hpp
++idealGraphPrinter.cpp                   threadCritical.hpp
++
++compile.cpp                             idealGraphPrinter.hpp
++thread.cpp                              idealGraphPrinter.hpp
++phaseX.cpp                              idealGraphPrinter.hpp
++parse2.cpp                              idealGraphPrinter.hpp
++parse1.cpp                              idealGraphPrinter.hpp
++matcher.cpp                             idealGraphPrinter.hpp
++loopnode.cpp                            idealGraphPrinter.hpp
++chaitin.cpp                             idealGraphPrinter.hpp
+diff -ruN openjdk6/hotspot/src/share/vm/includeDB_core openjdk/hotspot/src/share/vm/includeDB_core
+--- openjdk6/hotspot/src/share/vm/includeDB_core	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/includeDB_core	2008-01-31 09:19:00.000000000 -0500
+@@ -47,7 +47,7 @@
+ // bodies for inline functions declared in H.hpp.
+ //
+ // NOTE: Files that use the token "generate_platform_dependent_include" 
+-// are expected to contain macro references like <os>, <arch>, ... and
++// are expected to contain macro references like <os>, <arch_model>, ... and
+ // makedeps has a dependency on these platform files looking like:
+ // foo_<macro>.trailing_string 
+ // (where "trailing_string" can be any legal filename strings but typically
+@@ -120,6 +120,13 @@
+ 
+ abstractCompiler.hpp                    compilerInterface.hpp
+ 
++abstractInterpreter.hpp                 bytecodes.hpp
++abstractInterpreter.hpp                 interp_masm_<arch_model>.hpp
++abstractInterpreter.hpp                 stubs.hpp
++abstractInterpreter.hpp                 thread_<os_family>.inline.hpp
++abstractInterpreter.hpp                 top.hpp
++abstractInterpreter.hpp                 vmThread.hpp
++
+ accessFlags.cpp                         accessFlags.hpp
+ accessFlags.cpp                         oop.inline.hpp
+ accessFlags.cpp                         os_<os_family>.inline.hpp
+@@ -175,7 +182,7 @@
+ arguments.cpp                           oop.inline.hpp
+ arguments.cpp                           os_<os_family>.inline.hpp
+ arguments.cpp                           universe.inline.hpp
+-arguments.cpp                           vm_version_<arch>.hpp
++arguments.cpp                           vm_version_<arch_model>.hpp
+ 
+ arguments.hpp                           perfData.hpp
+ arguments.hpp                           top.hpp
+@@ -226,7 +233,7 @@
+ 
+ assembler.cpp                           assembler.hpp
+ assembler.cpp                           assembler.inline.hpp
+-assembler.cpp                           assembler_<arch>.inline.hpp
++assembler.cpp                           assembler_<arch_model>.inline.hpp
+ assembler.cpp                           codeBuffer.hpp
+ assembler.cpp                           icache.hpp
+ assembler.cpp                           os.hpp
+@@ -239,36 +246,36 @@
+ assembler.hpp                           register_<arch>.hpp
+ assembler.hpp                           relocInfo.hpp
+ assembler.hpp                           top.hpp
+-assembler.hpp                           vm_version_<arch>.hpp
++assembler.hpp                           vm_version_<arch_model>.hpp
+ 
+ assembler.inline.hpp                    assembler.hpp
+ assembler.inline.hpp                    codeBuffer.hpp
+ assembler.inline.hpp                    disassembler_<arch>.hpp
+ assembler.inline.hpp                    threadLocalStorage.hpp
+ 
+-assembler_<arch>.cpp                    assembler_<arch>.inline.hpp
+-assembler_<arch>.cpp                    biasedLocking.hpp
+-assembler_<arch>.cpp                    cardTableModRefBS.hpp
+-assembler_<arch>.cpp                    collectedHeap.hpp
+-assembler_<arch>.cpp                    interfaceSupport.hpp
+-assembler_<arch>.cpp                    interpreter_<arch>.hpp
+-assembler_<arch>.cpp                    objectMonitor.hpp
+-assembler_<arch>.cpp                    os.hpp
+-assembler_<arch>.cpp                    resourceArea.hpp
+-assembler_<arch>.cpp                    sharedRuntime.hpp
+-assembler_<arch>.cpp                    stubRoutines.hpp
+-
+-assembler_<arch>.hpp                    generate_platform_dependent_include
+-
+-assembler_<arch>.inline.hpp             assembler.inline.hpp
+-assembler_<arch>.inline.hpp             codeBuffer.hpp
+-assembler_<arch>.inline.hpp             codeCache.hpp
+-assembler_<arch>.inline.hpp             handles.inline.hpp
+-
+-assembler_<os_arch>.cpp                 assembler.hpp
+-assembler_<os_arch>.cpp                 assembler_<arch>.inline.hpp
+-assembler_<os_arch>.cpp                 os.hpp
+-assembler_<os_arch>.cpp                 threadLocalStorage.hpp
++assembler_<arch_model>.cpp              assembler_<arch_model>.inline.hpp
++assembler_<arch_model>.cpp              biasedLocking.hpp
++assembler_<arch_model>.cpp              cardTableModRefBS.hpp
++assembler_<arch_model>.cpp              collectedHeap.hpp
++assembler_<arch_model>.cpp              interfaceSupport.hpp
++assembler_<arch_model>.cpp              interpreter.hpp
++assembler_<arch_model>.cpp              objectMonitor.hpp
++assembler_<arch_model>.cpp              os.hpp
++assembler_<arch_model>.cpp              resourceArea.hpp
++assembler_<arch_model>.cpp              sharedRuntime.hpp
++assembler_<arch_model>.cpp              stubRoutines.hpp
++
++assembler_<arch_model>.hpp              generate_platform_dependent_include
++
++assembler_<arch_model>.inline.hpp       assembler.inline.hpp
++assembler_<arch_model>.inline.hpp       codeBuffer.hpp
++assembler_<arch_model>.inline.hpp       codeCache.hpp
++assembler_<arch_model>.inline.hpp       handles.inline.hpp
++
++assembler_<os_arch_model>.cpp           assembler.hpp
++assembler_<os_arch_model>.cpp           assembler_<arch_model>.inline.hpp
++assembler_<os_arch_model>.cpp           os.hpp
++assembler_<os_arch_model>.cpp           threadLocalStorage.hpp
+ 
+ atomic.cpp                              atomic.hpp
+ atomic.cpp                              atomic_<os_arch>.inline.hpp
+@@ -278,30 +285,14 @@
+ 
+ atomic_<os_arch>.inline.hpp             atomic.hpp
+ atomic_<os_arch>.inline.hpp             os.hpp
+-atomic_<os_arch>.inline.hpp             vm_version_<arch>.hpp
++atomic_<os_arch>.inline.hpp             vm_version_<arch_model>.hpp
+ 
+-attachListener.cpp                      arguments.hpp
+-attachListener.cpp                      attachListener.hpp
+-attachListener.cpp                      globals.hpp
+-attachListener.cpp                      heapDumper.hpp
+-attachListener.cpp                      java.hpp
+-attachListener.cpp                      javaCalls.hpp
+-attachListener.cpp                      javaClasses.hpp
+-attachListener.cpp                      jvmtiExport.hpp
+-attachListener.cpp                      os.hpp
+-attachListener.cpp                      resourceArea.hpp
+-attachListener.cpp                      systemDictionary.hpp
+-attachListener.cpp                      vmGCOperations.hpp
++// attachListener is jck optional, put cpp deps in includeDB_features
+ 
+ attachListener.hpp                      allocation.hpp
+ attachListener.hpp                      debug.hpp
+ attachListener.hpp                      ostream.hpp
+ 
+-attachListener_<os_family>.cpp          attachListener.hpp
+-attachListener_<os_family>.cpp          dtraceAttacher.hpp
+-attachListener_<os_family>.cpp          interfaceSupport.hpp
+-attachListener_<os_family>.cpp          os.hpp
+-
+ barrierSet.hpp                          memRegion.hpp
+ barrierSet.hpp                          oopsHierarchy.hpp
+ 
+@@ -384,6 +375,64 @@
+ bytecodeHistogram.hpp                   allocation.hpp
+ bytecodeHistogram.hpp                   bytecodes.hpp
+ 
++bytecodeInterpreter.cpp                 no_precompiled_headers
++bytecodeInterpreter.cpp                 bytecodeHistogram.hpp
++bytecodeInterpreter.cpp                 bytecodeInterpreter.hpp
++bytecodeInterpreter.cpp                 bytecodeInterpreter.inline.hpp
++bytecodeInterpreter.cpp                 cardTableModRefBS.hpp
++bytecodeInterpreter.cpp                 collectedHeap.hpp
++bytecodeInterpreter.cpp                 exceptions.hpp
++bytecodeInterpreter.cpp                 frame.inline.hpp
++bytecodeInterpreter.cpp                 handles.inline.hpp
++bytecodeInterpreter.cpp                 interfaceSupport.hpp
++bytecodeInterpreter.cpp                 interpreterRuntime.hpp
++bytecodeInterpreter.cpp                 interpreter.hpp
++bytecodeInterpreter.cpp                 jvmtiExport.hpp
++bytecodeInterpreter.cpp                 objArrayKlass.hpp
++bytecodeInterpreter.cpp                 oop.inline.hpp
++bytecodeInterpreter.cpp                 orderAccess_<os_arch>.inline.hpp
++bytecodeInterpreter.cpp                 resourceArea.hpp
++bytecodeInterpreter.cpp                 sharedRuntime.hpp
++bytecodeInterpreter.cpp                 threadCritical.hpp
++bytecodeInterpreter.cpp                 vmSymbols.hpp
++
++bytecodeInterpreter_<arch>.cpp          assembler.hpp
++bytecodeInterpreter_<arch>.cpp          bytecodeInterpreter.hpp
++bytecodeInterpreter_<arch>.cpp          bytecodeInterpreter.inline.hpp
++bytecodeInterpreter_<arch>.cpp          debug.hpp
++bytecodeInterpreter_<arch>.cpp          deoptimization.hpp
++bytecodeInterpreter_<arch>.cpp          frame.inline.hpp
++bytecodeInterpreter_<arch>.cpp          interp_masm_<arch_model>.hpp
++bytecodeInterpreter_<arch>.cpp          interpreterRuntime.hpp
++bytecodeInterpreter_<arch>.cpp          interpreter.hpp
++bytecodeInterpreter_<arch>.cpp          jvmtiExport.hpp
++bytecodeInterpreter_<arch>.cpp          jvmtiThreadState.hpp
++bytecodeInterpreter_<arch>.cpp          methodDataOop.hpp
++bytecodeInterpreter_<arch>.cpp          methodOop.hpp
++bytecodeInterpreter_<arch>.cpp          oop.inline.hpp
++bytecodeInterpreter_<arch>.cpp          sharedRuntime.hpp
++bytecodeInterpreter_<arch>.cpp          stubRoutines.hpp
++bytecodeInterpreter_<arch>.cpp          synchronizer.hpp
++bytecodeInterpreter_<arch>.cpp          vframeArray.hpp
++
++bytecodeInterpreterWithChecks.cpp       bytecodeInterpreter.cpp
++
++bytecodeInterpreter.hpp                 allocation.hpp
++bytecodeInterpreter.hpp                 bytes_<arch>.hpp
++bytecodeInterpreter.hpp                 frame.hpp
++bytecodeInterpreter.hpp                 globalDefinitions.hpp
++bytecodeInterpreter.hpp                 globals.hpp
++bytecodeInterpreter.hpp                 methodDataOop.hpp
++bytecodeInterpreter.hpp                 methodOop.hpp
++bytecodeInterpreter.hpp                 synchronizer.hpp
++
++bytecodeInterpreter.inline.hpp          bytecodeInterpreter.hpp
++bytecodeInterpreter.inline.hpp          stubRoutines.hpp
++
++bytecodeInterpreter_<arch>.hpp          generate_platform_dependent_include
++
++bytecodeInterpreter_<arch>.inline.hpp   generate_platform_dependent_include
++
+ bytecodeStream.cpp                      bytecodeStream.hpp
+ bytecodeStream.cpp                      bytecodes.hpp
+ 
+@@ -421,45 +470,6 @@
+ 
+ bytes_<os_arch>.inline.hpp              generate_platform_dependent_include
+ 
+-cInterpretMethod.hpp                    interpreter_<arch>.hpp
+-cInterpretMethod.hpp                    threadLocalAllocBuffer.inline.hpp
+-
+-cInterpreter.cpp                        bytecodeHistogram.hpp
+-cInterpreter.cpp                        cInterpretMethod.hpp
+-cInterpreter.cpp                        cInterpreter.hpp
+-cInterpreter.cpp                        cInterpreter.inline.hpp
+-cInterpreter.cpp                        cardTableModRefBS.hpp
+-cInterpreter.cpp                        collectedHeap.hpp
+-cInterpreter.cpp                        exceptions.hpp
+-cInterpreter.cpp                        frame.inline.hpp
+-cInterpreter.cpp                        handles.inline.hpp
+-cInterpreter.cpp                        interfaceSupport.hpp
+-cInterpreter.cpp                        interpreterRuntime.hpp
+-cInterpreter.cpp                        interpreter_<arch>.hpp
+-cInterpreter.cpp                        jvmtiExport.hpp
+-cInterpreter.cpp                        objArrayKlass.hpp
+-cInterpreter.cpp                        oop.inline.hpp
+-cInterpreter.cpp                        orderAccess_<os_arch>.inline.hpp
+-cInterpreter.cpp                        resourceArea.hpp
+-cInterpreter.cpp                        sharedRuntime.hpp
+-cInterpreter.cpp                        threadCritical.hpp
+-cInterpreter.cpp                        vmSymbols.hpp
+-
+-cInterpreter.hpp                        allocation.hpp
+-cInterpreter.hpp                        bytes_<arch>.hpp
+-cInterpreter.hpp                        frame.hpp
+-cInterpreter.hpp                        globalDefinitions.hpp
+-cInterpreter.hpp                        globals.hpp
+-cInterpreter.hpp                        methodOop.hpp
+-cInterpreter.hpp                        synchronizer.hpp
+-
+-cInterpreter.inline.hpp                 cInterpreter.hpp
+-cInterpreter.inline.hpp                 stubRoutines.hpp
+-
+-cInterpreter_<arch>.hpp                 generate_platform_dependent_include
+-
+-cInterpreter_<arch>.inline.hpp          generate_platform_dependent_include
+-
+ cardTableModRefBS.cpp                   allocation.inline.hpp
+ cardTableModRefBS.cpp                   cardTableModRefBS.hpp
+ cardTableModRefBS.cpp                   cardTableRS.hpp
+@@ -487,19 +497,6 @@
+ cardTableRS.hpp                         genRemSet.hpp
+ cardTableRS.hpp                         memRegion.hpp
+ 
+-cha.cpp                                 cha.hpp
+-cha.cpp                                 instanceKlass.hpp
+-cha.cpp                                 linkResolver.hpp
+-cha.cpp                                 methodOop.hpp
+-cha.cpp                                 oop.inline.hpp
+-cha.cpp                                 systemDictionary.hpp
+-cha.cpp                                 universe.inline.hpp
+-
+-cha.hpp                                 growableArray.hpp
+-cha.hpp                                 handles.hpp
+-cha.hpp                                 handles.inline.hpp
+-cha.hpp                                 klassOop.hpp
+-
+ ciArray.cpp                             ciArray.hpp
+ ciArray.cpp                             ciKlass.hpp
+ ciArray.cpp                             ciUtilities.hpp
+@@ -654,7 +651,6 @@
+ ciMethod.cpp                            abstractCompiler.hpp
+ ciMethod.cpp                            allocation.inline.hpp
+ ciMethod.cpp                            bcEscapeAnalyzer.hpp
+-ciMethod.cpp                            cha.hpp
+ ciMethod.cpp                            ciCallProfile.hpp
+ ciMethod.cpp                            ciExceptionHandler.hpp
+ ciMethod.cpp                            ciInstanceKlass.hpp
+@@ -984,6 +980,8 @@
+ codeCache.cpp                           allocation.inline.hpp
+ codeCache.cpp                           codeBlob.hpp
+ codeCache.cpp                           codeCache.hpp
++codeCache.cpp                           dependencies.hpp
++codeCache.cpp                           gcLocker.hpp
+ codeCache.cpp                           icache.hpp
+ codeCache.cpp                           iterator.hpp
+ codeCache.cpp                           java.hpp
+@@ -1046,7 +1044,7 @@
+ compilationPolicy.cpp                   frame.hpp
+ compilationPolicy.cpp                   globalDefinitions.hpp
+ compilationPolicy.cpp                   handles.inline.hpp
+-compilationPolicy.cpp                   interpreter_<arch>.hpp
++compilationPolicy.cpp                   interpreter.hpp
+ compilationPolicy.cpp                   methodDataOop.hpp
+ compilationPolicy.cpp                   methodOop.hpp
+ compilationPolicy.cpp                   nativeLookup.hpp
+@@ -1104,7 +1102,7 @@
+ compiledIC.cpp                          events.hpp
+ compiledIC.cpp                          icBuffer.hpp
+ compiledIC.cpp                          icache.hpp
+-compiledIC.cpp                          interpreter_<arch>.hpp
++compiledIC.cpp                          interpreter.hpp
+ compiledIC.cpp                          linkResolver.hpp
+ compiledIC.cpp                          methodOop.hpp
+ compiledIC.cpp                          nmethod.hpp
+@@ -1275,7 +1273,7 @@
+ cpCacheOop.cpp                          cpCacheOop.hpp
+ cpCacheOop.cpp                          handles.inline.hpp
+ cpCacheOop.cpp                          interpreter.hpp
+-cpCacheOop.cpp                          jvmtiRedefineClasses.hpp
++cpCacheOop.cpp                          jvmtiRedefineClassesTrace.hpp
+ cpCacheOop.cpp                          markSweep.hpp
+ cpCacheOop.cpp                          markSweep.inline.hpp
+ cpCacheOop.cpp                          objArrayOop.hpp
+@@ -1287,6 +1285,39 @@
+ cpCacheOop.hpp                          arrayOop.hpp
+ cpCacheOop.hpp                          bytecodes.hpp
+ 
++cppInterpreter.cpp                      bytecodeInterpreter.hpp
++cppInterpreter.cpp                      interpreter.hpp
++cppInterpreter.cpp                      interpreterGenerator.hpp
++cppInterpreter.cpp                      interpreterRuntime.hpp
++
++cppInterpreter.hpp                      abstractInterpreter.hpp
++
++cppInterpreter_<arch>.cpp               arguments.hpp
++cppInterpreter_<arch>.cpp               arrayOop.hpp
++cppInterpreter_<arch>.cpp               assembler.hpp
++cppInterpreter_<arch>.cpp               bytecodeHistogram.hpp
++cppInterpreter_<arch>.cpp               debug.hpp
++cppInterpreter_<arch>.cpp               deoptimization.hpp
++cppInterpreter_<arch>.cpp               frame.inline.hpp
++cppInterpreter_<arch>.cpp               interpreterRuntime.hpp
++cppInterpreter_<arch>.cpp               interpreter.hpp
++cppInterpreter_<arch>.cpp               interpreterGenerator.hpp
++cppInterpreter_<arch>.cpp               jvmtiExport.hpp
++cppInterpreter_<arch>.cpp               jvmtiThreadState.hpp
++cppInterpreter_<arch>.cpp               methodDataOop.hpp
++cppInterpreter_<arch>.cpp               methodOop.hpp
++cppInterpreter_<arch>.cpp               oop.inline.hpp
++cppInterpreter_<arch>.cpp               sharedRuntime.hpp
++cppInterpreter_<arch>.cpp               stubRoutines.hpp
++cppInterpreter_<arch>.cpp               synchronizer.hpp
++cppInterpreter_<arch>.cpp               cppInterpreter.hpp
++cppInterpreter_<arch>.cpp               timer.hpp
++cppInterpreter_<arch>.cpp               vframeArray.hpp
++
++cppInterpreter_<arch>.hpp               generate_platform_dependent_include
++
++cppInterpreterGenerator_<arch>.hpp      generate_platform_dependent_include
++
+ debug.cpp                               arguments.hpp
+ debug.cpp                               bytecodeHistogram.hpp
+ debug.cpp                               codeCache.hpp
+@@ -1298,7 +1329,7 @@
+ debug.cpp                               frame.hpp
+ debug.cpp                               heapDumper.hpp
+ debug.cpp                               icBuffer.hpp
+-debug.cpp                               interpreter_<arch>.hpp
++debug.cpp                               interpreter.hpp
+ debug.cpp                               java.hpp
+ debug.cpp                               markSweep.hpp
+ debug.cpp                               nmethod.hpp
+@@ -1433,7 +1464,7 @@
+ dictionary.cpp                          classLoadingService.hpp
+ dictionary.cpp                          dictionary.hpp
+ dictionary.cpp                          hashtable.inline.hpp
+-dictionary.cpp                          jvmtiRedefineClasses.hpp
++dictionary.cpp                          jvmtiRedefineClassesTrace.hpp
+ dictionary.cpp                          oop.inline.hpp
+ dictionary.cpp                          systemDictionary.hpp
+ 
+@@ -1465,24 +1496,7 @@
+ dtraceAttacher.cpp                      vmThread.hpp
+ dtraceAttacher.cpp                      vm_operations.hpp
+ 
+-dump.cpp                                classify.hpp
+-dump.cpp                                copy.hpp
+-dump.cpp                                filemap.hpp
+-dump.cpp                                javaCalls.hpp
+-dump.cpp                                javaClasses.hpp
+-dump.cpp                                loaderConstraints.hpp
+-dump.cpp                                methodDataOop.hpp
+-dump.cpp                                oop.hpp
+-dump.cpp                                oopFactory.hpp
+-dump.cpp                                resourceArea.hpp
+-dump.cpp                                signature.hpp
+-dump.cpp                                symbolTable.hpp
+-dump.cpp                                systemDictionary.hpp
+-dump.cpp                                vmThread.hpp
+-dump.cpp                                vm_operations.hpp
+-
+-dump_<arch>.cpp                         assembler_<arch>.inline.hpp
+-dump_<arch>.cpp                         compactingPermGenGen.hpp
++// dump is jck optional, put cpp deps in includeDB_features
+ 
+ events.cpp                              allocation.inline.hpp
+ events.cpp                              events.hpp
+@@ -1559,34 +1573,8 @@
+ filemap.hpp                             compactingPermGenGen.hpp
+ filemap.hpp                             space.hpp
+ 
+-forte.cpp                               collectedHeap.inline.hpp
+-forte.cpp                               debugInfoRec.hpp
+-forte.cpp                               forte.hpp
+-forte.cpp                               oop.inline.hpp
+-forte.cpp                               oop.inline2.hpp
+-forte.cpp                               pcDesc.hpp
+-forte.cpp                               space.hpp
+-forte.cpp                               thread.hpp
+-forte.cpp                               universe.inline.hpp
+-forte.cpp                               vframe.hpp
+-forte.cpp                               vframeArray.hpp
+-
+-fprofiler.cpp                           allocation.inline.hpp
+-fprofiler.cpp                           classLoader.hpp
+-fprofiler.cpp                           collectedHeap.inline.hpp
+-fprofiler.cpp                           deoptimization.hpp
+-fprofiler.cpp                           fprofiler.hpp
+-fprofiler.cpp                           interpreter_<arch>.hpp
+-fprofiler.cpp                           mutexLocker.hpp
+-fprofiler.cpp                           oop.inline.hpp
+-fprofiler.cpp                           oop.inline2.hpp
+-fprofiler.cpp                           stubCodeGenerator.hpp
+-fprofiler.cpp                           stubRoutines.hpp
+-fprofiler.cpp                           symbolOop.hpp
+-fprofiler.cpp                           task.hpp
+-fprofiler.cpp                           universe.inline.hpp
+-fprofiler.cpp                           vframe.hpp
+-fprofiler.cpp                           vtableStubs.hpp
++// forte is jck optional, put cpp deps in includeDB_features
++// fprofiler is jck optional, put cpp deps in includeDB_features
+ 
+ fprofiler.hpp                           thread_<os_family>.inline.hpp
+ fprofiler.hpp                           timer.hpp
+@@ -1594,8 +1582,7 @@
+ frame.cpp                               collectedHeap.inline.hpp
+ frame.cpp                               frame.inline.hpp
+ frame.cpp                               handles.inline.hpp
+-frame.cpp                               init.hpp
+-frame.cpp                               interpreter_<arch>.hpp
++frame.cpp                               interpreter.hpp
+ frame.cpp                               javaCalls.hpp
+ frame.cpp                               markOop.hpp
+ frame.cpp                               methodDataOop.hpp
+@@ -1620,8 +1607,8 @@
+ frame.hpp                               synchronizer.hpp
+ frame.hpp                               top.hpp
+ 
+-frame.inline.hpp                        cInterpreter.hpp
+-frame.inline.hpp                        cInterpreter.inline.hpp
++frame.inline.hpp                        bytecodeInterpreter.hpp
++frame.inline.hpp                        bytecodeInterpreter.inline.hpp
+ frame.inline.hpp                        frame.hpp
+ frame.inline.hpp                        interpreter.hpp
+ frame.inline.hpp                        jniTypes_<arch>.hpp
+@@ -1630,7 +1617,7 @@
+ 
+ frame_<arch>.cpp                        frame.inline.hpp
+ frame_<arch>.cpp                        handles.inline.hpp
+-frame_<arch>.cpp                        interpreter_<arch>.hpp
++frame_<arch>.cpp                        interpreter.hpp
+ frame_<arch>.cpp                        javaCalls.hpp
+ frame_<arch>.cpp                        markOop.hpp
+ frame_<arch>.cpp                        methodOop.hpp
+@@ -1871,33 +1858,14 @@
+ heap.hpp                                allocation.hpp
+ heap.hpp                                virtualspace.hpp
+ 
+-heapDumper.cpp                          genCollectedHeap.hpp
+-heapDumper.cpp                          heapDumper.hpp 
+-heapDumper.cpp                          javaCalls.hpp
+-heapDumper.cpp                          jniHandles.hpp
+-heapDumper.cpp                          objArrayKlass.hpp
+-heapDumper.cpp                          ostream.hpp
+-heapDumper.cpp                          reflectionUtils.hpp
+-heapDumper.cpp                          symbolTable.hpp
+-heapDumper.cpp                          systemDictionary.hpp
+-heapDumper.cpp                          universe.hpp
+-heapDumper.cpp                          vframe.hpp
+-heapDumper.cpp                          vmSymbols.hpp
+-heapDumper.cpp                          vmThread.hpp
+-heapDumper.cpp                          vm_operations.hpp
++// heapDumper is jck optional, put cpp deps in includeDB_features
+ 
+ heapDumper.hpp                          allocation.hpp
+ heapDumper.hpp                          klassOop.hpp
+ heapDumper.hpp                          oop.hpp
+ heapDumper.hpp                          os.hpp
+ 
+-heapInspection.cpp                      collectedHeap.hpp
+-heapInspection.cpp                      genCollectedHeap.hpp
+-heapInspection.cpp                      globalDefinitions.hpp
+-heapInspection.cpp                      heapInspection.hpp
+-heapInspection.cpp                      klassOop.hpp
+-heapInspection.cpp                      os.hpp
+-heapInspection.cpp                      resourceArea.hpp
++// heapInspection is jck optional, put cpp deps in includeDB_features
+ 
+ heapInspection.hpp                      allocation.inline.hpp
+ heapInspection.hpp                      oop.inline.hpp
+@@ -1924,11 +1892,11 @@
+ 
+ hpi_imported.h                          jni.h
+ 
+-icBuffer.cpp                            assembler_<arch>.inline.hpp
++icBuffer.cpp                            assembler_<arch_model>.inline.hpp
+ icBuffer.cpp                            collectedHeap.inline.hpp
+ icBuffer.cpp                            compiledIC.hpp
+ icBuffer.cpp                            icBuffer.hpp
+-icBuffer.cpp                            interpreter_<arch>.hpp
++icBuffer.cpp                            interpreter.hpp
+ icBuffer.cpp                            linkResolver.hpp
+ icBuffer.cpp                            methodOop.hpp
+ icBuffer.cpp                            mutexLocker.hpp
+@@ -1945,7 +1913,7 @@
+ icBuffer.hpp                            stubs.hpp
+ 
+ icBuffer_<arch>.cpp                     assembler.hpp
+-icBuffer_<arch>.cpp                     assembler_<arch>.inline.hpp
++icBuffer_<arch>.cpp                     assembler_<arch_model>.inline.hpp
+ icBuffer_<arch>.cpp                     bytecodes.hpp
+ icBuffer_<arch>.cpp                     collectedHeap.inline.hpp
+ icBuffer_<arch>.cpp                     icBuffer.hpp
+@@ -1960,13 +1928,14 @@
+ icache.hpp                              allocation.hpp
+ icache.hpp                              stubCodeGenerator.hpp
+ 
+-icache_<arch>.cpp                       assembler_<arch>.inline.hpp
++icache_<arch>.cpp                       assembler_<arch_model>.inline.hpp
+ icache_<arch>.cpp                       icache.hpp
+ 
+ icache_<arch>.hpp                       generate_platform_dependent_include
+ 
+ init.cpp                                bytecodes.hpp
+ init.cpp                                collectedHeap.hpp
++init.cpp                                handles.inline.hpp
+ init.cpp                                icBuffer.hpp
+ init.cpp                                icache.hpp
+ init.cpp                                init.hpp
+@@ -1987,7 +1956,7 @@
+ instanceKlass.cpp                       javaClasses.hpp
+ instanceKlass.cpp                       jvmti.h
+ instanceKlass.cpp                       jvmtiExport.hpp
+-instanceKlass.cpp                       jvmtiRedefineClasses.hpp
++instanceKlass.cpp                       jvmtiRedefineClassesTrace.hpp
+ instanceKlass.cpp                       methodOop.hpp
+ instanceKlass.cpp                       mutexLocker.hpp
+ instanceKlass.cpp                       objArrayKlassKlass.hpp
+@@ -2076,33 +2045,33 @@
+ 
+ interfaceSupport_<os_family>.hpp        generate_platform_dependent_include
+ 
+-interp_masm_<arch>.cpp                  arrayOop.hpp
+-interp_masm_<arch>.cpp                  biasedLocking.hpp
+-interp_masm_<arch>.cpp                  interp_masm_<arch>.hpp
+-interp_masm_<arch>.cpp                  interpreterRuntime.hpp
+-interp_masm_<arch>.cpp                  interpreter_<arch>.hpp
+-interp_masm_<arch>.cpp                  jvmtiExport.hpp
+-interp_masm_<arch>.cpp                  jvmtiThreadState.hpp
+-interp_masm_<arch>.cpp                  markOop.hpp
+-interp_masm_<arch>.cpp                  methodDataOop.hpp
+-interp_masm_<arch>.cpp                  methodOop.hpp
+-interp_masm_<arch>.cpp                  sharedRuntime.hpp
+-interp_masm_<arch>.cpp                  synchronizer.hpp
+-interp_masm_<arch>.cpp                  thread_<os_family>.inline.hpp
++interp_masm_<arch_model>.cpp            arrayOop.hpp
++interp_masm_<arch_model>.cpp            biasedLocking.hpp
++interp_masm_<arch_model>.cpp            interp_masm_<arch_model>.hpp
++interp_masm_<arch_model>.cpp            interpreterRuntime.hpp
++interp_masm_<arch_model>.cpp            interpreter.hpp
++interp_masm_<arch_model>.cpp            jvmtiExport.hpp
++interp_masm_<arch_model>.cpp            jvmtiThreadState.hpp
++interp_masm_<arch_model>.cpp            markOop.hpp
++interp_masm_<arch_model>.cpp            methodDataOop.hpp
++interp_masm_<arch_model>.cpp            methodOop.hpp
++interp_masm_<arch_model>.cpp            sharedRuntime.hpp
++interp_masm_<arch_model>.cpp            synchronizer.hpp
++interp_masm_<arch_model>.cpp            thread_<os_family>.inline.hpp
+ 
+-interp_masm_<arch>.hpp                  assembler_<arch>.inline.hpp
+-interp_masm_<arch>.hpp                  invocationCounter.hpp
++interp_masm_<arch_model>.hpp            assembler_<arch_model>.inline.hpp
++interp_masm_<arch_model>.hpp            invocationCounter.hpp
+ 
+ interpreter.cpp                         allocation.inline.hpp
+ interpreter.cpp                         arrayOop.hpp
+ interpreter.cpp                         assembler.hpp
+ interpreter.cpp                         bytecodeHistogram.hpp
+-interpreter.cpp                         cInterpreter.hpp
++interpreter.cpp                         bytecodeInterpreter.hpp
+ interpreter.cpp                         forte.hpp
+ interpreter.cpp                         handles.inline.hpp
+ interpreter.cpp                         interpreter.hpp
+ interpreter.cpp                         interpreterRuntime.hpp
+-interpreter.cpp                         interpreter_<arch>.hpp
++interpreter.cpp                         interpreter.hpp
+ interpreter.cpp                         jvmtiExport.hpp
+ interpreter.cpp                         methodDataOop.hpp
+ interpreter.cpp                         methodOop.hpp
+@@ -2114,25 +2083,20 @@
+ interpreter.cpp                         timer.hpp
+ interpreter.cpp                         vtune.hpp
+ 
+-interpreter.hpp                         bytecodes.hpp
+-interpreter.hpp                         cInterpreter.hpp
+-interpreter.hpp                         interp_masm_<arch>.hpp
++interpreter.hpp                         cppInterpreter.hpp
+ interpreter.hpp                         stubs.hpp
+-interpreter.hpp                         templateTable.hpp
+-interpreter.hpp                         thread_<os_family>.inline.hpp
+-interpreter.hpp                         top.hpp
+-interpreter.hpp                         vmThread.hpp
+-
+-interpreterRT_<arch>.cpp                allocation.inline.hpp
+-interpreterRT_<arch>.cpp                handles.inline.hpp
+-interpreterRT_<arch>.cpp                icache.hpp
+-interpreterRT_<arch>.cpp                interfaceSupport.hpp
+-interpreterRT_<arch>.cpp                interpreterRuntime.hpp
+-interpreterRT_<arch>.cpp                interpreter_<arch>.hpp
+-interpreterRT_<arch>.cpp                methodOop.hpp
+-interpreterRT_<arch>.cpp                oop.inline.hpp
+-interpreterRT_<arch>.cpp                signature.hpp
+-interpreterRT_<arch>.cpp                universe.inline.hpp
++interpreter.hpp                         templateInterpreter.hpp
++
++interpreterRT_<arch_model>.cpp          allocation.inline.hpp
++interpreterRT_<arch_model>.cpp          handles.inline.hpp
++interpreterRT_<arch_model>.cpp          icache.hpp
++interpreterRT_<arch_model>.cpp          interfaceSupport.hpp
++interpreterRT_<arch_model>.cpp          interpreterRuntime.hpp
++interpreterRT_<arch_model>.cpp          interpreter.hpp
++interpreterRT_<arch_model>.cpp          methodOop.hpp
++interpreterRT_<arch_model>.cpp          oop.inline.hpp
++interpreterRT_<arch_model>.cpp          signature.hpp
++interpreterRT_<arch_model>.cpp          universe.inline.hpp
+ 
+ interpreterRT_<arch>.hpp                allocation.hpp
+ interpreterRT_<arch>.hpp                generate_platform_dependent_include
+@@ -2149,7 +2113,7 @@
+ interpreterRuntime.cpp                  instanceKlass.hpp
+ interpreterRuntime.cpp                  interfaceSupport.hpp
+ interpreterRuntime.cpp                  interpreterRuntime.hpp
+-interpreterRuntime.cpp                  interpreter_<arch>.hpp
++interpreterRuntime.cpp                  interpreter.hpp
+ interpreterRuntime.cpp                  java.hpp
+ interpreterRuntime.cpp                  jfieldIDWorkaround.hpp
+ interpreterRuntime.cpp                  jvmtiExport.hpp
+@@ -2169,7 +2133,7 @@
+ interpreterRuntime.cpp                  threadCritical.hpp
+ interpreterRuntime.cpp                  universe.inline.hpp
+ interpreterRuntime.cpp                  vmSymbols.hpp
+-interpreterRuntime.cpp                  vm_version_<arch>.hpp
++interpreterRuntime.cpp                  vm_version_<arch_model>.hpp
+ 
+ interpreterRuntime.hpp                  bytecode.hpp
+ interpreterRuntime.hpp                  frame.inline.hpp
+@@ -2180,29 +2144,36 @@
+ interpreterRuntime.hpp                  top.hpp
+ interpreterRuntime.hpp                  universe.hpp
+ 
+-interpreter_<arch>.cpp                  arguments.hpp
+-interpreter_<arch>.cpp                  arrayOop.hpp
+-interpreter_<arch>.cpp                  assembler.hpp
+-interpreter_<arch>.cpp                  bytecodeHistogram.hpp
+-interpreter_<arch>.cpp                  debug.hpp
+-interpreter_<arch>.cpp                  deoptimization.hpp
+-interpreter_<arch>.cpp                  frame.inline.hpp
+-interpreter_<arch>.cpp                  interpreterRuntime.hpp
+-interpreter_<arch>.cpp                  interpreter_<arch>.hpp
+-interpreter_<arch>.cpp                  jvmtiExport.hpp
+-interpreter_<arch>.cpp                  jvmtiThreadState.hpp
+-interpreter_<arch>.cpp                  methodDataOop.hpp
+-interpreter_<arch>.cpp                  methodOop.hpp
+-interpreter_<arch>.cpp                  oop.inline.hpp
+-interpreter_<arch>.cpp                  sharedRuntime.hpp
+-interpreter_<arch>.cpp                  stubRoutines.hpp
+-interpreter_<arch>.cpp                  synchronizer.hpp
+-interpreter_<arch>.cpp                  templateTable.hpp
+-interpreter_<arch>.cpp                  timer.hpp
+-interpreter_<arch>.cpp                  vframeArray.hpp
++interpreter_<arch_model>.cpp            arguments.hpp
++interpreter_<arch_model>.cpp            arrayOop.hpp
++interpreter_<arch_model>.cpp            assembler.hpp
++interpreter_<arch_model>.cpp            bytecodeHistogram.hpp
++interpreter_<arch_model>.cpp            debug.hpp
++interpreter_<arch_model>.cpp            deoptimization.hpp
++interpreter_<arch_model>.cpp            frame.inline.hpp
++interpreter_<arch_model>.cpp            interpreterRuntime.hpp
++interpreter_<arch_model>.cpp            interpreter.hpp
++interpreter_<arch_model>.cpp            interpreterGenerator.hpp
++interpreter_<arch_model>.cpp            jvmtiExport.hpp
++interpreter_<arch_model>.cpp            jvmtiThreadState.hpp
++interpreter_<arch_model>.cpp            methodDataOop.hpp
++interpreter_<arch_model>.cpp            methodOop.hpp
++interpreter_<arch_model>.cpp            oop.inline.hpp
++interpreter_<arch_model>.cpp            sharedRuntime.hpp
++interpreter_<arch_model>.cpp            stubRoutines.hpp
++interpreter_<arch_model>.cpp            synchronizer.hpp
++interpreter_<arch_model>.cpp            templateTable.hpp
++interpreter_<arch_model>.cpp            timer.hpp
++interpreter_<arch_model>.cpp            vframeArray.hpp
++
++interpreter_<arch>.hpp                  generate_platform_dependent_include
++
++interpreterGenerator.hpp                cppInterpreter.hpp
++interpreterGenerator.hpp                cppInterpreterGenerator.hpp
++interpreterGenerator.hpp                templateInterpreter.hpp
++interpreterGenerator.hpp                templateInterpreterGenerator.hpp
+ 
+-interpreter_<arch>.hpp                  frame.inline.hpp
+-interpreter_<arch>.hpp                  interpreter.hpp
++interpreterGenerator_<arch>.hpp         generate_platform_dependent_include
+ 
+ invocationCounter.cpp                   frame.hpp
+ invocationCounter.cpp                   handles.inline.hpp
+@@ -2260,7 +2231,7 @@
+ java.cpp                                universe.hpp
+ java.cpp                                vmError.hpp
+ java.cpp                                vm_operations.hpp
+-java.cpp                                vm_version_<arch>.hpp
++java.cpp                                vm_version_<arch_model>.hpp
+ java.cpp                                vtune.hpp
+ 
+ java.hpp                                os.hpp
+@@ -2283,7 +2254,7 @@
+ javaCalls.cpp                           compileBroker.hpp
+ javaCalls.cpp                           handles.inline.hpp
+ javaCalls.cpp                           interfaceSupport.hpp
+-javaCalls.cpp                           interpreter_<arch>.hpp
++javaCalls.cpp                           interpreter.hpp
+ javaCalls.cpp                           javaCalls.hpp
+ javaCalls.cpp                           linkResolver.hpp
+ javaCalls.cpp                           mutexLocker.hpp
+@@ -2388,31 +2359,18 @@
+ jni.cpp                                 vmSymbols.hpp
+ jni.cpp                                 vm_operations.hpp
+ 
+-jniCheck.cpp                            fieldDescriptor.hpp
+-jniCheck.cpp                            handles.hpp
+-jniCheck.cpp                            instanceKlass.hpp
+-jniCheck.cpp                            interfaceSupport.hpp
+-jniCheck.cpp                            jfieldIDWorkaround.hpp
+-jniCheck.cpp                            jni.h
+-jniCheck.cpp                            jniCheck.hpp
+-jniCheck.cpp                            jniTypes_<arch>.hpp
+-jniCheck.cpp                            jvm_misc.hpp
+-jniCheck.cpp                            oop.inline.hpp
+-jniCheck.cpp                            symbolOop.hpp
+-jniCheck.cpp                            systemDictionary.hpp
+-jniCheck.cpp                            thread.hpp
+-jniCheck.cpp                            vmSymbols.hpp
++// jniCheck is jck optional, put cpp deps in includeDB_features
+ 
+ jniFastGetField.cpp                     jniFastGetField.hpp
+ 
+ jniFastGetField.hpp                     allocation.hpp
+ jniFastGetField.hpp                     jvm_misc.hpp
+ 
+-jniFastGetField_<arch>.cpp              assembler_<arch>.inline.hpp
+-jniFastGetField_<arch>.cpp              jniFastGetField.hpp
+-jniFastGetField_<arch>.cpp              jvm_misc.hpp
+-jniFastGetField_<arch>.cpp              resourceArea.hpp
+-jniFastGetField_<arch>.cpp              safepoint.hpp
++jniFastGetField_<arch_model>.cpp        assembler_<arch_model>.inline.hpp
++jniFastGetField_<arch_model>.cpp        jniFastGetField.hpp
++jniFastGetField_<arch_model>.cpp        jvm_misc.hpp
++jniFastGetField_<arch_model>.cpp        resourceArea.hpp
++jniFastGetField_<arch_model>.cpp        safepoint.hpp
+ 
+ jniHandles.cpp                          jniHandles.hpp
+ jniHandles.cpp                          mutexLocker.hpp
+@@ -2487,181 +2445,6 @@
+ jvm_misc.hpp                            handles.hpp
+ jvm_misc.hpp                            jni.h
+ 
+-jvmtiAgentThread.hpp                    jvmtiEnv.hpp
+-
+-jvmtiClassFileReconstituter.cpp         bytecodeStream.hpp
+-jvmtiClassFileReconstituter.cpp         bytes_<arch>.hpp
+-jvmtiClassFileReconstituter.cpp         jvmtiClassFileReconstituter.hpp
+-jvmtiClassFileReconstituter.cpp         symbolTable.hpp
+-
+-jvmtiClassFileReconstituter.hpp         jvmtiEnv.hpp
+-
+-jvmtiCodeBlobEvents.cpp                 codeBlob.hpp
+-jvmtiCodeBlobEvents.cpp                 codeCache.hpp
+-jvmtiCodeBlobEvents.cpp                 handles.hpp
+-jvmtiCodeBlobEvents.cpp                 handles.inline.hpp
+-jvmtiCodeBlobEvents.cpp                 jvmtiCodeBlobEvents.hpp
+-jvmtiCodeBlobEvents.cpp                 jvmtiExport.hpp
+-jvmtiCodeBlobEvents.cpp                 oop.inline.hpp
+-jvmtiCodeBlobEvents.cpp                 resourceArea.hpp
+-jvmtiCodeBlobEvents.cpp                 scopeDesc.hpp
+-jvmtiCodeBlobEvents.cpp                 vmThread.hpp
+-
+-jvmtiCodeBlobEvents.hpp                 jvmti.h
+-
+-jvmtiEnter.cpp                          jvmtiEnter.hpp
+-
+-jvmtiEnter.hpp                          interfaceSupport.hpp
+-jvmtiEnter.hpp                          jvmtiEnv.hpp
+-jvmtiEnter.hpp                          jvmtiImpl.hpp
+-jvmtiEnter.hpp                          resourceArea.hpp
+-jvmtiEnter.hpp                          systemDictionary.hpp
+-
+-jvmtiEnterTrace.cpp                     jvmtiEnter.hpp
+-
+-jvmtiEnv.cpp                            arguments.hpp
+-jvmtiEnv.cpp                            bytecodeStream.hpp
+-jvmtiEnv.cpp                            cpCacheOop.hpp
+-jvmtiEnv.cpp                            deoptimization.hpp
+-jvmtiEnv.cpp                            exceptions.hpp
+-jvmtiEnv.cpp                            instanceKlass.hpp
+-jvmtiEnv.cpp                            interfaceSupport.hpp
+-jvmtiEnv.cpp                            interpreter.hpp
+-jvmtiEnv.cpp                            javaCalls.hpp
+-jvmtiEnv.cpp                            jfieldIDWorkaround.hpp
+-jvmtiEnv.cpp                            jniCheck.hpp
+-jvmtiEnv.cpp                            jvm_misc.hpp
+-jvmtiEnv.cpp                            jvmtiAgentThread.hpp
+-jvmtiEnv.cpp                            jvmtiClassFileReconstituter.hpp
+-jvmtiEnv.cpp                            jvmtiCodeBlobEvents.hpp
+-jvmtiEnv.cpp                            jvmtiEnv.hpp
+-jvmtiEnv.cpp                            jvmtiExtensions.hpp
+-jvmtiEnv.cpp                            jvmtiGetLoadedClasses.hpp
+-jvmtiEnv.cpp                            jvmtiImpl.hpp
+-jvmtiEnv.cpp                            jvmtiManageCapabilities.hpp
+-jvmtiEnv.cpp                            jvmtiRedefineClasses.hpp
+-jvmtiEnv.cpp                            jvmtiTagMap.hpp 
+-jvmtiEnv.cpp                            jvmtiThreadState.inline.hpp
+-jvmtiEnv.cpp                            objectMonitor.inline.hpp
+-jvmtiEnv.cpp                            osThread.hpp
+-jvmtiEnv.cpp                            preserveException.hpp
+-jvmtiEnv.cpp                            reflectionUtils.hpp
+-jvmtiEnv.cpp                            resourceArea.hpp
+-jvmtiEnv.cpp                            signature.hpp
+-jvmtiEnv.cpp                            systemDictionary.hpp
+-jvmtiEnv.cpp                            threadService.hpp
+-jvmtiEnv.cpp                            thread_<os_family>.inline.hpp
+-jvmtiEnv.cpp                            universe.inline.hpp
+-jvmtiEnv.cpp                            vframe.hpp
+-jvmtiEnv.cpp                            vmSymbols.hpp
+-jvmtiEnv.cpp                            vmThread.hpp
+-
+-jvmtiEnv.hpp                            jvmtiEnvBase.hpp
+-
+-jvmtiEnvBase.cpp                        biasedLocking.hpp
+-jvmtiEnvBase.cpp                        interfaceSupport.hpp
+-jvmtiEnvBase.cpp                        jfieldIDWorkaround.hpp
+-jvmtiEnvBase.cpp                        jvmtiEnv.hpp
+-jvmtiEnvBase.cpp                        jvmtiEnvBase.hpp
+-jvmtiEnvBase.cpp                        jvmtiEventController.inline.hpp
+-jvmtiEnvBase.cpp                        jvmtiExtensions.hpp
+-jvmtiEnvBase.cpp                        jvmtiImpl.hpp
+-jvmtiEnvBase.cpp                        jvmtiManageCapabilities.hpp
+-jvmtiEnvBase.cpp                        jvmtiTagMap.hpp
+-jvmtiEnvBase.cpp                        jvmtiThreadState.inline.hpp
+-jvmtiEnvBase.cpp                        objArrayKlass.hpp
+-jvmtiEnvBase.cpp                        objArrayOop.hpp
+-jvmtiEnvBase.cpp                        objectMonitor.hpp
+-jvmtiEnvBase.cpp                        objectMonitor.inline.hpp
+-jvmtiEnvBase.cpp                        signature.hpp
+-jvmtiEnvBase.cpp                        systemDictionary.hpp
+-jvmtiEnvBase.cpp                        vframe.hpp
+-jvmtiEnvBase.cpp                        vframe_hp.hpp
+-jvmtiEnvBase.cpp                        vmThread.hpp
+-jvmtiEnvBase.cpp                        vm_operations.hpp
+-
+-jvmtiEnvBase.hpp                        classLoader.hpp
+-jvmtiEnvBase.hpp                        fieldDescriptor.hpp
+-jvmtiEnvBase.hpp                        frame.hpp
+-jvmtiEnvBase.hpp                        growableArray.hpp
+-jvmtiEnvBase.hpp                        handles.inline.hpp
+-jvmtiEnvBase.hpp                        jvmtiEnvThreadState.hpp
+-jvmtiEnvBase.hpp                        jvmtiEventController.hpp
+-jvmtiEnvBase.hpp                        jvmtiThreadState.hpp
+-jvmtiEnvBase.hpp                        thread.hpp
+-jvmtiEnvBase.hpp                        vm_operations.hpp
+-
+-jvmtiEnvThreadState.cpp                 handles.hpp
+-jvmtiEnvThreadState.cpp                 handles.inline.hpp
+-jvmtiEnvThreadState.cpp                 interfaceSupport.hpp
+-jvmtiEnvThreadState.cpp                 interpreter.hpp
+-jvmtiEnvThreadState.cpp                 javaCalls.hpp
+-jvmtiEnvThreadState.cpp                 jvmtiEnv.hpp
+-jvmtiEnvThreadState.cpp                 jvmtiEnvThreadState.hpp
+-jvmtiEnvThreadState.cpp                 jvmtiEventController.inline.hpp
+-jvmtiEnvThreadState.cpp                 jvmtiImpl.hpp
+-jvmtiEnvThreadState.cpp                 resourceArea.hpp
+-jvmtiEnvThreadState.cpp                 signature.hpp
+-jvmtiEnvThreadState.cpp                 systemDictionary.hpp
+-jvmtiEnvThreadState.cpp                 vframe.hpp
+-jvmtiEnvThreadState.cpp                 vm_operations.hpp
+-
+-jvmtiEnvThreadState.hpp                 allocation.hpp
+-jvmtiEnvThreadState.hpp                 allocation.inline.hpp
+-jvmtiEnvThreadState.hpp                 globalDefinitions.hpp
+-jvmtiEnvThreadState.hpp                 growableArray.hpp
+-jvmtiEnvThreadState.hpp                 instanceKlass.hpp
+-jvmtiEnvThreadState.hpp                 jvmti.h
+-jvmtiEnvThreadState.hpp                 jvmtiEventController.hpp
+-
+-jvmtiEventController.cpp                frame.hpp
+-jvmtiEventController.cpp                interpreter.hpp
+-jvmtiEventController.cpp                jvmtiEnv.hpp
+-jvmtiEventController.cpp                jvmtiEventController.hpp
+-jvmtiEventController.cpp                jvmtiEventController.inline.hpp
+-jvmtiEventController.cpp                jvmtiExport.hpp
+-jvmtiEventController.cpp                jvmtiImpl.hpp
+-jvmtiEventController.cpp                jvmtiThreadState.inline.hpp
+-jvmtiEventController.cpp                resourceArea.hpp
+-jvmtiEventController.cpp                thread.hpp
+-jvmtiEventController.cpp                vframe.hpp
+-jvmtiEventController.cpp                vframe_hp.hpp
+-jvmtiEventController.cpp                vmThread.hpp
+-jvmtiEventController.cpp                vm_operations.hpp
+-
+-jvmtiEventController.hpp                allocation.hpp
+-jvmtiEventController.hpp                allocation.inline.hpp
+-jvmtiEventController.hpp                globalDefinitions.hpp
+-jvmtiEventController.hpp                jvmti.h
+-
+-jvmtiEventController.inline.hpp         jvmtiEventController.hpp
+-jvmtiEventController.inline.hpp         jvmtiImpl.hpp
+-
+-jvmtiExport.cpp                         arguments.hpp
+-jvmtiExport.cpp                         attachListener.hpp
+-jvmtiExport.cpp                         handles.hpp
+-jvmtiExport.cpp                         interfaceSupport.hpp
+-jvmtiExport.cpp                         interpreter.hpp
+-jvmtiExport.cpp                         jvmtiCodeBlobEvents.hpp
+-jvmtiExport.cpp                         jvmtiEventController.hpp
+-jvmtiExport.cpp                         jvmtiEventController.inline.hpp
+-jvmtiExport.cpp                         jvmtiExport.hpp
+-jvmtiExport.cpp                         jvmtiImpl.hpp
+-jvmtiExport.cpp                         jvmtiManageCapabilities.hpp
+-jvmtiExport.cpp                         jvmtiTagMap.hpp
+-jvmtiExport.cpp                         jvmtiThreadState.inline.hpp
+-jvmtiExport.cpp                         nmethod.hpp
+-jvmtiExport.cpp                         objArrayKlass.hpp
+-jvmtiExport.cpp                         objArrayOop.hpp
+-jvmtiExport.cpp                         objectMonitor.inline.hpp
+-jvmtiExport.cpp                         pcDesc.hpp
+-jvmtiExport.cpp                         resourceArea.hpp
+-jvmtiExport.cpp                         scopeDesc.hpp
+-jvmtiExport.cpp                         serviceUtil.hpp
+-jvmtiExport.cpp                         systemDictionary.hpp
+-jvmtiExport.cpp                         thread.hpp
+-jvmtiExport.cpp                         vframe.hpp
+-
+ jvmtiExport.hpp                         allocation.hpp
+ jvmtiExport.hpp                         globalDefinitions.hpp
+ jvmtiExport.hpp                         growableArray.hpp
+@@ -2671,113 +2454,6 @@
+ jvmtiExport.hpp                         oop.hpp
+ jvmtiExport.hpp                         oopsHierarchy.hpp
+ 
+-jvmtiExtensions.cpp                     jvmtiExport.hpp
+-jvmtiExtensions.cpp                     jvmtiExtensions.hpp
+-
+-jvmtiExtensions.hpp                     allocation.hpp
+-jvmtiExtensions.hpp                     jvmti.h
+-jvmtiExtensions.hpp                     jvmtiEnv.hpp
+-
+-jvmtiGetLoadedClasses.cpp               jvmtiGetLoadedClasses.hpp
+-jvmtiGetLoadedClasses.cpp               systemDictionary.hpp
+-jvmtiGetLoadedClasses.cpp               thread.hpp
+-jvmtiGetLoadedClasses.cpp               universe.inline.hpp
+-
+-jvmtiGetLoadedClasses.hpp               jvmtiEnv.hpp
+-
+-jvmtiImpl.cpp                           exceptions.hpp
+-jvmtiImpl.cpp                           handles.hpp
+-jvmtiImpl.cpp                           handles.inline.hpp
+-jvmtiImpl.cpp                           instanceKlass.hpp
+-jvmtiImpl.cpp                           interfaceSupport.hpp
+-jvmtiImpl.cpp                           interpreter.hpp
+-jvmtiImpl.cpp                           javaCalls.hpp
+-jvmtiImpl.cpp                           jvmtiAgentThread.hpp
+-jvmtiImpl.cpp                           jvmtiEnv.hpp
+-jvmtiImpl.cpp                           jvmtiEventController.inline.hpp
+-jvmtiImpl.cpp                           jvmtiImpl.hpp
+-jvmtiImpl.cpp                           jvmtiRedefineClasses.hpp
+-jvmtiImpl.cpp                           resourceArea.hpp
+-jvmtiImpl.cpp                           signature.hpp
+-jvmtiImpl.cpp                           systemDictionary.hpp
+-jvmtiImpl.cpp                           thread_<os_family>.inline.hpp
+-jvmtiImpl.cpp                           vframe.hpp
+-jvmtiImpl.cpp                           vframe_hp.hpp
+-jvmtiImpl.cpp                           vm_operations.hpp
+-
+-jvmtiImpl.hpp                           jvmti.h
+-jvmtiImpl.hpp                           jvmtiEnvThreadState.hpp
+-jvmtiImpl.hpp                           jvmtiEventController.hpp
+-jvmtiImpl.hpp                           objArrayOop.hpp
+-jvmtiImpl.hpp                           stackValueCollection.hpp
+-jvmtiImpl.hpp                           systemDictionary.hpp
+-jvmtiImpl.hpp                           vm_operations.hpp
+-
+-jvmtiManageCapabilities.cpp             jvmtiEnv.hpp
+-jvmtiManageCapabilities.cpp             jvmtiExport.hpp
+-jvmtiManageCapabilities.cpp             jvmtiManageCapabilities.hpp
+-
+-jvmtiManageCapabilities.hpp             allocation.hpp
+-jvmtiManageCapabilities.hpp             jvmti.h
+-
+-jvmtiRedefineClasses.cpp                codeCache.hpp
+-jvmtiRedefineClasses.cpp                deoptimization.hpp
+-jvmtiRedefineClasses.cpp                gcLocker.hpp
+-jvmtiRedefineClasses.cpp                jvmtiImpl.hpp
+-jvmtiRedefineClasses.cpp                jvmtiRedefineClasses.hpp
+-jvmtiRedefineClasses.cpp                klassVtable.hpp
+-jvmtiRedefineClasses.cpp                methodComparator.hpp
+-jvmtiRedefineClasses.cpp                oopMapCache.hpp
+-jvmtiRedefineClasses.cpp                relocator.hpp
+-jvmtiRedefineClasses.cpp                rewriter.hpp
+-jvmtiRedefineClasses.cpp                systemDictionary.hpp
+-jvmtiRedefineClasses.cpp                universe.inline.hpp
+-jvmtiRedefineClasses.cpp                verifier.hpp
+-
+-jvmtiRedefineClasses.hpp                jvmtiEnv.hpp
+-jvmtiRedefineClasses.hpp                objArrayKlass.hpp
+-jvmtiRedefineClasses.hpp                objArrayOop.hpp
+-jvmtiRedefineClasses.hpp                oopFactory.hpp
+-jvmtiRedefineClasses.hpp                resourceArea.hpp
+-jvmtiRedefineClasses.hpp                vm_operations.hpp
+-
+-jvmtiTagMap.cpp                         biasedLocking.hpp
+-jvmtiTagMap.cpp                         javaCalls.hpp
+-jvmtiTagMap.cpp                         jniHandles.hpp
+-jvmtiTagMap.cpp                         jvmtiEnv.hpp
+-jvmtiTagMap.cpp                         jvmtiEventController.hpp
+-jvmtiTagMap.cpp                         jvmtiEventController.inline.hpp
+-jvmtiTagMap.cpp                         jvmtiExport.hpp
+-jvmtiTagMap.cpp                         jvmtiImpl.hpp
+-jvmtiTagMap.cpp                         jvmtiTagMap.hpp
+-jvmtiTagMap.cpp                         mutex.hpp
+-jvmtiTagMap.cpp                         mutexLocker.hpp
+-jvmtiTagMap.cpp                         objArrayKlass.hpp
+-jvmtiTagMap.cpp                         oop.inline2.hpp
+-jvmtiTagMap.cpp                         reflectionUtils.hpp
+-jvmtiTagMap.cpp                         serviceUtil.hpp
+-jvmtiTagMap.cpp                         symbolTable.hpp
+-jvmtiTagMap.cpp                         systemDictionary.hpp
+-jvmtiTagMap.cpp                         vframe.hpp
+-jvmtiTagMap.cpp                         vmSymbols.hpp
+-jvmtiTagMap.cpp                         vmThread.hpp
+-jvmtiTagMap.cpp                         vm_operations.hpp
+-
+-jvmtiTagMap.hpp                         allocation.hpp
+-jvmtiTagMap.hpp                         collectedHeap.hpp
+-jvmtiTagMap.hpp                         genCollectedHeap.hpp
+-jvmtiTagMap.hpp                         jvmti.h
+-jvmtiTagMap.hpp                         jvmtiEnv.hpp
+-jvmtiTagMap.hpp                         universe.hpp
+-
+-jvmtiThreadState.cpp                    gcLocker.hpp
+-jvmtiThreadState.cpp                    jvmtiEnv.hpp
+-jvmtiThreadState.cpp                    jvmtiEventController.inline.hpp
+-jvmtiThreadState.cpp                    jvmtiImpl.hpp
+-jvmtiThreadState.cpp                    jvmtiThreadState.inline.hpp
+-jvmtiThreadState.cpp                    resourceArea.hpp
+-jvmtiThreadState.cpp                    vframe.hpp
+-
+ jvmtiThreadState.hpp                    allocation.hpp
+ jvmtiThreadState.hpp                    allocation.inline.hpp
+ jvmtiThreadState.hpp                    growableArray.hpp
+@@ -2785,9 +2461,6 @@
+ jvmtiThreadState.hpp                    jvmtiEventController.hpp
+ jvmtiThreadState.hpp                    thread.hpp
+ 
+-jvmtiThreadState.inline.hpp             jvmtiEnvThreadState.hpp
+-jvmtiThreadState.inline.hpp             jvmtiThreadState.hpp
+-
+ klass.cpp                               atomic.hpp
+ klass.cpp                               collectedHeap.inline.hpp
+ klass.cpp                               instanceKlass.hpp
+@@ -2844,7 +2517,7 @@
+ klassVtable.cpp                         gcLocker.hpp
+ klassVtable.cpp                         handles.inline.hpp
+ klassVtable.cpp                         instanceKlass.hpp
+-klassVtable.cpp                         jvmtiRedefineClasses.hpp
++klassVtable.cpp                         jvmtiRedefineClassesTrace.hpp
+ klassVtable.cpp                         klassOop.hpp
+ klassVtable.cpp                         klassVtable.hpp
+ klassVtable.cpp                         markSweep.hpp
+@@ -3046,7 +2719,7 @@
+ 
+ methodComparator.cpp                    globalDefinitions.hpp
+ methodComparator.cpp                    handles.inline.hpp
+-methodComparator.cpp                    jvmtiRedefineClasses.hpp
++methodComparator.cpp                    jvmtiRedefineClassesTrace.hpp
+ methodComparator.cpp                    methodComparator.hpp
+ methodComparator.cpp                    oop.inline.hpp
+ methodComparator.cpp                    symbolOop.hpp
+@@ -3193,7 +2866,7 @@
+ mutex_<os_family>.inline.hpp            os_<os_family>.inline.hpp
+ mutex_<os_family>.inline.hpp            thread_<os_family>.inline.hpp
+ 
+-nativeInst_<arch>.cpp                   assembler_<arch>.inline.hpp
++nativeInst_<arch>.cpp                   assembler_<arch_model>.inline.hpp
+ nativeInst_<arch>.cpp                   handles.hpp
+ nativeInst_<arch>.cpp                   nativeInst_<arch>.hpp
+ nativeInst_<arch>.cpp                   oop.hpp
+@@ -3240,7 +2913,7 @@
+ nmethod.cpp                             disassembler_<arch>.hpp
+ nmethod.cpp                             dtrace.hpp
+ nmethod.cpp                             events.hpp
+-nmethod.cpp                             jvmtiRedefineClasses.hpp
++nmethod.cpp                             jvmtiRedefineClassesTrace.hpp
+ nmethod.cpp                             methodDataOop.hpp
+ nmethod.cpp                             nmethod.hpp
+ nmethod.cpp                             scopeDesc.hpp
+@@ -3458,7 +3131,7 @@
+ 
+ os_<os_arch>.cpp                        allocation.inline.hpp
+ os_<os_arch>.cpp                        arguments.hpp
+-os_<os_arch>.cpp                        assembler_<arch>.inline.hpp
++os_<os_arch>.cpp                        assembler_<arch_model>.inline.hpp
+ os_<os_arch>.cpp                        classLoader.hpp
+ os_<os_arch>.cpp                        events.hpp
+ os_<os_arch>.cpp                        extendedPC.hpp
+@@ -3466,7 +3139,7 @@
+ os_<os_arch>.cpp                        hpi.hpp
+ os_<os_arch>.cpp                        icBuffer.hpp
+ os_<os_arch>.cpp                        interfaceSupport.hpp
+-os_<os_arch>.cpp                        interpreter_<arch>.hpp
++os_<os_arch>.cpp                        interpreter.hpp
+ os_<os_arch>.cpp                        java.hpp
+ os_<os_arch>.cpp                        javaCalls.hpp
+ os_<os_arch>.cpp                        jniFastGetField.hpp
+@@ -3492,7 +3165,7 @@
+ 
+ os_<os_family>.cpp                      allocation.inline.hpp
+ os_<os_family>.cpp                      arguments.hpp
+-os_<os_family>.cpp                      assembler_<arch>.inline.hpp
++os_<os_family>.cpp                      assembler_<arch_model>.inline.hpp
+ os_<os_family>.cpp                      attachListener.hpp
+ os_<os_family>.cpp                      classLoader.hpp
+ os_<os_family>.cpp                      compileBroker.hpp
+@@ -3504,7 +3177,7 @@
+ os_<os_family>.cpp                      hpi.hpp
+ os_<os_family>.cpp                      icBuffer.hpp
+ os_<os_family>.cpp                      interfaceSupport.hpp
+-os_<os_family>.cpp                      interpreter_<arch>.hpp
++os_<os_family>.cpp                      interpreter.hpp
+ os_<os_family>.cpp                      java.hpp
+ os_<os_family>.cpp                      javaCalls.hpp
+ os_<os_family>.cpp                      jniFastGetField.hpp
+@@ -3550,7 +3223,7 @@
+ osThread.hpp                            objectMonitor.hpp
+ osThread.hpp                            top.hpp
+ 
+-osThread_<os_family>.cpp                assembler_<arch>.inline.hpp
++osThread_<os_family>.cpp                assembler_<arch_model>.inline.hpp
+ osThread_<os_family>.cpp                atomic.hpp
+ osThread_<os_family>.cpp                handles.inline.hpp
+ osThread_<os_family>.cpp                mutexLocker.hpp
+@@ -3567,6 +3240,8 @@
+ ostream.cpp                             defaultStream.hpp
+ ostream.cpp                             oop.inline.hpp
+ ostream.cpp                             os_<os_family>.inline.hpp
++ostream.cpp                             hpi.hpp
++ostream.cpp                             hpi_<os_family>.hpp
+ ostream.cpp                             ostream.hpp
+ ostream.cpp                             top.hpp
+ ostream.cpp                             xmlstream.hpp
+@@ -3744,7 +3419,7 @@
+ register_<arch>.cpp                     register_<arch>.hpp
+ 
+ register_<arch>.hpp                     register.hpp
+-register_<arch>.hpp                     vm_version_<arch>.hpp
++register_<arch>.hpp                     vm_version_<arch_model>.hpp
+ 
+ registerMap.hpp                         globalDefinitions.hpp
+ registerMap.hpp                         register_<arch>.hpp
+@@ -3753,11 +3428,11 @@
+ registerMap_<arch>.hpp                  generate_platform_dependent_include
+ 
+ register_definitions_<arch>.cpp         assembler.hpp
+-register_definitions_<arch>.cpp         interp_masm_<arch>.hpp
++register_definitions_<arch>.cpp         interp_masm_<arch_model>.hpp
+ register_definitions_<arch>.cpp         register.hpp
+ register_definitions_<arch>.cpp         register_<arch>.hpp
+ 
+-relocInfo.cpp                           assembler_<arch>.inline.hpp
++relocInfo.cpp                           assembler_<arch_model>.inline.hpp
+ relocInfo.cpp                           compiledIC.hpp
+ relocInfo.cpp                           copy.hpp
+ relocInfo.cpp                           nativeInst_<arch>.hpp
+@@ -3770,7 +3445,7 @@
+ relocInfo.hpp                           top.hpp
+ 
+ relocInfo_<arch>.cpp                    assembler.inline.hpp
+-relocInfo_<arch>.cpp                    assembler_<arch>.inline.hpp
++relocInfo_<arch>.cpp                    assembler_<arch_model>.inline.hpp
+ relocInfo_<arch>.cpp                    nativeInst_<arch>.hpp
+ relocInfo_<arch>.cpp                    relocInfo.hpp
+ relocInfo_<arch>.cpp                    safepoint.hpp
+@@ -3805,11 +3480,7 @@
+ resourceArea.hpp                        allocation.hpp
+ resourceArea.hpp                        thread_<os_family>.inline.hpp
+ 
+-restore.cpp                             filemap.hpp
+-restore.cpp                             hashtable.inline.hpp
+-restore.cpp                             oop.inline.hpp
+-restore.cpp                             symbolTable.hpp
+-restore.cpp                             systemDictionary.hpp
++// restore is jck optional, put cpp deps in includeDB_features
+ 
+ rewriter.cpp                            bytecodes.hpp
+ rewriter.cpp                            gcLocker.hpp
+@@ -3826,7 +3497,7 @@
+ rewriter.hpp                            handles.inline.hpp
+ 
+ rframe.cpp                              frame.inline.hpp
+-rframe.cpp                              interpreter_<arch>.hpp
++rframe.cpp                              interpreter.hpp
+ rframe.cpp                              oop.inline.hpp
+ rframe.cpp                              rframe.hpp
+ rframe.cpp                              symbolOop.hpp
+@@ -3854,7 +3525,6 @@
+ safepoint.cpp                           icBuffer.hpp
+ safepoint.cpp                           interfaceSupport.hpp
+ safepoint.cpp                           interpreter.hpp
+-safepoint.cpp                           interpreter_<arch>.hpp
+ safepoint.cpp                           mutexLocker.hpp
+ safepoint.cpp                           nativeInst_<arch>.hpp
+ safepoint.cpp                           nmethod.hpp
+@@ -3895,15 +3565,7 @@
+ scopeDesc.hpp                           methodOop.hpp
+ scopeDesc.hpp                           pcDesc.hpp
+ 
+-serialize.cpp                           classify.hpp
+-serialize.cpp                           codeCache.hpp
+-serialize.cpp                           compactingPermGenGen.hpp
+-serialize.cpp                           compiledICHolderOop.hpp
+-serialize.cpp                           methodDataOop.hpp
+-serialize.cpp                           objArrayOop.hpp
+-serialize.cpp                           oop.hpp
+-serialize.cpp                           symbolTable.hpp
+-serialize.cpp                           systemDictionary.hpp
++// serialize is jck optional, put cpp deps in includeDB_features
+ 
+ serviceUtil.hpp                         objArrayOop.hpp
+ serviceUtil.hpp                         systemDictionary.hpp
+@@ -3938,7 +3600,7 @@
+ sharedRuntime.cpp                       init.hpp
+ sharedRuntime.cpp                       interfaceSupport.hpp
+ sharedRuntime.cpp                       interpreterRuntime.hpp
+-sharedRuntime.cpp                       interpreter_<arch>.hpp
++sharedRuntime.cpp                       interpreter.hpp
+ sharedRuntime.cpp                       javaCalls.hpp
+ sharedRuntime.cpp                       jvmtiExport.hpp
+ sharedRuntime.cpp                       nativeInst_<arch>.hpp
+@@ -3964,17 +3626,16 @@
+ sharedRuntime.hpp                       resourceArea.hpp
+ sharedRuntime.hpp                       threadLocalStorage.hpp
+ 
+-sharedRuntime_<arch>.cpp                assembler.hpp
+-sharedRuntime_<arch>.cpp                assembler_<arch>.inline.hpp
+-sharedRuntime_<arch>.cpp                compiledICHolderOop.hpp
+-sharedRuntime_<arch>.cpp                debugInfoRec.hpp
+-sharedRuntime_<arch>.cpp                icBuffer.hpp
+-sharedRuntime_<arch>.cpp                interpreter.hpp
+-sharedRuntime_<arch>.cpp                interpreter_<arch>.hpp
+-sharedRuntime_<arch>.cpp                sharedRuntime.hpp
+-sharedRuntime_<arch>.cpp                vframeArray.hpp
+-sharedRuntime_<arch>.cpp                vmreg_<arch>.inline.hpp
+-sharedRuntime_<arch>.cpp                vtableStubs.hpp
++sharedRuntime_<arch_model>.cpp          assembler.hpp
++sharedRuntime_<arch_model>.cpp          assembler_<arch_model>.inline.hpp
++sharedRuntime_<arch_model>.cpp          compiledICHolderOop.hpp
++sharedRuntime_<arch_model>.cpp          debugInfoRec.hpp
++sharedRuntime_<arch_model>.cpp          icBuffer.hpp
++sharedRuntime_<arch_model>.cpp          interpreter.hpp
++sharedRuntime_<arch_model>.cpp          sharedRuntime.hpp
++sharedRuntime_<arch_model>.cpp          vframeArray.hpp
++sharedRuntime_<arch_model>.cpp          vmreg_<arch>.inline.hpp
++sharedRuntime_<arch_model>.cpp          vtableStubs.hpp
+ 
+ sharedRuntimeTrans.cpp                  interfaceSupport.hpp
+ sharedRuntimeTrans.cpp                  jni.h
+@@ -4069,11 +3730,14 @@
+ stackMapTable.hpp                       methodOop.hpp
+ stackMapTable.hpp                       stackMapFrame.hpp
+ 
++stackValue.cpp                          debugInfo.hpp
++stackValue.cpp                          frame.inline.hpp
+ stackValue.cpp                          handles.inline.hpp
+ stackValue.cpp                          oop.hpp
+ stackValue.cpp                          stackValue.hpp
+ 
+ stackValue.hpp                          handles.hpp
++stackValue.hpp                          location.hpp
+ stackValue.hpp                          top.hpp
+ 
+ stackValueCollection.cpp                jniTypes_<arch>.hpp
+@@ -4093,12 +3757,12 @@
+ statSampler.cpp                         statSampler.hpp
+ statSampler.cpp                         systemDictionary.hpp
+ statSampler.cpp                         vmSymbols.hpp
+-statSampler.cpp                         vm_version_<arch>.hpp
++statSampler.cpp                         vm_version_<arch_model>.hpp
+ 
+ statSampler.hpp                         perfData.hpp
+ statSampler.hpp                         task.hpp
+ 
+-stubCodeGenerator.cpp                   assembler_<arch>.inline.hpp
++stubCodeGenerator.cpp                   assembler_<arch_model>.inline.hpp
+ stubCodeGenerator.cpp                   disassembler_<arch>.hpp
+ stubCodeGenerator.cpp                   forte.hpp
+ stubCodeGenerator.cpp                   oop.inline.hpp
+@@ -4108,20 +3772,21 @@
+ stubCodeGenerator.hpp                   allocation.hpp
+ stubCodeGenerator.hpp                   assembler.hpp
+ 
+-stubGenerator_<arch>.cpp                assembler.hpp
+-stubGenerator_<arch>.cpp                assembler_<arch>.inline.hpp
+-stubGenerator_<arch>.cpp                handles.inline.hpp
+-stubGenerator_<arch>.cpp                instanceOop.hpp
+-stubGenerator_<arch>.cpp                interpreter_<arch>.hpp
+-stubGenerator_<arch>.cpp                methodOop.hpp
+-stubGenerator_<arch>.cpp                nativeInst_<arch>.hpp
+-stubGenerator_<arch>.cpp                objArrayKlass.hpp
+-stubGenerator_<arch>.cpp                oop.inline.hpp
+-stubGenerator_<arch>.cpp                sharedRuntime.hpp
+-stubGenerator_<arch>.cpp                stubCodeGenerator.hpp
+-stubGenerator_<arch>.cpp                stubRoutines.hpp
+-stubGenerator_<arch>.cpp                thread_<os_family>.inline.hpp
+-stubGenerator_<arch>.cpp                top.hpp
++stubGenerator_<arch_model>.cpp          assembler.hpp
++stubGenerator_<arch_model>.cpp          assembler_<arch_model>.inline.hpp
++stubGenerator_<arch_model>.cpp          frame.inline.hpp
++stubGenerator_<arch_model>.cpp          handles.inline.hpp
++stubGenerator_<arch_model>.cpp          instanceOop.hpp
++stubGenerator_<arch_model>.cpp          interpreter.hpp
++stubGenerator_<arch_model>.cpp          methodOop.hpp
++stubGenerator_<arch_model>.cpp          nativeInst_<arch>.hpp
++stubGenerator_<arch_model>.cpp          objArrayKlass.hpp
++stubGenerator_<arch_model>.cpp          oop.inline.hpp
++stubGenerator_<arch_model>.cpp          sharedRuntime.hpp
++stubGenerator_<arch_model>.cpp          stubCodeGenerator.hpp
++stubGenerator_<arch_model>.cpp          stubRoutines.hpp
++stubGenerator_<arch_model>.cpp          thread_<os_family>.inline.hpp
++stubGenerator_<arch_model>.cpp          top.hpp
+ 
+ stubRoutines.cpp                        codeBuffer.hpp
+ stubRoutines.cpp                        copy.hpp
+@@ -4140,12 +3805,12 @@
+ stubRoutines.hpp                        stubCodeGenerator.hpp
+ stubRoutines.hpp                        top.hpp
+ 
+-stubRoutines_<arch>.cpp                 deoptimization.hpp
+-stubRoutines_<arch>.cpp                 frame.inline.hpp
+-stubRoutines_<arch>.cpp                 stubRoutines.hpp
+-stubRoutines_<arch>.cpp                 thread_<os_family>.inline.hpp
++stubRoutines_<arch_model>.cpp           deoptimization.hpp
++stubRoutines_<arch_model>.cpp           frame.inline.hpp
++stubRoutines_<arch_model>.cpp           stubRoutines.hpp
++stubRoutines_<arch_model>.cpp           thread_<os_family>.inline.hpp
+ 
+-stubRoutines_<arch>.hpp                 generate_platform_dependent_include
++stubRoutines_<arch_model>.hpp           generate_platform_dependent_include
+ 
+ stubRoutines_<os_family>.cpp            os.hpp
+ stubRoutines_<os_family>.cpp            stubRoutines.hpp
+@@ -4282,26 +3947,60 @@
+ taskqueue.hpp                           mutex.hpp
+ taskqueue.hpp                           orderAccess_<os_arch>.inline.hpp
+ 
++templateInterpreter.cpp                 interpreter.hpp
++templateInterpreter.cpp                 interpreterGenerator.hpp
++templateInterpreter.cpp                 interpreterRuntime.hpp
++templateInterpreter.cpp                 templateTable.hpp
++
++templateInterpreter.hpp                 abstractInterpreter.hpp
++templateInterpreter.hpp                 templateTable.hpp
++
++templateInterpreter_<arch_model>.cpp    arguments.hpp
++templateInterpreter_<arch_model>.cpp    arrayOop.hpp
++templateInterpreter_<arch_model>.cpp    assembler.hpp
++templateInterpreter_<arch_model>.cpp    bytecodeHistogram.hpp
++templateInterpreter_<arch_model>.cpp    debug.hpp
++templateInterpreter_<arch_model>.cpp    deoptimization.hpp
++templateInterpreter_<arch_model>.cpp    frame.inline.hpp
++templateInterpreter_<arch_model>.cpp    interpreterRuntime.hpp
++templateInterpreter_<arch_model>.cpp    interpreter.hpp
++templateInterpreter_<arch_model>.cpp    interpreterGenerator.hpp
++templateInterpreter_<arch_model>.cpp    jvmtiExport.hpp
++templateInterpreter_<arch_model>.cpp    jvmtiThreadState.hpp
++templateInterpreter_<arch_model>.cpp    methodDataOop.hpp
++templateInterpreter_<arch_model>.cpp    methodOop.hpp
++templateInterpreter_<arch_model>.cpp    oop.inline.hpp
++templateInterpreter_<arch_model>.cpp    sharedRuntime.hpp
++templateInterpreter_<arch_model>.cpp    stubRoutines.hpp
++templateInterpreter_<arch_model>.cpp    synchronizer.hpp
++templateInterpreter_<arch_model>.cpp    templateTable.hpp
++templateInterpreter_<arch_model>.cpp    timer.hpp
++templateInterpreter_<arch_model>.cpp    vframeArray.hpp
++
++templateInterpreter_<arch>.hpp          generate_platform_dependent_include
++
++templateInterpreterGenerator_<arch>.hpp generate_platform_dependent_include
++
+ templateTable.cpp                       templateTable.hpp
+ templateTable.cpp                       timer.hpp
+ 
+ templateTable.hpp                       allocation.hpp
+ templateTable.hpp                       bytecodes.hpp
+ templateTable.hpp                       frame.hpp
+-templateTable.hpp                       interp_masm_<arch>.hpp
++templateTable.hpp                       interp_masm_<arch_model>.hpp
+ 
+-templateTable_<arch>.cpp                interpreterRuntime.hpp
+-templateTable_<arch>.cpp                interpreter_<arch>.hpp
+-templateTable_<arch>.cpp                methodDataOop.hpp
+-templateTable_<arch>.cpp                objArrayKlass.hpp
+-templateTable_<arch>.cpp                oop.inline.hpp
+-templateTable_<arch>.cpp                sharedRuntime.hpp
+-templateTable_<arch>.cpp                stubRoutines.hpp
+-templateTable_<arch>.cpp                synchronizer.hpp
+-templateTable_<arch>.cpp                templateTable.hpp
+-templateTable_<arch>.cpp                universe.inline.hpp
++templateTable_<arch_model>.cpp          interpreterRuntime.hpp
++templateTable_<arch_model>.cpp          interpreter.hpp
++templateTable_<arch_model>.cpp          methodDataOop.hpp
++templateTable_<arch_model>.cpp          objArrayKlass.hpp
++templateTable_<arch_model>.cpp          oop.inline.hpp
++templateTable_<arch_model>.cpp          sharedRuntime.hpp
++templateTable_<arch_model>.cpp          stubRoutines.hpp
++templateTable_<arch_model>.cpp          synchronizer.hpp
++templateTable_<arch_model>.cpp          templateTable.hpp
++templateTable_<arch_model>.cpp          universe.inline.hpp
+ 
+-templateTable_<arch>.hpp                generate_platform_dependent_include
++templateTable_<arch_model>.hpp          generate_platform_dependent_include
+ 
+ tenuredGeneration.cpp                   allocation.inline.hpp
+ tenuredGeneration.cpp                   blockOffsetTable.inline.hpp
+@@ -4337,7 +4036,7 @@
+ thread.cpp                              instanceKlass.hpp
+ thread.cpp                              interfaceSupport.hpp
+ thread.cpp                              interpreter.hpp
+-thread.cpp                              interpreter_<arch>.hpp
++thread.cpp                              interpreter.hpp
+ thread.cpp                              java.hpp
+ thread.cpp                              javaCalls.hpp
+ thread.cpp                              javaClasses.hpp
+@@ -4540,6 +4239,7 @@
+ universe.cpp                            cpCacheKlass.hpp
+ universe.cpp                            cpCacheOop.hpp
+ universe.cpp                            deoptimization.hpp
++universe.cpp                            dependencies.hpp
+ universe.cpp                            events.hpp
+ universe.cpp                            filemap.hpp
+ universe.cpp                            fprofiler.hpp
+@@ -4552,11 +4252,11 @@
+ universe.cpp                            instanceKlass.hpp
+ universe.cpp                            instanceKlassKlass.hpp
+ universe.cpp                            instanceRefKlass.hpp
+-universe.cpp                            interpreter_<arch>.hpp
++universe.cpp                            interpreter.hpp
+ universe.cpp                            java.hpp
+ universe.cpp                            javaCalls.hpp
+ universe.cpp                            javaClasses.hpp
+-universe.cpp                            jvmtiRedefineClasses.hpp
++universe.cpp                            jvmtiRedefineClassesTrace.hpp
+ universe.cpp                            klassKlass.hpp
+ universe.cpp                            klassOop.hpp
+ universe.cpp                            memoryService.hpp
+@@ -4649,7 +4349,7 @@
+ vframe.cpp                              debugInfoRec.hpp
+ vframe.cpp                              handles.inline.hpp
+ vframe.cpp                              instanceKlass.hpp
+-vframe.cpp                              interpreter_<arch>.hpp
++vframe.cpp                              interpreter.hpp
+ vframe.cpp                              javaClasses.hpp
+ vframe.cpp                              nmethod.hpp
+ vframe.cpp                              objectMonitor.hpp
+@@ -4682,7 +4382,7 @@
+ vframeArray.cpp                         allocation.inline.hpp
+ vframeArray.cpp                         events.hpp
+ vframeArray.cpp                         handles.inline.hpp
+-vframeArray.cpp                         interpreter_<arch>.hpp
++vframeArray.cpp                         interpreter.hpp
+ vframeArray.cpp                         jvmtiThreadState.hpp
+ vframeArray.cpp                         methodDataOop.hpp
+ vframeArray.cpp                         monitorChunk.hpp
+@@ -4705,7 +4405,7 @@
+ vframe_hp.cpp                           debugInfoRec.hpp
+ vframe_hp.cpp                           handles.inline.hpp
+ vframe_hp.cpp                           instanceKlass.hpp
+-vframe_hp.cpp                           interpreter_<arch>.hpp
++vframe_hp.cpp                           interpreter.hpp
+ vframe_hp.cpp                           monitorChunk.hpp
+ vframe_hp.cpp                           nmethod.hpp
+ vframe_hp.cpp                           oop.inline.hpp
+@@ -4748,86 +4448,7 @@
+ vmError_<os_family>.cpp                 thread.hpp
+ vmError_<os_family>.cpp                 vmError.hpp
+ 
+-vmStructs.cpp                           arguments.hpp
+-vmStructs.cpp                           arrayKlass.hpp
+-vmStructs.cpp                           arrayKlassKlass.hpp
+-vmStructs.cpp                           arrayOop.hpp
+-vmStructs.cpp                           bytecodes.hpp
+-vmStructs.cpp                           cInterpreter.hpp
+-vmStructs.cpp                           cardTableRS.hpp
+-vmStructs.cpp                           codeBlob.hpp
+-vmStructs.cpp                           codeCache.hpp
+-vmStructs.cpp                           collectedHeap.hpp
+-vmStructs.cpp                           compactPermGen.hpp
+-vmStructs.cpp                           compiledICHolderKlass.hpp
+-vmStructs.cpp                           compiledICHolderOop.hpp
+-vmStructs.cpp                           compressedStream.hpp
+-vmStructs.cpp                           constMethodKlass.hpp
+-vmStructs.cpp                           constMethodOop.hpp
+-vmStructs.cpp                           constantPoolKlass.hpp
+-vmStructs.cpp                           constantPoolOop.hpp
+-vmStructs.cpp                           cpCacheKlass.hpp
+-vmStructs.cpp                           cpCacheOop.hpp
+-vmStructs.cpp                           defNewGeneration.hpp
+-vmStructs.cpp                           dictionary.hpp
+-vmStructs.cpp                           freeBlockDictionary.hpp
+-vmStructs.cpp                           genCollectedHeap.hpp
+-vmStructs.cpp                           generation.hpp
+-vmStructs.cpp                           generationSpec.hpp
+-vmStructs.cpp                           globalDefinitions.hpp
+-vmStructs.cpp                           globals.hpp
+-vmStructs.cpp                           hashtable.hpp
+-vmStructs.cpp                           heap.hpp
+-vmStructs.cpp                           immutableSpace.hpp
+-vmStructs.cpp                           instanceKlass.hpp
+-vmStructs.cpp                           instanceKlassKlass.hpp
+-vmStructs.cpp                           instanceOop.hpp
+-vmStructs.cpp                           interpreter.hpp
+-vmStructs.cpp                           java.hpp
+-vmStructs.cpp                           javaCalls.hpp
+-vmStructs.cpp                           javaClasses.hpp
+-vmStructs.cpp                           jvmtiAgentThread.hpp
+-vmStructs.cpp                           klass.hpp
+-vmStructs.cpp                           klassOop.hpp
+-vmStructs.cpp                           loaderConstraints.hpp
+-vmStructs.cpp                           location.hpp
+-vmStructs.cpp                           markOop.hpp
+-vmStructs.cpp                           markSweep.hpp
+-vmStructs.cpp                           methodDataKlass.hpp
+-vmStructs.cpp                           methodDataOop.hpp
+-vmStructs.cpp                           methodKlass.hpp
+-vmStructs.cpp                           methodOop.hpp
+-vmStructs.cpp                           mutableSpace.hpp
+-vmStructs.cpp                           nmethod.hpp
+-vmStructs.cpp                           objArrayKlass.hpp
+-vmStructs.cpp                           objArrayKlassKlass.hpp
+-vmStructs.cpp                           objArrayOop.hpp
+-vmStructs.cpp                           oop.hpp
+-vmStructs.cpp                           oopMap.hpp
+-vmStructs.cpp                           pcDesc.hpp
+-vmStructs.cpp                           perfMemory.hpp
+-vmStructs.cpp                           permGen.hpp
+-vmStructs.cpp                           placeholders.hpp
+-vmStructs.cpp                           sharedRuntime.hpp
+-vmStructs.cpp                           space.hpp
+-vmStructs.cpp                           stubRoutines.hpp
+-vmStructs.cpp                           stubs.hpp
+-vmStructs.cpp                           symbolKlass.hpp
+-vmStructs.cpp                           symbolOop.hpp
+-vmStructs.cpp                           symbolTable.hpp
+-vmStructs.cpp                           systemDictionary.hpp
+-vmStructs.cpp                           tenuredGeneration.hpp
+-vmStructs.cpp                           thread_<os_family>.inline.hpp
+-vmStructs.cpp                           typeArrayKlass.hpp
+-vmStructs.cpp                           typeArrayKlassKlass.hpp
+-vmStructs.cpp                           typeArrayOop.hpp
+-vmStructs.cpp                           universe.hpp
+-vmStructs.cpp                           virtualspace.hpp
+-vmStructs.cpp                           vmStructs.hpp
+-vmStructs.cpp                           vmStructs_<arch>.hpp
+-vmStructs.cpp                           vmStructs_<os_arch>.hpp
+-vmStructs.cpp                           vmreg.hpp
+-vmStructs.cpp                           watermark.hpp
++// vmStructs is jck optional, put cpp deps in includeDB_features
+ 
+ vmStructs.hpp                           debug.hpp
+ 
+@@ -4879,22 +4500,22 @@
+ vm_version.cpp                          arguments.hpp
+ vm_version.cpp                          oop.inline.hpp
+ vm_version.cpp                          universe.hpp
+-vm_version.cpp                          vm_version_<arch>.hpp
++vm_version.cpp                          vm_version_<arch_model>.hpp
+ 
+ vm_version.hpp                          allocation.hpp
+ vm_version.hpp                          ostream.hpp
+ 
+-vm_version_<arch>.cpp                   assembler_<arch>.inline.hpp
+-vm_version_<arch>.cpp                   java.hpp
+-vm_version_<arch>.cpp                   os_<os_family>.inline.hpp
+-vm_version_<arch>.cpp                   resourceArea.hpp
+-vm_version_<arch>.cpp                   stubCodeGenerator.hpp
+-vm_version_<arch>.cpp                   vm_version_<arch>.hpp
++vm_version_<arch_model>.cpp             assembler_<arch_model>.inline.hpp
++vm_version_<arch_model>.cpp             java.hpp
++vm_version_<arch_model>.cpp             os_<os_family>.inline.hpp
++vm_version_<arch_model>.cpp             resourceArea.hpp
++vm_version_<arch_model>.cpp             stubCodeGenerator.hpp
++vm_version_<arch_model>.cpp             vm_version_<arch_model>.hpp
+ 
+-vm_version_<arch>.hpp                   globals_extension.hpp
+-vm_version_<arch>.hpp                   vm_version.hpp
++vm_version_<arch_model>.hpp             globals_extension.hpp
++vm_version_<arch_model>.hpp             vm_version.hpp
+ 
+-vm_version_<os_arch>.cpp                vm_version_<arch>.hpp
++vm_version_<os_arch>.cpp                vm_version_<arch_model>.hpp
+ 
+ vmreg.cpp                               assembler.hpp
+ vmreg.cpp                               vmreg.hpp
+@@ -4923,19 +4544,19 @@
+ 
+ vtableStubs.hpp                         allocation.hpp
+ 
+-vtableStubs_<arch>.cpp                  assembler.hpp
+-vtableStubs_<arch>.cpp                  assembler_<arch>.inline.hpp
+-vtableStubs_<arch>.cpp                  instanceKlass.hpp
+-vtableStubs_<arch>.cpp                  interp_masm_<arch>.hpp
+-vtableStubs_<arch>.cpp                  klassVtable.hpp
+-vtableStubs_<arch>.cpp                  resourceArea.hpp
+-vtableStubs_<arch>.cpp                  sharedRuntime.hpp
+-vtableStubs_<arch>.cpp                  vmreg_<arch>.inline.hpp
+-vtableStubs_<arch>.cpp                  vtableStubs.hpp
++vtableStubs_<arch_model>.cpp            assembler.hpp
++vtableStubs_<arch_model>.cpp            assembler_<arch_model>.inline.hpp
++vtableStubs_<arch_model>.cpp            instanceKlass.hpp
++vtableStubs_<arch_model>.cpp            interp_masm_<arch_model>.hpp
++vtableStubs_<arch_model>.cpp            klassVtable.hpp
++vtableStubs_<arch_model>.cpp            resourceArea.hpp
++vtableStubs_<arch_model>.cpp            sharedRuntime.hpp
++vtableStubs_<arch_model>.cpp            vmreg_<arch>.inline.hpp
++vtableStubs_<arch_model>.cpp            vtableStubs.hpp
+ 
+ vtune.hpp                               allocation.hpp
+ 
+-vtune_<os_family>.cpp                   interpreter_<arch>.hpp
++vtune_<os_family>.cpp                   interpreter.hpp
+ vtune_<os_family>.cpp                   vtune.hpp
+ 
+ watermark.hpp                           allocation.hpp
+diff -ruN openjdk6/hotspot/src/share/vm/includeDB_coreonly openjdk/hotspot/src/share/vm/includeDB_coreonly
+--- openjdk6/hotspot/src/share/vm/includeDB_coreonly	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/includeDB_coreonly	1969-12-31 19:00:00.000000000 -0500
+@@ -1,26 +0,0 @@
+-//
+-// Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
+-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+-//
+-// This code is free software; you can redistribute it and/or modify it
+-// under the terms of the GNU General Public License version 2 only, as
+-// published by the Free Software Foundation.
+-//
+-// This code is distributed in the hope that it will be useful, but WITHOUT
+-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+-// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+-// version 2 for more details (a copy is included in the LICENSE file that
+-// accompanied this code).
+-//
+-// You should have received a copy of the GNU General Public License version
+-// 2 along with this work; if not, write to the Free Software Foundation,
+-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+-//
+-// Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+-// CA 95054 USA or visit www.sun.com if you need additional information or
+-// have any questions.
+-//  
+-//
+-
+-// NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps!
+-
+diff -ruN openjdk6/hotspot/src/share/vm/includeDB_features openjdk/hotspot/src/share/vm/includeDB_features
+--- openjdk6/hotspot/src/share/vm/includeDB_features	1969-12-31 19:00:00.000000000 -0500
++++ openjdk/hotspot/src/share/vm/includeDB_features	2008-01-31 09:19:00.000000000 -0500
+@@ -0,0 +1,319 @@
++//
++// Copyright 2007 Sun Microsystems, Inc.  All Rights Reserved.
++// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++//
++// This code is free software; you can redistribute it and/or modify it
++// under the terms of the GNU General Public License version 2 only, as
++// published by the Free Software Foundation.
++//
++// This code is distributed in the hope that it will be useful, but WITHOUT
++// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++// version 2 for more details (a copy is included in the LICENSE file that
++// accompanied this code).
++//
++// You should have received a copy of the GNU General Public License version
++// 2 along with this work; if not, write to the Free Software Foundation,
++// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++//
++// Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
++// CA 95054 USA or visit www.sun.com if you need additional information or
++// have any questions.
++//  
++//
++
++attachListener.cpp                      arguments.hpp
++attachListener.cpp                      attachListener.hpp
++attachListener.cpp                      globals.hpp
++attachListener.cpp                      heapDumper.hpp
++attachListener.cpp                      java.hpp
++attachListener.cpp                      javaCalls.hpp
++attachListener.cpp                      javaClasses.hpp
++attachListener.cpp                      jvmtiExport.hpp
++attachListener.cpp                      os.hpp
++attachListener.cpp                      resourceArea.hpp
++attachListener.cpp                      systemDictionary.hpp
++attachListener.cpp                      vmGCOperations.hpp
++
++attachListener_<os_family>.cpp          attachListener.hpp
++attachListener_<os_family>.cpp          dtraceAttacher.hpp
++attachListener_<os_family>.cpp          interfaceSupport.hpp
++attachListener_<os_family>.cpp          os.hpp
++
++dump.cpp                                classify.hpp
++dump.cpp                                copy.hpp
++dump.cpp                                filemap.hpp
++dump.cpp                                javaCalls.hpp
++dump.cpp                                javaClasses.hpp
++dump.cpp                                loaderConstraints.hpp
++dump.cpp                                methodDataOop.hpp
++dump.cpp                                oop.hpp
++dump.cpp                                oopFactory.hpp
++dump.cpp                                resourceArea.hpp
++dump.cpp                                signature.hpp
++dump.cpp                                symbolTable.hpp
++dump.cpp                                systemDictionary.hpp
++dump.cpp                                vmThread.hpp
++dump.cpp                                vm_operations.hpp
++
++dump_<arch_model>.cpp                   assembler_<arch_model>.inline.hpp
++dump_<arch_model>.cpp                   compactingPermGenGen.hpp
++
++forte.cpp                               collectedHeap.inline.hpp
++forte.cpp                               debugInfoRec.hpp
++forte.cpp                               forte.hpp
++forte.cpp                               oop.inline.hpp
++forte.cpp                               oop.inline2.hpp
++forte.cpp                               pcDesc.hpp
++forte.cpp                               space.hpp
++forte.cpp                               thread.hpp
++forte.cpp                               universe.inline.hpp
++forte.cpp                               vframe.hpp
++forte.cpp                               vframeArray.hpp
++
++fprofiler.cpp                           allocation.inline.hpp
++fprofiler.cpp                           classLoader.hpp
++fprofiler.cpp                           collectedHeap.inline.hpp
++fprofiler.cpp                           deoptimization.hpp
++fprofiler.cpp                           fprofiler.hpp
++fprofiler.cpp                           interpreter.hpp
++fprofiler.cpp                           macros.hpp
++fprofiler.cpp                           mutexLocker.hpp
++fprofiler.cpp                           oop.inline.hpp
++fprofiler.cpp                           oop.inline2.hpp
++fprofiler.cpp                           stubCodeGenerator.hpp
++fprofiler.cpp                           stubRoutines.hpp
++fprofiler.cpp                           symbolOop.hpp
++fprofiler.cpp                           task.hpp
++fprofiler.cpp                           universe.inline.hpp
++fprofiler.cpp                           vframe.hpp
++fprofiler.cpp                           vtableStubs.hpp
++
++heapDumper.cpp                          genCollectedHeap.hpp
++heapDumper.cpp                          heapDumper.hpp 
++heapDumper.cpp                          javaCalls.hpp
++heapDumper.cpp                          jniHandles.hpp
++heapDumper.cpp                          objArrayKlass.hpp
++heapDumper.cpp                          ostream.hpp
++heapDumper.cpp                          reflectionUtils.hpp
++heapDumper.cpp                          symbolTable.hpp
++heapDumper.cpp                          systemDictionary.hpp
++heapDumper.cpp                          universe.hpp
++heapDumper.cpp                          vframe.hpp
++heapDumper.cpp                          vmGCOperations.hpp
++heapDumper.cpp                          vmSymbols.hpp
++heapDumper.cpp                          vmThread.hpp
++heapDumper.cpp                          vm_operations.hpp
++
++heapInspection.cpp                      collectedHeap.hpp
++heapInspection.cpp                      genCollectedHeap.hpp
++heapInspection.cpp                      globalDefinitions.hpp
++heapInspection.cpp                      heapInspection.hpp
++heapInspection.cpp                      klassOop.hpp
++heapInspection.cpp                      os.hpp
++heapInspection.cpp                      resourceArea.hpp
++
++jniCheck.cpp                            fieldDescriptor.hpp
++jniCheck.cpp                            handles.hpp
++jniCheck.cpp                            instanceKlass.hpp
++jniCheck.cpp                            interfaceSupport.hpp
++jniCheck.cpp                            jfieldIDWorkaround.hpp
++jniCheck.cpp                            jni.h
++jniCheck.cpp                            jniCheck.hpp
++jniCheck.cpp                            jniTypes_<arch>.hpp
++jniCheck.cpp                            jvm_misc.hpp
++jniCheck.cpp                            oop.inline.hpp
++jniCheck.cpp                            symbolOop.hpp
++jniCheck.cpp                            systemDictionary.hpp
++jniCheck.cpp                            thread.hpp
++jniCheck.cpp                            vmSymbols.hpp
++
++jvmtiCodeBlobEvents.cpp                 codeBlob.hpp
++jvmtiCodeBlobEvents.cpp                 codeCache.hpp
++jvmtiCodeBlobEvents.cpp                 handles.hpp
++jvmtiCodeBlobEvents.cpp                 handles.inline.hpp
++jvmtiCodeBlobEvents.cpp                 jvmtiCodeBlobEvents.hpp
++jvmtiCodeBlobEvents.cpp                 jvmtiExport.hpp
++jvmtiCodeBlobEvents.cpp                 oop.inline.hpp
++jvmtiCodeBlobEvents.cpp                 resourceArea.hpp
++jvmtiCodeBlobEvents.cpp                 scopeDesc.hpp
++jvmtiCodeBlobEvents.cpp                 vmThread.hpp
++
++jvmtiCodeBlobEvents.hpp                 jvmti.h
++
++jvmtiExtensions.cpp                     jvmtiExport.hpp
++jvmtiExtensions.cpp                     jvmtiExtensions.hpp
++
++jvmtiExtensions.hpp                     allocation.hpp
++jvmtiExtensions.hpp                     jvmti.h
++jvmtiExtensions.hpp                     jvmtiEnv.hpp
++
++jvmtiImpl.cpp                           exceptions.hpp
++jvmtiImpl.cpp                           handles.hpp
++jvmtiImpl.cpp                           handles.inline.hpp
++jvmtiImpl.cpp                           instanceKlass.hpp
++jvmtiImpl.cpp                           interfaceSupport.hpp
++jvmtiImpl.cpp                           interpreter.hpp
++jvmtiImpl.cpp                           javaCalls.hpp
++jvmtiImpl.cpp                           jvmtiAgentThread.hpp
++jvmtiImpl.cpp                           jvmtiEnv.hpp
++jvmtiImpl.cpp                           jvmtiEventController.inline.hpp
++jvmtiImpl.cpp                           jvmtiImpl.hpp
++jvmtiImpl.cpp                           jvmtiRedefineClasses.hpp
++jvmtiImpl.cpp                           resourceArea.hpp
++jvmtiImpl.cpp                           signature.hpp
++jvmtiImpl.cpp                           systemDictionary.hpp
++jvmtiImpl.cpp                           thread_<os_family>.inline.hpp
++jvmtiImpl.cpp                           vframe.hpp
++jvmtiImpl.cpp                           vframe_hp.hpp
++jvmtiImpl.cpp                           vm_operations.hpp
++
++jvmtiImpl.hpp                           jvmti.h
++jvmtiImpl.hpp                           jvmtiEnvThreadState.hpp
++jvmtiImpl.hpp                           jvmtiEventController.hpp
++jvmtiImpl.hpp                           jvmtiTrace.hpp
++jvmtiImpl.hpp                           jvmtiUtil.hpp
++jvmtiImpl.hpp                           objArrayOop.hpp
++jvmtiImpl.hpp                           stackValueCollection.hpp
++jvmtiImpl.hpp                           systemDictionary.hpp
++jvmtiImpl.hpp                           vm_operations.hpp
++
++jvmtiTagMap.cpp                         biasedLocking.hpp
++jvmtiTagMap.cpp                         javaCalls.hpp
++jvmtiTagMap.cpp                         jniHandles.hpp
++jvmtiTagMap.cpp                         jvmtiEnv.hpp
++jvmtiTagMap.cpp                         jvmtiEventController.hpp
++jvmtiTagMap.cpp                         jvmtiEventController.inline.hpp
++jvmtiTagMap.cpp                         jvmtiExport.hpp
++jvmtiTagMap.cpp                         jvmtiImpl.hpp
++jvmtiTagMap.cpp                         jvmtiTagMap.hpp
++jvmtiTagMap.cpp                         mutex.hpp
++jvmtiTagMap.cpp                         mutexLocker.hpp
++jvmtiTagMap.cpp                         objArrayKlass.hpp
++jvmtiTagMap.cpp                         oop.inline2.hpp
++jvmtiTagMap.cpp                         reflectionUtils.hpp
++jvmtiTagMap.cpp                         serviceUtil.hpp
++jvmtiTagMap.cpp                         symbolTable.hpp
++jvmtiTagMap.cpp                         systemDictionary.hpp
++jvmtiTagMap.cpp                         vframe.hpp
++jvmtiTagMap.cpp                         vmSymbols.hpp
++jvmtiTagMap.cpp                         vmThread.hpp
++jvmtiTagMap.cpp                         vm_operations.hpp
++
++jvmtiTagMap.hpp                         allocation.hpp
++jvmtiTagMap.hpp                         collectedHeap.hpp
++jvmtiTagMap.hpp                         genCollectedHeap.hpp
++jvmtiTagMap.hpp                         jvmti.h
++jvmtiTagMap.hpp                         jvmtiEnv.hpp
++jvmtiTagMap.hpp                         universe.hpp
++
++jvmtiTrace.cpp                          jvmtiEnv.hpp
++jvmtiTrace.cpp                          jvmtiTrace.hpp
++
++jvmtiTrace.hpp                          jvmti.h
++jvmtiTrace.hpp                          jvmtiEnvThreadState.hpp
++jvmtiTrace.hpp                          jvmtiEventController.hpp
++jvmtiTrace.hpp                          jvmtiUtil.hpp
++jvmtiTrace.hpp                          objArrayOop.hpp
++jvmtiTrace.hpp                          stackValueCollection.hpp
++jvmtiTrace.hpp                          systemDictionary.hpp
++jvmtiTrace.hpp                          vm_operations.hpp
++
++restore.cpp                             filemap.hpp
++restore.cpp                             hashtable.inline.hpp
++restore.cpp                             oop.inline.hpp
++restore.cpp                             symbolTable.hpp
++restore.cpp                             systemDictionary.hpp
++
++serialize.cpp                           classify.hpp
++serialize.cpp                           codeCache.hpp
++serialize.cpp                           compactingPermGenGen.hpp
++serialize.cpp                           compiledICHolderOop.hpp
++serialize.cpp                           methodDataOop.hpp
++serialize.cpp                           objArrayOop.hpp
++serialize.cpp                           oop.hpp
++serialize.cpp                           symbolTable.hpp
++serialize.cpp                           systemDictionary.hpp
++
++vmStructs.cpp                           arguments.hpp
++vmStructs.cpp                           arrayKlass.hpp
++vmStructs.cpp                           arrayKlassKlass.hpp
++vmStructs.cpp                           arrayOop.hpp
++vmStructs.cpp                           bytecodes.hpp
++vmStructs.cpp                           bytecodeInterpreter.hpp
++vmStructs.cpp                           cardTableRS.hpp
++vmStructs.cpp                           codeBlob.hpp
++vmStructs.cpp                           codeCache.hpp
++vmStructs.cpp                           collectedHeap.hpp
++vmStructs.cpp                           compactPermGen.hpp
++vmStructs.cpp                           compiledICHolderKlass.hpp
++vmStructs.cpp                           compiledICHolderOop.hpp
++vmStructs.cpp                           compressedStream.hpp
++vmStructs.cpp                           constMethodKlass.hpp
++vmStructs.cpp                           constMethodOop.hpp
++vmStructs.cpp                           constantPoolKlass.hpp
++vmStructs.cpp                           constantPoolOop.hpp
++vmStructs.cpp                           cpCacheKlass.hpp
++vmStructs.cpp                           cpCacheOop.hpp
++vmStructs.cpp                           defNewGeneration.hpp
++vmStructs.cpp                           dictionary.hpp
++vmStructs.cpp                           freeBlockDictionary.hpp
++vmStructs.cpp                           genCollectedHeap.hpp
++vmStructs.cpp                           generation.hpp
++vmStructs.cpp                           generationSpec.hpp
++vmStructs.cpp                           globalDefinitions.hpp
++vmStructs.cpp                           globals.hpp
++vmStructs.cpp                           hashtable.hpp
++vmStructs.cpp                           heap.hpp
++vmStructs.cpp                           immutableSpace.hpp
++vmStructs.cpp                           instanceKlass.hpp
++vmStructs.cpp                           instanceKlassKlass.hpp
++vmStructs.cpp                           instanceOop.hpp
++vmStructs.cpp                           interpreter.hpp
++vmStructs.cpp                           java.hpp
++vmStructs.cpp                           javaCalls.hpp
++vmStructs.cpp                           javaClasses.hpp
++vmStructs.cpp                           jvmtiAgentThread.hpp
++vmStructs.cpp                           klass.hpp
++vmStructs.cpp                           klassOop.hpp
++vmStructs.cpp                           loaderConstraints.hpp
++vmStructs.cpp                           location.hpp
++vmStructs.cpp                           markOop.hpp
++vmStructs.cpp                           markSweep.hpp
++vmStructs.cpp                           methodDataKlass.hpp
++vmStructs.cpp                           methodDataOop.hpp
++vmStructs.cpp                           methodKlass.hpp
++vmStructs.cpp                           methodOop.hpp
++vmStructs.cpp                           mutableSpace.hpp
++vmStructs.cpp                           nmethod.hpp
++vmStructs.cpp                           objArrayKlass.hpp
++vmStructs.cpp                           objArrayKlassKlass.hpp
++vmStructs.cpp                           objArrayOop.hpp
++vmStructs.cpp                           oop.hpp
++vmStructs.cpp                           oopMap.hpp
++vmStructs.cpp                           pcDesc.hpp
++vmStructs.cpp                           perfMemory.hpp
++vmStructs.cpp                           permGen.hpp
++vmStructs.cpp                           placeholders.hpp
++vmStructs.cpp                           sharedRuntime.hpp
++vmStructs.cpp                           space.hpp
++vmStructs.cpp                           stubRoutines.hpp
++vmStructs.cpp                           stubs.hpp
++vmStructs.cpp                           symbolKlass.hpp
++vmStructs.cpp                           symbolOop.hpp
++vmStructs.cpp                           symbolTable.hpp
++vmStructs.cpp                           systemDictionary.hpp
++vmStructs.cpp                           tenuredGeneration.hpp
++vmStructs.cpp                           thread_<os_family>.inline.hpp
++vmStructs.cpp                           typeArrayKlass.hpp
++vmStructs.cpp                           typeArrayKlassKlass.hpp
++vmStructs.cpp                           typeArrayOop.hpp
++vmStructs.cpp                           universe.hpp
++vmStructs.cpp                           virtualspace.hpp
++vmStructs.cpp                           vmStructs.hpp
++vmStructs.cpp                           vmStructs_<arch>.hpp
++vmStructs.cpp                           vmStructs_<os_arch>.hpp
++vmStructs.cpp                           vmreg.hpp
++vmStructs.cpp                           watermark.hpp
++
++vmStructs.hpp                           debug.hpp
+diff -ruN openjdk6/hotspot/src/share/vm/includeDB_gc_parallel openjdk/hotspot/src/share/vm/includeDB_gc_parallel
+--- openjdk6/hotspot/src/share/vm/includeDB_gc_parallel	2008-02-28 05:02:32.000000000 -0500
++++ openjdk/hotspot/src/share/vm/includeDB_gc_parallel	2008-01-31 09:19:00.000000000 -0500
+@@ -42,11 +42,14 @@
+ instanceKlass.cpp                       psScavenge.inline.hpp
+ instanceKlass.cpp                       parOopClosures.inline.hpp
+ 
++instanceKlassKlass.cpp                  cardTableRS.hpp
++instanceKlassKlass.cpp                  oop.pcgc.inline.hpp
+ instanceKlassKlass.cpp                  psPromotionManager.inline.hpp
+ instanceKlassKlass.cpp                  psScavenge.inline.hpp
++instanceKlassKlass.cpp                  parOopClosures.inline.hpp
+ 
+-instanceRefKlass.cpp                    psPromotionManager.inline.hpp
+ instanceRefKlass.cpp                    oop.pcgc.inline.hpp
++instanceRefKlass.cpp                    psPromotionManager.inline.hpp
+ instanceRefKlass.cpp                    psScavenge.inline.hpp
+ instanceRefKlass.cpp                    parOopClosures.inline.hpp
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/includeDB_jvmti openjdk/hotspot/src/share/vm/includeDB_jvmti
+--- openjdk6/hotspot/src/share/vm/includeDB_jvmti	1969-12-31 19:00:00.000000000 -0500
++++ openjdk/hotspot/src/share/vm/includeDB_jvmti	2008-01-31 09:19:00.000000000 -0500
+@@ -0,0 +1,257 @@
++//
++// Copyright 2007 Sun Microsystems, Inc.  All Rights Reserved.
++// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++//
++// This code is free software; you can redistribute it and/or modify it
++// under the terms of the GNU General Public License version 2 only, as
++// published by the Free Software Foundation.
++//
++// This code is distributed in the hope that it will be useful, but WITHOUT
++// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++// version 2 for more details (a copy is included in the LICENSE file that
++// accompanied this code).
++//
++// You should have received a copy of the GNU General Public License version
++// 2 along with this work; if not, write to the Free Software Foundation,
++// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++//
++// Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
++// CA 95054 USA or visit www.sun.com if you need additional information or
++// have any questions.
++//  
++//
++
++jvmtiAgentThread.hpp                    jvmtiEnv.hpp
++
++jvmtiClassFileReconstituter.cpp         bytecodeStream.hpp
++jvmtiClassFileReconstituter.cpp         bytes_<arch>.hpp
++jvmtiClassFileReconstituter.cpp         jvmtiClassFileReconstituter.hpp
++jvmtiClassFileReconstituter.cpp         symbolTable.hpp
++
++jvmtiClassFileReconstituter.hpp         jvmtiEnv.hpp
++
++// jvmtiCodeBlobEvents is jck optional, please put deps in includeDB_features
++
++jvmtiEnter.cpp                          jvmtiEnter.hpp
++jvmtiEnter.cpp                          jvmtiUtil.hpp
++
++jvmtiEnter.hpp                          interfaceSupport.hpp
++jvmtiEnter.hpp                          jvmtiEnv.hpp
++jvmtiEnter.hpp                          jvmtiImpl.hpp
++jvmtiEnter.hpp                          resourceArea.hpp
++jvmtiEnter.hpp                          systemDictionary.hpp
++
++jvmtiEnterTrace.cpp                     jvmtiEnter.hpp
++jvmtiEnterTrace.cpp                     jvmtiUtil.hpp
++
++jvmtiEnv.cpp                            arguments.hpp
++jvmtiEnv.cpp                            bytecodeStream.hpp
++jvmtiEnv.cpp                            cpCacheOop.hpp
++jvmtiEnv.cpp                            deoptimization.hpp
++jvmtiEnv.cpp                            exceptions.hpp
++jvmtiEnv.cpp                            instanceKlass.hpp
++jvmtiEnv.cpp                            interfaceSupport.hpp
++jvmtiEnv.cpp                            interpreter.hpp
++jvmtiEnv.cpp                            javaCalls.hpp
++jvmtiEnv.cpp                            jfieldIDWorkaround.hpp
++jvmtiEnv.cpp                            jniCheck.hpp
++jvmtiEnv.cpp                            jvm_misc.hpp
++jvmtiEnv.cpp                            jvmtiAgentThread.hpp
++jvmtiEnv.cpp                            jvmtiClassFileReconstituter.hpp
++jvmtiEnv.cpp                            jvmtiCodeBlobEvents.hpp
++jvmtiEnv.cpp                            jvmtiEnv.hpp
++jvmtiEnv.cpp                            jvmtiExtensions.hpp
++jvmtiEnv.cpp                            jvmtiGetLoadedClasses.hpp
++jvmtiEnv.cpp                            jvmtiImpl.hpp
++jvmtiEnv.cpp                            jvmtiManageCapabilities.hpp
++jvmtiEnv.cpp                            jvmtiRedefineClasses.hpp
++jvmtiEnv.cpp                            jvmtiTagMap.hpp
++jvmtiEnv.cpp                            jvmtiThreadState.inline.hpp
++jvmtiEnv.cpp                            jvmtiUtil.hpp
++jvmtiEnv.cpp                            objectMonitor.inline.hpp
++jvmtiEnv.cpp                            osThread.hpp
++jvmtiEnv.cpp                            preserveException.hpp
++jvmtiEnv.cpp                            reflectionUtils.hpp
++jvmtiEnv.cpp                            resourceArea.hpp
++jvmtiEnv.cpp                            signature.hpp
++jvmtiEnv.cpp                            systemDictionary.hpp
++jvmtiEnv.cpp                            threadService.hpp
++jvmtiEnv.cpp                            thread_<os_family>.inline.hpp
++jvmtiEnv.cpp                            universe.inline.hpp
++jvmtiEnv.cpp                            vframe.hpp
++jvmtiEnv.cpp                            vmSymbols.hpp
++jvmtiEnv.cpp                            vmThread.hpp
++
++jvmtiEnv.hpp                            jvmtiEnvBase.hpp
++
++jvmtiEnvBase.cpp                        biasedLocking.hpp
++jvmtiEnvBase.cpp                        interfaceSupport.hpp
++jvmtiEnvBase.cpp                        jfieldIDWorkaround.hpp
++jvmtiEnvBase.cpp                        jvmtiEnv.hpp
++jvmtiEnvBase.cpp                        jvmtiEnvBase.hpp
++jvmtiEnvBase.cpp                        jvmtiEventController.inline.hpp
++jvmtiEnvBase.cpp                        jvmtiExtensions.hpp
++jvmtiEnvBase.cpp                        jvmtiImpl.hpp
++jvmtiEnvBase.cpp                        jvmtiManageCapabilities.hpp
++jvmtiEnvBase.cpp                        jvmtiTagMap.hpp
++jvmtiEnvBase.cpp                        jvmtiThreadState.inline.hpp
++jvmtiEnvBase.cpp                        objArrayKlass.hpp
++jvmtiEnvBase.cpp                        objArrayOop.hpp
++jvmtiEnvBase.cpp                        objectMonitor.hpp
++jvmtiEnvBase.cpp                        objectMonitor.inline.hpp
++jvmtiEnvBase.cpp                        signature.hpp
++jvmtiEnvBase.cpp                        systemDictionary.hpp
++jvmtiEnvBase.cpp                        vframe.hpp
++jvmtiEnvBase.cpp                        vframe_hp.hpp
++jvmtiEnvBase.cpp                        vmThread.hpp
++jvmtiEnvBase.cpp                        vm_operations.hpp
++
++jvmtiEnvBase.hpp                        classLoader.hpp
++jvmtiEnvBase.hpp                        fieldDescriptor.hpp
++jvmtiEnvBase.hpp                        frame.hpp
++jvmtiEnvBase.hpp                        growableArray.hpp
++jvmtiEnvBase.hpp                        handles.inline.hpp
++jvmtiEnvBase.hpp                        jvmtiEnvThreadState.hpp
++jvmtiEnvBase.hpp                        jvmtiEventController.hpp
++jvmtiEnvBase.hpp                        jvmtiThreadState.hpp
++jvmtiEnvBase.hpp                        thread.hpp
++jvmtiEnvBase.hpp                        vm_operations.hpp
++
++jvmtiEnvThreadState.cpp                 handles.hpp
++jvmtiEnvThreadState.cpp                 handles.inline.hpp
++jvmtiEnvThreadState.cpp                 interfaceSupport.hpp
++jvmtiEnvThreadState.cpp                 interpreter.hpp
++jvmtiEnvThreadState.cpp                 javaCalls.hpp
++jvmtiEnvThreadState.cpp                 jvmtiEnv.hpp
++jvmtiEnvThreadState.cpp                 jvmtiEnvThreadState.hpp
++jvmtiEnvThreadState.cpp                 jvmtiEventController.inline.hpp
++jvmtiEnvThreadState.cpp                 jvmtiImpl.hpp
++jvmtiEnvThreadState.cpp                 resourceArea.hpp
++jvmtiEnvThreadState.cpp                 signature.hpp
++jvmtiEnvThreadState.cpp                 systemDictionary.hpp
++jvmtiEnvThreadState.cpp                 vframe.hpp
++jvmtiEnvThreadState.cpp                 vm_operations.hpp
++
++jvmtiEnvThreadState.hpp                 allocation.hpp
++jvmtiEnvThreadState.hpp                 allocation.inline.hpp
++jvmtiEnvThreadState.hpp                 globalDefinitions.hpp
++jvmtiEnvThreadState.hpp                 growableArray.hpp
++jvmtiEnvThreadState.hpp                 instanceKlass.hpp
++jvmtiEnvThreadState.hpp                 jvmti.h
++jvmtiEnvThreadState.hpp                 jvmtiEventController.hpp
++
++jvmtiEventController.cpp                frame.hpp
++jvmtiEventController.cpp                interpreter.hpp
++jvmtiEventController.cpp                jvmtiEnv.hpp
++jvmtiEventController.cpp                jvmtiEventController.hpp
++jvmtiEventController.cpp                jvmtiEventController.inline.hpp
++jvmtiEventController.cpp                jvmtiExport.hpp
++jvmtiEventController.cpp                jvmtiImpl.hpp
++jvmtiEventController.cpp                jvmtiThreadState.inline.hpp
++jvmtiEventController.cpp                resourceArea.hpp
++jvmtiEventController.cpp                thread.hpp
++jvmtiEventController.cpp                vframe.hpp
++jvmtiEventController.cpp                vframe_hp.hpp
++jvmtiEventController.cpp                vmThread.hpp
++jvmtiEventController.cpp                vm_operations.hpp
++
++jvmtiEventController.hpp                allocation.hpp
++jvmtiEventController.hpp                allocation.inline.hpp
++jvmtiEventController.hpp                globalDefinitions.hpp
++jvmtiEventController.hpp                jvmti.h
++
++jvmtiEventController.inline.hpp         jvmtiEventController.hpp
++jvmtiEventController.inline.hpp         jvmtiImpl.hpp
++jvmtiEventController.inline.hpp         jvmtiUtil.hpp
++
++jvmtiExport.cpp                         arguments.hpp
++jvmtiExport.cpp                         attachListener.hpp
++jvmtiExport.cpp                         handles.hpp
++jvmtiExport.cpp                         interfaceSupport.hpp
++jvmtiExport.cpp                         interpreter.hpp
++jvmtiExport.cpp                         jvmtiCodeBlobEvents.hpp
++jvmtiExport.cpp                         jvmtiEnv.hpp
++jvmtiExport.cpp                         jvmtiEventController.hpp
++jvmtiExport.cpp                         jvmtiEventController.inline.hpp
++jvmtiExport.cpp                         jvmtiExport.hpp
++jvmtiExport.cpp                         jvmtiImpl.hpp
++jvmtiExport.cpp                         jvmtiManageCapabilities.hpp
++jvmtiExport.cpp                         jvmtiTagMap.hpp
++jvmtiExport.cpp                         jvmtiThreadState.inline.hpp
++jvmtiExport.cpp                         nmethod.hpp
++jvmtiExport.cpp                         objArrayKlass.hpp
++jvmtiExport.cpp                         objArrayOop.hpp
++jvmtiExport.cpp                         objectMonitor.inline.hpp
++jvmtiExport.cpp                         pcDesc.hpp
++jvmtiExport.cpp                         resourceArea.hpp
++jvmtiExport.cpp                         scopeDesc.hpp
++jvmtiExport.cpp                         serviceUtil.hpp
++jvmtiExport.cpp                         systemDictionary.hpp
++jvmtiExport.cpp                         thread.hpp
++jvmtiExport.cpp                         vframe.hpp
++
++// jvmtiExtensions is jck optional, please put deps in includeDB_features
++
++jvmtiGetLoadedClasses.cpp               jvmtiGetLoadedClasses.hpp
++jvmtiGetLoadedClasses.cpp               systemDictionary.hpp
++jvmtiGetLoadedClasses.cpp               thread.hpp
++jvmtiGetLoadedClasses.cpp               universe.inline.hpp
++
++jvmtiGetLoadedClasses.hpp               jvmtiEnv.hpp
++
++// jvmtiImpl is jck optional, please put deps in includeDB_features
++
++jvmtiManageCapabilities.cpp             jvmtiEnv.hpp
++jvmtiManageCapabilities.cpp             jvmtiExport.hpp
++jvmtiManageCapabilities.cpp             jvmtiManageCapabilities.hpp
++ 
++jvmtiManageCapabilities.hpp             allocation.hpp
++jvmtiManageCapabilities.hpp             jvmti.h
++
++jvmtiRedefineClasses.cpp                codeCache.hpp
++jvmtiRedefineClasses.cpp                deoptimization.hpp
++jvmtiRedefineClasses.cpp                gcLocker.hpp
++jvmtiRedefineClasses.cpp                jvmtiImpl.hpp
++jvmtiRedefineClasses.cpp                jvmtiRedefineClasses.hpp
++jvmtiRedefineClasses.cpp                klassVtable.hpp
++jvmtiRedefineClasses.cpp                methodComparator.hpp
++jvmtiRedefineClasses.cpp                oopMapCache.hpp
++jvmtiRedefineClasses.cpp                relocator.hpp
++jvmtiRedefineClasses.cpp                rewriter.hpp
++jvmtiRedefineClasses.cpp                systemDictionary.hpp
++jvmtiRedefineClasses.cpp                universe.inline.hpp
++jvmtiRedefineClasses.cpp                verifier.hpp
++
++jvmtiRedefineClasses.hpp                jvmtiEnv.hpp
++jvmtiRedefineClasses.hpp                jvmtiRedefineClassesTrace.hpp
++jvmtiRedefineClasses.hpp                objArrayKlass.hpp
++jvmtiRedefineClasses.hpp                objArrayOop.hpp
++jvmtiRedefineClasses.hpp                oopFactory.hpp
++jvmtiRedefineClasses.hpp                resourceArea.hpp
++jvmtiRedefineClasses.hpp                vm_operations.hpp
++
++// jvmtiTagMap is jck optional, please put deps in includeDB_features
++// jvmtiTrace is jck optional, please put deps in includeDB_features
++
++jvmtiThreadState.cpp                    gcLocker.hpp
++jvmtiThreadState.cpp                    jvmtiEnv.hpp
++jvmtiThreadState.cpp                    jvmtiEventController.inline.hpp
++jvmtiThreadState.cpp                    jvmtiImpl.hpp
++jvmtiThreadState.cpp                    jvmtiThreadState.inline.hpp
++jvmtiThreadState.cpp                    resourceArea.hpp
++jvmtiThreadState.cpp                    vframe.hpp
++
++jvmtiThreadState.inline.hpp             jvmtiEnvThreadState.hpp
++jvmtiThreadState.inline.hpp             jvmtiThreadState.hpp
++
++jvmtiUtil.cpp                           exceptions.hpp
++jvmtiUtil.cpp                           handles.hpp
++jvmtiUtil.cpp                           handles.inline.hpp
++jvmtiUtil.cpp                           interfaceSupport.hpp
++jvmtiUtil.cpp                           jvmtiUtil.hpp
++jvmtiUtil.cpp                           vm_operations.hpp
++
++jvmtiUtil.hpp                           jvmti.h
++jvmtiUtil.hpp                           jvmtiEventController.hpp
++jvmtiUtil.hpp                           resourceArea.hpp
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp openjdk/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp
+--- openjdk6/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp	1969-12-31 19:00:00.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -0,0 +1,245 @@
++/*
++ * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
++ * CA 95054 USA or visit www.sun.com if you need additional information or
++ * have any questions.
++ *
++ */
++
++// This file contains the platform-independent parts
++// of the abstract interpreter and the abstract interpreter generator.
++
++// Organization of the interpreter(s). There exist two different interpreters in HotSpot:
++// an assembly language version (aka template interpreter) and a high level language version
++// (aka C++ interpreter). The division of labor is as follows:
++
++// Template Interpreter          C++ Interpreter        Functionality
++//
++// templateTable*                bytecodeInterpreter*   actual interpretation of bytecodes
++//
++// templateInterpreter*          cppInterpreter*        generation of assembly code that creates
++//                                                      and manages interpreter runtime frames.
++//                                                      Also code for populating interpreter
++//                                                      frames created during deoptimization.
++//
++// For both the template and the C++ interpreter there are common files for aspects of the
++// interpreter that are generic to both. This is the layout:
++//
++// abstractInterpreter.hpp: generic description of the interpreter.
++// interpreter*:            generic frame creation and handling.
++//
++
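++// A minimal usage sketch (assuming a resolved methodHandle m): callers on
++// either interpreter dispatch through the shared entry table declared below,
++//
++//   AbstractInterpreter::MethodKind k = AbstractInterpreter::method_kind(m);
++//   address entry = AbstractInterpreter::entry_for_kind(k);
++//
++// only the generated code behind the entry points differs between the two.
++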
++//------------------------------------------------------------------------------------------------------------------------
++// The C++ interface to the bytecode interpreter(s).
++
++class AbstractInterpreter: AllStatic {
++  friend class VMStructs;
++  friend class Interpreter;
++  friend class CppInterpreterGenerator;
++ public:
++  enum MethodKind {
++    zerolocals,                                                 // method needs locals initialization
++    zerolocals_synchronized,                                    // method needs locals initialization & is synchronized
++    native,                                                     // native method
++    native_synchronized,                                        // native method & is synchronized
++    empty,                                                      // empty method (code: _return)
++    accessor,                                                   // accessor method (code: _aload_0, _getfield, _(a|i)return)
++    abstract,                                                   // abstract method (throws an AbstractMethodError)
++    java_lang_math_sin,                                         // implementation of java.lang.Math.sin   (x)
++    java_lang_math_cos,                                         // implementation of java.lang.Math.cos   (x)
++    java_lang_math_tan,                                         // implementation of java.lang.Math.tan   (x)
++    java_lang_math_abs,                                         // implementation of java.lang.Math.abs   (x)
++    java_lang_math_sqrt,                                        // implementation of java.lang.Math.sqrt  (x)
++    java_lang_math_log,                                         // implementation of java.lang.Math.log   (x)
++    java_lang_math_log10,                                       // implementation of java.lang.Math.log10 (x)
++    number_of_method_entries,
++    invalid = -1
++  };
++
++  enum SomeConstants {
++    number_of_result_handlers = 10                              // number of result handlers for native calls
++  };
++
++ protected:
++  static StubQueue* _code;                                      // the interpreter code (codelets)
++
++  static bool       _notice_safepoints;                         // true if safepoints are activated
++
++  static address    _native_entry_begin;                        // Region for native entry code
++  static address    _native_entry_end;
++
++  // method entry points
++  static address    _entry_table[number_of_method_entries];     // entry points for a given method
++  static address    _native_abi_to_tosca[number_of_result_handlers];  // for native method result handlers
++  static address    _slow_signature_handler;                              // the native method generic (slow) signature handler
++
++  static address    _rethrow_exception_entry;                   // rethrows an activation in previous frame
++
++
++
++  friend class      AbstractInterpreterGenerator;
++  friend class              InterpreterGenerator;
++  friend class      InterpreterMacroAssembler;
++
++ public:
++  // Initialization/debugging
++  static void       initialize();
++  static StubQueue* code()                                      { return _code; }
++
++
++  // Method activation
++  static MethodKind method_kind(methodHandle m);
++  static address    entry_for_kind(MethodKind k)                { assert(0 <= k && k < number_of_method_entries, "illegal kind"); return _entry_table[k]; }
++  static address    entry_for_method(methodHandle m)            { return _entry_table[method_kind(m)]; }
++
++  static void       print_method_kind(MethodKind kind)          PRODUCT_RETURN;
++
++  // Runtime support
++
++  // length = invoke bytecode length (to advance to next bytecode)
++  static address    deopt_entry   (TosState state, int length) { ShouldNotReachHere(); return NULL; }
++  static address    return_entry  (TosState state, int length) { ShouldNotReachHere(); return NULL; }
++
++  static address    rethrow_exception_entry()                   { return _rethrow_exception_entry; }
++
++  // Activation size in words for a method that is just being called.
++  // Parameters haven't been pushed so count them too.
++  static int        size_top_interpreter_activation(methodOop method);
++
++  // Deoptimization support
++  static address    continuation_for(methodOop method,
++                                     address bcp,
++                                     int callee_parameters,
++                                     bool is_top_frame,
++                                     bool& use_next_mdp);
++
++  // shared implementation of size_activation and layout_activation:
++  static int        size_activation(methodOop method,
++                                    int temps,
++                                    int popframe_args,
++                                    int monitors,
++                                    int callee_params,
++                                    int callee_locals,
++                                    bool is_top_frame);
++
++  static int       layout_activation(methodOop method,
++                                      int temps,
++                                      int popframe_args,
++                                      int monitors,
++                                      int callee_params,
++                                      int callee_locals,
++                                      frame* caller,
++                                      frame* interpreter_frame,
++                                      bool is_top_frame);
++
++  // Runtime support
++  static bool       is_not_reached(                       methodHandle method, int bci);
++  // Safepoint support
++  static void       notice_safepoints()                         { ShouldNotReachHere(); } // stops the thread when reaching a safepoint
++  static void       ignore_safepoints()                         { ShouldNotReachHere(); } // ignores safepoints
++
++  // Support for native calls
++  static address    slow_signature_handler()                    { return _slow_signature_handler; }
++  static address    result_handler(BasicType type)              { return _native_abi_to_tosca[BasicType_as_index(type)]; }
++  static int        BasicType_as_index(BasicType type);         // computes index into result_handler_by_index table
++  static bool       in_native_entry(address pc)                 { return _native_entry_begin <= pc && pc < _native_entry_end; }
++  // Debugging/printing
++  static void       print();                                    // prints the interpreter code
++
++  // Support for Tagged Stacks
++  //
++  // Tags are stored on the Java Expression stack above the value:
++  //
++  //  tag
++  //  value
++  //
++  // For double values:
++  //
++  //  tag2
++  //  high word
++  //  tag1
++  //  low word
++
++ public:
++  static int stackElementWords()   { return TaggedStackInterpreter ? 2 : 1; }
++  static int stackElementSize()    { return stackElementWords()*wordSize; }
++  static int logStackElementSize() { return
++                 TaggedStackInterpreter? LogBytesPerWord+1 : LogBytesPerWord; }
++
++  // Tag is at pointer, value is one below for a stack growing down
++  // (or above for stack growing up)
++  static int  value_offset_in_bytes()  {
++    return TaggedStackInterpreter ?
++      frame::interpreter_frame_expression_stack_direction() * wordSize : 0;
++  }
++  static int  tag_offset_in_bytes()    {
++    assert(TaggedStackInterpreter, "should not call this");
++    return 0;
++  }
++
++  // Tagged Locals
++  // Locals are stored relative to Llocals:
++  //
++  // tag    <- Llocals[n]
++  // value
++  //
++  // Category 2 types are indexed as:
++  //
++  // tag    <- Llocals[-n]
++  // high word
++  // tag    <- Llocals[-n+1]
++  // low word
++  //
++
++  // Local values relative to locals[n]
++  static int  local_offset_in_bytes(int n) {
++    return ((frame::interpreter_frame_expression_stack_direction() * n) *
++            stackElementSize()) + value_offset_in_bytes();
++  }
++  static int  local_tag_offset_in_bytes(int n) {
++    assert(TaggedStackInterpreter, "should not call this");
++    return ((frame::interpreter_frame_expression_stack_direction() * n) *
++            stackElementSize()) + tag_offset_in_bytes();
++  }
++
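++  // Worked example (a sketch, assuming wordSize == 8 and
++  // frame::interpreter_frame_expression_stack_direction() == -1):
++  //   untagged: local_offset_in_bytes(1)     == (-1 * 1 * 8)  + 0    == -8
++  //   tagged:   local_tag_offset_in_bytes(1) == (-1 * 1 * 16) + 0    == -16
++  //             local_offset_in_bytes(1)     == (-1 * 1 * 16) + (-8) == -24
++  // i.e. with tagging each slot is two words and the value sits one word
++  // below its tag.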
++};
++
++//------------------------------------------------------------------------------------------------------------------------
++// The interpreter generator.
++
++class Template;
++class AbstractInterpreterGenerator: public StackObj {
++ protected:
++  InterpreterMacroAssembler* _masm;
++
++  // shared code sequences
++  // Converter for native abi result to tosca result
++  address generate_result_handler_for(BasicType type);
++  address generate_slow_signature_handler();
++
++  // entry point generator
++  address generate_method_entry(AbstractInterpreter::MethodKind kind);
++
++  void bang_stack_shadow_pages(bool native_call);
++
++  void generate_all();
++
++ public:
++  AbstractInterpreterGenerator(StubQueue* _code);
++};
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/bytecode.cpp openjdk/hotspot/src/share/vm/interpreter/bytecode.cpp
+--- openjdk6/hotspot/src/share/vm/interpreter/bytecode.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/bytecode.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)bytecode.cpp	1.69 07/05/05 17:05:36 JVM"
+-#endif
+ /*
+  * Copyright 1997-2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -102,20 +99,20 @@
+   symbolHandle sh(thread, signature());
+   ResultTypeFinder rts(sh);
+   rts.iterate();
+-  return rts.type();    
++  return rts.type();
+ }
+ 
+ 
+-methodHandle Bytecode_invoke::static_target(TRAPS) {  
+-  methodHandle m;  
++methodHandle Bytecode_invoke::static_target(TRAPS) {
++  methodHandle m;
+   KlassHandle resolved_klass;
+   constantPoolHandle constants(THREAD, _method->constants());
+-  
+-  if (adjusted_invoke_code() != Bytecodes::_invokeinterface) {        
+-    LinkResolver::resolve_method(m, resolved_klass, constants, index(), CHECK_(methodHandle()));    
+-  } else {    
++
++  if (adjusted_invoke_code() != Bytecodes::_invokeinterface) {
++    LinkResolver::resolve_method(m, resolved_klass, constants, index(), CHECK_(methodHandle()));
++  } else {
+     LinkResolver::resolve_interface_method(m, resolved_klass, constants, index(), CHECK_(methodHandle()));
+-  }   
++  }
+   return m;
+ }
+ 
+@@ -135,7 +132,7 @@
+ 
+ BasicType Bytecode_static::result_type(methodOop method) const {
+   int index = java_hwrd_at(1);
+-  constantPoolOop constants = method->constants(); 
++  constantPoolOop constants = method->constants();
+   symbolOop field_type = constants->signature_ref_at(index);
+   BasicType basic_type = FieldType::basic_type(field_type);
+   return basic_type;
+@@ -176,13 +173,13 @@
+ 
+ void Bytecode_lookupswitch::verify() const {
+   switch (Bytecodes::java_code(code())) {
+-    case Bytecodes::_lookupswitch:      
++    case Bytecodes::_lookupswitch:
+       { int i = number_of_pairs() - 1;
+         while (i-- > 0) {
+           assert(pair_at(i)->match() < pair_at(i+1)->match(), "unsorted table entries");
+         }
+       }
+-      break;            
++      break;
+     default:
+       fatal("not a lookupswitch bytecode");
+   }
+@@ -206,4 +203,3 @@
+ }
+ 
+ #endif
+-
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/bytecodeHistogram.cpp openjdk/hotspot/src/share/vm/interpreter/bytecodeHistogram.cpp
+--- openjdk6/hotspot/src/share/vm/interpreter/bytecodeHistogram.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/bytecodeHistogram.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)bytecodeHistogram.cpp	1.29 07/05/05 17:05:36 JVM"
+-#endif
+ /*
+  * Copyright 1997-2000 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/bytecodeHistogram.hpp openjdk/hotspot/src/share/vm/interpreter/bytecodeHistogram.hpp
+--- openjdk6/hotspot/src/share/vm/interpreter/bytecodeHistogram.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/bytecodeHistogram.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)bytecodeHistogram.hpp	1.27 07/05/05 17:05:36 JVM"
+-#endif
+ /*
+  * Copyright 1997-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // BytecodeCounter counts the number of bytecodes executed
+@@ -32,11 +29,8 @@
+   NOT_PRODUCT(static int   _counter_value;)
+   NOT_PRODUCT(static jlong _reset_time;)
+ 
+-  friend class AbstractInterpreterGenerator;
+-  friend class         InterpreterGenerator;
+-#ifdef CC_INTERP
+-  friend class         cInterpreter;
+-#endif
++  friend class TemplateInterpreterGenerator;
++  friend class         BytecodeInterpreter;
+ 
+  public:
+   // Initialization
+@@ -58,11 +52,9 @@
+  private:
+   NOT_PRODUCT(static int _counters[Bytecodes::number_of_codes];)   // a counter for each bytecode
+ 
+-  friend class AbstractInterpreterGenerator;
++  friend class TemplateInterpreterGenerator;
+   friend class         InterpreterGenerator;
+-#ifdef CC_INTERP
+-  friend class         cInterpreter;
+-#endif
++  friend class         BytecodeInterpreter;
+ 
+  public:
+   // Initialization
+@@ -88,7 +80,7 @@
+   NOT_PRODUCT(static int  _index;)                      // new bytecode is shifted in - used to index into _counters
+   NOT_PRODUCT(static int  _counters[number_of_pairs];)  // a counter for each pair
+ 
+-  friend class AbstractInterpreterGenerator;
++  friend class TemplateInterpreterGenerator;
+   friend class         InterpreterGenerator;
+ 
+  public:
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/bytecode.hpp openjdk/hotspot/src/share/vm/interpreter/bytecode.hpp
+--- openjdk6/hotspot/src/share/vm/interpreter/bytecode.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/bytecode.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)bytecode.hpp	1.67 07/05/05 17:05:36 JVM"
+-#endif
+ /*
+  * Copyright 1997-2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Base class for different kinds of abstractions working
+@@ -99,7 +96,7 @@
+   int     fast_index() const                     { return Bytes::get_native_u2(addr_at(1)); }
+ 
+   // Attribute modification
+-  void    set_code(Bytecodes::Code code);  
++  void    set_code(Bytecodes::Code code);
+   void    set_fast_index(int i);
+ 
+   // Creation
+@@ -120,7 +117,7 @@
+ 
+  public:
+   int  match() const                             { return java_signed_word_at(0 * jintSize); }
+-  int  offset() const                            { return java_signed_word_at(1 * jintSize); }   
++  int  offset() const                            { return java_signed_word_at(1 * jintSize); }
+ };
+ 
+ 
+@@ -207,17 +204,17 @@
+   // Creation
+   inline friend Bytecode_invoke* Bytecode_invoke_at(methodHandle method, int bci);
+ 
+-  // Like Bytecode_invoke_at. Instead it returns NULL if the bci is not at an invoke. 
++  // Like Bytecode_invoke_at. Instead it returns NULL if the bci is not at an invoke.
+   inline friend Bytecode_invoke* Bytecode_invoke_at_check(methodHandle method, int bci);
+ };
+ 
+-inline Bytecode_invoke* Bytecode_invoke_at(methodHandle method, int bci) {    
++inline Bytecode_invoke* Bytecode_invoke_at(methodHandle method, int bci) {
+   Bytecode_invoke* b = new Bytecode_invoke(method, bci);
+   debug_only(b->verify());
+   return b;
+ }
+ 
+-inline Bytecode_invoke* Bytecode_invoke_at_check(methodHandle method, int bci) {    
++inline Bytecode_invoke* Bytecode_invoke_at_check(methodHandle method, int bci) {
+   Bytecode_invoke* b = new Bytecode_invoke(method, bci);
+   return b->is_valid() ? b : NULL;
+ }
+@@ -255,7 +252,7 @@
+   inline friend Bytecode_static* Bytecode_static_at(const methodOop method, address bcp);
+ };
+ 
+-inline Bytecode_static* Bytecode_static_at(const methodOop method, address bcp) {    
++inline Bytecode_static* Bytecode_static_at(const methodOop method, address bcp) {
+   Bytecode_static* b = (Bytecode_static*)bcp;
+   debug_only(b->verify());
+   return b;
+@@ -275,11 +272,11 @@
+   inline friend Bytecode_checkcast* Bytecode_checkcast_at(address bcp);
+ };
+ 
+-inline Bytecode_checkcast* Bytecode_checkcast_at(address bcp) {    
++inline Bytecode_checkcast* Bytecode_checkcast_at(address bcp) {
+   Bytecode_checkcast* b = (Bytecode_checkcast*)bcp;
+   debug_only(b->verify());
+   return b;
+-}  
++}
+ 
+ 
+ // Abstraction for instanceof
+@@ -295,11 +292,11 @@
+   inline friend Bytecode_instanceof* Bytecode_instanceof_at(address bcp);
+ };
+ 
+-inline Bytecode_instanceof* Bytecode_instanceof_at(address bcp) {    
++inline Bytecode_instanceof* Bytecode_instanceof_at(address bcp) {
+   Bytecode_instanceof* b = (Bytecode_instanceof*)bcp;
+   debug_only(b->verify());
+   return b;
+-}  
++}
+ 
+ 
+ class Bytecode_new: public Bytecode {
+@@ -313,11 +310,11 @@
+   inline friend Bytecode_new* Bytecode_new_at(address bcp);
+ };
+ 
+-inline Bytecode_new* Bytecode_new_at(address bcp) {    
++inline Bytecode_new* Bytecode_new_at(address bcp) {
+   Bytecode_new* b = (Bytecode_new*)bcp;
+   debug_only(b->verify());
+   return b;
+-}  
++}
+ 
+ 
+ class Bytecode_multianewarray: public Bytecode {
+@@ -331,11 +328,11 @@
+   inline friend Bytecode_multianewarray* Bytecode_multianewarray_at(address bcp);
+ };
+ 
+-inline Bytecode_multianewarray* Bytecode_multianewarray_at(address bcp) {    
++inline Bytecode_multianewarray* Bytecode_multianewarray_at(address bcp) {
+   Bytecode_multianewarray* b = (Bytecode_multianewarray*)bcp;
+   debug_only(b->verify());
+   return b;
+-}  
++}
+ 
+ 
+ class Bytecode_anewarray: public Bytecode {
+@@ -349,11 +346,11 @@
+   inline friend Bytecode_anewarray* Bytecode_anewarray_at(address bcp);
+ };
+ 
+-inline Bytecode_anewarray* Bytecode_anewarray_at(address bcp) {    
++inline Bytecode_anewarray* Bytecode_anewarray_at(address bcp) {
+   Bytecode_anewarray* b = (Bytecode_anewarray*)bcp;
+   debug_only(b->verify());
+   return b;
+-}  
++}
+ 
+ 
+ // Abstraction for ldc, ldc_w and ldc2_w
+@@ -362,7 +359,7 @@
+  public:
+   void verify() const {
+     Bytecodes::Code stdc = Bytecodes::java_code(code());
+-    assert(stdc == Bytecodes::_ldc || 
++    assert(stdc == Bytecodes::_ldc ||
+            stdc == Bytecodes::_ldc_w ||
+            stdc == Bytecodes::_ldc2_w, "load constant");
+   }
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp
+--- openjdk6/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	1969-12-31 19:00:00.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -0,0 +1,3047 @@
++/*
++ * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
++ * CA 95054 USA or visit www.sun.com if you need additional information or
++ * have any questions.
++ *
++ */
++
++
++// no precompiled headers
++#include "incls/_bytecodeInterpreter.cpp.incl"
++
++#ifdef CC_INTERP
++
++/*
++ * USELABELS - If using GCC, then use labels for the opcode dispatching
++ * rather than a switch statement. This improves performance because it
++ * gives us the opportunity to have the instructions that calculate the
++ * next opcode to jump to be intermixed with the rest of the instructions
++ * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
++ */
++#undef USELABELS
++#ifdef __GNUC__
++/*
++   ASSERT signifies debugging. It is much easier to step through bytecodes if we
++   don't use the computed goto approach.
++*/
++#ifndef ASSERT
++#define USELABELS
++#endif
++#endif
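++
++// A minimal standalone sketch of the computed-goto technique USELABELS
++// selects (illustrative only -- hypothetical opcodes, not interpreter code):
++#if 0   // sketch only
++static int dispatch_sketch(const unsigned char* pc) {
++  static const void* const table[] = { &&do_nop, &&do_halt };
++  goto *table[*pc];                  // initial dispatch
++do_nop:
++  pc++;
++  goto *table[*pc];                  // dispatch inlined into the handler itself
++do_halt:
++  return 0;
++}
++#endif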
++
++#undef CASE
++#ifdef USELABELS
++#define CASE(opcode) opc ## opcode
++#define DEFAULT opc_default
++#else
++#define CASE(opcode) case Bytecodes:: opcode
++#define DEFAULT default
++#endif
++
++/*
++ * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next
++ * opcode before going back to the top of the while loop, rather than having
++ * the top of the while loop handle it. This provides a better opportunity
++ * for instruction scheduling. Some compilers just do this prefetch
++ * automatically. Some actually end up with worse performance if you
++ * force the prefetch. Solaris gcc seems to do better, but cc does worse.
++ */
++#undef PREFETCH_OPCCODE
++#define PREFETCH_OPCCODE
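++
++// Handler-shape sketch of the prefetch (hypothetical opcode values,
++// illustration only): the next opcode is loaded at the end of each
++// handler so the load can overlap the dispatch branch.
++#if 0   // sketch only
++static void prefetch_sketch(const unsigned char* pc) {
++  int opcode = *pc;                  // prefetch before entering the loop
++  for (;;) {
++    switch (opcode) {
++    case 0:                          // a cheap opcode, e.g. nop
++      pc += 1;
++      opcode = *pc;                  // prefetch here, not at the loop top
++      continue;
++    default:
++      return;
++    }
++  }
++}
++#endif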
++
++/*
++  Interpreter safepoint: it is expected that the interpreter will have no
++  handles of its own creation live at an interpreter safepoint. Therefore we
++  run a HandleMarkCleaner and trash all handles allocated in the call chain
++  since the JavaCalls::call_helper invocation that initiated the chain.
++  There really shouldn't be any handles remaining to trash but this is cheap
++  in relation to a safepoint.
++*/
++#define SAFEPOINT                                                                 \
++    if ( SafepointSynchronize::is_synchronizing()) {                              \
++        {                                                                         \
++          /* zap freed handles rather than GC'ing them */                         \
++          HandleMarkCleaner __hmc(THREAD);                                        \
++        }                                                                         \
++        CALL_VM(SafepointSynchronize::block(THREAD), handle_exception);           \
++    }
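++
++// SAFEPOINT sits on paths that are already slow -- method entry and
++// backward branches (see DO_BACKEDGE_CHECKS below) -- so every loop
++// iteration and call boundary gives the VM a chance to stop this thread.
++// Placement sketch (hypothetical backward-branch handler):
++//
++//   if (offset <= 0) {   // backward branch closes a loop
++//     SAFEPOINT;         // let the VM stop us at the loop edge
++//   }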
++
++/*
++ * VM_JAVA_ERROR - Macro for throwing a java exception from
++ * the interpreter loop. Should really be a CALL_VM but there
++ * is no entry point to do the transition to vm so we just
++ * do it by hand here.
++ */
++#define VM_JAVA_ERROR_NO_JUMP(name, msg)                                          \
++    DECACHE_STATE();                                                              \
++    SET_LAST_JAVA_FRAME();                                                        \
++    {                                                                             \
++       ThreadInVMfromJava trans(THREAD);                                          \
++       Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg);             \
++    }                                                                             \
++    RESET_LAST_JAVA_FRAME();                                                      \
++    CACHE_STATE();
++
++// Normal throw of a java error
++#define VM_JAVA_ERROR(name, msg)                                                  \
++    VM_JAVA_ERROR_NO_JUMP(name, msg)                                              \
++    goto handle_exception;
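++
++// Typical use, as in CHECK_NULL and the divide-by-zero checks below:
++//
++//   VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), "/ by int zero");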
++
++#ifdef PRODUCT
++#define DO_UPDATE_INSTRUCTION_COUNT(opcode)
++#else
++#define DO_UPDATE_INSTRUCTION_COUNT(opcode)                                                          \
++{                                                                                                    \
++    BytecodeCounter::_counter_value++;                                                               \
++    BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++;                                         \
++    if (StopInterpreterAt && StopInterpreterAt == BytecodeCounter::_counter_value) os::breakpoint(); \
++    if (TraceBytecodes) {                                                                            \
++      CALL_VM((void)SharedRuntime::trace_bytecode(THREAD, 0,                                       \
++                                   topOfStack[Interpreter::expr_index_at(1)],                      \
++                                   topOfStack[Interpreter::expr_index_at(2)]),                     \
++                                   handle_exception);                                              \
++    }                                                                                              \
++}
++#endif
++
++#undef DEBUGGER_SINGLE_STEP_NOTIFY
++#ifdef VM_JVMTI
++/* NOTE: (kbr) This macro must be called AFTER the PC has been
++   incremented. JvmtiExport::at_single_stepping_point() may cause a
++   breakpoint opcode to get inserted at the current PC to allow the
++   debugger to coalesce single-step events.
++
++   As a result if we call at_single_stepping_point() we refetch opcode
++   to get the current opcode. This will override any other prefetching
++   that might have occurred.
++*/
++#define DEBUGGER_SINGLE_STEP_NOTIFY()                                            \
++{                                                                                \
++      if (_jvmti_interp_events) {                                                \
++        if (JvmtiExport::should_post_single_step()) {                            \
++          DECACHE_STATE();                                                       \
++          SET_LAST_JAVA_FRAME();                                                 \
++          ThreadInVMfromJava trans(THREAD);                                      \
++          JvmtiExport::at_single_stepping_point(THREAD,                          \
++                                          istate->method(),                      \
++                                          pc);                                   \
++          RESET_LAST_JAVA_FRAME();                                               \
++          CACHE_STATE();                                                         \
++          if (THREAD->pop_frame_pending() &&                                     \
++              !THREAD->pop_frame_in_process()) {                                 \
++            goto handle_Pop_Frame;                                               \
++          }                                                                      \
++          opcode = *pc;                                                          \
++        }                                                                        \
++      }                                                                          \
++}
++#else
++#define DEBUGGER_SINGLE_STEP_NOTIFY()
++#endif
++
++/*
++ * CONTINUE - Macro for executing the next opcode.
++ */
++#undef CONTINUE
++#ifdef USELABELS
++// Have to do the dispatch this way in C++ because otherwise gcc complains about crossing an
++// initialization (which is the initialization of the table pointer...)
++#define DISPATCH(opcode) goto *dispatch_table[opcode]
++#define CONTINUE {                              \
++        opcode = *pc;                           \
++        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
++        DEBUGGER_SINGLE_STEP_NOTIFY();          \
++        DISPATCH(opcode);                       \
++    }
++#else
++#ifdef PREFETCH_OPCCODE
++#define CONTINUE {                              \
++        opcode = *pc;                           \
++        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
++        DEBUGGER_SINGLE_STEP_NOTIFY();          \
++        continue;                               \
++    }
++#else
++#define CONTINUE {                              \
++        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
++        DEBUGGER_SINGLE_STEP_NOTIFY();          \
++        continue;                               \
++    }
++#endif
++#endif
++
++// JavaStack Implementation
++#define MORE_STACK(count)  \
++    (topOfStack -= ((count) * Interpreter::stackElementWords()))
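++// (The expression stack grows toward lower addresses, so MORE_STACK(1)
++// pushes one slot and MORE_STACK(-1) pops one; hence the negative stack
++// arguments in the pop/store handlers below.)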
++
++
++#define UPDATE_PC(opsize) {pc += opsize; }
++/*
++ * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack.
++ */
++#undef UPDATE_PC_AND_TOS
++#define UPDATE_PC_AND_TOS(opsize, stack) \
++    {pc += opsize; MORE_STACK(stack); }
++
++/*
++ * UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack,
++ * and executing the next opcode. It's somewhat similar to the combination
++ * of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations.
++ */
++#undef UPDATE_PC_AND_TOS_AND_CONTINUE
++#ifdef USELABELS
++#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
++        pc += opsize; opcode = *pc; MORE_STACK(stack);          \
++        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
++        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
++        DISPATCH(opcode);                                       \
++    }
++
++#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
++        pc += opsize; opcode = *pc;                             \
++        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
++        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
++        DISPATCH(opcode);                                       \
++    }
++#else
++#ifdef PREFETCH_OPCCODE
++#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
++        pc += opsize; opcode = *pc; MORE_STACK(stack);          \
++        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
++        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
++        goto do_continue;                                       \
++    }
++
++#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
++        pc += opsize; opcode = *pc;                             \
++        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
++        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
++        goto do_continue;                                       \
++    }
++#else
++#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
++        pc += opsize; MORE_STACK(stack);                \
++        DO_UPDATE_INSTRUCTION_COUNT(opcode);            \
++        DEBUGGER_SINGLE_STEP_NOTIFY();                  \
++        goto do_continue;                               \
++    }
++
++#define UPDATE_PC_AND_CONTINUE(opsize) {                \
++        pc += opsize;                                   \
++        DO_UPDATE_INSTRUCTION_COUNT(opcode);            \
++        DEBUGGER_SINGLE_STEP_NOTIFY();                  \
++        goto do_continue;                               \
++    }
++#endif /* PREFETCH_OPCCODE */
++#endif /* USELABELS */
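++
++// Handler-shape sketch (this is the pattern of CASE(_aconst_null) below,
++// shown here for orientation): a one-byte opcode that pushes one slot
++// ends with
++//
++//   SET_STACK_OBJECT(NULL, 0);
++//   UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);   // pc += 1, push 1 slot, dispatch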
++
++// About to call a new method: save the adjusted pc and return to the frame manager
++#define UPDATE_PC_AND_RETURN(opsize)  \
++   DECACHE_TOS();                     \
++   istate->set_bcp(pc+opsize);        \
++   return;
++
++
++#define METHOD istate->method()
++#define INVOCATION_COUNT METHOD->invocation_counter()
++#define BACKEDGE_COUNT METHOD->backedge_counter()
++
++
++#define INCR_INVOCATION_COUNT INVOCATION_COUNT->increment()
++#define OSR_REQUEST(res, branch_pc) \
++            CALL_VM(res=InterpreterRuntime::frequency_counter_overflow(THREAD, branch_pc), handle_exception);
++/*
++ * For those opcodes that need to have a GC point on a backwards branch
++ */
++
++// Backedge counting is kind of strange. The asm interpreter increments
++// the backedge counter as a separate counter, but compares against the
++// (scaled) sum of the invocation counter and the backedge count when
++// making a decision. It seems odd to sum them together like that.
++
++// skip is delta from current bcp/bci for target, branch_pc is pre-branch bcp
++
++
++#define DO_BACKEDGE_CHECKS(skip, branch_pc)                                                         \
++    if ((skip) <= 0) {                                                                              \
++      if (UseCompiler && UseLoopCounter) {                                                          \
++        bool do_OSR = UseOnStackReplacement;                                                        \
++        BACKEDGE_COUNT->increment();                                                                \
++        if (do_OSR) do_OSR = BACKEDGE_COUNT->reached_InvocationLimit();                             \
++        if (do_OSR) {                                                                               \
++          nmethod*  osr_nmethod;                                                                    \
++          OSR_REQUEST(osr_nmethod, branch_pc);                                                      \
++          if (osr_nmethod != NULL && osr_nmethod->osr_entry_bci() != InvalidOSREntryBci) {          \
++            intptr_t* buf;                                                                          \
++            CALL_VM(buf=SharedRuntime::OSR_migration_begin(THREAD), handle_exception);              \
++            istate->set_msg(do_osr);                                                                \
++            istate->set_osr_buf((address)buf);                                                      \
++            istate->set_osr_entry(osr_nmethod->osr_entry());                                        \
++            return;                                                                                 \
++          }                                                                                         \
++        } else {                                                                                    \
++          INCR_INVOCATION_COUNT;                                                                    \
++          SAFEPOINT;                                                                                \
++        }                                                                                           \
++      }  /* UseCompiler ... */                                                                      \
++      INCR_INVOCATION_COUNT;                                                                        \
++      SAFEPOINT;                                                                                    \
++    }
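++
++// Caller sketch (hypothetical, but the real branch opcodes follow this
++// shape): compute the signed offset, move the pc, then make the
++// backward-branch GC/OSR check against the pre-branch bcp.
++//
++//   int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1);
++//   address branch_pc = pc;
++//   UPDATE_PC(offset);
++//   DO_BACKEDGE_CHECKS(offset, branch_pc);
++//   CONTINUE;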
++
++/*
++ * For those opcodes that need to have a GC point on a backwards branch
++ */
++
++/*
++ * Macros for caching and flushing the interpreter state. Some local
++ * variables need to be flushed out to the frame before we do certain
++ * things (like pushing frames or becoming GC safe) and some need to
++ * be recached later (like after popping a frame). We could use one
++ * macro to cache or decache everything, but that would be less than
++ * optimal because we don't always need to cache or decache everything;
++ * some things we know are already cached or decached.
++ */
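++/*
++ * The resulting pattern is essentially what CALL_VM_NOCHECK below expands
++ * to (plus a pop-frame check):
++ *
++ *   DECACHE_STATE();          // flush cached pc/tos back to the frame
++ *   SET_LAST_JAVA_FRAME();
++ *   func;                     // call into the VM; may GC or walk the stack
++ *   RESET_LAST_JAVA_FRAME();
++ *   CACHE_STATE();            // reload; the frame may have been updated
++ */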
++#undef DECACHE_TOS
++#undef CACHE_TOS
++#undef CACHE_PREV_TOS
++#define DECACHE_TOS()    istate->set_stack(topOfStack);
++
++#define CACHE_TOS()      topOfStack = (intptr_t *)istate->stack();
++
++#undef DECACHE_PC
++#undef CACHE_PC
++#define DECACHE_PC()    istate->set_bcp(pc);
++#define CACHE_PC()      pc = istate->bcp();
++#define CACHE_CP()      cp = istate->constants();
++#define CACHE_LOCALS()  locals = istate->locals();
++#undef CACHE_FRAME
++#define CACHE_FRAME()
++
++/*
++ * CHECK_NULL - Macro for throwing a NullPointerException if the object
++ * passed is a null ref.
++ * On some architectures/platforms it should be possible to do this implicitly
++ */
++#undef CHECK_NULL
++#define CHECK_NULL(obj_)                                                 \
++    if ((obj_) == 0) {                                                   \
++        VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), "");  \
++    }
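++
++// Usage sketch (hypothetical instance-field handler; illustration only):
++//
++//   oop obj = STACK_OBJECT(-1);
++//   CHECK_NULL(obj);   // throws NullPointerException via VM_JAVA_ERROR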
++
++#define VMdoubleConstZero() 0.0
++#define VMdoubleConstOne() 1.0
++#define VMlongConstZero() (max_jlong-max_jlong)
++#define VMlongConstOne() ((max_jlong-max_jlong)+1)
++
++/*
++ * Alignment
++ */
++#define VMalignWordUp(val)          (((uintptr_t)(val) + 3) & ~3)
++
++// Decache the interpreter state that interpreter modifies directly (i.e. GC is indirect mod)
++#define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS();
++
++// Reload interpreter state after calling the VM or a possible GC
++#define CACHE_STATE()   \
++        CACHE_TOS();    \
++        CACHE_PC();     \
++        CACHE_CP();     \
++        CACHE_LOCALS();
++
++// Call the VM; don't check for pending exceptions
++#define CALL_VM_NOCHECK(func)                                     \
++          DECACHE_STATE();                                        \
++          SET_LAST_JAVA_FRAME();                                  \
++          func;                                                   \
++          RESET_LAST_JAVA_FRAME();                                \
++          CACHE_STATE();                                          \
++          if (THREAD->pop_frame_pending() &&                      \
++              !THREAD->pop_frame_in_process()) {                  \
++            goto handle_Pop_Frame;                                \
++          }
++
++// Call the VM and check for pending exceptions
++#define CALL_VM(func, label) {                                    \
++          CALL_VM_NOCHECK(func);                                  \
++          if (THREAD->has_pending_exception()) goto label;        \
++        }
++
++/*
++ * BytecodeInterpreter::run(interpreterState istate)
++ * BytecodeInterpreter::runWithChecks(interpreterState istate)
++ *
++ * The real deal. This is where byte codes actually get interpreted.
++ * Basically it's a big while loop that iterates until we return from
++ * the method passed in.
++ *
++ * runWithChecks is used when JVMTI is enabled.
++ *
++ */
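++/*
++ * In outline (message names from the switch below):
++ *
++ *   switch (istate->msg()) {
++ *     case initialize:    one-time static setup, then return;
++ *     case method_entry:  count the invocation, lock if synchronized, goto run;
++ *     case popping_frame / method_resume / deopt_resume / got_monitors:
++ *                         restore state as needed, goto run;
++ *     default:            fatal("Unexpected message from frame manager");
++ *   }
++ *   run: for (;;) { opcode = *pc; dispatch to a CASE(...) handler; }
++ */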
++#if defined(VM_JVMTI)
++void
++BytecodeInterpreter::runWithChecks(interpreterState istate) {
++#else
++void
++BytecodeInterpreter::run(interpreterState istate) {
++#endif
++
++  // In order to simplify some tests based on switches set at runtime,
++  // we invoke the interpreter a single time after the switches are enabled
++  // and set simpler-to-test variables rather than using method calls or
++  // complex boolean expressions.
++
++  static int initialized = 0;
++  static int checkit = 0;
++  static intptr_t* c_addr = NULL;
++  static intptr_t  c_value;
++
++  if (checkit && *c_addr != c_value) {
++    os::breakpoint();
++  }
++#ifdef VM_JVMTI
++  static bool _jvmti_interp_events = 0;
++#endif
++
++  static int _compiling;  // (UseCompiler || CountCompiledCalls)
++
++#ifdef ASSERT
++  if (istate->_msg != initialize) {
++    assert(abs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
++    IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
++  }
++  // Verify linkages.
++  interpreterState l = istate;
++  do {
++    assert(l == l->_self_link, "bad link");
++    l = l->_prev_link;
++  } while (l != NULL);
++  // Screwups with stack management usually cause us to overwrite istate;
++  // save a copy so we can verify it.
++  interpreterState orig = istate;
++#endif
++
++  static volatile jbyte* _byte_map_base; // adjusted card table base for oop store barrier
++
++  register intptr_t*        topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */
++  register address          pc = istate->bcp();
++  register jubyte opcode;
++  register intptr_t*        locals = istate->locals();
++  register constantPoolCacheOop  cp = istate->constants(); // method()->constants()->cache()
++#ifdef LOTS_OF_REGS
++  register JavaThread*      THREAD = istate->thread();
++  register volatile jbyte*  BYTE_MAP_BASE = _byte_map_base;
++#else
++#undef THREAD
++#define THREAD istate->thread()
++#undef BYTE_MAP_BASE
++#define BYTE_MAP_BASE _byte_map_base
++#endif
++
++#ifdef USELABELS
++  const static void* const opclabels_data[256] = {
++/* 0x00 */ &&opc_nop,     &&opc_aconst_null,&&opc_iconst_m1,&&opc_iconst_0,
++/* 0x04 */ &&opc_iconst_1,&&opc_iconst_2,   &&opc_iconst_3, &&opc_iconst_4,
++/* 0x08 */ &&opc_iconst_5,&&opc_lconst_0,   &&opc_lconst_1, &&opc_fconst_0,
++/* 0x0C */ &&opc_fconst_1,&&opc_fconst_2,   &&opc_dconst_0, &&opc_dconst_1,
++
++/* 0x10 */ &&opc_bipush, &&opc_sipush, &&opc_ldc,    &&opc_ldc_w,
++/* 0x14 */ &&opc_ldc2_w, &&opc_iload,  &&opc_lload,  &&opc_fload,
++/* 0x18 */ &&opc_dload,  &&opc_aload,  &&opc_iload_0,&&opc_iload_1,
++/* 0x1C */ &&opc_iload_2,&&opc_iload_3,&&opc_lload_0,&&opc_lload_1,
++
++/* 0x20 */ &&opc_lload_2,&&opc_lload_3,&&opc_fload_0,&&opc_fload_1,
++/* 0x24 */ &&opc_fload_2,&&opc_fload_3,&&opc_dload_0,&&opc_dload_1,
++/* 0x28 */ &&opc_dload_2,&&opc_dload_3,&&opc_aload_0,&&opc_aload_1,
++/* 0x2C */ &&opc_aload_2,&&opc_aload_3,&&opc_iaload, &&opc_laload,
++
++/* 0x30 */ &&opc_faload,  &&opc_daload,  &&opc_aaload,  &&opc_baload,
++/* 0x34 */ &&opc_caload,  &&opc_saload,  &&opc_istore,  &&opc_lstore,
++/* 0x38 */ &&opc_fstore,  &&opc_dstore,  &&opc_astore,  &&opc_istore_0,
++/* 0x3C */ &&opc_istore_1,&&opc_istore_2,&&opc_istore_3,&&opc_lstore_0,
++
++/* 0x40 */ &&opc_lstore_1,&&opc_lstore_2,&&opc_lstore_3,&&opc_fstore_0,
++/* 0x44 */ &&opc_fstore_1,&&opc_fstore_2,&&opc_fstore_3,&&opc_dstore_0,
++/* 0x48 */ &&opc_dstore_1,&&opc_dstore_2,&&opc_dstore_3,&&opc_astore_0,
++/* 0x4C */ &&opc_astore_1,&&opc_astore_2,&&opc_astore_3,&&opc_iastore,
++
++/* 0x50 */ &&opc_lastore,&&opc_fastore,&&opc_dastore,&&opc_aastore,
++/* 0x54 */ &&opc_bastore,&&opc_castore,&&opc_sastore,&&opc_pop,
++/* 0x58 */ &&opc_pop2,   &&opc_dup,    &&opc_dup_x1, &&opc_dup_x2,
++/* 0x5C */ &&opc_dup2,   &&opc_dup2_x1,&&opc_dup2_x2,&&opc_swap,
++
++/* 0x60 */ &&opc_iadd,&&opc_ladd,&&opc_fadd,&&opc_dadd,
++/* 0x64 */ &&opc_isub,&&opc_lsub,&&opc_fsub,&&opc_dsub,
++/* 0x68 */ &&opc_imul,&&opc_lmul,&&opc_fmul,&&opc_dmul,
++/* 0x6C */ &&opc_idiv,&&opc_ldiv,&&opc_fdiv,&&opc_ddiv,
++
++/* 0x70 */ &&opc_irem, &&opc_lrem, &&opc_frem,&&opc_drem,
++/* 0x74 */ &&opc_ineg, &&opc_lneg, &&opc_fneg,&&opc_dneg,
++/* 0x78 */ &&opc_ishl, &&opc_lshl, &&opc_ishr,&&opc_lshr,
++/* 0x7C */ &&opc_iushr,&&opc_lushr,&&opc_iand,&&opc_land,
++
++/* 0x80 */ &&opc_ior, &&opc_lor,&&opc_ixor,&&opc_lxor,
++/* 0x84 */ &&opc_iinc,&&opc_i2l,&&opc_i2f, &&opc_i2d,
++/* 0x88 */ &&opc_l2i, &&opc_l2f,&&opc_l2d, &&opc_f2i,
++/* 0x8C */ &&opc_f2l, &&opc_f2d,&&opc_d2i, &&opc_d2l,
++
++/* 0x90 */ &&opc_d2f,  &&opc_i2b,  &&opc_i2c,  &&opc_i2s,
++/* 0x94 */ &&opc_lcmp, &&opc_fcmpl,&&opc_fcmpg,&&opc_dcmpl,
++/* 0x98 */ &&opc_dcmpg,&&opc_ifeq, &&opc_ifne, &&opc_iflt,
++/* 0x9C */ &&opc_ifge, &&opc_ifgt, &&opc_ifle, &&opc_if_icmpeq,
++
++/* 0xA0 */ &&opc_if_icmpne,&&opc_if_icmplt,&&opc_if_icmpge,  &&opc_if_icmpgt,
++/* 0xA4 */ &&opc_if_icmple,&&opc_if_acmpeq,&&opc_if_acmpne,  &&opc_goto,
++/* 0xA8 */ &&opc_jsr,      &&opc_ret,      &&opc_tableswitch,&&opc_lookupswitch,
++/* 0xAC */ &&opc_ireturn,  &&opc_lreturn,  &&opc_freturn,    &&opc_dreturn,
++
++/* 0xB0 */ &&opc_areturn,     &&opc_return,         &&opc_getstatic,    &&opc_putstatic,
++/* 0xB4 */ &&opc_getfield,    &&opc_putfield,       &&opc_invokevirtual,&&opc_invokespecial,
++/* 0xB8 */ &&opc_invokestatic,&&opc_invokeinterface,NULL,               &&opc_new,
++/* 0xBC */ &&opc_newarray,    &&opc_anewarray,      &&opc_arraylength,  &&opc_athrow,
++
++/* 0xC0 */ &&opc_checkcast,   &&opc_instanceof,     &&opc_monitorenter, &&opc_monitorexit,
++/* 0xC4 */ &&opc_wide,        &&opc_multianewarray, &&opc_ifnull,       &&opc_ifnonnull,
++/* 0xC8 */ &&opc_goto_w,      &&opc_jsr_w,          &&opc_breakpoint,   &&opc_fast_igetfield,
++/* 0xCC */ &&opc_fast_agetfield,&&opc_fast_aload_0, &&opc_fast_iaccess_0, &&opc_fast_aaccess_0,
++
++/* 0xD0 */ &&opc_fast_linearswitch, &&opc_fast_binaryswitch, &&opc_return_register_finalizer,      &&opc_default,
++/* 0xD4 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
++/* 0xD8 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
++/* 0xDC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
++
++/* 0xE0 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
++/* 0xE4 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
++/* 0xE8 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
++/* 0xEC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
++
++/* 0xF0 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
++/* 0xF4 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
++/* 0xF8 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
++/* 0xFC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default
++  };
++  register uintptr_t *dispatch_table = (uintptr_t*)&opclabels_data[0];
++#endif /* USELABELS */
++
++#ifdef ASSERT
++  // this will trigger a VERIFY_OOP on entry
++  if (istate->msg() != initialize && ! METHOD->is_static()) {
++    oop rcvr = LOCALS_OBJECT(0);
++  }
++#endif
++// #define HACK
++#ifdef HACK
++  bool interesting = false;
++#endif // HACK
++
++  /* QQQ this should be a stack method so we don't need to know the actual direction */
++  assert(istate->msg() == initialize ||
++         (topOfStack >= istate->stack_limit() &&
++          topOfStack < istate->stack_base()),
++         "Stack top out of range");
++
++  switch (istate->msg()) {
++    case initialize: {
++      if (initialized++) ShouldNotReachHere(); // Only one initialize call
++      _compiling = (UseCompiler || CountCompiledCalls);
++#ifdef VM_JVMTI
++      _jvmti_interp_events = JvmtiExport::can_post_interpreter_events();
++#endif
++      BarrierSet* bs = Universe::heap()->barrier_set();
++      assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
++      _byte_map_base = (volatile jbyte*)(((CardTableModRefBS*)bs)->byte_map_base);
++      return;
++    }
++    break;
++    case method_entry: {
++      THREAD->set_do_not_unlock();
++      // count invocations
++      assert(initialized, "Interpreter not initialized");
++      if (_compiling) {
++        if (ProfileInterpreter) {
++          METHOD->increment_interpreter_invocation_count();
++        }
++        INCR_INVOCATION_COUNT;
++        if (INVOCATION_COUNT->reached_InvocationLimit()) {
++            CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception);
++
++            // We no longer retry on a counter overflow
++
++            // istate->set_msg(retry_method);
++            // THREAD->clr_do_not_unlock();
++            // return;
++        }
++        SAFEPOINT;
++      }
++
++      if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
++        // initialize
++        os::breakpoint();
++      }
++
++#ifdef HACK
++      {
++        ResourceMark rm;
++        char *method_name = istate->method()->name_and_sig_as_C_string();
++        if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
++          tty->print_cr("entering: depth %d bci: %d",
++                         (istate->_stack_base - istate->_stack),
++                         istate->_bcp - istate->_method->code_base());
++          interesting = true;
++        }
++      }
++#endif // HACK
++
++
++      // lock method if synchronized
++      if (METHOD->is_synchronized()) {
++          // oop rcvr = locals[0].j.r;
++          oop rcvr;
++          if (METHOD->is_static()) {
++            rcvr = METHOD->constants()->pool_holder()->klass_part()->java_mirror();
++          } else {
++            rcvr = LOCALS_OBJECT(0);
++          }
++          // The initial monitor is ours for the taking
++          BasicObjectLock* mon = &istate->monitor_base()[-1];
++          oop monobj = mon->obj();
++          assert(mon->obj() == rcvr, "method monitor mis-initialized");
++
++          bool success = UseBiasedLocking;
++          if (UseBiasedLocking) {
++            markOop mark = rcvr->mark();
++            if (mark->has_bias_pattern()) {
++              // The bias pattern is present in the object's header. Need to check
++              // whether the bias owner and the epoch are both still current.
++              intptr_t xx = ((intptr_t) THREAD) ^ (intptr_t) mark;
++              xx = (intptr_t) rcvr->klass()->klass_part()->prototype_header() ^ xx;
++              intptr_t yy = (xx & ~((int) markOopDesc::age_mask_in_place));
++              if (yy != 0 ) {
++                // At this point we know that the header has the bias pattern and
++                // that we are not the bias owner in the current epoch. We need to
++                // figure out more details about the state of the header in order to
++                // know what operations can be legally performed on the object's
++                // header.
++
++                // If the low three bits in the xor result aren't clear, that means
++                // the prototype header is no longer biased and we have to revoke
++                // the bias on this object.
++
++                if ((yy & markOopDesc::biased_lock_mask_in_place) == 0) {
++                  // Biasing is still enabled for this data type. See whether the
++                  // epoch of the current bias is still valid, meaning that the epoch
++                  // bits of the mark word are equal to the epoch bits of the
++                  // prototype header. (Note that the prototype header's epoch bits
++                  // only change at a safepoint.) If not, attempt to rebias the object
++                  // toward the current thread. Note that we must be absolutely sure
++                  // that the current epoch is invalid in order to do this because
++                  // otherwise the manipulations it performs on the mark word are
++                  // illegal.
++                  if ((yy & markOopDesc::epoch_mask_in_place) == 0) {
++                    // The epoch of the current bias is still valid but we know nothing
++                    // about the owner; it might be set or it might be clear. Try to
++                    // acquire the bias of the object using an atomic operation. If this
++                    // fails we will go in to the runtime to revoke the object's bias.
++                    // Note that we first construct the presumed unbiased header so we
++                    // don't accidentally blow away another thread's valid bias.
++                    intptr_t unbiased = (intptr_t) mark & (markOopDesc::biased_lock_mask_in_place |
++                                                           markOopDesc::age_mask_in_place |
++                                                           markOopDesc::epoch_mask_in_place);
++                    if (Atomic::cmpxchg_ptr((intptr_t)THREAD | unbiased, (intptr_t*) rcvr->mark_addr(), unbiased) != unbiased) {
++                      CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
++                    }
++                  } else {
++                    try_rebias:
++                    // At this point we know the epoch has expired, meaning that the
++                    // current "bias owner", if any, is actually invalid. Under these
++                    // circumstances _only_, we are allowed to use the current header's
++                    // value as the comparison value when doing the cas to acquire the
++                    // bias in the current epoch. In other words, we allow transfer of
++                    // the bias from one thread to another directly in this situation.
++                    xx = (intptr_t) rcvr->klass()->klass_part()->prototype_header() | (intptr_t) THREAD;
++                    if (Atomic::cmpxchg_ptr((intptr_t)THREAD | (intptr_t) rcvr->klass()->klass_part()->prototype_header(),
++                                            (intptr_t*) rcvr->mark_addr(),
++                                            (intptr_t) mark) != (intptr_t) mark) {
++                      CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
++                    }
++                  }
++                } else {
++                  try_revoke_bias:
++                  // The prototype mark in the klass doesn't have the bias bit set any
++                  // more, indicating that objects of this data type are not supposed
++                  // to be biased any more. We are going to try to reset the mark of
++                  // this object to the prototype value and fall through to the
++                  // CAS-based locking scheme. Note that if our CAS fails, it means
++                  // that another thread raced us for the privilege of revoking the
++                  // bias of this particular object, so it's okay to continue in the
++                  // normal locking code.
++                  //
++                  xx = (intptr_t) rcvr->klass()->klass_part()->prototype_header() | (intptr_t) THREAD;
++                  if (Atomic::cmpxchg_ptr(rcvr->klass()->klass_part()->prototype_header(),
++                                          (intptr_t*) rcvr->mark_addr(),
++                                          mark) == mark) {
++                    // (*counters->revoked_lock_entry_count_addr())++;
++                    success = false;
++                  }
++                }
++              }
++            } else {
++              cas_label:
++              success = false;
++            }
++          }
++          if (!success) {
++            markOop displaced = rcvr->mark()->set_unlocked();
++            mon->lock()->set_displaced_header(displaced);
++            if (Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
++              // Is it simple recursive case?
++              if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
++                mon->lock()->set_displaced_header(NULL);
++              } else {
++                CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
++              }
++            }
++          }
++      }
++      THREAD->clr_do_not_unlock();
++
++      // Notify jvmti
++#ifdef VM_JVMTI
++      if (_jvmti_interp_events) {
++        // Whenever JVMTI puts a thread in interp_only_mode, method
++        // entry/exit events are sent for that thread to track stack depth.
++        if (THREAD->is_interp_only_mode()) {
++          CALL_VM(InterpreterRuntime::post_method_entry(THREAD),
++                  handle_exception);
++        }
++      }
++#endif /* VM_JVMTI */
++
++      goto run;
++    }
++
++    case popping_frame: {
++      // returned from a java call to pop the frame, restart the call
++      // clear the message so we don't confuse ourselves later
++      assert(THREAD->pop_frame_in_process(), "wrong frame pop state");
++      istate->set_msg(no_request);
++      THREAD->clr_pop_frame_in_process();
++      goto run;
++    }
++
++    case method_resume: {
++      if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
++        // resume
++        os::breakpoint();
++      }
++#ifdef HACK
++      {
++        ResourceMark rm;
++        char *method_name = istate->method()->name_and_sig_as_C_string();
++        if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
++          tty->print_cr("resume: depth %d bci: %d",
++                         (istate->_stack_base - istate->_stack) ,
++                         istate->_bcp - istate->_method->code_base());
++          interesting = true;
++        }
++      }
++#endif // HACK
++      // returned from a java call, continue executing.
++      if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) {
++        goto handle_Pop_Frame;
++      }
++
++      if (THREAD->has_pending_exception()) goto handle_exception;
++      // Update the pc by the saved amount of the invoke bytecode size
++      UPDATE_PC(istate->bcp_advance());
++      goto run;
++    }
++
++    case deopt_resume2: {
++      // Returned from an opcode that will reexecute. Deopt was
++      // a result of a PopFrame request.
++      //
++      goto run;
++    }
++
++    case deopt_resume: {
++      // Returned from an opcode that has completed. The stack has
++      // the result all we need to do is skip across the bytecode
++      // and continue (assuming there is no exception pending)
++      //
++      // compute continuation length
++      //
++      // Note: it is possible to deopt at a return_register_finalizer opcode
++      // because this requires entering the vm to do the registering. While the
++      // opcode is complete we can't advance because there are no more opcodes,
++      // much like trying to deopt at a poll return. In that case we simply
++      // get out of here.
++      //
++      if ( Bytecodes::code_at(pc, METHOD) == Bytecodes::_return_register_finalizer) {
++        // this will do the right thing even if an exception is pending.
++        goto handle_return;
++      }
++      UPDATE_PC(Bytecodes::length_at(pc));
++      if (THREAD->has_pending_exception()) goto handle_exception;
++      goto run;
++    }
++    case got_monitors: {
++      // continue locking now that we have a monitor to use
++      // we expect to find a newly allocated monitor at the "top" of the monitor stack.
++      oop lockee = STACK_OBJECT(-1);
++      // dereferencing lockee ought to provoke an implicit null check
++      // find a free monitor
++      BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
++      assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
++      entry->set_obj(lockee);
++
++      markOop displaced = lockee->mark()->set_unlocked();
++      entry->lock()->set_displaced_header(displaced);
++      if (Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
++        // Is it simple recursive case?
++        if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
++          entry->lock()->set_displaced_header(NULL);
++        } else {
++          CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
++        }
++      }
++      UPDATE_PC_AND_TOS(1, -1);
++      goto run;
++    }
++    default: {
++      fatal("Unexpected message from frame manager");
++    }
++  }
++
++run:
++
++  DO_UPDATE_INSTRUCTION_COUNT(*pc)
++  DEBUGGER_SINGLE_STEP_NOTIFY();
++#ifdef PREFETCH_OPCCODE
++  opcode = *pc;  /* prefetch first opcode */
++#endif
++
++#ifndef USELABELS
++  while (1)
++#endif
++  {
++#ifndef PREFETCH_OPCCODE
++      opcode = *pc;
++#endif
++      // Seems like this happens twice per opcode. At worst it is only
++      // needed at entry to the loop.
++      // DEBUGGER_SINGLE_STEP_NOTIFY();
++      /* Using this label avoids double breakpoints when quickening and
++       * when returning from transition frames.
++       */
++  opcode_switch:
++      assert(istate == orig, "Corrupted istate");
++      /* QQQ Hmm this has knowledge of direction, ought to be a stack method */
++      assert(topOfStack >= istate->stack_limit(), "Stack overrun");
++      assert(topOfStack < istate->stack_base(), "Stack underrun");
++
++#ifdef USELABELS
++      DISPATCH(opcode);
++#else
++      switch (opcode)
++#endif
++      {
++      CASE(_nop):
++          UPDATE_PC_AND_CONTINUE(1);
++
++          /* Push miscellaneous constants onto the stack. */
++
++      CASE(_aconst_null):
++          SET_STACK_OBJECT(NULL, 0);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
++
++#undef  OPC_CONST_n
++#define OPC_CONST_n(opcode, const_type, value)                          \
++      CASE(opcode):                                                     \
++          SET_STACK_ ## const_type(value, 0);                           \
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
++
++          OPC_CONST_n(_iconst_m1,   INT,       -1);
++          OPC_CONST_n(_iconst_0,    INT,        0);
++          OPC_CONST_n(_iconst_1,    INT,        1);
++          OPC_CONST_n(_iconst_2,    INT,        2);
++          OPC_CONST_n(_iconst_3,    INT,        3);
++          OPC_CONST_n(_iconst_4,    INT,        4);
++          OPC_CONST_n(_iconst_5,    INT,        5);
++          OPC_CONST_n(_fconst_0,    FLOAT,      0.0);
++          OPC_CONST_n(_fconst_1,    FLOAT,      1.0);
++          OPC_CONST_n(_fconst_2,    FLOAT,      2.0);
++
++#undef  OPC_CONST2_n
++#define OPC_CONST2_n(opcname, value, key, kind)                         \
++      CASE(_##opcname):                                                 \
++      {                                                                 \
++          SET_STACK_ ## kind(VM##key##Const##value(), 1);               \
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);                         \
++      }
++         OPC_CONST2_n(dconst_0, Zero, double, DOUBLE);
++         OPC_CONST2_n(dconst_1, One,  double, DOUBLE);
++         OPC_CONST2_n(lconst_0, Zero, long, LONG);
++         OPC_CONST2_n(lconst_1, One,  long, LONG);
++
++         /* Load constant from constant pool: */
++
++          /* Push a 1-byte signed integer value onto the stack. */
++      CASE(_bipush):
++          SET_STACK_INT((jbyte)(pc[1]), 0);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
++
++          /* Push a 2-byte signed integer constant onto the stack. */
++      CASE(_sipush):
++          SET_STACK_INT((int16_t)Bytes::get_Java_u2(pc + 1), 0);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
++
++          /* load from local variable */
++
++      CASE(_aload):
++          SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
++
++      CASE(_iload):
++      CASE(_fload):
++          SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
++
++      CASE(_lload):
++          SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(pc[1]), 1);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);
++
++      CASE(_dload):
++          SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(pc[1]), 1);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);
++
++#undef  OPC_LOAD_n
++#define OPC_LOAD_n(num)                                                 \
++      CASE(_aload_##num):                                               \
++          SET_STACK_OBJECT(LOCALS_OBJECT(num), 0);                      \
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);                         \
++                                                                        \
++      CASE(_iload_##num):                                               \
++      CASE(_fload_##num):                                               \
++          SET_STACK_SLOT(LOCALS_SLOT(num), 0);                          \
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);                         \
++                                                                        \
++      CASE(_lload_##num):                                               \
++          SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(num), 1);             \
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);                         \
++      CASE(_dload_##num):                                               \
++          SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(num), 1);         \
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
++
++          OPC_LOAD_n(0);
++          OPC_LOAD_n(1);
++          OPC_LOAD_n(2);
++          OPC_LOAD_n(3);
++
++          /* store to a local variable */
++
++      CASE(_astore):
++          astore(topOfStack, -1, locals, pc[1]);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);
++
++      CASE(_istore):
++      CASE(_fstore):
++          SET_LOCALS_SLOT(STACK_SLOT(-1), pc[1]);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);
++
++      CASE(_lstore):
++          SET_LOCALS_LONG(STACK_LONG(-1), pc[1]);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);
++
++      CASE(_dstore):
++          SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), pc[1]);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);
++
++      CASE(_wide): {
++          uint16_t reg = Bytes::get_Java_u2(pc + 2);
++
++          opcode = pc[1];
++          switch(opcode) {
++              case Bytecodes::_aload:
++                  SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0);
++                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
++
++              case Bytecodes::_iload:
++              case Bytecodes::_fload:
++                  SET_STACK_SLOT(LOCALS_SLOT(reg), 0);
++                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
++
++              case Bytecodes::_lload:
++                  SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
++                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
++
++              case Bytecodes::_dload:
++                  SET_STACK_DOUBLE_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
++                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
++
++              case Bytecodes::_astore:
++                  astore(topOfStack, -1, locals, reg);
++                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);
++
++              case Bytecodes::_istore:
++              case Bytecodes::_fstore:
++                  SET_LOCALS_SLOT(STACK_SLOT(-1), reg);
++                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);
++
++              case Bytecodes::_lstore:
++                  SET_LOCALS_LONG(STACK_LONG(-1), reg);
++                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);
++
++              case Bytecodes::_dstore:
++                  SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), reg);
++                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);
++
++              case Bytecodes::_iinc: {
++                  int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4);
++                  // Be nice to see what this generates.... QQQ
++                  SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg);
++                  UPDATE_PC_AND_CONTINUE(6);
++              }
++              case Bytecodes::_ret:
++                  pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg));
++                  UPDATE_PC_AND_CONTINUE(0);
++              default:
++                  VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode");
++          }
++      }
++
++
++#undef  OPC_STORE_n
++#define OPC_STORE_n(num)                                                \
++      CASE(_astore_##num):                                              \
++          astore(topOfStack, -1, locals, num);                          \
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                        \
++      CASE(_istore_##num):                                              \
++      CASE(_fstore_##num):                                              \
++          SET_LOCALS_SLOT(STACK_SLOT(-1), num);                         \
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
++
++          OPC_STORE_n(0);
++          OPC_STORE_n(1);
++          OPC_STORE_n(2);
++          OPC_STORE_n(3);
++
++#undef  OPC_DSTORE_n
++#define OPC_DSTORE_n(num)                                               \
++      CASE(_dstore_##num):                                              \
++          SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), num);                     \
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                        \
++      CASE(_lstore_##num):                                              \
++          SET_LOCALS_LONG(STACK_LONG(-1), num);                         \
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);
++
++          OPC_DSTORE_n(0);
++          OPC_DSTORE_n(1);
++          OPC_DSTORE_n(2);
++          OPC_DSTORE_n(3);
++
++          /* stack pop, dup, and insert opcodes */
++
++
++      CASE(_pop):                /* Discard the top item on the stack */
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
++
++
++      CASE(_pop2):               /* Discard the top 2 items on the stack */
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);
++
++
++      CASE(_dup):               /* Duplicate the top item on the stack */
++          dup(topOfStack);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
++
++      CASE(_dup2):              /* Duplicate the top 2 items on the stack */
++          dup2(topOfStack);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
++
++      CASE(_dup_x1):    /* insert top word two down */
++          dup_x1(topOfStack);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
++
++      CASE(_dup_x2):    /* insert top word three down  */
++          dup_x2(topOfStack);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
++
++      CASE(_dup2_x1):   /* insert top 2 slots three down */
++          dup2_x1(topOfStack);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
++
++      CASE(_dup2_x2):   /* insert top 2 slots four down */
++          dup2_x2(topOfStack);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
++
++      CASE(_swap): {        /* swap top two elements on the stack */
++          swap(topOfStack);
++          UPDATE_PC_AND_CONTINUE(1);
++      }
++
++          /* Perform various binary integer operations */
++
++#undef  OPC_INT_BINARY
++#define OPC_INT_BINARY(opcname, opname, test)                           \
++      CASE(_i##opcname):                                                \
++          if (test && (STACK_INT(-1) == 0)) {                           \
++              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
++                            "/ by int zero");                           \
++          }                                                             \
++          SET_STACK_INT(VMint##opname(STACK_INT(-2),                    \
++                                      STACK_INT(-1)),                   \
++                                      -2);                              \
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                        \
++      CASE(_l##opcname):                                                \
++      {                                                                 \
++          if (test) {                                                   \
++            jlong l1 = STACK_LONG(-1);                                  \
++            if (VMlongEqz(l1)) {                                        \
++              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
++                            "/ by long zero");                          \
++            }                                                           \
++          }                                                             \
++          /* First long at (-1,-2) next long at (-3,-4) */              \
++          SET_STACK_LONG(VMlong##opname(STACK_LONG(-3),                 \
++                                        STACK_LONG(-1)),                \
++                                        -3);                            \
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                        \
++      }
++
++      OPC_INT_BINARY(add, Add, 0);
++      OPC_INT_BINARY(sub, Sub, 0);
++      OPC_INT_BINARY(mul, Mul, 0);
++      OPC_INT_BINARY(and, And, 0);
++      OPC_INT_BINARY(or,  Or,  0);
++      OPC_INT_BINARY(xor, Xor, 0);
++      OPC_INT_BINARY(div, Div, 1);
++      OPC_INT_BINARY(rem, Rem, 1);
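++
++      /* The `test` argument is 1 only for div and rem: a zero divisor
++       * (e.g. evaluating `1 / 0`) raises the ArithmeticException above
++       * instead of reaching the VMint/VMlong operation. */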
++
++
++      /* Perform various binary floating number operations */
++      /* On some machines/platforms/compilers the divide-by-zero check can be implicit */
++
++#undef  OPC_FLOAT_BINARY
++#define OPC_FLOAT_BINARY(opcname, opname)                                  \
++      CASE(_d##opcname): {                                                 \
++          SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3),              \
++                                            STACK_DOUBLE(-1)),             \
++                                            -3);                           \
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                           \
++      }                                                                    \
++      CASE(_f##opcname):                                                   \
++          SET_STACK_FLOAT(VMfloat##opname(STACK_FLOAT(-2),                 \
++                                          STACK_FLOAT(-1)),                \
++                                          -2);                             \
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
++
++
++      OPC_FLOAT_BINARY(add, Add);
++      OPC_FLOAT_BINARY(sub, Sub);
++      OPC_FLOAT_BINARY(mul, Mul);
++      OPC_FLOAT_BINARY(div, Div);
++      OPC_FLOAT_BINARY(rem, Rem);
++
++      /* Shift operations
++       * Shift left int and long: ishl, lshl
++       * Logical shift right int and long w/zero extension: iushr, lushr
++       * Arithmetic shift right int and long w/sign extension: ishr, lshr
++       */
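++
++      /* Per the JVM spec only the low 5 bits of the count are used for int
++       * shifts (6 bits for long), so e.g. -8 >> 1 == -4 (ishr, sign
++       * extension) while -8 >>> 1 == 0x7ffffffc (iushr, zero extension). */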
++
++#undef  OPC_SHIFT_BINARY
++#define OPC_SHIFT_BINARY(opcname, opname)                               \
++      CASE(_i##opcname):                                                \
++         SET_STACK_INT(VMint##opname(STACK_INT(-2),                     \
++                                     STACK_INT(-1)),                    \
++                                     -2);                               \
++         UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                         \
++      CASE(_l##opcname):                                                \
++      {                                                                 \
++         SET_STACK_LONG(VMlong##opname(STACK_LONG(-2),                  \
++                                       STACK_INT(-1)),                  \
++                                       -2);                             \
++         UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                         \
++      }
++
++      OPC_SHIFT_BINARY(shl, Shl);
++      OPC_SHIFT_BINARY(shr, Shr);
++      OPC_SHIFT_BINARY(ushr, Ushr);
++
++      /* Increment local variable by constant */
++      CASE(_iinc):
++      {
++          // locals[pc[1]].j.i += (jbyte)(pc[2]);
++          SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]);
++          UPDATE_PC_AND_CONTINUE(3);
++      }
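++
++      /* iinc is three bytes: opcode, local index, signed byte constant; a
++       * loop increment like `i++` typically compiles to `iinc <i> 1`.
++       * Constants outside [-128, 127] use the wide form handled above. */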
++
++      /* negate the value on the top of the stack */
++
++      CASE(_ineg):
++         SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1);
++         UPDATE_PC_AND_CONTINUE(1);
++
++      CASE(_fneg):
++         SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1);
++         UPDATE_PC_AND_CONTINUE(1);
++
++      CASE(_lneg):
++      {
++         SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1);
++         UPDATE_PC_AND_CONTINUE(1);
++      }
++
++      CASE(_dneg):
++      {
++         SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1);
++         UPDATE_PC_AND_CONTINUE(1);
++      }
++
++      /* Conversion operations */
++
++      CASE(_i2f):       /* convert top of stack int to float */
++         SET_STACK_FLOAT(VMint2Float(STACK_INT(-1)), -1);
++         UPDATE_PC_AND_CONTINUE(1);
++
++      CASE(_i2l):       /* convert top of stack int to long */
++      {
++          // this is ugly QQQ
++          jlong r = VMint2Long(STACK_INT(-1));
++          MORE_STACK(-1); // Pop
++          SET_STACK_LONG(r, 1);
++
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
++      }
++
++      CASE(_i2d):       /* convert top of stack int to double */
++      {
++          // this is ugly QQQ (why cast to jlong?? )
++          jdouble r = (jlong)STACK_INT(-1);
++          MORE_STACK(-1); // Pop
++          SET_STACK_DOUBLE(r, 1);
++
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
++      }
++
++      CASE(_l2i):       /* convert top of stack long to int */
++      {
++          jint r = VMlong2Int(STACK_LONG(-1));
++          MORE_STACK(-2); // Pop
++          SET_STACK_INT(r, 0);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
++      }
++
++      CASE(_l2f):   /* convert top of stack long to float */
++      {
++          jlong r = STACK_LONG(-1);
++          MORE_STACK(-2); // Pop
++          SET_STACK_FLOAT(VMlong2Float(r), 0);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
++      }
++
++      CASE(_l2d):       /* convert top of stack long to double */
++      {
++          jlong r = STACK_LONG(-1);
++          MORE_STACK(-2); // Pop
++          SET_STACK_DOUBLE(VMlong2Double(r), 1);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
++      }
++
++      CASE(_f2i):  /* Convert top of stack float to int */
++          SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1);
++          UPDATE_PC_AND_CONTINUE(1);
++
++      CASE(_f2l):  /* convert top of stack float to long */
++      {
++          jlong r = SharedRuntime::f2l(STACK_FLOAT(-1));
++          MORE_STACK(-1); // POP
++          SET_STACK_LONG(r, 1);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
++      }
++
++      CASE(_f2d):  /* convert top of stack float to double */
++      {
++          jfloat f;
++          jdouble r;
++          f = STACK_FLOAT(-1);
++#ifdef IA64
++          // IA64 gcc bug
++          r = ( f == 0.0f ) ? (jdouble) f : (jdouble) f + ia64_double_zero;
++#else
++          r = (jdouble) f;
++#endif
++          MORE_STACK(-1); // POP
++          SET_STACK_DOUBLE(r, 1);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
++      }
++
++      CASE(_d2i): /* convert top of stack double to int */
++      {
++          jint r1 = SharedRuntime::d2i(STACK_DOUBLE(-1));
++          MORE_STACK(-2);
++          SET_STACK_INT(r1, 0);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
++      }
++
++      CASE(_d2f): /* convert top of stack double to float */
++      {
++          jfloat r1 = VMdouble2Float(STACK_DOUBLE(-1));
++          MORE_STACK(-2);
++          SET_STACK_FLOAT(r1, 0);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
++      }
++
++      CASE(_d2l): /* convert top of stack double to long */
++      {
++          jlong r1 = SharedRuntime::d2l(STACK_DOUBLE(-1));
++          MORE_STACK(-2);
++          SET_STACK_LONG(r1, 1);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
++      }
++
++      CASE(_i2b):
++          SET_STACK_INT(VMint2Byte(STACK_INT(-1)), -1);
++          UPDATE_PC_AND_CONTINUE(1);
++
++      CASE(_i2c):
++          SET_STACK_INT(VMint2Char(STACK_INT(-1)), -1);
++          UPDATE_PC_AND_CONTINUE(1);
++
++      CASE(_i2s):
++          SET_STACK_INT(VMint2Short(STACK_INT(-1)), -1);
++          UPDATE_PC_AND_CONTINUE(1);
++
++      /* comparison operators */
++
++
++#define COMPARISON_OP(name, comparison)                                      \
++      CASE(_if_icmp##name): {                                                \
++          int skip = (STACK_INT(-2) comparison STACK_INT(-1))                \
++                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
++          address branch_pc = pc;                                            \
++          UPDATE_PC_AND_TOS(skip, -2);                                       \
++          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
++          CONTINUE;                                                          \
++      }                                                                      \
++      CASE(_if##name): {                                                     \
++          int skip = (STACK_INT(-1) comparison 0)                            \
++                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
++          address branch_pc = pc;                                            \
++          UPDATE_PC_AND_TOS(skip, -1);                                       \
++          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
++          CONTINUE;                                                          \
++      }
++
++#define COMPARISON_OP2(name, comparison)                                     \
++      COMPARISON_OP(name, comparison)                                        \
++      CASE(_if_acmp##name): {                                                \
++          int skip = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1))          \
++                       ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;            \
++          address branch_pc = pc;                                            \
++          UPDATE_PC_AND_TOS(skip, -2);                                       \
++          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
++          CONTINUE;                                                          \
++      }
++
++#define NULL_COMPARISON_NOT_OP(name)                                         \
++      CASE(_if##name): {                                                     \
++          int skip = (!(STACK_OBJECT(-1) == 0))                              \
++                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
++          address branch_pc = pc;                                            \
++          UPDATE_PC_AND_TOS(skip, -1);                                       \
++          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
++          CONTINUE;                                                          \
++      }
++
++#define NULL_COMPARISON_OP(name)                                             \
++      CASE(_if##name): {                                                     \
++          int skip = ((STACK_OBJECT(-1) == 0))                               \
++                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
++          address branch_pc = pc;                                            \
++          UPDATE_PC_AND_TOS(skip, -1);                                       \
++          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
++          CONTINUE;                                                          \
++      }
++      COMPARISON_OP(lt, <);
++      COMPARISON_OP(gt, >);
++      COMPARISON_OP(le, <=);
++      COMPARISON_OP(ge, >=);
++      COMPARISON_OP2(eq, ==);  /* include ref comparison */
++      COMPARISON_OP2(ne, !=);  /* include ref comparison */
++      NULL_COMPARISON_OP(null);
++      NULL_COMPARISON_NOT_OP(nonnull);
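++
++      /* In each case `skip` is either the signed 16-bit branch offset or 3,
++       * the length of the if<cond> instruction itself, when the branch
++       * falls through; e.g. `if (a > b) { ... }` compiles to an if_icmple
++       * that jumps over the then-block. */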
++
++      /* Goto pc at specified offset in switch table. */
++
++      CASE(_tableswitch): {
++          jint* lpc  = (jint*)VMalignWordUp(pc+1);
++          int32_t  key  = STACK_INT(-1);
++          int32_t  low  = Bytes::get_Java_u4((address)&lpc[1]);
++          int32_t  high = Bytes::get_Java_u4((address)&lpc[2]);
++          int32_t  skip;
++          key -= low;
++          skip = ((uint32_t) key > (uint32_t)(high - low))
++                      ? Bytes::get_Java_u4((address)&lpc[0])
++                      : Bytes::get_Java_u4((address)&lpc[key + 3]);
++          // Does this really need a full backedge check (osr?)
++          address branch_pc = pc;
++          UPDATE_PC_AND_TOS(skip, -1);
++          DO_BACKEDGE_CHECKS(skip, branch_pc);
++          CONTINUE;
++      }
++
++      /* Goto pc whose table entry matches specified key */
++
++      CASE(_lookupswitch): {
++          jint* lpc  = (jint*)VMalignWordUp(pc+1);
++          int32_t  key  = STACK_INT(-1);
++          int32_t  skip = Bytes::get_Java_u4((address) lpc); /* default amount */
++          int32_t  npairs = Bytes::get_Java_u4((address) &lpc[1]);
++          while (--npairs >= 0) {
++              lpc += 2;
++              if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) {
++                  skip = Bytes::get_Java_u4((address)&lpc[1]);
++                  break;
++              }
++          }
++          address branch_pc = pc;
++          UPDATE_PC_AND_TOS(skip, -1);
++          DO_BACKEDGE_CHECKS(skip, branch_pc);
++          CONTINUE;
++      }
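++
++      /* javac normally emits tableswitch for dense case sets (an indexed
++       * jump table; the unsigned compare above folds the low/high range
++       * check into one test) and lookupswitch for sparse ones (match/offset
++       * pairs, scanned linearly here). */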
++
++      CASE(_fcmpl):
++      CASE(_fcmpg):
++      {
++          SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2),
++                                        STACK_FLOAT(-1),
++                                        (opcode == Bytecodes::_fcmpl ? -1 : 1)),
++                        -2);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
++      }
++
++      CASE(_dcmpl):
++      CASE(_dcmpg):
++      {
++          int r = VMdoubleCompare(STACK_DOUBLE(-3),
++                                  STACK_DOUBLE(-1),
++                                  (opcode == Bytecodes::_dcmpl ? -1 : 1));
++          MORE_STACK(-4); // Pop
++          SET_STACK_INT(r, 0);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
++      }
++
++      CASE(_lcmp):
++      {
++          int r = VMlongCompare(STACK_LONG(-3), STACK_LONG(-1));
++          MORE_STACK(-4);
++          SET_STACK_INT(r, 0);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
++      }
++
++
++      /* Return from a method */
++
++      CASE(_areturn):
++      CASE(_ireturn):
++      CASE(_freturn):
++      {
++          // Allow a safepoint before returning to frame manager.
++          SAFEPOINT;
++
++          goto handle_return;
++      }
++
++      CASE(_lreturn):
++      CASE(_dreturn):
++      {
++          // Allow a safepoint before returning to frame manager.
++          SAFEPOINT;
++          goto handle_return;
++      }
++
++      CASE(_return_register_finalizer): {
++
++          oop rcvr = LOCALS_OBJECT(0);
++          if (rcvr->klass()->klass_part()->has_finalizer()) {
++            CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception);
++          }
++          goto handle_return;
++      }
++      CASE(_return): {
++
++          // Allow a safepoint before returning to frame manager.
++          SAFEPOINT;
++          goto handle_return;
++      }
++
++      /* Array access byte-codes */
++
++      /* Every array access byte-code starts out like this */
++//        arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff);
++#define ARRAY_INTRO(arrayOff)                                                  \
++      arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff);                      \
++      jint     index  = STACK_INT(arrayOff + 1);                               \
++      char message[jintAsStringSize];                                          \
++      CHECK_NULL(arrObj);                                                      \
++      if ((uint32_t)index >= (uint32_t)arrObj->length()) {                     \
++          sprintf(message, "%d", index);                                       \
++          VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \
++                        message);                                              \
++      }
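++
++      /* Casting to uint32_t lets one compare catch both bad cases: a
++       * negative index wraps to a huge unsigned value, so index < 0 and
++       * index >= length both take the out-of-bounds branch. */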
++
++      /* 32-bit loads. These handle conversion from < 32-bit types */
++#define ARRAY_LOADTO32(T, T2, format, stackRes, extra)                                \
++      {                                                                               \
++          ARRAY_INTRO(-2);                                                            \
++          extra;                                                                      \
++          SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \
++                           -2);                                                       \
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                                      \
++      }
++
++      /* 64-bit loads */
++#define ARRAY_LOADTO64(T,T2, stackRes, extra)                                              \
++      {                                                                                    \
++          ARRAY_INTRO(-2);                                                                 \
++          SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \
++          extra;                                                                           \
++          UPDATE_PC_AND_CONTINUE(1);                                            \
++      }
++
++      CASE(_iaload):
++          ARRAY_LOADTO32(T_INT, jint,   "%d",   STACK_INT, 0);
++      CASE(_faload):
++          ARRAY_LOADTO32(T_FLOAT, jfloat, "%f",   STACK_FLOAT, 0);
++      CASE(_aaload):
++          ARRAY_LOADTO32(T_OBJECT, oop,   INTPTR_FORMAT, STACK_OBJECT, 0);
++      CASE(_baload):
++          ARRAY_LOADTO32(T_BYTE, jbyte,  "%d",   STACK_INT, 0);
++      CASE(_caload):
++          ARRAY_LOADTO32(T_CHAR,  jchar, "%d",   STACK_INT, 0);
++      CASE(_saload):
++          ARRAY_LOADTO32(T_SHORT, jshort, "%d",   STACK_INT, 0);
++      CASE(_laload):
++          ARRAY_LOADTO64(T_LONG, jlong, STACK_LONG, 0);
++      CASE(_daload):
++          ARRAY_LOADTO64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
++
++      /* 32-bit stores. These handle conversion to < 32-bit types */
++#define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra)                            \
++      {                                                                              \
++          ARRAY_INTRO(-3);                                                           \
++          extra;                                                                     \
++          *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);                                     \
++      }
++
++      /* 64-bit stores */
++#define ARRAY_STOREFROM64(T, T2, stackSrc, extra)                                    \
++      {                                                                              \
++          ARRAY_INTRO(-4);                                                           \
++          extra;                                                                     \
++          *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4);                                     \
++      }
++
++      CASE(_iastore):
++          ARRAY_STOREFROM32(T_INT, jint,   "%d",   STACK_INT, 0);
++      CASE(_fastore):
++          ARRAY_STOREFROM32(T_FLOAT, jfloat, "%f",   STACK_FLOAT, 0);
++      /*
++       * This one looks different because of the assignability check
++       */
++      CASE(_aastore): {
++          oop rhsObject = STACK_OBJECT(-1);
++          ARRAY_INTRO( -3);
++          // arrObj, index are set
++          if (rhsObject != NULL) {
++            /* Check assignability of rhsObject into arrObj */
++            klassOop rhsKlassOop = rhsObject->klass(); // EBX (subclass)
++            assert(arrObj->klass()->klass()->klass_part()->oop_is_objArrayKlass(), "Ack not an objArrayKlass");
++            klassOop elemKlassOop = ((objArrayKlass*) arrObj->klass()->klass_part())->element_klass(); // superklass EAX
++            //
++            // Check for compatibility. This check must not GC!!
++            // Seems way more expensive now that we must dispatch
++            //
++            if (rhsKlassOop != elemKlassOop && !rhsKlassOop->klass_part()->is_subtype_of(elemKlassOop)) { // ebx->is...
++              VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "");
++            }
++          }
++          oop* elem_loc = (oop*)(((address) arrObj->base(T_OBJECT)) + index * sizeof(oop));
++          // *(oop*)(((address) arrObj->base(T_OBJECT)) + index * sizeof(oop)) = rhsObject;
++          *elem_loc = rhsObject;
++          // Mark the card
++          OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)elem_loc >> CardTableModRefBS::card_shift], 0);
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);
++      }
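++
++      /* The release_store above is the GC write barrier: shifting the slot
++       * address right by card_shift indexes the card table, and storing 0
++       * marks that card dirty so the collector rescans it for
++       * cross-generation pointers. */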
++      CASE(_bastore):
++          ARRAY_STOREFROM32(T_BYTE, jbyte,  "%d",   STACK_INT, 0);
++      CASE(_castore):
++          ARRAY_STOREFROM32(T_CHAR, jchar,  "%d",   STACK_INT, 0);
++      CASE(_sastore):
++          ARRAY_STOREFROM32(T_SHORT, jshort, "%d",   STACK_INT, 0);
++      CASE(_lastore):
++          ARRAY_STOREFROM64(T_LONG, jlong, STACK_LONG, 0);
++      CASE(_dastore):
++          ARRAY_STOREFROM64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
++
++      CASE(_arraylength):
++      {
++          arrayOop ary = (arrayOop) STACK_OBJECT(-1);
++          CHECK_NULL(ary);
++          SET_STACK_INT(ary->length(), -1);
++          UPDATE_PC_AND_CONTINUE(1);
++      }
++
++      /* monitorenter and monitorexit for locking/unlocking an object */
++
++      CASE(_monitorenter): {
++        oop lockee = STACK_OBJECT(-1);
++        // dereferencing lockee ought to provoke an implicit null check
++        CHECK_NULL(lockee);
++        // Find a free monitor or one already allocated for this object;
++        // if we find a matching object then we need a new monitor
++        // since this is a recursive enter.
++        BasicObjectLock* limit = istate->monitor_base();
++        BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
++        BasicObjectLock* entry = NULL;
++        while (most_recent != limit ) {
++          if (most_recent->obj() == NULL) entry = most_recent;
++          else if (most_recent->obj() == lockee) break;
++          most_recent++;
++        }
++        if (entry != NULL) {
++          entry->set_obj(lockee);
++          markOop displaced = lockee->mark()->set_unlocked();
++          entry->lock()->set_displaced_header(displaced);
++          if (Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
++            // Is it simple recursive case?
++            if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
++              entry->lock()->set_displaced_header(NULL);
++            } else {
++              CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
++            }
++          }
++          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
++        } else {
++          istate->set_msg(more_monitors);
++          UPDATE_PC_AND_RETURN(0); // Re-execute
++        }
++      }
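++
++      /* When no free slot is found, more_monitors asks the frame manager
++       * to grow the monitor area; UPDATE_PC_AND_RETURN(0) leaves pc on this
++       * monitorenter so it is re-executed once the new slot exists. */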
++
++      CASE(_monitorexit): {
++        oop lockee = STACK_OBJECT(-1);
++        CHECK_NULL(lockee);
++        // dereferencing lockee ought to provoke an implicit null check
++        // find our monitor slot
++        BasicObjectLock* limit = istate->monitor_base();
++        BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
++        while (most_recent != limit ) {
++          if ((most_recent)->obj() == lockee) {
++            BasicLock* lock = most_recent->lock();
++            markOop header = lock->displaced_header();
++            most_recent->set_obj(NULL);
++            // If it isn't recursive, we must either swap the old header or call the runtime
++            if (header != NULL) {
++              if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
++                // restore object for the slow case
++                most_recent->set_obj(lockee);
++                CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
++              }
++            }
++            UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
++          }
++          most_recent++;
++        }
++        // Need to throw illegal monitor state exception
++        CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception);
++        // Should never reach here...
++        assert(false, "Should have thrown illegal monitor exception");
++      }
++
++      /* All of the non-quick opcodes. */
++
++      /* Set clobbersCpIndex true if the quickened opcode clobbers the
++       *  constant pool index in the instruction.
++       */
++      CASE(_getfield):
++      CASE(_getstatic):
++        {
++          u2 index;
++          ConstantPoolCacheEntry* cache;
++          index = Bytes::get_native_u2(pc+1);
++
++          // QQQ Need to make this as inlined as possible. Probably need to
++          // split all the bytecode cases out so the C++ compiler has a chance
++          // for constant propagation to fold everything possible away.
++
++          cache = cp->entry_at(index);
++          if (!cache->is_resolved((Bytecodes::Code)opcode)) {
++            CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode),
++                    handle_exception);
++            cache = cp->entry_at(index);
++          }
++
++#ifdef VM_JVMTI
++          if (_jvmti_interp_events) {
++            int *count_addr;
++            oop obj;
++            // Check to see if a field access watch has been set
++            // before we take the time to call into the VM.
++            count_addr = (int *)JvmtiExport::get_field_access_count_addr();
++            if ( *count_addr > 0 ) {
++              if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
++                obj = (oop)NULL;
++              } else {
++                obj = (oop) STACK_OBJECT(-1);
++              }
++              CALL_VM(InterpreterRuntime::post_field_access(THREAD,
++                                          obj,
++                                          cache),
++                                          handle_exception);
++            }
++          }
++#endif /* VM_JVMTI */
++
++          oop obj;
++          if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
++            obj = (oop) cache->f1();
++            MORE_STACK(1);  // Assume single slot push
++          } else {
++            obj = (oop) STACK_OBJECT(-1);
++            CHECK_NULL(obj);
++          }
++
++          //
++          // Now store the result on the stack
++          //
++          TosState tos_type = cache->flag_state();
++          int field_offset = cache->f2();
++          if (cache->is_volatile()) {
++            if (tos_type == atos) {
++              SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1);
++            } else if (tos_type == itos) {
++              SET_STACK_INT(obj->int_field_acquire(field_offset), -1);
++            } else if (tos_type == ltos) {
++              SET_STACK_LONG(obj->long_field_acquire(field_offset), 0);
++              MORE_STACK(1);
++            } else if (tos_type == btos) {
++              SET_STACK_INT(obj->byte_field_acquire(field_offset), -1);
++            } else if (tos_type == ctos) {
++              SET_STACK_INT(obj->char_field_acquire(field_offset), -1);
++            } else if (tos_type == stos) {
++              SET_STACK_INT(obj->short_field_acquire(field_offset), -1);
++            } else if (tos_type == ftos) {
++              SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1);
++            } else {
++              SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0);
++              MORE_STACK(1);
++            }
++          } else {
++            if (tos_type == atos) {
++              SET_STACK_OBJECT(obj->obj_field(field_offset), -1);
++            } else if (tos_type == itos) {
++              SET_STACK_INT(obj->int_field(field_offset), -1);
++            } else if (tos_type == ltos) {
++              SET_STACK_LONG(obj->long_field(field_offset), 0);
++              MORE_STACK(1);
++            } else if (tos_type == btos) {
++              SET_STACK_INT(obj->byte_field(field_offset), -1);
++            } else if (tos_type == ctos) {
++              SET_STACK_INT(obj->char_field(field_offset), -1);
++            } else if (tos_type == stos) {
++              SET_STACK_INT(obj->short_field(field_offset), -1);
++            } else if (tos_type == ftos) {
++              SET_STACK_FLOAT(obj->float_field(field_offset), -1);
++            } else {
++              SET_STACK_DOUBLE(obj->double_field(field_offset), 0);
++              MORE_STACK(1);
++            }
++          }
++
++          UPDATE_PC_AND_CONTINUE(3);
++         }
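++
++      /* For a volatile read the *_field_acquire accessors above give the
++       * load-acquire half of the protocol; the matching StoreLoad barrier
++       * is issued on the volatile write side (see putfield below). */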
++
++      CASE(_putfield):
++      CASE(_putstatic):
++        {
++          u2 index = Bytes::get_native_u2(pc+1);
++          ConstantPoolCacheEntry* cache = cp->entry_at(index);
++          if (!cache->is_resolved((Bytecodes::Code)opcode)) {
++            CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode),
++                    handle_exception);
++            cache = cp->entry_at(index);
++          }
++
++#ifdef VM_JVMTI
++          if (_jvmti_interp_events) {
++            int *count_addr;
++            oop obj;
++            // Check to see if a field modification watch has been set
++            // before we take the time to call into the VM.
++            count_addr = (int *)JvmtiExport::get_field_modification_count_addr();
++            if ( *count_addr > 0 ) {
++              if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
++                obj = (oop)NULL;
++              }
++              else {
++                if (cache->is_long() || cache->is_double()) {
++                  obj = (oop) STACK_OBJECT(-3);
++                } else {
++                  obj = (oop) STACK_OBJECT(-2);
++                }
++              }
++
++              CALL_VM(InterpreterRuntime::post_field_modification(THREAD,
++                                          obj,
++                                          cache,
++                                          (jvalue *)STACK_SLOT(-1)),
++                                          handle_exception);
++            }
++          }
++#endif /* VM_JVMTI */
++
++          // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
++          // out so the C++ compiler has a chance for constant propagation to fold everything possible away.
++
++          oop obj;
++          int count;
++          TosState tos_type = cache->flag_state();
++
++          count = -1;
++          if (tos_type == ltos || tos_type == dtos) {
++            --count;
++          }
++          if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
++            obj = (oop) cache->f1();
++          } else {
++            --count;
++            obj = (oop) STACK_OBJECT(count);
++            CHECK_NULL(obj);
++          }
++
++          //
++          // Now store the result
++          //
++          int field_offset = cache->f2();
++          if (cache->is_volatile()) {
++            if (tos_type == itos) {
++              obj->release_int_field_put(field_offset, STACK_INT(-1));
++            } else if (tos_type == atos) {
++              obj->release_obj_field_put(field_offset, STACK_OBJECT(-1));
++              OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)obj >> CardTableModRefBS::card_shift], 0);
++            } else if (tos_type == btos) {
++              obj->release_byte_field_put(field_offset, STACK_INT(-1));
++            } else if (tos_type == ltos) {
++              obj->release_long_field_put(field_offset, STACK_LONG(-1));
++            } else if (tos_type == ctos) {
++              obj->release_char_field_put(field_offset, STACK_INT(-1));
++            } else if (tos_type == stos) {
++              obj->release_short_field_put(field_offset, STACK_INT(-1));
++            } else if (tos_type == ftos) {
++              obj->release_float_field_put(field_offset, STACK_FLOAT(-1));
++            } else {
++              obj->release_double_field_put(field_offset, STACK_DOUBLE(-1));
++            }
++            OrderAccess::storeload();
++          } else {
++            if (tos_type == itos) {
++              obj->int_field_put(field_offset, STACK_INT(-1));
++            } else if (tos_type == atos) {
++              obj->obj_field_put(field_offset, STACK_OBJECT(-1));
++              OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)obj >> CardTableModRefBS::card_shift], 0);
++            } else if (tos_type == btos) {
++              obj->byte_field_put(field_offset, STACK_INT(-1));
++            } else if (tos_type == ltos) {
++              obj->long_field_put(field_offset, STACK_LONG(-1));
++            } else if (tos_type == ctos) {
++              obj->char_field_put(field_offset, STACK_INT(-1));
++            } else if (tos_type == stos) {
++              obj->short_field_put(field_offset, STACK_INT(-1));
++            } else if (tos_type == ftos) {
++              obj->float_field_put(field_offset, STACK_FLOAT(-1));
++            } else {
++              obj->double_field_put(field_offset, STACK_DOUBLE(-1));
++            }
++          }
++
++          UPDATE_PC_AND_TOS_AND_CONTINUE(3, count);
++        }
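++
++      /* For a volatile store the release_*_put call gives release
++       * semantics, and the trailing OrderAccess::storeload() adds the
++       * StoreLoad barrier the Java memory model requires between a volatile
++       * write and a subsequent volatile read. */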
++
++      CASE(_new): {
++        u2 index = Bytes::get_Java_u2(pc+1);
++        constantPoolOop constants = istate->method()->constants();
++        if (!constants->tag_at(index).is_unresolved_klass()) {
++          // Make sure klass is initialized and doesn't have a finalizer
++          oop entry = (klassOop) *constants->obj_at_addr(index);
++          assert(entry->is_klass(), "Should be resolved klass");
++          klassOop k_entry = (klassOop) entry;
++          assert(k_entry->klass_part()->oop_is_instance(), "Should be instanceKlass");
++          instanceKlass* ik = (instanceKlass*) k_entry->klass_part();
++          if ( ik->is_initialized() && ik->can_be_fastpath_allocated() ) {
++            size_t obj_size = ik->size_helper();
++            oop result = NULL;
++            // If the TLAB isn't pre-zeroed then we'll have to do it
++            bool need_zero = !ZeroTLAB;
++            if (UseTLAB) {
++              result = (oop) THREAD->tlab().allocate(obj_size);
++            }
++            if (result == NULL) {
++              need_zero = true;
++              // Try allocate in shared eden
++        retry:
++              HeapWord* compare_to = *Universe::heap()->top_addr();
++              HeapWord* new_top = compare_to + obj_size;
++              if (new_top <= *Universe::heap()->end_addr()) {
++                if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
++                  goto retry;
++                }
++                result = (oop) compare_to;
++              }
++            }
++            if (result != NULL) {
++              // Initialize the object body (if it needs zeroing and has nonzero size) and then the header
++              if (need_zero ) {
++                HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize;
++                obj_size -= sizeof(oopDesc) / oopSize;
++                if (obj_size > 0 ) {
++                  memset(to_zero, 0, obj_size * HeapWordSize);
++                }
++              }
++              if (UseBiasedLocking) {
++                result->set_mark(ik->prototype_header());
++              } else {
++                result->set_mark(markOopDesc::prototype());
++              }
++              result->set_klass(k_entry);
++              SET_STACK_OBJECT(result, 0);
++              UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
++            }
++          }
++        }
++        // Slow case allocation
++        CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index),
++                handle_exception);
++        SET_STACK_OBJECT(THREAD->vm_result(), 0);
++        THREAD->set_vm_result(NULL);
++        UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
++      }
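++
++      /* Fast-path allocation: first a bump-pointer allocation in the
++       * thread's TLAB (no synchronization needed), then a cmpxchg loop on
++       * the shared eden top. With -XX:+ZeroTLAB the TLAB memory is
++       * pre-zeroed, so only the shared-eden path forces need_zero. */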
++      CASE(_anewarray): {
++        u2 index = Bytes::get_Java_u2(pc+1);
++        jint size = STACK_INT(-1);
++        CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size),
++                handle_exception);
++        SET_STACK_OBJECT(THREAD->vm_result(), -1);
++        THREAD->set_vm_result(NULL);
++        UPDATE_PC_AND_CONTINUE(3);
++      }
++      CASE(_multianewarray): {
++        jint dims = *(pc+3);
++        jint size = STACK_INT(-1);
++        // stack grows down, dimensions are up!
++        jint *dimarray =
++                   (jint*)&topOfStack[dims * Interpreter::stackElementWords()+
++                                      Interpreter::stackElementWords()-1];
++        // adjust pointer to start of stack element
++        CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray),
++                handle_exception);
++        SET_STACK_OBJECT(THREAD->vm_result(), -dims);
++        THREAD->set_vm_result(NULL);
++        UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1));
++      }
++      CASE(_checkcast):
++          if (STACK_OBJECT(-1) != NULL) {
++            u2 index = Bytes::get_Java_u2(pc+1);
++            if (ProfileInterpreter) {
++              // needs Profile_checkcast QQQ
++              ShouldNotReachHere();
++            }
++            // Constant pool may have actual klass or unresolved klass. If it is
++            // unresolved we must resolve it
++            if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
++              CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
++            }
++            klassOop klassOf = (klassOop) *(METHOD->constants()->obj_at_addr(index));
++            klassOop objKlassOop = STACK_OBJECT(-1)->klass(); //ebx
++            //
++            // Check for compatibility. This check must not GC!!
++            // Seems way more expensive now that we must dispatch
++            //
++            if (objKlassOop != klassOf &&
++                !objKlassOop->klass_part()->is_subtype_of(klassOf)) {
++              ResourceMark rm(THREAD);
++              const char* objName = Klass::cast(objKlassOop)->external_name();
++              const char* klassName = Klass::cast(klassOf)->external_name();
++              char* message = SharedRuntime::generate_class_cast_message(
++                objName, klassName);
++              VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message);
++            }
++          } else {
++            if (UncommonNullCast) {
++//              istate->method()->set_null_cast_seen();
++// [RGV] Not sure what to do here!
++
++            }
++          }
++          UPDATE_PC_AND_CONTINUE(3);
++
++      CASE(_instanceof):
++          if (STACK_OBJECT(-1) == NULL) {
++            SET_STACK_INT(0, -1);
++          } else {
++            u2 index = Bytes::get_Java_u2(pc+1);
++            // Constant pool may have actual klass or unresolved klass. If it is
++            // unresolved we must resolve it
++            if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
++              CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
++            }
++            klassOop klassOf = (klassOop) *(METHOD->constants()->obj_at_addr(index));
++            klassOop objKlassOop = STACK_OBJECT(-1)->klass();
++            //
++            // Check for compatibility. This check must not GC!!
++            // Seems way more expensive now that we must dispatch
++            //
++            if ( objKlassOop == klassOf || objKlassOop->klass_part()->is_subtype_of(klassOf)) {
++              SET_STACK_INT(1, -1);
++            } else {
++              SET_STACK_INT(0, -1);
++            }
++          }
++          UPDATE_PC_AND_CONTINUE(3);
++
++      CASE(_ldc_w):
++      CASE(_ldc):
++        {
++          u2 index;
++          bool wide = false;
++          int incr = 2; // frequent case
++          if (opcode == Bytecodes::_ldc) {
++            index = pc[1];
++          } else {
++            index = Bytes::get_Java_u2(pc+1);
++            incr = 3;
++            wide = true;
++          }
++
++          constantPoolOop constants = METHOD->constants();
++          switch (constants->tag_at(index).value()) {
++          case JVM_CONSTANT_Integer:
++            SET_STACK_INT(constants->int_at(index), 0);
++            break;
++
++          case JVM_CONSTANT_Float:
++            SET_STACK_FLOAT(constants->float_at(index), 0);
++            break;
++
++          case JVM_CONSTANT_String:
++            SET_STACK_OBJECT(constants->resolved_string_at(index), 0);
++            break;
++
++          case JVM_CONSTANT_Class:
++            SET_STACK_OBJECT(constants->resolved_klass_at(index)->klass_part()->java_mirror(), 0);
++            break;
++
++          case JVM_CONSTANT_UnresolvedString:
++          case JVM_CONSTANT_UnresolvedClass:
++          case JVM_CONSTANT_UnresolvedClassInError:
++            CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception);
++            SET_STACK_OBJECT(THREAD->vm_result(), 0);
++            THREAD->set_vm_result(NULL);
++            break;
++
++#if 0
++          CASE(_fast_igetfield):
++          CASE(_fast_agetfield):
++          CASE(_fast_aload_0):
++          CASE(_fast_iaccess_0):
++          CASE(_fast_aaccess_0):
++          CASE(_fast_linearswitch):
++          CASE(_fast_binaryswitch):
++            fatal("unsupported fast bytecode");
++#endif
++
++          default:  ShouldNotReachHere();
++          }
++          UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
++        }
++
++      CASE(_ldc2_w):
++        {
++          u2 index = Bytes::get_Java_u2(pc+1);
++
++          constantPoolOop constants = METHOD->constants();
++          switch (constants->tag_at(index).value()) {
++
++          case JVM_CONSTANT_Long:
++             SET_STACK_LONG(constants->long_at(index), 1);
++            break;
++
++          case JVM_CONSTANT_Double:
++             SET_STACK_DOUBLE(constants->double_at(index), 1);
++            break;
++          default:  ShouldNotReachHere();
++          }
++          UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2);
++        }
++
++      CASE(_invokeinterface): {
++        u2 index = Bytes::get_native_u2(pc+1);
++
++        // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
++        // out so the C++ compiler has a chance for constant propagation to fold everything possible away.
++
++        ConstantPoolCacheEntry* cache = cp->entry_at(index);
++        if (!cache->is_resolved((Bytecodes::Code)opcode)) {
++          CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode),
++                  handle_exception);
++          cache = cp->entry_at(index);
++        }
++
++        istate->set_msg(call_method);
++
++        // Special case of invokeinterface called for virtual method of
++        // java.lang.Object.  See cpCacheOop.cpp for details.
++        // This code isn't produced by javac, but could be produced by
++        // another compliant java compiler.
++        if (cache->is_methodInterface()) {
++          methodOop callee;
++          CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
++          if (cache->is_vfinal()) {
++            callee = (methodOop) cache->f2();
++          } else {
++            // get receiver
++            int parms = cache->parameter_size();
++            // Same comments as invokevirtual apply here
++            instanceKlass* rcvrKlass = (instanceKlass*)
++                                 STACK_OBJECT(-parms)->klass()->klass_part();
++            callee = (methodOop) rcvrKlass->start_of_vtable()[ cache->f2()];
++          }
++          istate->set_callee(callee);
++          istate->set_callee_entry_point(callee->from_interpreted_entry());
++#ifdef VM_JVMTI
++          if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
++            istate->set_callee_entry_point(callee->interpreter_entry());
++          }
++#endif /* VM_JVMTI */
++          istate->set_bcp_advance(5);
++          UPDATE_PC_AND_RETURN(0); // I'll be back...
++        }
++
++        // this could definitely be cleaned up QQQ
++        methodOop callee;
++        klassOop iclass = (klassOop)cache->f1();
++        // instanceKlass* interface = (instanceKlass*) iclass->klass_part();
++        // get receiver
++        int parms = cache->parameter_size();
++        oop rcvr = STACK_OBJECT(-parms);
++        CHECK_NULL(rcvr);
++        instanceKlass* int2 = (instanceKlass*) rcvr->klass()->klass_part();
++        itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable();
++        int i;
++        for ( i = 0 ; i < int2->itable_length() ; i++, ki++ ) {
++          if (ki->interface_klass() == iclass) break;
++        }
++        // If the interface isn't found, this class doesn't implement this
++        // interface.  The link resolver checks this but only for the first
++        // time this interface is called.
++        if (i == int2->itable_length()) {
++          VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "");
++        }
++        int mindex = cache->f2();
++        itableMethodEntry* im = ki->first_method_entry(rcvr->klass());
++        callee = im[mindex].method();
++        if (callee == NULL) {
++          VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), "");
++        }
++
++        istate->set_callee(callee);
++        istate->set_callee_entry_point(callee->from_interpreted_entry());
++#ifdef VM_JVMTI
++        if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
++          istate->set_callee_entry_point(callee->interpreter_entry());
++        }
++#endif /* VM_JVMTI */
++        istate->set_bcp_advance(5);
++        UPDATE_PC_AND_RETURN(0); // I'll be back...
++      }
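++
++      /* The generic path scans the receiver's itable for the offset entry
++       * matching the interface klass in f1 (IncompatibleClassChangeError if
++       * the class doesn't implement it), then indexes that interface's
++       * method block with f2; a NULL entry means the selected method is
++       * abstract (AbstractMethodError). */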
++
++      CASE(_invokevirtual):
++      CASE(_invokespecial):
++      CASE(_invokestatic): {
++        u2 index = Bytes::get_native_u2(pc+1);
++
++        ConstantPoolCacheEntry* cache = cp->entry_at(index);
++        // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
++        // out so the C++ compiler has a chance for constant propagation to fold everything possible away.
++
++        if (!cache->is_resolved((Bytecodes::Code)opcode)) {
++          CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode),
++                  handle_exception);
++          cache = cp->entry_at(index);
++        }
++
++        istate->set_msg(call_method);
++        {
++          methodOop callee;
++          if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) {
++            CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
++            if (cache->is_vfinal()) callee = (methodOop) cache->f2();
++            else {
++              // get receiver
++              int parms = cache->parameter_size();
++              // this works but needs a resourcemark and seems to create a vtable on every call:
++              // methodOop callee = rcvr->klass()->klass_part()->vtable()->method_at(cache->f2());
++              //
++              // this fails with an assert
++              // instanceKlass* rcvrKlass = instanceKlass::cast(STACK_OBJECT(-parms)->klass());
++              // but this works
++              instanceKlass* rcvrKlass = (instanceKlass*) STACK_OBJECT(-parms)->klass()->klass_part();
++              /*
++                Executing this code in java.lang.String:
++                    public String(char value[]) {
++                          this.count = value.length;
++                          this.value = (char[])value.clone();
++                     }
++
++                 a find on rcvr->klass()->klass_part() reports:
++                 {type array char}{type array class}
++                  - klass: {other class}
++
++                  but using instanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes an assertion failure
++                  because rcvr->klass()->klass_part()->oop_is_instance() == 0
++                  However it seems to have a vtable in the right location. Huh?
++
++              */
++              callee = (methodOop) rcvrKlass->start_of_vtable()[ cache->f2()];
++            }
++          } else {
++            if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) {
++              CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
++            }
++            callee = (methodOop) cache->f1();
++          }
++
++          istate->set_callee(callee);
++          istate->set_callee_entry_point(callee->from_interpreted_entry());
++#ifdef VM_JVMTI
++          if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
++            istate->set_callee_entry_point(callee->interpreter_entry());
++          }
++#endif /* VM_JVMTI */
++          istate->set_bcp_advance(3);
++          UPDATE_PC_AND_RETURN(0); // I'll be back...
++        }
++      }
++
++      /* Allocate memory for a new array. */
++
++      CASE(_newarray): {
++        BasicType atype = (BasicType) *(pc+1);
++        jint size = STACK_INT(-1);
++        CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size),
++                handle_exception);
++        SET_STACK_OBJECT(THREAD->vm_result(), -1);
++        THREAD->set_vm_result(NULL);
++
++        UPDATE_PC_AND_CONTINUE(2);
++      }
++
++      /* Throw an exception. */
++
++      CASE(_athrow): {
++          oop except_oop = STACK_OBJECT(-1);
++          CHECK_NULL(except_oop);
++          // set pending_exception so we use common code
++          THREAD->set_pending_exception(except_oop, NULL, 0);
++          goto handle_exception;
++      }
++
++      /* goto and jsr. They are exactly the same except jsr pushes
++       * the address of the next instruction first.
++       */
++
++      CASE(_jsr): {
++          /* push bytecode index on stack */
++          SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0);
++          MORE_STACK(1);
++          /* FALL THROUGH */
++      }
++
++      CASE(_goto):
++      {
++          int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1);
++          address branch_pc = pc;
++          UPDATE_PC(offset);
++          DO_BACKEDGE_CHECKS(offset, branch_pc);
++          CONTINUE;
++      }
++
++      CASE(_jsr_w): {
++          /* push return address on the stack */
++          SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 5), 0);
++          MORE_STACK(1);
++          /* FALL THROUGH */
++      }
++
++      CASE(_goto_w):
++      {
++          int32_t offset = Bytes::get_Java_u4(pc + 1);
++          address branch_pc = pc;
++          UPDATE_PC(offset);
++          DO_BACKEDGE_CHECKS(offset, branch_pc);
++          CONTINUE;
++      }
++
++      /* return from a jsr or jsr_w */
++
++      CASE(_ret): {
++          pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1]));
++          UPDATE_PC_AND_CONTINUE(0);
++      }
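++
++      /* jsr/ret were used by older compilers for try/finally: jsr pushes
++       * the returnAddress of the following instruction and branches to the
++       * subroutine, which astores it into a local; ret reloads that local
++       * (via LOCALS_ADDR above) and resumes after the jsr. */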
++
++      /* debugger breakpoint */
++
++      CASE(_breakpoint): {
++          Bytecodes::Code original_bytecode;
++          DECACHE_STATE();
++          SET_LAST_JAVA_FRAME();
++          original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD,
++                              METHOD, pc);
++          RESET_LAST_JAVA_FRAME();
++          CACHE_STATE();
++          if (THREAD->has_pending_exception()) goto handle_exception;
++          CALL_VM(InterpreterRuntime::_breakpoint(THREAD, METHOD, pc),
++                  handle_exception);
++
++          opcode = (jubyte)original_bytecode;
++          goto opcode_switch;
++      }
++
++      DEFAULT:
++          fatal2("\t*** Unimplemented opcode: %d = %s\n",
++                 opcode, Bytecodes::name((Bytecodes::Code)opcode));
++          goto finish;
++
++      } /* switch(opc) */
++
++
++#ifdef USELABELS
++    check_for_exception:
++#endif
++    {
++      if (!THREAD->has_pending_exception()) {
++        CONTINUE;
++      }
++      /* We will be gcsafe soon, so flush our state. */
++      DECACHE_PC();
++      goto handle_exception;
++    }
++  do_continue: ;
++
++  } /* while (1) interpreter loop */
++
++
++  // An exception exists in the thread state; see whether this activation can handle it
++  handle_exception: {
++
++    HandleMarkCleaner __hmc(THREAD);
++    Handle except_oop(THREAD, THREAD->pending_exception());
++    // Prevent any subsequent HandleMarkCleaner in the VM
++    // from freeing the except_oop handle.
++    HandleMark __hm(THREAD);
++
++    THREAD->clear_pending_exception();
++    assert(except_oop(), "No exception to process");
++    intptr_t continuation_bci;
++    // expression stack is emptied
++    topOfStack = istate->stack_base() - Interpreter::stackElementWords();
++    CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()),
++            handle_exception);
++
++    except_oop = (oop) THREAD->vm_result();
++    THREAD->set_vm_result(NULL);
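++    // continuation_bci >= 0 means a handler in this method covers the
++    // current bci: push the exception and resume there. Otherwise the
++    // exception is reinstalled as pending and we unwind to the caller
++    // via handle_return.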
++    if (continuation_bci >= 0) {
++      // Place exception on top of stack
++      SET_STACK_OBJECT(except_oop(), 0);
++      MORE_STACK(1);
++      pc = METHOD->code_base() + continuation_bci;
++      if (TraceExceptions) {
++        ttyLocker ttyl;
++        ResourceMark rm;
++        tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), except_oop());
++        tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
++        tty->print_cr(" at bci %d, continuing at %d for thread " INTPTR_FORMAT,
++                      pc - (intptr_t)METHOD->code_base(),
++                      continuation_bci, THREAD);
++      }
++      // for AbortVMOnException flag
++      NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
++      goto run;
++    }
++    if (TraceExceptions) {
++      ttyLocker ttyl;
++      ResourceMark rm;
++      tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), except_oop());
++      tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
++      tty->print_cr(" at bci %d, unwinding for thread " INTPTR_FORMAT,
++                    pc  - (intptr_t) METHOD->code_base(),
++                    THREAD);
++    }
++    // for AbortVMOnException flag
++    NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
++    // No handler in this activation, unwind and try again
++    THREAD->set_pending_exception(except_oop(), NULL, 0);
++    goto handle_return;
++  }  /* handle_exception: */
++
++
++
++  // Return from an interpreter invocation with the result of the interpretation
++  // on the top of the Java Stack (or a pending exception)
++
++handle_Pop_Frame:
++
++  // We don't really do anything special here except we must be aware
++  // that we can get here without ever locking the method (if it is
++  // synchronized). Also we skip the notification of the exit.
++
++  istate->set_msg(popping_frame);
++  // Clear the pending flag so that, while the pop is in process,
++  // we don't start another one if a call_vm is done.
++  THREAD->clr_pop_frame_pending();
++  // Let the interpreter (only) see that we're in the process of popping a frame
++  THREAD->set_pop_frame_in_process();
++
++handle_return:
++  {
++    DECACHE_STATE();
++
++    bool suppress_error = istate->msg() == popping_frame;
++    bool suppress_exit_event = THREAD->has_pending_exception() || suppress_error;
++    Handle original_exception(THREAD, THREAD->pending_exception());
++    Handle illegal_state_oop(THREAD, NULL);
++
++    // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner
++    // in any following VM entries from freeing our live handles, but illegal_state_oop
++    // isn't really allocated yet and so doesn't become live until later, and
++    // in unpredictable places. Instead we must protect the places where we enter the
++    // VM. It would be much simpler (and safer) if we could allocate a real handle with
++    // a NULL oop in it and then overwrite the oop later as needed; unfortunately,
++    // this isn't possible.
++
++    THREAD->clear_pending_exception();
++
++    //
++    // As far as we are concerned, we have returned. If we have a pending exception,
++    // it will be returned as this invocation's result. However, if we get any
++    // exception(s) while checking monitor state, one of those IllegalMonitorStateExceptions
++    // will be our final result (i.e. monitor exception trumps a pending exception).
++    //
++
++    // If we never locked the method (or really passed the point where we would have),
++    // there is no need to unlock it (or look for other monitors), since that
++    // could not have happened.
++
++    if (THREAD->do_not_unlock()) {
++
++      // Never locked, reset the flag now because obviously any caller must
++      // have passed their point of locking for us to have gotten here.
++
++      THREAD->clr_do_not_unlock();
++    } else {
++      // At this point we consider that we have returned. We now check that the
++      // locks were properly block structured. If we find that they were not
++      // used properly we will return with an illegal monitor exception.
++      // The exception is checked by the caller not the callee since this
++      // checking is considered to be part of the invocation and therefore
++      // in the caller's scope (JVM spec 8.13).
++      //
++      // Another weird thing to watch for is if the method was locked
++      // recursively and then not exited properly. This means we must
++      // examine all the entries in reverse time (and stack) order and
++      // unlock as we find them. If we find the method monitor before
++      // we are at the initial entry then we should throw an exception.
++      // It is not clear that the template-based interpreter does this
++      // correctly.
++
++      BasicObjectLock* base = istate->monitor_base();
++      BasicObjectLock* end = (BasicObjectLock*) istate->stack_base();
++      bool method_unlock_needed = METHOD->is_synchronized();
++      // We know the initial monitor was used for the method, so don't check
++      // that slot in the loop.
++      if (method_unlock_needed) base--;
++
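++      // Layout, as implied by the pointers above: monitors grow down from
++      // monitor_base() towards stack_base(), so the method monitor (the
++      // oldest) sits at monitor_base()-1 and the most recent monitorenter
++      // sits at stack_base(); the end++ scan therefore walks newest to oldest.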
++      // Check all the monitors to see they are unlocked. Install exception if found to be locked.
++      while (end < base) {
++        oop lockee = end->obj();
++        if (lockee != NULL) {
++          BasicLock* lock = end->lock();
++          markOop header = lock->displaced_header();
++          end->set_obj(NULL);
++          // If it isn't recursive we either must swap old header or call the runtime
++          if (header != NULL) {
++            if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
++              // restore object for the slow case
++              end->set_obj(lockee);
++              {
++                // Prevent any HandleMarkCleaner from freeing our live handles
++                HandleMark __hm(THREAD);
++                CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end));
++              }
++            }
++          }
++          // One error is plenty
++          if (illegal_state_oop() == NULL && !suppress_error) {
++            {
++              // Prevent any HandleMarkCleaner from freeing our live handles
++              HandleMark __hm(THREAD);
++              CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
++            }
++            assert(THREAD->has_pending_exception(), "Lost our exception!");
++            illegal_state_oop = THREAD->pending_exception();
++            THREAD->clear_pending_exception();
++          }
++        }
++        end++;
++      }
++      // Unlock the method if needed
++      if (method_unlock_needed) {
++        if (base->obj() == NULL) {
++          // The method is already unlocked; this is not good.
++          if (illegal_state_oop() == NULL && !suppress_error) {
++            {
++              // Prevent any HandleMarkCleaner from freeing our live handles
++              HandleMark __hm(THREAD);
++              CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
++            }
++            assert(THREAD->has_pending_exception(), "Lost our exception!");
++            illegal_state_oop = THREAD->pending_exception();
++            THREAD->clear_pending_exception();
++          }
++        } else {
++          //
++          // The initial monitor is always used for the method.
++          // However, if that slot no longer holds the oop for the method, it was
++          // unlocked and reused by something that wasn't unlocked!
++          //
++          // Deopt can come in with rcvr dead because C2 knows
++          // its value is preserved in the monitor, so we can't use locals[0] at all
++          // and must use the first monitor slot.
++          //
++          oop rcvr = base->obj();
++          if (rcvr == NULL) {
++            if (!suppress_error) {
++              VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "");
++              illegal_state_oop = THREAD->pending_exception();
++              THREAD->clear_pending_exception();
++            }
++          } else {
++            BasicLock* lock = base->lock();
++            markOop header = lock->displaced_header();
++            base->set_obj(NULL);
++            // If it isn't recursive we either must swap old header or call the runtime
++            if (header != NULL) {
++              if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
++                // restore object for the slow case
++                base->set_obj(rcvr);
++                {
++                  // Prevent any HandleMarkCleaner from freeing our live handles
++                  HandleMark __hm(THREAD);
++                  CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
++                }
++                if (THREAD->has_pending_exception()) {
++                  if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
++                  THREAD->clear_pending_exception();
++                }
++              }
++            }
++          }
++        }
++      }
++    }
++
++    //
++    // Notify jvmti/jvmdi
++    //
++    // NOTE: we do not notify a method_exit if we have a pending exception,
++    // including an exception we generate for unlocking checks.  In the former
++    // case, JVMDI has already been notified by our call for the exception handler
++    // and in both cases as far as JVMDI is concerned we have already returned.
++    // If we notify it again JVMDI will be all confused about how many frames
++    // are still on the stack (4340444).
++    //
++    // NOTE Further! It turns out that the JVMTI spec in fact expects to see
++    // method_exit events whenever we leave an activation unless it was done
++    // for popframe. This is nothing like JVMDI. However we are passing the
++    // tests at the moment (apparently because they are JVMDI-based) so rather
++    // than change this code and possibly fail tests we will leave it alone
++    // (with this note) in anticipation of changing the vm and the tests
++    // simultaneously.
++
++    suppress_exit_event = suppress_exit_event || illegal_state_oop() != NULL;
++
++#ifdef VM_JVMTI
++    if (_jvmti_interp_events) {
++      // Whenever JVMTI puts a thread in interp_only_mode, method
++      // entry/exit events are sent for that thread to track stack depth.
++      if (!suppress_exit_event && THREAD->is_interp_only_mode()) {
++        {
++          // Prevent any HandleMarkCleaner from freeing our live handles
++          HandleMark __hm(THREAD);
++          CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD));
++        }
++      }
++    }
++#endif /* VM_JVMTI */
++
++    //
++    // See if we are returning any exception
++    // An exception that was pending prior to a possible popping frame
++    // overrides the popping frame.
++    //
++    assert(!suppress_error || (suppress_error && illegal_state_oop() == NULL), "Error was not suppressed");
++    if (illegal_state_oop() != NULL || original_exception() != NULL) {
++      // inform the frame manager we have no result
++      istate->set_msg(throwing_exception);
++      if (illegal_state_oop() != NULL)
++        THREAD->set_pending_exception(illegal_state_oop(), NULL, 0);
++      else
++        THREAD->set_pending_exception(original_exception(), NULL, 0);
++      istate->set_return_kind((Bytecodes::Code)opcode);
++      UPDATE_PC_AND_RETURN(0);
++    }
++
++    if (istate->msg() == popping_frame) {
++      // Make it simpler on the assembly code: set the message for the frame pop
++      // before we return.
++      if (istate->prev() == NULL) {
++        // We must be returning to a deoptimized frame (because popframe only happens between
++        // two interpreted frames). We need to save the current arguments in C heap so that
++        // the deoptimized frame, when it restarts, can copy the arguments to its
++        // expression stack and re-execute the call. We also have to notify
++        // deoptimization that this has occurred and to pick up the preserved args and
++        // copy them to the deoptimized frame's Java expression stack. Yuck.
++        //
++        THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize),
++                                LOCALS_SLOT(METHOD->size_of_parameters() - 1));
++        THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit);
++      }
++      UPDATE_PC_AND_RETURN(1);
++    } else {
++      // Normal return
++      // Advance the pc and return to frame manager
++      istate->set_msg(return_from_method);
++      istate->set_return_kind((Bytecodes::Code)opcode);
++      UPDATE_PC_AND_RETURN(1);
++    }
++  } /* handle_return: */
++
++// This is really a fatal error return
++
++finish:
++  DECACHE_TOS();
++  DECACHE_PC();
++
++  return;
++}
++
++/*
++ * All the code following this point is only produced once and is not present
++ * in the JVMTI version of the interpreter
++ */
++
++#ifndef VM_JVMTI
++
++// This constructor should only be used to construct the object to signal
++// interpreter initialization. All other instances should be created by
++// the frame manager.
++BytecodeInterpreter::BytecodeInterpreter(messages msg) {
++  if (msg != initialize) ShouldNotReachHere();
++  _msg = msg;
++  _self_link = this;
++  _prev_link = NULL;
++}
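++
++// A minimal usage sketch (assumed bootstrap sequence, not text from this
++// patch): one such object is constructed purely to trigger the interpreter's
++// one-time initialization, e.g.
++//
++//   BytecodeInterpreter start_msg(BytecodeInterpreter::initialize);
++//   BytecodeInterpreter::run(&start_msg);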
++
++// Inline static functions for Java Stack and Local manipulation
++
++// The implementations are platform dependent. We have to worry about alignment
++// issues on some machines, which can vary on the same platform depending on
++// whether it is also an LP64 machine.
++#ifdef ASSERT
++void BytecodeInterpreter::verify_stack_tag(intptr_t *tos, frame::Tag tag, int offset) {
++  if (TaggedStackInterpreter) {
++    frame::Tag t = (frame::Tag)tos[Interpreter::expr_tag_index_at(-offset)];
++    assert(t == tag, "stack tag mismatch");
++  }
++}
++#endif // ASSERT
++
++address BytecodeInterpreter::stack_slot(intptr_t *tos, int offset) {
++  debug_only(verify_stack_tag(tos, frame::TagValue, offset));
++  return (address) tos[Interpreter::expr_index_at(-offset)];
++}
++
++jint BytecodeInterpreter::stack_int(intptr_t *tos, int offset) {
++  debug_only(verify_stack_tag(tos, frame::TagValue, offset));
++  return *((jint*) &tos[Interpreter::expr_index_at(-offset)]);
++}
++
++jfloat BytecodeInterpreter::stack_float(intptr_t *tos, int offset) {
++  debug_only(verify_stack_tag(tos, frame::TagValue, offset));
++  return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]);
++}
++
++oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) {
++  debug_only(verify_stack_tag(tos, frame::TagReference, offset));
++  return (oop)tos[Interpreter::expr_index_at(-offset)];
++}
++
++jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) {
++  debug_only(verify_stack_tag(tos, frame::TagValue, offset));
++  debug_only(verify_stack_tag(tos, frame::TagValue, offset-1));
++  return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d;
++}
++
++jlong BytecodeInterpreter::stack_long(intptr_t *tos, int offset) {
++  debug_only(verify_stack_tag(tos, frame::TagValue, offset));
++  debug_only(verify_stack_tag(tos, frame::TagValue, offset-1));
++  return ((VMJavaVal64 *) &tos[Interpreter::expr_index_at(-offset)])->l;
++}
++
++void BytecodeInterpreter::tag_stack(intptr_t *tos, frame::Tag tag, int offset) {
++  if (TaggedStackInterpreter)
++    tos[Interpreter::expr_tag_index_at(-offset)] = (intptr_t)tag;
++}
++
++// only used for value types
++void BytecodeInterpreter::set_stack_slot(intptr_t *tos, address value,
++                                                        int offset) {
++  tag_stack(tos, frame::TagValue, offset);
++  *((address *)&tos[Interpreter::expr_index_at(-offset)]) = value;
++}
++
++void BytecodeInterpreter::set_stack_int(intptr_t *tos, int value,
++                                                       int offset) {
++  tag_stack(tos, frame::TagValue, offset);
++  *((jint *)&tos[Interpreter::expr_index_at(-offset)]) = value;
++}
++
++void BytecodeInterpreter::set_stack_float(intptr_t *tos, jfloat value,
++                                                         int offset) {
++  tag_stack(tos, frame::TagValue, offset);
++  *((jfloat *)&tos[Interpreter::expr_index_at(-offset)]) = value;
++}
++
++void BytecodeInterpreter::set_stack_object(intptr_t *tos, oop value,
++                                                          int offset) {
++  tag_stack(tos, frame::TagReference, offset);
++  *((oop *)&tos[Interpreter::expr_index_at(-offset)]) = value;
++}
++
++// Needs to be platform dependent for the 32-bit platforms.
++void BytecodeInterpreter::set_stack_double(intptr_t *tos, jdouble value,
++                                                          int offset) {
++  tag_stack(tos, frame::TagValue, offset);
++  tag_stack(tos, frame::TagValue, offset-1);
++  ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = value;
++}
++
++void BytecodeInterpreter::set_stack_double_from_addr(intptr_t *tos,
++                                              address addr, int offset) {
++  tag_stack(tos, frame::TagValue, offset);
++  tag_stack(tos, frame::TagValue, offset-1);
++  (((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d =
++                        ((VMJavaVal64*)addr)->d);
++}
++
++void BytecodeInterpreter::set_stack_long(intptr_t *tos, jlong value,
++                                                        int offset) {
++  tag_stack(tos, frame::TagValue, offset);
++  ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb;
++  tag_stack(tos, frame::TagValue, offset-1);
++  ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = value;
++}
++
++void BytecodeInterpreter::set_stack_long_from_addr(intptr_t *tos,
++                                            address addr, int offset) {
++  tag_stack(tos, frame::TagValue, offset);
++  ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb;
++  tag_stack(tos, frame::TagValue, offset-1);
++  ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l =
++                        ((VMJavaVal64*)addr)->l;
++}
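++
++// Usage sketch for the stack accessors (illustrative; assumes a live
++// topOfStack pointer as maintained by the interpreter loop): a write through
++// set_stack_int at a given offset is read back by stack_int at the same
++// offset, since both index via Interpreter::expr_index_at(-offset).
++//
++//   BytecodeInterpreter::set_stack_int(topOfStack, 42, 0);
++//   jint v = BytecodeInterpreter::stack_int(topOfStack, 0);  // v == 42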
++
++// Locals
++
++#ifdef ASSERT
++void BytecodeInterpreter::verify_locals_tag(intptr_t *locals, frame::Tag tag,
++                                     int offset) {
++  if (TaggedStackInterpreter) {
++    frame::Tag t = (frame::Tag)locals[Interpreter::local_tag_index_at(-offset)];
++    assert(t == tag, "locals tag mismatch");
++  }
++}
++#endif // ASSERT
++address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) {
++  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
++  return (address)locals[Interpreter::local_index_at(-offset)];
++}
++jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) {
++  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
++  return (jint)locals[Interpreter::local_index_at(-offset)];
++}
++jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) {
++  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
++  return (jfloat)locals[Interpreter::local_index_at(-offset)];
++}
++oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) {
++  debug_only(verify_locals_tag(locals, frame::TagReference, offset));
++  return (oop)locals[Interpreter::local_index_at(-offset)];
++}
++jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) {
++  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
++  debug_only(verify_locals_tag(locals, frame::TagValue, offset+1));
++  return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d;
++}
++jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) {
++  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
++  debug_only(verify_locals_tag(locals, frame::TagValue, offset+1));
++  return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l;
++}
++
++// Returns the address of locals value.
++address BytecodeInterpreter::locals_long_at(intptr_t* locals, int offset) {
++  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
++  debug_only(verify_locals_tag(locals, frame::TagValue, offset+1));
++  return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
++}
++address BytecodeInterpreter::locals_double_at(intptr_t* locals, int offset) {
++  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
++  debug_only(verify_locals_tag(locals, frame::TagValue, offset+1));
++  return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
++}
++
++void BytecodeInterpreter::tag_locals(intptr_t *locals, frame::Tag tag, int offset) {
++  if (TaggedStackInterpreter)
++    locals[Interpreter::local_tag_index_at(-offset)] = (intptr_t)tag;
++}
++
++// Used for local value or returnAddress
++void BytecodeInterpreter::set_locals_slot(intptr_t *locals,
++                                   address value, int offset) {
++  tag_locals(locals, frame::TagValue, offset);
++  *((address*)&locals[Interpreter::local_index_at(-offset)]) = value;
++}
++void BytecodeInterpreter::set_locals_int(intptr_t *locals,
++                                   jint value, int offset) {
++  tag_locals(locals, frame::TagValue, offset);
++  *((jint *)&locals[Interpreter::local_index_at(-offset)]) = value;
++}
++void BytecodeInterpreter::set_locals_float(intptr_t *locals,
++                                   jfloat value, int offset) {
++  tag_locals(locals, frame::TagValue, offset);
++  *((jfloat *)&locals[Interpreter::local_index_at(-offset)]) = value;
++}
++void BytecodeInterpreter::set_locals_object(intptr_t *locals,
++                                   oop value, int offset) {
++  tag_locals(locals, frame::TagReference, offset);
++  *((oop *)&locals[Interpreter::local_index_at(-offset)]) = value;
++}
++void BytecodeInterpreter::set_locals_double(intptr_t *locals,
++                                   jdouble value, int offset) {
++  tag_locals(locals, frame::TagValue, offset);
++  tag_locals(locals, frame::TagValue, offset+1);
++  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = value;
++}
++void BytecodeInterpreter::set_locals_long(intptr_t *locals,
++                                   jlong value, int offset) {
++  tag_locals(locals, frame::TagValue, offset);
++  tag_locals(locals, frame::TagValue, offset+1);
++  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = value;
++}
++void BytecodeInterpreter::set_locals_double_from_addr(intptr_t *locals,
++                                   address addr, int offset) {
++  tag_locals(locals, frame::TagValue, offset);
++  tag_locals(locals, frame::TagValue, offset+1);
++  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = ((VMJavaVal64*)addr)->d;
++}
++void BytecodeInterpreter::set_locals_long_from_addr(intptr_t *locals,
++                                   address addr, int offset) {
++  tag_locals(locals, frame::TagValue, offset);
++  tag_locals(locals, frame::TagValue, offset+1);
++  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = ((VMJavaVal64*)addr)->l;
++}
++
++void BytecodeInterpreter::astore(intptr_t* tos,    int stack_offset,
++                          intptr_t* locals, int locals_offset) {
++  // Copy tag from stack to locals.  astore's operand can be returnAddress
++  // and may not be TagReference
++  if (TaggedStackInterpreter) {
++    frame::Tag t = (frame::Tag) tos[Interpreter::expr_tag_index_at(-stack_offset)];
++    locals[Interpreter::local_tag_index_at(-locals_offset)] = (intptr_t)t;
++  }
++  intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)];
++  locals[Interpreter::local_index_at(-locals_offset)] = value;
++}
++
++
++void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset,
++                                   int to_offset) {
++  if (TaggedStackInterpreter) {
++    tos[Interpreter::expr_tag_index_at(-to_offset)] =
++                      (intptr_t)tos[Interpreter::expr_tag_index_at(-from_offset)];
++  }
++  tos[Interpreter::expr_index_at(-to_offset)] =
++                      (intptr_t)tos[Interpreter::expr_index_at(-from_offset)];
++}
++
++void BytecodeInterpreter::dup(intptr_t *tos) {
++  copy_stack_slot(tos, -1, 0);
++}
++void BytecodeInterpreter::dup2(intptr_t *tos) {
++  copy_stack_slot(tos, -2, 0);
++  copy_stack_slot(tos, -1, 1);
++}
++
++void BytecodeInterpreter::dup_x1(intptr_t *tos) {
++  /* insert top word two down */
++  copy_stack_slot(tos, -1, 0);
++  copy_stack_slot(tos, -2, -1);
++  copy_stack_slot(tos, 0, -2);
++}
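++
++// Worked trace of dup_x1 (offsets relative to tos: -1 is the top-of-stack
++// slot, 0 is the first free slot). Starting from ..., v2, v1:
++//   copy -1 -> 0    ..., v2, v1 | v1
++//   copy -2 -> -1   ..., v2, v2 | v1
++//   copy  0 -> -2   ..., v1, v2 | v1
++// The opcode handler is then expected to grow the stack by one slot,
++// yielding ..., v1, v2, v1 as the JVM spec requires.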
++
++void BytecodeInterpreter::dup_x2(intptr_t *tos) {
++  /* insert top word three down  */
++  copy_stack_slot(tos, -1, 0);
++  copy_stack_slot(tos, -2, -1);
++  copy_stack_slot(tos, -3, -2);
++  copy_stack_slot(tos, 0, -3);
++}
++void BytecodeInterpreter::dup2_x1(intptr_t *tos) {
++  /* insert top 2 slots three down */
++  copy_stack_slot(tos, -1, 1);
++  copy_stack_slot(tos, -2, 0);
++  copy_stack_slot(tos, -3, -1);
++  copy_stack_slot(tos, 1, -2);
++  copy_stack_slot(tos, 0, -3);
++}
++void BytecodeInterpreter::dup2_x2(intptr_t *tos) {
++  /* insert top 2 slots four down */
++  copy_stack_slot(tos, -1, 1);
++  copy_stack_slot(tos, -2, 0);
++  copy_stack_slot(tos, -3, -1);
++  copy_stack_slot(tos, -4, -2);
++  copy_stack_slot(tos, 1, -3);
++  copy_stack_slot(tos, 0, -4);
++}
++
++
++void BytecodeInterpreter::swap(intptr_t *tos) {
++  // swap top two elements
++  intptr_t val = tos[Interpreter::expr_index_at(1)];
++  frame::Tag t;
++  if (TaggedStackInterpreter) {
++    t = (frame::Tag) tos[Interpreter::expr_tag_index_at(1)];
++  }
++  // Copy -2 entry to -1
++  copy_stack_slot(tos, -2, -1);
++  // Store saved -1 entry into -2
++  if (TaggedStackInterpreter) {
++    tos[Interpreter::expr_tag_index_at(2)] = (intptr_t)t;
++  }
++  tos[Interpreter::expr_index_at(2)] = val;
++}
++// --------------------------------------------------------------------------------
++// Non-product code
++#ifndef PRODUCT
++
++const char* BytecodeInterpreter::C_msg(BytecodeInterpreter::messages msg) {
++  switch (msg) {
++     case BytecodeInterpreter::no_request:  return("no_request");
++     case BytecodeInterpreter::initialize:  return("initialize");
++     // status message to C++ interpreter
++     case BytecodeInterpreter::method_entry:  return("method_entry");
++     case BytecodeInterpreter::method_resume:  return("method_resume");
++     case BytecodeInterpreter::got_monitors:  return("got_monitors");
++     case BytecodeInterpreter::rethrow_exception:  return("rethrow_exception");
++     // requests to frame manager from C++ interpreter
++     case BytecodeInterpreter::call_method:  return("call_method");
++     case BytecodeInterpreter::return_from_method:  return("return_from_method");
++     case BytecodeInterpreter::more_monitors:  return("more_monitors");
++     case BytecodeInterpreter::throwing_exception:  return("throwing_exception");
++     case BytecodeInterpreter::popping_frame:  return("popping_frame");
++     case BytecodeInterpreter::do_osr:  return("do_osr");
++     // deopt
++     case BytecodeInterpreter::deopt_resume:  return("deopt_resume");
++     case BytecodeInterpreter::deopt_resume2:  return("deopt_resume2");
++     default: return("BAD MSG");
++  }
++}
++void
++BytecodeInterpreter::print() {
++  tty->print_cr("thread: " INTPTR_FORMAT, (uintptr_t) this->_thread);
++  tty->print_cr("bcp: " INTPTR_FORMAT, (uintptr_t) this->_bcp);
++  tty->print_cr("locals: " INTPTR_FORMAT, (uintptr_t) this->_locals);
++  tty->print_cr("constants: " INTPTR_FORMAT, (uintptr_t) this->_constants);
++  {
++    ResourceMark rm;
++    char *method_name = _method->name_and_sig_as_C_string();
++    tty->print_cr("method: " INTPTR_FORMAT "[ %s ]",  (uintptr_t) this->_method, method_name);
++  }
++  tty->print_cr("mdx: " INTPTR_FORMAT, (uintptr_t) this->_mdx);
++  tty->print_cr("stack: " INTPTR_FORMAT, (uintptr_t) this->_stack);
++  tty->print_cr("msg: %s", C_msg(this->_msg));
++  tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee);
++  tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point);
++  tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance);
++  tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
++  tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
++  tty->print_cr("result_return_kind 0x%x ", (int) this->_result._return_kind);
++  tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
++  tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) this->_oop_temp);
++  tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
++  tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
++  tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
++#ifdef SPARC
++  tty->print_cr("last_Java_pc: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_pc);
++  tty->print_cr("frame_bottom: " INTPTR_FORMAT, (uintptr_t) this->_frame_bottom);
++  tty->print_cr("&native_fresult: " INTPTR_FORMAT, (uintptr_t) &this->_native_fresult);
++  tty->print_cr("native_lresult: " INTPTR_FORMAT, (uintptr_t) this->_native_lresult);
++#endif
++#ifdef IA64
++  tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp);
++#endif // IA64
++  tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);
++}
++
++extern "C" {
++    void PI(uintptr_t arg) {
++        ((BytecodeInterpreter*)arg)->print();
++    }
++}
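++
++// PI is exported with C linkage so an interpreter state can be dumped from a
++// native debugger; e.g. under gdb (illustrative):
++//   (gdb) call PI((uintptr_t)istate)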
++#endif // PRODUCT
++
++#endif // VM_JVMTI
++#endif // CC_INTERP
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/bytecodeInterpreter.hpp openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.hpp
+--- openjdk6/hotspot/src/share/vm/interpreter/bytecodeInterpreter.hpp	1969-12-31 19:00:00.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -0,0 +1,572 @@
++/*
++ * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
++ * CA 95054 USA or visit www.sun.com if you need additional information or
++ * have any questions.
++ *
++ */
++
++#ifdef CC_INTERP
++
++// CVM definitions find hotspot equivalents...
++
++union VMJavaVal64 {
++    jlong   l;
++    jdouble d;
++    uint32_t      v[2];
++};
++
++
++typedef class BytecodeInterpreter* interpreterState;
++
++struct call_message {
++    class methodOopDesc* _callee;    /* method to call during call_method request */
++    address   _callee_entry_point;   /* address to jump to for call_method request */
++    int       _bcp_advance;          /* size of the invoke bytecode operation */
++};
++
++struct osr_message {
++    address _osr_buf;                 /* the osr buffer */
++    address _osr_entry;               /* the entry to the osr method */
++};
++
++struct osr_result {
++  nmethod* nm;                       /* osr nmethod */
++  address return_addr;               /* osr blob return address */
++};
++
++// Result returned to frame manager
++union frame_manager_message {
++    call_message _to_call;            /* describes callee */
++    Bytecodes::Code _return_kind;     /* i_return, a_return, ... */
++    osr_message _osr;                 /* describes the osr */
++    osr_result _osr_result;           /* result of OSR request */
++};
++
++class BytecodeInterpreter : StackObj {
++friend class SharedRuntime;
++friend class AbstractInterpreterGenerator;
++friend class CppInterpreterGenerator;
++friend class InterpreterGenerator;
++friend class InterpreterMacroAssembler;
++friend class frame;
++friend class SharedRuntime;
++friend class VMStructs;
++
++public:
++    enum messages {
++         no_request = 0,            // unused
++         initialize,                // Perform one time interpreter initializations (assumes all switches set)
++         // status message to C++ interpreter
++         method_entry,              // initial method entry to interpreter
++         method_resume,             // frame manager response to return_from_method request (assuming a frame to resume)
++         deopt_resume,              // returning from a native call into a deopted frame
++         deopt_resume2,             // deopt resume as a result of a PopFrame
++         got_monitors,              // frame manager response to more_monitors request
++         rethrow_exception,         // unwinding and throwing exception
++         // requests to frame manager from C++ interpreter
++         call_method,               // request for new frame from interpreter, manager responds with method_entry
++         return_from_method,        // request from interpreter to unwind, manager responds with method_continue
++         more_monitors,             // need a new monitor
++         throwing_exception,        // unwind stack and rethrow
++         popping_frame,             // unwind call and retry call
++         do_osr                     // request this invocation be OSR'd
++    };
++
++private:
++    JavaThread*           _thread;        // the vm's java thread pointer
++    address               _bcp;           // instruction pointer
++    intptr_t*             _locals;        // local variable pointer
++    constantPoolCacheOop  _constants;     // constant pool cache
++    methodOop             _method;        // method being executed
++    DataLayout*           _mdx;           // compiler profiling data for current bytecode
++    intptr_t*             _stack;         // expression stack
++    messages              _msg;           // frame manager <-> interpreter message
++    frame_manager_message _result;        // result to frame manager
++    interpreterState      _prev_link;     // previous interpreter state
++    oop                   _oop_temp;      // mirror for interpreted native, null otherwise
++    intptr_t*             _stack_base;    // base of expression stack
++    intptr_t*             _stack_limit;   // limit of expression stack
++    BasicObjectLock*      _monitor_base;  // base of monitors on the native stack
++
++
++public:
++  // Constructor is only used by the initialization step. All other instances are created
++  // by the frame manager.
++  BytecodeInterpreter(messages msg);
++
++//
++// Deoptimization support
++//
++static void layout_interpreterState(interpreterState to_fill,
++                                    frame* caller,
++                                    frame* interpreter_frame,
++                                    methodOop method,
++                                    intptr_t* locals,
++                                    intptr_t* stack,
++                                    intptr_t* stack_base,
++                                    intptr_t* monitor_base,
++                                    intptr_t* frame_bottom,
++                                    bool top_frame);
++
++/*
++ * Generic 32-bit wide "Java slot" definition. This type occurs
++ * in operand stacks, Java locals, object fields, constant pools.
++ */
++union VMJavaVal32 {
++    jint     i;
++    jfloat   f;
++    class oopDesc*   r;
++    uint32_t raw;
++};
++
++/*
++ * Generic 64-bit Java value definition
++ */
++union VMJavaVal64 {
++    jlong   l;
++    jdouble d;
++    uint32_t      v[2];
++};
++
++/*
++ * Generic 32-bit wide "Java slot" definition. This type occurs
++ * in Java locals, object fields, constant pools, and
++ * operand stacks (as a CVMStackVal32).
++ */
++typedef union VMSlotVal32 {
++    VMJavaVal32    j;     /* For "Java" values */
++    address        a;     /* a return address created by jsr or jsr_w */
++} VMSlotVal32;
++
++
++/*
++ * Generic 32-bit wide stack slot definition.
++ */
++union VMStackVal32 {
++    VMJavaVal32    j;     /* For "Java" values */
++    VMSlotVal32    s;     /* any value from a "slot" or locals[] */
++};
++
++inline JavaThread* thread() { return _thread; }
++
++inline address bcp() { return _bcp; }
++inline void set_bcp(address new_bcp) { _bcp = new_bcp; }
++
++inline intptr_t* locals() { return _locals; }
++
++inline constantPoolCacheOop constants() { return _constants; }
++inline methodOop method() { return _method; }
++inline DataLayout* mdx() { return _mdx; }
++inline void set_mdx(DataLayout *new_mdx) { _mdx = new_mdx; }
++
++inline messages msg() { return _msg; }
++inline void set_msg(messages new_msg) { _msg = new_msg; }
++
++inline methodOop callee() { return _result._to_call._callee; }
++inline void set_callee(methodOop new_callee) { _result._to_call._callee = new_callee; }
++inline void set_callee_entry_point(address entry) { _result._to_call._callee_entry_point = entry; }
++inline void set_osr_buf(address buf) { _result._osr._osr_buf = buf; }
++inline void set_osr_entry(address entry) { _result._osr._osr_entry = entry; }
++inline int bcp_advance() { return _result._to_call._bcp_advance; }
++inline void set_bcp_advance(int count) { _result._to_call._bcp_advance = count; }
++
++inline void set_return_kind(Bytecodes::Code kind) { _result._return_kind = kind; }
++
++inline interpreterState prev() { return _prev_link; }
++
++inline intptr_t* stack() { return _stack; }
++inline void set_stack(intptr_t* new_stack) { _stack = new_stack; }
++
++
++inline intptr_t* stack_base() { return _stack_base; }
++inline intptr_t* stack_limit() { return _stack_limit; }
++
++inline BasicObjectLock* monitor_base() { return _monitor_base; }
++
++/*
++ * 64-bit Arithmetic:
++ *
++ * The functions below follow the semantics of the
++ * ladd, land, ldiv, lmul, lor, lxor, and lrem bytecodes,
++ * respectively.
++ */
++
++static jlong VMlongAdd(jlong op1, jlong op2);
++static jlong VMlongAnd(jlong op1, jlong op2);
++static jlong VMlongDiv(jlong op1, jlong op2);
++static jlong VMlongMul(jlong op1, jlong op2);
++static jlong VMlongOr (jlong op1, jlong op2);
++static jlong VMlongSub(jlong op1, jlong op2);
++static jlong VMlongXor(jlong op1, jlong op2);
++static jlong VMlongRem(jlong op1, jlong op2);
++
++/*
++ * Shift:
++ *
++ * The functions below follow the semantics of the
++ * lushr, lshl, and lshr bytecodes, respectively.
++ */
++
++static jlong VMlongUshr(jlong op1, jint op2);
++static jlong VMlongShl (jlong op1, jint op2);
++static jlong VMlongShr (jlong op1, jint op2);
++
++/*
++ * Unary:
++ *
++ * Return the negation of "op" (-op), according to
++ * the semantics of the lneg bytecode.
++ */
++
++static jlong VMlongNeg(jlong op);
++
++/*
++ * Return the complement of "op" (~op)
++ */
++
++static jlong VMlongNot(jlong op);
++
++
++/*
++ * Comparisons to 0:
++ */
++
++static int32_t VMlongLtz(jlong op);     /* op <= 0 */
++static int32_t VMlongGez(jlong op);     /* op >= 0 */
++static int32_t VMlongEqz(jlong op);     /* op == 0 */
++
++/*
++ * Between operands:
++ */
++
++static int32_t VMlongEq(jlong op1, jlong op2);    /* op1 == op2 */
++static int32_t VMlongNe(jlong op1, jlong op2);    /* op1 != op2 */
++static int32_t VMlongGe(jlong op1, jlong op2);    /* op1 >= op2 */
++static int32_t VMlongLe(jlong op1, jlong op2);    /* op1 <= op2 */
++static int32_t VMlongLt(jlong op1, jlong op2);    /* op1 <  op2 */
++static int32_t VMlongGt(jlong op1, jlong op2);    /* op1 >  op2 */
++
++/*
++ * Comparisons (returning a jint value: 0, 1, or -1)
++ *
++ * Between operands:
++ *
++ * Compare "op1" and "op2" according to the semantics of the
++ * "lcmp" bytecode.
++ */
++
++static int32_t VMlongCompare(jlong op1, jlong op2);
++
++/*
++ * Convert int to long, according to "i2l" bytecode semantics
++ */
++static jlong VMint2Long(jint val);
++
++/*
++ * Convert long to int, according to "l2i" bytecode semantics
++ */
++static jint VMlong2Int(jlong val);
++
++/*
++ * Convert long to float, according to "l2f" bytecode semantics
++ */
++static jfloat VMlong2Float(jlong val);
++
++/*
++ * Convert long to double, according to "l2d" bytecode semantics
++ */
++static jdouble VMlong2Double(jlong val);
++
++/*
++ * Java floating-point float value manipulation.
++ *
++ * The result argument is, once again, an lvalue.
++ *
++ * Arithmetic:
++ *
++ * The functions below follow the semantics of the
++ * fadd, fsub, fmul, fdiv, and frem bytecodes,
++ * respectively.
++ */
++
++static jfloat VMfloatAdd(jfloat op1, jfloat op2);
++static jfloat VMfloatSub(jfloat op1, jfloat op2);
++static jfloat VMfloatMul(jfloat op1, jfloat op2);
++static jfloat VMfloatDiv(jfloat op1, jfloat op2);
++static jfloat VMfloatRem(jfloat op1, jfloat op2);
++
++/*
++ * Unary:
++ *
++ * Return the negation of "op" (-op), according to
++ * the semantics of the fneg bytecode.
++ */
++
++static jfloat VMfloatNeg(jfloat op);
++
++/*
++ * Comparisons (returning an int value: 0, 1, or -1)
++ *
++ * Between operands:
++ *
++ * Compare "op1" and "op2" according to the semantics of the
++ * "fcmpl" (direction is -1) or "fcmpg" (direction is 1) bytecodes.
++ */
++
++static int32_t VMfloatCompare(jfloat op1, jfloat op2,
++                              int32_t direction);
++/*
++ * Conversion:
++ */
++
++/*
++ * Convert float to double, according to "f2d" bytecode semantics
++ */
++
++static jdouble VMfloat2Double(jfloat op);
++
++/*
++ ******************************************
++ * Java double floating-point manipulation.
++ ******************************************
++ *
++ * The result argument is, once again, an lvalue.
++ *
++ * Conversions:
++ */
++
++/*
++ * Convert double to int, according to "d2i" bytecode semantics
++ */
++
++static jint VMdouble2Int(jdouble val);
++
++/*
++ * Convert double to float, according to "d2f" bytecode semantics
++ */
++
++static jfloat VMdouble2Float(jdouble val);
++
++/*
++ * Convert int to double, according to "i2d" bytecode semantics
++ */
++
++static jdouble VMint2Double(jint val);
++
++/*
++ * Arithmetic:
++ *
++ * The functions below follow the semantics of the
++ * dadd, dsub, ddiv, dmul, and drem bytecodes, respectively.
++ */
++
++static jdouble VMdoubleAdd(jdouble op1, jdouble op2);
++static jdouble VMdoubleSub(jdouble op1, jdouble op2);
++static jdouble VMdoubleDiv(jdouble op1, jdouble op2);
++static jdouble VMdoubleMul(jdouble op1, jdouble op2);
++static jdouble VMdoubleRem(jdouble op1, jdouble op2);
++
++/*
++ * Unary:
++ *
++ * Return the negation of "op" (-op), according to
++ * the semantics of the dneg bytecode.
++ */
++
++static jdouble VMdoubleNeg(jdouble op);
++
++/*
++ * Comparisons (returning an int32_t value: 0, 1, or -1)
++ *
++ * Between operands:
++ *
++ * Compare "op1" and "op2" according to the semantics of the
++ * "dcmpl" (direction is -1) or "dcmpg" (direction is 1) bytecodes.
++ */
++
++static int32_t VMdoubleCompare(jdouble op1, jdouble op2, int32_t direction);
++
++/*
++ * Copy two typeless 32-bit words from one location to another.
++ * This is semantically equivalent to:
++ *
++ * to[0] = from[0];
++ * to[1] = from[1];
++ *
++ * but this interface is provided for those platforms that could
++ * optimize this into a single 64-bit transfer.
++ */
++
++static void VMmemCopy64(uint32_t to[2], const uint32_t from[2]);
++
++
++// Arithmetic operations
++
++/*
++ * Java arithmetic methods.
++ * The functions below follow the semantics of the
++ * iadd, isub, imul, idiv, irem, iand, ior, ixor,
++ * and ineg bytecodes, respectively.
++ */
++
++static jint VMintAdd(jint op1, jint op2);
++static jint VMintSub(jint op1, jint op2);
++static jint VMintMul(jint op1, jint op2);
++static jint VMintDiv(jint op1, jint op2);
++static jint VMintRem(jint op1, jint op2);
++static jint VMintAnd(jint op1, jint op2);
++static jint VMintOr (jint op1, jint op2);
++static jint VMintXor(jint op1, jint op2);
++
++/*
++ * Shift Operation:
++ * The functions below follow the semantics of the
++ * iushr, ishl, and ishr bytecodes, respectively.
++ */
++
++static jint VMintUshr(jint op, jint num);
++static jint VMintShl (jint op, jint num);
++static jint VMintShr (jint op, jint num);
++
++/*
++ * Unary Operation:
++ *
++ * Return the negation of "op" (-op), according to
++ * the semantics of the ineg bytecode.
++ */
++
++static jint VMintNeg(jint op);
++
++/*
++ * Int Conversions:
++ */
++
++/*
++ * Convert int to float, according to "i2f" bytecode semantics
++ */
++
++static jfloat VMint2Float(jint val);
++
++/*
++ * Convert int to byte, according to "i2b" bytecode semantics
++ */
++
++static jbyte VMint2Byte(jint val);
++
++/*
++ * Convert int to char, according to "i2c" bytecode semantics
++ */
++
++static jchar VMint2Char(jint val);
++
++/*
++ * Convert int to short, according to "i2s" bytecode semantics
++ */
++
++static jshort VMint2Short(jint val);
++
++/*=========================================================================
++ * Bytecode interpreter operations
++ *=======================================================================*/
++
++static void dup(intptr_t *tos);
++static void dup2(intptr_t *tos);
++static void dup_x1(intptr_t *tos);    /* insert top word two down */
++static void dup_x2(intptr_t *tos);    /* insert top word three down  */
++static void dup2_x1(intptr_t *tos);   /* insert top 2 slots three down */
++static void dup2_x2(intptr_t *tos);   /* insert top 2 slots four down */
++static void swap(intptr_t *tos);      /* swap top two elements */
++
++// umm, don't like that this method modifies its object
++
++// The interpreter used when JVMTI interpreter events are not needed
++static void run(interpreterState istate);
++// The interpreter used if JVMTI needs interpreter events
++static void runWithChecks(interpreterState istate);
++static void End_Of_Interpreter(void);
++
++// Inline static functions for Java Stack and Local manipulation
++
++static address stack_slot(intptr_t *tos, int offset);
++static jint stack_int(intptr_t *tos, int offset);
++static jfloat stack_float(intptr_t *tos, int offset);
++static oop stack_object(intptr_t *tos, int offset);
++static jdouble stack_double(intptr_t *tos, int offset);
++static jlong stack_long(intptr_t *tos, int offset);
++
++static void tag_stack(intptr_t *tos, frame::Tag tag, int offset);
++
++// only used for value types
++static void set_stack_slot(intptr_t *tos, address value, int offset);
++static void set_stack_int(intptr_t *tos, int value, int offset);
++static void set_stack_float(intptr_t *tos, jfloat value, int offset);
++static void set_stack_object(intptr_t *tos, oop value, int offset);
++
++// Needs to be platform dependent for the 32-bit platforms.
++static void set_stack_double(intptr_t *tos, jdouble value, int offset);
++static void set_stack_long(intptr_t *tos, jlong value, int offset);
++
++static void set_stack_double_from_addr(intptr_t *tos, address addr, int offset);
++static void set_stack_long_from_addr(intptr_t *tos, address addr, int offset);
++
++// Locals
++
++static address locals_slot(intptr_t* locals, int offset);
++static jint locals_int(intptr_t* locals, int offset);
++static jfloat locals_float(intptr_t* locals, int offset);
++static oop locals_object(intptr_t* locals, int offset);
++static jdouble locals_double(intptr_t* locals, int offset);
++static jlong locals_long(intptr_t* locals, int offset);
++
++static address locals_long_at(intptr_t* locals, int offset);
++static address locals_double_at(intptr_t* locals, int offset);
++
++static void tag_locals(intptr_t *locals, frame::Tag tag, int offset);
++
++static void set_locals_slot(intptr_t *locals, address value, int offset);
++static void set_locals_int(intptr_t *locals, jint value, int offset);
++static void set_locals_float(intptr_t *locals, jfloat value, int offset);
++static void set_locals_object(intptr_t *locals, oop value, int offset);
++static void set_locals_double(intptr_t *locals, jdouble value, int offset);
++static void set_locals_long(intptr_t *locals, jlong value, int offset);
++static void set_locals_double_from_addr(intptr_t *locals,
++                                   address addr, int offset);
++static void set_locals_long_from_addr(intptr_t *locals,
++                                   address addr, int offset);
++
++static void astore(intptr_t* topOfStack, int stack_offset,
++                   intptr_t* locals,     int locals_offset);
++
++// Support for dup and swap
++static void copy_stack_slot(intptr_t *tos, int from_offset, int to_offset);
++
++#ifndef PRODUCT
++static void verify_locals_tag(intptr_t *locals, frame::Tag tag, int offset);
++static void verify_stack_tag(intptr_t *tos, frame::Tag tag, int offset);
++static const char* C_msg(BytecodeInterpreter::messages msg);
++void print();
++#endif // PRODUCT
++
++    // Platform fields/methods
++# include "incls/_bytecodeInterpreter_pd.hpp.incl"
++
++}; // BytecodeInterpreter
++
++#endif // CC_INTERP
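++
++// Illustrative (non-normative) shape of one of the declarations above; the
++// real bodies live in the platform bytecodeInterpreter_pd files included at
++// the end of the class. Per the fcmpl/fcmpg semantics documented above, NaN
++// operands yield the "direction" value:
++//
++//   static int32_t VMfloatCompare(jfloat op1, jfloat op2, int32_t direction) {
++//     if (op1 < op2)  return -1;
++//     if (op1 > op2)  return  1;
++//     if (op1 == op2) return  0;
++//     return direction;  // unordered (NaN): -1 for fcmpl, +1 for fcmpg
++//   }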
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp
+--- openjdk6/hotspot/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp	1969-12-31 19:00:00.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -0,0 +1,44 @@
++/*
++ * Copyright 2002 Sun Microsystems, Inc.  All Rights Reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
++ * CA 95054 USA or visit www.sun.com if you need additional information or
++ * have any questions.
++ *
++ */
++
++// This file holds platform-independent bodies of inline functions for the C++-based interpreter
++
++#ifdef CC_INTERP
++
++#ifdef ASSERT
++extern "C" { typedef void (*verify_oop_fn_t)(oop, const char *);};
++#define VERIFY_OOP(o) \
++        /*{ verify_oop_fn_t verify_oop_entry = \
++            *StubRoutines::verify_oop_subroutine_entry_address(); \
++          if (verify_oop_entry) { \
++             (*verify_oop_entry)((o), "Not an oop!"); \
++          } \
++        }*/
++#else
++#define VERIFY_OOP(o)
++#endif
++
++// Platform dependent data manipulation
++# include "incls/_bytecodeInterpreter_pd.inline.hpp.incl"
++#endif // CC_INTERP
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/bytecodeInterpreterWithChecks.xml openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreterWithChecks.xml
+--- openjdk6/hotspot/src/share/vm/interpreter/bytecodeInterpreterWithChecks.xml	1969-12-31 19:00:00.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreterWithChecks.xml	2008-01-31 09:19:00.000000000 -0500
+@@ -0,0 +1,10 @@
++<?xml version="1.0"?> 
++<!-- 
++     Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
++     SUN PROPRIETARY/CONFIDENTIAL.  Use is subject to license terms.
++-->
++<!DOCTYPE processcode [
++  <!ELEMENT processcode ANY>
++]>
++<processcode>
++</processcode>
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/bytecodeInterpreterWithChecks.xsl openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreterWithChecks.xsl
+--- openjdk6/hotspot/src/share/vm/interpreter/bytecodeInterpreterWithChecks.xsl	1969-12-31 19:00:00.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreterWithChecks.xsl	2008-01-31 09:19:00.000000000 -0500
+@@ -0,0 +1,21 @@
++<?xml version="1.0"?> 
++<!-- 
++     Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
++     SUN PROPRIETARY/CONFIDENTIAL.  Use is subject to license terms.
++-->
++
++<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
++
++<xsl:template match="processcode">
++<xsl:text>
++#define VM_JVMTI
++#include "bytecodeInterpreter.cpp"
++</xsl:text>
++<xsl:text disable-output-escaping = "yes">
++
++</xsl:text>
++
++<xsl:output method="text" indent="no" omit-xml-declaration="yes"/>
++</xsl:template>
++
++</xsl:stylesheet>
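++
++<!-- Net effect of this stylesheet (explanatory note): applied to the empty
++     processcode document above, it emits a translation unit whose entire
++     effective content is
++
++         #define VM_JVMTI
++         #include "bytecodeInterpreter.cpp"
++
++     i.e. bytecodeInterpreterWithChecks.cpp is the same interpreter source
++     recompiled with the JVMTI checks guarded by VM_JVMTI enabled. -->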
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/bytecodes.cpp openjdk/hotspot/src/share/vm/interpreter/bytecodes.cpp
+--- openjdk6/hotspot/src/share/vm/interpreter/bytecodes.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/bytecodes.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)bytecodes.cpp	1.97 07/06/21 09:48:42 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -72,9 +69,9 @@
+       return (len > 0 && len == (int)len) ? len : -1;
+     }
+ 
+-  case _lookupswitch:      // fall through    
+-  case _fast_binaryswitch: // fall through    
+-  case _fast_linearswitch: 
++  case _lookupswitch:      // fall through
++  case _fast_binaryswitch: // fall through
++  case _fast_linearswitch:
+     { address aligned_bcp = (address)round_to((intptr_t)bcp + 1, jintSize);
+       jlong npairs = (jint)Bytes::get_Java_u4(aligned_bcp + jintSize);
+       jlong len = (aligned_bcp - bcp) + (2 + 2*npairs)*jintSize;
+@@ -86,9 +83,9 @@
+   return 0;
+ }
+ 
+-// At a breakpoint instruction, this returns the breakpoint's length, 
++// At a breakpoint instruction, this returns the breakpoint's length,
+ // otherwise, it's the same as special_length_at().  This is used by
+-// the RawByteCodeStream, which wants to see the actual bytecode 
++// the RawByteCodeStream, which wants to see the actual bytecode
+ // values (including breakpoint).  RawByteCodeStream is used by the
+ // verifier when reading in bytecode to verify.  Other mechanisms that
+ // run at runtime (such as generateOopMaps) need to iterate over the code
+@@ -124,7 +121,7 @@
+   if (java_code != code)  _can_rewrite[java_code] = true;
+ }
+ 
+-  
++
+ // Format strings interpretation:
+ //
+ // b: bytecode
+@@ -136,7 +133,7 @@
+ // w: wide bytecode
+ //
+ // Note: Right now the format strings are used for 2 purposes:
+-//       1. to specify the length of the bytecode 
++//       1. to specify the length of the bytecode
+ //          (= number of characters in format string)
+ //       2. to specify the bytecode attributes
+ //
+@@ -344,7 +341,7 @@
+   def(_putstatic           , "putstatic"           , "bjj"  , NULL    , T_ILLEGAL, -1, true );
+   def(_getfield            , "getfield"            , "bjj"  , NULL    , T_ILLEGAL,  0, true );
+   def(_putfield            , "putfield"            , "bjj"  , NULL    , T_ILLEGAL, -2, true );
+-  def(_invokevirtual       , "invokevirtual"       , "bjj"  , NULL    , T_ILLEGAL, -1, true); 
++  def(_invokevirtual       , "invokevirtual"       , "bjj"  , NULL    , T_ILLEGAL, -1, true);
+   def(_invokespecial       , "invokespecial"       , "bjj"  , NULL    , T_ILLEGAL, -1, true);
+   def(_invokestatic        , "invokestatic"        , "bjj"  , NULL    , T_ILLEGAL,  0, true);
+   def(_invokeinterface     , "invokeinterface"     , "bjj__", NULL    , T_ILLEGAL, -1, true);
+@@ -366,7 +363,7 @@
+   def(_jsr_w               , "jsr_w"               , "boooo", NULL    , T_INT    ,  0, false);
+   def(_breakpoint          , "breakpoint"          , ""     , NULL    , T_VOID   ,  0, true);
+ 
+-  //  JVM bytecodes	  			
++  //  JVM bytecodes
+   //  bytecode               bytecode name           format   wide f.   result tp  stk traps  std code
+ 
+   def(_fast_agetfield      , "fast_agetfield"      , "bjj"  , NULL    , T_OBJECT ,  0, true , _getfield       );
+@@ -397,7 +394,7 @@
+   def(_fast_icaload        , "fast_icaload"        , "bi_"  , NULL    , T_INT    ,  0, false, _iload);
+ 
+   // Faster method invocation.
+-  def(_fast_invokevfinal   , "fast_invokevfinal"   , "bjj"  , NULL    , T_ILLEGAL, -1, true, _invokevirtual   ); 
++  def(_fast_invokevfinal   , "fast_invokevfinal"   , "bjj"  , NULL    , T_ILLEGAL, -1, true, _invokevirtual   );
+ 
+   def(_fast_linearswitch   , "fast_linearswitch"   , ""     , NULL    , T_VOID   , -1, false, _lookupswitch   );
+   def(_fast_binaryswitch   , "fast_binaryswitch"   , ""     , NULL    , T_VOID   , -1, false, _lookupswitch   );
+@@ -432,7 +429,7 @@
+   Bytecodes::initialize();
+ }
+ 
+-// Restore optimization 
++// Restore optimization
+ #ifdef _M_AMD64
+ #pragma optimize ("", on)
+ #endif
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/bytecodes.hpp openjdk/hotspot/src/share/vm/interpreter/bytecodes.hpp
+--- openjdk6/hotspot/src/share/vm/interpreter/bytecodes.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/bytecodes.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)bytecodes.hpp	1.79 07/06/21 09:48:42 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Bytecodes specifies all bytecodes used in the VM and
+@@ -237,7 +234,7 @@
+     _ifnonnull            = 199, // 0xc7
+     _goto_w               = 200, // 0xc8
+     _jsr_w                = 201, // 0xc9
+-    _breakpoint		  = 202, // 0xca
++    _breakpoint           = 202, // 0xca
+ 
+     number_of_java_codes,
+ 
+@@ -308,10 +305,10 @@
+ 
+ 
+    // Fetch a bytecode, hiding breakpoints as necessary:
+-   static Code       code_at(address bcp, methodOop method = NULL) { 
+-          Code code = cast(*bcp); return (code != _breakpoint) ? code : non_breakpoint_code_at(bcp, method); 
++   static Code       code_at(address bcp, methodOop method = NULL) {
++          Code code = cast(*bcp); return (code != _breakpoint) ? code : non_breakpoint_code_at(bcp, method);
+    }
+-   static Code       java_code_at(address bcp, methodOop method = NULL) { 
++   static Code       java_code_at(address bcp, methodOop method = NULL) {
+           return java_code(code_at(bcp, method));
+    }
+ 
+@@ -349,14 +346,13 @@
+   static int         java_length_at (address bcp)  { int l = length_for(java_code_at(bcp)); return l > 0 ? l : special_length_at(bcp); }
+   static bool        is_java_code   (Code code)    { return 0 <= code && code < number_of_java_codes; }
+ 
+-  static bool        is_aload       (Code code)    { return (code == _aload  || code == _aload_0  || code == _aload_1   
++  static bool        is_aload       (Code code)    { return (code == _aload  || code == _aload_0  || code == _aload_1
+                                                                              || code == _aload_2  || code == _aload_3); }
+-  static bool        is_astore      (Code code)    { return (code == _astore || code == _astore_0 || code == _astore_1 
++  static bool        is_astore      (Code code)    { return (code == _astore || code == _astore_0 || code == _astore_1
+                                                                              || code == _astore_2 || code == _astore_3); }
+ 
+-  static bool        is_zero_const  (Code code)    { return (code == _aconst_null || code == _iconst_0 
++  static bool        is_zero_const  (Code code)    { return (code == _aconst_null || code == _iconst_0
+                                                            || code == _fconst_0 || code == _dconst_0); }
+   // Initialization
+   static void        initialize     ();
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/bytecodeStream.cpp openjdk/hotspot/src/share/vm/interpreter/bytecodeStream.cpp
+--- openjdk6/hotspot/src/share/vm/interpreter/bytecodeStream.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/bytecodeStream.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)bytecodeStream.cpp	1.47 07/06/21 09:48:41 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/bytecodeStream.hpp openjdk/hotspot/src/share/vm/interpreter/bytecodeStream.hpp
+--- openjdk6/hotspot/src/share/vm/interpreter/bytecodeStream.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/bytecodeStream.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)bytecodeStream.hpp	1.54 07/06/21 09:48:41 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,11 +19,11 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A BytecodeStream is used for fast iteration over the bytecodes
+-// of a methodOop. 
++// of a methodOop.
+ //
+ // Usage:
+ //
+@@ -74,7 +71,7 @@
+   void set_start   (int beg_bci) {
+     set_interval(beg_bci, _method->code_size());
+   }
+-  
++
+   // Iteration
+   // Use raw_next() rather than next() for faster method reference
+   Bytecodes::Code raw_next() {
+@@ -123,9 +120,9 @@
+   // Bytecode-specific attributes
+   int             dest() const                   { return bci() + (short)Bytes::get_Java_u2(bcp() + 1); }
+   int             dest_w() const                 { return bci() + (int  )Bytes::get_Java_u4(bcp() + 1); }
+-  
++
+   // Unsigned indices, widening
+-  int             get_index() const              { return (is_wide()) ? Bytes::get_Java_u2(bcp() + 2) : bcp()[1]; }  
++  int             get_index() const              { return (is_wide()) ? Bytes::get_Java_u2(bcp() + 2) : bcp()[1]; }
+   int             get_index_big() const          { return (int)Bytes::get_Java_u2(bcp() + 1);  }
+ };
+ 
+@@ -136,7 +133,7 @@
+  public:
+   // Construction
+   BytecodeStream(methodHandle method) : RawBytecodeStream(method) { }
+-  
++
+   // Iteration
+   Bytecodes::Code next() {
+     Bytecodes::Code code;
+@@ -153,7 +150,7 @@
+       //
+       // note that we cannot advance before having the
+       // tty bytecode otherwise the stepping is wrong!
+-      // (carefull: length_for(...) must be used first!) 
++      // (careful: length_for(...) must be used first!)
+       int l = Bytecodes::length_for(code);
+       if (l == 0) l = Bytecodes::length_at(bcp);
+       _next_bci  += l;
+@@ -173,5 +170,3 @@
+ 
+   bool            is_active_breakpoint() const   { return Bytecodes::is_active_breakpoint_at(bcp()); }
+ };
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp openjdk/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp
+--- openjdk6/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)bytecodeTracer.cpp	1.52 07/06/08 15:21:46 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -44,10 +41,10 @@
+   bool      _is_wide;
+   address   _next_pc;                // current decoding position
+ 
+-  void      align()                  { _next_pc = (address)round_to((intptr_t)_next_pc, sizeof(jint)); }  
+-  int       get_byte()               { return *(jbyte*) _next_pc++; }  // signed  
++  void      align()                  { _next_pc = (address)round_to((intptr_t)_next_pc, sizeof(jint)); }
++  int       get_byte()               { return *(jbyte*) _next_pc++; }  // signed
+   short     get_short()              { short i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; }
+-  int       get_int()                { int i=Bytes::get_Java_u4(_next_pc); _next_pc+=4; return i; }                                                   
++  int       get_int()                { int i=Bytes::get_Java_u4(_next_pc); _next_pc+=4; return i; }
+ 
+   int       get_index()              { return *(address)_next_pc++; }
+   int       get_big_index()          { int i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; }
+@@ -56,9 +53,9 @@
+   bool      is_wide()                { return _is_wide; }
+ 
+ 
+-  void      print_constant(int i);
+-  void      print_attributes(Bytecodes::Code code, int bci);
+-  void      bytecode_epilog(int bci);
++  void      print_constant(int i, outputStream* st = tty);
++  void      print_attributes(Bytecodes::Code code, int bci, outputStream* st = tty);
++  void      bytecode_epilog(int bci, outputStream* st = tty);
+ 
+  public:
+   BytecodePrinter() {
+@@ -67,7 +64,7 @@
+ 
+   // This method is called while executing the raw bytecodes, so none of
+   // the adjustments that BytecodeStream performs applies.
+-  void trace(methodHandle method, address bcp, uintptr_t tos, uintptr_t tos2) {
++  void trace(methodHandle method, address bcp, uintptr_t tos, uintptr_t tos2, outputStream* st) {
+     ResourceMark rm;
+     if (_current_method != method()) {
+       // Note 1: This code will not work as expected with true MT/MP.
+@@ -76,10 +73,10 @@
+       // _current_method pointer happens to have the same bits as
+       // the incoming method.  We could lose a line of trace output.
+       // This is acceptable in a debug-only feature.
+-      tty->cr();
+-      tty->print("[%d] ", (int) Thread::current()->osthread()->thread_id());
+-      method->print_name(tty);
+-      tty->cr();
++      st->cr();
++      st->print("[%d] ", (int) Thread::current()->osthread()->thread_id());
++      method->print_name(st);
++      st->cr();
+       _current_method = method();
+     }
+     Bytecodes::Code code;
+@@ -90,13 +87,13 @@
+       code = Bytecodes::code_at(bcp);
+     }
+     int bci = bcp - method->code_base();
+-    tty->print("[%d] ", (int) Thread::current()->osthread()->thread_id());
++    st->print("[%d] ", (int) Thread::current()->osthread()->thread_id());
+     if (Verbose) {
+-      tty->print("%8d  %4d  " INTPTR_FORMAT " " INTPTR_FORMAT " %s", 
+-	   BytecodeCounter::counter_value(), bci, tos, tos2, Bytecodes::name(code));
++      st->print("%8d  %4d  " INTPTR_FORMAT " " INTPTR_FORMAT " %s",
++           BytecodeCounter::counter_value(), bci, tos, tos2, Bytecodes::name(code));
+     } else {
+-      tty->print("%8d  %4d  %s", 
+-	   BytecodeCounter::counter_value(), bci, Bytecodes::name(code));
++      st->print("%8d  %4d  %s",
++           BytecodeCounter::counter_value(), bci, Bytecodes::name(code));
+     }
+     _next_pc = is_wide() ? bcp+2 : bcp+1;
+     print_attributes(code, bci);
+@@ -107,11 +104,11 @@
+ 
+   // Used for methodOop::print_codes().  The input bcp comes from
+   // BytecodeStream, which will skip wide bytecodes.
+-  void trace(methodHandle method, address bcp) {
++  void trace(methodHandle method, address bcp, outputStream* st) {
+     _current_method = method();
+     ResourceMark rm;
+     Bytecodes::Code code = Bytecodes::code_at(bcp);
+-    // Set is_wide 
++    // Set is_wide
+     _is_wide = (code == Bytecodes::_wide);
+     if (is_wide()) {
+       code = Bytecodes::code_at(bcp+1);
+@@ -119,13 +116,13 @@
+     int bci = bcp - method->code_base();
+     // Print bytecode index and name
+     if (is_wide()) {
+-      tty->print("%d %s_w", bci, Bytecodes::name(code));
++      st->print("%d %s_w", bci, Bytecodes::name(code));
+     } else {
+-      tty->print("%d %s", bci, Bytecodes::name(code));
++      st->print("%d %s", bci, Bytecodes::name(code));
+     }
+     _next_pc = is_wide() ? bcp+2 : bcp+1;
+-    print_attributes(code, bci);
+-    bytecode_epilog(bci);
++    print_attributes(code, bci, st);
++    bytecode_epilog(bci, st);
+   }
+ };
+ 
+@@ -146,7 +143,7 @@
+ }
+ 
+ 
+-void BytecodeTracer::trace(methodHandle method, address bcp, uintptr_t tos, uintptr_t tos2 ) {
++void BytecodeTracer::trace(methodHandle method, address bcp, uintptr_t tos, uintptr_t tos2, outputStream* st) {
+   if (TraceBytecodes && BytecodeCounter::counter_value() >= TraceBytecodesAt) {
+     ttyLocker ttyl;  // 5065316: keep the following output coherent
+     // The ttyLocker also prevents races between two threads
+@@ -161,81 +158,81 @@
+     // We put the locker on the static trace method, not the
+     // virtual one, because the clients of this module go through
+     // the static method.
+-    _closure->trace(method, bcp, tos, tos2);
++    _closure->trace(method, bcp, tos, tos2, st);
+   }
+ }
+ 
+-void BytecodeTracer::trace(methodHandle method, address bcp) {
++void BytecodeTracer::trace(methodHandle method, address bcp, outputStream* st) {
+   ttyLocker ttyl;  // 5065316: keep the following output coherent
+-  _closure->trace(method, bcp);
++  _closure->trace(method, bcp, st);
+ }
+ 
+-void print_oop(oop value) {
++void print_oop(oop value, outputStream* st) {
+   if (value == NULL) {
+-    tty->print_cr(" NULL");
++    st->print_cr(" NULL");
+   } else {
+     EXCEPTION_MARK;
+     Handle h_value (THREAD, value);
+     symbolHandle sym = java_lang_String::as_symbol(h_value, CATCH);
+     if (sym->utf8_length() > 32) {
+-      tty->print_cr(" ....");
++      st->print_cr(" ....");
+     } else {
+-      sym->print(); tty->cr();
++      sym->print_on(st); st->cr();
+     }
+   }
+ }
+ 
+-void BytecodePrinter::print_constant(int i) {
++void BytecodePrinter::print_constant(int i, outputStream* st) {
+   constantPoolOop constants = method()->constants();
+   constantTag tag = constants->tag_at(i);
+ 
+-  if (tag.is_int()) { 
+-    tty->print_cr(" " INT32_FORMAT, constants->int_at(i));
++  if (tag.is_int()) {
++    st->print_cr(" " INT32_FORMAT, constants->int_at(i));
+   } else if (tag.is_long()) {
+-    tty->print_cr(" " INT64_FORMAT, constants->long_at(i));
+-  } else if (tag.is_float()) { 
+-    tty->print_cr(" %f", constants->float_at(i));
++    st->print_cr(" " INT64_FORMAT, constants->long_at(i));
++  } else if (tag.is_float()) {
++    st->print_cr(" %f", constants->float_at(i));
+   } else if (tag.is_double()) {
+-    tty->print_cr(" %f", constants->double_at(i));
+-  } else if (tag.is_string()) { 
++    st->print_cr(" %f", constants->double_at(i));
++  } else if (tag.is_string()) {
+     oop string = constants->resolved_string_at(i);
+-    print_oop(string);
+-  } else if (tag.is_unresolved_string()) { 
+-    tty->print_cr(" <unresolved string at %d>", i);  
+-  } else if (tag.is_klass()) { 
+-    tty->print_cr(" %s", constants->resolved_klass_at(i)->klass_part()->external_name());
+-  } else if (tag.is_unresolved_klass()) { 
+-    tty->print_cr(" <unresolved klass at %d>", i);  
+-  } else ShouldNotReachHere();  
++    print_oop(string, st);
++  } else if (tag.is_unresolved_string()) {
++    st->print_cr(" <unresolved string at %d>", i);
++  } else if (tag.is_klass()) {
++    st->print_cr(" %s", constants->resolved_klass_at(i)->klass_part()->external_name());
++  } else if (tag.is_unresolved_klass()) {
++    st->print_cr(" <unresolved klass at %d>", i);
++  } else ShouldNotReachHere();
+ }
+ 
+ 
+-void BytecodePrinter::print_attributes(Bytecodes::Code code, int bci) {
++void BytecodePrinter::print_attributes(Bytecodes::Code code, int bci, outputStream* st) {
+   // Show attributes of pre-rewritten codes
+   code = Bytecodes::java_code(code);
+   // If the code doesn't have any fields there's nothing to print.
+   // note this is ==1 because the tableswitch and lookupswitch are
+   // zero size (for some reason) and we want to print stuff out for them.
+   if (Bytecodes::length_for(code) == 1) {
+-    tty->cr();
++    st->cr();
+     return;
+   }
+ 
+   switch(code) {
+     // Java specific bytecodes only matter.
+     case Bytecodes::_bipush:
+-      tty->print_cr(" " INT32_FORMAT, get_byte());
++      st->print_cr(" " INT32_FORMAT, get_byte());
+       break;
+-    case Bytecodes::_sipush: 
+-      tty->print_cr(" " INT32_FORMAT, get_short());
++    case Bytecodes::_sipush:
++      st->print_cr(" " INT32_FORMAT, get_short());
+       break;
+     case Bytecodes::_ldc:
+-      print_constant(get_index());
++      print_constant(get_index(), st);
+       break;
+ 
+     case Bytecodes::_ldc_w:
+     case Bytecodes::_ldc2_w:
+-      print_constant(get_big_index());
++      print_constant(get_big_index(), st);
+       break;
+ 
+     case Bytecodes::_iload:
+@@ -248,15 +245,15 @@
+     case Bytecodes::_fstore:
+     case Bytecodes::_dstore:
+     case Bytecodes::_astore:
+-      tty->print_cr(" #%d", get_index_special());
++      st->print_cr(" #%d", get_index_special());
+       break;
+ 
+     case Bytecodes::_iinc:
+       { int index = get_index_special();
+-        jint offset = is_wide() ? get_short(): get_byte();    
+-        tty->print_cr(" #%d " INT32_FORMAT, index, offset);
++        jint offset = is_wide() ? get_short(): get_byte();
++        st->print_cr(" #%d " INT32_FORMAT, index, offset);
+       }
+-      break;    
++      break;
+ 
+     case Bytecodes::_newarray: {
+         BasicType atype = (BasicType)get_index();
+@@ -264,14 +261,14 @@
+         if (str == NULL || atype == T_OBJECT || atype == T_ARRAY) {
+           assert(false, "Unidentified basic type");
+         }
+-        tty->print_cr(" %s", str);
++        st->print_cr(" %s", str);
+       }
+       break;
+     case Bytecodes::_anewarray: {
+         int klass_index = get_big_index();
+         constantPoolOop constants = method()->constants();
+         symbolOop name = constants->klass_name_at(klass_index);
+-        tty->print_cr(" %s ", name->as_C_string());
++        st->print_cr(" %s ", name->as_C_string());
+       }
+       break;
+     case Bytecodes::_multianewarray: {
+@@ -279,7 +276,7 @@
+         int nof_dims = get_index();
+         constantPoolOop constants = method()->constants();
+         symbolOop name = constants->klass_name_at(klass_index);
+-        tty->print_cr(" %s %d", name->as_C_string(), nof_dims);
++        st->print_cr(" %s %d", name->as_C_string(), nof_dims);
+       }
+       break;
+ 
+@@ -301,15 +298,15 @@
+     case Bytecodes::_if_acmpne:
+     case Bytecodes::_goto:
+     case Bytecodes::_jsr:
+-      tty->print_cr(" %d", bci + get_short());
++      st->print_cr(" %d", bci + get_short());
+       break;
+ 
+     case Bytecodes::_goto_w:
+     case Bytecodes::_jsr_w:
+-      tty->print_cr(" %d", bci + get_int());
++      st->print_cr(" %d", bci + get_int());
+       break;
+ 
+-    case Bytecodes::_ret: tty->print_cr(" %d", get_index_special()); break;
++    case Bytecodes::_ret: st->print_cr(" %d", get_index_special()); break;
+ 
+     case Bytecodes::_tableswitch:
+       { align();
+@@ -321,16 +318,16 @@
+         for (int i = 0; i < len; i++) {
+           dest[i] = bci + get_int();
+         }
+-        tty->print(" %d " INT32_FORMAT " " INT32_FORMAT " ",
+-                      default_dest, lo, hi); 
++        st->print(" %d " INT32_FORMAT " " INT32_FORMAT " ",
++                      default_dest, lo, hi);
+         int first = true;
+         for (int ll = lo; ll <= hi; ll++, first = false)  {
+           int idx = ll - lo;
+           const char *format = first ? " %d:" INT32_FORMAT " (delta: %d)" :
+                                        ", %d:" INT32_FORMAT " (delta: %d)";
+-          tty->print(format, ll, dest[idx], dest[idx]-bci);
++          st->print(format, ll, dest[idx], dest[idx]-bci);
+         }
+-        tty->cr();
++        st->cr();
+       }
+       break;
+     case Bytecodes::_lookupswitch:
+@@ -343,14 +340,14 @@
+           key [i] = get_int();
+           dest[i] = bci + get_int();
+         };
+-        tty->print(" %d %d ", default_dest, len); 
++        st->print(" %d %d ", default_dest, len);
+         bool first = true;
+         for (int ll = 0; ll < len; ll++, first = false)  {
+           const char *format = first ? " " INT32_FORMAT ":" INT32_FORMAT :
+                                        ", " INT32_FORMAT ":" INT32_FORMAT ;
+-          tty->print(format, key[ll], dest[ll]);
++          st->print(format, key[ll], dest[ll]);
+         }
+-        tty->cr();
++        st->cr();
+       }
+       break;
+ 
+@@ -361,7 +358,7 @@
+         int i = get_big_index();
+         constantPoolOop constants = method()->constants();
+         symbolOop field = constants->name_ref_at(i);
+-        tty->print_cr(" %d <%s>", i, field->as_C_string()); 
++        st->print_cr(" %d <%s>", i, field->as_C_string());
+       }
+       break;
+ 
+@@ -372,7 +369,7 @@
+         constantPoolOop constants = method()->constants();
+         symbolOop name = constants->name_ref_at(i);
+         symbolOop signature = constants->signature_ref_at(i);
+-        tty->print_cr(" %d <%s> <%s> ", i, name->as_C_string(), signature->as_C_string()); 
++        st->print_cr(" %d <%s> <%s> ", i, name->as_C_string(), signature->as_C_string());
+       }
+       break;
+ 
+@@ -383,7 +380,7 @@
+         constantPoolOop constants = method()->constants();
+         symbolOop name = constants->name_ref_at(i);
+         symbolOop signature = constants->signature_ref_at(i);
+-        tty->print_cr(" %d <%s> <%s> %d", i, name->as_C_string(), signature->as_C_string(), n);
++        st->print_cr(" %d <%s> <%s> %d", i, name->as_C_string(), signature->as_C_string(), n);
+       }
+       break;
+ 
+@@ -393,14 +390,14 @@
+       { int i = get_big_index();
+         constantPoolOop constants = method()->constants();
+         symbolOop name = constants->klass_name_at(i);
+-        tty->print_cr(" %d <%s>", i, name->as_C_string()); 
++        st->print_cr(" %d <%s>", i, name->as_C_string());
+       }
+       break;
+ 
+-    case Bytecodes::_wide: 
++    case Bytecodes::_wide:
+       // length is zero not one, but printed with no more info.
+       break;
+-    
++
+     default:
+       ShouldNotReachHere();
+       break;
+@@ -408,14 +405,14 @@
+ }
+ 
+ 
+-void BytecodePrinter::bytecode_epilog(int bci) {
++void BytecodePrinter::bytecode_epilog(int bci, outputStream* st) {
+   methodDataOop mdo = method()->method_data();
+   if (mdo != NULL) {
+     ProfileData* data = mdo->bci_to_data(bci);
+     if (data != NULL) {
+-      tty->print("  %d", mdo->dp_to_di(data->dp()));
+-      tty->fill_to(6);
+-      data->print_data_on(tty);
++      st->print("  %d", mdo->dp_to_di(data->dp()));
++      st->fill_to(6);
++      data->print_data_on(st);
+     }
+   }
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/bytecodeTracer.hpp openjdk/hotspot/src/share/vm/interpreter/bytecodeTracer.hpp
+--- openjdk6/hotspot/src/share/vm/interpreter/bytecodeTracer.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/bytecodeTracer.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)bytecodeTracer.hpp	1.24 07/05/05 17:05:37 JVM"
+-#endif
+ /*
+  * Copyright 1997-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The BytecodeTracer is a helper class used by the interpreter for run-time
+@@ -42,11 +39,11 @@
+ 
+  public:
+   static BytecodeClosure* std_closure();                        // a printing closure
+-  static BytecodeClosure* closure()				                      { return _closure; }
+-  static void             set_closure(BytecodeClosure* closure)	{ _closure = closure; }
++  static BytecodeClosure* closure()                            { return _closure; }
++  static void             set_closure(BytecodeClosure* closure) { _closure = closure; }
+ 
+-  static void             trace(methodHandle method, address bcp, uintptr_t tos, uintptr_t tos2);
+-  static void             trace(methodHandle method, address bcp);
++  static void             trace(methodHandle method, address bcp, uintptr_t tos, uintptr_t tos2, outputStream* st = tty);
++  static void             trace(methodHandle method, address bcp, outputStream* st = tty);
+ };
+ 
+ 
+@@ -54,8 +51,8 @@
+ 
+ class BytecodeClosure {
+  public:
+-  virtual void trace(methodHandle method, address bcp, uintptr_t tos, uintptr_t tos2) = 0;
+-  virtual void trace(methodHandle method, address bcp) = 0;
++  virtual void trace(methodHandle method, address bcp, uintptr_t tos, uintptr_t tos2, outputStream* st) = 0;
++  virtual void trace(methodHandle method, address bcp, outputStream* st) = 0;
+ };
+ 
+ #endif // !PRODUCT
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/cInterpreter.cpp openjdk/hotspot/src/share/vm/interpreter/cInterpreter.cpp
+--- openjdk6/hotspot/src/share/vm/interpreter/cInterpreter.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/cInterpreter.cpp	1969-12-31 19:00:00.000000000 -0500
+@@ -1,359 +0,0 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)cInterpreter.cpp	1.30 07/05/17 15:54:05 JVM"
+-#endif
+-/*
+- * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
+- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+- *
+- * This code is free software; you can redistribute it and/or modify it
+- * under the terms of the GNU General Public License version 2 only, as
+- * published by the Free Software Foundation.
+- *
+- * This code is distributed in the hope that it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+- * version 2 for more details (a copy is included in the LICENSE file that
+- * accompanied this code).
+- *
+- * You should have received a copy of the GNU General Public License version
+- * 2 along with this work; if not, write to the Free Software Foundation,
+- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+- * CA 95054 USA or visit www.sun.com if you need additional information or
+- * have any questions.
+- *  
+- */
+-
+-/*
+- * Note:
+- * In order to eliminate the overhead of testing JVMTI flags
+- * during non debuging execution, we generate two version of the Interpreter.
+- * The first one is generated via the dependency in the includeDB mechanism 
+- * and is read in as part of the _cInterpreter.cpp.incl line below.
+- *
+- * The second and JVMTI enabled interpreter is brought in below after
+- * the line defining VM_JVMTI to 1.
+- * 
+- * On startup, the assembly generated to enter the Interpreter will be
+- * pointed at either InterpretMethod or InterpretMethodWithChecks depending
+- * on the state of the JVMTI flags..
+- */
+-#undef VM_JVMTI
+-
+-#include "incls/_precompiled.incl"
+-#include "incls/_cInterpreter.cpp.incl"
+-
+-#ifdef CC_INTERP
+-
+-
+-#define VM_JVMTI 1
+-
+-// Build the Interpreter that is used if JVMTI is enabled
+-#include "cInterpretMethod.hpp"
+-
+-// This constructor should only be used to contruct the object to signal
+-// interpreter initialization. All other instances should be created by
+-// the frame manager.
+-cInterpreter::cInterpreter(messages msg) {
+-  if (msg != initialize) ShouldNotReachHere(); 
+-  _msg = msg; 
+-  _self_link = this;
+-  _prev_link = NULL;
+-}
+-
+-// Dummy function so we can determine if a pc is within the interpreter.
+-// This is really a hack. Seems like adding state to thread ala last_Java_sp, etc.
+-// would be cleaner.
+-//
+-void cInterpreter::End_Of_Interpreter(void) {
+-}
+-
+-// Inline static functions for Java Stack and Local manipulation
+-
+-// The implementations are platform dependent. We have to worry about alignment
+-// issues on some machines which can change on the same platform depending on
+-// whether it is an LP64 machine also.
+-#ifdef ASSERT
+-void cInterpreter::verify_stack_tag(intptr_t *tos, frame::Tag tag, int offset) {
+-  if (TaggedStackInterpreter) {
+-    frame::Tag t = (frame::Tag)tos[Interpreter::expr_tag_index_at(-offset)];
+-    assert(t == tag, "stack tag mismatch");
+-  }
+-}
+-#endif // ASSERT
+-
+-address cInterpreter::stack_slot(intptr_t *tos, int offset) {
+-  debug_only(verify_stack_tag(tos, frame::TagValue, offset));
+-  return (address) tos[Interpreter::expr_index_at(-offset)];
+-}
+-
+-jint cInterpreter::stack_int(intptr_t *tos, int offset) {
+-  debug_only(verify_stack_tag(tos, frame::TagValue, offset));
+-  return *((jint*) &tos[Interpreter::expr_index_at(-offset)]);
+-}
+-
+-jfloat cInterpreter::stack_float(intptr_t *tos, int offset) {
+-  debug_only(verify_stack_tag(tos, frame::TagValue, offset));
+-  return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]);
+-}
+-
+-oop cInterpreter::stack_object(intptr_t *tos, int offset) {
+-  debug_only(verify_stack_tag(tos, frame::TagReference, offset));
+-  return (oop)tos [Interpreter::expr_index_at(-offset)];
+-}
+-
+-jdouble cInterpreter::stack_double(intptr_t *tos, int offset) {
+-  debug_only(verify_stack_tag(tos, frame::TagValue, offset));
+-  debug_only(verify_stack_tag(tos, frame::TagValue, offset-1));
+-  return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d;
+-}
+-
+-jlong cInterpreter::stack_long(intptr_t *tos, int offset) {
+-  debug_only(verify_stack_tag(tos, frame::TagValue, offset));
+-  debug_only(verify_stack_tag(tos, frame::TagValue, offset-1));
+-  return ((VMJavaVal64 *) &tos[Interpreter::expr_index_at(-offset)])->l;
+-}
+-
+-void cInterpreter::tag_stack(intptr_t *tos, frame::Tag tag, int offset) {
+-  if (TaggedStackInterpreter)
+-    tos[Interpreter::expr_tag_index_at(-offset)] = (intptr_t)tag;
+-}
+-
+-// only used for value types
+-void cInterpreter::set_stack_slot(intptr_t *tos, address value,
+-                                                        int offset) {
+-  tag_stack(tos, frame::TagValue, offset);
+-  *((address *)&tos[Interpreter::expr_index_at(-offset)]) = value;
+-}
+-
+-void cInterpreter::set_stack_int(intptr_t *tos, int value, 
+-                                                       int offset) {
+-  tag_stack(tos, frame::TagValue, offset);
+-  *((jint *)&tos[Interpreter::expr_index_at(-offset)]) = value;
+-}
+-
+-void cInterpreter::set_stack_float(intptr_t *tos, jfloat value, 
+-                                                         int offset) {
+-  tag_stack(tos, frame::TagValue, offset);
+-  *((jfloat *)&tos[Interpreter::expr_index_at(-offset)]) = value;
+-}
+-
+-void cInterpreter::set_stack_object(intptr_t *tos, oop value, 
+-                                                          int offset) {
+-  tag_stack(tos, frame::TagReference, offset);
+-  *((oop *)&tos[Interpreter::expr_index_at(-offset)]) = value;
+-}
+-
+-// needs to be platform dep for the 32 bit platforms.
+-void cInterpreter::set_stack_double(intptr_t *tos, jdouble value, 
+-                                                          int offset) {
+-  tag_stack(tos, frame::TagValue, offset);
+-  tag_stack(tos, frame::TagValue, offset-1);
+-  ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = value;
+-}
+-
+-void cInterpreter::set_stack_double_from_addr(intptr_t *tos,
+-                                              address addr, int offset) {
+-  tag_stack(tos, frame::TagValue, offset);
+-  tag_stack(tos, frame::TagValue, offset-1);
+-  (((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d =
+-                        ((VMJavaVal64*)addr)->d);
+-}
+-
+-void cInterpreter::set_stack_long(intptr_t *tos, jlong value, 
+-                                                        int offset) {
+-  tag_stack(tos, frame::TagValue, offset);
+-  ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb;
+-  tag_stack(tos, frame::TagValue, offset-1);
+-  ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = value;
+-}
+-
+-void cInterpreter::set_stack_long_from_addr(intptr_t *tos, 
+-                                            address addr, int offset) {
+-  tag_stack(tos, frame::TagValue, offset);
+-  ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb;
+-  tag_stack(tos, frame::TagValue, offset-1);
+-  ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l =
+-                        ((VMJavaVal64*)addr)->l;
+-}
+-
+-// Locals
+-
+-#ifdef ASSERT
+-void cInterpreter::verify_locals_tag(intptr_t *locals, frame::Tag tag,
+-                                     int offset) {
+-  if (TaggedStackInterpreter) {
+-    frame::Tag t = (frame::Tag)locals[Interpreter::local_tag_index_at(-offset)];
+-    assert(t == tag, "locals tag mismatch");
+-  }
+-}
+-#endif // ASSERT
+-address cInterpreter::locals_slot(intptr_t* locals, int offset) {
+-  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
+-  return (address)locals[Interpreter::local_index_at(-offset)];
+-}
+-jint cInterpreter::locals_int(intptr_t* locals, int offset) {
+-  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
+-  return (jint)locals[Interpreter::local_index_at(-offset)];
+-}
+-jfloat cInterpreter::locals_float(intptr_t* locals, int offset) {
+-  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
+-  return (jfloat)locals[Interpreter::local_index_at(-offset)];
+-}
+-oop cInterpreter::locals_object(intptr_t* locals, int offset) {
+-  debug_only(verify_locals_tag(locals, frame::TagReference, offset));
+-  return (oop)locals[Interpreter::local_index_at(-offset)];
+-}
+-jdouble cInterpreter::locals_double(intptr_t* locals, int offset) {
+-  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
+-  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
+-  return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d;
+-}
+-jlong cInterpreter::locals_long(intptr_t* locals, int offset) {
+-  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
+-  debug_only(verify_locals_tag(locals, frame::TagValue, offset+1));
+-  return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l;
+-}
+-
+-// Returns the address of locals value.
+-address cInterpreter::locals_long_at(intptr_t* locals, int offset) {
+-  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
+-  debug_only(verify_locals_tag(locals, frame::TagValue, offset+1));
+-  return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
+-}
+-address cInterpreter::locals_double_at(intptr_t* locals, int offset) {
+-  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
+-  debug_only(verify_locals_tag(locals, frame::TagValue, offset+1));
+-  return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
+-}
+-
+-void cInterpreter::tag_locals(intptr_t *locals, frame::Tag tag, int offset) {
+-  if (TaggedStackInterpreter)
+-    locals[Interpreter::local_tag_index_at(-offset)] = (intptr_t)tag;
+-}
+-
+-// Used for local value or returnAddress
+-void cInterpreter::set_locals_slot(intptr_t *locals,
+-                                   address value, int offset) {
+-  tag_locals(locals, frame::TagValue, offset);
+-  *((address*)&locals[Interpreter::local_index_at(-offset)]) = value;
+-}
+-void cInterpreter::set_locals_int(intptr_t *locals,
+-                                   jint value, int offset) {
+-  tag_locals(locals, frame::TagValue, offset);
+-  *((jint *)&locals[Interpreter::local_index_at(-offset)]) = value;
+-}
+-void cInterpreter::set_locals_float(intptr_t *locals,
+-                                   jfloat value, int offset) {
+-  tag_locals(locals, frame::TagValue, offset);
+-  *((jfloat *)&locals[Interpreter::local_index_at(-offset)]) = value;
+-}
+-void cInterpreter::set_locals_object(intptr_t *locals,
+-                                   oop value, int offset) {
+-  tag_locals(locals, frame::TagReference, offset);
+-  *((oop *)&locals[Interpreter::local_index_at(-offset)]) = value;
+-}
+-void cInterpreter::set_locals_double(intptr_t *locals,
+-                                   jdouble value, int offset) {
+-  tag_locals(locals, frame::TagValue, offset);
+-  tag_locals(locals, frame::TagValue, offset+1);
+-  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = value;
+-}
+-void cInterpreter::set_locals_long(intptr_t *locals,
+-                                   jlong value, int offset) {
+-  tag_locals(locals, frame::TagValue, offset);
+-  tag_locals(locals, frame::TagValue, offset+1);
+-  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = value;
+-}
+-void cInterpreter::set_locals_double_from_addr(intptr_t *locals,
+-                                   address addr, int offset) {
+-  tag_locals(locals, frame::TagValue, offset);
+-  tag_locals(locals, frame::TagValue, offset+1);
+-  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = ((VMJavaVal64*)addr)->d;
+-}
+-void cInterpreter::set_locals_long_from_addr(intptr_t *locals,
+-                                   address addr, int offset) {
+-  tag_locals(locals, frame::TagValue, offset);
+-  tag_locals(locals, frame::TagValue, offset+1);
+-  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = ((VMJavaVal64*)addr)->l;
+-}
+-
+-void cInterpreter::astore(intptr_t* tos,    int stack_offset,
+-                          intptr_t* locals, int locals_offset) {
+-  // Copy tag from stack to locals.  astore's operand can be returnAddress
+-  // and may not be TagReference
+-  if (TaggedStackInterpreter) {
+-    frame::Tag t = (frame::Tag) tos[Interpreter::expr_tag_index_at(-stack_offset)];
+-    locals[Interpreter::local_tag_index_at(-locals_offset)] = (intptr_t)t;
+-  }
+-  intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)];
+-  locals[Interpreter::local_index_at(-locals_offset)] = value;
+-}
+-
+-
+-void cInterpreter::copy_stack_slot(intptr_t *tos, int from_offset,
+-                                   int to_offset) {
+-  if (TaggedStackInterpreter) {
+-    tos[Interpreter::expr_tag_index_at(-to_offset)] =
+-                      (intptr_t)tos[Interpreter::expr_tag_index_at(-from_offset)];
+-  }
+-  tos[Interpreter::expr_index_at(-to_offset)] =
+-                      (intptr_t)tos[Interpreter::expr_index_at(-from_offset)];
+-}
+-
+-void cInterpreter::dup(intptr_t *tos) {
+-  copy_stack_slot(tos, -1, 0);
+-}
+-void cInterpreter::dup2(intptr_t *tos) {
+-  copy_stack_slot(tos, -2, 0);
+-  copy_stack_slot(tos, -1, 1);
+-}
+-
+-void cInterpreter::dup_x1(intptr_t *tos) {
+-  /* insert top word two down */
+-  copy_stack_slot(tos, -1, 0);
+-  copy_stack_slot(tos, -2, -1);
+-  copy_stack_slot(tos, 0, -2);
+-}
+-
+-void cInterpreter::dup_x2(intptr_t *tos) {
+-  /* insert top word three down  */
+-  copy_stack_slot(tos, -1, 0);
+-  copy_stack_slot(tos, -2, -1);
+-  copy_stack_slot(tos, -3, -2);
+-  copy_stack_slot(tos, 0, -3);
+-}
+-void cInterpreter::dup2_x1(intptr_t *tos) {
+-  /* insert top 2 slots three down */
+-  copy_stack_slot(tos, -1, 1);
+-  copy_stack_slot(tos, -2, 0);
+-  copy_stack_slot(tos, -3, -1);
+-  copy_stack_slot(tos, 1, -2);
+-  copy_stack_slot(tos, 0, -3);
+-}
+-void cInterpreter::dup2_x2(intptr_t *tos) {
+-  /* insert top 2 slots four down */
+-  copy_stack_slot(tos, -1, 1);
+-  copy_stack_slot(tos, -2, 0);
+-  copy_stack_slot(tos, -3, -1);
+-  copy_stack_slot(tos, -4, -2);
+-  copy_stack_slot(tos, 1, -3);
+-  copy_stack_slot(tos, 0, -4);
+-}
+-
+-
+-void cInterpreter::swap(intptr_t *tos) {
+-  // swap top two elements
+-  intptr_t val = tos[Interpreter::expr_index_at(1)];
+-  frame::Tag t;
+-  if (TaggedStackInterpreter) {
+-    t = (frame::Tag) tos[Interpreter::expr_tag_index_at(1)];
+-  }
+-  // Copy -2 entry to -1
+-  copy_stack_slot(tos, -2, -1);
+-  // Store saved -1 entry into -2
+-  tos[Interpreter::expr_tag_index_at(2)] = (intptr_t)t;
+-  tos[Interpreter::expr_index_at(2)] = val;
+-}
+-#endif
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/cInterpreter.hpp openjdk/hotspot/src/share/vm/interpreter/cInterpreter.hpp
+--- openjdk6/hotspot/src/share/vm/interpreter/cInterpreter.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/cInterpreter.hpp	1969-12-31 19:00:00.000000000 -0500
+@@ -1,565 +0,0 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)cInterpreter.hpp	1.23 07/05/17 15:54:24 JVM"
+-#endif
+-/*
+- * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
+- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+- *
+- * This code is free software; you can redistribute it and/or modify it
+- * under the terms of the GNU General Public License version 2 only, as
+- * published by the Free Software Foundation.
+- *
+- * This code is distributed in the hope that it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+- * version 2 for more details (a copy is included in the LICENSE file that
+- * accompanied this code).
+- *
+- * You should have received a copy of the GNU General Public License version
+- * 2 along with this work; if not, write to the Free Software Foundation,
+- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+- * CA 95054 USA or visit www.sun.com if you need additional information or
+- * have any questions.
+- *  
+- */
+-
+-#ifdef CC_INTERP
+-
+-// CVM definitions find hotspot equivalents...
+-
+-union VMJavaVal64 {
+-    jlong   l;
+-    jdouble d;
+-    uint32_t      v[2];
+-};
+-
+-
+-typedef class cInterpreter* interpreterState;
+-
+-struct call_message {
+-    methodOop _callee;               /* method to call during call_method request */
+-    address   _callee_entry_point;   /* address to jump to for call_method request */
+-    int       _bcp_advance;          /* size of the invoke bytecode operation */
+-};
+-
+-struct osr_message {
+-    address _osr_buf;                 /* the osr buffer */
+-    address _osr_entry;               /* the entry to the osr method */
+-};
+-
+-// Result returned to frame manager
+-union frame_manager_message {
+-    call_message _to_call;            /* describes callee */
+-    Bytecodes::Code _return_kind;     /* i_return, a_return, ... */
+-    osr_message _osr;                 /* describes the osr */
+-};
+-
+-class cInterpreter : StackObj {
+-friend class AbstractInterpreterGenerator;
+-friend class InterpreterGenerator;
+-friend class InterpreterMacroAssembler;
+-friend class frame;
+-friend class VMStructs;
+-
+-public:
+-    enum messages {
+-         no_request = 0,            // unused
+-         initialize,                // Perform one time interpreter initializations (assumes all switches set)
+-         // status message to C++ interpreter
+-         method_entry,              // initial method entry to interpreter
+-         method_resume,             // frame manager response to return_from_method request (assuming a frame to resume)
+-         deopt_resume,              // returning from a native call into a deopted frame
+-         deopt_resume2,             // deopt resume as a result of a PopFrame
+-         got_monitors,              // frame manager response to more_monitors request
+-         rethrow_exception,         // unwinding and throwing exception
+-         // requests to frame manager from C++ interpreter
+-         call_method,               // request for new frame from interpreter, manager responds with method_entry
+-         do_osr,                    // osr the current method
+-         return_from_method,        // request from interpreter to unwind, manager responds with method_continue
+-         more_monitors,             // need a new monitor
+-	 throwing_exception,        // unwind stack and rethrow
+-	 popping_frame              // unwind call and retry call
+-    };
+-
+-private:
+-    JavaThread*           _thread;        // the vm's java thread pointer
+-    address               _bcp;           // instruction pointer
+-    intptr_t*             _locals;        // local variable pointer
+-    constantPoolCacheOop  _constants;     // constant pool cache
+-    methodOop             _method;        // method being executed
+-    DataLayout*           _mdx;           // compiler profiling data for current bytecode
+-    intptr_t*             _stack;         // expression stack
+-    messages              _msg;           // frame manager <-> interpreter message
+-    frame_manager_message _result;        // result to frame manager
+-    interpreterState      _prev_link;     // previous interpreter state
+-    oop                   _oop_temp;      // mirror for interpreted native, null otherwise
+-    // These are likely platform dependent fields
+-    // jint*  sender_sp;                  // previous stack pointer
+-    intptr_t*             _stack_base;    // base of expression stack
+-    intptr_t*             _stack_limit;   // limit of expression stack
+-    BasicObjectLock*      _monitor_base;  // base of monitors on the native stack
+-
+-
+-public:
+-  // Constructor is only used by the initialization step. All other instances are created
+-  // by the frame manager.
+-  cInterpreter(messages msg);
+-
+-//
+-// Deoptimization support
+-//
+-static void layout_interpreterState(interpreterState to_fill,
+-				    frame* caller,
+-				    frame* interpreter_frame,
+-				    methodOop method,
+-				    intptr_t* locals,
+-				    intptr_t* stack,
+-				    intptr_t* stack_base,
+-				    intptr_t* monitor_base,
+-				    intptr_t* frame_bottom,
+-				    bool top_frame);
+-
+-/*
+- * Generic 32-bit wide "Java slot" definition. This type occurs
+- * in operand stacks, Java locals, object fields, constant pools.
+- */
+-union VMJavaVal32 {
+-    jint     i;
+-    jfloat   f;
+-    oop      r;
+-    uint32_t raw;
+-};
+-
+-/*
+- * Generic 64-bit Java value definition
+- */
+-union VMJavaVal64 {
+-    jlong   l;
+-    jdouble d;
+-    uint32_t      v[2];
+-};
+-
+-/*
+- * Generic 32-bit wide "Java slot" definition. This type occurs
+- * in Java locals, object fields, constant pools, and
+- * operand stacks (as a CVMStackVal32).
+- */
+-typedef union VMSlotVal32 {
+-    VMJavaVal32    j;     /* For "Java" values */
+-    address        a;     /* a return created by jsr or jsr_w */
+-} VMSlotVal32;
+-
+-
+-/*
+- * Generic 32-bit wide stack slot definition.
+- */
+-union VMStackVal32 {
+-    VMJavaVal32    j;     /* For "Java" values */
+-    VMSlotVal32    s;     /* any value from a "slot" or locals[] */
+-};
+-
+-inline JavaThread* thread() { return _thread; }
+-
+-inline address bcp() { return _bcp; }
+-inline void set_bcp(address new_bcp) { _bcp = new_bcp; }
+-
+-inline intptr_t* locals() { return _locals; }
+-
+-inline constantPoolCacheOop constants() { return _constants; }
+-inline methodOop method() { return _method; }
+-inline DataLayout* mdx() { return _mdx; }
+-
+-inline messages msg() { return _msg; }
+-inline void set_msg(messages new_msg) { _msg = new_msg; }
+-
+-inline methodOop callee() { return _result._to_call._callee; }
+-inline void set_callee(methodOop new_callee) { _result._to_call._callee = new_callee; }
+-inline void set_callee_entry_point(address entry) { _result._to_call._callee_entry_point = entry; }
+-inline void set_osr_buf(address buf) { _result._osr._osr_buf = buf; }
+-inline void set_osr_entry(address entry) { _result._osr._osr_entry = entry; }
+-inline int bcp_advance() { return _result._to_call._bcp_advance; }
+-inline void set_bcp_advance(int count) { _result._to_call._bcp_advance = count; }
+-
+-inline void set_return_kind(Bytecodes::Code kind) { _result._return_kind = kind; }
+-
+-inline interpreterState prev() { return _prev_link; }
+-
+-inline intptr_t* stack() { return _stack; }
+-inline void set_stack(intptr_t* new_stack) { _stack = new_stack; }
+-
+-
+-inline intptr_t* stack_base() { return _stack_base; }
+-inline intptr_t* stack_limit() { return _stack_limit; }
+-
+-inline BasicObjectLock* monitor_base() { return _monitor_base; }
+-
+-/*
+- * 64-bit Arithmetic:
+- *
+- * The functions below follow the semantics of the
+- * ladd, land, ldiv, lmul, lor, lxor, and lrem bytecodes,
+- * respectively.
+- */
+-
+-static jlong VMlongAdd(jlong op1, jlong op2);
+-static jlong VMlongAnd(jlong op1, jlong op2);
+-static jlong VMlongDiv(jlong op1, jlong op2);
+-static jlong VMlongMul(jlong op1, jlong op2);
+-static jlong VMlongOr (jlong op1, jlong op2);
+-static jlong VMlongSub(jlong op1, jlong op2);
+-static jlong VMlongXor(jlong op1, jlong op2);
+-static jlong VMlongRem(jlong op1, jlong op2);
+-
+-/*
+- * Shift:
+- *
+- * The functions below follow the semantics of the
+- * lushr, lshl, and lshr bytecodes, respectively.
+- */
+-
+-static jlong VMlongUshr(jlong op1, jint op2);
+-static jlong VMlongShl (jlong op1, jint op2);
+-static jlong VMlongShr (jlong op1, jint op2);
+-
+-/*
+- * Unary:
+- *
+- * Return the negation of "op" (-op), according to
+- * the semantics of the lneg bytecode.
+- */
+-
+-static jlong VMlongNeg(jlong op);
+-
+-/*
+- * Return the complement of "op" (~op)
+- */
+-
+-static jlong VMlongNot(jlong op);
+-
+-
+-/*
+- * Comparisons to 0:
+- */
+-
+-static int32_t VMlongLtz(jlong op);     /* op <= 0 */
+-static int32_t VMlongGez(jlong op);     /* op >= 0 */
+-static int32_t VMlongEqz(jlong op);     /* op == 0 */
+-
+-/*
+- * Between operands:
+- */
+-
+-static int32_t VMlongEq(jlong op1, jlong op2);    /* op1 == op2 */
+-static int32_t VMlongNe(jlong op1, jlong op2);    /* op1 != op2 */
+-static int32_t VMlongGe(jlong op1, jlong op2);    /* op1 >= op2 */
+-static int32_t VMlongLe(jlong op1, jlong op2);    /* op1 <= op2 */
+-static int32_t VMlongLt(jlong op1, jlong op2);    /* op1 <  op2 */
+-static int32_t VMlongGt(jlong op1, jlong op2);    /* op1 >  op2 */
+-
+-/*
+- * Comparisons (returning an jint value: 0, 1, or -1)
+- *
+- * Between operands:
+- *
+- * Compare "op1" and "op2" according to the semantics of the
+- * "lcmp" bytecode.
+- */
+-
+-static int32_t VMlongCompare(jlong op1, jlong op2);
+-
+-/*
+- * Convert int to long, according to "i2l" bytecode semantics
+- */
+-static jlong VMint2Long(jint val);
+-
+-/*
+- * Convert long to int, according to "l2i" bytecode semantics
+- */
+-static jint VMlong2Int(jlong val);
+-
+-/*
+- * Convert long to float, according to "l2f" bytecode semantics
+- */
+-static jfloat VMlong2Float(jlong val);
+-
+-/*
+- * Convert long to double, according to "l2d" bytecode semantics
+- */
+-static jdouble VMlong2Double(jlong val);
+-
+-/*
+- * Java floating-point float value manipulation.
+- *
+- * The result argument is, once again, an lvalue.
+- *
+- * Arithmetic:
+- *
+- * The functions below follow the semantics of the
+- * fadd, fsub, fmul, fdiv, and frem bytecodes,
+- * respectively.
+- */
+-
+-static jfloat VMfloatAdd(jfloat op1, jfloat op2);
+-static jfloat VMfloatSub(jfloat op1, jfloat op2);
+-static jfloat VMfloatMul(jfloat op1, jfloat op2);
+-static jfloat VMfloatDiv(jfloat op1, jfloat op2);
+-static jfloat VMfloatRem(jfloat op1, jfloat op2);
+-
+-/*
+- * Unary:
+- *
+- * Return the negation of "op" (-op), according to
+- * the semantics of the fneg bytecode.
+- */
+-
+-static jfloat VMfloatNeg(jfloat op);
+-
+-/*
+- * Comparisons (returning an int value: 0, 1, or -1)
+- *
+- * Between operands:
+- *
+- * Compare "op1" and "op2" according to the semantics of the
+- * "fcmpl" (direction is -1) or "fcmpg" (direction is 1) bytecodes.
+- */
+-
+-static int32_t VMfloatCompare(jfloat op1, jfloat op2,
+-                              int32_t direction);
+-/*
+- * Conversion:
+- */
+-
+-/*
+- * Convert float to double, according to "f2d" bytecode semantics
+- */
+-
+-static jdouble VMfloat2Double(jfloat op);
+-
+-/*
+- ******************************************
+- * Java double floating-point manipulation.
+- ******************************************
+- *
+- * The result argument is, once again, an lvalue.
+- *
+- * Conversions:
+- */
+-
+-/*
+- * Convert double to int, according to "d2i" bytecode semantics
+- */
+-
+-static jint VMdouble2Int(jdouble val);
+-
+-/*
+- * Convert double to float, according to "d2f" bytecode semantics
+- */
+-
+-static jfloat VMdouble2Float(jdouble val);
+-
+-/*
+- * Convert int to double, according to "i2d" bytecode semantics
+- */
+-
+-static jdouble VMint2Double(jint val);
+-
+-/*
+- * Arithmetic:
+- *
+- * The functions below follow the semantics of the
+- * dadd, dsub, ddiv, dmul, and drem bytecodes, respectively.
+- */
+-
+-static jdouble VMdoubleAdd(jdouble op1, jdouble op2);
+-static jdouble VMdoubleSub(jdouble op1, jdouble op2);
+-static jdouble VMdoubleDiv(jdouble op1, jdouble op2);
+-static jdouble VMdoubleMul(jdouble op1, jdouble op2);
+-static jdouble VMdoubleRem(jdouble op1, jdouble op2);
+-
+-/*
+- * Unary:
+- *
+- * Return the negation of "op" (-op), according to
+- * the semantics of the dneg bytecode.
+- */
+-
+-static jdouble VMdoubleNeg(jdouble op);
+-
+-/*
+- * Comparisons (returning an int32_t value: 0, 1, or -1)
+- *
+- * Between operands:
+- *
+- * Compare "op1" and "op2" according to the semantics of the
+- * "dcmpl" (direction is -1) or "dcmpg" (direction is 1) bytecodes.
+- */
+-
+-static int32_t VMdoubleCompare(jdouble op1, jdouble op2, int32_t direction);
+-
+-/*
+- * Copy two typeless 32-bit words from one location to another.
+- * This is semantically equivalent to:
+- * 
+- * to[0] = from[0];
+- * to[1] = from[1];
+- *
+- * but this interface is provided for those platforms that could
+- * optimize this into a single 64-bit transfer.
+- */
+-
+-static void VMmemCopy64(uint32_t to[2], const uint32_t from[2]);
+-
+-
+-// Arithmetic operations
+-
+-/*
+- * Java arithmetic methods. 
+- * The functions below follow the semantics of the
+- * iadd, isub, imul, idiv, irem, iand, ior, ixor,
+- * and ineg bytecodes, respectively.
+- */
+-
+-static jint VMintAdd(jint op1, jint op2);
+-static jint VMintSub(jint op1, jint op2);
+-static jint VMintMul(jint op1, jint op2);
+-static jint VMintDiv(jint op1, jint op2);
+-static jint VMintRem(jint op1, jint op2);
+-static jint VMintAnd(jint op1, jint op2);
+-static jint VMintOr (jint op1, jint op2);
+-static jint VMintXor(jint op1, jint op2);
+-
+-/*
+- * Shift Operation:
+- * The functions below follow the semantics of the
+- * iushr, ishl, and ishr bytecodes, respectively.
+- */
+-
+-static jint VMintUshr(jint op, jint num);
+-static jint VMintShl (jint op, jint num);
+-static jint VMintShr (jint op, jint num);
+-
+-/*
+- * Unary Operation:
+- *
+- * Return the negation of "op" (-op), according to
+- * the semantics of the ineg bytecode.
+- */
+-
+-static jint VMintNeg(jint op);
+-
+-/*
+- * Int Conversions:
+- */
+-
+-/*
+- * Convert int to float, according to "i2f" bytecode semantics
+- */
+-
+-static jfloat VMint2Float(jint val);
+-
+-/*
+- * Convert int to byte, according to "i2b" bytecode semantics
+- */
+-
+-static jbyte VMint2Byte(jint val);
+-
+-/*
+- * Convert int to char, according to "i2c" bytecode semantics
+- */
+-
+-static jchar VMint2Char(jint val);
+-
+-/*
+- * Convert int to short, according to "i2s" bytecode semantics
+- */
+-
+-static jshort VMint2Short(jint val);
+-
+-/*=========================================================================
+- * Bytecode interpreter operations
+- *=======================================================================*/
+-
+-static void dup(intptr_t *tos);
+-static void dup2(intptr_t *tos);
+-static void dup_x1(intptr_t *tos);    /* insert top word two down */
+-static void dup_x2(intptr_t *tos);    /* insert top word three down  */
+-static void dup2_x1(intptr_t *tos);   /* insert top 2 slots three down */
+-static void dup2_x2(intptr_t *tos);   /* insert top 2 slots four down */
+-static void swap(intptr_t *tos);      /* swap top two elements */
+-
+-// umm don't like this method modifies its object
+-
+-// The Interpreter used when 
+-static void InterpretMethod(interpreterState istate);
+-// The interpreter used if JVMTI needs interpreter events
+-static void InterpretMethodWithChecks(interpreterState istate);
+-static void End_Of_Interpreter(void);
+-
+-// Inline static functions for Java Stack and Local manipulation
+-
+-static address stack_slot(intptr_t *tos, int offset);
+-static jint stack_int(intptr_t *tos, int offset);
+-static jfloat stack_float(intptr_t *tos, int offset);
+-static oop stack_object(intptr_t *tos, int offset);
+-static jdouble stack_double(intptr_t *tos, int offset);
+-static jlong stack_long(intptr_t *tos, int offset);
+-
+-static void tag_stack(intptr_t *tos, frame::Tag tag, int offset);
+-
+-// only used for value types
+-static void set_stack_slot(intptr_t *tos, address value, int offset);
+-static void set_stack_int(intptr_t *tos, int value, int offset);
+-static void set_stack_float(intptr_t *tos, jfloat value, int offset);
+-static void set_stack_object(intptr_t *tos, oop value, int offset);
+-
+-// needs to be platform dep for the 32 bit platforms.
+-static void set_stack_double(intptr_t *tos, jdouble value, int offset);
+-static void set_stack_long(intptr_t *tos, jlong value, int offset);
+-
+-static void set_stack_double_from_addr(intptr_t *tos, address addr, int offset);
+-static void set_stack_long_from_addr(intptr_t *tos, address addr, int offset);
+-
+-// Locals
+-
+-static address locals_slot(intptr_t* locals, int offset);
+-static jint locals_int(intptr_t* locals, int offset);
+-static jfloat locals_float(intptr_t* locals, int offset);
+-static oop locals_object(intptr_t* locals, int offset);
+-static jdouble locals_double(intptr_t* locals, int offset);
+-static jlong locals_long(intptr_t* locals, int offset);
+-
+-static address locals_long_at(intptr_t* locals, int offset);
+-static address locals_double_at(intptr_t* locals, int offset);
+-
+-static void tag_locals(intptr_t *locals, frame::Tag tag, int offset);
+-
+-static void set_locals_slot(intptr_t *locals, address value, int offset);
+-static void set_locals_int(intptr_t *locals, jint value, int offset);
+-static void set_locals_float(intptr_t *locals, jfloat value, int offset);
+-static void set_locals_object(intptr_t *locals, oop value, int offset);
+-static void set_locals_double(intptr_t *locals, jdouble value, int offset);
+-static void set_locals_long(intptr_t *locals, jlong value, int offset);
+-static void set_locals_double_from_addr(intptr_t *locals,
+-                                   address addr, int offset);
+-static void set_locals_long_from_addr(intptr_t *locals,
+-                                   address addr, int offset);
+-
+-static void astore(intptr_t* topOfStack, int stack_offset,
+-                   intptr_t* locals,     int locals_offset);
+-
+-// Support for dup and swap
+-static void copy_stack_slot(intptr_t *tos, int from_offset, int to_offset);
+-
+-#ifndef PRODUCT
+-static void verify_locals_tag(intptr_t *locals, frame::Tag tag, int offset);
+-static void verify_stack_tag(intptr_t *tos, frame::Tag tag, int offset);
+-#endif // PRODUCT
+-
+-    // Platform fields/methods 
+-# include "incls/_cInterpreter_pd.hpp.incl"
+-
+-}; // cInterpreter
+-
+-#endif // CC_INTERP
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/cInterpreter.inline.hpp openjdk/hotspot/src/share/vm/interpreter/cInterpreter.inline.hpp
+--- openjdk6/hotspot/src/share/vm/interpreter/cInterpreter.inline.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/cInterpreter.inline.hpp	1969-12-31 19:00:00.000000000 -0500
+@@ -1,47 +0,0 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)cInterpreter.inline.hpp	1.9 07/05/05 17:05:37 JVM"
+-#endif
+-/*
+- * Copyright 2002 Sun Microsystems, Inc.  All Rights Reserved.
+- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+- *
+- * This code is free software; you can redistribute it and/or modify it
+- * under the terms of the GNU General Public License version 2 only, as
+- * published by the Free Software Foundation.
+- *
+- * This code is distributed in the hope that it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+- * version 2 for more details (a copy is included in the LICENSE file that
+- * accompanied this code).
+- *
+- * You should have received a copy of the GNU General Public License version
+- * 2 along with this work; if not, write to the Free Software Foundation,
+- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+- * CA 95054 USA or visit www.sun.com if you need additional information or
+- * have any questions.
+- *  
+- */
+-
+-// This file holds platform-independent bodies of inline functions for the C++-based interpreter
+-
+-#ifdef CC_INTERP
+-
+-#ifdef ASSERT
+-extern "C" { typedef void (*verify_oop_fn_t)(oop, const char *);};
+-#define VERIFY_OOP(o) \
+-	/*{ verify_oop_fn_t verify_oop_entry = \
+-            *StubRoutines::verify_oop_subroutine_entry_address(); \
+-          if (verify_oop_entry) { \
+-	     (*verify_oop_entry)((o), "Not an oop!"); \
+-	  } \
+-	}*/
+-#else
+-#define VERIFY_OOP(o)
+-#endif
+-
+-// Platform dependent data manipulation
+-# include "incls/_cInterpreter_pd.inline.hpp.incl"
+-#endif // CC_INTERP
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/cInterpretMethod.hpp openjdk/hotspot/src/share/vm/interpreter/cInterpretMethod.hpp
+--- openjdk6/hotspot/src/share/vm/interpreter/cInterpretMethod.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/cInterpretMethod.hpp	1969-12-31 19:00:00.000000000 -0500
+@@ -1,2617 +0,0 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)cInterpretMethod.hpp	1.68 07/05/17 15:54:02 JVM"
+-#endif
+-/*
+- * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
+- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+- *
+- * This code is free software; you can redistribute it and/or modify it
+- * under the terms of the GNU General Public License version 2 only, as
+- * published by the Free Software Foundation.
+- *
+- * This code is distributed in the hope that it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+- * version 2 for more details (a copy is included in the LICENSE file that
+- * accompanied this code).
+- *
+- * You should have received a copy of the GNU General Public License version
+- * 2 along with this work; if not, write to the Free Software Foundation,
+- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+- * CA 95054 USA or visit www.sun.com if you need additional information or
+- * have any questions.
+- *  
+- */
+-
+-#ifndef CINTERPRETERBODY_ONCE
+-#define CINTERPRETERBODY_ONCE
+-#endif
+-
+-/*
+- * This code was converted from the CVM sources to C++ for the HotSpot VM
+- */
+-
+-#ifdef CC_INTERP
+-
+-/*
+- * USELABELS - If using GCC, then use labels for the opcode dispatching
+- * rather than a switch statement. This improves performance because it
+- * gives us the opportunity to have the instructions that calculate the
+- * next opcode to jump to be intermixed with the rest of the instructions
+- * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
+- */
+-#undef USELABELS
+-#ifdef __GNUC__
+-/* 
+-   ASSERT signifies debugging. It is much easier to step through bytecodes if we
+-   don't use the computed goto approach.
+-*/
+-#ifndef ASSERT
+-#define USELABELS
+-#endif
+-#endif
+-
+-#undef CASE
+-#ifdef USELABELS
+-#define CASE(opcode) opc ## opcode
+-#define DEFAULT opc_default
+-#else
+-#define CASE(opcode) case Bytecodes:: opcode
+-#define DEFAULT default
+-#endif
+-
+-/*
+- * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next 
+- * opcode before going back to the top of the while loop, rather than having
+- * the top of the while loop handle it. This provides a better opportunity
+- * for instruction scheduling. Some compilers just do this prefetch
+- * automatically. Some actually end up with worse performance if you
+- * force the prefetch. Solaris gcc seems to do better, but cc does worse.
+- */
+-#undef PREFETCH_OPCCODE
+-#define PREFETCH_OPCCODE
+-
+-/*
+-  Interpreter safepoint: it is expected that the interpreter will have no
+-  handles of its own creation live at an interpreter safepoint. Therefore we
+-  run a HandleMarkCleaner and trash all handles allocated in the call chain
+-  since the JavaCalls::call_helper invocation that initiated the chain.
+-  There really shouldn't be any handles remaining to trash but this is cheap
+-  in relation to a safepoint.
+-*/
+-#define SAFEPOINT                                                                 \
+-    if ( SafepointSynchronize::is_synchronizing()) {                              \
+-        {                                                                         \
+-          /* zap freed handles rather than GC'ing them */                         \
+-          HandleMarkCleaner __hmc(THREAD);                                        \
+-        }                                                                         \
+-        CALL_VM(SafepointSynchronize::block(THREAD), handle_exception);           \
+-    }
+-
+-/*
+- * VM_JAVA_ERROR - Macro for throwing a java exception from
+- * the interpreter loop. Should really be a CALL_VM but there
+- * is no entry point to do the transition to the VM, so we just
+- * do it by hand here.
+- */
+-#define VM_JAVA_ERROR_NO_JUMP(name, msg)                                                  \
+-    DECACHE_STATE();                                                              \
+-    SET_LAST_JAVA_FRAME();                                                        \
+-    {                                                                             \
+-       ThreadInVMfromJava trans(THREAD);                                          \
+-       Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg, NULL, NULL); \
+-    }                                                                             \
+-    RESET_LAST_JAVA_FRAME();                                                      \
+-    CACHE_STATE();                                                                
+-
+-// Normal throw of a java error
+-#define VM_JAVA_ERROR(name, msg)                                                  \
+-    VM_JAVA_ERROR_NO_JUMP(name, msg)                                              \
+-    goto handle_exception;
+-
+-#ifdef PRODUCT
+-#define DO_UPDATE_INSTRUCTION_COUNT(opcode)
+-#else
+-#define DO_UPDATE_INSTRUCTION_COUNT(opcode)                                                          \
+-{                                                                                                    \
+-    BytecodeCounter::_counter_value++;                                                               \
+-    BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++;                                         \
+-    if (StopInterpreterAt && StopInterpreterAt == BytecodeCounter::_counter_value) os::breakpoint(); \
+-    if (TraceBytecodes) {                                                                            \
+-      CALL_VM((void)SharedRuntime::trace_bytecode(THREAD, 0,               \
+-                                   topOfStack[Interpreter::expr_index_at(1)],   \
+-                                   topOfStack[Interpreter::expr_index_at(2)]),  \
+-                                   handle_exception);                      \
+-    }                                                                      \
+-}
+-#endif
+-
+-#undef DEBUGGER_SINGLE_STEP_NOTIFY
+-#ifdef VM_JVMTI
+-/* NOTE: (kbr) This macro must be called AFTER the PC has been
+-   incremented. JvmtiExport::at_single_stepping_point() may cause a
+-   breakpoint opcode to get inserted at the current PC to allow the
+-   debugger to coalesce single-step events.
+-   
+-   As a result if we call at_single_stepping_point() we refetch opcode
+-   to get the current opcode. This will override any other prefetching
+-   that might have occurred.
+-*/
+-#define DEBUGGER_SINGLE_STEP_NOTIFY()                                            \
+-{                                                                                \
+-      if (_jvmti_interp_events) {                                                \
+-        if (JvmtiExport::should_post_single_step()) {                            \
+-          DECACHE_STATE();                                                       \
+-          SET_LAST_JAVA_FRAME();                                                 \
+-          ThreadInVMfromJava trans(THREAD);                                      \
+-          JvmtiExport::at_single_stepping_point(THREAD,                          \
+-                                          istate->method(),                      \
+-                                          pc);                                   \
+-          RESET_LAST_JAVA_FRAME();                                               \
+-          CACHE_STATE();                                                         \
+-          if (THREAD->pop_frame_pending() &&                                     \
+-              !THREAD->pop_frame_in_process()) {                                 \
+-            goto handle_Pop_Frame;                                               \
+-          }                                                                      \
+-          opcode = *pc;                                                          \
+-        }                                                                        \
+-      }                                                                          \
+-}
+-#else
+-#define DEBUGGER_SINGLE_STEP_NOTIFY() 
+-#endif
+-
+-/*
+- * CONTINUE - Macro for executing the next opcode.
+- */
+-#undef CONTINUE
+-#ifdef USELABELS
+-// Have to do the dispatch this way in C++ because otherwise gcc complains about crossing an
+-// initialization (which is the initialization of the table pointer...)
+-#define DISPATCH(opcode) goto *dispatch_table[opcode]
+-#define CONTINUE {                              \
+-        opcode = *pc;                           \
+-        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
+-        DEBUGGER_SINGLE_STEP_NOTIFY();          \
+-        DISPATCH(opcode);                       \
+-    }
+-#else
+-#ifdef PREFETCH_OPCCODE
+-#define CONTINUE {                              \
+-        opcode = *pc;                           \
+-        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
+-        DEBUGGER_SINGLE_STEP_NOTIFY();          \
+-        continue;                               \
+-    }
+-#else
+-#define CONTINUE {                              \
+-        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
+-        DEBUGGER_SINGLE_STEP_NOTIFY();          \
+-        continue;                               \
+-    }
+-#endif
+-#endif
+-
+-// JavaStack Implementation
+-#define MORE_STACK(count)  \
+-    (topOfStack -= ((count) * Interpreter::stackElementWords()))
+-
+-
+-#define UPDATE_PC(opsize) {pc += opsize; }
+-/*
+- * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack.
+- */
+-#undef UPDATE_PC_AND_TOS
+-#define UPDATE_PC_AND_TOS(opsize, stack) \
+-    {pc += opsize; MORE_STACK(stack); }
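+-
+-/*
+- * A standalone sketch (under #if 0, invented values) of the addressing
+- * scheme these macros assume: the expression stack grows toward lower
+- * addresses, topOfStack points at the next free slot, and the element at
+- * logical offset -n lives n slots above it, so a push stores and then
+- * subtracts, exactly as MORE_STACK(count) does. One word per slot is
+- * assumed here; Interpreter::stackElementWords() generalizes that.
+- */
+-#if 0
+-#include <cstdio>
+-#include <cstdint>
+-
+-int main() {
+-    intptr_t slots[8];
+-    intptr_t* tos = &slots[7];      // next free slot; stack grows downward
+-
+-    *tos = 41; tos -= 1;            // push 41 (SET_STACK_xxx + MORE_STACK(1))
+-    *tos = 1;  tos -= 1;            // push 1
+-
+-    // tos[1] is logical offset -1 (the top) and tos[2] is offset -2,
+-    // mirroring STACK_INT(-1) / STACK_INT(-2).
+-    intptr_t sum = tos[1] + tos[2];
+-
+-    tos[2] = sum;                   // iadd-style: result goes to offset -2 ...
+-    tos += 1;                       // ... and one slot is popped (MORE_STACK(-1))
+-
+-    printf("%ld\n", (long)tos[1]);  // prints 42
+-    return 0;
+-}
+-#endif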
+-
+-/*
+- * UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack,
+- * and executing the next opcode. It's somewhat similar to the combination
+- * of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations.
+- */
+-#undef UPDATE_PC_AND_TOS_AND_CONTINUE
+-#ifdef USELABELS
+-#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
+-        pc += opsize; opcode = *pc; MORE_STACK(stack);          \
+-        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
+-        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
+-        DISPATCH(opcode);                                       \
+-    }
+-
+-#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
+-        pc += opsize; opcode = *pc;                             \
+-        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
+-        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
+-        DISPATCH(opcode);                                       \
+-    }
+-#else
+-#ifdef PREFETCH_OPCCODE
+-#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
+-        pc += opsize; opcode = *pc; MORE_STACK(stack);          \
+-        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
+-        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
+-        goto do_continue;                                       \
+-    }
+-
+-#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
+-        pc += opsize; opcode = *pc;                             \
+-        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
+-        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
+-        goto do_continue;                                       \
+-    }
+-#else
+-#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
+-        pc += opsize; MORE_STACK(stack);                \
+-        DO_UPDATE_INSTRUCTION_COUNT(opcode);            \
+-        DEBUGGER_SINGLE_STEP_NOTIFY();                  \
+-        goto do_continue;                               \
+-    }
+-
+-#define UPDATE_PC_AND_CONTINUE(opsize) {                \
+-        pc += opsize;                                   \
+-        DO_UPDATE_INSTRUCTION_COUNT(opcode);            \
+-        DEBUGGER_SINGLE_STEP_NOTIFY();                  \
+-        goto do_continue;                               \
+-    }
+-#endif /* PREFETCH_OPCCODE */
+-#endif /* USELABELS */
+-
+-// About to call a new method: save the adjusted pc and return to the frame manager
+-#define UPDATE_PC_AND_RETURN(opsize)  \
+-   DECACHE_TOS();                     \
+-   istate->set_bcp(pc+opsize);        \
+-   return;
+-
+-
+-#define METHOD istate->method()
+-#define INVOCATION_COUNT METHOD->invocation_counter()
+-#define BACKEDGE_COUNT METHOD->backedge_counter()
+-
+-
+-#define INCR_INVOCATION_COUNT INVOCATION_COUNT->increment()
+-#define OSR_REQUEST(res, branch_pc) \
+-            CALL_VM(res=InterpreterRuntime::frequency_counter_overflow(THREAD, branch_pc), handle_exception);
+-/*
+- * For those opcodes that need to have a GC point on a backwards branch
+- */
+-
+-// Backedge counting is kind of strange. The asm interpreter will increment
+-// the backedge counter as a separate counter, but it does its comparisons
+-// to the (scaled) sum of the invocation counter and backedge count to make
+-// a decision. Seems kind of odd to sum them together like that.
+-
+-// skip is the delta from the current bcp/bci to the target; branch_pc is the pre-branch bcp
+-
+-
+-#define DO_BACKEDGE_CHECKS(skip, branch_pc)                                                         \
+-    if ((skip) <= 0) {                                                                              \
+-      if (UseCompiler && UseLoopCounter) {                                                          \
+-        bool do_OSR = UseOnStackReplacement;                                                        \
+-        BACKEDGE_COUNT->increment();                                                                \
+-        if (do_OSR) do_OSR = BACKEDGE_COUNT->reached_InvocationLimit();                             \
+-        if (do_OSR) {                                                                               \
+-          nmethod*  osr_nmethod;                                                                    \
+-          OSR_REQUEST(osr_nmethod, branch_pc);                                                      \
+-          if (osr_nmethod->osr_entry_bci() != InvalidOSREntryBci) {                                 \
+-            intptr_t* buf;                                                                          \
+-            CALL_VM(buf=SharedRuntime::OSR_migration_begin(THREAD), handle_exception);              \
+-            istate->set_msg(do_osr);                                                                \
+-            istate->set_osr_buf((address)buf);                                                      \
+-            istate->set_osr_entry(osr_nmethod->osr_entry());                                        \
+-            return;                                                                                 \
+-          }                                                                                         \
+-        } else {                                                                                    \
+-          INCR_INVOCATION_COUNT;                                                                    \
+-          SAFEPOINT;                                                                                \
+-        }                                                                                           \
+-      }  /* UseCompiler ... */                                                                      \
+-      INCR_INVOCATION_COUNT;                                                                        \
+-      SAFEPOINT;                                                                                    \
+-    }
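+-
+-/*
+- * A toy standalone sketch (under #if 0; every number is invented) of the
+- * policy described above: back-edges bump a counter, and once the combined
+- * invocation + backedge count crosses a threshold, an OSR compilation is
+- * requested, much as DO_BACKEDGE_CHECKS does via OSR_REQUEST.
+- */
+-#if 0
+-#include <cstdio>
+-
+-int main() {
+-    int invocations = 100;          // pretend invocation counter value
+-    int backedges   = 0;
+-    const int limit = 140;          // pretend InvocationLimit-style threshold
+-
+-    for (int i = 0; i < 60; i++) {  // a loop with 60 backwards branches
+-        backedges++;                // BACKEDGE_COUNT->increment()
+-        if (invocations + backedges >= limit) {          // the summed-count test
+-            printf("request OSR at back-edge %d\n", i);  // prints 39
+-            break;
+-        }
+-    }
+-    return 0;
+-}
+-#endif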
+-
+-/*
+- * Macros for accessing the stack.
+- */
+-#undef STACK_INT
+-#undef STACK_FLOAT
+-#undef STACK_OBJECT
+-#undef STACK_DOUBLE
+-#undef STACK_LONG
+-// JavaStack Implementation
+-
+-#define STACK_SLOT(offset)       stack_slot(topOfStack, (offset))
+-#define STACK_INT(offset)        stack_int(topOfStack, (offset))
+-#define STACK_FLOAT(offset)      stack_float(topOfStack, (offset))
+-#define STACK_OBJECT(offset)     stack_object(topOfStack, (offset))
+-#define STACK_DOUBLE(offset)     stack_double(topOfStack, (offset))
+-#define STACK_LONG(offset)       stack_long(topOfStack, (offset))
+-
+-
+-#define SET_STACK_SLOT(value, offset)            \
+-        set_stack_slot(topOfStack, (value), (offset))
+-#define SET_STACK_INT(value, offset)             \
+-        set_stack_int(topOfStack, (value), (offset))
+-#define SET_STACK_FLOAT(value, offset)           \
+-        set_stack_float(topOfStack, (value), (offset))
+-#define SET_STACK_OBJECT(value, offset)          \
+-        set_stack_object(topOfStack, (value), (offset))
+-#define SET_STACK_DOUBLE(value, offset)          \
+-        set_stack_double(topOfStack, (value), (offset))
+-#define SET_STACK_DOUBLE_FROM_ADDR(addr, offset) \
+-        set_stack_double_from_addr(topOfStack, (addr), (offset))
+-#define SET_STACK_LONG(value, offset)            \
+-        set_stack_long(topOfStack, (value), (offset))
+-#define SET_STACK_LONG_FROM_ADDR(addr, offset)   \
+-        set_stack_long_from_addr(topOfStack, (addr), (offset))
+-
+-#define LOCALS_SLOT(offset)      locals_slot(locals, (offset))
+-#define LOCALS_INT(offset)       locals_int(locals,  (offset))
+-#define LOCALS_FLOAT(offset)     locals_float(locals, (offset))
+-#define LOCALS_OBJECT(offset)    locals_object(locals, (offset))
+-#define LOCALS_DOUBLE(offset)    locals_double(locals, (offset))
+-#define LOCALS_LONG(offset)      locals_long(locals, (offset))
+-#define LOCALS_LONG_AT(offset)   locals_long_at(locals, (offset))
+-#define LOCALS_DOUBLE_AT(offset) locals_double_at(locals, (offset))
+-
+-
+-#define SET_LOCALS_SLOT(value, offset)            \
+-        set_locals_slot(locals, (value), (offset))
+-#define SET_LOCALS_INT(value, offset)             \
+-        set_locals_int(locals, (value), (offset))
+-#define SET_LOCALS_FLOAT(value, offset)           \
+-        set_locals_float(locals, (value), (offset))
+-#define SET_LOCALS_OBJECT(value, offset)          \
+-        set_locals_object(locals, (value), (offset))
+-#define SET_LOCALS_DOUBLE(value, offset)          \
+-        set_locals_double(locals, (value), (offset))
+-#define SET_LOCALS_LONG(value, offset)            \
+-        set_locals_long(locals, (value), (offset))
+-#define SET_LOCALS_DOUBLE_FROM_ADDR(addr, offset) \
+-        set_locals_double_from_addr(locals, (addr), (offset))
+-#define SET_LOCALS_LONG_FROM_ADDR(addr, offset)   \
+-        set_locals_long_from_addr(locals, (addr), (offset))
+-
+-
+-/*
+- * Macros for caching and flushing the interpreter state. Some local
+- * variables need to be flushed out to the frame before we do certain
+- * things (like pushing frames or becoming GC safe), and some need to
+- * be recached later (like after popping a frame). We could use one
+- * macro to cache or decache everything, but this would be less than
+- * optimal: we don't always need to cache or decache everything,
+- * since some things are known to be already cached or decached.
+- */
+-#undef DECACHE_TOS
+-#undef CACHE_TOS
+-#undef CACHE_PREV_TOS
+-#define DECACHE_TOS()    istate->set_stack(topOfStack);
+-
+-#define CACHE_TOS()      topOfStack = (intptr_t *)istate->stack();
+-
+-#undef DECACHE_PC
+-#undef CACHE_PC
+-#define DECACHE_PC()    istate->set_bcp(pc);
+-#define CACHE_PC()      pc = istate->bcp();
+-#define CACHE_CP()      cp = istate->constants();
+-#define CACHE_LOCALS()  locals = istate->locals();
+-#undef CACHE_FRAME
+-#define CACHE_FRAME()   
+- 
+-/*
+- * CHECK_NULL - Macro for throwing a NullPointerException if the object
+- * passed is a null ref.
+- * On some architectures/platforms it should be possible to do this implicitly
+- */
+-#undef CHECK_NULL
+-#define CHECK_NULL(obj_)                                                 \
+-    if ((obj_) == 0) {                                                   \
+-        VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), "");  \
+-    }
+-
+-#define VMdoubleConstZero() 0.0
+-#define VMdoubleConstOne() 1.0
+-#define VMlongConstZero() (max_jlong-max_jlong)
+-#define VMlongConstOne() ((max_jlong-max_jlong)+1)
+-
+-/*
+- * Alignment
+- */
+-/* #define VMalignWordUp(val)          (((juint)(val) + 3) & ~3) */
+-#define VMalignWordUp(val)          (((uintptr_t)(val) + 3) & ~3)
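+-
+-/* e.g. VMalignWordUp(5) == 8 and VMalignWordUp(8) == 8: adding 3 and then
+-   clearing the two low bits rounds up to the next 4-byte boundary. */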
+-
+-// Decache the interpreter state that the interpreter modifies directly (GC modifications are indirect)
+-#define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS();
+-
+-// Reload interpreter state after calling the VM or a possible GC
+-#define CACHE_STATE()   \
+-        CACHE_TOS();    \
+-        CACHE_PC();     \
+-        CACHE_CP();     \
+-        CACHE_LOCALS();
+-
+-// Call the VM; don't check for pending exceptions
+-#define CALL_VM_NOCHECK(func)                                     \
+-          DECACHE_STATE();                                        \
+-          SET_LAST_JAVA_FRAME();                                  \
+-          func;                                                   \
+-          RESET_LAST_JAVA_FRAME();                                \
+-          CACHE_STATE();                                          \
+-          if (THREAD->pop_frame_pending() &&                      \
+-              !THREAD->pop_frame_in_process()) {                  \
+-            goto handle_Pop_Frame;                                \
+-          }
+-
+-// Call the VM and check for pending exceptions
+-#define CALL_VM(func, label) {                                    \
+-          CALL_VM_NOCHECK(func);                                  \
+-          if (THREAD->pending_exception()) goto label;            \
+-        }
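+-
+-/*
+- * A standalone sketch (under #if 0, hypothetical types) of the shape the
+- * CALL_VM macros give every VM call: flush the cached pc/tos into the
+- * frame, make the call that may block, GC or throw, reload the possibly
+- * updated state, then check for a pending exception before continuing.
+- */
+-#if 0
+-#include <cstdio>
+-
+-struct State  { int pc; int tos; };         // stand-in for interpreterState
+-struct Thread { bool pending_exception; };  // stand-in for JavaThread
+-
+-static void vm_call(State& frame, Thread&) {
+-    frame.pc += 1;                  // e.g. the VM rewrote the saved pc
+-}
+-
+-int main() {
+-    State frame = { 10, 3 };
+-    Thread thread = { false };
+-
+-    int pc = frame.pc, tos = frame.tos;  // the cached "register" copies
+-    pc += 2;                             // interpret using the cached copies
+-
+-    frame.pc = pc; frame.tos = tos;      // DECACHE_STATE()
+-    vm_call(frame, thread);              // the call the macro wraps
+-    pc = frame.pc; tos = frame.tos;      // CACHE_STATE()
+-
+-    if (thread.pending_exception)        // CALL_VM's pending-exception check
+-        return 1;                        // i.e. goto handle_exception
+-    printf("pc=%d tos=%d\n", pc, tos);   // pc=13: the VM's update is visible
+-    return 0;
+-}
+-#endif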
+-
+-/*
+- * cInterpreter::InterpretMethod(interpreterState istate)
+- * cInterpreter::InterpretMethodWithChecks(interpreterState istate)
+- *
+- * The real deal. This is where bytecodes actually get interpreted.
+- * Basically it's a big while loop that iterates until we return from
+- * the method passed in.
+- *
+- * The InterpretMethodWithChecks is used if JVMTI is enabled.
+- *
+- */
+-#if defined(VM_JVMTI)
+-void
+-cInterpreter::InterpretMethodWithChecks(interpreterState istate) {
+-#else
+-void
+-cInterpreter::InterpretMethod(interpreterState istate) {
+-#endif
+-
+-  // In order to simplify some tests based on switches set at runtime,
+-  // we invoke the interpreter a single time after the switches are enabled
+-  // and set simpler-to-test variables, rather than using method calls or
+-  // complex boolean expressions.
+-
+-  static int initialized = 0;
+-#ifdef VM_JVMTI
+-  static bool _jvmti_interp_events = 0;
+-#endif
+-
+-  static int _compiling;  // (UseCompiler || CountCompiledCalls)
+-
+-#ifdef ASSERT
+-  // Verify linkages.
+-  interpreterState l = istate;
+-  do {
+-    assert(l == l->_self_link, "bad link");
+-    l = l->_prev_link;
+-  } while (l != NULL);
+-  // Screwups with stack management usually cause us to overwrite istate;
+-  // save a copy so we can verify it.
+-  interpreterState orig = istate;
+-#endif
+-
+-  static volatile jbyte* _byte_map_base; // adjusted card table base for oop store barrier
+-
+-  register intptr_t*        topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */
+-  register address          pc = istate->bcp();
+-  register jubyte opcode;
+-  register intptr_t*        locals = istate->locals();
+-  register constantPoolCacheOop  cp = istate->constants(); // method()->constants()->cache()
+-#ifdef LOTS_OF_REGS
+-  register JavaThread*      THREAD = istate->thread();
+-  register volatile jbyte*  BYTE_MAP_BASE = _byte_map_base;
+-#else
+-#undef THREAD
+-#define THREAD istate->thread()
+-#undef BYTE_MAP_BASE
+-#define BYTE_MAP_BASE _byte_map_base
+-#endif
+-
+-#ifdef USELABELS
+-  const static void* const opclabels_data[256] = { 
+-/* 0x00 */ &&opc_nop,     &&opc_aconst_null,&&opc_iconst_m1,&&opc_iconst_0,
+-/* 0x04 */ &&opc_iconst_1,&&opc_iconst_2,   &&opc_iconst_3, &&opc_iconst_4,
+-/* 0x08 */ &&opc_iconst_5,&&opc_lconst_0,   &&opc_lconst_1, &&opc_fconst_0,
+-/* 0x0C */ &&opc_fconst_1,&&opc_fconst_2,   &&opc_dconst_0, &&opc_dconst_1,
+-
+-/* 0x10 */ &&opc_bipush, &&opc_sipush, &&opc_ldc,    &&opc_ldc_w,
+-/* 0x14 */ &&opc_ldc2_w, &&opc_iload,  &&opc_lload,  &&opc_fload,
+-/* 0x18 */ &&opc_dload,  &&opc_aload,  &&opc_iload_0,&&opc_iload_1,
+-/* 0x1C */ &&opc_iload_2,&&opc_iload_3,&&opc_lload_0,&&opc_lload_1,
+-
+-/* 0x20 */ &&opc_lload_2,&&opc_lload_3,&&opc_fload_0,&&opc_fload_1,
+-/* 0x24 */ &&opc_fload_2,&&opc_fload_3,&&opc_dload_0,&&opc_dload_1,
+-/* 0x28 */ &&opc_dload_2,&&opc_dload_3,&&opc_aload_0,&&opc_aload_1,
+-/* 0x2C */ &&opc_aload_2,&&opc_aload_3,&&opc_iaload, &&opc_laload,
+-
+-/* 0x30 */ &&opc_faload,  &&opc_daload,  &&opc_aaload,  &&opc_baload,
+-/* 0x34 */ &&opc_caload,  &&opc_saload,  &&opc_istore,  &&opc_lstore,
+-/* 0x38 */ &&opc_fstore,  &&opc_dstore,  &&opc_astore,  &&opc_istore_0,
+-/* 0x3C */ &&opc_istore_1,&&opc_istore_2,&&opc_istore_3,&&opc_lstore_0,
+-
+-/* 0x40 */ &&opc_lstore_1,&&opc_lstore_2,&&opc_lstore_3,&&opc_fstore_0,
+-/* 0x44 */ &&opc_fstore_1,&&opc_fstore_2,&&opc_fstore_3,&&opc_dstore_0,
+-/* 0x48 */ &&opc_dstore_1,&&opc_dstore_2,&&opc_dstore_3,&&opc_astore_0,
+-/* 0x4C */ &&opc_astore_1,&&opc_astore_2,&&opc_astore_3,&&opc_iastore,
+-
+-/* 0x50 */ &&opc_lastore,&&opc_fastore,&&opc_dastore,&&opc_aastore,
+-/* 0x54 */ &&opc_bastore,&&opc_castore,&&opc_sastore,&&opc_pop,
+-/* 0x58 */ &&opc_pop2,   &&opc_dup,    &&opc_dup_x1, &&opc_dup_x2,
+-/* 0x5C */ &&opc_dup2,   &&opc_dup2_x1,&&opc_dup2_x2,&&opc_swap,
+-
+-/* 0x60 */ &&opc_iadd,&&opc_ladd,&&opc_fadd,&&opc_dadd,
+-/* 0x64 */ &&opc_isub,&&opc_lsub,&&opc_fsub,&&opc_dsub,
+-/* 0x68 */ &&opc_imul,&&opc_lmul,&&opc_fmul,&&opc_dmul,
+-/* 0x6C */ &&opc_idiv,&&opc_ldiv,&&opc_fdiv,&&opc_ddiv,
+-
+-/* 0x70 */ &&opc_irem, &&opc_lrem, &&opc_frem,&&opc_drem,
+-/* 0x74 */ &&opc_ineg, &&opc_lneg, &&opc_fneg,&&opc_dneg,
+-/* 0x78 */ &&opc_ishl, &&opc_lshl, &&opc_ishr,&&opc_lshr,
+-/* 0x7C */ &&opc_iushr,&&opc_lushr,&&opc_iand,&&opc_land,
+-
+-/* 0x80 */ &&opc_ior, &&opc_lor,&&opc_ixor,&&opc_lxor,
+-/* 0x84 */ &&opc_iinc,&&opc_i2l,&&opc_i2f, &&opc_i2d,
+-/* 0x88 */ &&opc_l2i, &&opc_l2f,&&opc_l2d, &&opc_f2i,
+-/* 0x8C */ &&opc_f2l, &&opc_f2d,&&opc_d2i, &&opc_d2l,
+-
+-/* 0x90 */ &&opc_d2f,  &&opc_i2b,  &&opc_i2c,  &&opc_i2s,
+-/* 0x94 */ &&opc_lcmp, &&opc_fcmpl,&&opc_fcmpg,&&opc_dcmpl,
+-/* 0x98 */ &&opc_dcmpg,&&opc_ifeq, &&opc_ifne, &&opc_iflt,
+-/* 0x9C */ &&opc_ifge, &&opc_ifgt, &&opc_ifle, &&opc_if_icmpeq,
+-
+-/* 0xA0 */ &&opc_if_icmpne,&&opc_if_icmplt,&&opc_if_icmpge,  &&opc_if_icmpgt,
+-/* 0xA4 */ &&opc_if_icmple,&&opc_if_acmpeq,&&opc_if_acmpne,  &&opc_goto,
+-/* 0xA8 */ &&opc_jsr,      &&opc_ret,      &&opc_tableswitch,&&opc_lookupswitch,
+-/* 0xAC */ &&opc_ireturn,  &&opc_lreturn,  &&opc_freturn,    &&opc_dreturn,
+-
+-/* 0xB0 */ &&opc_areturn,     &&opc_return,         &&opc_getstatic,    &&opc_putstatic,
+-/* 0xB4 */ &&opc_getfield,    &&opc_putfield,       &&opc_invokevirtual,&&opc_invokespecial,
+-/* 0xB8 */ &&opc_invokestatic,&&opc_invokeinterface,NULL,               &&opc_new,
+-/* 0xBC */ &&opc_newarray,    &&opc_anewarray,      &&opc_arraylength,  &&opc_athrow,
+-
+-/* 0xC0 */ &&opc_checkcast,   &&opc_instanceof,     &&opc_monitorenter, &&opc_monitorexit,
+-/* 0xC4 */ &&opc_wide,        &&opc_multianewarray, &&opc_ifnull,       &&opc_ifnonnull,
+-/* 0xC8 */ &&opc_goto_w,      &&opc_jsr_w,          &&opc_breakpoint,   &&opc_fast_igetfield,
+-/* 0xCC */ &&opc_fast_agetfield,&&opc_fast_aload_0,  &&opc_fast_iaccess_0, &&opc_fast_aaccess_0,
+-
+-/* 0xD0 */ &&opc_fast_linearswitch, &&opc_fast_binaryswitch, &&opc_return_register_finalizer,      &&opc_default,
+-/* 0xD4 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
+-/* 0xD8 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
+-/* 0xDC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
+-
+-/* 0xE0 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
+-/* 0xE4 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
+-/* 0xE8 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
+-/* 0xEC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
+-
+-/* 0xF0 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
+-/* 0xF4 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
+-/* 0xF8 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
+-/* 0xFC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default
+-  };
+-  register uintptr_t *dispatch_table = (uintptr_t*)&opclabels_data[0];
+-#endif /* USELABELS */
+-
+-#ifdef ASSERT
+-  // this will trigger a VERIFY_OOP on entry
+-  if (istate->msg() != initialize && ! istate->method()->is_static()) {
+-    oop rcvr = LOCALS_OBJECT(0);
+-  }
+-#endif
+-
+-  /* QQQ this should be a stack method so we don't need to know the actual direction */
+-  assert(istate->msg() == initialize ||
+-         topOfStack >= istate->stack_limit() &&
+-         topOfStack < istate->stack_base(), 
+-         "Stack top out of range");
+-
+-  switch (istate->msg()) {
+-    case initialize: {
+-      if (initialized++) ShouldNotReachHere(); // Only one initialize call
+-      _compiling = (UseCompiler || CountCompiledCalls);
+-#ifdef VM_JVMTI
+-      _jvmti_interp_events = JvmtiExport::can_post_interpreter_events();
+-#endif
+-      BarrierSet* bs = Universe::heap()->barrier_set();
+-      assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
+-      _byte_map_base = (volatile jbyte*)(((CardTableModRefBS*)bs)->byte_map_base);
+-      return;
+-    }
+-    break;
+-    case method_entry: {
+-      THREAD->set_do_not_unlock();
+-      // count invocations
+-      assert(initialized, "Interpreter not initialized");
+-      if (_compiling) {
+-        if (ProfileInterpreter) {
+-          istate->method()->increment_interpreter_invocation_count();
+-        }
+-        INCR_INVOCATION_COUNT;
+-        if (istate->method()->invocation_counter()->has_overflowed()) {
+-            CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception);
+-
+-            // We no longer retry on a counter overflow
+-
+-            // istate->set_msg(retry_method);
+-            // THREAD->clr_do_not_unlock();
+-            // return;
+-        }
+-        SAFEPOINT;
+-      }
+-
+-#ifdef HACK
+-      {
+-        ResourceMark rm;
+-        char *method_name = istate->method()->name_and_sig_as_C_string();
+-        if (strstr(method_name, "SecurityManager$1") != NULL) os::breakpoint();
+-      }
+-#endif // HACK
+-
+-
+-      // lock method if synchronized
+-      if (istate->method()->is_synchronized()) {
+-          // oop rcvr = locals[0].j.r;
+-          oop rcvr;
+-          if (istate->method()->is_static()) {
+-            rcvr = istate->method()->constants()->pool_holder()->klass_part()->java_mirror();
+-          } else {
+-            rcvr = LOCALS_OBJECT(0);
+-          }
+-          // The initial monitor is ours for the taking
+-          BasicObjectLock* mon = &istate->monitor_base()[-1];
+-          oop monobj = mon->obj();
+-          assert(mon->obj() == rcvr, "method monitor mis-initialized");
+-
+-          markOop displaced = rcvr->mark()->set_unlocked();
+-          mon->lock()->set_displaced_header(displaced);
+-          if (Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
+-            // Is it the simple recursive case?
+-            if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
+-              mon->lock()->set_displaced_header(NULL);
+-            } else {
+-              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
+-            }
+-          }
+-      }
+-      THREAD->clr_do_not_unlock();
+-
+-      // Notify jvmti
+-#ifdef VM_JVMTI
+-      if (_jvmti_interp_events) {
+-        // Whenever JVMTI puts a thread in interp_only_mode, method
+-        // entry/exit events are sent for that thread to track stack depth.  
+-        if (THREAD->is_interp_only_mode()) {
+-          CALL_VM(InterpreterRuntime::post_method_entry(THREAD), 
+-                  handle_exception);
+-        }
+-      }
+-#endif /* VM_JVMTI */
+-
+-      goto run;
+-    }
+-
+-    case popping_frame: {
+-      // returned from a java call to pop the frame, restart the call
+-      // clear the message so we don't confuse ourselves later
+-      assert(THREAD->pop_frame_in_process(), "wrong frame pop state");
+-      istate->set_msg(no_request);
+-      THREAD->clr_pop_frame_in_process();
+-      goto run;
+-    }
+-
+-    case method_resume: {
+-#ifdef HACK
+-      {
+-        ResourceMark rm;
+-        char *method_name = istate->method()->name_and_sig_as_C_string();
+-        if (strstr(method_name, "SecurityManager$1") != NULL) os::breakpoint();
+-      }
+-#endif // HACK
+-      // returned from a java call, continue executing.
+-      if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) {
+-        goto handle_Pop_Frame;
+-      }
+- 
+-      if (THREAD->has_pending_exception()) goto handle_exception;
+-      // Advance the pc by the saved size of the invoke bytecode
+-      UPDATE_PC(istate->bcp_advance());
+-      goto run;
+-    }
+-
+-    case deopt_resume2: {
+-      // Returned from an opcode that will reexecute. Deopt was
+-      // a result of a PopFrame request.
+-      //
+-      goto run;
+-    }
+-
+-    case deopt_resume: {
+-      // Returned from an opcode that has completed. The stack has
+-      // the result; all we need to do is skip across the bytecode
+-      // and continue (assuming there is no exception pending).
+-      // 
+-      // compute continuation length
+-      //
+-      UPDATE_PC(Bytecodes::length_at(pc));
+-      if (THREAD->has_pending_exception()) goto handle_exception;
+-      goto run;
+-    }
+-    case got_monitors: {
+-      // continue locking now that we have a monitor to use
+-      // we expect to find a newly allocated monitor at the "top" of the monitor stack.
+-      oop lockee = STACK_OBJECT(-1);
+-      // dereferencing lockee ought to provoke an implicit null check
+-      // find a free monitor
+-      BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
+-      assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
+-      entry->set_obj(lockee);
+-
+-      markOop displaced = lockee->mark()->set_unlocked();
+-      entry->lock()->set_displaced_header(displaced);
+-      if (Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
+-        // Is it the simple recursive case?
+-        if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
+-          entry->lock()->set_displaced_header(NULL);
+-        } else {
+-          CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
+-        }
+-      }
+-      UPDATE_PC_AND_TOS(1, -1);
+-      goto run;
+-    }
+-    default: {
+-      fatal("Unexpected message from frame manager");
+-    }
+-  }
+-
+-run:
+-
+-  DO_UPDATE_INSTRUCTION_COUNT(*pc)
+-  DEBUGGER_SINGLE_STEP_NOTIFY();
+-#ifdef PREFETCH_OPCCODE
+-  opcode = *pc;  /* prefetch first opcode */
+-#endif
+-
+-#ifndef USELABELS
+-  while (1)
+-#endif
+-  {
+-#ifndef PREFETCH_OPCCODE
+-      opcode = *pc;
+-#endif
+-      // Seems like this happens twice per opcode. At worst it is only
+-      // needed at entry to the loop.
+-      // DEBUGGER_SINGLE_STEP_NOTIFY();
+-      /* Using this label avoids double breakpoints when quickening and
+-       * when returning from transition frames.
+-       */
+-  opcode_switch:
+-      assert(istate == orig, "Corrupted istate");
+-      /* QQQ Hmm this has knowledge of direction, ought to be a stack method */
+-      assert(topOfStack >= istate->stack_limit(), "Stack overrun");
+-      assert(topOfStack < istate->stack_base(), "Stack underrun");
+-
+-#ifdef USELABELS
+-      DISPATCH(opcode);
+-#else
+-      switch (opcode)
+-#endif
+-      {
+-      CASE(_nop):
+-          UPDATE_PC_AND_CONTINUE(1);
+-
+-          /* Push miscellaneous constants onto the stack. */
+-
+-      CASE(_aconst_null):
+-          SET_STACK_OBJECT(NULL, 0);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
+-
+-#undef  OPC_CONST_n
+-#define OPC_CONST_n(opcode, const_type, value)                          \
+-      CASE(opcode):                                                     \
+-          SET_STACK_ ## const_type(value, 0);                           \
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
+-
+-          OPC_CONST_n(_iconst_m1,   INT,       -1);
+-          OPC_CONST_n(_iconst_0,    INT,        0);
+-          OPC_CONST_n(_iconst_1,    INT,        1);
+-          OPC_CONST_n(_iconst_2,    INT,        2);
+-          OPC_CONST_n(_iconst_3,    INT,        3);
+-          OPC_CONST_n(_iconst_4,    INT,        4);
+-          OPC_CONST_n(_iconst_5,    INT,        5);
+-          OPC_CONST_n(_fconst_0,    FLOAT,      0.0);
+-          OPC_CONST_n(_fconst_1,    FLOAT,      1.0);
+-          OPC_CONST_n(_fconst_2,    FLOAT,      2.0);
+-
+-#if 0
+-#undef  OPC_CONST2_n
+-#define OPC_CONST2_n(opcname, value, key)                               \
+-      CASE(_##opcname):                                                 \
+-      {                                                                 \
+-         VM##key##2Jvm(&STACK_INFO(DTOS(0)).raw,                        \
+-             VM##key##Const##value());                                  \
+-         UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);                          \
+-      }
+-#endif
+-
+-#undef  OPC_CONST2_n
+-#define OPC_CONST2_n(opcname, value, key, kind)                         \
+-      CASE(_##opcname):                                                 \
+-      {                                                                 \
+-          SET_STACK_ ## kind(VM##key##Const##value(), 1);               \
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);                         \
+-      }
+-         OPC_CONST2_n(dconst_0, Zero, double, DOUBLE);
+-         OPC_CONST2_n(dconst_1, One,  double, DOUBLE);
+-         OPC_CONST2_n(lconst_0, Zero, long, LONG);
+-         OPC_CONST2_n(lconst_1, One,  long, LONG);
+-
+-         /* Load constant from constant pool: */
+-
+-          /* Push a 1-byte signed integer value onto the stack. */
+-      CASE(_bipush):
+-          SET_STACK_INT((jbyte)(pc[1]), 0);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
+-
+-          /* Push a 2-byte signed integer constant onto the stack. */
+-      CASE(_sipush):
+-          SET_STACK_INT((int16_t)Bytes::get_Java_u2(pc + 1), 0);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
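+-
+-/*
+- * A standalone sketch (under #if 0) of the operand decoding the two cases
+- * above rely on: bipush sign-extends one immediate byte, while sipush
+- * assembles two big-endian bytes and sign-extends the result, which is
+- * what the (jbyte) and (int16_t)Bytes::get_Java_u2 casts express.
+- * get_java_u2 below is a stand-in for Bytes::get_Java_u2.
+- */
+-#if 0
+-#include <cstdio>
+-#include <cstdint>
+-
+-static uint16_t get_java_u2(const unsigned char* p) {
+-    return (uint16_t)((p[0] << 8) | p[1]);   // class files are big-endian
+-}
+-
+-int main() {
+-    const unsigned char bipush_imm[] = { 0xF0 };        // -16 as a signed byte
+-    const unsigned char sipush_imm[] = { 0xFF, 0x85 };  // -123, big-endian
+-
+-    int b = (int8_t)bipush_imm[0];
+-    int s = (int16_t)get_java_u2(sipush_imm);
+-
+-    printf("%d %d\n", b, s);   // prints -16 -123
+-    return 0;
+-}
+-#endif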
+-
+-          /* load from local variable */
+-
+-      CASE(_aload):
+-          SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
+-
+-      CASE(_iload):
+-      CASE(_fload):
+-          SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
+-
+-      CASE(_lload):
+-          SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(pc[1]), 1);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);
+-
+-      CASE(_dload):
+-          SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(pc[1]), 1);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);
+-
+-#undef  OPC_LOAD_n
+-#define OPC_LOAD_n(num)                                                 \
+-      CASE(_aload_##num):                                               \
+-          SET_STACK_OBJECT(LOCALS_OBJECT(num), 0);                      \
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);                         \
+-                                                                        \
+-      CASE(_iload_##num):                                               \
+-      CASE(_fload_##num):                                               \
+-          SET_STACK_SLOT(LOCALS_SLOT(num), 0);                          \
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);                         \
+-                                                                        \
+-      CASE(_lload_##num):                                               \
+-          SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(num), 1);             \
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);                         \
+-      CASE(_dload_##num):                                               \
+-          SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(num), 1);         \
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
+-
+-          OPC_LOAD_n(0);
+-          OPC_LOAD_n(1);
+-          OPC_LOAD_n(2);
+-          OPC_LOAD_n(3);
+-
+-          /* store to a local variable */
+-
+-      CASE(_astore):
+-          astore(topOfStack, -1, locals, pc[1]);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);
+-
+-      CASE(_istore):
+-      CASE(_fstore):
+-          SET_LOCALS_SLOT(STACK_SLOT(-1), pc[1]);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);
+-
+-      CASE(_lstore):
+-          SET_LOCALS_LONG(STACK_LONG(-1), pc[1]);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);
+-
+-      CASE(_dstore):
+-          SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), pc[1]);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);
+-
+-      CASE(_wide): {
+-          uint16_t reg = Bytes::get_Java_u2(pc + 2);
+-
+-          opcode = pc[1];
+-          switch(opcode) {
+-              case Bytecodes::_aload:
+-                  SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0); 
+-                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
+-
+-              case Bytecodes::_iload:
+-              case Bytecodes::_fload:
+-                  SET_STACK_SLOT(LOCALS_SLOT(reg), 0); 
+-                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
+-
+-              case Bytecodes::_lload:
+-                  SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
+-                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
+-
+-              case Bytecodes::_dload:
+-                  SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(reg), 1);
+-                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
+-
+-              case Bytecodes::_astore:
+-                  astore(topOfStack, -1, locals, reg);
+-                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);
+-
+-              case Bytecodes::_istore:
+-              case Bytecodes::_fstore:
+-                  SET_LOCALS_SLOT(STACK_SLOT(-1), reg);
+-                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);
+-
+-              case Bytecodes::_lstore:
+-                  SET_LOCALS_LONG(STACK_LONG(-1), reg);
+-                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);
+-
+-              case Bytecodes::_dstore:
+-                  SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), reg);
+-                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);
+-
+-              case Bytecodes::_iinc: {
+-                  int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4); 
+-                  // Be nice to see what this generates.... QQQ
+-                  SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg);
+-                  UPDATE_PC_AND_CONTINUE(6);
+-              }
+-              case Bytecodes::_ret:
+-                  pc = istate->method()->code_base() + (intptr_t)(LOCALS_SLOT(reg));
+-                  UPDATE_PC_AND_CONTINUE(0);
+-              default:
+-                  VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode");
+-          }
+-      }
+-
+-
+-#undef  OPC_STORE_n
+-#define OPC_STORE_n(num)                                                \
+-      CASE(_astore_##num):                                              \
+-          astore(topOfStack, -1, locals, num);                          \
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                        \
+-      CASE(_istore_##num):                                              \
+-      CASE(_fstore_##num):                                              \
+-          SET_LOCALS_SLOT(STACK_SLOT(-1), num);                         \
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
+-
+-          OPC_STORE_n(0);
+-          OPC_STORE_n(1);
+-          OPC_STORE_n(2);
+-          OPC_STORE_n(3);
+-
+-#undef  OPC_DSTORE_n
+-#define OPC_DSTORE_n(num)                                               \
+-      CASE(_dstore_##num):                                              \
+-          SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), num);                     \
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                        \
+-      CASE(_lstore_##num):                                              \
+-          SET_LOCALS_LONG(STACK_LONG(-1), num);                         \
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);
+-
+-          OPC_DSTORE_n(0);
+-          OPC_DSTORE_n(1);
+-          OPC_DSTORE_n(2);
+-          OPC_DSTORE_n(3);
+-
+-          /* stack pop, dup, and insert opcodes */
+-
+-         
+-      CASE(_pop):                /* Discard the top item on the stack */
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
+-
+-         
+-      CASE(_pop2):               /* Discard the top 2 items on the stack */
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);
+-
+-          
+-      CASE(_dup):               /* Duplicate the top item on the stack */
+-          dup(topOfStack);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
+-
+-      CASE(_dup2):              /* Duplicate the top 2 items on the stack */
+-          dup2(topOfStack);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
+-
+-      CASE(_dup_x1):    /* insert top word two down */
+-          dup_x1(topOfStack);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
+-
+-      CASE(_dup_x2):    /* insert top word three down  */
+-          dup_x2(topOfStack);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
+-
+-      CASE(_dup2_x1):   /* insert top 2 slots three down */
+-          dup2_x1(topOfStack);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
+-
+-      CASE(_dup2_x2):   /* insert top 2 slots four down */
+-          dup2_x2(topOfStack);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
+-
+-      CASE(_swap): {        /* swap top two elements on the stack */
+-          swap(topOfStack);
+-          UPDATE_PC_AND_CONTINUE(1);
+-      }
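+-
+-/*
+- * A standalone sketch (under #if 0) of the slot shuffle behind dup_x1,
+- * "insert top word two down": ..., v2, v1 becomes ..., v1, v2, v1.  An
+- * upward-growing int stack is used here purely for readability; the real
+- * stack is untyped slots growing downward, as sketched earlier.
+- */
+-#if 0
+-#include <cstdio>
+-
+-int main() {
+-    int stack[8] = { 7, 9 };   // v2 = 7, v1 = 9 on top
+-    int sp = 2;                // next free slot
+-
+-    int v1 = stack[sp - 1];
+-    int v2 = stack[sp - 2];
+-    stack[sp - 2] = v1;        // the duplicated top goes two down
+-    stack[sp - 1] = v2;
+-    stack[sp]     = v1;
+-    sp += 1;                   // net growth: one slot
+-
+-    for (int i = 0; i < sp; i++) printf("%d ", stack[i]);  // prints 9 7 9
+-    printf("\n");
+-    return 0;
+-}
+-#endif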
+-
+-          /* Perform various binary integer operations */
+-
+-#undef  OPC_INT_BINARY 
+-#define OPC_INT_BINARY(opcname, opname, test)                           \
+-      CASE(_i##opcname):                                                \
+-          if (test && (STACK_INT(-1) == 0)) {                           \
+-              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
+-                            "/ by int zero");                           \
+-          }                                                             \
+-          SET_STACK_INT(VMint##opname(STACK_INT(-2),                    \
+-                                      STACK_INT(-1)),                   \
+-                                      -2);                              \
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                        \
+-      CASE(_l##opcname):                                                \
+-      {                                                                 \
+-          if (test) {                                                   \
+-            jlong l1 = STACK_LONG(-1);                                  \
+-            if (VMlongEqz(l1)) {                                        \
+-              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
+-                            "/ by long zero");                          \
+-            }                                                           \
+-          }                                                             \
+-          /* First long at (-1,-2) next long at (-3,-4) */              \
+-          SET_STACK_LONG(VMlong##opname(STACK_LONG(-3),                 \
+-                                        STACK_LONG(-1)),                \
+-                                        -3);                            \
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                        \
+-      }
+-
+-      OPC_INT_BINARY(add, Add, 0);
+-      OPC_INT_BINARY(sub, Sub, 0);
+-      OPC_INT_BINARY(mul, Mul, 0);
+-      OPC_INT_BINARY(and, And, 0);
+-      OPC_INT_BINARY(or,  Or,  0);
+-      OPC_INT_BINARY(xor, Xor, 0);
+-      OPC_INT_BINARY(div, Div, 1);
+-      OPC_INT_BINARY(rem, Rem, 1);
+-
+-
+-      /* Perform various binary floating-point operations */
+-      /* On some machines/platforms/compilers the divide-by-zero check can be implicit */
+-
+-#undef  OPC_FLOAT_BINARY 
+-#define OPC_FLOAT_BINARY(opcname, opname)                                  \
+-      CASE(_d##opcname): {                                                 \
+-          SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3),              \
+-                                            STACK_DOUBLE(-1)),             \
+-                                            -3);                           \
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                           \
+-      }                                                                    \
+-      CASE(_f##opcname):                                                   \
+-          SET_STACK_FLOAT(VMfloat##opname(STACK_FLOAT(-2),                 \
+-                                          STACK_FLOAT(-1)),                \
+-                                          -2);                             \
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
+-
+-
+-     OPC_FLOAT_BINARY(add, Add);
+-     OPC_FLOAT_BINARY(sub, Sub);
+-     OPC_FLOAT_BINARY(mul, Mul);
+-     OPC_FLOAT_BINARY(div, Div);
+-     OPC_FLOAT_BINARY(rem, Rem);
+-
+-      /* Shift operations                                  
+-       * Shift left int and long: ishl, lshl           
+-       * Logical shift right int and long w/zero extension: iushr, lushr
+-       * Arithmetic shift right int and long w/sign extension: ishr, lshr
+-       */
+-
+-#undef  OPC_SHIFT_BINARY
+-#define OPC_SHIFT_BINARY(opcname, opname)                               \
+-      CASE(_i##opcname):                                                \
+-         SET_STACK_INT(VMint##opname(STACK_INT(-2),                     \
+-                                     STACK_INT(-1)),                    \
+-                                     -2);                               \
+-         UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                         \
+-      CASE(_l##opcname):                                                \
+-      {                                                                 \
+-         SET_STACK_LONG(VMlong##opname(STACK_LONG(-2),                  \
+-                                       STACK_INT(-1)),                  \
+-                                       -2);                             \
+-         UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                         \
+-      }
+-    
+-      OPC_SHIFT_BINARY(shl, Shl);
+-      OPC_SHIFT_BINARY(shr, Shr);
+-      OPC_SHIFT_BINARY(ushr, Ushr);
+-
+-     /* Increment local variable by constant */ 
+-      CASE(_iinc): 
+-      {
+-          // locals[pc[1]].j.i += (jbyte)(pc[2]);
+-          SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]);
+-          UPDATE_PC_AND_CONTINUE(3);
+-      }
+-
+-     /* negate the value on the top of the stack */
+-
+-      CASE(_ineg):
+-         SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1); 
+-         UPDATE_PC_AND_CONTINUE(1);
+-
+-      CASE(_fneg):
+-         SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1); 
+-         UPDATE_PC_AND_CONTINUE(1);
+-
+-      CASE(_lneg):
+-      {
+-         SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1); 
+-         UPDATE_PC_AND_CONTINUE(1);
+-      }
+-
+-      CASE(_dneg):
+-      {
+-         SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1); 
+-         UPDATE_PC_AND_CONTINUE(1);
+-      }
+-
+-      /* Conversion operations */
+-
+-      CASE(_i2f):       /* convert top of stack int to float */
+-         SET_STACK_FLOAT(VMint2Float(STACK_INT(-1)), -1);
+-         UPDATE_PC_AND_CONTINUE(1);
+-
+-      CASE(_i2l):       /* convert top of stack int to long */
+-      {
+-          // this is ugly QQQ
+-          jlong r = VMint2Long(STACK_INT(-1));
+-          MORE_STACK(-1); // Pop
+-          SET_STACK_LONG(r, 1);
+-
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
+-      }
+-
+-      CASE(_i2d):       /* convert top of stack int to double */
+-      {
+-          // this is ugly QQQ (why cast to jlong?? )
+-          jdouble r = (jlong)STACK_INT(-1);
+-          MORE_STACK(-1); // Pop
+-          SET_STACK_DOUBLE(r, 1);
+-
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
+-      }
+-
+-      CASE(_l2i):       /* convert top of stack long to int */
+-      {
+-          jint r = VMlong2Int(STACK_LONG(-1));
+-          MORE_STACK(-2); // Pop
+-          SET_STACK_INT(r, 0);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
+-      }
+-      
+-      CASE(_l2f):   /* convert top of stack long to float */
+-      {
+-          jlong r = STACK_LONG(-1);
+-          MORE_STACK(-2); // Pop
+-          SET_STACK_FLOAT(VMlong2Float(r), 0);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
+-      }
+-
+-      CASE(_l2d):       /* convert top of stack long to double */
+-      {
+-          jlong r = STACK_LONG(-1);
+-          MORE_STACK(-2); // Pop
+-          SET_STACK_DOUBLE(VMlong2Double(r), 1);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
+-      }
+-
+-      CASE(_f2i):  /* Convert top of stack float to int */
+-          SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1); 
+-          UPDATE_PC_AND_CONTINUE(1);
+-
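SharedRuntime::f2i exists because a bare C cast does not match JVM semantics: NaN must become 0 and out-of-range values must saturate rather than invoke undefined behavior. A self-contained sketch of that contract:

    #include <climits>
    #include <cmath>
    #include <cstdint>

    int32_t sketch_f2i(float f) {
        if (std::isnan(f)) return 0;              // NaN -> 0 per the JVM spec
        if (f >= (float)INT_MAX) return INT_MAX;  // saturate at the rails
        if (f <= (float)INT_MIN) return INT_MIN;
        return (int32_t)f;                        // in range: truncate toward zero
    }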
+-      CASE(_f2l):  /* convert top of stack float to long */
+-      {
+-          jlong r = SharedRuntime::f2l(STACK_FLOAT(-1));
+-          MORE_STACK(-1); // POP
+-          SET_STACK_LONG(r, 1);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
+-      }
+-
+-      CASE(_f2d):  /* convert top of stack float to double */
+-      {
+-          jfloat f;
+-          jdouble r;
+-          f = STACK_FLOAT(-1);
+-#ifdef IA64
+-          // IA64 gcc bug
+-          r = ( f == 0.0f ) ? (jdouble) f : (jdouble) f + ia64_double_zero;
+-#else
+-          r = (jdouble) f;
+-#endif
+-          MORE_STACK(-1); // POP
+-          SET_STACK_DOUBLE(r, 1);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
+-      }
+-
+-      CASE(_d2i): /* convert top of stack double to int */
+-      {
+-          jint r1 = SharedRuntime::d2i(STACK_DOUBLE(-1));
+-          MORE_STACK(-2);
+-          SET_STACK_INT(r1, 0);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
+-      }
+-
+-      CASE(_d2f): /* convert top of stack double to float */
+-      {
+-          jfloat r1 = VMdouble2Float(STACK_DOUBLE(-1));
+-          MORE_STACK(-2);
+-          SET_STACK_FLOAT(r1, 0);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
+-      }
+-
+-      CASE(_d2l): /* convert top of stack double to long */
+-      {
+-          jlong r1 = SharedRuntime::d2l(STACK_DOUBLE(-1));
+-          MORE_STACK(-2);
+-          SET_STACK_LONG(r1, 1);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
+-      }
+-
+-      CASE(_i2b):
+-          SET_STACK_INT(VMint2Byte(STACK_INT(-1)), -1);
+-          UPDATE_PC_AND_CONTINUE(1);
+-
+-      CASE(_i2c):
+-          SET_STACK_INT(VMint2Char(STACK_INT(-1)), -1);
+-          UPDATE_PC_AND_CONTINUE(1);
+-
+-      CASE(_i2s):
+-          SET_STACK_INT(VMint2Short(STACK_INT(-1)), -1);
+-          UPDATE_PC_AND_CONTINUE(1);
+-
+-      /* comparison operators */
+-
+-
+-#define COMPARISON_OP(name, comparison)                                      \
+-      CASE(_if_icmp##name): {                                                \
+-          int skip = (STACK_INT(-2) comparison STACK_INT(-1))                \
+-                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
+-          address branch_pc = pc;                                            \
+-          UPDATE_PC_AND_TOS(skip, -2);                                       \
+-          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
+-          CONTINUE;                                                          \
+-      }                                                                      \
+-      CASE(_if##name): {                                                     \
+-          int skip = (STACK_INT(-1) comparison 0)                            \
+-                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
+-          address branch_pc = pc;                                            \
+-          UPDATE_PC_AND_TOS(skip, -1);                                       \
+-          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
+-          CONTINUE;                                                          \
+-      }
+-
+-#define COMPARISON_OP2(name, comparison)                                     \
+-      COMPARISON_OP(name, comparison)                                        \
+-      CASE(_if_acmp##name): {                                                \
+-          int skip = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1))          \
+-                       ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;            \
+-          address branch_pc = pc;                                            \
+-          UPDATE_PC_AND_TOS(skip, -2);                                       \
+-          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
+-          CONTINUE;                                                          \
+-      }
+-
+-#define NULL_COMPARISON_NOT_OP(name)                                         \
+-      CASE(_if##name): {                                                     \
+-          int skip = (!(STACK_OBJECT(-1) == 0))                              \
+-                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
+-          address branch_pc = pc;                                            \
+-          UPDATE_PC_AND_TOS(skip, -1);                                       \
+-          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
+-          CONTINUE;                                                          \
+-      }
+-
+-#define NULL_COMPARISON_OP(name)                                             \
+-      CASE(_if##name): {                                                     \
+-          int skip = ((STACK_OBJECT(-1) == 0))                               \
+-                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
+-          address branch_pc = pc;                                            \
+-          UPDATE_PC_AND_TOS(skip, -1);                                       \
+-          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
+-          CONTINUE;                                                          \
+-      }
+-      COMPARISON_OP(lt, <);
+-      COMPARISON_OP(gt, >);
+-      COMPARISON_OP(le, <=);
+-      COMPARISON_OP(ge, >=);
+-      COMPARISON_OP2(eq, ==);  /* include ref comparison */
+-      COMPARISON_OP2(ne, !=);  /* include ref comparison */
+-      NULL_COMPARISON_OP(null);
+-      NULL_COMPARISON_NOT_OP(nonnull);
+-
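All of these macros share one trick: skip is either the signed 16-bit offset read from the instruction (branch taken) or 3, the byte length of an if<cond>/if_icmp<cond> instruction (fall through), so a single UPDATE_PC_AND_TOS serves both outcomes. Just that computation, in isolation:

    #include <cstdint>

    // Class-file branch offsets are big-endian and relative to the branch
    // opcode itself, which is what Bytes::get_Java_u2(pc + 1) reads above.
    const uint8_t* sketch_cond_branch(const uint8_t* pc, bool taken) {
        int16_t offset = (int16_t)((pc[1] << 8) | pc[2]);
        return pc + (taken ? offset : 3);   // 3 = opcode + two offset bytes
    }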
+-      /* Goto pc at specified offset in switch table. */
+-
+-      CASE(_tableswitch): {
+-          jint* lpc  = (jint*)VMalignWordUp(pc+1);
+-          int32_t  key  = STACK_INT(-1);
+-          int32_t  low  = Bytes::get_Java_u4((address)&lpc[1]);
+-          int32_t  high = Bytes::get_Java_u4((address)&lpc[2]);
+-          int32_t  skip;
+-          key -= low;
+-          skip = ((uint32_t) key > (uint32_t)(high - low))
+-                      ? Bytes::get_Java_u4((address)&lpc[0])
+-                      : Bytes::get_Java_u4((address)&lpc[key + 3]);
+-          // Does this really need a full backedge check (osr?)
+-          address branch_pc = pc;
+-          UPDATE_PC_AND_TOS(skip, -1);
+-          DO_BACKEDGE_CHECKS(skip, branch_pc);
+-          CONTINUE;
+-      }
+-
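Once VMalignWordUp rounds pc+1 to a word boundary, the tableswitch payload is {default, low, high, offset[0 .. high-low]} as big-endian 32-bit words, and the single unsigned comparison folds key < low and key > high into one test. A sketch over an already byte-swapped table (the real code reads each word through Bytes::get_Java_u4):

    #include <cstdint>

    int32_t sketch_tableswitch(const int32_t* tbl, int32_t key) {
        int32_t low = tbl[1], high = tbl[2];
        uint32_t idx = (uint32_t)(key - low);     // negative keys wrap to huge values
        return (idx > (uint32_t)(high - low)) ? tbl[0]         // default offset
                                              : tbl[3 + idx];  // per-key offset
    }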
+-      /* Goto pc whose table entry matches specified key */
+-
+-      CASE(_lookupswitch): {
+-          jint* lpc  = (jint*)VMalignWordUp(pc+1);
+-          int32_t  key  = STACK_INT(-1);
+-          int32_t  skip = Bytes::get_Java_u4((address) lpc); /* default amount */
+-          int32_t  npairs = Bytes::get_Java_u4((address) &lpc[1]);
+-          while (--npairs >= 0) {
+-              lpc += 2;
+-              if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) {
+-                  skip = Bytes::get_Java_u4((address)&lpc[1]);
+-                  break;
+-              }
+-          }
+-          address branch_pc = pc;
+-          UPDATE_PC_AND_TOS(skip, -1);
+-          DO_BACKEDGE_CHECKS(skip, branch_pc);
+-          CONTINUE;
+-      }
+-
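lookupswitch is the sparse sibling: {default, npairs, match/offset pairs}. The class file guarantees the pairs are sorted by match value, so a binary search would also be legal; this interpreter simply scans. Again over a byte-swapped table:

    #include <cstdint>

    int32_t sketch_lookupswitch(const int32_t* tbl, int32_t key) {
        int32_t skip = tbl[0];                    // default offset
        for (int32_t i = 0, n = tbl[1]; i < n; i++) {
            if (tbl[2 + 2 * i] == key) { skip = tbl[3 + 2 * i]; break; }
        }
        return skip;
    }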
+-      CASE(_fcmpl):
+-      CASE(_fcmpg):
+-      {
+-          SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2), 
+-                                        STACK_FLOAT(-1), 
+-                                        (opcode == Bytecodes::_fcmpl ? -1 : 1)),
+-                        -2);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
+-      }
+-
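The third argument to VMfloatCompare is only the NaN answer: fcmpl reports -1 and fcmpg reports +1 when either operand is unordered, which lets the compiler pick whichever variant makes NaN fail the branch that follows. The contract in isolation:

    int sketch_fcmp(float a, float b, int nan_result) {
        if (a != a || b != b) return nan_result;  // NaN is unordered, even against itself
        return (a > b) ? 1 : (a == b ? 0 : -1);
    }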
+-      CASE(_dcmpl):
+-      CASE(_dcmpg):
+-      {
+-          int r = VMdoubleCompare(STACK_DOUBLE(-3),
+-                                  STACK_DOUBLE(-1),
+-                                  (opcode == Bytecodes::_dcmpl ? -1 : 1));
+-          MORE_STACK(-4); // Pop 
+-          SET_STACK_INT(r, 0);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
+-      }
+-
+-      CASE(_lcmp):
+-      {
+-          int r = VMlongCompare(STACK_LONG(-3), STACK_LONG(-1));
+-          MORE_STACK(-4);
+-          SET_STACK_INT(r, 0);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
+-      }
+-
+-
+-      /* Return from a method */
+-
+-      CASE(_areturn):
+-      CASE(_ireturn):
+-      CASE(_freturn):
+-      {
+-          // Allow a safepoint before returning to frame manager.
+-          SAFEPOINT;
+-
+-          goto handle_return;
+-      }
+-
+-      CASE(_lreturn):
+-      CASE(_dreturn):
+-      {
+-          // Allow a safepoint before returning to frame manager.
+-          SAFEPOINT;
+-          goto handle_return;
+-      }
+-
+-      CASE(_return_register_finalizer): {
+-
+-          oop rcvr = LOCALS_OBJECT(0);
+-          if (rcvr->klass()->klass_part()->has_finalizer()) {
+-            CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception);
+-          }
+-          goto handle_return;
+-      }
+-      CASE(_return): {
+-
+-          // Allow a safepoint before returning to frame manager.
+-          SAFEPOINT;
+-          goto handle_return;
+-      }
+-
+-      /* Array access byte-codes */
+-
+-      /* Every array access byte-code starts out like this */
+-#define ARRAY_INTRO(arrayOff)                                                  \
+-      arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff);            \
+-      jint     index  = STACK_INT(arrayOff + 1);                               \
+-      char message[jintAsStringSize];                                          \
+-      CHECK_NULL(arrObj);                                                      \
+-      if ((uint32_t)index >= (uint32_t)arrObj->length()) {                     \
+-          sprintf(message, "%d", index);                                       \
+-          VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \
+-                        message);                                              \
+-      }                                                 
+-
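The unsigned comparison in ARRAY_INTRO is a classic: reinterpreting a negative index as uint32_t makes it larger than any possible array length, so one compare rejects both negative and too-large indices. In isolation:

    #include <cstdint>

    bool sketch_in_bounds(int32_t index, int32_t length) {
        // (uint32_t)-1 == 0xFFFFFFFF exceeds any valid length, so index < 0 fails too.
        return (uint32_t)index < (uint32_t)length;
    }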
+-      /* 32-bit loads. These handle conversion from < 32-bit types */
+-#define ARRAY_LOADTO32(T, T2, format, stackRes, extra)                                \
+-      {                                                                               \
+-          ARRAY_INTRO(-2);                                                            \
+-          extra;                                                                      \
+-          SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \
+-                           -2);                                                       \
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                                      \
+-      }
+-
+-      /* 64-bit loads */
+-#define ARRAY_LOADTO64(T,T2, stackRes, extra)                                              \
+-      {                                                                                    \
+-          ARRAY_INTRO(-2);                                                                 \
+-          SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \
+-          extra;                                                                           \
+-          UPDATE_PC_AND_CONTINUE(1);                                            \
+-      }
+-
+-      CASE(_iaload):
+-          ARRAY_LOADTO32(T_INT, jint,   "%d",   STACK_INT, 0);
+-      CASE(_faload):
+-          ARRAY_LOADTO32(T_FLOAT, jfloat, "%f",   STACK_FLOAT, 0);
+-      CASE(_aaload):
+-          ARRAY_LOADTO32(T_OBJECT, oop,   INTPTR_FORMAT, STACK_OBJECT, 0);
+-      CASE(_baload):
+-          ARRAY_LOADTO32(T_BYTE, jbyte,  "%d",   STACK_INT, 0);
+-      CASE(_caload):
+-          ARRAY_LOADTO32(T_CHAR,  jchar, "%d",   STACK_INT, 0);
+-      CASE(_saload):
+-          ARRAY_LOADTO32(T_SHORT, jshort, "%d",   STACK_INT, 0);
+-      CASE(_laload):
+-          ARRAY_LOADTO64(T_LONG, jlong, STACK_LONG, 0);
+-      CASE(_daload):
+-          ARRAY_LOADTO64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
+-
+-      /* 32-bit stores. These handle conversion to < 32-bit types */
+-#define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra)                            \
+-      {                                                                              \
+-          ARRAY_INTRO(-3);                                                           \
+-          extra;                                                                     \
+-          *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);                                     \
+-      }
+-
+-      /* 64-bit stores */
+-#define ARRAY_STOREFROM64(T, T2, stackSrc, extra)                                    \
+-      {                                                                              \
+-          ARRAY_INTRO(-4);                                                           \
+-          extra;                                                                     \
+-          *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4);                                     \
+-      }
+-
+-      CASE(_iastore):
+-          ARRAY_STOREFROM32(T_INT, jint,   "%d",   STACK_INT, 0);
+-      CASE(_fastore):
+-          ARRAY_STOREFROM32(T_FLOAT, jfloat, "%f",   STACK_FLOAT, 0);
+-      /*
+-       * This one looks different because of the assignability check
+-       */
+-      CASE(_aastore): {
+-          oop rhsObject = STACK_OBJECT(-1);
+-          ARRAY_INTRO( -3);
+-          // arrObj, index are set
+-          if (rhsObject != NULL) {
+-            /* Check assignability of rhsObject into arrObj */
+-            klassOop rhsKlassOop = rhsObject->klass(); // EBX (subclass)
+-            assert(arrObj->klass()->klass()->klass_part()->oop_is_objArrayKlass(), "Ack not an objArrayKlass");
+-            klassOop elemKlassOop = ((objArrayKlass*) arrObj->klass()->klass_part())->element_klass(); // superklass EAX
+-            //
+-            // Check for compatibility. This check must not GC!!
+-            // Seems way more expensive now that we must dispatch
+-            //
+-            if (rhsKlassOop != elemKlassOop && !rhsKlassOop->klass_part()->is_subtype_of(elemKlassOop)) { // ebx->is...
+-              VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "");
+-            }
+-          }
+-          oop* elem_loc = (oop*)(((address) arrObj->base(T_OBJECT)) + index * sizeof(oop));
+-          // *(oop*)(((address) arrObj->base(T_OBJECT)) + index * sizeof(oop)) = rhsObject;
+-          *elem_loc = rhsObject;
+-          // Mark the card
+-          OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)elem_loc >> CardTableModRefBS::card_shift], 0);
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);
+-      }
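The release_store above is the collector's post-write barrier: one byte of the card table covers a fixed-size span of the heap (512 bytes, card_shift == 9, in HotSpot of this vintage, with 0 meaning dirty), and dirtying it tells the next scavenge to rescan that card for old-to-young pointers. With those values assumed:

    #include <cstdint>

    void sketch_post_write_barrier(volatile int8_t* byte_map_base, const void* field_addr) {
        byte_map_base[(uintptr_t)field_addr >> 9] = 0;   // 0 == dirty in this scheme
    }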
+-      CASE(_bastore):
+-          ARRAY_STOREFROM32(T_BYTE, jbyte,  "%d",   STACK_INT, 0);
+-      CASE(_castore):
+-          ARRAY_STOREFROM32(T_CHAR, jchar,  "%d",   STACK_INT, 0);
+-      CASE(_sastore):
+-          ARRAY_STOREFROM32(T_SHORT, jshort, "%d",   STACK_INT, 0);
+-      CASE(_lastore):
+-          ARRAY_STOREFROM64(T_LONG, jlong, STACK_LONG, 0);
+-      CASE(_dastore):
+-          ARRAY_STOREFROM64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
+-
+-      CASE(_arraylength):
+-      {
+-          arrayOopDesc *ary = (arrayOopDesc *) STACK_OBJECT(-1);
+-          CHECK_NULL(ary);
+-          SET_STACK_INT(ary->length(), -1);
+-          UPDATE_PC_AND_CONTINUE(1);
+-      }
+-
+-      /* monitorenter and monitorexit for locking/unlocking an object */
+-
+-      CASE(_monitorenter): {
+-        oop lockee = STACK_OBJECT(-1);
+-        // dereferencing lockee ought to provoke an implicit null check
+-        CHECK_NULL(lockee);
+-        // find a free monitor or one already allocated for this object
+-        // if we find a matching object then we need a new monitor
+-        // since this is recursive enter
+-        BasicObjectLock* limit = istate->monitor_base();
+-        BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
+-        BasicObjectLock* entry = NULL;
+-        while (most_recent != limit ) {
+-          if (most_recent->obj() == NULL) entry = most_recent;
+-          else if (most_recent->obj() == lockee) break;
+-          most_recent++;
+-        }
+-        if (entry != NULL) {
+-          entry->set_obj(lockee);
+-          markOop displaced = lockee->mark()->set_unlocked();
+-          entry->lock()->set_displaced_header(displaced);
+-          if (Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
+-            // Is it simple recursive case?
+-            if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
+-              entry->lock()->set_displaced_header(NULL);
+-            } else {
+-              CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
+-            }
+-          }
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
+-        } else {
+-          istate->set_msg(more_monitors);
+-          // HACK FIX LATER
+-          // Why was this needed? Seems to be useless now
+-          // istate->set_callee((methodOop) lockee);
+-          UPDATE_PC_AND_RETURN(0); // Re-execute
+-        }
+-      }
+-
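The fast path above is HotSpot's stack-locking (displaced header) scheme: save the object's unlocked mark word in a free on-stack BasicObjectLock, then CAS the slot's address into the mark word. A failed CAS whose current mark points into this thread's own stack means a recursive lock, recorded by leaving the displaced header NULL; anything else is contention and goes to the runtime. A compilable sketch of just that decision, with simplified types and an assumed low-bit encoding:

    #include <atomic>
    #include <cstdint>

    struct SketchLockRecord { uintptr_t displaced_header; };

    bool sketch_fast_lock(std::atomic<uintptr_t>& mark_word, SketchLockRecord* rec,
                          bool (*is_own_stack_addr)(uintptr_t)) {
        uintptr_t displaced = mark_word.load() | 1;   // assumed "unlocked" bit pattern
        rec->displaced_header = displaced;
        uintptr_t expected = displaced;
        if (mark_word.compare_exchange_strong(expected, (uintptr_t)rec))
            return true;                              // thin lock acquired
        if (is_own_stack_addr(expected & ~(uintptr_t)3)) {
            rec->displaced_header = 0;                // recursive enter: slot is a no-op
            return true;
        }
        return false;                                 // contended: call the runtime
    }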
+-      CASE(_monitorexit): {
+-        oop lockee = STACK_OBJECT(-1);
+-        CHECK_NULL(lockee);
+-        // dereferencing lockee ought to provoke an implicit null check
+-        // find our monitor slot
+-        BasicObjectLock* limit = istate->monitor_base();
+-        BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
+-        while (most_recent != limit ) {
+-          if ((most_recent)->obj() == lockee) {
+-            BasicLock* lock = most_recent->lock();
+-            markOop header = lock->displaced_header();
+-            most_recent->set_obj(NULL);
+-            // If it isn't recursive we must either swap the old header or call the runtime
+-            if (header != NULL) {
+-              if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
+-                // restore object for the slow case
+-                most_recent->set_obj(lockee);
+-                CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
+-              }
+-            }
+-            UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
+-          }
+-          most_recent++;
+-        }
+-        // Need to throw illegal monitor state exception
+-        CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception);
+-        // Should never reach here...
+-        assert(false, "Should have thrown illegal monitor exception");
+-      }
+-
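Exit mirrors enter: a NULL displaced header marks a recursive exit with nothing to restore, otherwise the saved header is CASed back, and a failed CAS (the lock was inflated in the meantime) falls back to InterpreterRuntime::monitorexit. Continuing the sketch from the monitorenter case:

    bool sketch_fast_unlock(std::atomic<uintptr_t>& mark_word, SketchLockRecord* rec) {
        uintptr_t header = rec->displaced_header;
        if (header == 0) return true;                 // recursive exit
        uintptr_t expected = (uintptr_t)rec;
        return mark_word.compare_exchange_strong(expected, header);  // false -> runtime
    }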
+-      /* All of the non-quick opcodes. */
+-
+-      /* -Set clobbersCpIndex true if the quickened opcode clobbers the
+-       *  constant pool index in the instruction.
+-       */
+-      CASE(_getfield):
+-      CASE(_getstatic):
+-        {
+-          u2 index;
+-          ConstantPoolCacheEntry* cache;
+-          index = Bytes::get_native_u2(pc+1);
+-
+-          // QQQ Need to make this as inlined as possible. Probably need to 
+-          // split all the bytecode cases out so c++ compiler has a chance 
+-          // for constant prop to fold everything possible away.
+-
+-          cache = cp->entry_at(index);
+-          if (!cache->is_resolved((Bytecodes::Code)opcode)) {
+-            CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode), 
+-                    handle_exception);
+-            cache = cp->entry_at(index);
+-          }
+-
+-#ifdef VM_JVMTI
+-          if (_jvmti_interp_events) {
+-            int *count_addr;
+-            oop obj;
+-            // Check to see if a field modification watch has been set 
+-            // before we take the time to call into the VM.
+-            count_addr = (int *)JvmtiExport::get_field_access_count_addr();
+-            if ( *count_addr > 0 ) {
+-              if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
+-                obj = (oop)NULL;
+-              } else {
+-                obj = (oop) STACK_OBJECT(-1);
+-              }
+-              CALL_VM(InterpreterRuntime::post_field_access(THREAD, 
+-                                          obj, 
+-                                          cache),
+-                                          handle_exception);
+-            }
+-          }
+-#endif /* VM_JVMTI */
+-
+-          oop obj;
+-          if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
+-            obj = (oop) cache->f1();
+-            MORE_STACK(1);  // Assume single slot push
+-          } else {
+-            obj = (oop) STACK_OBJECT(-1);
+-            CHECK_NULL(obj);
+-          }
+-
+-          //
+-          // Now store the result on the stack
+-          //
+-          TosState tos_type = cache->flag_state();
+-          int field_offset = cache->f2();
+-          if (cache->is_volatile()) {
+-            if (tos_type == atos) {
+-              SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1);
+-            } else if (tos_type == itos) {
+-              SET_STACK_INT(obj->int_field_acquire(field_offset), -1);
+-            } else if (tos_type == ltos) {
+-              SET_STACK_LONG(obj->long_field_acquire(field_offset), 0);
+-              MORE_STACK(1);
+-            } else if (tos_type == btos) {
+-              SET_STACK_INT(obj->byte_field_acquire(field_offset), -1);
+-            } else if (tos_type == ctos) {
+-              SET_STACK_INT(obj->char_field_acquire(field_offset), -1);
+-            } else if (tos_type == stos) {
+-              SET_STACK_INT(obj->short_field_acquire(field_offset), -1);
+-            } else if (tos_type == ftos) {
+-              SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1);
+-            } else {
+-              SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0);
+-              MORE_STACK(1);
+-            }
+-          } else {
+-            if (tos_type == atos) {
+-              SET_STACK_OBJECT(obj->obj_field(field_offset), -1);
+-            } else if (tos_type == itos) {
+-              SET_STACK_INT(obj->int_field(field_offset), -1);
+-            } else if (tos_type == ltos) {
+-              SET_STACK_LONG(obj->long_field(field_offset), 0);
+-              MORE_STACK(1);
+-            } else if (tos_type == btos) {
+-              SET_STACK_INT(obj->byte_field(field_offset), -1);
+-            } else if (tos_type == ctos) {
+-              SET_STACK_INT(obj->char_field(field_offset), -1);
+-            } else if (tos_type == stos) {
+-              SET_STACK_INT(obj->short_field(field_offset), -1);
+-            } else if (tos_type == ftos) {
+-              SET_STACK_FLOAT(obj->float_field(field_offset), -1);
+-            } else {
+-              SET_STACK_DOUBLE(obj->double_field(field_offset), 0);
+-              MORE_STACK(1);
+-            }
+-          }
+-
+-          UPDATE_PC_AND_CONTINUE(3);
+-         }
+-
+-      CASE(_putfield):
+-      CASE(_putstatic):
+-        {
+-          u2 index = Bytes::get_native_u2(pc+1);
+-          ConstantPoolCacheEntry* cache = cp->entry_at(index);
+-          if (!cache->is_resolved((Bytecodes::Code)opcode)) {
+-            CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode), 
+-                    handle_exception);
+-            cache = cp->entry_at(index);
+-          }
+-
+-#ifdef VM_JVMTI
+-          if (_jvmti_interp_events) {
+-            int *count_addr;
+-            oop obj;
+-            // Check to see if a field modification watch has been set 
+-            // before we take the time to call into the VM.
+-            count_addr = (int *)JvmtiExport::get_field_modification_count_addr();
+-            if ( *count_addr > 0 ) {
+-              if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
+-                obj = (oop)NULL;
+-              }
+-              else {
+-                if (cache->is_long() || cache->is_double()) {
+-                  obj = (oop) STACK_OBJECT(-3);
+-                } else {
+-                  obj = (oop) STACK_OBJECT(-2);
+-                }
+-              }
+-
+-              CALL_VM(InterpreterRuntime::post_field_modification(THREAD,
+-                                          obj, 
+-                                          cache, 
+-                                          (jvalue *)STACK_SLOT(-1)),  
+-                                          handle_exception);
+-            }
+-          }
+-#endif /* VM_JVMTI */
+-
+-          // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
+-          // out so c++ compiler has a chance for constant prop to fold everything possible away.
+-
+-          oop obj;
+-          int count;
+-          TosState tos_type = cache->flag_state();
+-
+-          count = -1;
+-          if (tos_type == ltos || tos_type == dtos) {
+-            --count;
+-          }
+-          if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
+-            obj = (oop) cache->f1();
+-          } else {
+-            --count;
+-            obj = (oop) STACK_OBJECT(count);
+-            CHECK_NULL(obj);
+-          }
+-
+-          //
+-          // Now store the result
+-          //
+-          int field_offset = cache->f2();
+-          if (cache->is_volatile()) {
+-            if (tos_type == itos) {
+-              obj->release_int_field_put(field_offset, STACK_INT(-1));
+-            } else if (tos_type == atos) {
+-              obj->release_obj_field_put(field_offset, STACK_OBJECT(-1));
+-              OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)obj >> CardTableModRefBS::card_shift], 0);
+-            } else if (tos_type == btos) {
+-              obj->release_byte_field_put(field_offset, STACK_INT(-1));
+-            } else if (tos_type == ltos) {
+-              obj->release_long_field_put(field_offset, STACK_LONG(-1));
+-            } else if (tos_type == ctos) {
+-              obj->release_char_field_put(field_offset, STACK_INT(-1));
+-            } else if (tos_type == stos) {
+-              obj->release_short_field_put(field_offset, STACK_INT(-1));
+-            } else if (tos_type == ftos) {
+-              obj->release_float_field_put(field_offset, STACK_FLOAT(-1));
+-            } else {
+-              obj->release_double_field_put(field_offset, STACK_DOUBLE(-1));
+-            }
+-            OrderAccess::storeload();
+-          } else {
+-            if (tos_type == itos) {
+-              obj->int_field_put(field_offset, STACK_INT(-1));
+-            } else if (tos_type == atos) {
+-              obj->obj_field_put(field_offset, STACK_OBJECT(-1));
+-              OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)obj >> CardTableModRefBS::card_shift], 0);
+-            } else if (tos_type == btos) {
+-              obj->byte_field_put(field_offset, STACK_INT(-1));
+-            } else if (tos_type == ltos) {
+-              obj->long_field_put(field_offset, STACK_LONG(-1));
+-            } else if (tos_type == ctos) {
+-              obj->char_field_put(field_offset, STACK_INT(-1));
+-            } else if (tos_type == stos) {
+-              obj->short_field_put(field_offset, STACK_INT(-1));
+-            } else if (tos_type == ftos) {
+-              obj->float_field_put(field_offset, STACK_FLOAT(-1));
+-            } else {
+-              obj->double_field_put(field_offset, STACK_DOUBLE(-1));
+-            }
+-          }
+-
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(3, count);
+-        }  
+-
+-      CASE(_new): {
+-        u2 index = Bytes::get_Java_u2(pc+1);
+-        constantPoolOop constants = istate->method()->constants();
+-        if (!constants->tag_at(index).is_unresolved_klass()) {
+-          // Make sure klass is initialized and doesn't have a finalizer
+-          oop entry = (klassOop) *constants->obj_at_addr(index);
+-          assert(entry->is_klass(), "Should be resolved klass");
+-          klassOop k_entry = (klassOop) entry;
+-          assert(k_entry->klass_part()->oop_is_instance(), "Should be instanceKlass");
+-          instanceKlass* ik = (instanceKlass*) k_entry->klass_part();
+-          if ( ik->is_initialized() && ik->can_be_fastpath_allocated() ) {
+-            size_t obj_size = ik->size_helper();
+-            oop result = NULL;
+-            bool need_zero = false;
+-            if (UseTLAB) {
+-              result = (oop) THREAD->tlab().allocate(obj_size);
+-            }
+-            if (result == NULL) {
+-              need_zero = true;
+-              // Try allocate in shared eden
+-        retry:
+-              HeapWord* compare_to = *Universe::heap()->top_addr();
+-              HeapWord* new_top = compare_to + obj_size;
+-              if (new_top <= *Universe::heap()->end_addr()) {
+-                if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
+-                  goto retry;
+-                }
+-                result = (oop) compare_to;
+-              }
+-            }
+-            if (result != NULL) {
+-              // Initialize object (if nonzero size and need) and then the header
+-              if (need_zero ) {
+-                HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize;
+-                obj_size -= sizeof(oopDesc) / oopSize;
+-                if (obj_size > 0 ) {
+-                  memset(to_zero, 0, obj_size * HeapWordSize);
+-                }
+-              }
+-              if (UseBiasedLocking) {
+-                result->set_mark(ik->prototype_header());
+-              } else {
+-                result->set_mark(markOopDesc::prototype());
+-              }
+-              result->set_klass(k_entry);
+-              SET_STACK_OBJECT(result, 0);
+-              UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
+-            }
+-          }
+-        }
+-        // Slow case allocation
+-        CALL_VM(InterpreterRuntime::_new(THREAD, istate->method()->constants(), index),
+-                handle_exception);
+-        SET_STACK_OBJECT(THREAD->vm_result(), 0);
+-        THREAD->set_vm_result(NULL);
+-        UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
+-      }
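The retry label above is a lock-free bump-pointer allocation in the shared eden: read top, compute the new top, and CAS it forward, looping if another thread won the race. The same loop in miniature (byte-addressed for simplicity):

    #include <atomic>
    #include <cstddef>

    void* sketch_eden_alloc(std::atomic<char*>& top, char* end, size_t bytes) {
        for (;;) {
            char* old_top = top.load();
            char* new_top = old_top + bytes;
            if (new_top > end) return nullptr;        // eden exhausted: slow path / GC
            if (top.compare_exchange_weak(old_top, new_top))
                return old_top;                       // won the race; memory is ours
        }
    }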
+-      CASE(_anewarray): {
+-        u2 index = Bytes::get_Java_u2(pc+1);
+-        jint size = STACK_INT(-1);
+-        CALL_VM(InterpreterRuntime::anewarray(THREAD, istate->method()->constants(), index, size),
+-                handle_exception);
+-        SET_STACK_OBJECT(THREAD->vm_result(), -1);
+-        THREAD->set_vm_result(NULL);
+-        UPDATE_PC_AND_CONTINUE(3);
+-      }
+-      CASE(_multianewarray): {
+-        jint dims = *(pc+3);
+-        jint size = STACK_INT(-1);
+-        // stack grows down, dimensions are up!
+-        jint *dimarray =
+-                   (jint*)&topOfStack[dims * Interpreter::stackElementWords()+ 
+-                                      Interpreter::stackElementWords()-1];
+-        //adjust pointer to start of stack element
+-        CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray),
+-                handle_exception);
+-        SET_STACK_OBJECT(THREAD->vm_result(), -dims);
+-        THREAD->set_vm_result(NULL);
+-        UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1));
+-      }
+-      CASE(_checkcast):
+-          if (STACK_OBJECT(-1) != NULL) {
+-            u2 index = Bytes::get_Java_u2(pc+1);
+-            if (ProfileInterpreter) {
+-              // needs Profile_checkcast QQQ
+-              ShouldNotReachHere();
+-            }
+-            // Constant pool may have actual klass or unresolved klass. If it is
+-            // unresolved we must resolve it
+-            if (istate->method()->constants()->tag_at(index).is_unresolved_klass()) {
+-              CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
+-            }
+-            klassOop klassOf = (klassOop) *(istate->method()->constants()->obj_at_addr(index));
+-            klassOop objKlassOop = STACK_OBJECT(-1)->klass(); //ebx
+-            //
+-            // Check for compatibility. This check must not GC!!
+-            // Seems way more expensive now that we must dispatch
+-            //
+-            if (objKlassOop != klassOf && 
+-                !objKlassOop->klass_part()->is_subtype_of(klassOf)) {
+-              ResourceMark rm(THREAD);  
+-              const char* objName = Klass::cast(objKlassOop)->external_name();
+-              const char* klassName = Klass::cast(klassOf)->external_name();
+-              char* message = SharedRuntime::generate_class_cast_message(
+-                objName, klassName);
+-              VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message);
+-            }
+-          } else {
+-            if (UncommonNullCast) {
+-//              istate->method()->set_null_cast_seen();
+-// [RGV] Not sure what to do here!
+-              ShouldNotReachHere();
+-
+-            }
+-          }
+-          UPDATE_PC_AND_CONTINUE(3);
+-
+-      CASE(_instanceof):
+-          if (STACK_OBJECT(-1) == NULL) {
+-            SET_STACK_INT(0, -1);
+-          } else {
+-            u2 index = Bytes::get_Java_u2(pc+1);
+-            // Constant pool may have actual klass or unresolved klass. If it is
+-            // unresolved we must resolve it
+-            if (istate->method()->constants()->tag_at(index).is_unresolved_klass()) {
+-              CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
+-            }
+-            klassOop klassOf = (klassOop) *(istate->method()->constants()->obj_at_addr(index));
+-            klassOop objKlassOop = STACK_OBJECT(-1)->klass();
+-            //
+-            // Check for compatibility. This check must not GC!!
+-            // Seems way more expensive now that we must dispatch
+-            //
+-            if ( objKlassOop == klassOf || objKlassOop->klass_part()->is_subtype_of(klassOf)) {
+-              SET_STACK_INT(1, -1);
+-            } else {
+-              SET_STACK_INT(0, -1);
+-            }
+-          }
+-          UPDATE_PC_AND_CONTINUE(3);
+-
+-      CASE(_ldc_w):
+-      CASE(_ldc):
+-        {
+-          u2 index;
+-          bool wide = false;
+-          int incr = 2; // frequent case
+-          if (opcode == Bytecodes::_ldc) {
+-            index = pc[1];
+-          } else {
+-            index = Bytes::get_Java_u2(pc+1);
+-            incr = 3;
+-            wide = true;
+-          }
+-
+-          constantPoolOop constants = istate->method()->constants();
+-          switch (constants->tag_at(index).value()) {
+-          case JVM_CONSTANT_Integer:
+-            SET_STACK_INT(constants->int_at(index), 0);
+-            break;
+-
+-          case JVM_CONSTANT_Float:
+-            SET_STACK_FLOAT(constants->float_at(index), 0);
+-            break;
+-
+-          case JVM_CONSTANT_String:
+-            SET_STACK_OBJECT(constants->resolved_string_at(index), 0);
+-            break;
+-
+-          case JVM_CONSTANT_Class:
+-            SET_STACK_OBJECT(constants->resolved_klass_at(index)->klass_part()->java_mirror(), 0);
+-            break;
+-
+-          case JVM_CONSTANT_UnresolvedString:
+-          case JVM_CONSTANT_UnresolvedClass:
+-          case JVM_CONSTANT_UnresolvedClassInError:
+-            CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception);
+-            SET_STACK_OBJECT(THREAD->vm_result(), 0);
+-            THREAD->set_vm_result(NULL);
+-            break;
+-
+-#if 0
+-          CASE(_fast_igetfield):
+-          CASE(_fast_agetfield):
+-          CASE(_fast_aload_0):
+-          CASE(_fast_iaccess_0):
+-          CASE(_fast_aaccess_0):
+-          CASE(_fast_linearswitch):
+-          CASE(_fast_binaryswitch):
+-            fatal("unsupported fast bytecode");
+-#endif
+-
+-          default:  ShouldNotReachHere();
+-          }
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
+-        }
+-
+-      CASE(_ldc2_w):
+-        {
+-          u2 index = Bytes::get_Java_u2(pc+1);
+-
+-          constantPoolOop constants = istate->method()->constants();
+-          switch (constants->tag_at(index).value()) {
+-
+-          case JVM_CONSTANT_Long:
+-             SET_STACK_LONG(constants->long_at(index), 1);
+-            break;
+-
+-          case JVM_CONSTANT_Double:
+-             SET_STACK_DOUBLE(constants->double_at(index), 1);
+-            break;
+-          default:  ShouldNotReachHere();
+-          }
+-          UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2);
+-        }
+-
+-      CASE(_invokeinterface): {
+-        u2 index = Bytes::get_native_u2(pc+1);
+-
+-        // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
+-        // out so c++ compiler has a chance for constant prop to fold everything possible away.
+-
+-        ConstantPoolCacheEntry* cache = cp->entry_at(index);
+-        if (!cache->is_resolved((Bytecodes::Code)opcode)) {
+-          CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode), 
+-                  handle_exception);
+-          cache = cp->entry_at(index);
+-        }
+-
+-        istate->set_msg(call_method);
+-
+-        // Special case of invokeinterface called for virtual method of
+-        // java.lang.Object.  See cpCacheOop.cpp for details.
+-        // This code isn't produced by javac, but could be produced by
+-        // another compliant java compiler.
+-        if (cache->is_methodInterface()) {
+-          methodOop callee;
+-          CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
+-          if (cache->is_vfinal()) {
+-            callee = (methodOop) cache->f2();
+-          } else {
+-            // get receiver
+-            int parms = cache->parameter_size();
+-            // Same comments as invokevirtual apply here
+-            instanceKlass* rcvrKlass = (instanceKlass*)
+-                                 STACK_OBJECT(-parms)->klass()->klass_part();
+-            callee = (methodOop) rcvrKlass->start_of_vtable()[ cache->f2()];
+-          }
+-          istate->set_callee(callee);
+-          istate->set_callee_entry_point(callee->from_interpreted_entry());
+-#ifdef VM_JVMTI
+-          if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
+-            istate->set_callee_entry_point(callee->interpreter_entry());
+-          }
+-#endif /* VM_JVMTI */
+-          istate->set_bcp_advance(5);
+-          UPDATE_PC_AND_RETURN(0); // I'll be back...
+-        }
+-
+-        // this could definitely be cleaned up QQQ
+-        methodOop callee;
+-        klassOop iclass = (klassOop)cache->f1();
+-        // instanceKlass* interface = (instanceKlass*) iclass->klass_part();
+-        // get receiver
+-        int parms = cache->parameter_size();
+-        oop rcvr = STACK_OBJECT(-parms);
+-        CHECK_NULL(rcvr); 
+-        instanceKlass* int2 = (instanceKlass*) rcvr->klass()->klass_part();
+-        itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable();
+-        int i;
+-        for ( i = 0 ; i < int2->itable_length() ; i++, ki++ ) {
+-          if (ki->interface_klass() == iclass) break;
+-        }
+-        // If the interface isn't found, this class doesn't implement this
+-        // interface.  The link resolver checks this but only for the first
+-        // time this interface is called.
+-        if (i == int2->itable_length()) {
+-          VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "");
+-        }
+-        int mindex = cache->f2();
+-        itableMethodEntry* im = ki->first_method_entry(rcvr->klass());
+-        callee = im[mindex].method();
+-        if (callee == NULL) {
+-          VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), "");
+-        }
+-        
+-        istate->set_callee(callee);
+-        istate->set_callee_entry_point(callee->from_interpreted_entry());
+-#ifdef VM_JVMTI
+-        if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
+-          istate->set_callee_entry_point(callee->interpreter_entry());
+-        }
+-#endif /* VM_JVMTI */
+-        istate->set_bcp_advance(5);
+-        UPDATE_PC_AND_RETURN(0); // I'll be back...
+-      }
+-
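The loop above is the itable search: each itableOffsetEntry pairs an interface klass with the offset, within the receiver's klass, of that interface's method-entry array, and cache->f2() indexes into the array. Roughly this shape (structures hypothetical, not HotSpot's real layout):

    struct SketchOffsetEntry { const void* interface_klass; int offset; };
    struct SketchMethodEntry { void* method; };

    void* sketch_itable_lookup(const SketchOffsetEntry* entries, int count,
                               const char* klass_base, const void* iface, int mindex) {
        for (int i = 0; i < count; i++) {
            if (entries[i].interface_klass == iface) {
                const SketchMethodEntry* methods =
                    (const SketchMethodEntry*)(klass_base + entries[i].offset);
                return methods[mindex].method;  // NULL here -> AbstractMethodError
            }
        }
        return nullptr;  // interface not found -> IncompatibleClassChangeError
    }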
+-      CASE(_invokevirtual):
+-      CASE(_invokespecial):
+-      CASE(_invokestatic): {
+-        u2 index = Bytes::get_native_u2(pc+1);
+-
+-        ConstantPoolCacheEntry* cache = cp->entry_at(index);
+-        // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
+-        // out so c++ compiler has a chance for constant prop to fold everything possible away.
+-
+-        if (!cache->is_resolved((Bytecodes::Code)opcode)) {
+-          CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode), 
+-                  handle_exception);
+-          cache = cp->entry_at(index);
+-        }
+-     
+-        istate->set_msg(call_method);
+-        {
+-          methodOop callee;
+-          if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) {
+-            CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
+-            if (cache->is_vfinal()) callee = (methodOop) cache->f2();
+-            else {
+-              // get receiver
+-              int parms = cache->parameter_size();
+-              // this works but needs a resourcemark and seems to create a vtable on every call:
+-              // methodOop callee = rcvr->klass()->klass_part()->vtable()->method_at(cache->f2());
+-              // 
+-              // this fails with an assert
+-              // instanceKlass* rcvrKlass = instanceKlass::cast(STACK_OBJECT(-parms)->klass());
+-              // but this works
+-              instanceKlass* rcvrKlass = (instanceKlass*) STACK_OBJECT(-parms)->klass()->klass_part();
+-              /*
+-                Executing this code in java.lang.String:
+-                    public String(char value[]) {
+-                          this.count = value.length;
+-                          this.value = (char[])value.clone();
+-                     }
+-
+-                 a find on rcvr->klass()->klass_part() reports:
+-                 {type array char}{type array class} 
+-                  - klass: {other class}
+-
+-                  but using instanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes an assertion failure
+-                  because rcvr->klass()->klass_part()->oop_is_instance() == 0
+-                  However it seems to have a vtable in the right location. Huh?
+-
+-              */
+-              callee = (methodOop) rcvrKlass->start_of_vtable()[ cache->f2()];
+-            }
+-          } else {
+-            if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) {
+-              CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
+-            }
+-            callee = (methodOop) cache->f1();
+-          }
+-
+-          istate->set_callee(callee);
+-          istate->set_callee_entry_point(callee->from_interpreted_entry());
+-#ifdef VM_JVMTI
+-          if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
+-            istate->set_callee_entry_point(callee->interpreter_entry());
+-          }
+-#endif /* VM_JVMTI */
+-          istate->set_bcp_advance(3);
+-          UPDATE_PC_AND_RETURN(0); // I'll be back...
+-        }
+-      }
+-
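For invokevirtual the cache entry is overloaded: with is_vfinal set, f2 already is the methodOop and no dispatch is needed; otherwise f2 is an index into the vtable embedded in the receiver's klass. Reduced to its essence (layout hypothetical):

    void* sketch_select_virtual(bool is_vfinal, void* f2_as_method,
                                void* const* receiver_vtable, int f2_as_index) {
        // Final methods skip the table; everything else is one indexed load.
        return is_vfinal ? f2_as_method : receiver_vtable[f2_as_index];
    }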
+-      /* Allocate memory for a new java object. */
+-
+-      CASE(_newarray): {
+-        BasicType atype = (BasicType) *(pc+1);
+-        jint size = STACK_INT(-1);
+-        CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size),
+-                handle_exception);
+-        SET_STACK_OBJECT(THREAD->vm_result(), -1);
+-        THREAD->set_vm_result(NULL);
+-
+-        UPDATE_PC_AND_CONTINUE(2);
+-      }
+-
+-      /* Throw an exception. */
+-
+-      CASE(_athrow): {
+-          oop except_oop = STACK_OBJECT(-1);
+-          CHECK_NULL(except_oop);
+-          // set pending_exception so we use common code
+-          THREAD->set_pending_exception(except_oop, NULL, 0);
+-          goto handle_exception;
+-      }
+-
+-      /* goto and jsr. They are exactly the same except jsr pushes
+-       * the address of the next instruction first.
+-       */
+-
+-      CASE(_jsr): {
+-          /* push bytecode index on stack */
+-          SET_STACK_SLOT(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0);
+-          MORE_STACK(1);
+-          /* FALL THROUGH */
+-      }
+-
+-      CASE(_goto):
+-      {
+-          int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1);
+-          address branch_pc = pc;
+-          UPDATE_PC(offset);
+-          DO_BACKEDGE_CHECKS(offset, branch_pc);
+-          CONTINUE;
+-      }
+-
+-      CASE(_jsr_w): {
+-          /* push return address on the stack */
+-          SET_STACK_SLOT(((address)pc - (intptr_t)(istate->method()->code_base()) + 5), 0);
+-          MORE_STACK(1);
+-          /* FALL THROUGH */
+-      }
+-
+-      CASE(_goto_w):
+-      {
+-          int32_t offset = Bytes::get_Java_u4(pc + 1);
+-          address branch_pc = pc;
+-          UPDATE_PC(offset);
+-          DO_BACKEDGE_CHECKS(offset, branch_pc);
+-          CONTINUE;
+-      }
+-
+-      /* return from a jsr or jsr_w */
+-
+-      CASE(_ret): {
+-          pc = istate->method()->code_base() + (intptr_t)(LOCALS_SLOT(pc[1]));
+-          UPDATE_PC_AND_CONTINUE(0);
+-      }
+-
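_ret closes the loop that _jsr and _jsr_w open: they push the bci of the instruction after themselves (hence the +3 and +5) as a returnAddress, a subroutine typically astores it into a local, and ret reloads that local as the next pc. This is how javac compiled try/finally before subroutine inlining became the norm (illustrative bytecode):

    //       jsr   L_finally     // push returnAddress (bci of the next instruction)
    //       ...                 // execution continues here after the subroutine
    //   L_finally:
    //       astore_1            // stash the returnAddress in local 1
    //       ...                 // finally body
    //       ret   1             // resume at the stashed bci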
+-      /* debugger breakpoint */
+-
+-      CASE(_breakpoint): {
+-          Bytecodes::Code original_bytecode;
+-          DECACHE_STATE();                                        
+-          SET_LAST_JAVA_FRAME();                                  
+-          original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD, 
+-                              istate->method(), pc);
+-          RESET_LAST_JAVA_FRAME();
+-          CACHE_STATE();
+-          if (THREAD->pending_exception()) goto handle_exception;
+-          CALL_VM(InterpreterRuntime::_breakpoint(THREAD, istate->method(), pc),
+-                  handle_exception);
+-
+-          opcode = (jubyte)original_bytecode;
+-          goto opcode_switch;
+-      }
+-
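JVMTI plants a breakpoint by overwriting the instruction with the reserved opcode 0xCA and remembering the original on the side; the case above fetches the original, posts the event, and re-enters the dispatch switch with it. The re-dispatch in isolation (lookup function hypothetical):

    #include <cstdint>

    uint8_t sketch_resolve_breakpoint(uint8_t opcode, const uint8_t* pc,
                                      uint8_t (*original_bytecode_at)(const uint8_t*)) {
        return (opcode == 0xCA) ? original_bytecode_at(pc) : opcode;
    }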
+-      DEFAULT:
+-          fatal2("\t*** Unimplemented opcode: %d = %s\n",
+-                 opcode, Bytecodes::name((Bytecodes::Code)opcode));
+-          goto finish;
+-
+-      } /* switch(opc) */
+-
+-      
+-#ifdef USELABELS
+-    check_for_exception: 
+-#endif
+-    {
+-      if (!THREAD->has_pending_exception()) {
+-        CONTINUE;
+-      }
+-      /* We will be gcsafe soon, so flush our state. */
+-      DECACHE_PC();
+-      goto handle_exception;
+-    }
+-  do_continue: ;
+-
+-  } /* while (1) interpreter loop */
+-
+-
+-  // An exception exists in the thread state; see whether this activation can handle it
+-  handle_exception: {
+-
+-    HandleMarkCleaner __hmc(THREAD);
+-    Handle except_oop(THREAD, THREAD->pending_exception());
+-    // Prevent any subsequent HandleMarkCleaner in the VM 
+-    // from freeing the except_oop handle.
+-    HandleMark __hm(THREAD);
+-
+-    THREAD->clear_pending_exception();
+-    assert(except_oop(), "No exception to process");
+-    intptr_t continuation_bci;
+-    // expression stack is emptied
+-    topOfStack = istate->stack_base() - Interpreter::stackElementWords();
+-    CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()), 
+-            handle_exception);
+-
+-    except_oop = (oop) THREAD->vm_result();
+-    THREAD->set_vm_result(NULL);
+-    if (continuation_bci >= 0) {
+-      // Place exception on top of stack
+-      SET_STACK_OBJECT(except_oop(), 0);
+-      MORE_STACK(1);
+-      pc = istate->method()->code_base() + continuation_bci;
+-      if (TraceExceptions) {
+-        ttyLocker ttyl;
+-        ResourceMark rm;
+-        tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), except_oop());
+-        tty->print_cr(" thrown in interpreter method <%s>", istate->method()->print_value_string());
+-        tty->print_cr(" at bci %d, continuing at %d for thread " INTPTR_FORMAT,
+-                      pc - (intptr_t)istate->method()->code_base(),
+-                      continuation_bci, THREAD);
+-      }
+-      // for AbortVMOnException flag
+-      NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
+-      goto run;
+-    }
+-    if (TraceExceptions) {
+-      ttyLocker ttyl;
+-      ResourceMark rm;
+-      tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), except_oop());
+-      tty->print_cr(" thrown in interpreter method <%s>", istate->method()->print_value_string());
+-      tty->print_cr(" at bci %d, unwinding for thread " INTPTR_FORMAT,
+-                    pc  - (intptr_t) istate->method()->code_base(),
+-                    THREAD);
+-    }
+-    // for AbortVMOnException flag
+-    NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
+-    // No handler in this activation, unwind and try again
+-    THREAD->set_pending_exception(except_oop(), NULL, 0);
+-    goto handle_return;
+-  }  /* handle_exception: */
+-      
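exception_handler_for_exception is conceptually a walk of the method's exception table: the first entry whose [start_pc, end_pc) range covers the throwing bci and whose catch type is either NULL (a catch-all, as for finally) or a supertype of the thrown class wins; no match means unwind to the caller. A sketch of that search (fields hypothetical):

    struct SketchExEntry { int start_pc, end_pc, handler_pc; const void* catch_type; };

    int sketch_find_handler(const SketchExEntry* tbl, int n, int bci, const void* thrown,
                            bool (*is_subtype_of)(const void*, const void*)) {
        for (int i = 0; i < n; i++) {
            if (bci >= tbl[i].start_pc && bci < tbl[i].end_pc &&
                (tbl[i].catch_type == nullptr || is_subtype_of(thrown, tbl[i].catch_type)))
                return tbl[i].handler_pc;   // first match wins
        }
        return -1;                          // no handler in this frame: unwind
    }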
+-
+-
+-  // Return from an interpreter invocation with the result of the interpretation
+-  // on the top of the Java Stack (or a pending exception)
+-
+-handle_Pop_Frame:
+-
+-  // We don't really do anything special here except we must be aware
+-  // that we can get here without ever locking the method (if sync).
+-  // Also we skip the notification of the exit.
+-
+-  istate->set_msg(popping_frame);
+-  // Clear the pending flag so that, while the pop is in process,
+-  // we don't start another one if a call_vm is done.
+-  THREAD->clr_pop_frame_pending();
+-  // Let the interpreter (only) see that we're in the process of popping a frame
+-  THREAD->set_pop_frame_in_process();
+-
+-handle_return:
+-  {
+-    DECACHE_STATE();
+-
+-    bool suppress_error = istate->msg() == popping_frame;
+-    bool suppress_exit_event = THREAD->has_pending_exception() || suppress_error;
+-    Handle original_exception(THREAD, THREAD->pending_exception());
+-    Handle illegal_state_oop(THREAD, NULL);
+-
+-    // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner
+-    // in any following VM entries from freeing our live handles, but illegal_state_oop
+-    // isn't really allocated yet and so doesn't become live until later and
+-    // in unpredictable places. Instead we must protect the places where we enter the
+-    // VM. It would be much simpler (and safer) if we could allocate a real handle with
+-    // a NULL oop in it and then overwrite the oop later as needed. This,
+-    // unfortunately, isn't possible.
+-
+-    THREAD->clear_pending_exception();
+-
+-    //
+-    // As far as we are concerned we have returned. If we have a pending exception
+-    // that will be returned as this invocation's result. However, if we get any
+-    // exception(s) while checking monitor state, one of those IllegalMonitorStateExceptions
+-    // will be our final result (i.e. monitor exception trumps a pending exception).
+-    //
+-
+-    // If we never locked the method (or really passed the point where we would have),
+-    // there is no need to unlock it (or look for other monitors), since that
+-    // could not have happened.
+-
+-    if (!THREAD->do_not_unlock()) {
+-      // At this point we consider that we have returned. We now check that the
+-      // locks were properly block structured. If we find that they were not
+-      // used properly we will return with an illegal monitor exception.
+-      // The exception is checked by the caller not the callee since this
+-      // checking is considered to be part of the invocation and therefore
+-      // in the callers scope (JVM spec 8.13).
+-      //
+-      // Another weird thing to watch for is if the method was locked
+-      // recursively and then not exited properly. This means we must
+-      // examine all the entries in reverse time (and stack) order and
+-      // unlock as we find them. If we find the method monitor before
+-      // we are at the initial entry then we should throw an exception.
+-      // It is not clear that the template-based interpreter does this
+-      // correctly
+-        
+-      BasicObjectLock* base = istate->monitor_base();
+-      BasicObjectLock* end = (BasicObjectLock*) istate->stack_base();
+-      bool method_unlock_needed = istate->method()->is_synchronized();
+-      // We know the initial monitor was used for the method don't check that
+-      // slot in the loop
+-      if (method_unlock_needed) base--;
+-
+-      // Check all the monitors to see they are unlocked. Install exception if found to be locked.
+-      while (end < base) {
+-        oop lockee = end->obj();
+-        if (lockee != NULL) {
+-          BasicLock* lock = end->lock();
+-          markOop header = lock->displaced_header();
+-          end->set_obj(NULL);
+-          // If it isn't recursive we must either swap the old header or call the runtime
+-          if (header != NULL) {
+-            if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
+-              // restore object for the slow case
+-              end->set_obj(lockee);
+-              {
+-                // Prevent any HandleMarkCleaner from freeing our live handles
+-                HandleMark __hm(THREAD); 
+-                CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end));
+-              }
+-            }
+-          }
+-          // One error is plenty
+-          if (illegal_state_oop() == NULL && !suppress_error) {
+-            {
+-              // Prevent any HandleMarkCleaner from freeing our live handles
+-              HandleMark __hm(THREAD); 
+-              CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
+-            }
+-            assert(THREAD->has_pending_exception(), "Lost our exception!");
+-            illegal_state_oop = THREAD->pending_exception();
+-            THREAD->clear_pending_exception();
+-          }
+-        }
+-        end++;
+-      }
+-      // Unlock the method if needed
+-      if (method_unlock_needed) {
+-        if (base->obj() == NULL) {
+-          // The method is already unlocked; this is not good.
+-          if (illegal_state_oop() == NULL && !suppress_error) {
+-            {
+-              // Prevent any HandleMarkCleaner from freeing our live handles
+-              HandleMark __hm(THREAD); 
+-              CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
+-            }
+-            assert(THREAD->has_pending_exception(), "Lost our exception!");
+-            illegal_state_oop = THREAD->pending_exception();
+-            THREAD->clear_pending_exception();
+-          }
+-        } else {
+-          //
+-          // The initial monitor is always used for the method
+-          // However, if that slot no longer holds the oop for the method, it was unlocked
+-          // and reused by something that wasn't unlocked!
+-          //
+-          // deopt can come in with rcvr dead because c2 knows
+-          // its value is preserved in the monitor. So we can't use locals[0] at all
+-          // and must use first monitor slot.
+-          //
+-          oop rcvr = base->obj();
+-          if (rcvr == NULL) {
+-            if (!suppress_error) {
+-              VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "");
+-              illegal_state_oop = THREAD->pending_exception();
+-              THREAD->clear_pending_exception();
+-            }
+-          } else {
+-            BasicLock* lock = base->lock();
+-            markOop header = lock->displaced_header();
+-            base->set_obj(NULL);
+-            // If it isn't recursive we must either swap the old header or call the runtime
+-            if (header != NULL) {
+-              if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
+-                // restore object for the slow case
+-                base->set_obj(rcvr);
+-                {
+-                  // Prevent any HandleMarkCleaner from freeing our live handles
+-                  HandleMark __hm(THREAD); 
+-                  CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
+-                }
+-                if (THREAD->has_pending_exception()) {
+-                  if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
+-                  THREAD->clear_pending_exception();
+-                }
+-              }
+-            }
+-          }
+-        }
+-      }
+-    }
+-
+-    //
+-    // Notify jvmti
+-    //
+-    // NOTE: we do not notify a method_exit if we have a pending exception,
+-    // including an exception we generate for unlocking checks.  In the former
+-    // case, JVMDI has already been notified by our call for the exception handler
+-    // and in both cases as far as JVMDI is concerned we have already returned.
+-    // If we notify it again JVMDI will be all confused about how many frames
+-    // are still on the stack (4340444).
+-    //
+-    //
+-    // NOTE Further! It turns out that the JVMTI spec in fact expects to see
+-    // method_exit events whenever we leave an activation unless it was done
+-    // for popframe. This is nothing like jvmdi. However
+-    // we are passing the tests at the moment (apparently because they are
+-    // jvmdi based) so rather than change this code and possibly fail tests
+-    // we will leave it alone (with this note) in anticipation of changing
+-    // the vm and the tests simultaneously.
+-
+-
+-    //
+-    suppress_exit_event = suppress_exit_event || illegal_state_oop() != NULL;
+-
+-
+-
+-#ifdef VM_JVMTI
+-      if (_jvmti_interp_events) {
+-        // Whenever JVMTI puts a thread in interp_only_mode, method
+-        // entry/exit events are sent for that thread to track stack depth.  
+-        if ( !suppress_exit_event && THREAD->is_interp_only_mode() ) {
+-          {
+-            // Prevent any HandleMarkCleaner from freeing our live handles
+-            HandleMark __hm(THREAD); 
+-            CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD));
+-          }
+-        }
+-      }
+-#endif /* VM_JVMTI */
+-
+-    //
+-    // See if we are returning any exception
+-    // A pending exception that was pending prior to a possible popping frame
+-    // overrides the popping frame.
+-    //
+-    assert(!suppress_error || suppress_error && illegal_state_oop() == NULL, "Error was not suppressed");
+-    if (illegal_state_oop() != NULL || original_exception() != NULL) {
+-      // inform the frame manager we have no result
+-      istate->set_msg(throwing_exception);
+-      if (illegal_state_oop() != NULL) 
+-        THREAD->set_pending_exception(illegal_state_oop(), NULL, 0);
+-      else
+-        THREAD->set_pending_exception(original_exception(), NULL, 0);
+-      istate->set_return_kind((Bytecodes::Code)opcode);
+-      UPDATE_PC_AND_RETURN(0);
+-    }
+-
+-    if (istate->msg() == popping_frame) {
+-      // Make it simpler on the assembly code and set the message for the frame pop.
+-      // returns
+-      if (istate->prev() == NULL) {
+-        // We must be returning to a deoptimized frame (because popframe only happens between
+-        // two interpreted frames). We need to save the current arguments in C heap so that
+-        // the deoptimized frame when it restarts can copy the arguments to its expression
+-        // stack and re-execute the call. We also have to notify deoptimization that this
+-        // has occurred and to pick up the preserved args and copy them to the
+-        // deoptimized frame's Java expression stack. Yuck.
+-        //
+-        THREAD->popframe_preserve_args(in_ByteSize(istate->method()->size_of_parameters() * wordSize),
+-                                LOCALS_SLOT(istate->method()->size_of_parameters() - 1));
+-        THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit);
+-      }
+-      UPDATE_PC_AND_RETURN(1);
+-    } else {
+-      // Normal return
+-      // Advance the pc and return to frame manager
+-      istate->set_msg(return_from_method);
+-      istate->set_return_kind((Bytecodes::Code)opcode);
+-      UPDATE_PC_AND_RETURN(1);
+-    }
+-  } /* handle_return: */
+-
+-// This is really a fatal error return
+-
+-finish:
+-  DECACHE_TOS();
+-  DECACHE_PC();
+-
+-  return;
+-}
+-
+-#endif // CC_INTERP
+-
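The return path removed above follows HotSpot's stack-locking protocol: a
NULL displaced header marks a recursive lock; otherwise a compare-and-swap
must restore the saved header into the object's mark word, falling back to
the runtime when the mark no longer points at the lock record. A minimal
sketch of that pattern, using hypothetical MarkWord and BasicLockSlot
stand-ins rather than HotSpot's markOop/BasicObjectLock types:

#include <atomic>

struct MarkWord;                      // opaque stand-in for the header word

struct BasicLockSlot {
    void*     obj;                    // locked object; NULL once released
    MarkWord* displaced_header;       // NULL marks a recursive stack-lock
};

// Returns true when the fast path released the lock; on false the caller
// restores slot->obj and takes the runtime slow path, as the interpreter
// above does via InterpreterRuntime::monitorexit.
bool fast_unlock(BasicLockSlot* slot, std::atomic<MarkWord*>* mark_addr) {
    MarkWord* header = slot->displaced_header;
    slot->obj = nullptr;                  // tentatively clear the slot
    if (header == nullptr)
        return true;                      // recursive: nothing to restore
    MarkWord* expected = reinterpret_cast<MarkWord*>(slot);
    // CAS the saved header back in; this fails if the mark word no longer
    // points at this lock record (for example, the monitor was inflated).
    return mark_addr->compare_exchange_strong(expected, header);
}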
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/cppInterpreter.cpp openjdk/hotspot/src/share/vm/interpreter/cppInterpreter.cpp
+--- openjdk6/hotspot/src/share/vm/interpreter/cppInterpreter.cpp	1969-12-31 19:00:00.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/cppInterpreter.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -0,0 +1,135 @@
++/*
++ * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
++ * CA 95054 USA or visit www.sun.com if you need additional information or
++ * have any questions.
++ *
++ */
++
++#include "incls/_precompiled.incl"
++#include "incls/_cppInterpreter.cpp.incl"
++
++#ifdef CC_INTERP
++# define __ _masm->
++
++void CppInterpreter::initialize() {
++  if (_code != NULL) return;
++  AbstractInterpreter::initialize();
++
++  // generate interpreter
++  { ResourceMark rm;
++    TraceTime timer("Interpreter generation", TraceStartupTime);
++    int code_size = InterpreterCodeSize;
++    NOT_PRODUCT(code_size *= 4;)  // debug uses extra interpreter code space
++    _code = new StubQueue(new InterpreterCodeletInterface, code_size, NULL,
++                          "Interpreter");
++    InterpreterGenerator g(_code);
++    if (PrintInterpreter) print();
++  }
++
++
++  // Allow the C++ interpreter to do one-time initialization now that switches are set, etc.
++  BytecodeInterpreter start_msg(BytecodeInterpreter::initialize);
++  if (JvmtiExport::can_post_interpreter_events())
++    BytecodeInterpreter::runWithChecks(&start_msg);
++  else
++    BytecodeInterpreter::run(&start_msg);
++}
++
++
++address    CppInterpreter::_tosca_to_stack         [AbstractInterpreter::number_of_result_handlers];
++address    CppInterpreter::_stack_to_stack         [AbstractInterpreter::number_of_result_handlers];
++address    CppInterpreter::_stack_to_native_abi    [AbstractInterpreter::number_of_result_handlers];
++
++CppInterpreterGenerator::CppInterpreterGenerator(StubQueue* _code): AbstractInterpreterGenerator(_code) {
++}
++
++static const BasicType types[Interpreter::number_of_result_handlers] = {
++  T_BOOLEAN,
++  T_CHAR   ,
++  T_BYTE   ,
++  T_SHORT  ,
++  T_INT    ,
++  T_LONG   ,
++  T_VOID   ,
++  T_FLOAT  ,
++  T_DOUBLE ,
++  T_OBJECT
++};
++
++void CppInterpreterGenerator::generate_all() {
++  AbstractInterpreterGenerator::generate_all();
++
++  { CodeletMark cm(_masm, "result handlers for native calls");
++    // The various result converter stublets.
++    int is_generated[Interpreter::number_of_result_handlers];
++    memset(is_generated, 0, sizeof(is_generated));
++    int _tosca_to_stack_is_generated[Interpreter::number_of_result_handlers];
++    int _stack_to_stack_is_generated[Interpreter::number_of_result_handlers];
++    int _stack_to_native_abi_is_generated[Interpreter::number_of_result_handlers];
++
++    memset(_tosca_to_stack_is_generated, 0, sizeof(_tosca_to_stack_is_generated));
++    memset(_stack_to_stack_is_generated, 0, sizeof(_stack_to_stack_is_generated));
++    memset(_stack_to_native_abi_is_generated, 0, sizeof(_stack_to_native_abi_is_generated));
++    for (int i = 0; i < Interpreter::number_of_result_handlers; i++) {
++      BasicType type = types[i];
++      if (!is_generated[Interpreter::BasicType_as_index(type)]++) {
++        Interpreter::_native_abi_to_tosca[Interpreter::BasicType_as_index(type)] = generate_result_handler_for(type);
++      }
++      if (!_tosca_to_stack_is_generated[Interpreter::BasicType_as_index(type)]++) {
++        Interpreter::_tosca_to_stack[Interpreter::BasicType_as_index(type)] = generate_tosca_to_stack_converter(type);
++      }
++      if (!_stack_to_stack_is_generated[Interpreter::BasicType_as_index(type)]++) {
++        Interpreter::_stack_to_stack[Interpreter::BasicType_as_index(type)] = generate_stack_to_stack_converter(type);
++      }
++      if (!_stack_to_native_abi_is_generated[Interpreter::BasicType_as_index(type)]++) {
++        Interpreter::_stack_to_native_abi[Interpreter::BasicType_as_index(type)] = generate_stack_to_native_abi_converter(type);
++      }
++    }
++  }
++
++
++#define method_entry(kind) Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind)
++
++  { CodeletMark cm(_masm, "(kind = frame_manager)");
++    // all non-native method kinds
++    method_entry(zerolocals);
++    method_entry(zerolocals_synchronized);
++    method_entry(empty);
++    method_entry(accessor);
++    method_entry(abstract);
++    method_entry(java_lang_math_sin   );
++    method_entry(java_lang_math_cos   );
++    method_entry(java_lang_math_tan   );
++    method_entry(java_lang_math_abs   );
++    method_entry(java_lang_math_sqrt  );
++    method_entry(java_lang_math_log   );
++    method_entry(java_lang_math_log10 );
++    Interpreter::_native_entry_begin = Interpreter::code()->code_end();
++    method_entry(native);
++    method_entry(native_synchronized);
++    Interpreter::_native_entry_end = Interpreter::code()->code_end();
++  }
++
++
++#undef method_entry
++
++}
++
++#endif // CC_INTERP
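The is_generated guards in generate_all() above make converter generation
idempotent per table slot: BasicType_as_index may map several basic types
to one slot on a given port, and the post-increment test ensures only the
first type reaching a slot generates its stublet. A sketch of the idiom,
with as_index and generate_converter_for as hypothetical stand-ins:

#include <cstring>

enum BasicType { T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT, T_LONG,
                 T_VOID, T_FLOAT, T_DOUBLE, T_OBJECT,
                 number_of_result_handlers };

typedef unsigned char* address;

static int as_index(BasicType t) { return (int)t; }   // stand-in mapping
extern address generate_converter_for(BasicType t);   // hypothetical stub

void generate_converters(address* table) {
    int is_generated[number_of_result_handlers];
    std::memset(is_generated, 0, sizeof(is_generated));
    for (int i = 0; i < number_of_result_handlers; i++) {
        BasicType t = (BasicType)i;
        // Post-increment guard: if several types share a slot, only the
        // first one generates; later types reuse the existing stublet.
        if (!is_generated[as_index(t)]++)
            table[as_index(t)] = generate_converter_for(t);
    }
}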
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/cppInterpreterGenerator.hpp openjdk/hotspot/src/share/vm/interpreter/cppInterpreterGenerator.hpp
+--- openjdk6/hotspot/src/share/vm/interpreter/cppInterpreterGenerator.hpp	1969-12-31 19:00:00.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/cppInterpreterGenerator.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -0,0 +1,47 @@
++/*
++ * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
++ * CA 95054 USA or visit www.sun.com if you need additional information or
++ * have any questions.
++ *
++ */
++
++// This file contains the platform-independent parts
++// of the C++ interpreter generator.
++
++#ifdef CC_INTERP
++
++class CppInterpreterGenerator: public AbstractInterpreterGenerator {
++  protected:
++  // shared code sequences
++  // Converter for native abi result to tosca result
++  address generate_result_handler_for(BasicType type);
++  address generate_tosca_to_stack_converter(BasicType type);
++  address generate_stack_to_stack_converter(BasicType type);
++  address generate_stack_to_native_abi_converter(BasicType type);
++
++  void generate_all();
++
++ public:
++  CppInterpreterGenerator(StubQueue* _code);
++
++   #include "incls/_cppInterpreterGenerator_pd.hpp.incl"
++};
++
++#endif // CC_INTERP
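The four generators declared above produce machine-code stublets that move
a method result between representations (native ABI, top-of-stack cache,
expression stack). Conceptually, a tosca-to-stack converter for T_INT does
no more than the following sketch, where the downward-growing stack and the
intptr_t slot width are assumptions about the C++ interpreter's layout:

#include <cstdint>

// Conceptual sketch only; the real converter is generated machine code.
inline intptr_t* tosca_to_stack_int(int tos_value, intptr_t* stack_top) {
    *--stack_top = (intptr_t)tos_value;  // push the cached result slot
    return stack_top;                    // new top of the expression stack
}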
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/cppInterpreter.hpp openjdk/hotspot/src/share/vm/interpreter/cppInterpreter.hpp
+--- openjdk6/hotspot/src/share/vm/interpreter/cppInterpreter.hpp	1969-12-31 19:00:00.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/cppInterpreter.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -0,0 +1,83 @@
++/*
++ * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
++ * CA 95054 USA or visit www.sun.com if you need additional information or
++ * have any questions.
++ *
++ */
++
++#ifdef CC_INTERP
++
++// This file contains the platform-independent parts
++// of the C++ interpreter.
++
++class CppInterpreter: public AbstractInterpreter {
++  friend class VMStructs;
++  friend class Interpreter; // contains()
++  friend class InterpreterGenerator; // result handlers
++  friend class CppInterpreterGenerator; // result handlers
++ public:
++
++
++ protected:
++
++  // tosca result -> stack result
++  static address    _tosca_to_stack[number_of_result_handlers];  // converts tosca to C++ interpreter stack result
++  // stack result -> stack result
++  static address    _stack_to_stack[number_of_result_handlers];  // pass result between C++ interpreter calls
++  // stack result -> native abi result
++  static address    _stack_to_native_abi[number_of_result_handlers];  // converts C++ interpreter results to native abi
++
++  // This is to allow frame, and only frame, to use contains().
++  friend class      frame;
++
++ public:
++  // Initialization/debugging
++  static void       initialize();
++  // this only returns whether a pc is within generated code for the interpreter.
++
++  // This is a moderately dubious interface for the c++ interpreter. Only
++  // frame code and debug.cpp should be using it.
++  static bool       contains(address pc);
++
++ public:
++
++
++  // No dispatch table to switch, so no need for these to do anything special
++  static void notice_safepoints() {}
++  static void ignore_safepoints() {}
++
++  static address    native_result_to_tosca()                    { return (address)_native_abi_to_tosca; } // aka result handler
++  static address    tosca_result_to_stack()                     { return (address)_tosca_to_stack; }
++  static address    stack_result_to_stack()                     { return (address)_stack_to_stack; }
++  static address    stack_result_to_native()                    { return (address)_stack_to_native_abi; }
++
++  static address    native_result_to_tosca(int index)           { return _native_abi_to_tosca[index]; } // aka result handler
++  static address    tosca_result_to_stack(int index)            { return _tosca_to_stack[index]; }
++  static address    stack_result_to_stack(int index)            { return _stack_to_stack[index]; }
++  static address    stack_result_to_native(int index)           { return _stack_to_native_abi[index]; }
++
++  static address    return_entry  (TosState state, int length);
++  static address    deopt_entry   (TosState state, int length);
++
++#include "incls/_cppInterpreter_pd.hpp.incl"
++
++};
++
++#endif // CC_INTERP
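The paired accessors above exist so generated code can either embed a whole
table base (the no-argument forms) or load one entry (the indexed forms).
A minimal self-contained sketch of that accessor pattern, with
ResultHandlers as an illustrative stand-in rather than a HotSpot class:

#include <cassert>

typedef unsigned char* address;

// One form returns the table base for code that indexes at runtime;
// the other returns a single pre-resolved entry.
struct ResultHandlers {
    static const int N = 10;               // number_of_result_handlers
    static address table[N];
    static address base()       { return (address)table; }
    static address entry(int i) { assert(i >= 0 && i < N); return table[i]; }
};
address ResultHandlers::table[ResultHandlers::N];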
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/interpreter.cpp openjdk/hotspot/src/share/vm/interpreter/interpreter.cpp
+--- openjdk6/hotspot/src/share/vm/interpreter/interpreter.cpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/interpreter.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)interpreter.cpp	1.246 07/06/08 15:21:43 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -53,7 +50,7 @@
+   if (description() != NULL) tty->print("%s  ", description());
+   if (bytecode()    >= 0   ) tty->print("%d %s  ", bytecode(), Bytecodes::name(bytecode()));
+   tty->print_cr("[" INTPTR_FORMAT ", " INTPTR_FORMAT "]  %d bytes",
+-		code_begin(), code_end(), code_size());
++                code_begin(), code_end(), code_size());
+ 
+   if (PrintInterpreter) {
+     tty->cr();
+@@ -63,58 +60,20 @@
+ 
+ 
+ //------------------------------------------------------------------------------------------------------------------------
+-// Implementation of AbstractInterpreter
+-
+-
+-// Define a prototype interface
+-DEF_STUB_INTERFACE(InterpreterCodelet);
+-
++// Implementation of platform-independent aspects of the Interpreter
+ 
+ void AbstractInterpreter::initialize() {
+   if (_code != NULL) return;
+ 
+-  // assertions
+-#ifndef CC_INTERP
+-  assert((int)Bytecodes::number_of_codes <= (int)DispatchTable::length, 
+-         "dispatch table too small");
+-#endif /* !CC_INTERP */
+-
+   // make sure 'imported' classes are initialized
+   if (CountBytecodes || TraceBytecodes || StopInterpreterAt) BytecodeCounter::reset();
+   if (PrintBytecodeHistogram)                                BytecodeHistogram::reset();
+   if (PrintBytecodePairHistogram)                            BytecodePairHistogram::reset();
+-#ifndef CC_INTERP
+-  TemplateTable::initialize();
+-#endif /* !CC_INTERP */
+-  InvocationCounter::reinitialize(DelayCompilationDuringStartup);
+ 
+-  // generate interpreter
+-  { ResourceMark rm;
+-    TraceTime timer("Interpreter generation", TraceStartupTime);    
+-    int code_size = Interpreter::InterpreterCodeSize;
+-    NOT_PRODUCT(code_size *= 4;)  // debug uses extra interpreter code space
+-    _code = new StubQueue(new InterpreterCodeletInterface, code_size, NULL,
+-                          "Interpreter");
+-    InterpreterGenerator g(_code);
+-    if (PrintInterpreter) print();
+-  }
++  InvocationCounter::reinitialize(DelayCompilationDuringStartup);
+ 
+-#ifdef CC_INTERP
+-  {
+-    // Allow c++ interpreter to do one initialization now that switches are set, etc.
+-    cInterpreter start_msg(cInterpreter::initialize);
+-    if (JvmtiExport::can_post_interpreter_events())
+-      cInterpreter::InterpretMethodWithChecks(&start_msg);
+-    else
+-      cInterpreter::InterpretMethod(&start_msg);
+-  }
+-#else
+-  // initialize dispatch table
+-  _active_table = _normal_table;
+-#endif // CC_INTERP
+ }
+ 
+-
+ void AbstractInterpreter::print() {
+   tty->cr();
+   tty->print_cr("----------------------------------------------------------------------");
+@@ -154,618 +113,53 @@
+   // notify JVMTI profiler
+   if (JvmtiExport::should_post_dynamic_code_generated()) {
+     JvmtiExport::post_dynamic_code_generated("Interpreter",
+-					     AbstractInterpreter::code()->code_start(),
+-					     AbstractInterpreter::code()->code_end());
+-  }
+-}
+-
+-
+-#ifndef CC_INTERP
+-//------------------------------------------------------------------------------------------------------------------------
+-// Implementation of EntryPoint
+-
+-EntryPoint::EntryPoint() {
+-  assert(number_of_states == 9, "check the code below");
+-  _entry[btos] = NULL;
+-  _entry[ctos] = NULL;
+-  _entry[stos] = NULL;
+-  _entry[atos] = NULL;
+-  _entry[itos] = NULL;
+-  _entry[ltos] = NULL;
+-  _entry[ftos] = NULL;
+-  _entry[dtos] = NULL;
+-  _entry[vtos] = NULL;
+-}
+-
+-
+-EntryPoint::EntryPoint(address bentry, address centry, address sentry, address aentry, address ientry, address lentry, address fentry, address dentry, address ventry) {
+-  assert(number_of_states == 9, "check the code below");
+-  _entry[btos] = bentry;
+-  _entry[ctos] = centry;
+-  _entry[stos] = sentry;
+-  _entry[atos] = aentry;
+-  _entry[itos] = ientry;
+-  _entry[ltos] = lentry;
+-  _entry[ftos] = fentry;
+-  _entry[dtos] = dentry;
+-  _entry[vtos] = ventry;
+-}
+-
+-
+-void EntryPoint::set_entry(TosState state, address entry) {
+-  assert(0 <= state && state < number_of_states, "state out of bounds");
+-  _entry[state] = entry;
+-}
+-
+-
+-address EntryPoint::entry(TosState state) const {
+-  assert(0 <= state && state < number_of_states, "state out of bounds");
+-  return _entry[state];
+-}
+-
+-
+-void EntryPoint::print() {
+-  tty->print("[");
+-  for (int i = 0; i < number_of_states; i++) {
+-    if (i > 0) tty->print(", ");
+-    tty->print(INTPTR_FORMAT, _entry[i]);
+-  }
+-  tty->print("]");
+-}
+-
+-
+-bool EntryPoint::operator == (const EntryPoint& y) {
+-  int i = number_of_states;
+-  while (i-- > 0) {
+-    if (_entry[i] != y._entry[i]) return false;
+-  }
+-  return true;
+-}
+-
+-
+-//------------------------------------------------------------------------------------------------------------------------
+-// Implementation of DispatchTable
+-
+-EntryPoint DispatchTable::entry(int i) const {
+-  assert(0 <= i && i < length, "index out of bounds");
+-  return
+-    EntryPoint(
+-      _table[btos][i],
+-      _table[ctos][i],
+-      _table[stos][i],
+-      _table[atos][i],
+-      _table[itos][i],
+-      _table[ltos][i],
+-      _table[ftos][i],
+-      _table[dtos][i],
+-      _table[vtos][i]
+-    );
+-}
+-
+-
+-void DispatchTable::set_entry(int i, EntryPoint& entry) {
+-  assert(0 <= i && i < length, "index out of bounds");
+-  assert(number_of_states == 9, "check the code below");
+-  _table[btos][i] = entry.entry(btos);
+-  _table[ctos][i] = entry.entry(ctos);
+-  _table[stos][i] = entry.entry(stos);
+-  _table[atos][i] = entry.entry(atos);
+-  _table[itos][i] = entry.entry(itos);
+-  _table[ltos][i] = entry.entry(ltos);
+-  _table[ftos][i] = entry.entry(ftos);
+-  _table[dtos][i] = entry.entry(dtos);
+-  _table[vtos][i] = entry.entry(vtos);
+-}
+-
+-
+-bool DispatchTable::operator == (DispatchTable& y) {
+-  int i = length;
+-  while (i-- > 0) {
+-    EntryPoint t = y.entry(i); // for compiler compatibility (BugId 4150096)
+-    if (!(entry(i) == t)) return false;
++                                             AbstractInterpreter::code()->code_start(),
++                                             AbstractInterpreter::code()->code_end());
+   }
+-  return true;
+ }
+-#endif // CC_INTERP
+-
+ 
+ //------------------------------------------------------------------------------------------------------------------------
+ // Implementation of interpreter
+ 
+ StubQueue* AbstractInterpreter::_code                                       = NULL;
+ bool       AbstractInterpreter::_notice_safepoints                          = false;
+-
+ address    AbstractInterpreter::_rethrow_exception_entry                    = NULL;
+-#ifndef CC_INTERP
+-address    AbstractInterpreter::_remove_activation_entry                    = NULL;
+-address    AbstractInterpreter::_remove_activation_preserving_args_entry    = NULL;
+-
+-
+-address    AbstractInterpreter::_throw_ArrayIndexOutOfBoundsException_entry = NULL;
+-address    AbstractInterpreter::_throw_ArrayStoreException_entry            = NULL;
+-address    AbstractInterpreter::_throw_ArithmeticException_entry            = NULL;
+-address    AbstractInterpreter::_throw_ClassCastException_entry             = NULL;
+-address    AbstractInterpreter::_throw_NullPointerException_entry           = NULL;
+-address    AbstractInterpreter::_throw_StackOverflowError_entry             = NULL;
+-address    AbstractInterpreter::_throw_exception_entry                      = NULL;
+-
+-#ifndef PRODUCT
+-EntryPoint AbstractInterpreter::_trace_code;
+-#endif // !PRODUCT
+-EntryPoint AbstractInterpreter::_return_entry[AbstractInterpreter::number_of_return_entries];
+-EntryPoint AbstractInterpreter::_earlyret_entry;
+-EntryPoint AbstractInterpreter::_deopt_entry [AbstractInterpreter::number_of_deopt_entries ];
+-EntryPoint AbstractInterpreter::_continuation_entry;
+-EntryPoint AbstractInterpreter::_safept_entry;
+-
+-address    AbstractInterpreter::_return_3_addrs_by_index[AbstractInterpreter::number_of_return_addrs];
+-address    AbstractInterpreter::_return_5_addrs_by_index[AbstractInterpreter::number_of_return_addrs];
+-
+-DispatchTable AbstractInterpreter::_active_table;
+-DispatchTable AbstractInterpreter::_normal_table;
+-DispatchTable AbstractInterpreter::_safept_table;
+-address    AbstractInterpreter::_wentry_point[DispatchTable::length];
+-#endif // CC_INTERP
+ 
+ address    AbstractInterpreter::_native_entry_begin                         = NULL;
+ address    AbstractInterpreter::_native_entry_end                           = NULL;
+ address    AbstractInterpreter::_slow_signature_handler;
+ address    AbstractInterpreter::_entry_table            [AbstractInterpreter::number_of_method_entries];
+ address    AbstractInterpreter::_native_abi_to_tosca    [AbstractInterpreter::number_of_result_handlers];
+-#ifdef CC_INTERP
+-address    AbstractInterpreter::_tosca_to_stack         [AbstractInterpreter::number_of_result_handlers];
+-address    AbstractInterpreter::_stack_to_stack         [AbstractInterpreter::number_of_result_handlers];
+-address    AbstractInterpreter::_stack_to_native_abi    [AbstractInterpreter::number_of_result_handlers];
+-#endif
+-
+-
+-//------------------------------------------------------------------------------------------------------------------------
+-// A CodeletMark serves as an automatic creator/initializer for Codelets
+-// (As a subclass of ResourceMark it automatically GC's the allocated
+-// code buffer and assemblers).
+-
+-class CodeletMark: ResourceMark {
+- private:
+-  InterpreterCodelet*         _clet;
+-  InterpreterMacroAssembler** _masm;
+-  CodeBuffer                  _cb;
+-
+-  int codelet_size() {
+-    // Request the whole code buffer (minus a little for alignment).
+-    // The commit call below trims it back for each codelet.
+-    int codelet_size = AbstractInterpreter::code()->available_space() - 2*K;
+-
+-    // Guarantee there's a little bit of code space left.
+-    guarantee (codelet_size > 0 && (size_t)codelet_size >  2*K,
+-               "not enough space for interpreter generation");
+-
+-    return codelet_size;
+-  }
+-
+- public:
+-  CodeletMark(
+-    InterpreterMacroAssembler*& masm,
+-    const char* description,
+-    Bytecodes::Code bytecode = Bytecodes::_illegal):
+-    _clet((InterpreterCodelet*)AbstractInterpreter::code()->request(codelet_size())),
+-    _cb(_clet->code_begin(), _clet->code_size())
+-
+-  { // request all space (add some slack for Codelet data)
+-    assert (_clet != NULL, "we checked not enough space already");
+-
+-    // initialize Codelet attributes
+-    _clet->initialize(description, bytecode);
+-    // create assembler for code generation
+-    masm  = new InterpreterMacroAssembler(&_cb);
+-    _masm = &masm;
+-  }  
+-  
+-  ~CodeletMark() {
+-    // align so printing shows nop's instead of random code at the end (Codelets are aligned)
+-    (*_masm)->align(wordSize);
+-    // make sure all code is in code buffer
+-    (*_masm)->flush();
+-
+-
+-    // commit Codelet
+-    AbstractInterpreter::code()->commit((*_masm)->code()->pure_code_size());
+-    // make sure nobody can use _masm outside a CodeletMark lifespan
+-    *_masm = NULL;
+-  }
+-};
+-
+ 
+ //------------------------------------------------------------------------------------------------------------------------
+ // Generation of complete interpreter
+ 
+ AbstractInterpreterGenerator::AbstractInterpreterGenerator(StubQueue* _code) {
+   _masm                      = NULL;
+-#ifndef CC_INTERP
+-  _unimplemented_bytecode    = NULL;
+-  _illegal_bytecode_sequence = NULL;
+-#endif // CC_INTERP
+ }
+ 
+ 
+-void AbstractInterpreterGenerator::generate_all() {
+-#ifndef CC_INTERP
+-  { CodeletMark cm(_masm, "error exits");
+-    _unimplemented_bytecode    = generate_error_exit("unimplemented bytecode");
+-    _illegal_bytecode_sequence = generate_error_exit("illegal bytecode sequence - method not verified");
+-  }
+-
+-#ifndef PRODUCT
+-  if (TraceBytecodes) {
+-    CodeletMark cm(_masm, "bytecode tracing support");
+-    Interpreter::_trace_code =
+-      EntryPoint(
+-        generate_trace_code(btos),
+-        generate_trace_code(ctos),
+-        generate_trace_code(stos),
+-        generate_trace_code(atos),
+-        generate_trace_code(itos),
+-        generate_trace_code(ltos),
+-        generate_trace_code(ftos),
+-        generate_trace_code(dtos),
+-        generate_trace_code(vtos)
+-      );
+-  }
+-#endif // !PRODUCT
+-
+-  { CodeletMark cm(_masm, "return entry points");
+-    for (int i = 0; i < Interpreter::number_of_return_entries; i++) {
+-      Interpreter::_return_entry[i] =
+-        EntryPoint(
+-          generate_return_entry_for(itos, i),
+-          generate_return_entry_for(itos, i),
+-          generate_return_entry_for(itos, i),
+-          generate_return_entry_for(atos, i),
+-          generate_return_entry_for(itos, i),
+-          generate_return_entry_for(ltos, i),
+-          generate_return_entry_for(ftos, i),
+-          generate_return_entry_for(dtos, i),
+-          generate_return_entry_for(vtos, i)
+-        );
+-    }
+-  }
+-
+-  { CodeletMark cm(_masm, "earlyret entry points");
+-    Interpreter::_earlyret_entry =
+-      EntryPoint(
+-        generate_earlyret_entry_for(btos),
+-        generate_earlyret_entry_for(ctos),
+-        generate_earlyret_entry_for(stos),
+-        generate_earlyret_entry_for(atos),
+-        generate_earlyret_entry_for(itos),
+-        generate_earlyret_entry_for(ltos),
+-        generate_earlyret_entry_for(ftos),
+-        generate_earlyret_entry_for(dtos),
+-        generate_earlyret_entry_for(vtos)
+-      );
+-  }
+-
+-  { CodeletMark cm(_masm, "deoptimization entry points");
+-    for (int i = 0; i < Interpreter::number_of_deopt_entries; i++) {
+-      Interpreter::_deopt_entry[i] =
+-        EntryPoint(
+-          generate_deopt_entry_for(itos, i),
+-          generate_deopt_entry_for(itos, i),
+-          generate_deopt_entry_for(itos, i),
+-          generate_deopt_entry_for(atos, i),
+-          generate_deopt_entry_for(itos, i),
+-          generate_deopt_entry_for(ltos, i),
+-          generate_deopt_entry_for(ftos, i),
+-          generate_deopt_entry_for(dtos, i),
+-          generate_deopt_entry_for(vtos, i)
+-        );
+-    }
+-  }
++static const BasicType types[Interpreter::number_of_result_handlers] = {
++  T_BOOLEAN,
++  T_CHAR   ,
++  T_BYTE   ,
++  T_SHORT  ,
++  T_INT    ,
++  T_LONG   ,
++  T_VOID   ,
++  T_FLOAT  ,
++  T_DOUBLE ,
++  T_OBJECT
++};
+ 
+-#endif // !CC_INTERP
++void AbstractInterpreterGenerator::generate_all() {
+ 
+-  { CodeletMark cm(_masm, "result handlers for native calls");
+-    const BasicType types[Interpreter::number_of_result_handlers] = {
+-      T_BOOLEAN,
+-      T_CHAR   ,
+-      T_BYTE   ,
+-      T_SHORT  ,
+-      T_INT    ,
+-      T_LONG   ,
+-      T_VOID   ,
+-      T_FLOAT  ,
+-      T_DOUBLE ,
+-      T_OBJECT
+-    };
+-    // The various result converter stublets.
+-    int is_generated[Interpreter::number_of_result_handlers];
+-    memset(is_generated, 0, sizeof(is_generated));
+-#ifdef CC_INTERP
+-    int _tosca_to_stack_is_generated[Interpreter::number_of_result_handlers];
+-    int _stack_to_stack_is_generated[Interpreter::number_of_result_handlers];
+-    int _stack_to_native_abi_is_generated[Interpreter::number_of_result_handlers];
+-
+-    memset(_tosca_to_stack_is_generated, 0, sizeof(_tosca_to_stack_is_generated));
+-    memset(_stack_to_stack_is_generated, 0, sizeof(_stack_to_stack_is_generated));
+-    memset(_stack_to_native_abi_is_generated, 0, sizeof(_stack_to_native_abi_is_generated));
+-#endif
+-    for (int i = 0; i < Interpreter::number_of_result_handlers; i++) {
+-      BasicType type = types[i];
+-      if (!is_generated[Interpreter::BasicType_as_index(type)]++) {
+-	Interpreter::_native_abi_to_tosca[Interpreter::BasicType_as_index(type)] = generate_result_handler_for(type);
+-      }
+-#ifdef CC_INTERP
+-      if (!_tosca_to_stack_is_generated[Interpreter::BasicType_as_index(type)]++) {
+-	Interpreter::_tosca_to_stack[Interpreter::BasicType_as_index(type)] = generate_tosca_to_stack_converter(type);
+-      }
+-      if (!_stack_to_stack_is_generated[Interpreter::BasicType_as_index(type)]++) {
+-	Interpreter::_stack_to_stack[Interpreter::BasicType_as_index(type)] = generate_stack_to_stack_converter(type);
+-      }
+-      if (!_stack_to_native_abi_is_generated[Interpreter::BasicType_as_index(type)]++) {
+-	Interpreter::_stack_to_native_abi[Interpreter::BasicType_as_index(type)] = generate_stack_to_native_abi_converter(type);
+-      }
+-#endif
+-    }
+-  }
+ 
+   { CodeletMark cm(_masm, "slow signature handler");
+     Interpreter::_slow_signature_handler = generate_slow_signature_handler();
+   }
+ 
+-#ifndef CC_INTERP
+-  for (int j = 0; j < number_of_states; j++) {
+-    const TosState states[] = {btos, ctos, stos, itos, ltos, ftos, dtos, atos, vtos};
+-    Interpreter::_return_3_addrs_by_index[Interpreter::TosState_as_index(states[j])] = Interpreter::return_entry(states[j], 3);
+-    Interpreter::_return_5_addrs_by_index[Interpreter::TosState_as_index(states[j])] = Interpreter::return_entry(states[j], 5);
+-  }
+-
+-  { CodeletMark cm(_masm, "continuation entry points");
+-    Interpreter::_continuation_entry =
+-      EntryPoint(
+-        generate_continuation_for(btos),
+-        generate_continuation_for(ctos),
+-        generate_continuation_for(stos),
+-        generate_continuation_for(atos),
+-        generate_continuation_for(itos),
+-        generate_continuation_for(ltos),
+-        generate_continuation_for(ftos),
+-        generate_continuation_for(dtos),
+-        generate_continuation_for(vtos)
+-      );
+-  }
+-
+-  { CodeletMark cm(_masm, "safepoint entry points");
+-    Interpreter::_safept_entry =
+-      EntryPoint(
+-	generate_safept_entry_for(btos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+-	generate_safept_entry_for(ctos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+-	generate_safept_entry_for(stos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+-	generate_safept_entry_for(atos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+-        generate_safept_entry_for(itos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+-        generate_safept_entry_for(ltos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+-        generate_safept_entry_for(ftos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+-        generate_safept_entry_for(dtos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+-        generate_safept_entry_for(vtos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint))
+-      );
+-  }
+-
+-  { CodeletMark cm(_masm, "exception handling");
+-    // (Note: this is not safepoint safe because thread may return to compiled code)
+-    generate_throw_exception();
+-  }
+-
+-  { CodeletMark cm(_masm, "throw exception entrypoints");
+-    Interpreter::_throw_ArrayIndexOutOfBoundsException_entry = generate_ArrayIndexOutOfBounds_handler("java/lang/ArrayIndexOutOfBoundsException");
+-    Interpreter::_throw_ArrayStoreException_entry            = generate_klass_exception_handler("java/lang/ArrayStoreException"                 );
+-    Interpreter::_throw_ArithmeticException_entry            = generate_exception_handler("java/lang/ArithmeticException"           , "/ by zero");
+-    Interpreter::_throw_ClassCastException_entry             = generate_ClassCastException_handler();
+-    Interpreter::_throw_NullPointerException_entry           = generate_exception_handler("java/lang/NullPointerException"          , NULL       );
+-    Interpreter::_throw_StackOverflowError_entry             = generate_StackOverflowError_handler();
+-  }
+-#endif // !CC_INTERP
+-
+-
+-#ifdef CC_INTERP
+-
+-#define method_entry(kind) Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind)
+-
+-  { CodeletMark cm(_masm, "(kind = frame_manager)");
+-    // all non-native method kinds  
+-    method_entry(zerolocals);
+-    method_entry(zerolocals_synchronized);
+-    method_entry(empty);
+-    method_entry(accessor);
+-    method_entry(abstract);
+-    method_entry(java_lang_math_sin   );
+-    method_entry(java_lang_math_cos   );
+-    method_entry(java_lang_math_tan   );
+-    method_entry(java_lang_math_abs   );
+-    method_entry(java_lang_math_sqrt  );
+-    method_entry(java_lang_math_log   );
+-    method_entry(java_lang_math_log10 );
+-    Interpreter::_native_entry_begin = Interpreter::code()->code_end();
+-    method_entry(native);
+-    method_entry(native_synchronized);
+-    Interpreter::_native_entry_end = Interpreter::code()->code_end();
+-  }
+-
+-#else
+-
+-#define method_entry(kind)                                                                    \
+-  { CodeletMark cm(_masm, "method entry point (kind = " #kind ")");                    \
+-    Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind);  \
+-  }
+-
+-  // all non-native method kinds  
+-  method_entry(zerolocals)
+-  method_entry(zerolocals_synchronized)
+-  method_entry(empty)
+-  method_entry(accessor)
+-  method_entry(abstract)
+-  method_entry(java_lang_math_sin  )
+-  method_entry(java_lang_math_cos  )
+-  method_entry(java_lang_math_tan  )
+-  method_entry(java_lang_math_abs  )
+-  method_entry(java_lang_math_sqrt )
+-  method_entry(java_lang_math_log  )
+-  method_entry(java_lang_math_log10)
+-
+-  // all native method kinds (must be one contiguous block)
+-  Interpreter::_native_entry_begin = Interpreter::code()->code_end();
+-  method_entry(native)
+-  method_entry(native_synchronized)
+-  Interpreter::_native_entry_end = Interpreter::code()->code_end();
+-
+-#endif // !CC_INTERP
+-
+-#undef method_entry
+-
+-#ifndef CC_INTERP
+-  // Bytecodes
+-  set_entry_points_for_all_bytes();
+-  set_safepoints_for_all_bytes();
+-#endif // !CC_INTERP
+-}
+-
+-
+-//------------------------------------------------------------------------------------------------------------------------
+-
+-#ifndef CC_INTERP
+-address AbstractInterpreterGenerator::generate_error_exit(const char* msg) {
+-  address entry = __ pc();
+-  __ stop(msg);
+-  return entry;
+-}
+-
+-
+-//------------------------------------------------------------------------------------------------------------------------
+-
+-void AbstractInterpreterGenerator::set_entry_points_for_all_bytes() {
+-  for (int i = 0; i < DispatchTable::length; i++) {
+-    Bytecodes::Code code = (Bytecodes::Code)i;
+-    if (Bytecodes::is_defined(code)) {
+-      set_entry_points(code);
+-    } else {
+-      set_unimplemented(i);
+-    }
+-  }
+-}
+-
+-
+-void AbstractInterpreterGenerator::set_safepoints_for_all_bytes() {
+-  for (int i = 0; i < DispatchTable::length; i++) {
+-    Bytecodes::Code code = (Bytecodes::Code)i;
+-    if (Bytecodes::is_defined(code)) Interpreter::_safept_table.set_entry(code, Interpreter::_safept_entry);
+-  }
+-}
+-
+-
+-void AbstractInterpreterGenerator::set_unimplemented(int i) {
+-  address e = _unimplemented_bytecode;
+-  EntryPoint entry(e, e, e, e, e, e, e, e, e);
+-  Interpreter::_normal_table.set_entry(i, entry);
+-  Interpreter::_wentry_point[i] = _unimplemented_bytecode;
+-}
+-
+-
+-void AbstractInterpreterGenerator::set_entry_points(Bytecodes::Code code) {
+-  CodeletMark cm(_masm, Bytecodes::name(code), code);
+-  // initialize entry points
+-  assert(_unimplemented_bytecode    != NULL, "should have been generated before");
+-  assert(_illegal_bytecode_sequence != NULL, "should have been generated before");
+-  address bep = _illegal_bytecode_sequence;
+-  address cep = _illegal_bytecode_sequence;
+-  address sep = _illegal_bytecode_sequence;
+-  address aep = _illegal_bytecode_sequence;
+-  address iep = _illegal_bytecode_sequence;
+-  address lep = _illegal_bytecode_sequence;
+-  address fep = _illegal_bytecode_sequence;
+-  address dep = _illegal_bytecode_sequence;
+-  address vep = _unimplemented_bytecode;
+-  address wep = _unimplemented_bytecode;
+-  // code for short & wide version of bytecode
+-  if (Bytecodes::is_defined(code)) {
+-    Template* t = TemplateTable::template_for(code);
+-    assert(t->is_valid(), "just checking");
+-    set_short_entry_points(t, bep, cep, sep, aep, iep, lep, fep, dep, vep);
+-  }
+-  if (Bytecodes::wide_is_defined(code)) {
+-    Template* t = TemplateTable::template_for_wide(code);
+-    assert(t->is_valid(), "just checking");
+-    set_wide_entry_point(t, wep);
+-  }
+-  // set entry points
+-  EntryPoint entry(bep, cep, sep, aep, iep, lep, fep, dep, vep);
+-  Interpreter::_normal_table.set_entry(code, entry);
+-  Interpreter::_wentry_point[code] = wep;
+-}
+-
+-
+-void AbstractInterpreterGenerator::set_wide_entry_point(Template* t, address& wep) {
+-  assert(t->is_valid(), "template must exist");
+-  assert(t->tos_in() == vtos, "only vtos tos_in supported for wide instructions")
+-  wep = __ pc(); generate_and_dispatch(t);
+-}
+-
+-
+-void AbstractInterpreterGenerator::set_short_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
+-  assert(t->is_valid(), "template must exist");
+-  switch (t->tos_in()) {
+-    case btos: vep = __ pc(); __ pop(btos); bep = __ pc(); generate_and_dispatch(t); break;
+-    case ctos: vep = __ pc(); __ pop(ctos); sep = __ pc(); generate_and_dispatch(t); break;
+-    case stos: vep = __ pc(); __ pop(stos); sep = __ pc(); generate_and_dispatch(t); break;
+-    case atos: vep = __ pc(); __ pop(atos); aep = __ pc(); generate_and_dispatch(t); break;
+-    case itos: vep = __ pc(); __ pop(itos); iep = __ pc(); generate_and_dispatch(t); break;
+-    case ltos: vep = __ pc(); __ pop(ltos); lep = __ pc(); generate_and_dispatch(t); break;
+-    case ftos: vep = __ pc(); __ pop(ftos); fep = __ pc(); generate_and_dispatch(t); break;
+-    case dtos: vep = __ pc(); __ pop(dtos); dep = __ pc(); generate_and_dispatch(t); break;
+-    case vtos: set_vtos_entry_points(t, bep, cep, sep, aep, iep, lep, fep, dep, vep);     break;
+-    default  : ShouldNotReachHere();                                                 break;
+-  }
+-}
+-
+-
+-//------------------------------------------------------------------------------------------------------------------------
+-
+-void AbstractInterpreterGenerator::generate_and_dispatch(Template* t, TosState tos_out) {
+-#ifndef CC_INTERP
+-  if (PrintBytecodeHistogram)                                    histogram_bytecode(t);
+-#ifndef PRODUCT
+-  // debugging code
+-  if (CountBytecodes || TraceBytecodes || StopInterpreterAt > 0) count_bytecode();
+-  if (PrintBytecodePairHistogram)                                histogram_bytecode_pair(t);
+-  if (TraceBytecodes)                                            trace_bytecode(t);
+-  if (StopInterpreterAt > 0)                                     stop_interpreter_at();
+-  __ verify_FPU(1, t->tos_in());
+-#endif // !PRODUCT
+-  int step;
+-  if (!t->does_dispatch()) { 
+-    step = t->is_wide() ? Bytecodes::wide_length_for(t->bytecode()) : Bytecodes::length_for(t->bytecode());
+-    if (tos_out == ilgl) tos_out = t->tos_out();
+-    // compute bytecode size
+-    assert(step > 0, "just checkin'");    
+-    // setup stuff for dispatching next bytecode 
+-    if (ProfileInterpreter && VerifyDataPointer
+-        && methodDataOopDesc::bytecode_has_profile(t->bytecode())) {
+-      __ verify_method_data_pointer();
+-    }
+-    __ dispatch_prolog(tos_out, step);
+-  }
+-  // generate template
+-  t->generate(_masm);
+-  // advance
+-  if (t->does_dispatch()) {
+-#ifdef ASSERT
+-    // make sure execution doesn't go beyond this point if code is broken
+-    __ should_not_reach_here();
+-#endif // ASSERT
+-  } else {
+-    // dispatch to next bytecode
+-    __ dispatch_epilog(tos_out, step);
+-  }
+-#endif
+ }
+-#endif /* !CC_INTERP */
+-
+ 
+ //------------------------------------------------------------------------------------------------------------------------
+ // Entry points
+@@ -779,12 +173,12 @@
+   //       methods. See also comments below.
+   if (m->is_native()) {
+     return m->is_synchronized() ? native_synchronized : native;
+-  } 
++  }
+ 
+   // Synchronized?
+   if (m->is_synchronized()) {
+     return zerolocals_synchronized;
+-  } 
++  }
+ 
+   if (RegisterFinalizersAtInit && m->code_size() == 1 &&
+       m->intrinsic_id() == vmIntrinsics::_Object_init) {
+@@ -796,14 +190,14 @@
+   // Empty method?
+   if (m->is_empty_method()) {
+     return empty;
+-  } 
+-  
++  }
++
+   // Accessor method?
+   if (m->is_accessor()) {
+     assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1");
+     return accessor;
+   }
+-  
++
+   // Special intrinsic method?
+   // Note: This test must come _after_ the test for native methods,
+   //       otherwise we will run into problems with JDK 1.2, see also
+@@ -820,7 +214,7 @@
+   }
+ 
+   // Note: for now: zero locals for all non-empty methods
+-  return zerolocals;  
++  return zerolocals;
+ }
+ 
+ 
+@@ -867,21 +261,6 @@
+ }
+ #endif // PRODUCT
+ 
+-
+-#ifndef CC_INTERP
+-address AbstractInterpreter::return_entry(TosState state, int length) {
+-  guarantee(0 <= length && length < Interpreter::number_of_return_entries, "illegal length");
+-  return _return_entry[length].entry(state);
+-}
+-
+-
+-address AbstractInterpreter::deopt_entry(TosState state, int length) {
+-  guarantee(0 <= length && length < Interpreter::number_of_deopt_entries, "illegal length");
+-  return _deopt_entry[length].entry(state);
+-}
+-
+-#endif /* CC_INTERP */
+-
+ static BasicType constant_pool_type(methodOop method, int index) {
+   constantTag tag = method->constants()->tag_at(index);
+        if (tag.is_int              ()) return T_INT;
+@@ -959,7 +338,7 @@
+       // reexecute the operation and TOS value is on stack
+       assert(is_top_frame, "must be top frame");
+       use_next_mdp = false;
+-      return deopt_entry(vtos, 0);
++      return Interpreter::deopt_entry(vtos, 0);
+       break;
+ 
+ #ifdef COMPILER1
+@@ -986,28 +365,16 @@
+       }
+       break;
+     }
+-    
+-    case Bytecodes::_ldc   : 
+-      type = constant_pool_type( method, *(bcp+1) ); 
++
++    case Bytecodes::_ldc   :
++      type = constant_pool_type( method, *(bcp+1) );
+       break;
+ 
+     case Bytecodes::_ldc_w : // fall through
+-    case Bytecodes::_ldc2_w: 
+-      type = constant_pool_type( method, Bytes::get_Java_u2(bcp+1) ); 
++    case Bytecodes::_ldc2_w:
++      type = constant_pool_type( method, Bytes::get_Java_u2(bcp+1) );
+       break;
+ 
+-    case Bytecodes::_return: {
+-      // This is used for deopt during registration of finalizers
+-      // during Object.<init>.  We simply need to resume execution at
+-      // the standard return vtos bytecode to pop the frame normally.
+-      // reexecuting the real bytecode would cause double registration
+-      // of the finalizable object.
+-#ifndef CC_INTERP
+-      assert(is_top_frame, "must be on top");
+-      return _normal_table.entry(Bytecodes::_return).entry(vtos);
+-#endif // CC_INTERP
+-    }
+-
+     default:
+       type = Bytecodes::result_type(code);
+       break;
+@@ -1016,60 +383,8 @@
+   // return entry point for computed continuation state & bytecode length
+   return
+     is_top_frame
+-    ? deopt_entry (as_TosState(type), length)
+-    : return_entry(as_TosState(type), length);
+-}
+-
+-
+-#ifndef CC_INTERP
+-
+-//------------------------------------------------------------------------------------------------------------------------
+-// Support for invokes
+-
+-int AbstractInterpreter::TosState_as_index(TosState state) {
+-  assert( state < number_of_states , "Invalid state in TosState_as_index");
+-  assert(0 <= (int)state && (int)state < AbstractInterpreter::number_of_return_addrs, "index out of bounds");
+-  return (int)state;
+-}
+-
+-#endif // CC_INTERP
+-
+-//------------------------------------------------------------------------------------------------------------------------
+-// Safepoint support
+-
+-#ifndef CC_INTERP
+-static inline void copy_table(address* from, address* to, int size) {
+-  // Copy non-overlapping tables. The copy has to occur word wise for MT safety.
+-  while (size-- > 0) *to++ = *from++;
+-}
+-#endif
+-
+-void AbstractInterpreter::notice_safepoints() {
+-  if (!_notice_safepoints) {
+-    // switch to safepoint dispatch table
+-    _notice_safepoints = true;
+-#ifndef CC_INTERP
+-    copy_table((address*)&_safept_table, (address*)&_active_table, sizeof(_active_table) / sizeof(address));
+-#endif
+-  }
+-}
+-
+-
+-// Switch from the dispatch table that notices safepoints back to the
+-// normal dispatch table. So that we can notice single-stepping points,
+-// keep the safepoint dispatch table if we are single-stepping in JVMTI.
+-// Note that the should_post_single_step test is exactly as fast as the
+-// JvmtiExport::_enabled test and covers both cases.
+-void AbstractInterpreter::ignore_safepoints() {
+-  if (_notice_safepoints) {
+-    if (!JvmtiExport::should_post_single_step()) {
+-      // switch to normal dispatch table
+-      _notice_safepoints = false;
+-#ifndef CC_INTERP
+-      copy_table((address*)&_normal_table, (address*)&_active_table, sizeof(_active_table) / sizeof(address));
+-#endif
+-    }
+-  }
++    ? Interpreter::deopt_entry (as_TosState(type), length)
++    : Interpreter::return_entry(as_TosState(type), length);
+ }
+ 
+ void AbstractInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
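The safepoint support removed from interpreter.cpp above swaps dispatch
tables with an explicit word-wise copy because memcpy may copy byte-wise,
and a concurrently dispatching thread must never observe a torn pointer.
A sketch of the same constraint in modern C++ (an assumption for
illustration; HotSpot's own code predates std::atomic):

#include <atomic>

typedef unsigned char* address;

// Each store replaces one whole table entry, so a thread dispatching
// concurrently sees either the old or the new handler for a bytecode,
// never a partially written pointer.
static void swap_dispatch_entries(std::atomic<address>* table,
                                  const address* replacement, int size) {
    for (int i = 0; i < size; i++)
        table[i].store(replacement[i], std::memory_order_relaxed);
}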
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/interpreterGenerator.hpp openjdk/hotspot/src/share/vm/interpreter/interpreterGenerator.hpp
+--- openjdk6/hotspot/src/share/vm/interpreter/interpreterGenerator.hpp	1969-12-31 19:00:00.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/interpreterGenerator.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -0,0 +1,38 @@
++/*
++ * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
++ * CA 95054 USA or visit www.sun.com if you need additional information or
++ * have any questions.
++ *
++ */
++
++// This file contains the platform-independent parts
++// of the interpreter generator.
++
++
++class InterpreterGenerator: public CC_INTERP_ONLY(CppInterpreterGenerator)
++                                   NOT_CC_INTERP(TemplateInterpreterGenerator) {
++
++public:
++
++InterpreterGenerator(StubQueue* _code);
++
++#include "incls/_interpreterGenerator_pd.hpp.incl"
++
++};
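InterpreterGenerator above selects its base class with the
CC_INTERP_ONLY/NOT_CC_INTERP pair. Assuming these follow HotSpot's usual
inclusion-macro pattern (cf. macros.hpp), each expands to its argument in
exactly one of the two interpreter configurations:

#ifdef CC_INTERP
#define CC_INTERP_ONLY(code) code     // C++ interpreter build: keep
#define NOT_CC_INTERP(code)           // ...and drop the template variant
#else
#define CC_INTERP_ONLY(code)
#define NOT_CC_INTERP(code) code      // template interpreter build: keep
#endif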
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/interpreter.hpp openjdk/hotspot/src/share/vm/interpreter/interpreter.hpp
+--- openjdk6/hotspot/src/share/vm/interpreter/interpreter.hpp	2008-02-28 05:02:36.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/interpreter.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)interpreter.hpp	1.153 07/05/17 15:54:31 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This file contains the platform-independent parts
+@@ -66,397 +63,72 @@
+   Bytecodes::Code bytecode() const               { return _bytecode; }
+ };
+ 
+-
+-#ifndef CC_INTERP
+-//------------------------------------------------------------------------------------------------------------------------
+-// A little wrapper class to group tosca-specific entry points into a unit.
+-// (tosca = Top-Of-Stack CAche)
+-
+-class EntryPoint VALUE_OBJ_CLASS_SPEC {
+- private:
+-  address _entry[number_of_states];
+-
+- public:
+-  // Construction
+-  EntryPoint();
+-  EntryPoint(address bentry, address centry, address sentry, address aentry, address ientry, address lentry, address fentry, address dentry, address ventry);
+-
+-  // Attributes
+-  address entry(TosState state) const;                // return target address for a given tosca state
+-  void    set_entry(TosState state, address entry);   // set    target address for a given tosca state
+-  void    print();
+-
+-  // Comparison
+-  bool operator == (const EntryPoint& y);             // for debugging only
+-};
++// Define a prototype interface
++DEF_STUB_INTERFACE(InterpreterCodelet);
+ 
+ 
+ //------------------------------------------------------------------------------------------------------------------------
+-// A little wrapper class to group tosca-specific dispatch tables into a unit.
+-
+-class DispatchTable VALUE_OBJ_CLASS_SPEC {
+- public:
+-  enum { length = 1 << BitsPerByte };                 // an entry point for each byte value (also for undefined bytecodes)
++// A CodeletMark serves as an automatic creator/initializer for Codelets
++// (As a subclass of ResourceMark it automatically GC's the allocated
++// code buffer and assemblers).
+ 
++class CodeletMark: ResourceMark {
+  private:
+-  address _table[number_of_states][length];	      // dispatch tables, indexed by tosca and bytecode
++  InterpreterCodelet*         _clet;
++  InterpreterMacroAssembler** _masm;
++  CodeBuffer                  _cb;
+ 
+- public:
+-  // Attributes
+-  EntryPoint entry(int i) const;                      // return entry point for a given bytecode i
+-  void       set_entry(int i, EntryPoint& entry);     // set    entry point for a given bytecode i
+-  address*   table_for(TosState state) 		{ return _table[state]; }
+-  address*   table_for()			{ return table_for((TosState)0); }
+-  int	     distance_from(address *table)	{ return table - table_for(); }
+-  int	     distance_from(TosState state)	{ return distance_from(table_for(state)); }
+-
+-  // Comparison
+-  bool operator == (DispatchTable& y);                // for debugging only
+-};
++  int codelet_size() {
++    // Request the whole code buffer (minus a little for alignment).
++    // The commit call below trims it back for each codelet.
++    int codelet_size = AbstractInterpreter::code()->available_space() - 2*K;
+ 
+-#endif // CC_INTERP
+-
+-//------------------------------------------------------------------------------------------------------------------------
+-// The C++ interface to the bytecode interpreter.
+-
+-class AbstractInterpreter: AllStatic {
+-  friend class VMStructs;
+-  friend class Interpreter;
+- public:
+-  enum MethodKind {        
+-    zerolocals,                                                 // method needs locals initialization
+-    zerolocals_synchronized,                                    // method needs locals initialization & is synchronized
+-    native,                                                     // native method
+-    native_synchronized,                                        // native method & is synchronized
+-    empty,                                                      // empty method (code: _return)
+-    accessor,                                                   // accessor method (code: _aload_0, _getfield, _(a|i)return)
+-    abstract,                                                   // abstract method (throws an AbstractMethodException)
+-    java_lang_math_sin,                                         // implementation of java.lang.Math.sin   (x)
+-    java_lang_math_cos,                                         // implementation of java.lang.Math.cos   (x)
+-    java_lang_math_tan,                                         // implementation of java.lang.Math.tan   (x)
+-    java_lang_math_abs,                                         // implementation of java.lang.Math.abs   (x)
+-    java_lang_math_sqrt,                                        // implementation of java.lang.Math.sqrt  (x)
+-    java_lang_math_log,                                         // implementation of java.lang.Math.log   (x)
+-    java_lang_math_log10,                                       // implementation of java.lang.Math.log10 (x)
+-    number_of_method_entries,
+-    invalid = -1
+-  };
+-
+-  enum SomeConstants {
+-#ifndef CC_INTERP
+-    number_of_return_entries  = 9,                              // number of return entry points
+-    number_of_deopt_entries   = 9,                              // number of deoptimization entry points
+-    number_of_return_addrs    = 9,                              // number of return addresses
+-#endif // CC_INTERP
+-    number_of_result_handlers = 10                              // number of result handlers for native calls
+-  };    
+-
+- protected:
+-  static StubQueue* _code;                                      // the interpreter code (codelets)
+-  
+-  static address    _rethrow_exception_entry;                   // rethrows an activation in previous frame
+-#ifdef HOTSWAP
+-  static address    _remove_activation_preserving_args_entry;   // continuation address when current frame is being popped
+-#endif // HOTSWAP
+-
+-#ifndef CC_INTERP
+-  static address    _throw_ArrayIndexOutOfBoundsException_entry;
+-  static address    _throw_ArrayStoreException_entry;
+-  static address    _throw_ArithmeticException_entry;
+-  static address    _throw_ClassCastException_entry;
+-  static address    _throw_NullPointerException_entry;
+-  static address    _throw_StackOverflowError_entry;
+-  static address    _throw_exception_entry;
+-
+-  static address    _remove_activation_entry;                   // continuation address if an exception is not handled by current frame
+-
+-#ifndef PRODUCT
+-  static EntryPoint _trace_code;
+-#endif // !PRODUCT
+-  static EntryPoint _return_entry[number_of_return_entries];    // entry points to return to from a call
+-  static EntryPoint _earlyret_entry;                            // entry point to return early from a call
+-  static EntryPoint _deopt_entry[number_of_deopt_entries];      // entry points to return to from a deoptimization
+-  static EntryPoint _continuation_entry;
+-  static EntryPoint _safept_entry;
+-
+-  static address    _return_3_addrs_by_index[number_of_return_addrs];     // for invokevirtual   return entries
+-  static address    _return_5_addrs_by_index[number_of_return_addrs];     // for invokeinterface return entries
+-
+-  static DispatchTable _active_table;                           // the active    dispatch table (used by the interpreter for dispatch)
+-  static DispatchTable _normal_table;                           // the normal    dispatch table (used to set the active table in normal mode)
+-  static DispatchTable _safept_table;                           // the safepoint dispatch table (used to set the active table for safepoints)
+-  static address       _wentry_point[DispatchTable::length];    // wide instructions only (vtos tosca always)
+-  
+-#endif // CC_INTERP
+-  static bool       _notice_safepoints;                         // true if safepoints are activated
+-
+-  static address    _native_entry_begin;                        // Region for native entry code
+-  static address    _native_entry_end;
+-
+-  // method entry points
+-  static address    _entry_table[number_of_method_entries];     // entry points for a given method
+-  static address    _native_abi_to_tosca[number_of_result_handlers];  // for native method result handlers
+-#ifdef CC_INTERP
+-  // tosca result -> stack result
+-  static address    _tosca_to_stack[number_of_result_handlers];  // converts tosca to C++ interpreter stack result
+-  // stack result -> stack result
+-  static address    _stack_to_stack[number_of_result_handlers];  // pass result between C++ interpreter calls
+-  // stack result -> native abi result
+-  static address    _stack_to_native_abi[number_of_result_handlers];  // converts C++ interpreter results to native abi
+-#endif
+-  static address    _slow_signature_handler;                              // the native method generic (slow) signature handler
+-
+-
+-  
+-  friend class      TemplateTable;
+-  friend class      AbstractInterpreterGenerator;
+-  friend class              InterpreterGenerator;
+-  friend class      InterpreterMacroAssembler;
++    // Guarantee there's a little bit of code space left.
++    guarantee (codelet_size > 0 && (size_t)codelet_size >  2*K,
++               "not enough space for interpreter generation");
+ 
+- public:
+-  // Initialization/debugging
+-  static void       initialize();
+-  static StubQueue* code()                                      { return _code; }
+-  // this only returns whether a pc is within generated code for the interpreter.
+-#ifdef CC_INTERP
+- private:
+-  // for the c++ based interpreter this misses much code.  make sure it doesn't get called.
+-#endif // CC_INTERP
+-  static bool       contains(address pc)                        { return _code->contains(pc); }
++    return codelet_size;
++  }
+ 
+  public:
++  CodeletMark(
++    InterpreterMacroAssembler*& masm,
++    const char* description,
++    Bytecodes::Code bytecode = Bytecodes::_illegal):
++    _clet((InterpreterCodelet*)AbstractInterpreter::code()->request(codelet_size())),
++    _cb(_clet->code_begin(), _clet->code_size())
+ 
+-  // Method activation
+-  static MethodKind method_kind(methodHandle m);
+-  static address    entry_for_kind(MethodKind k)                { assert(0 <= k && k < number_of_method_entries, "illegal kind"); return _entry_table[k]; }
+-  static address    entry_for_method(methodHandle m)            { return _entry_table[method_kind(m)]; }
+-
+-  static void       print_method_kind(MethodKind kind)          PRODUCT_RETURN;
+-
+-  // Runtime support
+-
+-  static address    rethrow_exception_entry()                   { return _rethrow_exception_entry; }
+-
+-  static address    return_entry  (TosState state, int length); // length = invoke bytecode length (to advance to next bytecode)
+-  static address    deopt_entry   (TosState state, int length); // length = invoke bytecode length (to advance to next bytecode)
+-
+-#ifdef HOTSWAP
+-  static address    remove_activation_preserving_args_entry()   { return _remove_activation_preserving_args_entry; }
+-#endif // HOTSWAP
+-
+-#ifndef CC_INTERP
+-  static address    remove_activation_early_entry(TosState state) { return _earlyret_entry.entry(state); }
+-  static address    remove_activation_entry()                   { return _remove_activation_entry; }
+-  static address    throw_exception_entry()                     { return _throw_exception_entry; }
+-  static address    throw_ArithmeticException_entry()           { return _throw_ArithmeticException_entry; }
+-  static address    throw_NullPointerException_entry()          { return _throw_NullPointerException_entry; }
+-  static address    throw_StackOverflowError_entry()            { return _throw_StackOverflowError_entry; }
+-
+-  // Code generation
+-#ifndef PRODUCT
+-  static address    trace_code    (TosState state)              { return _trace_code.entry(state); }
+-#endif // !PRODUCT
+-  static address    continuation  (TosState state)              { return _continuation_entry.entry(state); }
+-  static address*   dispatch_table(TosState state)              { return _active_table.table_for(state); }
+-  static address*   dispatch_table()                            { return _active_table.table_for(); }
+-  static int        distance_from_dispatch_table(TosState state){ return _active_table.distance_from(state); }
+-  static address*   normal_table(TosState state)                { return _normal_table.table_for(state); }
+-  static address*   normal_table()                              { return _normal_table.table_for(); }
+-
+-  // Support for invokes
+-  static address*   return_3_addrs_by_index_table()             { return _return_3_addrs_by_index; }
+-  static address*   return_5_addrs_by_index_table()             { return _return_5_addrs_by_index; }
+-  static int        TosState_as_index(TosState state);          // computes index into return_3_entry_by_index table
+-#endif // CC_INTERP
+-
+-
+-  // Activation size in words for a method that is just being called.
+-  // Parameters haven't been pushed so count them too.
+-  static int        size_top_interpreter_activation(methodOop method);
+-
+-  // Deoptimization support
+-  static address    continuation_for(methodOop method,
+-				     address bcp,
+-				     int callee_parameters,
+-				     bool is_top_frame,
+-				     bool& use_next_mdp);
+-
+-  // share implementation of size_activation and layout_activation:
+-  static int        size_activation(methodOop method,
+-				    int temps,
+-                                    int popframe_args,
+-				    int monitors,
+-				    int callee_params,
+-				    int callee_locals,
+-				    bool is_top_frame);
+-
+-  static int       layout_activation(methodOop method,
+-				      int temps,
+-                                      int popframe_args,
+-				      int monitors,
+-				      int callee_params,
+-				      int callee_locals,
+-				      frame* caller,
+-				      frame* interpreter_frame,
+-				      bool is_top_frame);
+-
+-  // Runtime support
+-  static bool       is_not_reached(                       methodHandle method, int bci);
+-  // Safepoint support
+-  static void       notice_safepoints();                        // stops the thread when reaching a safepoint
+-  static void       ignore_safepoints();                        // ignores safepoints
+-
+-  // Support for native calls
+-  static address    slow_signature_handler()                    { return _slow_signature_handler; }
+-  static address    result_handler(BasicType type)              { return _native_abi_to_tosca[BasicType_as_index(type)]; }
+-  static int        BasicType_as_index(BasicType type);         // computes index into result_handler_by_index table
+-  static bool       in_native_entry(address pc)                 { return _native_entry_begin <= pc && pc < _native_entry_end; }
+-  // Debugging/printing
+-  static InterpreterCodelet* codelet_containing(address pc)     { return (InterpreterCodelet*)_code->stub_containing(pc); }
+-  static void       print();                                    // prints the interpreter code
+-#ifdef CC_INTERP
+-  static address    native_result_to_tosca()                    { return (address)_native_abi_to_tosca; } // aka result handler
+-  static address    tosca_result_to_stack()                     { return (address)_tosca_to_stack; }
+-  static address    stack_result_to_stack()                     { return (address)_stack_to_stack; }
+-  static address    stack_result_to_native()                    { return (address)_stack_to_native_abi; }
+-
+-  static address    native_result_to_tosca(int index)           { return _native_abi_to_tosca[index]; } // aka result handler
+-  static address    tosca_result_to_stack(int index)            { return _tosca_to_stack[index]; }
+-  static address    stack_result_to_stack(int index)            { return _stack_to_stack[index]; }
+-  static address    stack_result_to_native(int index)           { return _stack_to_native_abi[index]; }
+-#endif /* CC_INTERP */
+-
+-  // Support for Tagged Stacks
+-  //
+-  // Tags are stored on the Java Expression stack above the value:
+-  // 
+-  //  tag   
+-  //  value
+-  //
+-  // For double values:
+-  //
+-  //  tag2  
+-  //  high word
+-  //  tag1
+-  //  low word
++  { // request all space (add some slack for Codelet data)
++    assert (_clet != NULL, "we checked not enough space already");
+ 
+- public:
+-  static int stackElementWords()   { return TaggedStackInterpreter ? 2 : 1; }
+-  static int stackElementSize()    { return stackElementWords()*wordSize; }
+-  static int logStackElementSize() { return
+-                 TaggedStackInterpreter? LogBytesPerWord+1 : LogBytesPerWord; }
+-
+-  // Tag is at pointer, value is one below for a stack growing down
+-  // (or above for stack growing up)
+-  static int  value_offset_in_bytes()  {
+-    return TaggedStackInterpreter ?
+-      frame::interpreter_frame_expression_stack_direction() * wordSize : 0;
+-  }
+-  static int  tag_offset_in_bytes()    { 
+-    assert(TaggedStackInterpreter, "should not call this");
+-    return 0;
++    // initialize Codelet attributes
++    _clet->initialize(description, bytecode);
++    // create assembler for code generation
++    masm  = new InterpreterMacroAssembler(&_cb);
++    _masm = &masm;
+   }
+ 
+-  // Tagged Locals
+-  // Locals are stored relative to Llocals:
+-  //
+-  // tag    <- Llocals[n]
+-  // value
+-  //
+-  // Category 2 types are indexed as:
+-  //
+-  // tag    <- Llocals[-n]
+-  // high word
+-  // tag    <- Llocals[-n+1]
+-  // low word
+-  //
+-
+-  // Local values relative to locals[n]
+-  static int  local_offset_in_bytes(int n) {
+-    return ((frame::interpreter_frame_expression_stack_direction() * n) *
+-            stackElementSize()) + value_offset_in_bytes();
+-  }
+-  static int  local_tag_offset_in_bytes(int n) {
+-    assert(TaggedStackInterpreter, "should not call this");
+-    return ((frame::interpreter_frame_expression_stack_direction() * n) * 
+-            stackElementSize()) + tag_offset_in_bytes();
+-  }
+-
+-};
++  ~CodeletMark() {
++    // align so printing shows nop's instead of random code at the end (Codelets are aligned)
++    (*_masm)->align(wordSize);
++    // make sure all code is in code buffer
++    (*_masm)->flush();
+ 
+ 
+-//------------------------------------------------------------------------------------------------------------------------
+-// The interpreter generator.
+-
+-class Template;
+-class AbstractInterpreterGenerator: public StackObj {
+- protected:
+-  InterpreterMacroAssembler* _masm;
+-
+-#ifndef CC_INTERP
+-  // entry points for shared code sequence
+-  address _unimplemented_bytecode;
+-  address _illegal_bytecode_sequence;
+-#endif
+-
+-  // shared code sequences
+-  // Converter for native abi result to tosca result
+-  address generate_result_handler_for(BasicType type);
+-#ifdef CC_INTERP
+-  address generate_tosca_to_stack_converter(BasicType type);
+-  address generate_stack_to_stack_converter(BasicType type);
+-  address generate_stack_to_native_abi_converter(BasicType type);
+-#endif
+-  address generate_slow_signature_handler();
+-#ifndef CC_INTERP
+-  address generate_error_exit(const char* msg);
+-  address generate_StackOverflowError_handler();
+-  address generate_exception_handler(const char* name, const char* message) {
+-    return generate_exception_handler_common(name, message, false);
+-  }
+-  address generate_klass_exception_handler(const char* name) {
+-    return generate_exception_handler_common(name, NULL, true);
++    // commit Codelet
++    AbstractInterpreter::code()->commit((*_masm)->code()->pure_code_size());
++    // make sure nobody can use _masm outside a CodeletMark lifespan
++    *_masm = NULL;
+   }
+-  address generate_exception_handler_common(const char* name, const char* message, bool pass_oop);
+-  address generate_ClassCastException_handler();
+-  address generate_ArrayIndexOutOfBounds_handler(const char* name);
+-  address generate_continuation_for(TosState state);
+-  address generate_return_entry_for(TosState state, int step);
+-  address generate_earlyret_entry_for(TosState state);
+-  address generate_deopt_entry_for(TosState state, int step);
+-  address generate_safept_entry_for(TosState state, address runtime_entry);
+-  void    generate_throw_exception();
+-#endif // CC_INTERP
+-
+-  // entry point generator
+-  address generate_method_entry(AbstractInterpreter::MethodKind kind);
+-  void    generate_fast_accessor_code(); // implements UseFastAccessorMethods
+-  
+-#ifndef CC_INTERP
+-  // Instruction generation
+-  void generate_and_dispatch (Template* t, TosState tos_out = ilgl);
+-  void set_vtos_entry_points (Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep);
+-  void set_short_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep);
+-  void set_wide_entry_point  (Template* t, address& wep);
+-
+-  void set_entry_points(Bytecodes::Code code);
+-  void set_unimplemented(int i);
+-  void set_entry_points_for_all_bytes();
+-  void set_safepoints_for_all_bytes();
+-
+-  // Helpers for generate_and_dispatch
+-  address generate_trace_code(TosState state)   PRODUCT_RETURN0;
+-  void count_bytecode()                         PRODUCT_RETURN;  
+-  void histogram_bytecode(Template* t)          PRODUCT_RETURN;
+-  void histogram_bytecode_pair(Template* t)     PRODUCT_RETURN;
+-  void trace_bytecode(Template* t)              PRODUCT_RETURN;
+-  void stop_interpreter_at()                    PRODUCT_RETURN;
+-#endif // CC_INTERP
++};
+ 
+-  void bang_stack_shadow_pages(bool native_call);
++// Wrapper classes to produce Interpreter/InterpreterGenerator from either
++// the c++ interpreter or the template interpreter.
+ 
+-  void generate_all();
++class Interpreter: public CC_INTERP_ONLY(CppInterpreter) NOT_CC_INTERP(TemplateInterpreter) {
+ 
+- public:
+-  AbstractInterpreterGenerator(StubQueue* _code);
++  public:
++  // Debugging/printing
++  static InterpreterCodelet* codelet_containing(address pc)     { return (InterpreterCodelet*)_code->stub_containing(pc); }
++#include "incls/_interpreter_pd.hpp.incl"
+ };
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp openjdk/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp
+--- openjdk6/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)interpreterRuntime.cpp	1.487 07/05/05 17:05:38 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -76,7 +73,7 @@
+   } else {
+ #ifdef ASSERT
+     // If we entered this runtime routine, we believed the tag contained
+-    // an unresolved string, an unresolved class or a resolved class. 
++    // an unresolved string, an unresolved class or a resolved class.
+     // However, another thread could have resolved the unresolved string
+     // or class by the time we go there.
+     assert(tag.is_unresolved_string()|| tag.is_string(), "expected string");
+@@ -98,7 +95,7 @@
+   klass->check_valid_for_instantiation(true, CHECK);
+ 
+   // Make sure klass is initialized
+-  klass->initialize(CHECK);    
++  klass->initialize(CHECK);
+ 
+   // At this point the class may not be fully initialized
+   // because of recursive initialization. If it is fully
+@@ -115,7 +112,7 @@
+   //       If we have a breakpoint, then we don't rewrite
+   //       because the _breakpoint bytecode would be lost.
+   oop obj = klass->allocate_instance(CHECK);
+-  thread->set_vm_result(obj);  
++  thread->set_vm_result(obj);
+ IRT_END
+ 
+ 
+@@ -128,8 +125,8 @@
+ IRT_ENTRY(void, InterpreterRuntime::anewarray(JavaThread* thread, constantPoolOopDesc* pool, int index, jint size))
+   // Note: no oopHandle for pool & klass needed since they are not used
+   //       anymore after new_objArray() and no GC can happen before.
+-  //       (This may have to change if this code changes!)       
+-  klassOop  klass = pool->klass_at(index, CHECK);    
++  //       (This may have to change if this code changes!)
++  klassOop  klass = pool->klass_at(index, CHECK);
+   objArrayOop obj = oopFactory::new_objArray(klass, size, CHECK);
+   thread->set_vm_result(obj);
+ IRT_END
+@@ -249,8 +246,8 @@
+       note_trap(thread, Deoptimization::Reason_null_check, CHECK);
+     }
+   }
+-  // create exception 
+-  Handle exception = Exceptions::new_exception(thread, s(), message);  
++  // create exception
++  Handle exception = Exceptions::new_exception(thread, s(), message);
+   thread->set_vm_result(exception());
+ IRT_END
+ 
+@@ -276,7 +273,7 @@
+   if (ProfileTraps) {
+     note_trap(thread, Deoptimization::Reason_range_check, CHECK);
+   }
+-  // create exception 
++  // create exception
+   sprintf(message, "%d", index);
+   THROW_MSG(s(), message);
+ IRT_END
+@@ -292,7 +289,7 @@
+     note_trap(thread, Deoptimization::Reason_class_check, CHECK);
+   }
+ 
+-  // create exception 
++  // create exception
+   THROW_MSG(vmSymbols::java_lang_ClassCastException(), message);
+ IRT_END
+ 
+@@ -314,7 +311,7 @@
+   bool               should_repeat;
+   int                handler_bci;
+   int                current_bci = bcp(thread) - h_method->code_base();
+-  
++
+   // Need to do this check first since when _do_not_unlock_if_synchronized
+   // is set, we don't want to trigger any classloading which may make calls
+   // into java, or surprisingly find a matching exception handler for bci 0
+@@ -374,13 +371,13 @@
+       h_exception = Handle(THREAD, PENDING_EXCEPTION);
+       CLEAR_PENDING_EXCEPTION;
+       if (handler_bci >= 0) {
+-	current_bci = handler_bci;
+-	should_repeat = true;
++        current_bci = handler_bci;
++        should_repeat = true;
+       }
+     }
+   } while (should_repeat == true);
+ 
+-  // notify JVMTI of an exception throw; JVMTI will detect if this is a first 
++  // notify JVMTI of an exception throw; JVMTI will detect if this is a first
+   // time throw or a stack unwinding throw and accordingly notify the debugger
+   if (JvmtiExport::can_post_exceptions()) {
+     JvmtiExport::post_exception_throw(thread, h_method(), bcp(thread), h_exception());
+@@ -398,7 +395,7 @@
+     // enough stack space available to reprotect the stack.
+ #ifndef CC_INTERP
+     continuation = Interpreter::remove_activation_entry();
+-#endif 
++#endif
+     // Count this for compilation purposes
+     h_method->interpreter_throwout_increment();
+   } else {
+@@ -409,7 +406,7 @@
+     continuation = Interpreter::dispatch_table(vtos)[*handler_pc];
+ #endif
+   }
+-  // notify debugger of an exception catch 
++  // notify debugger of an exception catch
+   // (this is good for exceptions caught in native methods as well)
+   if (JvmtiExport::can_post_exceptions()) {
+     JvmtiExport::notice_unwind_due_to_exception(thread, h_method(), handler_pc, h_exception(), (handler_pc != NULL));
+@@ -420,18 +417,18 @@
+ IRT_END
+ 
+ 
+-IRT_ENTRY(void, InterpreterRuntime::throw_pending_exception(JavaThread* thread))  
++IRT_ENTRY(void, InterpreterRuntime::throw_pending_exception(JavaThread* thread))
+   assert(thread->has_pending_exception(), "must only be called if there's an exception pending");
+   // nothing to do - eventually we should remove this code entirely (see comments @ call sites)
+ IRT_END
+ 
+ 
+-IRT_ENTRY(void, InterpreterRuntime::throw_AbstractMethodError(JavaThread* thread))          
++IRT_ENTRY(void, InterpreterRuntime::throw_AbstractMethodError(JavaThread* thread))
+   THROW(vmSymbols::java_lang_AbstractMethodError());
+ IRT_END
+ 
+ 
+-IRT_ENTRY(void, InterpreterRuntime::throw_IncompatibleClassChangeError(JavaThread* thread))          
++IRT_ENTRY(void, InterpreterRuntime::throw_IncompatibleClassChangeError(JavaThread* thread))
+   THROW(vmSymbols::java_lang_IncompatibleClassChangeError());
+ IRT_END
+ 
+@@ -517,9 +514,9 @@
+   if (PrintBiasedLockingStatistics) {
+     Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
+   }
+-  Handle h_obj(thread, elem->obj());  
++  Handle h_obj(thread, elem->obj());
+   assert(Universe::heap()->is_in_reserved_or_null(h_obj()),
+-	 "must be NULL or an object");
++         "must be NULL or an object");
+   if (UseBiasedLocking) {
+     // Retry fast entry if bias is revoked to avoid unnecessary inflation
+     ObjectSynchronizer::fast_enter(h_obj, elem->lock(), true, CHECK);
+@@ -527,7 +524,7 @@
+     ObjectSynchronizer::slow_enter(h_obj, elem->lock(), CHECK);
+   }
+   assert(Universe::heap()->is_in_reserved_or_null(elem->obj()),
+-	 "must be NULL or an object");
++         "must be NULL or an object");
+ #ifdef ASSERT
+   thread->last_frame().interpreter_frame_verify_monitor(elem);
+ #endif
+@@ -539,23 +536,23 @@
+ #ifdef ASSERT
+   thread->last_frame().interpreter_frame_verify_monitor(elem);
+ #endif
+-  Handle h_obj(thread, elem->obj());  
++  Handle h_obj(thread, elem->obj());
+   assert(Universe::heap()->is_in_reserved_or_null(h_obj()),
+-	 "must be NULL or an object");
++         "must be NULL or an object");
+   if (elem == NULL || h_obj()->is_unlocked()) {
+     THROW(vmSymbols::java_lang_IllegalMonitorStateException());
+   }
+   ObjectSynchronizer::slow_exit(h_obj(), elem->lock(), thread);
+   // Free entry. This must be done here, since a pending exception might be installed on
+   // exit. If it is not cleared, the exception handling code will try to unlock the monitor again.
+-  elem->set_obj(NULL); 
++  elem->set_obj(NULL);
+ #ifdef ASSERT
+   thread->last_frame().interpreter_frame_verify_monitor(elem);
+ #endif
+ IRT_END
+ 
+ 
+-IRT_ENTRY(void, InterpreterRuntime::throw_illegal_monitor_state_exception(JavaThread* thread))  
++IRT_ENTRY(void, InterpreterRuntime::throw_illegal_monitor_state_exception(JavaThread* thread))
+   THROW(vmSymbols::java_lang_IllegalMonitorStateException());
+ IRT_END
+ 
+@@ -564,13 +561,13 @@
+   // Returns an illegal exception to install into the current thread. The
+   // pending_exception flag is cleared so normal exception handling does not
+   // trigger. Any current installed exception will be overwritten. This
+-  // method will be called during an exception unwind.  
++  // method will be called during an exception unwind.
+ 
+   assert(!HAS_PENDING_EXCEPTION, "no pending exception");
+   Handle exception(thread, thread->vm_result());
+   assert(exception() != NULL, "vm result should be set");
+   thread->set_vm_result(NULL); // clear vm result before continuing (may cause memory leaks and assert failures)
+-  if (!exception->is_a(SystemDictionary::threaddeath_klass())) {        
++  if (!exception->is_a(SystemDictionary::threaddeath_klass())) {
+     exception = get_preinitialized_exception(
+                        SystemDictionary::IllegalMonitorStateException_klass(),
+                        CATCH);
+@@ -594,23 +591,23 @@
+   JvmtiExport::post_raw_breakpoint(thread, method, bcp);
+ IRT_END
+ 
+-IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes::Code bytecode))  
++IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes::Code bytecode))
+   // extract receiver from the outgoing argument list if necessary
+-  Handle receiver(thread, NULL);  
++  Handle receiver(thread, NULL);
+   if (bytecode == Bytecodes::_invokevirtual || bytecode == Bytecodes::_invokeinterface) {
+     ResourceMark rm(thread);
+     methodHandle m (thread, method(thread));
+-    int bci = m->bci_from(bcp(thread));    
+-    Bytecode_invoke* call = Bytecode_invoke_at(m, bci);    
++    int bci = m->bci_from(bcp(thread));
++    Bytecode_invoke* call = Bytecode_invoke_at(m, bci);
+     symbolHandle signature (thread, call->signature());
+     receiver = Handle(thread,
+                   thread->last_frame().interpreter_callee_receiver(signature));
+     assert(Universe::heap()->is_in_reserved_or_null(receiver()),
+-	   "sanity check");    
++           "sanity check");
+     assert(receiver.is_null() ||
+-	   Universe::heap()->is_in_reserved(receiver->klass()),
+-	   "sanity check");
+-  }  
++           Universe::heap()->is_in_reserved(receiver->klass()),
++           "sanity check");
++  }
+ 
+   // resolve method
+   CallInfo info;
+@@ -618,19 +615,19 @@
+ 
+   {
+     JvmtiHideSingleStepping jhss(thread);
+-    LinkResolver::resolve_invoke(info, receiver, pool, 
+-			         two_byte_index(thread), bytecode, CHECK);
++    LinkResolver::resolve_invoke(info, receiver, pool,
++                                 two_byte_index(thread), bytecode, CHECK);
+     if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
+       int retry_count = 0;
+       while (info.resolved_method()->is_old()) {
+         // It is very unlikely that method is redefined more than 100 times
+-        // in the middle of resolve. If it is looping here more than 100 times 
++        // in the middle of resolve. If it is looping here more than 100 times
+        // then there could be a bug here.
+         guarantee((retry_count++ < 100),
+                   "Could not resolve to latest version of redefined method");
+         // method is redefined in the middle of resolve so re-try.
+-        LinkResolver::resolve_invoke(info, receiver, pool, 
+-			             two_byte_index(thread), bytecode, CHECK);
++        LinkResolver::resolve_invoke(info, receiver, pool,
++                                     two_byte_index(thread), bytecode, CHECK);
+       }
+     }
+   } // end JvmtiHideSingleStepping
+@@ -638,7 +635,7 @@
+   // check if link resolution caused cpCache to be updated
+   if (already_resolved(thread)) return;
+ 
+-  if (bytecode == Bytecodes::_invokeinterface) {    
++  if (bytecode == Bytecodes::_invokeinterface) {
+ 
+     if (TraceItables && Verbose) {
+       ResourceMark rm(thread);
+@@ -651,17 +648,17 @@
+       methodHandle rm = info.resolved_method();
+       assert(rm->is_final() || info.has_vtable_index(),
+              "should have been set already");
+-      cache_entry(thread)->set_method(bytecode, rm, info.vtable_index()); 
+-    } else {          
+-      // Setup itable entry      
++      cache_entry(thread)->set_method(bytecode, rm, info.vtable_index());
++    } else {
++      // Setup itable entry
+       int index = klassItable::compute_itable_index(info.resolved_method()());
+       cache_entry(thread)->set_interface_call(info.resolved_method(), index);
+     }
+-  } else {    
++  } else {
+     cache_entry(thread)->set_method(
+       bytecode,
+       info.resolved_method(),
+-      info.vtable_index());     
++      info.vtable_index());
+   }
+ IRT_END
+ 
+@@ -687,11 +684,11 @@
+     bc->print();
+     if (ProfileInterpreter) {
+       if (branch_bcp != NULL) {
+-	methodDataOop mdo = m->method_data();
+-	if (mdo != NULL) {
+-	  int count = mdo->bci_to_data(branch_bci)->as_JumpData()->taken();
+-	  tty->print_cr("back branch count = %d", count);
+-	}
++        methodDataOop mdo = m->method_data();
++        if (mdo != NULL) {
++          int count = mdo->bci_to_data(branch_bci)->as_JumpData()->taken();
++          tty->print_cr("back branch count = %d", count);
++        }
+       }
+     }
+   }
+@@ -703,7 +700,7 @@
+     tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for ");
+     method->print_short_name(tty);
+     tty->print_cr(" at bci %d", bci);
+-  }    
++  }
+ }
+ #endif // !PRODUCT
+ 
+@@ -715,7 +712,7 @@
+ 
+   frame fr = thread->last_frame();
+   assert(fr.is_interpreted_frame(), "must come from interpreter");
+-  methodHandle method(thread, fr.interpreter_frame_method());  
++  methodHandle method(thread, fr.interpreter_frame_method());
+   const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : 0;
+   const int bci = method->bci_from(fr.interpreter_frame_bcp());
+   NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci, branch_bcp);)
+@@ -972,7 +969,7 @@
+   if (!is_static) {
+     // non-static field accessors have an object, but we need a handle
+     h_obj = Handle(thread, obj);
+-  } 
++  }
+ 
+   JvmtiExport::post_raw_field_modification(thread, method(thread), bcp(thread), h_klass, h_obj,
+                                            fid, sig_type, &fvalue);
+@@ -1015,7 +1012,7 @@
+     vm_exit_out_of_memory(blob_size, "native signature handlers");
+   }
+ 
+-  BufferBlob* bb = BufferBlob::create("Signature Handler Temp Buffer", 
++  BufferBlob* bb = BufferBlob::create("Signature Handler Temp Buffer",
+                                       SignatureHandlerLibrary::buffer_size);
+   _buffer = bb->instructions_begin();
+ 
+@@ -1054,59 +1051,59 @@
+       handler_index = _fingerprints->find(fingerprint);
+       // create handler if necessary
+       if (handler_index < 0) {
+-	ResourceMark rm;
+-	ptrdiff_t align_offset = (address)
+-	  round_to((intptr_t)_buffer, CodeEntryAlignment) - (address)_buffer;
+-	CodeBuffer buffer((address)(_buffer + align_offset),
++        ResourceMark rm;
++        ptrdiff_t align_offset = (address)
++          round_to((intptr_t)_buffer, CodeEntryAlignment) - (address)_buffer;
++        CodeBuffer buffer((address)(_buffer + align_offset),
+                           SignatureHandlerLibrary::buffer_size - align_offset);
+-	InterpreterRuntime::SignatureHandlerGenerator(method, &buffer).generate(fingerprint);
+-	// copy into code heap
+-	address handler = set_handler(&buffer);
++        InterpreterRuntime::SignatureHandlerGenerator(method, &buffer).generate(fingerprint);
++        // copy into code heap
++        address handler = set_handler(&buffer);
+         if (handler == NULL) {
+           // use slow signature handler
+-	} else {
++        } else {
+           // debugging support
+           if (PrintSignatureHandlers) {
+-	    tty->cr();
+-	    tty->print_cr("argument handler #%d for: %s %s (fingerprint = " UINT64_FORMAT ", %d bytes generated)",
+-			  _handlers->length(),
+-			  (method->is_static() ? "static" : "receiver"),
+-			  method->name_and_sig_as_C_string(),
+-			  fingerprint,
+-			  buffer.code_size());
+-	    Disassembler::decode(handler, handler + buffer.code_size());
++            tty->cr();
++            tty->print_cr("argument handler #%d for: %s %s (fingerprint = " UINT64_FORMAT ", %d bytes generated)",
++                          _handlers->length(),
++                          (method->is_static() ? "static" : "receiver"),
++                          method->name_and_sig_as_C_string(),
++                          fingerprint,
++                          buffer.code_size());
++            Disassembler::decode(handler, handler + buffer.code_size());
+ #ifndef PRODUCT
+-	    tty->print_cr(" --- associated result handler ---");
+-	    address rh_begin = AbstractInterpreter::result_handler(method()->result_type());
+-	    address rh_end = rh_begin;
+-	    while (*(int*)rh_end != 0) {
+-	      rh_end += sizeof(int);
+-	    }
+-	    Disassembler::decode(rh_begin, rh_end);
++            tty->print_cr(" --- associated result handler ---");
++            address rh_begin = Interpreter::result_handler(method()->result_type());
++            address rh_end = rh_begin;
++            while (*(int*)rh_end != 0) {
++              rh_end += sizeof(int);
++            }
++            Disassembler::decode(rh_begin, rh_end);
+ #endif
+-	  }
+-	  // add handler to library
+-	  _fingerprints->append(fingerprint);
+-	  _handlers->append(handler);
+-	  // set handler index
+-	  assert(_fingerprints->length() == _handlers->length(), "sanity check");
+-	  handler_index = _fingerprints->length() - 1;
+-	}
++          }
++          // add handler to library
++          _fingerprints->append(fingerprint);
++          _handlers->append(handler);
++          // set handler index
++          assert(_fingerprints->length() == _handlers->length(), "sanity check");
++          handler_index = _fingerprints->length() - 1;
++        }
+       }
+     } else {
+       CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
+     }
+     if (handler_index < 0) {
+       // use generic signature handler
+-      method->set_signature_handler(AbstractInterpreter::slow_signature_handler());
++      method->set_signature_handler(Interpreter::slow_signature_handler());
+     } else {
+       // set handler
+       method->set_signature_handler(_handlers->at(handler_index));
+     }
+   }
+-  assert(method->signature_handler() == AbstractInterpreter::slow_signature_handler() ||
+-	 _handlers->find(method->signature_handler()) == _fingerprints->find(Fingerprinter(method).fingerprint()),
+-	 "sanity check");
++  assert(method->signature_handler() == Interpreter::slow_signature_handler() ||
++         _handlers->find(method->signature_handler()) == _fingerprints->find(Fingerprinter(method).fingerprint()),
++         "sanity check");
+ }
+ 
+ 
+@@ -1148,7 +1145,7 @@
+   Bytecode_invoke* invoke = Bytecode_invoke_at(mh, bci);
+   ArgumentSizeComputer asc(invoke->signature());
+   int size_of_arguments = (asc.size() + (invoke->is_invokestatic() ? 0 : 1)); // receiver
+-  Copy::conjoint_bytes(src_address, dest_address, 
++  Copy::conjoint_bytes(src_address, dest_address,
+                        size_of_arguments * Interpreter::stackElementSize());
+ IRT_END
+ #endif
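
resolve_invoke in the hunks above re-runs resolution for as long as the resolved method is obsolete, bounded by a guarantee of at most ~100 iterations. A self-contained sketch of that bounded-retry shape (CallInfo and resolve here are stubs with hypothetical behavior):

#include <cassert>

// Stub standing in for HotSpot's CallInfo; resolve() pretends to succeed.
struct CallInfo { bool resolved_is_old = false; };
static void resolve(CallInfo& info) { info.resolved_is_old = false; }

void resolve_with_retry(CallInfo& info) {
  resolve(info);
  int retry_count = 0;
  while (info.resolved_is_old) {      // method redefined mid-resolve?
    // Looping here ~100 times would indicate a bug, not a race.
    assert(retry_count++ < 100);
    resolve(info);                    // re-resolve against the new version
  }
}

int main() { CallInfo ci; resolve_with_retry(ci); }
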
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp openjdk/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp
+--- openjdk6/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)interpreterRuntime.hpp	1.143 07/05/05 17:05:38 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The InterpreterRuntime is called by the interpreter for everything
+@@ -34,7 +31,7 @@
+ 
+  private:
+   // Helper functions to access current interpreter state
+-  static frame     last_frame(JavaThread *thread)    { return thread->last_frame(); }  
++  static frame     last_frame(JavaThread *thread)    { return thread->last_frame(); }
+   static methodOop method(JavaThread *thread)        { return last_frame(thread).interpreter_frame_method(); }
+   static address   bcp(JavaThread *thread)           { return last_frame(thread).interpreter_frame_bcp(); }
+   static void      set_bcp_and_mdp(address bcp, JavaThread*thread);
+@@ -72,12 +69,12 @@
+   static void    throw_pending_exception(JavaThread* thread);
+ 
+   // Statics & fields
+-  static void    resolve_get_put(JavaThread* thread, Bytecodes::Code bytecode);  
+-  
++  static void    resolve_get_put(JavaThread* thread, Bytecodes::Code bytecode);
++
+   // Synchronization
+   static void    monitorenter(JavaThread* thread, BasicObjectLock* elem);
+   static void    monitorexit (JavaThread* thread, BasicObjectLock* elem);
+-  
++
+   static void    throw_illegal_monitor_state_exception(JavaThread* thread);
+   static void    new_illegal_monitor_state_exception(JavaThread* thread);
+ 
+@@ -104,8 +101,8 @@
+ 
+   // Native signature handlers
+   static void prepare_native_call(JavaThread* thread, methodOopDesc* method);
+-  static address slow_signature_handler(JavaThread* thread, 
+-                                        methodOopDesc* method, 
++  static address slow_signature_handler(JavaThread* thread,
++                                        methodOopDesc* method,
+                                         intptr_t* from, intptr_t* to);
+ 
+ #if defined(IA32) || defined(AMD64)
+@@ -118,7 +115,7 @@
+ 
+   // Interpreter's frequency counter overflow
+   static nmethod* frequency_counter_overflow(JavaThread* thread, address branch_bcp);
+-  
++
+   // Interpreter profiling support
+   static jint    bcp_to_di(methodOopDesc* method, address cur_bcp);
+   static jint    profile_method(JavaThread* thread, address cur_bcp);
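
The private helpers near the top of this class (last_frame, method, bcp) reduce interpreter state to a frame plus a bytecode pointer; the bytecode index computed earlier in exception_handler_for_exception is then plain pointer arithmetic. A toy model, assuming only that bcp points into the method's bytecode array:

#include <cassert>
#include <cstdint>

// A bytecode index (bci) is the bcp's offset from the method's code base.
int bci_from(const uint8_t* code_base, const uint8_t* bcp) {
  return static_cast<int>(bcp - code_base);
}

int main() {
  uint8_t code[8] = {};
  assert(bci_from(code, code + 3) == 3);  // bcp at the fourth byte => bci 3
}
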
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/invocationCounter.cpp openjdk/hotspot/src/share/vm/interpreter/invocationCounter.cpp
+--- openjdk6/hotspot/src/share/vm/interpreter/invocationCounter.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/invocationCounter.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)invocationCounter.cpp	1.60 07/05/05 17:05:38 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -37,7 +34,7 @@
+ }
+ 
+ void InvocationCounter::reset() {
+-  // Only reset the state and don't make the method look like it's never 
++  // Only reset the state and don't make the method look like it's never
+   // been executed
+   set_state(wait_for_compile);
+ }
+@@ -152,7 +149,7 @@
+ 
+   // When methodData is collected, the backward branch limit is compared against a
+   // methodData counter, rather than an InvocationCounter.  In the former case, we
+-  // don't need the shift by number_of_noncount_bits, but we do need to adjust 
++  // don't need the shift by number_of_noncount_bits, but we do need to adjust
+   // the factor by which we scale the threshold.
+   if (ProfileInterpreter) {
+     InterpreterBackwardBranchLimit = (CompileThreshold * (OnStackReplacePercentage - InterpreterProfilePercentage)) / 100;
+@@ -161,14 +158,13 @@
+   }
+ 
+   assert(0 <= InterpreterBackwardBranchLimit,
+-	 "OSR threshold should be non-negative");
+-  assert(0 <= InterpreterProfileLimit && 
+-	 InterpreterProfileLimit <= InterpreterInvocationLimit, 
+-	 "profile threshold should be less than the compilation threshold "
+-	 "and non-negative");
++         "OSR threshold should be non-negative");
++  assert(0 <= InterpreterProfileLimit &&
++         InterpreterProfileLimit <= InterpreterInvocationLimit,
++         "profile threshold should be less than the compilation threshold "
++         "and non-negative");
+ }
+ 
+ void invocationCounter_init() {
+   InvocationCounter::reinitialize(DelayCompilationDuringStartup);
+ }
+-
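
The reinitialize hunk above scales the backward-branch (OSR) threshold down by the profiling percentage when ProfileInterpreter is on. Worked numerically with illustrative flag values (assumptions, not necessarily the shipped defaults):

#include <cstdio>

int main() {
  // Assumed, illustrative values; real defaults depend on VM configuration.
  const int CompileThreshold             = 10000;
  const int OnStackReplacePercentage     = 140;
  const int InterpreterProfilePercentage = 33;

  // Profiled case from the hunk above: scale CompileThreshold by the
  // OSR percentage minus the slice reserved for profiling.
  int backward_branch_limit =
      CompileThreshold * (OnStackReplacePercentage - InterpreterProfilePercentage) / 100;
  printf("backward branch limit: %d\n", backward_branch_limit);  // 10700
  return 0;
}
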
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/invocationCounter.hpp openjdk/hotspot/src/share/vm/interpreter/invocationCounter.hpp
+--- openjdk6/hotspot/src/share/vm/interpreter/invocationCounter.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/invocationCounter.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)invocationCounter.hpp	1.49 07/05/05 17:05:39 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // InvocationCounters are used to trigger actions when a limit (threshold) is reached.
+@@ -71,7 +68,7 @@
+ 
+   enum State {
+     wait_for_nothing,                            // do nothing when count() > limit()
+-    wait_for_compile,                            // introduce nmethod when count() > limit()   
++    wait_for_compile,                            // introduce nmethod when count() > limit()
+     number_of_states                             // must be <= state_limit
+   };
+ 
+@@ -82,14 +79,13 @@
+   inline void set(State state, int count);       // sets state and counter
+   inline void decay();                           // decay counter (divide by two)
+   void set_carry();                              // set the sticky carry bit
+-  
++
+   // Accessors
+   State  state() const                           { return (State)(_counter & state_mask); }
+   bool   carry() const                           { return (_counter & carry_mask) != 0; }
+   int    limit() const                           { return CompileThreshold; }
+   Action action() const                          { return _action[state()]; }
+   int    count() const                           { return _counter >> number_of_noncount_bits; }
+-  bool   has_overflowed() const                  { return count() >= limit(); }
+ 
+   int   get_InvocationLimit() const              { return InterpreterInvocationLimit >> number_of_noncount_bits; }
+   int   get_BackwardBranchLimit() const          { return InterpreterBackwardBranchLimit >> number_of_noncount_bits; }
+@@ -116,7 +112,7 @@
+   // Miscellaneous
+   static ByteSize counter_offset()               { return byte_offset_of(InvocationCounter, _counter); }
+   static void reinitialize(bool delay_overflow);
+-  
++
+  private:
+   static int         _init  [number_of_states];  // the counter limits
+   static Action      _action[number_of_states];  // the actions
+@@ -136,8 +132,6 @@
+   int c = count();
+   int new_count = c >> 1;
+   // prevent from going to zero, to distinguish from never-executed methods
+-  if (c > 0 && new_count == 0) new_count = 1; 
++  if (c > 0 && new_count == 0) new_count = 1;
+   set(state(), new_count);
+ }
+-
+-
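
decay() above halves a method's invocation count without ever letting a once-executed method fall back to zero, so it stays distinguishable from a never-executed one. A minimal model of just that rule (the state and carry bits packed into _counter are omitted):

#include <cassert>

int decay(int count) {
  int new_count = count >> 1;                      // divide by two
  if (count > 0 && new_count == 0) new_count = 1;  // floor at one, not zero
  return new_count;
}

int main() {
  assert(decay(8) == 4);
  assert(decay(1) == 1);  // once-executed never decays to zero
  assert(decay(0) == 0);  // never-executed stays zero
}
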
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/linkResolver.cpp openjdk/hotspot/src/share/vm/interpreter/linkResolver.cpp
+--- openjdk6/hotspot/src/share/vm/interpreter/linkResolver.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/linkResolver.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)linkResolver.cpp	1.174 07/05/05 17:05:40 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -95,8 +92,8 @@
+ // Klass resolution
+ 
+ void LinkResolver::check_klass_accessability(KlassHandle ref_klass, KlassHandle sel_klass, TRAPS) {
+-  if (!Reflection::verify_class_access(ref_klass->as_klassOop(), 
+-                                       sel_klass->as_klassOop(), 
++  if (!Reflection::verify_class_access(ref_klass->as_klassOop(),
++                                       sel_klass->as_klassOop(),
+                                        true)) {
+     ResourceMark rm(THREAD);
+     Exceptions::fthrow(
+@@ -116,7 +113,7 @@
+ }
+ 
+ void LinkResolver::resolve_klass_no_update(KlassHandle& result, constantPoolHandle pool, int index, TRAPS) {
+-  klassOop result_oop = 
++  klassOop result_oop =
+          constantPoolOopDesc::klass_ref_at_if_loaded_check(pool, index, CHECK);
+   result = KlassHandle(THREAD, result_oop);
+ }
+@@ -154,10 +151,10 @@
+   result = methodHandle(THREAD, ik->lookup_method_in_all_interfaces(name(), signature()));
+ }
+ 
+-void LinkResolver::check_method_accessability(KlassHandle ref_klass, 
+-                                              KlassHandle resolved_klass, 
+-                                              KlassHandle sel_klass, 
+-                                              methodHandle sel_method, 
++void LinkResolver::check_method_accessability(KlassHandle ref_klass,
++                                              KlassHandle resolved_klass,
++                                              KlassHandle sel_klass,
++                                              methodHandle sel_method,
+                                               TRAPS) {
+ 
+   AccessFlags flags = sel_method->access_flags();
+@@ -180,10 +177,10 @@
+     flags.set_flags(new_flags);
+   }
+ 
+-  if (!Reflection::verify_field_access(ref_klass->as_klassOop(), 
++  if (!Reflection::verify_field_access(ref_klass->as_klassOop(),
+                                        resolved_klass->as_klassOop(),
+-                                       sel_klass->as_klassOop(), 
+-                                       flags, 
++                                       sel_klass->as_klassOop(),
++                                       flags,
+                                        true)) {
+     ResourceMark rm(THREAD);
+     Exceptions::fthrow(
+@@ -199,34 +196,34 @@
+   }
+ }
+ 
+-void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle& resolved_klass, 
+-				  constantPoolHandle pool, int index, TRAPS) {
++void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle& resolved_klass,
++                                  constantPoolHandle pool, int index, TRAPS) {
+ 
+   // resolve klass
+   resolve_klass(resolved_klass, pool, index, CHECK);
+-  
++
+   symbolHandle method_name      (THREAD, pool->name_ref_at(index));
+   symbolHandle method_signature (THREAD, pool->signature_ref_at(index));
+   KlassHandle  current_klass(THREAD, pool->pool_holder());
+-  
++
+   resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
+ }
+ 
+ void LinkResolver::resolve_interface_method(methodHandle& resolved_method, KlassHandle& resolved_klass, constantPoolHandle pool, int index, TRAPS) {
+ 
+   // resolve klass
+-  resolve_klass(resolved_klass, pool, index, CHECK);  
++  resolve_klass(resolved_klass, pool, index, CHECK);
+   symbolHandle method_name      (THREAD, pool->name_ref_at(index));
+   symbolHandle method_signature (THREAD, pool->signature_ref_at(index));
+   KlassHandle  current_klass(THREAD, pool->pool_holder());
+-  
++
+   resolve_interface_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
+ }
+ 
+ 
+ void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle resolved_klass,
+-				  symbolHandle method_name, symbolHandle method_signature, 
+-				  KlassHandle current_klass, bool check_access, TRAPS) {
++                                  symbolHandle method_name, symbolHandle method_signature,
++                                  KlassHandle current_klass, bool check_access, TRAPS) {
+ 
+   // 1. check if klass is not interface
+   if (resolved_klass->is_interface()) {
+@@ -234,14 +231,14 @@
+     jio_snprintf(buf, sizeof(buf), "Found interface %s, but class was expected", Klass::cast(resolved_klass())->external_name());
+     THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+   }
+-  
++
+   // 2. lookup method in resolved klass and its super klasses
+   lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK);
+-  
++
+   if (resolved_method.is_null()) { // not found in the class hierarchy
+     // 3. lookup method in all the interfaces implemented by the resolved klass
+     lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK);
+-    
++
+     if (resolved_method.is_null()) {
+       // 4. method lookup failed
+       ResourceMark rm(THREAD);
+@@ -266,37 +263,37 @@
+     assert(current_klass.not_null() , "current_klass should not be null");
+ 
+     // check if method can be accessed by the referring class
+-    check_method_accessability(current_klass, 
+-                               resolved_klass, 
+-                               KlassHandle(THREAD, resolved_method->method_holder()), 
+-                               resolved_method, 
++    check_method_accessability(current_klass,
++                               resolved_klass,
++                               KlassHandle(THREAD, resolved_method->method_holder()),
++                               resolved_method,
+                                CHECK);
+-    
++
+     // check loader constraints
+     Handle loader (THREAD, instanceKlass::cast(current_klass())->class_loader());
+     Handle class_loader (THREAD, instanceKlass::cast(resolved_method->method_holder())->class_loader());
+     {
+       ResourceMark rm(THREAD);
+-      char* failed_type_name = 
+-	SystemDictionary::check_signature_loaders(method_signature, loader,
+-						  class_loader, true, CHECK);
++      char* failed_type_name =
++        SystemDictionary::check_signature_loaders(method_signature, loader,
++                                                  class_loader, true, CHECK);
+       if (failed_type_name != NULL) {
+-	const char* msg = "loader constraint violation: when resolving method"
+-	  " \"%s\" the class loader (instance of %s) of the current class, %s,"
+-	  " and the class loader (instance of %s) for resolved class, %s, have"
+-	  " different Class objects for the type %s used in the signature";
+-	char* sig = methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),method_name(),method_signature());
+-	const char* loader1 = SystemDictionary::loader_name(loader());
+-	char* current = instanceKlass::cast(current_klass())->name()->as_C_string();
+-	const char* loader2 = SystemDictionary::loader_name(class_loader());
+-	char* resolved = instanceKlass::cast(resolved_klass())->name()->as_C_string();
+-	size_t buflen = strlen(msg) + strlen(sig) + strlen(loader1) + 
+-	  strlen(current) + strlen(loader2) + strlen(resolved) + 
+-	  strlen(failed_type_name);
+-	char* buf = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, buflen);
+-	jio_snprintf(buf, buflen, msg, sig, loader1, current, loader2,
+-		     resolved, failed_type_name);
+-	THROW_MSG(vmSymbols::java_lang_LinkageError(), buf);
++        const char* msg = "loader constraint violation: when resolving method"
++          " \"%s\" the class loader (instance of %s) of the current class, %s,"
++          " and the class loader (instance of %s) for resolved class, %s, have"
++          " different Class objects for the type %s used in the signature";
++        char* sig = methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),method_name(),method_signature());
++        const char* loader1 = SystemDictionary::loader_name(loader());
++        char* current = instanceKlass::cast(current_klass())->name()->as_C_string();
++        const char* loader2 = SystemDictionary::loader_name(class_loader());
++        char* resolved = instanceKlass::cast(resolved_klass())->name()->as_C_string();
++        size_t buflen = strlen(msg) + strlen(sig) + strlen(loader1) +
++          strlen(current) + strlen(loader2) + strlen(resolved) +
++          strlen(failed_type_name);
++        char* buf = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, buflen);
++        jio_snprintf(buf, buflen, msg, sig, loader1, current, loader2,
++                     resolved, failed_type_name);
++        THROW_MSG(vmSymbols::java_lang_LinkageError(), buf);
+       }
+     }
+   }
+@@ -315,10 +312,10 @@
+     jio_snprintf(buf, sizeof(buf), "Found class %s, but interface was expected", Klass::cast(resolved_klass())->external_name());
+     THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+   }
+-  
++
+   // lookup method in this interface or its super, java.lang.Object
+   lookup_instance_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK);
+-  
++
+   if (resolved_method.is_null()) {
+     // lookup method in all the super-interfaces
+     lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK);
+@@ -332,33 +329,33 @@
+     }
+   }
+ 
+-  if (check_access) {  
++  if (check_access) {
+     HandleMark hm(THREAD);
+     Handle loader (THREAD, instanceKlass::cast(current_klass())->class_loader());
+     Handle class_loader (THREAD, instanceKlass::cast(resolved_method->method_holder())->class_loader());
+     {
+       ResourceMark rm(THREAD);
+       char* failed_type_name =
+-	SystemDictionary::check_signature_loaders(method_signature, loader, 
+-						  class_loader, true, CHECK);  
++        SystemDictionary::check_signature_loaders(method_signature, loader,
++                                                  class_loader, true, CHECK);
+       if (failed_type_name != NULL) {
+-	const char* msg = "loader constraint violation: when resolving "
+-	  "interface method \"%s\" the class loader (instance of %s) of the "
+-	  "current class, %s, and the class loader (instance of %s) for "
+-	  "resolved class, %s, have different Class objects for the type %s "
+-	  "used in the signature";
+-	char* sig = methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),method_name(),method_signature());
+-	const char* loader1 = SystemDictionary::loader_name(loader());
+-	char* current = instanceKlass::cast(current_klass())->name()->as_C_string();
+-	const char* loader2 = SystemDictionary::loader_name(class_loader());
+-	char* resolved = instanceKlass::cast(resolved_klass())->name()->as_C_string();
+-	size_t buflen = strlen(msg) + strlen(sig) + strlen(loader1) + 
+-	  strlen(current) + strlen(loader2) + strlen(resolved) + 
+-	  strlen(failed_type_name);
+-	char* buf = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, buflen);
+-	jio_snprintf(buf, buflen, msg, sig, loader1, current, loader2,
+-		     resolved, failed_type_name);
+-	THROW_MSG(vmSymbols::java_lang_LinkageError(), buf);
++        const char* msg = "loader constraint violation: when resolving "
++          "interface method \"%s\" the class loader (instance of %s) of the "
++          "current class, %s, and the class loader (instance of %s) for "
++          "resolved class, %s, have different Class objects for the type %s "
++          "used in the signature";
++        char* sig = methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),method_name(),method_signature());
++        const char* loader1 = SystemDictionary::loader_name(loader());
++        char* current = instanceKlass::cast(current_klass())->name()->as_C_string();
++        const char* loader2 = SystemDictionary::loader_name(class_loader());
++        char* resolved = instanceKlass::cast(resolved_klass())->name()->as_C_string();
++        size_t buflen = strlen(msg) + strlen(sig) + strlen(loader1) +
++          strlen(current) + strlen(loader2) + strlen(resolved) +
++          strlen(failed_type_name);
++        char* buf = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, buflen);
++        jio_snprintf(buf, buflen, msg, sig, loader1, current, loader2,
++                     resolved, failed_type_name);
++        THROW_MSG(vmSymbols::java_lang_LinkageError(), buf);
+       }
+     }
+   }
+@@ -367,15 +364,15 @@
+ //------------------------------------------------------------------------------------------------------------------------
+ // Field resolution
+ 
+-void LinkResolver::check_field_accessability(KlassHandle ref_klass, 
++void LinkResolver::check_field_accessability(KlassHandle ref_klass,
+                                              KlassHandle resolved_klass,
+-                                             KlassHandle sel_klass, 
+-                                             fieldDescriptor& fd, 
++                                             KlassHandle sel_klass,
++                                             fieldDescriptor& fd,
+                                              TRAPS) {
+-  if (!Reflection::verify_field_access(ref_klass->as_klassOop(), 
++  if (!Reflection::verify_field_access(ref_klass->as_klassOop(),
+                                        resolved_klass->as_klassOop(),
+-                                       sel_klass->as_klassOop(), 
+-                                       fd.access_flags(), 
++                                       sel_klass->as_klassOop(),
++                                       fd.access_flags(),
+                                        true)) {
+     ResourceMark rm(THREAD);
+     Exceptions::fthrow(
+@@ -426,7 +423,7 @@
+     ResourceMark rm(THREAD);
+     THROW_MSG(vmSymbols::java_lang_NoSuchFieldError(), field->as_C_string());
+   }
+-  
++
+   // check access
+   KlassHandle ref_klass(THREAD, pool->pool_holder());
+   check_field_accessability(ref_klass, resolved_klass, sel_klass, fd, CHECK);
+@@ -438,11 +435,11 @@
+     THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), msg);
+   }
+ 
+-  // Final fields can only be accessed from its own class.  
+-  if (is_put && fd.access_flags().is_final() && sel_klass() != pool->pool_holder()) {    
++  // Final fields can only be accessed from its own class.
++  if (is_put && fd.access_flags().is_final() && sel_klass() != pool->pool_holder()) {
+     THROW(vmSymbols::java_lang_IllegalAccessError());
+   }
+-  
++
+   // initialize resolved_klass if necessary
+   // note 1: the klass which declared the field must be initialized (i.e, sel_klass)
+   //         according to the newest JVM spec (5.5, p.170) - was bug (gri 7/28/99)
+@@ -461,25 +458,25 @@
+     {
+       ResourceMark rm(THREAD);
+       char* failed_type_name =
+-	SystemDictionary::check_signature_loaders(signature_ref,
+-						  ref_loader, sel_loader,
+-						  false,
+-						  CHECK);
++        SystemDictionary::check_signature_loaders(signature_ref,
++                                                  ref_loader, sel_loader,
++                                                  false,
++                                                  CHECK);
+       if (failed_type_name != NULL) {
+-	const char* msg = "loader constraint violation: when resolving field"
+-	  " \"%s\" the class loader (instance of %s) of the referring class, "
+-	  "%s, and the class loader (instance of %s) for the field's resolved "
+-	  "type, %s, have different Class objects for that type";
+-	char* field_name = field_h()->as_C_string();
+-	const char* loader1 = SystemDictionary::loader_name(ref_loader());
+-	char* sel = instanceKlass::cast(sel_klass())->name()->as_C_string();
+-	const char* loader2 = SystemDictionary::loader_name(sel_loader());
+-	size_t buflen = strlen(msg) + strlen(field_name) + strlen(loader1) + 
+-	  strlen(sel) + strlen(loader2) + strlen(failed_type_name);
+-	char* buf = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, buflen);
+-	jio_snprintf(buf, buflen, msg, field_name, loader1, sel, loader2,
+-		     failed_type_name);
+-	THROW_MSG(vmSymbols::java_lang_LinkageError(), buf);
++        const char* msg = "loader constraint violation: when resolving field"
++          " \"%s\" the class loader (instance of %s) of the referring class, "
++          "%s, and the class loader (instance of %s) for the field's resolved "
++          "type, %s, have different Class objects for that type";
++        char* field_name = field_h()->as_C_string();
++        const char* loader1 = SystemDictionary::loader_name(ref_loader());
++        char* sel = instanceKlass::cast(sel_klass())->name()->as_C_string();
++        const char* loader2 = SystemDictionary::loader_name(sel_loader());
++        size_t buflen = strlen(msg) + strlen(field_name) + strlen(loader1) +
++          strlen(sel) + strlen(loader2) + strlen(failed_type_name);
++        char* buf = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, buflen);
++        jio_snprintf(buf, buflen, msg, field_name, loader1, sel, loader2,
++                     failed_type_name);
++        THROW_MSG(vmSymbols::java_lang_LinkageError(), buf);
+       }
+     }
+   }
+@@ -503,17 +500,17 @@
+ // recv_klass         the receiver klass
+ 
+ 
+-void LinkResolver::resolve_static_call(CallInfo& result, KlassHandle& resolved_klass, symbolHandle method_name, 
+-                                       symbolHandle method_signature, KlassHandle current_klass, 
++void LinkResolver::resolve_static_call(CallInfo& result, KlassHandle& resolved_klass, symbolHandle method_name,
++                                       symbolHandle method_signature, KlassHandle current_klass,
+                                        bool check_access, bool initialize_class, TRAPS) {
+-  methodHandle resolved_method;  
++  methodHandle resolved_method;
+   linktime_resolve_static_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
+   resolved_klass = KlassHandle(THREAD, Klass::cast(resolved_method->method_holder()));
+-  
+-  // Initialize klass (this should only happen if everything is ok)  
++
++  // Initialize klass (this should only happen if everything is ok)
+   if (initialize_class && resolved_klass->should_be_initialized()) {
+     resolved_klass->initialize(CHECK);
+-    linktime_resolve_static_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);      
++    linktime_resolve_static_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
+   }
+ 
+   // setup result
+@@ -521,10 +518,10 @@
+ }
+ 
+ // throws linktime exceptions
+-void LinkResolver::linktime_resolve_static_method(methodHandle& resolved_method, KlassHandle resolved_klass, 
+-						  symbolHandle method_name, symbolHandle method_signature, 
+-						  KlassHandle current_klass, bool check_access, TRAPS) {
+-  
++void LinkResolver::linktime_resolve_static_method(methodHandle& resolved_method, KlassHandle resolved_klass,
++                                                  symbolHandle method_name, symbolHandle method_signature,
++                                                  KlassHandle current_klass, bool check_access, TRAPS) {
++
+   resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
+   assert(resolved_method->name() != vmSymbols::class_initializer_name(), "should have been checked in verifier");
+ 
+@@ -539,7 +536,7 @@
+ }
+ 
+ 
+-void LinkResolver::resolve_special_call(CallInfo& result, KlassHandle resolved_klass, symbolHandle method_name, 
++void LinkResolver::resolve_special_call(CallInfo& result, KlassHandle resolved_klass, symbolHandle method_name,
+                                         symbolHandle method_signature, KlassHandle current_klass, bool check_access, TRAPS) {
+   methodHandle resolved_method;
+   linktime_resolve_special_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
+@@ -547,50 +544,50 @@
+ }
+ 
+ // throws linktime exceptions
+-void LinkResolver::linktime_resolve_special_method(methodHandle& resolved_method, KlassHandle resolved_klass, 
+-						   symbolHandle method_name, symbolHandle method_signature, 
+-						   KlassHandle current_klass, bool check_access, TRAPS) {
++void LinkResolver::linktime_resolve_special_method(methodHandle& resolved_method, KlassHandle resolved_klass,
++                                                   symbolHandle method_name, symbolHandle method_signature,
++                                                   KlassHandle current_klass, bool check_access, TRAPS) {
+ 
+   resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
+-  
++
+   // check if method name is <init>, that it is found in same klass as static type
+   if (resolved_method->name() == vmSymbols::object_initializer_name() &&
+       resolved_method->method_holder() != resolved_klass()) {
+     ResourceMark rm(THREAD);
+     Exceptions::fthrow(
+       THREAD_AND_LOCATION,
+-      vmSymbolHandles::java_lang_NoSuchMethodError(), 
+-      "%s: method %s%s not found", 
+-      resolved_klass->external_name(), 
++      vmSymbolHandles::java_lang_NoSuchMethodError(),
++      "%s: method %s%s not found",
++      resolved_klass->external_name(),
+       resolved_method->name()->as_C_string(),
+       resolved_method->signature()->as_C_string()
+     );
+     return;
+   }
+-  
++
+   // check if not static
+   if (resolved_method->is_static()) {
+     char buf[200];
+     jio_snprintf(buf, sizeof(buf),
+-		 "Expecting non-static method %s", 
+-		 methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
+-							 resolved_method->name(),
+-							 resolved_method->signature()));
++                 "Expecting non-static method %s",
++                 methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
++                                                         resolved_method->name(),
++                                                         resolved_method->signature()));
+     THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+   }
+ }
+ 
+ // throws runtime exceptions
+ void LinkResolver::runtime_resolve_special_method(CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass,
+-						  KlassHandle current_klass, bool check_access, TRAPS) {
+-  
++                                                  KlassHandle current_klass, bool check_access, TRAPS) {
++
+   // resolved method is selected method unless we have an old-style lookup
+   methodHandle sel_method(THREAD, resolved_method());
+-  
++
+   // check if this is an old-style super call and do a new lookup if so
+   { KlassHandle method_klass  = KlassHandle(THREAD,
+                                             resolved_method->method_holder());
+-           
++
+     if (check_access &&
+         // a) check if ACC_SUPER flag is set for the current class
+         current_klass->is_super() &&
+@@ -600,7 +597,7 @@
+         resolved_method->name() != vmSymbols::object_initializer_name()) {
+       // Lookup super method
+       KlassHandle super_klass(THREAD, current_klass->super());
+-      lookup_instance_method_in_klasses(sel_method, super_klass, 
++      lookup_instance_method_in_klasses(sel_method, super_klass,
+                            symbolHandle(THREAD, resolved_method->name()),
+                            symbolHandle(THREAD, resolved_method->signature()), CHECK);
+       // check if found
+@@ -618,11 +615,11 @@
+   if (sel_method->is_static()) {
+     char buf[200];
+     jio_snprintf(buf, sizeof(buf), "Expecting non-static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
+-													     resolved_method->name(),
+-													     resolved_method->signature()));
++                                                                                                             resolved_method->name(),
++                                                                                                             resolved_method->signature()));
+     THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+   }
+-  
++
+   // check if abstract
+   if (sel_method->is_abstract()) {
+     ResourceMark rm(THREAD);
+@@ -631,26 +628,26 @@
+                                                       sel_method->name(),
+                                                       sel_method->signature()));
+   }
+-  
++
+   // setup result
+   result.set_static(resolved_klass, sel_method, CHECK);
+ }
+ 
+ void LinkResolver::resolve_virtual_call(CallInfo& result, Handle recv, KlassHandle receiver_klass, KlassHandle resolved_klass,
+-                                        symbolHandle method_name, symbolHandle method_signature, KlassHandle current_klass, 
++                                        symbolHandle method_name, symbolHandle method_signature, KlassHandle current_klass,
+                                         bool check_access, bool check_null_and_abstract, TRAPS) {
+-  methodHandle resolved_method;  
++  methodHandle resolved_method;
+   linktime_resolve_virtual_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
+   runtime_resolve_virtual_method(result, resolved_method, resolved_klass, recv, receiver_klass, check_null_and_abstract, CHECK);
+ }
+ 
+ // throws linktime exceptions
+-void LinkResolver::linktime_resolve_virtual_method(methodHandle &resolved_method, KlassHandle resolved_klass, 
+-						   symbolHandle method_name, symbolHandle method_signature,
+-						   KlassHandle current_klass, bool check_access, TRAPS) {
++void LinkResolver::linktime_resolve_virtual_method(methodHandle &resolved_method, KlassHandle resolved_klass,
++                                                   symbolHandle method_name, symbolHandle method_signature,
++                                                   KlassHandle current_klass, bool check_access, TRAPS) {
+   // normal method resolution
+   resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
+-  
++
+   assert(resolved_method->name() != vmSymbols::object_initializer_name(), "should have been checked in verifier");
+   assert(resolved_method->name() != vmSymbols::class_initializer_name (), "should have been checked in verifier");
+ 
+@@ -658,32 +655,32 @@
+   if (resolved_method->is_static()) {
+     char buf[200];
+     jio_snprintf(buf, sizeof(buf), "Expecting non-static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
+-													     resolved_method->name(),
+-													     resolved_method->signature()));
++                                                                                                             resolved_method->name(),
++                                                                                                             resolved_method->signature()));
+     THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+-  }  
++  }
+ }
+ 
+ // throws runtime exceptions
+ void LinkResolver::runtime_resolve_virtual_method(CallInfo& result,
+                                                   methodHandle resolved_method,
+-                                                  KlassHandle resolved_klass, 
++                                                  KlassHandle resolved_klass,
+                                                   Handle recv,
+                                                   KlassHandle recv_klass,
+                                                   bool check_null_and_abstract,
+                                                   TRAPS) {
+-    
++
+   // setup default return values
+   int vtable_index = methodOopDesc::invalid_vtable_index;
+   methodHandle selected_method;
+-  
++
+   assert(recv.is_null() || recv->is_oop(), "receiver is not an oop");
+ 
+   // runtime method resolution
+   if (check_null_and_abstract && recv.is_null()) { // check if receiver exists
+     THROW(vmSymbols::java_lang_NullPointerException());
+   }
+-    
++
+   // Virtual methods cannot be resolved before its klass has been linked, for otherwise the methodOop's
+   // has not been rewritten, and the vtable initialized.
+   assert(instanceKlass::cast(resolved_method->method_holder())->is_linked(), "must be linked");
+@@ -695,19 +692,19 @@
+ 
+   // do lookup based on receiver klass using the vtable index
+   if (resolved_method->method_holder()->klass_part()->is_interface()) { // miranda method
+-    vtable_index = vtable_index_of_miranda_method(resolved_klass, 
+-                           symbolHandle(THREAD, resolved_method->name()), 
++    vtable_index = vtable_index_of_miranda_method(resolved_klass,
++                           symbolHandle(THREAD, resolved_method->name()),
+                            symbolHandle(THREAD, resolved_method->signature()), CHECK);
+     assert(vtable_index >= 0 , "we should have valid vtable index at this point");
+ 
+     instanceKlass* inst = instanceKlass::cast(recv_klass());
+     selected_method = methodHandle(THREAD, inst->method_at_vtable(vtable_index));
+-  } else {    
++  } else {
+     // at this point we are sure that resolved_method is virtual and not
+     // a miranda method; therefore, it must have a valid vtable index.
+     vtable_index = resolved_method->vtable_index();
+-    // We could get a negative vtable_index for final methods, 
+-    // because as an optimization they are they are never put in the vtable, 
++    // We could get a negative vtable_index for final methods,
++    // because as an optimization they are never put in the vtable,
+     // unless they override an existing method.
+     // If we do get a negative, it means the resolved method is the selected
+     // method, and it can never be changed by an override.
+@@ -721,7 +718,7 @@
+       selected_method = methodHandle(THREAD, inst->method_at_vtable(vtable_index));
+     }
+   }
+-  
++
+   // check if method exists
+   if (selected_method.is_null()) {
+     ResourceMark rm(THREAD);
+@@ -730,7 +727,7 @@
+                                                       resolved_method->name(),
+                                                       resolved_method->signature()));
+   }
+-  
++
+   // check if abstract
+   if (check_null_and_abstract && selected_method->is_abstract()) {
+     ResourceMark rm(THREAD);
+@@ -739,48 +736,48 @@
+                                                       selected_method->name(),
+                                                       selected_method->signature()));
+   }
+-  
++
+   // setup result
+   result.set_virtual(resolved_klass, recv_klass, resolved_method, selected_method, vtable_index, CHECK);
+ }
+ 
+ void LinkResolver::resolve_interface_call(CallInfo& result, Handle recv, KlassHandle recv_klass, KlassHandle resolved_klass,
+-                                          symbolHandle method_name, symbolHandle method_signature, KlassHandle current_klass, 
++                                          symbolHandle method_name, symbolHandle method_signature, KlassHandle current_klass,
+                                           bool check_access, bool check_null_and_abstract, TRAPS) {
+-  methodHandle resolved_method;  
++  methodHandle resolved_method;
+   linktime_resolve_interface_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
+-  runtime_resolve_interface_method(result, resolved_method, resolved_klass, recv, recv_klass, check_null_and_abstract, CHECK);  
++  runtime_resolve_interface_method(result, resolved_method, resolved_klass, recv, recv_klass, check_null_and_abstract, CHECK);
+ }
+ 
+ // throws linktime exceptions
+-void LinkResolver::linktime_resolve_interface_method(methodHandle& resolved_method, KlassHandle resolved_klass, symbolHandle method_name, 
+-						     symbolHandle method_signature, KlassHandle current_klass, bool check_access, TRAPS) {
++void LinkResolver::linktime_resolve_interface_method(methodHandle& resolved_method, KlassHandle resolved_klass, symbolHandle method_name,
++                                                     symbolHandle method_signature, KlassHandle current_klass, bool check_access, TRAPS) {
+   // normal interface method resolution
+   resolve_interface_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
+ 
+   assert(resolved_method->name() != vmSymbols::object_initializer_name(), "should have been checked in verifier");
+-  assert(resolved_method->name() != vmSymbols::class_initializer_name (), "should have been checked in verifier");  
++  assert(resolved_method->name() != vmSymbols::class_initializer_name (), "should have been checked in verifier");
+ }
+ 
+ // throws runtime exceptions
+-void LinkResolver::runtime_resolve_interface_method(CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, 
+-						    Handle recv, KlassHandle recv_klass, bool check_null_and_abstract, TRAPS) {
++void LinkResolver::runtime_resolve_interface_method(CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass,
++                                                    Handle recv, KlassHandle recv_klass, bool check_null_and_abstract, TRAPS) {
+   // check if receiver exists
+   if (check_null_and_abstract && recv.is_null()) {
+     THROW(vmSymbols::java_lang_NullPointerException());
+   }
+ 
+   // check if receiver klass implements the resolved interface
+-  if (!recv_klass->is_subtype_of(resolved_klass())) { 
++  if (!recv_klass->is_subtype_of(resolved_klass())) {
+     char buf[200];
+-    jio_snprintf(buf, sizeof(buf), "Class %s does not implement the requested interface %s", 
+-		 (Klass::cast(recv_klass()))->external_name(),
+-		 (Klass::cast(resolved_klass()))->external_name());
++    jio_snprintf(buf, sizeof(buf), "Class %s does not implement the requested interface %s",
++                 (Klass::cast(recv_klass()))->external_name(),
++                 (Klass::cast(resolved_klass()))->external_name());
+     THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+   }
+   // do lookup based on receiver klass
+   methodHandle sel_method;
+-  lookup_instance_method_in_klasses(sel_method, recv_klass, 
++  lookup_instance_method_in_klasses(sel_method, recv_klass,
+             symbolHandle(THREAD, resolved_method->name()),
+             symbolHandle(THREAD, resolved_method->signature()), CHECK);
+   // check if method exists
+@@ -820,7 +817,7 @@
+                                                  bool check_access) {
+   EXCEPTION_MARK;
+   methodHandle method_result;
+-  linktime_resolve_interface_method(method_result, resolved_klass, method_name, method_signature, current_klass, check_access, THREAD); 
++  linktime_resolve_interface_method(method_result, resolved_klass, method_name, method_signature, current_klass, check_access, THREAD);
+   if (HAS_PENDING_EXCEPTION) {
+     CLEAR_PENDING_EXCEPTION;
+     return methodHandle();
+@@ -837,7 +834,7 @@
+                                                  bool check_access) {
+   EXCEPTION_MARK;
+   methodHandle method_result;
+-  linktime_resolve_virtual_method(method_result, resolved_klass, method_name, method_signature, current_klass, check_access, THREAD); 
++  linktime_resolve_virtual_method(method_result, resolved_klass, method_name, method_signature, current_klass, check_access, THREAD);
+   if (HAS_PENDING_EXCEPTION) {
+     CLEAR_PENDING_EXCEPTION;
+     return methodHandle();
+@@ -854,7 +851,7 @@
+                                                  KlassHandle current_klass) {
+   EXCEPTION_MARK;
+   CallInfo info;
+-  resolve_virtual_call(info, Handle(), receiver_klass, resolved_klass, name, signature, current_klass, true, false, THREAD); 
++  resolve_virtual_call(info, Handle(), receiver_klass, resolved_klass, name, signature, current_klass, true, false, THREAD);
+   if (HAS_PENDING_EXCEPTION) {
+     CLEAR_PENDING_EXCEPTION;
+     return methodHandle();
+@@ -870,7 +867,7 @@
+                                                  KlassHandle current_klass) {
+   EXCEPTION_MARK;
+   CallInfo info;
+-  resolve_interface_call(info, Handle(), receiver_klass, resolved_klass, name, signature, current_klass, true, false, THREAD); 
++  resolve_interface_call(info, Handle(), receiver_klass, resolved_klass, name, signature, current_klass, true, false, THREAD);
+   if (HAS_PENDING_EXCEPTION) {
+     CLEAR_PENDING_EXCEPTION;
+     return methodHandle();
+@@ -886,7 +883,7 @@
+                                                KlassHandle current_klass) {
+   EXCEPTION_MARK;
+   CallInfo info;
+-  resolve_virtual_call(info, Handle(), receiver_klass, resolved_klass, name, signature, current_klass, true, false, THREAD); 
++  resolve_virtual_call(info, Handle(), receiver_klass, resolved_klass, name, signature, current_klass, true, false, THREAD);
+   if (HAS_PENDING_EXCEPTION) {
+     CLEAR_PENDING_EXCEPTION;
+     return methodOopDesc::invalid_vtable_index;
+@@ -901,7 +898,7 @@
+                                                   KlassHandle current_klass) {
+   EXCEPTION_MARK;
+   CallInfo info;
+-  resolve_static_call(info, resolved_klass, name, signature, current_klass, true, false, THREAD); 
++  resolve_static_call(info, resolved_klass, name, signature, current_klass, true, false, THREAD);
+   if (HAS_PENDING_EXCEPTION) {
+     CLEAR_PENDING_EXCEPTION;
+     return methodHandle();
+@@ -909,11 +906,11 @@
+   return info.selected_method();
+ }
+ 
+-methodHandle LinkResolver::resolve_special_call_or_null(KlassHandle resolved_klass, symbolHandle name, symbolHandle signature, 
+-							KlassHandle current_klass) {
++methodHandle LinkResolver::resolve_special_call_or_null(KlassHandle resolved_klass, symbolHandle name, symbolHandle signature,
++                                                        KlassHandle current_klass) {
+   EXCEPTION_MARK;
+   CallInfo info;
+-  resolve_special_call(info, resolved_klass, name, signature, current_klass, true, THREAD); 
++  resolve_special_call(info, resolved_klass, name, signature, current_klass, true, THREAD);
+   if (HAS_PENDING_EXCEPTION) {
+     CLEAR_PENDING_EXCEPTION;
+     return methodHandle();
+@@ -936,9 +933,9 @@
+   return;
+ }
+ 
+-void LinkResolver::resolve_pool(KlassHandle& resolved_klass, symbolHandle& method_name, symbolHandle& method_signature, 
++void LinkResolver::resolve_pool(KlassHandle& resolved_klass, symbolHandle& method_name, symbolHandle& method_signature,
+                                 KlassHandle& current_klass, constantPoolHandle pool, int index, TRAPS) {
+-   // resolve klass  
++   // resolve klass
+   resolve_klass(resolved_klass, pool, index, CHECK);
+ 
+   // Get name, signature, and static klass
+@@ -948,46 +945,46 @@
+ }
+ 
+ 
+-void LinkResolver::resolve_invokestatic(CallInfo& result, constantPoolHandle pool, int index, TRAPS) {  
+-  KlassHandle  resolved_klass;    
++void LinkResolver::resolve_invokestatic(CallInfo& result, constantPoolHandle pool, int index, TRAPS) {
++  KlassHandle  resolved_klass;
+   symbolHandle method_name;
+   symbolHandle method_signature;
+   KlassHandle  current_klass;
+-  resolve_pool(resolved_klass, method_name,  method_signature, current_klass, pool, index, CHECK);  
++  resolve_pool(resolved_klass, method_name,  method_signature, current_klass, pool, index, CHECK);
+   resolve_static_call(result, resolved_klass, method_name, method_signature, current_klass, true, true, CHECK);
+ }
+ 
+ 
+-void LinkResolver::resolve_invokespecial(CallInfo& result, constantPoolHandle pool, int index, TRAPS) {            
+-  KlassHandle  resolved_klass;    
++void LinkResolver::resolve_invokespecial(CallInfo& result, constantPoolHandle pool, int index, TRAPS) {
++  KlassHandle  resolved_klass;
+   symbolHandle method_name;
+   symbolHandle method_signature;
+   KlassHandle  current_klass;
+-  resolve_pool(resolved_klass, method_name,  method_signature, current_klass, pool, index, CHECK);  
++  resolve_pool(resolved_klass, method_name,  method_signature, current_klass, pool, index, CHECK);
+   resolve_special_call(result, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
+ }
+ 
+ 
+ void LinkResolver::resolve_invokevirtual(CallInfo& result, Handle recv,
+-					  constantPoolHandle pool, int index,
+-					  TRAPS) {
+-        
+-  KlassHandle  resolved_klass;    
++                                          constantPoolHandle pool, int index,
++                                          TRAPS) {
++
++  KlassHandle  resolved_klass;
+   symbolHandle method_name;
+   symbolHandle method_signature;
+   KlassHandle  current_klass;
+-  resolve_pool(resolved_klass, method_name,  method_signature, current_klass, pool, index, CHECK);  
++  resolve_pool(resolved_klass, method_name,  method_signature, current_klass, pool, index, CHECK);
+   KlassHandle recvrKlass (THREAD, recv.is_null() ? (klassOop)NULL : recv->klass());
+   resolve_virtual_call(result, recv, recvrKlass, resolved_klass, method_name, method_signature, current_klass, true, true, CHECK);
+-} 
++}
+ 
+ 
+ void LinkResolver::resolve_invokeinterface(CallInfo& result, Handle recv, constantPoolHandle pool, int index, TRAPS) {
+-  KlassHandle  resolved_klass;    
++  KlassHandle  resolved_klass;
+   symbolHandle method_name;
+   symbolHandle method_signature;
+   KlassHandle  current_klass;
+-  resolve_pool(resolved_klass, method_name,  method_signature, current_klass, pool, index, CHECK);  
++  resolve_pool(resolved_klass, method_name,  method_signature, current_klass, pool, index, CHECK);
+   KlassHandle recvrKlass (THREAD, recv.is_null() ? (klassOop)NULL : recv->klass());
+   resolve_interface_call(result, recv, recvrKlass, resolved_klass, method_name, method_signature, current_klass, true, true, CHECK);
+ }
+@@ -1001,4 +998,3 @@
+ }
+ 
+ #endif
+-
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/linkResolver.hpp openjdk/hotspot/src/share/vm/interpreter/linkResolver.hpp
+--- openjdk6/hotspot/src/share/vm/interpreter/linkResolver.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/linkResolver.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)linkResolver.hpp	1.74 07/05/05 17:05:39 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // All the necessary definitions for run-time link resolution.
+@@ -47,7 +44,7 @@
+   int          _field_index;  // original index in the klass
+   int          _field_offset;
+   BasicType    _field_type;
+-  
++
+  public:
+   void         set(KlassHandle klass, symbolHandle name, int field_index, int field_offset,
+                  BasicType field_type, AccessFlags access_flags);
+@@ -106,37 +103,37 @@
+   static void lookup_method_in_klasses          (methodHandle& result, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS);
+   static void lookup_instance_method_in_klasses (methodHandle& result, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS);
+   static void lookup_method_in_interfaces       (methodHandle& result, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS);
+-  
++
+   static int vtable_index_of_miranda_method(KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS);
+ 
+-  static void resolve_klass           (KlassHandle& result, constantPoolHandle  pool, int index, TRAPS); 
++  static void resolve_klass           (KlassHandle& result, constantPoolHandle  pool, int index, TRAPS);
+   static void resolve_klass_no_update (KlassHandle& result, constantPoolHandle pool, int index, TRAPS); // no update of constantPool entry
+-  
++
+   static void resolve_pool  (KlassHandle& resolved_klass, symbolHandle& method_name, symbolHandle& method_signature, KlassHandle& current_klass, constantPoolHandle pool, int index, TRAPS);
+-  
++
+   static void resolve_interface_method(methodHandle& resolved_method, KlassHandle resolved_klass, symbolHandle method_name, symbolHandle method_signature, KlassHandle current_klass, bool check_access, TRAPS);
+   static void resolve_method          (methodHandle& resolved_method, KlassHandle resolved_klass, symbolHandle method_name, symbolHandle method_signature, KlassHandle current_klass, bool check_access, TRAPS);
+-  
++
+   static void linktime_resolve_static_method    (methodHandle& resolved_method, KlassHandle resolved_klass, symbolHandle method_name, symbolHandle method_signature, KlassHandle current_klass, bool check_access, TRAPS);
+   static void linktime_resolve_special_method   (methodHandle& resolved_method, KlassHandle resolved_klass, symbolHandle method_name, symbolHandle method_signature, KlassHandle current_klass, bool check_access, TRAPS);
+   static void linktime_resolve_virtual_method   (methodHandle &resolved_method, KlassHandle resolved_klass, symbolHandle method_name, symbolHandle method_signature,KlassHandle current_klass, bool check_access, TRAPS);
+   static void linktime_resolve_interface_method (methodHandle& resolved_method, KlassHandle resolved_klass, symbolHandle method_name, symbolHandle method_signature, KlassHandle current_klass, bool check_access, TRAPS);
+-    
++
+   static void runtime_resolve_special_method    (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, KlassHandle current_klass, bool check_access, TRAPS);
+   static void runtime_resolve_virtual_method    (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, Handle recv, KlassHandle recv_klass, bool check_null_and_abstract, TRAPS);
+   static void runtime_resolve_interface_method  (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, Handle recv, KlassHandle recv_klass, bool check_null_and_abstract, TRAPS);
+-  
++
+   static void check_field_accessability   (KlassHandle ref_klass, KlassHandle resolved_klass, KlassHandle sel_klass, fieldDescriptor& fd, TRAPS);
+-  static void check_method_accessability  (KlassHandle ref_klass, KlassHandle resolved_klass, KlassHandle sel_klass, methodHandle sel_method, TRAPS);  
+-  
++  static void check_method_accessability  (KlassHandle ref_klass, KlassHandle resolved_klass, KlassHandle sel_klass, methodHandle sel_method, TRAPS);
++
+  public:
+   // constant pool resolving
+-  static void check_klass_accessability(KlassHandle ref_klass, KlassHandle sel_klass, TRAPS);  
++  static void check_klass_accessability(KlassHandle ref_klass, KlassHandle sel_klass, TRAPS);
+ 
+   // static resolving for all calls except interface calls
+   static void resolve_method          (methodHandle& method_result, KlassHandle& klass_result, constantPoolHandle pool, int index, TRAPS);
+   static void resolve_interface_method(methodHandle& method_result, KlassHandle& klass_result, constantPoolHandle pool, int index, TRAPS);
+-  
++
+   // runtime/static resolving for fields
+   static void resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, TRAPS);
+   // takes an extra bool argument "update_pool" to decide whether to update the constantPool during klass resolution.
+@@ -172,4 +169,3 @@
+ 
+   static void resolve_invoke         (CallInfo& result, Handle recv, constantPoolHandle pool, int index, Bytecodes::Code byte, TRAPS);
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/oopMapCache.cpp openjdk/hotspot/src/share/vm/interpreter/oopMapCache.cpp
+--- openjdk6/hotspot/src/share/vm/interpreter/oopMapCache.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/oopMapCache.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)oopMapCache.cpp	1.84 07/05/05 17:05:39 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -38,7 +35,7 @@
+   // Initialization
+   void fill(methodHandle method, int bci);
+   // fills the bit mask for native calls
+-  void fill_for_native(methodHandle method);  
++  void fill_for_native(methodHandle method);
+   void set_mask(CellTypeState* vars, CellTypeState* stack, int stack_top);
+ 
+   // Deallocate bit masks and initialize fields
+@@ -71,8 +68,8 @@
+   virtual void fill_stackmap_prolog       (int nof_gc_points);
+   virtual void fill_stackmap_epilog       ();
+   virtual void fill_stackmap_for_opcodes  (BytecodeStream *bcs,
+-                                           CellTypeState* vars, 
+-                                           CellTypeState* stack, 
++                                           CellTypeState* vars,
++                                           CellTypeState* stack,
+                                            int stack_top);
+   virtual void fill_init_vars             (GrowableArray<intptr_t> *init_vars);
+ 
+@@ -87,25 +84,25 @@
+ 
+ OopMapForCacheEntry::OopMapForCacheEntry(methodHandle method, int bci, OopMapCacheEntry* entry) : GenerateOopMap(method) {
+   _bci       = bci;
+-  _entry     = entry;  
++  _entry     = entry;
+   _stack_top = -1;
+ }
+ 
+ 
+-void OopMapForCacheEntry::compute_map(TRAPS) {    
++void OopMapForCacheEntry::compute_map(TRAPS) {
+   assert(!method()->is_native(), "cannot compute oop map for native methods");
+   // First check if it is a method where the stackmap is always empty
+   if (method()->code_size() == 0 || method()->max_locals() + method()->max_stack() == 0) {
+     _entry->set_mask_size(0);
+   } else {
+-    ResourceMark rm;    
++    ResourceMark rm;
+     GenerateOopMap::compute_map(CATCH);
+     result_for_basicblock(_bci);
+   }
+ }
+ 
+ 
+-bool OopMapForCacheEntry::possible_gc_point(BytecodeStream *bcs) {  
++bool OopMapForCacheEntry::possible_gc_point(BytecodeStream *bcs) {
+   return false; // We are not reporting any result. We call result_for_basicblock directly
+ }
+ 
+@@ -126,8 +123,8 @@
+ 
+ 
+ void OopMapForCacheEntry::fill_stackmap_for_opcodes(BytecodeStream *bcs,
+-                                                    CellTypeState* vars, 
+-                                                    CellTypeState* stack, 
++                                                    CellTypeState* vars,
++                                                    CellTypeState* stack,
+                                                     int stack_top) {
+   // Only interested in one specific bci
+   if (bcs->bci() == _bci) {
+@@ -177,7 +174,7 @@
+ 
+ bool InterpreterOopMap::is_empty() {
+   bool result = _method == NULL;
+-  assert(_method != NULL || (_bci == 0 && 
++  assert(_method != NULL || (_bci == 0 &&
+     (_mask_size == 0 || _mask_size == USHRT_MAX) &&
+     _bit_mask[0] == 0), "Should be completely empty");
+   return result;
+@@ -311,13 +308,13 @@
+ 
+   // Check if map is generated correctly
+   // (Use ?: operator to make sure all 'true' & 'false' are represented exactly the same so we can use == afterwards)
+-  if (TraceOopMapGeneration && Verbose) tty->print("Locals (%d): ", max_locals);  
++  if (TraceOopMapGeneration && Verbose) tty->print("Locals (%d): ", max_locals);
+ 
+   for(int i = 0; i < max_locals; i++) {
+     bool v1 = is_oop(i)               ? true : false;
+     bool v2 = vars[i].is_reference()  ? true : false;
+     assert(v1 == v2, "locals oop mask generation error");
+-    if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);      
++    if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
+ #ifdef ENABLE_ZAP_DEAD_LOCALS
+     bool v3 = is_dead(i)              ? true : false;
+     bool v4 = !vars[i].is_live()      ? true : false;
+@@ -331,7 +328,7 @@
+     bool v1 = is_oop(max_locals + j)  ? true : false;
+     bool v2 = stack[j].is_reference() ? true : false;
+     assert(v1 == v2, "stack oop mask generation error");
+-    if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);      
++    if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
+ #ifdef ENABLE_ZAP_DEAD_LOCALS
+     bool v3 = is_dead(max_locals + j) ? true : false;
+     bool v4 = !stack[j].is_live()     ? true : false;
+@@ -346,14 +343,14 @@
+ void OopMapCacheEntry::allocate_bit_mask() {
+   if (mask_size() > small_mask_limit) {
+     assert(_bit_mask[0] == 0, "bit mask should be new or just flushed");
+-    _bit_mask[0] = (intptr_t) 
++    _bit_mask[0] = (intptr_t)
+       NEW_C_HEAP_ARRAY(uintptr_t, mask_word_size());
+   }
+ }
+ 
+ void OopMapCacheEntry::deallocate_bit_mask() {
+   if (mask_size() > small_mask_limit && _bit_mask[0] != 0) {
+-    assert(!Thread::current()->resource_area()->contains((void*)_bit_mask[0]), 
++    assert(!Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
+       "This bit mask should not be in the resource area");
+     FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0]);
+     debug_only(_bit_mask[0] = 0;)
+@@ -383,7 +380,7 @@
+     fill_for_native(method);
+   } else {
+     EXCEPTION_MARK;
+-    OopMapForCacheEntry gen(method, bci, this);    
++    OopMapForCacheEntry gen(method, bci, this);
+     gen.compute_map(CATCH);
+   }
+   #ifdef ASSERT
+@@ -404,7 +401,7 @@
+   int word_index = 0;
+   uintptr_t value = 0;
+   uintptr_t mask = 1;
+-  
++
+   CellTypeState* cell = vars;
+   for (int entry_index = 0; entry_index < n_entries; entry_index++, mask <<= bits_per_entry, cell++) {
+     // store last word
+@@ -460,8 +457,8 @@
+ 
+ #endif
+ 
+-void InterpreterOopMap::resource_copy(OopMapCacheEntry* from) { 
+-  assert(_resource_allocate_bit_mask, 
++void InterpreterOopMap::resource_copy(OopMapCacheEntry* from) {
++  assert(_resource_allocate_bit_mask,
+     "Should not resource allocate the _bit_mask");
+   assert(from->method()->is_oop(), "MethodOop is bad");
+ 
+@@ -472,15 +469,15 @@
+ 
+   // Is the bit mask contained in the entry?
+   if (from->mask_size() <= small_mask_limit) {
+-    memcpy((void *)_bit_mask, (void *)from->_bit_mask, 
++    memcpy((void *)_bit_mask, (void *)from->_bit_mask,
+       mask_word_size() * BytesPerWord);
+   } else {
+     // The expectation is that this InterpreterOopMap is a recently created
+-    // and empty. It is used to get a copy of a cached entry. 
++    // and empty. It is used to get a copy of a cached entry.
+     // If the bit mask has a value, it should be in the
+     // resource area.
+-    assert(_bit_mask[0] == 0 || 
+-      Thread::current()->resource_area()->contains((void*)_bit_mask[0]), 
++    assert(_bit_mask[0] == 0 ||
++      Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
+       "The bit mask should have been allocated from a resource area");
+     // Allocate the bit_mask from a Resource area for performance.  Allocating
+     // from the C heap as is done for OopMapCache has a significant
+@@ -519,15 +516,15 @@
+   flush();
+   // Deallocate array
+   NOT_PRODUCT(_total_memory_usage -= sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
+-  FREE_C_HEAP_ARRAY(OopMapCacheEntry, _array);  
++  FREE_C_HEAP_ARRAY(OopMapCacheEntry, _array);
+ }
+ 
+-OopMapCacheEntry* OopMapCache::entry_at(int i) const { 
+-  return &_array[i % _size]; 
++OopMapCacheEntry* OopMapCache::entry_at(int i) const {
++  return &_array[i % _size];
+ }
+ 
+-void OopMapCache::flush() { 
+-  for (int i = 0; i < _size; i++) _array[i].flush(); 
++void OopMapCache::flush() {
++  for (int i = 0; i < _size; i++) _array[i].flush();
+ }
+ 
+ void OopMapCache::flush_obsolete_entries() {
+@@ -540,7 +537,7 @@
+ }
+ 
+ void OopMapCache::oop_iterate(OopClosure *blk) {
+-  for (int i = 0; i < _size; i++) _array[i].oop_iterate(blk); 
++  for (int i = 0; i < _size; i++) _array[i].oop_iterate(blk);
+ }
+ 
+ void OopMapCache::oop_iterate(OopClosure *blk, MemRegion mr) {
+@@ -548,12 +545,12 @@
+ }
+ 
+ void OopMapCache::verify() {
+-  for (int i = 0; i < _size; i++) _array[i].verify(); 
++  for (int i = 0; i < _size; i++) _array[i].verify();
+ }
+ 
+-void OopMapCache::lookup(methodHandle method, 
+-			 int bci,
+-			 InterpreterOopMap* entry_for) {
++void OopMapCache::lookup(methodHandle method,
++                         int bci,
++                         InterpreterOopMap* entry_for) {
+   MutexLocker x(&_mut);
+ 
+   OopMapCacheEntry* entry = NULL;
+@@ -561,7 +558,7 @@
+ 
+   // Search hashtable for match
+   int i;
+-  for(i = 0; i < _probe_depth; i++) {    
++  for(i = 0; i < _probe_depth; i++) {
+     entry = entry_at(probe + i);
+     if (entry->match(method, bci)) {
+       entry_for->resource_copy(entry);
+@@ -577,7 +574,7 @@
+     method->print_value(); tty->cr();
+   }
+ 
+-  // Entry is not in hashtable. 
++  // Entry is not in hashtable.
+   // Compute entry and return it
+ 
+   // First search for an empty slot
+@@ -593,7 +590,7 @@
+         // the cache to avoid pinning down the method.
+         entry->flush();
+       }
+-      return; 
++      return;
+     }
+   }
+ 
+@@ -607,12 +604,12 @@
+   //for(i = _probe_depth - 1; i > 0; i--) {
+   //  // Copying entry[i] = entry[i-1];
+   //  OopMapCacheEntry *to   = entry_at(probe + i);
+-  //  OopMapCacheEntry *from = entry_at(probe + i - 1);    
+-  //  to->copy(from);    
++  //  OopMapCacheEntry *from = entry_at(probe + i - 1);
++  //  to->copy(from);
+   // }
+ 
+   assert(method->is_method(), "gaga");
+- 
++
+   entry = entry_at(probe + 0);
+   entry->fill(method, bci);
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/oopMapCache.hpp openjdk/hotspot/src/share/vm/interpreter/oopMapCache.hpp
+--- openjdk6/hotspot/src/share/vm/interpreter/oopMapCache.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/oopMapCache.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)oopMapCache.hpp	1.57 07/05/05 17:05:39 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,27 +19,27 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A Cache for storing (method, bci) -> oopMap.
+-// The memory management system uses the cache when locating object 
++// The memory management system uses the cache when locating object
+ // references in an interpreted frame.
+ //
+ // OopMapCache's are allocated lazily per instanceKlass.
+ 
+-// The oopMap (InterpreterOopMap) is stored as a bit mask. If the 
++// The oopMap (InterpreterOopMap) is stored as a bit mask. If the
+ // bit_mask can fit into two words it is stored in
+ // the _bit_mask array, otherwise it is allocated on the heap.
+-// For OopMapCacheEntry the bit_mask is allocated in the C heap 
++// For OopMapCacheEntry the bit_mask is allocated in the C heap
+ // because these entries persist between garbage collections.
+ // For InterpreterOopMap the bit_mask is allocated in
+ // a resource area for better performance.  InterpreterOopMap
+ // should only be created and deleted during same garbage collection.
+ //
+-// If ENABBLE_ZAP_DEAD_LOCALS is defined, two bits are used 
++// If ENABLE_ZAP_DEAD_LOCALS is defined, two bits are used
+ // per entry instead of one. In all cases,
+-// the first bit is set to indicate oops as opposed to other 
++// the first bit is set to indicate oops as opposed to other
+ // values. If the second bit is available,
+ // it is set for dead values. We get the following encoding:
+ //
+@@ -63,12 +60,12 @@
+ 
+  public:
+   enum {
+-    N                = 2,                // the number of words reserved 
+-					 // for inlined mask storage
+-    small_mask_limit = N * BitsPerWord,  // the maximum number of bits 
+-					 // available for small masks,
+-					 // small_mask_limit can be set to 0 
+-					 // for testing bit_mask allocation
++    N                = 2,                // the number of words reserved
++                                         // for inlined mask storage
++    small_mask_limit = N * BitsPerWord,  // the maximum number of bits
++                                         // available for small masks,
++                                         // small_mask_limit can be set to 0
++                                         // for testing bit_mask allocation
+ 
+ #ifdef ENABLE_ZAP_DEAD_LOCALS
+     bits_per_entry   = 2,
+@@ -86,32 +83,32 @@
+   int            _expression_stack_size; // the size of the expression stack in slots
+ 
+  protected:
+-  intptr_t       _bit_mask[N];    // the bit mask if 
+-				  // mask_size <= small_mask_limit, 
+-				  // ptr to bit mask otherwise
+-				  // "protected" so that sub classes can 
+-				  // access it without using trickery in 
+-				  // methd bit_mask().
++  intptr_t       _bit_mask[N];    // the bit mask if
++                                  // mask_size <= small_mask_limit,
++                                  // ptr to bit mask otherwise
++                                  // "protected" so that sub classes can
++                                  // access it without using trickery in
++                                  // method bit_mask().
+ #ifdef ASSERT
+   bool _resource_allocate_bit_mask;
+ #endif
+ 
+   // access methods
+   methodOop      method() const                  { return _method; }
+-  void		 set_method(methodOop v)	 { _method = v; }
++  void           set_method(methodOop v)         { _method = v; }
+   int            bci() const                     { return _bci; }
+-  void		 set_bci(int v)			 { _bci = v; }
++  void           set_bci(int v)                  { _bci = v; }
+   int            mask_size() const               { return _mask_size; }
+-  void		 set_mask_size(int v)		 { _mask_size = v; }
++  void           set_mask_size(int v)            { _mask_size = v; }
+   int            number_of_entries() const       { return mask_size() / bits_per_entry; }
+   // Test bit mask size and return either the in-line bit mask or allocated
+   // bit mask.
+   uintptr_t*  bit_mask()                         { return (uintptr_t*)(mask_size() <= small_mask_limit ? (intptr_t)_bit_mask : _bit_mask[0]); }
+ 
+   // return the word size of _bit_mask.  mask_size() <= 4 * MAX_USHORT
+-  size_t mask_word_size() { 
+-    return (mask_size() + BitsPerWord - 1) / BitsPerWord; 
+-  } 
++  size_t mask_word_size() {
++    return (mask_size() + BitsPerWord - 1) / BitsPerWord;
++  }
+ 
+   uintptr_t entry_at(int offset)            { int i = offset * bits_per_entry; return bit_mask()[i / BitsPerWord] >> (i % BitsPerWord); }
+ 
+@@ -126,7 +123,7 @@
+   bool is_empty();
+ 
+   // Initialization
+-  void initialize();  
++  void initialize();
+ 
+  public:
+   InterpreterOopMap();
+@@ -142,7 +139,7 @@
+   void oop_iterate(OopClosure * blk);
+   void oop_iterate(OopClosure * blk, MemRegion mr);
+   void verify();
+-  void print();  
++  void print();
+ 
+   bool is_oop  (int offset)                      { return (entry_at(offset) & (1 << oop_bit_number )) != 0; }
+ 
+@@ -157,7 +154,7 @@
+  private:
+   enum { _size        = 32,     // Use fixed size for now
+          _probe_depth = 3       // probe depth in case of collisions
+-  };                          
++  };
+ 
+   OopMapCacheEntry* _array;
+ 
+@@ -191,4 +188,3 @@
+   // Returns total no. of bytes allocated as part of OopMapCache's
+   static long memory_usage()                     PRODUCT_RETURN0;
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/rewriter.cpp openjdk/hotspot/src/share/vm/interpreter/rewriter.cpp
+--- openjdk6/hotspot/src/share/vm/interpreter/rewriter.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/rewriter.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)rewriter.cpp	1.45 07/05/05 17:05:40 JVM"
+-#endif
+ /*
+  * Copyright 1998-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -71,7 +68,7 @@
+ // require that local 0 is never overwritten so it's available as an
+ // argument for registration.
+ 
+-void Rewriter::rewrite_Object_init(methodHandle method, TRAPS) {  
++void Rewriter::rewrite_Object_init(methodHandle method, TRAPS) {
+   RawBytecodeStream bcs(method);
+   while (!bcs.is_last_bytecode()) {
+     Bytecodes::Code opcode = bcs.raw_next();
+@@ -94,13 +91,13 @@
+         THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(),
+                   "can't overwrite local 0 in Object.<init>");
+         break;
+-    }  
++    }
+   }
+ }
+ 
+ 
+ // Rewrites a method given the index_map information
+-methodHandle Rewriter::rewrite_method(methodHandle method, intArray& index_map, TRAPS) {  
++methodHandle Rewriter::rewrite_method(methodHandle method, intArray& index_map, TRAPS) {
+ 
+   int nof_jsrs = 0;
+   bool has_monitor_bytecodes = false;
+@@ -115,12 +112,12 @@
+     // Bytecodes and their length
+     const address code_base = method->code_base();
+     const int code_length = method->code_size();
+-  
++
+     int bc_length;
+     for (int bci = 0; bci < code_length; bci += bc_length) {
+       address bcp = code_base + bci;
+       c = (Bytecodes::Code)(*bcp);
+-      
++
+       // Since we have the code, see if we can get the length
+       // directly. Some more complicated bytecodes will report
+       // a length of zero, meaning we need to make another method
+@@ -142,12 +139,12 @@
+       switch (c) {
+         case Bytecodes::_lookupswitch   : {
+ #ifndef CC_INTERP
+-          Bytecode_lookupswitch* bc = Bytecode_lookupswitch_at(bcp);        
++          Bytecode_lookupswitch* bc = Bytecode_lookupswitch_at(bcp);
+           bc->set_code(
+             bc->number_of_pairs() < BinarySwitchThreshold
+             ? Bytecodes::_fast_linearswitch
+             : Bytecodes::_fast_binaryswitch
+-          );        
++          );
+ #endif
+           break;
+         }
+@@ -164,13 +161,13 @@
+           break;
+         }
+         case Bytecodes::_jsr            : // fall through
+-        case Bytecodes::_jsr_w          : nof_jsrs++;                   break;      
++        case Bytecodes::_jsr_w          : nof_jsrs++;                   break;
+         case Bytecodes::_monitorenter   : // fall through
+         case Bytecodes::_monitorexit    : has_monitor_bytecodes = true; break;
+-      }  
++      }
+     }
+   }
+-  
++
+   // Update access flags
+   if (has_monitor_bytecodes) {
+     method->set_has_monitor_bytecodes();
+@@ -180,7 +177,7 @@
+   // have to be rewritten, so we run the oopMapGenerator on the method
+   if (nof_jsrs > 0) {
+     method->set_has_jsrs();
+-    ResolveOopMapConflicts romc(method);    
++    ResolveOopMapConflicts romc(method);
+     methodHandle original_method = method;
+     method = romc.do_potential_rewrite(CHECK_(methodHandle()));
+     if (method() != original_method()) {
+@@ -189,25 +186,25 @@
+       // will manifest itself in an easy recognizable form.
+       address bcp = original_method->bcp_from(0);
+       *bcp = (u1)Bytecodes::_shouldnotreachhere;
+-      int kind = AbstractInterpreter::method_kind(original_method);
++      int kind = Interpreter::method_kind(original_method);
+       original_method->set_interpreter_kind(kind);
+     }
+-       
++
+     // Update monitor matching info.
+     if (romc.monitor_safe()) {
+       method->set_guaranteed_monitor_matching();
+     }
+-  }  
++  }
+ 
+   // Setup method entrypoints for compiler and interpreter
+   method->link_method(method, CHECK_(methodHandle()));
+-      
++
+   return method;
+ }
+ 
+ 
+ void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
+-  // gather starting points  
++  // gather starting points
+   ResourceMark rm(THREAD);
+   constantPoolHandle pool (THREAD, klass->constants());
+   objArrayHandle methods  (THREAD, klass->methods());
+@@ -236,14 +233,14 @@
+       }
+     }
+   }
+-  
++
+   // rewrite methods
+   { int i = methods->length();
+-    while (i-- > 0) {       
++    while (i-- > 0) {
+       methodHandle m(THREAD, (methodOop)methods->obj_at(i));
+       m = rewrite_method(m, *index_map, CHECK);
+       // Method might have gotten rewritten.
+-      methods->obj_at_put(i, m());      
++      methods->obj_at_put(i, m());
+     }
+   }
+ }
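The rewrite_method() hunk above chooses, per lookupswitch site, between the _fast_linearswitch and _fast_binaryswitch pseudo-bytecodes based on the number of match pairs. A hedged sketch of just that decision follows; the threshold value 5 is, to the best of my knowledge, HotSpot's default for BinarySwitchThreshold, and the enum and helper are illustrative stand-ins rather than the real types.

  #include <cstdio>

  enum FastSwitch { fast_linearswitch, fast_binaryswitch };

  const int BinarySwitchThreshold = 5;  // assumed default of the -XX flag

  FastSwitch pick_switch_code(int number_of_pairs) {
    // Few pairs: a linear scan beats the setup cost of a binary search.
    return number_of_pairs < BinarySwitchThreshold ? fast_linearswitch
                                                   : fast_binaryswitch;
  }

  int main() {
    printf(" 3 pairs -> %s\n", pick_switch_code(3)  == fast_linearswitch ? "linear" : "binary");
    printf("12 pairs -> %s\n", pick_switch_code(12) == fast_linearswitch ? "linear" : "binary");
    return 0;
  }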
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/rewriter.hpp openjdk/hotspot/src/share/vm/interpreter/rewriter.hpp
+--- openjdk6/hotspot/src/share/vm/interpreter/rewriter.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/rewriter.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)rewriter.hpp	1.20 07/05/05 17:05:40 JVM"
+-#endif
+ /*
+  * Copyright 1998-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The Rewriter adds caches to the constant pool and rewrites bytecode indices
+@@ -38,4 +35,3 @@
+  public:
+   static void rewrite(instanceKlassHandle klass, TRAPS);
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/templateInterpreter.cpp openjdk/hotspot/src/share/vm/interpreter/templateInterpreter.cpp
+--- openjdk6/hotspot/src/share/vm/interpreter/templateInterpreter.cpp	1969-12-31 19:00:00.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/templateInterpreter.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -0,0 +1,597 @@
++/*
++ * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
++ * CA 95054 USA or visit www.sun.com if you need additional information or
++ * have any questions.
++ *
++ */
++
++#include "incls/_precompiled.incl"
++#include "incls/_templateInterpreter.cpp.incl"
++
++#ifndef CC_INTERP
++
++# define __ _masm->
++
++void TemplateInterpreter::initialize() {
++  if (_code != NULL) return;
++  // assertions
++  assert((int)Bytecodes::number_of_codes <= (int)DispatchTable::length,
++         "dispatch table too small");
++
++  AbstractInterpreter::initialize();
++
++  TemplateTable::initialize();
++
++  // generate interpreter
++  { ResourceMark rm;
++    TraceTime timer("Interpreter generation", TraceStartupTime);
++    int code_size = InterpreterCodeSize;
++    NOT_PRODUCT(code_size *= 4;)  // debug uses extra interpreter code space
++    _code = new StubQueue(new InterpreterCodeletInterface, code_size, NULL,
++                          "Interpreter");
++    InterpreterGenerator g(_code);
++    if (PrintInterpreter) print();
++  }
++
++  // initialize dispatch table
++  _active_table = _normal_table;
++}
++
++//------------------------------------------------------------------------------------------------------------------------
++// Implementation of EntryPoint
++
++EntryPoint::EntryPoint() {
++  assert(number_of_states == 9, "check the code below");
++  _entry[btos] = NULL;
++  _entry[ctos] = NULL;
++  _entry[stos] = NULL;
++  _entry[atos] = NULL;
++  _entry[itos] = NULL;
++  _entry[ltos] = NULL;
++  _entry[ftos] = NULL;
++  _entry[dtos] = NULL;
++  _entry[vtos] = NULL;
++}
++
++
++EntryPoint::EntryPoint(address bentry, address centry, address sentry, address aentry, address ientry, address lentry, address fentry, address dentry, address ventry) {
++  assert(number_of_states == 9, "check the code below");
++  _entry[btos] = bentry;
++  _entry[ctos] = centry;
++  _entry[stos] = sentry;
++  _entry[atos] = aentry;
++  _entry[itos] = ientry;
++  _entry[ltos] = lentry;
++  _entry[ftos] = fentry;
++  _entry[dtos] = dentry;
++  _entry[vtos] = ventry;
++}
++
++
++void EntryPoint::set_entry(TosState state, address entry) {
++  assert(0 <= state && state < number_of_states, "state out of bounds");
++  _entry[state] = entry;
++}
++
++
++address EntryPoint::entry(TosState state) const {
++  assert(0 <= state && state < number_of_states, "state out of bounds");
++  return _entry[state];
++}
++
++
++void EntryPoint::print() {
++  tty->print("[");
++  for (int i = 0; i < number_of_states; i++) {
++    if (i > 0) tty->print(", ");
++    tty->print(INTPTR_FORMAT, _entry[i]);
++  }
++  tty->print("]");
++}
++
++
++bool EntryPoint::operator == (const EntryPoint& y) {
++  int i = number_of_states;
++  while (i-- > 0) {
++    if (_entry[i] != y._entry[i]) return false;
++  }
++  return true;
++}
++
++
++//------------------------------------------------------------------------------------------------------------------------
++// Implementation of DispatchTable
++
++EntryPoint DispatchTable::entry(int i) const {
++  assert(0 <= i && i < length, "index out of bounds");
++  return
++    EntryPoint(
++      _table[btos][i],
++      _table[ctos][i],
++      _table[stos][i],
++      _table[atos][i],
++      _table[itos][i],
++      _table[ltos][i],
++      _table[ftos][i],
++      _table[dtos][i],
++      _table[vtos][i]
++    );
++}
++
++
++void DispatchTable::set_entry(int i, EntryPoint& entry) {
++  assert(0 <= i && i < length, "index out of bounds");
++  assert(number_of_states == 9, "check the code below");
++  _table[btos][i] = entry.entry(btos);
++  _table[ctos][i] = entry.entry(ctos);
++  _table[stos][i] = entry.entry(stos);
++  _table[atos][i] = entry.entry(atos);
++  _table[itos][i] = entry.entry(itos);
++  _table[ltos][i] = entry.entry(ltos);
++  _table[ftos][i] = entry.entry(ftos);
++  _table[dtos][i] = entry.entry(dtos);
++  _table[vtos][i] = entry.entry(vtos);
++}
++
++
++bool DispatchTable::operator == (DispatchTable& y) {
++  int i = length;
++  while (i-- > 0) {
++    EntryPoint t = y.entry(i); // for compiler compatibility (BugId 4150096)
++    if (!(entry(i) == t)) return false;
++  }
++  return true;
++}
++
++address    TemplateInterpreter::_remove_activation_entry                    = NULL;
++address    TemplateInterpreter::_remove_activation_preserving_args_entry    = NULL;
++
++
++address    TemplateInterpreter::_throw_ArrayIndexOutOfBoundsException_entry = NULL;
++address    TemplateInterpreter::_throw_ArrayStoreException_entry            = NULL;
++address    TemplateInterpreter::_throw_ArithmeticException_entry            = NULL;
++address    TemplateInterpreter::_throw_ClassCastException_entry             = NULL;
++address    TemplateInterpreter::_throw_NullPointerException_entry           = NULL;
++address    TemplateInterpreter::_throw_StackOverflowError_entry             = NULL;
++address    TemplateInterpreter::_throw_exception_entry                      = NULL;
++
++#ifndef PRODUCT
++EntryPoint TemplateInterpreter::_trace_code;
++#endif // !PRODUCT
++EntryPoint TemplateInterpreter::_return_entry[TemplateInterpreter::number_of_return_entries];
++EntryPoint TemplateInterpreter::_earlyret_entry;
++EntryPoint TemplateInterpreter::_deopt_entry [TemplateInterpreter::number_of_deopt_entries ];
++EntryPoint TemplateInterpreter::_continuation_entry;
++EntryPoint TemplateInterpreter::_safept_entry;
++
++address    TemplateInterpreter::_return_3_addrs_by_index[TemplateInterpreter::number_of_return_addrs];
++address    TemplateInterpreter::_return_5_addrs_by_index[TemplateInterpreter::number_of_return_addrs];
++
++DispatchTable TemplateInterpreter::_active_table;
++DispatchTable TemplateInterpreter::_normal_table;
++DispatchTable TemplateInterpreter::_safept_table;
++address    TemplateInterpreter::_wentry_point[DispatchTable::length];
++
++TemplateInterpreterGenerator::TemplateInterpreterGenerator(StubQueue* _code): AbstractInterpreterGenerator(_code) {
++  _unimplemented_bytecode    = NULL;
++  _illegal_bytecode_sequence = NULL;
++}
++
++static const BasicType types[Interpreter::number_of_result_handlers] = {
++  T_BOOLEAN,
++  T_CHAR   ,
++  T_BYTE   ,
++  T_SHORT  ,
++  T_INT    ,
++  T_LONG   ,
++  T_VOID   ,
++  T_FLOAT  ,
++  T_DOUBLE ,
++  T_OBJECT
++};
++
++void TemplateInterpreterGenerator::generate_all() {
++  AbstractInterpreterGenerator::generate_all();
++
++  { CodeletMark cm(_masm, "error exits");
++    _unimplemented_bytecode    = generate_error_exit("unimplemented bytecode");
++    _illegal_bytecode_sequence = generate_error_exit("illegal bytecode sequence - method not verified");
++  }
++
++#ifndef PRODUCT
++  if (TraceBytecodes) {
++    CodeletMark cm(_masm, "bytecode tracing support");
++    Interpreter::_trace_code =
++      EntryPoint(
++        generate_trace_code(btos),
++        generate_trace_code(ctos),
++        generate_trace_code(stos),
++        generate_trace_code(atos),
++        generate_trace_code(itos),
++        generate_trace_code(ltos),
++        generate_trace_code(ftos),
++        generate_trace_code(dtos),
++        generate_trace_code(vtos)
++      );
++  }
++#endif // !PRODUCT
++
++  { CodeletMark cm(_masm, "return entry points");
++    for (int i = 0; i < Interpreter::number_of_return_entries; i++) {
++      Interpreter::_return_entry[i] =
++        EntryPoint(
++          generate_return_entry_for(itos, i),
++          generate_return_entry_for(itos, i),
++          generate_return_entry_for(itos, i),
++          generate_return_entry_for(atos, i),
++          generate_return_entry_for(itos, i),
++          generate_return_entry_for(ltos, i),
++          generate_return_entry_for(ftos, i),
++          generate_return_entry_for(dtos, i),
++          generate_return_entry_for(vtos, i)
++        );
++    }
++  }
++
++  { CodeletMark cm(_masm, "earlyret entry points");
++    Interpreter::_earlyret_entry =
++      EntryPoint(
++        generate_earlyret_entry_for(btos),
++        generate_earlyret_entry_for(ctos),
++        generate_earlyret_entry_for(stos),
++        generate_earlyret_entry_for(atos),
++        generate_earlyret_entry_for(itos),
++        generate_earlyret_entry_for(ltos),
++        generate_earlyret_entry_for(ftos),
++        generate_earlyret_entry_for(dtos),
++        generate_earlyret_entry_for(vtos)
++      );
++  }
++
++  { CodeletMark cm(_masm, "deoptimization entry points");
++    for (int i = 0; i < Interpreter::number_of_deopt_entries; i++) {
++      Interpreter::_deopt_entry[i] =
++        EntryPoint(
++          generate_deopt_entry_for(itos, i),
++          generate_deopt_entry_for(itos, i),
++          generate_deopt_entry_for(itos, i),
++          generate_deopt_entry_for(atos, i),
++          generate_deopt_entry_for(itos, i),
++          generate_deopt_entry_for(ltos, i),
++          generate_deopt_entry_for(ftos, i),
++          generate_deopt_entry_for(dtos, i),
++          generate_deopt_entry_for(vtos, i)
++        );
++    }
++  }
++
++  { CodeletMark cm(_masm, "result handlers for native calls");
++    // The various result converter stublets.
++    int is_generated[Interpreter::number_of_result_handlers];
++    memset(is_generated, 0, sizeof(is_generated));
++
++    for (int i = 0; i < Interpreter::number_of_result_handlers; i++) {
++      BasicType type = types[i];
++      if (!is_generated[Interpreter::BasicType_as_index(type)]++) {
++        Interpreter::_native_abi_to_tosca[Interpreter::BasicType_as_index(type)] = generate_result_handler_for(type);
++      }
++    }
++  }
++
++  for (int j = 0; j < number_of_states; j++) {
++    const TosState states[] = {btos, ctos, stos, itos, ltos, ftos, dtos, atos, vtos};
++    Interpreter::_return_3_addrs_by_index[Interpreter::TosState_as_index(states[j])] = Interpreter::return_entry(states[j], 3);
++    Interpreter::_return_5_addrs_by_index[Interpreter::TosState_as_index(states[j])] = Interpreter::return_entry(states[j], 5);
++  }
++
++  { CodeletMark cm(_masm, "continuation entry points");
++    Interpreter::_continuation_entry =
++      EntryPoint(
++        generate_continuation_for(btos),
++        generate_continuation_for(ctos),
++        generate_continuation_for(stos),
++        generate_continuation_for(atos),
++        generate_continuation_for(itos),
++        generate_continuation_for(ltos),
++        generate_continuation_for(ftos),
++        generate_continuation_for(dtos),
++        generate_continuation_for(vtos)
++      );
++  }
++
++  { CodeletMark cm(_masm, "safepoint entry points");
++    Interpreter::_safept_entry =
++      EntryPoint(
++        generate_safept_entry_for(btos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
++        generate_safept_entry_for(ctos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
++        generate_safept_entry_for(stos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
++        generate_safept_entry_for(atos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
++        generate_safept_entry_for(itos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
++        generate_safept_entry_for(ltos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
++        generate_safept_entry_for(ftos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
++        generate_safept_entry_for(dtos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
++        generate_safept_entry_for(vtos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint))
++      );
++  }
++
++  { CodeletMark cm(_masm, "exception handling");
++    // (Note: this is not safepoint safe because thread may return to compiled code)
++    generate_throw_exception();
++  }
++
++  { CodeletMark cm(_masm, "throw exception entrypoints");
++    Interpreter::_throw_ArrayIndexOutOfBoundsException_entry = generate_ArrayIndexOutOfBounds_handler("java/lang/ArrayIndexOutOfBoundsException");
++    Interpreter::_throw_ArrayStoreException_entry            = generate_klass_exception_handler("java/lang/ArrayStoreException"                 );
++    Interpreter::_throw_ArithmeticException_entry            = generate_exception_handler("java/lang/ArithmeticException"           , "/ by zero");
++    Interpreter::_throw_ClassCastException_entry             = generate_ClassCastException_handler();
++    Interpreter::_throw_NullPointerException_entry           = generate_exception_handler("java/lang/NullPointerException"          , NULL       );
++    Interpreter::_throw_StackOverflowError_entry             = generate_StackOverflowError_handler();
++  }
++
++
++
++#define method_entry(kind)                                                                    \
++  { CodeletMark cm(_masm, "method entry point (kind = " #kind ")");                    \
++    Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind);  \
++  }
++
++  // all non-native method kinds
++  method_entry(zerolocals)
++  method_entry(zerolocals_synchronized)
++  method_entry(empty)
++  method_entry(accessor)
++  method_entry(abstract)
++  method_entry(java_lang_math_sin  )
++  method_entry(java_lang_math_cos  )
++  method_entry(java_lang_math_tan  )
++  method_entry(java_lang_math_abs  )
++  method_entry(java_lang_math_sqrt )
++  method_entry(java_lang_math_log  )
++  method_entry(java_lang_math_log10)
++
++  // all native method kinds (must be one contiguous block)
++  Interpreter::_native_entry_begin = Interpreter::code()->code_end();
++  method_entry(native)
++  method_entry(native_synchronized)
++  Interpreter::_native_entry_end = Interpreter::code()->code_end();
++
++#undef method_entry
++
++  // Bytecodes
++  set_entry_points_for_all_bytes();
++  set_safepoints_for_all_bytes();
++}
++
++//------------------------------------------------------------------------------------------------------------------------
++
++address TemplateInterpreterGenerator::generate_error_exit(const char* msg) {
++  address entry = __ pc();
++  __ stop(msg);
++  return entry;
++}
++
++
++//------------------------------------------------------------------------------------------------------------------------
++
++void TemplateInterpreterGenerator::set_entry_points_for_all_bytes() {
++  for (int i = 0; i < DispatchTable::length; i++) {
++    Bytecodes::Code code = (Bytecodes::Code)i;
++    if (Bytecodes::is_defined(code)) {
++      set_entry_points(code);
++    } else {
++      set_unimplemented(i);
++    }
++  }
++}
++
++
++void TemplateInterpreterGenerator::set_safepoints_for_all_bytes() {
++  for (int i = 0; i < DispatchTable::length; i++) {
++    Bytecodes::Code code = (Bytecodes::Code)i;
++    if (Bytecodes::is_defined(code)) Interpreter::_safept_table.set_entry(code, Interpreter::_safept_entry);
++  }
++}
++
++
++void TemplateInterpreterGenerator::set_unimplemented(int i) {
++  address e = _unimplemented_bytecode;
++  EntryPoint entry(e, e, e, e, e, e, e, e, e);
++  Interpreter::_normal_table.set_entry(i, entry);
++  Interpreter::_wentry_point[i] = _unimplemented_bytecode;
++}
++
++
++void TemplateInterpreterGenerator::set_entry_points(Bytecodes::Code code) {
++  CodeletMark cm(_masm, Bytecodes::name(code), code);
++  // initialize entry points
++  assert(_unimplemented_bytecode    != NULL, "should have been generated before");
++  assert(_illegal_bytecode_sequence != NULL, "should have been generated before");
++  address bep = _illegal_bytecode_sequence;
++  address cep = _illegal_bytecode_sequence;
++  address sep = _illegal_bytecode_sequence;
++  address aep = _illegal_bytecode_sequence;
++  address iep = _illegal_bytecode_sequence;
++  address lep = _illegal_bytecode_sequence;
++  address fep = _illegal_bytecode_sequence;
++  address dep = _illegal_bytecode_sequence;
++  address vep = _unimplemented_bytecode;
++  address wep = _unimplemented_bytecode;
++  // code for short & wide version of bytecode
++  if (Bytecodes::is_defined(code)) {
++    Template* t = TemplateTable::template_for(code);
++    assert(t->is_valid(), "just checking");
++    set_short_entry_points(t, bep, cep, sep, aep, iep, lep, fep, dep, vep);
++  }
++  if (Bytecodes::wide_is_defined(code)) {
++    Template* t = TemplateTable::template_for_wide(code);
++    assert(t->is_valid(), "just checking");
++    set_wide_entry_point(t, wep);
++  }
++  // set entry points
++  EntryPoint entry(bep, cep, sep, aep, iep, lep, fep, dep, vep);
++  Interpreter::_normal_table.set_entry(code, entry);
++  Interpreter::_wentry_point[code] = wep;
++}
++
++
++void TemplateInterpreterGenerator::set_wide_entry_point(Template* t, address& wep) {
++  assert(t->is_valid(), "template must exist");
++  assert(t->tos_in() == vtos, "only vtos tos_in supported for wide instructions");
++  wep = __ pc(); generate_and_dispatch(t);
++}
++
++
++void TemplateInterpreterGenerator::set_short_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
++  assert(t->is_valid(), "template must exist");
++  switch (t->tos_in()) {
++    case btos: vep = __ pc(); __ pop(btos); bep = __ pc(); generate_and_dispatch(t); break;
++    case ctos: vep = __ pc(); __ pop(ctos); cep = __ pc(); generate_and_dispatch(t); break;
++    case stos: vep = __ pc(); __ pop(stos); sep = __ pc(); generate_and_dispatch(t); break;
++    case atos: vep = __ pc(); __ pop(atos); aep = __ pc(); generate_and_dispatch(t); break;
++    case itos: vep = __ pc(); __ pop(itos); iep = __ pc(); generate_and_dispatch(t); break;
++    case ltos: vep = __ pc(); __ pop(ltos); lep = __ pc(); generate_and_dispatch(t); break;
++    case ftos: vep = __ pc(); __ pop(ftos); fep = __ pc(); generate_and_dispatch(t); break;
++    case dtos: vep = __ pc(); __ pop(dtos); dep = __ pc(); generate_and_dispatch(t); break;
++    case vtos: set_vtos_entry_points(t, bep, cep, sep, aep, iep, lep, fep, dep, vep);     break;
++    default  : ShouldNotReachHere();                                                 break;
++  }
++}
++
++
++//------------------------------------------------------------------------------------------------------------------------
++
++void TemplateInterpreterGenerator::generate_and_dispatch(Template* t, TosState tos_out) {
++  if (PrintBytecodeHistogram)                                    histogram_bytecode(t);
++#ifndef PRODUCT
++  // debugging code
++  if (CountBytecodes || TraceBytecodes || StopInterpreterAt > 0) count_bytecode();
++  if (PrintBytecodePairHistogram)                                histogram_bytecode_pair(t);
++  if (TraceBytecodes)                                            trace_bytecode(t);
++  if (StopInterpreterAt > 0)                                     stop_interpreter_at();
++  __ verify_FPU(1, t->tos_in());
++#endif // !PRODUCT
++  int step;
++  if (!t->does_dispatch()) {
++    step = t->is_wide() ? Bytecodes::wide_length_for(t->bytecode()) : Bytecodes::length_for(t->bytecode());
++    if (tos_out == ilgl) tos_out = t->tos_out();
++    // compute bytecode size
++    assert(step > 0, "just checkin'");
++    // setup stuff for dispatching next bytecode
++    if (ProfileInterpreter && VerifyDataPointer
++        && methodDataOopDesc::bytecode_has_profile(t->bytecode())) {
++      __ verify_method_data_pointer();
++    }
++    __ dispatch_prolog(tos_out, step);
++  }
++  // generate template
++  t->generate(_masm);
++  // advance
++  if (t->does_dispatch()) {
++#ifdef ASSERT
++    // make sure execution doesn't go beyond this point if code is broken
++    __ should_not_reach_here();
++#endif // ASSERT
++  } else {
++    // dispatch to next bytecode
++    __ dispatch_epilog(tos_out, step);
++  }
++}
++
++//------------------------------------------------------------------------------------------------------------------------
++// Entry points
++
++address TemplateInterpreter::return_entry(TosState state, int length) {
++  guarantee(0 <= length && length < Interpreter::number_of_return_entries, "illegal length");
++  return _return_entry[length].entry(state);
++}
++
++
++address TemplateInterpreter::deopt_entry(TosState state, int length) {
++  guarantee(0 <= length && length < Interpreter::number_of_deopt_entries, "illegal length");
++  return _deopt_entry[length].entry(state);
++}
++
++//------------------------------------------------------------------------------------------------------------------------
++// Support for invokes
++
++int TemplateInterpreter::TosState_as_index(TosState state) {
++  assert( state < number_of_states , "Invalid state in TosState_as_index");
++  assert(0 <= (int)state && (int)state < TemplateInterpreter::number_of_return_addrs, "index out of bounds");
++  return (int)state;
++}
++
++
++//------------------------------------------------------------------------------------------------------------------------
++// Safepoint support
++
++static inline void copy_table(address* from, address* to, int size) {
++  // Copy non-overlapping tables. The copy has to occur word wise for MT safety.
++  while (size-- > 0) *to++ = *from++;
++}
++
++void TemplateInterpreter::notice_safepoints() {
++  if (!_notice_safepoints) {
++    // switch to safepoint dispatch table
++    _notice_safepoints = true;
++    copy_table((address*)&_safept_table, (address*)&_active_table, sizeof(_active_table) / sizeof(address));
++  }
++}
++
++// switch from the dispatch table which notices safepoints back to the
++// normal dispatch table.  So that we can notice single stepping points,
++// keep the safepoint dispatch table if we are single stepping in JVMTI.
++// Note that the should_post_single_step test is exactly as fast as the
++// JvmtiExport::_enabled test and covers both cases.
++void TemplateInterpreter::ignore_safepoints() {
++  if (_notice_safepoints) {
++    if (!JvmtiExport::should_post_single_step()) {
++      // switch to normal dispatch table
++      _notice_safepoints = false;
++      copy_table((address*)&_normal_table, (address*)&_active_table, sizeof(_active_table) / sizeof(address));
++    }
++  }
++}
++
++// If deoptimization happens, this method returns the point at which to continue
++// in the interpreter. For calls (invokexxxx, newxxxx) the continuation is at next
++// bci and the top of stack is in eax/edx/FPU tos.
++// For putfield/getfield, put/getstatic, the continuation is at the same
++// bci and the TOS is on stack.
++
++// Note: deopt_entry(type, 0) means reexecute bytecode
++//       deopt_entry(type, length) means continue at next bytecode
++
++address TemplateInterpreter::continuation_for(methodOop method, address bcp, int callee_parameters, bool is_top_frame, bool& use_next_mdp) {
++  assert(method->contains(bcp), "just checkin'");
++  Bytecodes::Code code   = Bytecodes::java_code_at(bcp);
++  if (code == Bytecodes::_return) {
++      // This is used for deopt during registration of finalizers
++      // during Object.<init>.  We simply need to resume execution at
++      // the standard return vtos bytecode to pop the frame normally.
++      // reexecuting the real bytecode would cause double registration
++      // of the finalizable object.
++      assert(is_top_frame, "must be on top");
++      return _normal_table.entry(Bytecodes::_return).entry(vtos);
++  } else {
++    return AbstractInterpreter::continuation_for(method, bcp, callee_parameters, is_top_frame, use_next_mdp);
++  }
++}
++
++#endif // !CC_INTERP
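notice_safepoints() and ignore_safepoints() in the new file above swap the entire active dispatch table, one word at a time, so that every interpreter thread starts (or stops) trapping at its very next bytecode dispatch. A toy model of the mechanism follows; the two-slot tables and handler names are assumptions for illustration.

  #include <cstdio>

  typedef void (*handler)();

  void normal_op() { printf("execute bytecode\n"); }
  void safept_op() { printf("check safepoint, then execute\n"); }

  handler normal_table[2] = { normal_op, normal_op };
  handler safept_table[2] = { safept_op, safept_op };
  handler active_table[2] = { normal_op, normal_op };

  // Word-wise copy, as in the patch's copy_table(): a concurrent reader of
  // any slot sees either the old handler or the new one, never a torn value.
  void copy_table(handler* from, handler* to, int size) {
    while (size-- > 0) *to++ = *from++;
  }

  int main() {
    active_table[0]();                          // normal dispatch
    copy_table(safept_table, active_table, 2);  // notice_safepoints()
    active_table[0]();                          // now checks for a safepoint first
    copy_table(normal_table, active_table, 2);  // ignore_safepoints()
    active_table[0]();
    return 0;
  }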
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp openjdk/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp
+--- openjdk6/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp	1969-12-31 19:00:00.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -0,0 +1,90 @@
++/*
++ * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
++ * CA 95054 USA or visit www.sun.com if you need additional information or
++ * have any questions.
++ *
++ */
++
++// This file contains the platform-independent parts
++// of the template interpreter generator.
++
++#ifndef CC_INTERP
++
++class TemplateInterpreterGenerator: public AbstractInterpreterGenerator {
++ protected:
++
++  // entry points for shared code sequence
++  address _unimplemented_bytecode;
++  address _illegal_bytecode_sequence;
++
++  // shared code sequences
++  // Converter for native abi result to tosca result
++  address generate_result_handler_for(BasicType type);
++  address generate_slow_signature_handler();
++  address generate_error_exit(const char* msg);
++  address generate_StackOverflowError_handler();
++  address generate_exception_handler(const char* name, const char* message) {
++    return generate_exception_handler_common(name, message, false);
++  }
++  address generate_klass_exception_handler(const char* name) {
++    return generate_exception_handler_common(name, NULL, true);
++  }
++  address generate_exception_handler_common(const char* name, const char* message, bool pass_oop);
++  address generate_ClassCastException_handler();
++  address generate_ArrayIndexOutOfBounds_handler(const char* name);
++  address generate_continuation_for(TosState state);
++  address generate_return_entry_for(TosState state, int step);
++  address generate_earlyret_entry_for(TosState state);
++  address generate_deopt_entry_for(TosState state, int step);
++  address generate_safept_entry_for(TosState state, address runtime_entry);
++  void    generate_throw_exception();
++
++  // entry point generator
++//   address generate_method_entry(AbstractInterpreter::MethodKind kind);
++
++  // Instruction generation
++  void generate_and_dispatch (Template* t, TosState tos_out = ilgl);
++  void set_vtos_entry_points (Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep);
++  void set_short_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep);
++  void set_wide_entry_point  (Template* t, address& wep);
++
++  void set_entry_points(Bytecodes::Code code);
++  void set_unimplemented(int i);
++  void set_entry_points_for_all_bytes();
++  void set_safepoints_for_all_bytes();
++
++  // Helpers for generate_and_dispatch
++  address generate_trace_code(TosState state)   PRODUCT_RETURN0;
++  void count_bytecode()                         PRODUCT_RETURN;
++  void histogram_bytecode(Template* t)          PRODUCT_RETURN;
++  void histogram_bytecode_pair(Template* t)     PRODUCT_RETURN;
++  void trace_bytecode(Template* t)              PRODUCT_RETURN;
++  void stop_interpreter_at()                    PRODUCT_RETURN;
++
++  void generate_all();
++
++ public:
++  TemplateInterpreterGenerator(StubQueue* _code);
++
++  #include "incls/_templateInterpreterGenerator_pd.hpp.incl"
++
++};
++
++#endif // !CC_INTERP
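Several hooks in the generator header above (generate_trace_code, count_bytecode, histogram_bytecode, and so on) are declared with PRODUCT_RETURN / PRODUCT_RETURN0, HotSpot's convention for debug-only helpers that collapse to empty inline stubs in optimized product builds. A sketch of the pattern follows; the macro bodies below are assumptions reconstructed from memory, not copied from the patch.

  #include <cstdio>

  #ifdef PRODUCT
  #define PRODUCT_RETURN  {}              // product build: empty inline body (assumed)
  #define PRODUCT_RETURN0 { return 0; }   // product build: dummy "return 0" (assumed)
  #else
  #define PRODUCT_RETURN                  // debug build: plain declaration;
  #define PRODUCT_RETURN0                 // the real body lives in the .cpp
  #endif

  struct Generator {
    void count_bytecode() PRODUCT_RETURN;   // statistics hook
    int  trace_code()     PRODUCT_RETURN0;  // tracing hook
  };

  #ifndef PRODUCT
  void Generator::count_bytecode() { printf("counted a bytecode\n"); }
  int  Generator::trace_code()     { printf("tracing\n"); return 1; }
  #endif

  int main() {
    Generator g;
    g.count_bytecode();   // becomes an empty inline call under -DPRODUCT
    return g.trace_code() ? 0 : 0;
  }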
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/templateInterpreter.hpp openjdk/hotspot/src/share/vm/interpreter/templateInterpreter.hpp
+--- openjdk6/hotspot/src/share/vm/interpreter/templateInterpreter.hpp	1969-12-31 19:00:00.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/templateInterpreter.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -0,0 +1,177 @@
++/*
++ * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
++ * CA 95054 USA or visit www.sun.com if you need additional information or
++ * have any questions.
++ *
++ */
++
++// This file contains the platform-independent parts
++// of the template interpreter and the template interpreter generator.
++
++#ifndef CC_INTERP
++
++//------------------------------------------------------------------------------------------------------------------------
++// A little wrapper class to group tosca-specific entry points into a unit.
++// (tosca = Top-Of-Stack CAche)
++
++class EntryPoint VALUE_OBJ_CLASS_SPEC {
++ private:
++  address _entry[number_of_states];
++
++ public:
++  // Construction
++  EntryPoint();
++  EntryPoint(address bentry, address centry, address sentry, address aentry, address ientry, address lentry, address fentry, address dentry, address ventry);
++
++  // Attributes
++  address entry(TosState state) const;                // return target address for a given tosca state
++  void    set_entry(TosState state, address entry);   // set    target address for a given tosca state
++  void    print();
++
++  // Comparison
++  bool operator == (const EntryPoint& y);             // for debugging only
++};
++
++
++//------------------------------------------------------------------------------------------------------------------------
++// A little wrapper class to group tosca-specific dispatch tables into a unit.
++
++class DispatchTable VALUE_OBJ_CLASS_SPEC {
++ public:
++  enum { length = 1 << BitsPerByte };                 // an entry point for each byte value (also for undefined bytecodes)
++
++ private:
++  address _table[number_of_states][length];           // dispatch tables, indexed by tosca and bytecode
++
++ public:
++  // Attributes
++  EntryPoint entry(int i) const;                      // return entry point for a given bytecode i
++  void       set_entry(int i, EntryPoint& entry);     // set    entry point for a given bytecode i
++  address*   table_for(TosState state)          { return _table[state]; }
++  address*   table_for()                        { return table_for((TosState)0); }
++  int        distance_from(address *table)      { return table - table_for(); }
++  int        distance_from(TosState state)      { return distance_from(table_for(state)); }
++
++  // Comparison
++  bool operator == (DispatchTable& y);                // for debugging only
++};
++
++class TemplateInterpreter: public AbstractInterpreter {
++  friend class VMStructs;
++  friend class InterpreterMacroAssembler;
++  friend class TemplateInterpreterGenerator;
++  friend class TemplateTable;
++  // friend class Interpreter;
++ public:
++
++  enum MoreConstants {
++    number_of_return_entries  = 9,                              // number of return entry points
++    number_of_deopt_entries   = 9,                              // number of deoptimization entry points
++    number_of_return_addrs    = 9                              // number of return addresses
++  };
++
++ protected:
++
++  static address    _throw_ArrayIndexOutOfBoundsException_entry;
++  static address    _throw_ArrayStoreException_entry;
++  static address    _throw_ArithmeticException_entry;
++  static address    _throw_ClassCastException_entry;
++  static address    _throw_NullPointerException_entry;
++  static address    _throw_exception_entry;
++
++  static address    _throw_StackOverflowError_entry;
++
++  static address    _remove_activation_entry;                   // continuation address if an exception is not handled by current frame
++#ifdef HOTSWAP
++  static address    _remove_activation_preserving_args_entry;   // continuation address when current frame is being popped
++#endif // HOTSWAP
++
++#ifndef PRODUCT
++  static EntryPoint _trace_code;
++#endif // !PRODUCT
++  static EntryPoint _return_entry[number_of_return_entries];    // entry points to return to from a call
++  static EntryPoint _earlyret_entry;                            // entry point to return early from a call
++  static EntryPoint _deopt_entry[number_of_deopt_entries];      // entry points to return to from a deoptimization
++  static EntryPoint _continuation_entry;
++  static EntryPoint _safept_entry;
++
++  static address    _return_3_addrs_by_index[number_of_return_addrs];     // for invokevirtual   return entries
++  static address    _return_5_addrs_by_index[number_of_return_addrs];     // for invokeinterface return entries
++
++  static DispatchTable _active_table;                           // the active    dispatch table (used by the interpreter for dispatch)
++  static DispatchTable _normal_table;                           // the normal    dispatch table (used to set the active table in normal mode)
++  static DispatchTable _safept_table;                           // the safepoint dispatch table (used to set the active table for safepoints)
++  static address       _wentry_point[DispatchTable::length];    // wide instructions only (vtos tosca always)
++
++
++ public:
++  // Initialization/debugging
++  static void       initialize();
++  // this only returns whether a pc is within generated code for the interpreter.
++  static bool       contains(address pc)                        { return _code != NULL && _code->contains(pc); }
++
++ public:
++
++  static address    remove_activation_early_entry(TosState state) { return _earlyret_entry.entry(state); }
++#ifdef HOTSWAP
++  static address    remove_activation_preserving_args_entry()   { return _remove_activation_preserving_args_entry; }
++#endif // HOTSWAP
++
++  static address    remove_activation_entry()                   { return _remove_activation_entry; }
++  static address    throw_exception_entry()                     { return _throw_exception_entry; }
++  static address    throw_ArithmeticException_entry()           { return _throw_ArithmeticException_entry; }
++  static address    throw_NullPointerException_entry()          { return _throw_NullPointerException_entry; }
++  static address    throw_StackOverflowError_entry()            { return _throw_StackOverflowError_entry; }
++
++  // Code generation
++#ifndef PRODUCT
++  static address    trace_code    (TosState state)              { return _trace_code.entry(state); }
++#endif // !PRODUCT
++  static address    continuation  (TosState state)              { return _continuation_entry.entry(state); }
++  static address*   dispatch_table(TosState state)              { return _active_table.table_for(state); }
++  static address*   dispatch_table()                            { return _active_table.table_for(); }
++  static int        distance_from_dispatch_table(TosState state){ return _active_table.distance_from(state); }
++  static address*   normal_table(TosState state)                { return _normal_table.table_for(state); }
++  static address*   normal_table()                              { return _normal_table.table_for(); }
++
++  // Support for invokes
++  static address*   return_3_addrs_by_index_table()             { return _return_3_addrs_by_index; }
++  static address*   return_5_addrs_by_index_table()             { return _return_5_addrs_by_index; }
++  static int        TosState_as_index(TosState state);          // computes index into return_3_entry_by_index table
++
++  static address    return_entry  (TosState state, int length);
++  static address    deopt_entry   (TosState state, int length);
++
++  // Safepoint support
++  static void       notice_safepoints();                        // stops the thread when reaching a safepoint
++  static void       ignore_safepoints();                        // ignores safepoints
++
++  // Deoptimization support
++  static address    continuation_for(methodOop method,
++                                     address bcp,
++                                     int callee_parameters,
++                                     bool is_top_frame,
++                                     bool& use_next_mdp);
++
++#include "incls/_templateInterpreter_pd.hpp.incl"
++
++};
++
++#endif // !CC_INTERP
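DispatchTable in the header above is a [number_of_states][256] matrix of code addresses: the interpreter keeps a pointer to the row for the current top-of-stack state and dispatches with a single indexed load. The sketch below models that indexing and the pointer arithmetic behind distance_from(); the handler functions and toy setup are assumptions.

  #include <cstdio>

  enum TosState { vtos, itos, number_of_states };
  enum { length = 256 };               // one slot per possible byte value

  typedef void (*handler)();
  void do_nop()      { printf("nop\n"); }
  void do_iconst_0() { printf("push int 0\n"); }

  handler table[number_of_states][length];

  handler* table_for(TosState s) { return table[s]; }

  // Row offset in slots, as distance_from() computes for the assembler.
  long distance_from(TosState s) { return (long)(table_for(s) - table_for(vtos)); }

  int main() {
    for (int i = 0; i < length; i++)
      table[vtos][i] = table[itos][i] = do_nop;
    table[vtos][0x03] = do_iconst_0;   // 0x03 is the iconst_0 opcode

    unsigned char bytecode = 0x03;
    table_for(vtos)[bytecode]();       // one load, one indirect jump
    printf("itos row offset: %ld slots\n", distance_from(itos));
    return 0;
  }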
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/templateTable.cpp openjdk/hotspot/src/share/vm/interpreter/templateTable.cpp
+--- openjdk6/hotspot/src/share/vm/interpreter/templateTable.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/templateTable.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)templateTable.cpp	1.108 07/05/05 17:05:37 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -254,8 +251,8 @@
+   const int  disp = 1 << Template::does_dispatch_bit;
+   const int  clvm = 1 << Template::calls_vm_bit;
+   const int  iswd = 1 << Template::wide_bit;
+-  //                                    interpr. templates                                                  
+-  // Java spec bytecodes                ubcp|disp|clvm|iswd  in    out   generator             argument     
++  //                                    interpr. templates
++  // Java spec bytecodes                ubcp|disp|clvm|iswd  in    out   generator             argument
+   def(Bytecodes::_nop                 , ____|____|____|____, vtos, vtos, nop                 ,  _           );
+   def(Bytecodes::_aconst_null         , ____|____|____|____, vtos, atos, aconst_null         ,  _           );
+   def(Bytecodes::_iconst_m1           , ____|____|____|____, vtos, itos, iconst              , -1           );
+@@ -401,9 +398,9 @@
+   def(Bytecodes::_d2i                 , ____|____|____|____, dtos, itos, convert             ,  _           );
+   def(Bytecodes::_d2l                 , ____|____|____|____, dtos, ltos, convert             ,  _           );
+   def(Bytecodes::_d2f                 , ____|____|____|____, dtos, ftos, convert             ,  _           );
+-  def(Bytecodes::_i2b	              , ____|____|____|____, itos, itos, convert             ,  _           );
+-  def(Bytecodes::_i2c	              , ____|____|____|____, itos, itos, convert             ,  _           );
+-  def(Bytecodes::_i2s	              , ____|____|____|____, itos, itos, convert             ,  _           );
++  def(Bytecodes::_i2b                 , ____|____|____|____, itos, itos, convert             ,  _           );
++  def(Bytecodes::_i2c                 , ____|____|____|____, itos, itos, convert             ,  _           );
++  def(Bytecodes::_i2s                 , ____|____|____|____, itos, itos, convert             ,  _           );
+   def(Bytecodes::_lcmp                , ____|____|____|____, ltos, itos, lcmp                ,  _           );
+   def(Bytecodes::_fcmpl               , ____|____|____|____, ftos, itos, float_cmp           , -1           );
+   def(Bytecodes::_fcmpg               , ____|____|____|____, ftos, itos, float_cmp           ,  1           );
+@@ -415,25 +412,25 @@
+   def(Bytecodes::_ifge                , ubcp|____|clvm|____, itos, vtos, if_0cmp             , greater_equal);
+   def(Bytecodes::_ifgt                , ubcp|____|clvm|____, itos, vtos, if_0cmp             , greater      );
+   def(Bytecodes::_ifle                , ubcp|____|clvm|____, itos, vtos, if_0cmp             , less_equal   );
+-  def(Bytecodes::_if_icmpeq	      , ubcp|____|clvm|____, itos, vtos, if_icmp             , equal        );
+-  def(Bytecodes::_if_icmpne	      , ubcp|____|clvm|____, itos, vtos, if_icmp             , not_equal    );
+-  def(Bytecodes::_if_icmplt	      , ubcp|____|clvm|____, itos, vtos, if_icmp             , less         );
++  def(Bytecodes::_if_icmpeq           , ubcp|____|clvm|____, itos, vtos, if_icmp             , equal        );
++  def(Bytecodes::_if_icmpne           , ubcp|____|clvm|____, itos, vtos, if_icmp             , not_equal    );
++  def(Bytecodes::_if_icmplt           , ubcp|____|clvm|____, itos, vtos, if_icmp             , less         );
+   def(Bytecodes::_if_icmpge           , ubcp|____|clvm|____, itos, vtos, if_icmp             , greater_equal);
+-  def(Bytecodes::_if_icmpgt	      , ubcp|____|clvm|____, itos, vtos, if_icmp             , greater      );
+-  def(Bytecodes::_if_icmple	      , ubcp|____|clvm|____, itos, vtos, if_icmp             , less_equal   );
+-  def(Bytecodes::_if_acmpeq	      , ubcp|____|clvm|____, atos, vtos, if_acmp             , equal        );
+-  def(Bytecodes::_if_acmpne	      , ubcp|____|clvm|____, atos, vtos, if_acmp             , not_equal    );
+-  def(Bytecodes::_goto	              , ubcp|disp|clvm|____, vtos, vtos, _goto               ,  _           );
+-  def(Bytecodes::_jsr	              , ubcp|disp|____|____, vtos, vtos, jsr                 ,  _           ); // result is not an oop, so do not transition to atos
+-  def(Bytecodes::_ret	              , ubcp|disp|____|____, vtos, vtos, ret                 ,  _           );
++  def(Bytecodes::_if_icmpgt           , ubcp|____|clvm|____, itos, vtos, if_icmp             , greater      );
++  def(Bytecodes::_if_icmple           , ubcp|____|clvm|____, itos, vtos, if_icmp             , less_equal   );
++  def(Bytecodes::_if_acmpeq           , ubcp|____|clvm|____, atos, vtos, if_acmp             , equal        );
++  def(Bytecodes::_if_acmpne           , ubcp|____|clvm|____, atos, vtos, if_acmp             , not_equal    );
++  def(Bytecodes::_goto                , ubcp|disp|clvm|____, vtos, vtos, _goto               ,  _           );
++  def(Bytecodes::_jsr                 , ubcp|disp|____|____, vtos, vtos, jsr                 ,  _           ); // result is not an oop, so do not transition to atos
++  def(Bytecodes::_ret                 , ubcp|disp|____|____, vtos, vtos, ret                 ,  _           );
+   def(Bytecodes::_tableswitch         , ubcp|disp|____|____, itos, vtos, tableswitch         ,  _           );
+   def(Bytecodes::_lookupswitch        , ubcp|disp|____|____, itos, itos, lookupswitch        ,  _           );
+-  def(Bytecodes::_ireturn	      , ____|disp|clvm|____, itos, itos, _return             , itos         );
+-  def(Bytecodes::_lreturn	      , ____|disp|clvm|____, ltos, ltos, _return             , ltos         );
+-  def(Bytecodes::_freturn	      , ____|disp|clvm|____, ftos, ftos, _return             , ftos         );
+-  def(Bytecodes::_dreturn	      , ____|disp|clvm|____, dtos, dtos, _return             , dtos         );
+-  def(Bytecodes::_areturn	      , ____|disp|clvm|____, atos, atos, _return             , atos         );
+-  def(Bytecodes::_return	      , ____|disp|clvm|____, vtos, vtos, _return             , vtos         );
++  def(Bytecodes::_ireturn             , ____|disp|clvm|____, itos, itos, _return             , itos         );
++  def(Bytecodes::_lreturn             , ____|disp|clvm|____, ltos, ltos, _return             , ltos         );
++  def(Bytecodes::_freturn             , ____|disp|clvm|____, ftos, ftos, _return             , ftos         );
++  def(Bytecodes::_dreturn             , ____|disp|clvm|____, dtos, dtos, _return             , dtos         );
++  def(Bytecodes::_areturn             , ____|disp|clvm|____, atos, atos, _return             , atos         );
++  def(Bytecodes::_return              , ____|disp|clvm|____, vtos, vtos, _return             , vtos         );
+   def(Bytecodes::_getstatic           , ubcp|____|clvm|____, vtos, vtos, getstatic           ,  1           );
+   def(Bytecodes::_putstatic           , ubcp|____|clvm|____, vtos, vtos, putstatic           ,  2           );
+   def(Bytecodes::_getfield            , ubcp|____|clvm|____, vtos, vtos, getfield            ,  1           );
+@@ -443,21 +440,21 @@
+   def(Bytecodes::_invokestatic        , ubcp|disp|clvm|____, vtos, vtos, invokestatic        ,  1           );
+   def(Bytecodes::_invokeinterface     , ubcp|disp|clvm|____, vtos, vtos, invokeinterface     ,  1           );
+   def(Bytecodes::_new                 , ubcp|____|clvm|____, vtos, atos, _new                ,  _           );
+-  def(Bytecodes::_newarray	      , ubcp|____|clvm|____, itos, atos, newarray            ,  _           );
+-  def(Bytecodes::_anewarray	      , ubcp|____|clvm|____, itos, atos, anewarray           ,  _           );
++  def(Bytecodes::_newarray            , ubcp|____|clvm|____, itos, atos, newarray            ,  _           );
++  def(Bytecodes::_anewarray           , ubcp|____|clvm|____, itos, atos, anewarray           ,  _           );
+   def(Bytecodes::_arraylength         , ____|____|____|____, atos, itos, arraylength         ,  _           );
+-  def(Bytecodes::_athrow	      , ____|disp|____|____, atos, vtos, athrow              ,  _           );
+-  def(Bytecodes::_checkcast	      , ubcp|____|clvm|____, atos, atos, checkcast           ,  _           );
+-  def(Bytecodes::_instanceof	      , ubcp|____|clvm|____, atos, itos, instanceof          ,  _           );
++  def(Bytecodes::_athrow              , ____|disp|____|____, atos, vtos, athrow              ,  _           );
++  def(Bytecodes::_checkcast           , ubcp|____|clvm|____, atos, atos, checkcast           ,  _           );
++  def(Bytecodes::_instanceof          , ubcp|____|clvm|____, atos, itos, instanceof          ,  _           );
+   def(Bytecodes::_monitorenter        , ____|disp|clvm|____, atos, vtos, monitorenter        ,  _           );
+   def(Bytecodes::_monitorexit         , ____|____|clvm|____, atos, vtos, monitorexit         ,  _           );
+-  def(Bytecodes::_wide	              , ubcp|disp|____|____, vtos, vtos, wide                ,  _           );
++  def(Bytecodes::_wide                , ubcp|disp|____|____, vtos, vtos, wide                ,  _           );
+   def(Bytecodes::_multianewarray      , ubcp|____|clvm|____, vtos, atos, multianewarray      ,  _           );
+-  def(Bytecodes::_ifnull	      , ubcp|____|clvm|____, atos, vtos, if_nullcmp          , equal        );
+-  def(Bytecodes::_ifnonnull	      , ubcp|____|clvm|____, atos, vtos, if_nullcmp          , not_equal    );
+-  def(Bytecodes::_goto_w	      , ubcp|____|clvm|____, vtos, vtos, goto_w              ,  _           );
+-  def(Bytecodes::_jsr_w	              , ubcp|____|____|____, vtos, vtos, jsr_w               ,  _           );
+-     
++  def(Bytecodes::_ifnull              , ubcp|____|clvm|____, atos, vtos, if_nullcmp          , equal        );
++  def(Bytecodes::_ifnonnull           , ubcp|____|clvm|____, atos, vtos, if_nullcmp          , not_equal    );
++  def(Bytecodes::_goto_w              , ubcp|____|clvm|____, vtos, vtos, goto_w              ,  _           );
++  def(Bytecodes::_jsr_w               , ubcp|____|____|____, vtos, vtos, jsr_w               ,  _           );
++
+   // wide Java spec bytecodes
+   def(Bytecodes::_iload               , ubcp|____|____|iswd, vtos, itos, wide_iload          ,  _           );
+   def(Bytecodes::_lload               , ubcp|____|____|iswd, vtos, ltos, wide_lload          ,  _           );
+@@ -470,9 +467,9 @@
+   def(Bytecodes::_dstore              , ubcp|____|____|iswd, vtos, vtos, wide_dstore         ,  _           );
+   def(Bytecodes::_astore              , ubcp|____|____|iswd, vtos, vtos, wide_astore         ,  _           );
+   def(Bytecodes::_iinc                , ubcp|____|____|iswd, vtos, vtos, wide_iinc           ,  _           );
+-  def(Bytecodes::_ret	              , ubcp|disp|____|iswd, vtos, vtos, wide_ret            ,  _           );
++  def(Bytecodes::_ret                 , ubcp|disp|____|iswd, vtos, vtos, wide_ret            ,  _           );
+   def(Bytecodes::_breakpoint          , ubcp|disp|clvm|____, vtos, vtos, _breakpoint         ,  _           );
+-  
++
+   // JVM bytecodes
+   def(Bytecodes::_fast_agetfield      , ubcp|____|____|____, atos, atos, fast_accessfield    ,  atos        );
+   def(Bytecodes::_fast_bgetfield      , ubcp|____|____|____, atos, itos, fast_accessfield    ,  itos        );
+@@ -535,7 +532,7 @@
+ }
+ 
+ 
+-void TemplateTable::unimplemented_bc() {  
++void TemplateTable::unimplemented_bc() {
+   _masm->unimplemented( Bytecodes::name(_desc->bytecode()));
+ }
+ #endif /* !CC_INTERP */
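Each def(...) row in the table above encodes four template properties as single bits OR-ed together (ubcp|disp|clvm|iswd), with ____ serving as a zero placeholder so every column stays aligned and readable. A small sketch of that encoding follows; disp, clvm and iswd follow Template's enum as quoted in the hunk, while uses_bcp_bit is inferred and the query helpers and main() are illustrative.

  #include <cstdio>

  enum {
    uses_bcp_bit,        // template reads the bytecode pointer (inferred name)
    does_dispatch_bit,   // template performs its own dispatch
    calls_vm_bit,        // template may call into the VM
    wide_bit             // template is the wide variant
  };

  const int ____ = 0;
  const int ubcp = 1 << uses_bcp_bit;
  const int disp = 1 << does_dispatch_bit;
  const int clvm = 1 << calls_vm_bit;
  const int iswd = 1 << wide_bit;

  bool does_dispatch(int flags) { return (flags & disp) != 0; }
  bool calls_vm(int flags)      { return (flags & clvm) != 0; }

  int main() {
    int goto_flags = ubcp | disp | clvm | ____;  // as in def(Bytecodes::_goto, ...)
    printf("goto dispatches itself: %d\n", (int)does_dispatch(goto_flags));
    printf("goto may call the VM:   %d\n", (int)calls_vm(goto_flags));
    return 0;
  }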
+diff -ruN openjdk6/hotspot/src/share/vm/interpreter/templateTable.hpp openjdk/hotspot/src/share/vm/interpreter/templateTable.hpp
+--- openjdk6/hotspot/src/share/vm/interpreter/templateTable.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/interpreter/templateTable.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)templateTable.hpp	1.91 07/05/05 17:05:39 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #ifndef CC_INTERP
+@@ -76,7 +73,7 @@
+  public:
+   enum Operation { add, sub, mul, div, rem, _and, _or, _xor, shl, shr, ushr };
+   enum Condition { equal, not_equal, less, less_equal, greater, greater_equal };
+-  
++
+  private:
+   static bool            _is_initialized;        // true if TemplateTable has been initialized
+   static Template        _template_table     [Bytecodes::number_of_codes];
+@@ -150,7 +147,7 @@
+   static void baload();
+   static void caload();
+   static void saload();
+-  
++
+   static void iload(int n);
+   static void lload(int n);
+   static void fload(int n);
+@@ -245,9 +242,9 @@
+   static void fast_binaryswitch();
+ 
+   static void _return(TosState state);
+-  
++
+   static void resolve_cache_and_index(int byte_no, Register cache, Register index);
+-  static void load_invoke_cp_cache_entry(int byte_no, 
++  static void load_invoke_cp_cache_entry(int byte_no,
+                                          Register method,
+                                          Register itable_index,
+                                          Register flags,
+@@ -281,7 +278,7 @@
+   static void instanceof();
+ 
+   static void athrow();
+-  
++
+   static void monitorenter();
+   static void monitorexit();
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/libadt/dict.cpp openjdk/hotspot/src/share/vm/libadt/dict.cpp
+--- openjdk6/hotspot/src/share/vm/libadt/dict.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/libadt/dict.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)dict.cpp	1.35 07/05/05 17:05:41 JVM"
+-#endif
+ /*
+  * Copyright 1997-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Dictionaries - An Abstract Data Type
+@@ -38,7 +35,7 @@
+ 
+ #include <assert.h>
+ 
+-// The iostream is not needed and it gets confused for gcc by the 
++// The iostream is not needed and it gets confused for gcc by the
+ // define of bool.
+ //
+ // #include <iostream.h>
+@@ -46,20 +43,20 @@
+ //------------------------------data-----------------------------------------
+ // String hash tables
+ #define MAXID 20
+-static byte initflag = 0;	// True after 1st initialization
++static byte initflag = 0;       // True after 1st initialization
+ static const char shft[MAXID] = {1,2,3,4,5,6,7,1,2,3,4,5,6,7,1,2,3,4,5,6};
+ static short xsum[MAXID];
+ 
+ //------------------------------bucket---------------------------------------
+ class bucket : public ResourceObj {
+ public:
+-  uint _cnt, _max;		// Size of bucket
+-  void **_keyvals;	  	// Array of keys and values
++  uint _cnt, _max;              // Size of bucket
++  void **_keyvals;              // Array of keys and values
+ };
+ 
+ //------------------------------Dict-----------------------------------------
+ // The dictionary is kept has a hash table.  The hash table is a even power
+-// of two, for nice modulo operations.	Each bucket in the hash table points
++// of two, for nice modulo operations.  Each bucket in the hash table points
+ // to a linear list of key-value pairs; each key & value is just a (void *).
+ // The list starts with a count.  A hash lookup finds the list head, then a
+ // simple linear scan finds the key.  If the table gets too full, it's
+@@ -71,37 +68,37 @@
+   int i;
+ 
+   // Precompute table of null character hashes
+-  if( !initflag ) {		// Not initializated yet?
+-    xsum[0] = (1<<shft[0])+1;	// Initialize
++  if( !initflag ) {             // Not initializated yet?
++    xsum[0] = (1<<shft[0])+1;   // Initialize
+     for(i=1; i<MAXID; i++) {
+       xsum[i] = (1<<shft[i])+1+xsum[i-1];
+     }
+-    initflag = 1;		// Never again
++    initflag = 1;               // Never again
+   }
+ 
+-  _size = 16;  			// Size is a power of 2
+-  _cnt = 0;			// Dictionary is empty
++  _size = 16;                   // Size is a power of 2
++  _cnt = 0;                     // Dictionary is empty
+   _bin = (bucket*)_arena->Amalloc_4(sizeof(bucket)*_size);
+   memset(_bin,0,sizeof(bucket)*_size);
+ }
+ 
+-Dict::Dict(CmpKey initcmp, Hash inithash, Arena *arena, int size) 
++Dict::Dict(CmpKey initcmp, Hash inithash, Arena *arena, int size)
+ : _hash(inithash), _cmp(initcmp), _arena(arena) {
+   int i;
+ 
+   // Precompute table of null character hashes
+-  if( !initflag ) {		// Not initializated yet?
+-    xsum[0] = (1<<shft[0])+1;	// Initialize
++  if( !initflag ) {             // Not initializated yet?
++    xsum[0] = (1<<shft[0])+1;   // Initialize
+     for(i=1; i<MAXID; i++) {
+       xsum[i] = (1<<shft[i])+1+xsum[i-1];
+     }
+-    initflag = 1;		// Never again
++    initflag = 1;               // Never again
+   }
+ 
+-  i=16; 
++  i=16;
+   while( i < size ) i <<= 1;
+-  _size = i;  			// Size is a power of 2
+-  _cnt = 0;			// Dictionary is empty
++  _size = i;                    // Size is a power of 2
++  _cnt = 0;                     // Dictionary is empty
+   _bin = (bucket*)_arena->Amalloc_4(sizeof(bucket)*_size);
+   memset(_bin,0,sizeof(bucket)*_size);
+ }
+@@ -122,9 +119,9 @@
+ //------------------------------Clear----------------------------------------
+ // Zap to empty; ready for re-use
+ void Dict::Clear() {
+-  _cnt = 0;			// Empty contents
++  _cnt = 0;                     // Empty contents
+   for( uint i=0; i<_size; i++ )
+-    _bin[i]._cnt = 0;		// Empty buckets, but leave allocated
++    _bin[i]._cnt = 0;           // Empty buckets, but leave allocated
+   // Leave _size & _bin alone, under the assumption that dictionary will
+   // grow to this size again.
+ }
+@@ -137,18 +134,18 @@
+ // lo list depending on the value of the bit.
+ void Dict::doubhash(void) {
+   uint oldsize = _size;
+-  _size <<= 1;			// Double in size
++  _size <<= 1;                  // Double in size
+   _bin = (bucket*)_arena->Arealloc( _bin, sizeof(bucket)*oldsize, sizeof(bucket)*_size );
+   memset( &_bin[oldsize], 0, oldsize*sizeof(bucket) );
+   // Rehash things to spread into new table
+   for( uint i=0; i < oldsize; i++) { // For complete OLD table do
+-    bucket *b = &_bin[i];	// Handy shortcut for _bin[i]
+-    if( !b->_keyvals ) continue;	// Skip empties fast
++    bucket *b = &_bin[i];       // Handy shortcut for _bin[i]
++    if( !b->_keyvals ) continue;        // Skip empties fast
+ 
+     bucket *nb = &_bin[i+oldsize];  // New bucket shortcut
+-    uint j = b->_max;		    // Trim new bucket to nearest power of 2 
++    uint j = b->_max;               // Trim new bucket to nearest power of 2
+     while( j > b->_cnt ) j >>= 1;   // above old bucket _cnt
+-    if( !j ) j = 1;		// Handle zero-sized buckets
++    if( !j ) j = 1;             // Handle zero-sized buckets
+     nb->_max = j<<1;
+     // Allocate worst case space for key-value pairs
+     nb->_keyvals = (void**)_arena->Amalloc_4( sizeof(void *)*nb->_max*2 );
+@@ -157,13 +154,13 @@
+     for( j=0; j<b->_cnt; j++ ) {  // Rehash all keys in this bucket
+       void *key = b->_keyvals[j+j];
+       if( (_hash( key ) & (_size-1)) != i ) { // Moving to hi bucket?
+-	nb->_keyvals[nbcnt+nbcnt] = key;
+-	nb->_keyvals[nbcnt+nbcnt+1] = b->_keyvals[j+j+1];
+-	nb->_cnt = nbcnt = nbcnt+1;
+-	b->_cnt--;		// Remove key/value from lo bucket
+-	b->_keyvals[j+j  ] = b->_keyvals[b->_cnt+b->_cnt  ];
+-	b->_keyvals[j+j+1] = b->_keyvals[b->_cnt+b->_cnt+1];
+-	j--;			// Hash compacted element also
++        nb->_keyvals[nbcnt+nbcnt] = key;
++        nb->_keyvals[nbcnt+nbcnt+1] = b->_keyvals[j+j+1];
++        nb->_cnt = nbcnt = nbcnt+1;
++        b->_cnt--;              // Remove key/value from lo bucket
++        b->_keyvals[j+j  ] = b->_keyvals[b->_cnt+b->_cnt  ];
++        b->_keyvals[j+j+1] = b->_keyvals[b->_cnt+b->_cnt+1];
++        j--;                    // Hash compacted element also
+       }
+     } // End of for all key-value pairs in bucket
+   } // End of for all buckets
+@@ -186,7 +183,7 @@
+ //------------------------------Dict-----------------------------------------
+ // Deep copy a dictionary.
+ Dict &Dict::operator =( const Dict &d ) {
+-  if( _size < d._size ) {	// If must have more buckets
++  if( _size < d._size ) {       // If must have more buckets
+     _arena = d._arena;
+     _bin = (bucket*)_arena->Arealloc( _bin, sizeof(bucket)*_size, sizeof(bucket)*d._size );
+     memset( &_bin[_size], 0, (d._size-_size)*sizeof(bucket) );
+@@ -194,12 +191,12 @@
+   }
+   uint i;
+   for( i=0; i<_size; i++ ) // All buckets are empty
+-    _bin[i]._cnt = 0;		// But leave bucket allocations alone
++    _bin[i]._cnt = 0;           // But leave bucket allocations alone
+   _cnt = d._cnt;
+   *(Hash*)(&_hash) = d._hash;
+   *(CmpKey*)(&_cmp) = d._cmp;
+   for( i=0; i<_size; i++ ) {
+-    bucket *b = &d._bin[i];	// Shortcut to source bucket
++    bucket *b = &d._bin[i];     // Shortcut to source bucket
+     for( uint j=0; j<b->_cnt; j++ )
+       Insert( b->_keyvals[j+j], b->_keyvals[j+j+1] );
+   }
+@@ -207,59 +204,59 @@
+ }
+ 
+ //------------------------------Insert----------------------------------------
+-// Insert or replace a key/value pair in the given dictionary.	If the
++// Insert or replace a key/value pair in the given dictionary.  If the
+ // dictionary is too full, it's size is doubled.  The prior value being
+-// replaced is returned (NULL if this is a 1st insertion of that key).	If
++// replaced is returned (NULL if this is a 1st insertion of that key).  If
+ // an old value is found, it's swapped with the prior key-value pair on the
+ // list.  This moves a commonly searched-for value towards the list head.
+ void *Dict::Insert(void *key, void *val, bool replace) {
+-  uint hash = _hash( key );	// Get hash key
+-  uint i = hash & (_size-1);	// Get hash key, corrected for size
+-  bucket *b = &_bin[i];		// Handy shortcut
++  uint hash = _hash( key );     // Get hash key
++  uint i = hash & (_size-1);    // Get hash key, corrected for size
++  bucket *b = &_bin[i];         // Handy shortcut
+   for( uint j=0; j<b->_cnt; j++ ) {
+     if( !_cmp(key,b->_keyvals[j+j]) ) {
+       if (!replace) {
+         return b->_keyvals[j+j+1];
+       } else {
+         void *prior = b->_keyvals[j+j+1];
+-        b->_keyvals[j+j  ] = key;	// Insert current key-value
++        b->_keyvals[j+j  ] = key;       // Insert current key-value
+         b->_keyvals[j+j+1] = val;
+-        return prior;		// Return prior
++        return prior;           // Return prior
+       }
+     }
+   }
+-  if( ++_cnt > _size ) {	// Hash table is full
+-    doubhash();			// Grow whole table if too full
+-    i = hash & (_size-1);	// Rehash
+-    b = &_bin[i];		// Handy shortcut
++  if( ++_cnt > _size ) {        // Hash table is full
++    doubhash();                 // Grow whole table if too full
++    i = hash & (_size-1);       // Rehash
++    b = &_bin[i];               // Handy shortcut
+   }
+-  if( b->_cnt == b->_max ) {	// Must grow bucket?
++  if( b->_cnt == b->_max ) {    // Must grow bucket?
+     if( !b->_keyvals ) {
+-      b->_max = 2;		// Initial bucket size
++      b->_max = 2;              // Initial bucket size
+       b->_keyvals = (void**)_arena->Amalloc_4(sizeof(void*) * b->_max * 2);
+     } else {
+       b->_keyvals = (void**)_arena->Arealloc(b->_keyvals, sizeof(void*) * b->_max * 2, sizeof(void*) * b->_max * 4);
+-      b->_max <<= 1;		// Double bucket
++      b->_max <<= 1;            // Double bucket
+     }
+   }
+   b->_keyvals[b->_cnt+b->_cnt  ] = key;
+   b->_keyvals[b->_cnt+b->_cnt+1] = val;
+   b->_cnt++;
+-  return NULL;			// Nothing found prior
++  return NULL;                  // Nothing found prior
+ }
+ 
+ //------------------------------Delete---------------------------------------
+ // Find & remove a value from dictionary. Return old value.
+ void *Dict::Delete(void *key) {
+-  uint i = _hash( key ) & (_size-1);	// Get hash key, corrected for size
+-  bucket *b = &_bin[i];		// Handy shortcut
++  uint i = _hash( key ) & (_size-1);    // Get hash key, corrected for size
++  bucket *b = &_bin[i];         // Handy shortcut
+   for( uint j=0; j<b->_cnt; j++ )
+     if( !_cmp(key,b->_keyvals[j+j]) ) {
+       void *prior = b->_keyvals[j+j+1];
+-      b->_cnt--;		// Remove key/value from lo bucket
++      b->_cnt--;                // Remove key/value from lo bucket
+       b->_keyvals[j+j  ] = b->_keyvals[b->_cnt+b->_cnt  ];
+       b->_keyvals[j+j+1] = b->_keyvals[b->_cnt+b->_cnt+1];
+-      _cnt--;			// One less thing in table
++      _cnt--;                   // One less thing in table
+       return prior;
+     }
+   return NULL;
+@@ -269,10 +266,10 @@
+ // Find a key-value pair in the given dictionary.  If not found, return NULL.
+ // If found, move key-value pair towards head of list.
+ void *Dict::operator [](const void *key) const {
+-  uint i = _hash( key ) & (_size-1);	// Get hash key, corrected for size
+-  bucket *b = &_bin[i];		// Handy shortcut
++  uint i = _hash( key ) & (_size-1);    // Get hash key, corrected for size
++  bucket *b = &_bin[i];         // Handy shortcut
+   for( uint j=0; j<b->_cnt; j++ )
+-    if( !_cmp(key,b->_keyvals[j+j]) ) 
++    if( !_cmp(key,b->_keyvals[j+j]) )
+       return b->_keyvals[j+j+1];
+   return NULL;
+ }
+@@ -285,13 +282,13 @@
+   if( _cnt != d2._cnt ) return 0;
+   if( _hash != d2._hash ) return 0;
+   if( _cmp != d2._cmp ) return 0;
+-  for( uint i=0; i < _size; i++) {	// For complete hash table do
+-    bucket *b = &_bin[i];	// Handy shortcut
++  for( uint i=0; i < _size; i++) {      // For complete hash table do
++    bucket *b = &_bin[i];       // Handy shortcut
+     if( b->_cnt != d2._bin[i]._cnt ) return 0;
+     if( memcmp(b->_keyvals, d2._bin[i]._keyvals, b->_cnt*2*sizeof(void*) ) )
+-      return 0;			// Key-value pairs must match
++      return 0;                 // Key-value pairs must match
+   }
+-  return 1;			// All match, is OK
++  return 1;                     // All match, is OK
+ }
+ 
+ //------------------------------print------------------------------------------
+@@ -306,7 +303,7 @@
+ }
+ 
+ //------------------------------Hashing Functions----------------------------
+-// Convert string to hash key.	This algorithm implements a universal hash
++// Convert string to hash key.  This algorithm implements a universal hash
+ // function with the multipliers frozen (ok, so it's not universal).  The
+ // multipliers (and allowable characters) are all odd, so the resultant sum
+ // is odd - guarenteed not divisible by any power of two, so the hash tables
+@@ -322,8 +319,8 @@
+   register const char *s = (const char *)t;
+ 
+   while( ((c = *s++) != '\0') && (k < MAXID-1) ) { // Get characters till null or MAXID-1
+-    c = (c<<1)+1;		// Characters are always odd!
+-    sum += c + (c<<shft[k++]);	// Universal hash function
++    c = (c<<1)+1;               // Characters are always odd!
++    sum += c + (c<<shft[k++]);  // Universal hash function
+   }
+   return (int)((sum+xsum[k]) >> 1); // Hash key, un-modulo'd table size
+ }
+@@ -358,23 +355,23 @@
+ //------------------------------reset------------------------------------------
+ // Create an iterator and initialize the first variables.
+ void DictI::reset( const Dict *dict ) {
+-  _d = dict;			// The dictionary
+-  _i = (uint)-1;		// Before the first bin
+-  _j = 0;			// Nothing left in the current bin
+-  ++(*this);			// Step to first real value
++  _d = dict;                    // The dictionary
++  _i = (uint)-1;                // Before the first bin
++  _j = 0;                       // Nothing left in the current bin
++  ++(*this);                    // Step to first real value
+ }
+ 
+ //------------------------------next-------------------------------------------
+ // Find the next key-value pair in the dictionary, or return a NULL key and
+ // value.
+ void DictI::operator ++(void) {
+-  if( _j-- ) {			// Still working in current bin?
++  if( _j-- ) {                  // Still working in current bin?
+     _key   = _d->_bin[_i]._keyvals[_j+_j];
+     _value = _d->_bin[_i]._keyvals[_j+_j+1];
+     return;
+   }
+ 
+-  while( ++_i < _d->_size ) {	// Else scan for non-zero bucket
++  while( ++_i < _d->_size ) {   // Else scan for non-zero bucket
+     _j = _d->_bin[_i]._cnt;
+     if( !_j ) continue;
+     _j--;
+@@ -384,4 +381,3 @@
+   }
+   _key = _value = NULL;
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/libadt/dict.hpp openjdk/hotspot/src/share/vm/libadt/dict.hpp
+--- openjdk6/hotspot/src/share/vm/libadt/dict.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/libadt/dict.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)dict.hpp	1.24 07/05/05 17:05:40 JVM"
+-#endif
+ /*
+  * Copyright 1997-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #ifndef _DICT_
+@@ -44,23 +41,23 @@
+ 
+ class Dict : public ResourceObj { // Dictionary structure
+  private:
+-  class Arena *_arena;		// Where to draw storage from
+-  class bucket *_bin;		// Hash table is array of buckets
+-  uint _size;			// Size (# of slots) in hash table
+-  uint32 _cnt;			// Number of key-value pairs in hash table
+-  const Hash _hash;		// Hashing function
+-  const CmpKey _cmp;		// Key comparison function
+-  void doubhash( void );	// Double hash table size
++  class Arena *_arena;          // Where to draw storage from
++  class bucket *_bin;           // Hash table is array of buckets
++  uint _size;                   // Size (# of slots) in hash table
++  uint32 _cnt;                  // Number of key-value pairs in hash table
++  const Hash _hash;             // Hashing function
++  const CmpKey _cmp;            // Key comparison function
++  void doubhash( void );        // Double hash table size
+ 
+  public:
+-  friend class DictI;		 // Friendly iterator function
++  friend class DictI;            // Friendly iterator function
+ 
+   // cmp is a key comparision routine.  hash is a routine to hash a key.
+   Dict( CmpKey cmp, Hash hash );
+   Dict( CmpKey cmp, Hash hash, Arena *arena, int size=16 );
+   ~Dict();
+ 
+-  Dict( const Dict & );		// Deep-copy guts
++  Dict( const Dict & );         // Deep-copy guts
+   Dict &operator =( const Dict & );
+ 
+   // Zap to empty; ready for re-use
+@@ -72,9 +69,9 @@
+   // Insert inserts the given key-value pair into the dictionary.  The prior
+   // value of the key is returned; NULL if the key was not previously defined.
+   void *Insert(void *key, void *val, bool replace = true); // A new key-value
+-  void *Delete(void *key);	  // Delete & return old
++  void *Delete(void *key);        // Delete & return old
+ 
+-  // Find finds the value of a given key; or NULL if not found.  
++  // Find finds the value of a given key; or NULL if not found.
+   // The dictionary is NOT changed.
+   void *operator [](const void *key) const;  // Do a lookup
+ 
+@@ -88,7 +85,7 @@
+ };
+ 
+ // Hashing functions
+-int hashstr(const void *s);	   // Nice string hash
++int hashstr(const void *s);        // Nice string hash
+ // Slimey cheap hash function; no guarenteed performance.  Better than the
+ // default for pointers, especially on MS-DOS machines.
+ int hashptr(const void *key);
+@@ -102,7 +99,7 @@
+ 
+ //------------------------------Iteration--------------------------------------
+ // The class of dictionary iterators.  Fails in the presences of modifications
+-// to the dictionary during iteration (including searches).  
++// to the dictionary during iteration (including searches).
+ // Usage:  for( DictI i(dict); i.test(); ++i ) { body = i.key; body = i.value;}
+ class DictI {
+  private:
+@@ -118,4 +115,3 @@
+ };
+ 
+ #endif // _DICT_
+-
+diff -ruN openjdk6/hotspot/src/share/vm/libadt/port.cpp openjdk/hotspot/src/share/vm/libadt/port.cpp
+--- openjdk6/hotspot/src/share/vm/libadt/port.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/libadt/port.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)port.cpp	1.21 07/05/05 17:05:41 JVM"
+-#endif
+ /*
+  * Copyright 1997-1998 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Code for portable compiling
+@@ -51,9 +48,9 @@
+ {
+   register uint32 tmp;
+   while( x ) {                  // While not zero
+-    tmp = x;			// Hold onto smaller x value
+-    x = y % x;			// Compute modulus; since y>=x, 0 <= mod < x
+-    y = tmp;			// y = old x
++    tmp = x;                    // Hold onto smaller x value
++    x = y % x;                  // Compute modulus; since y>=x, 0 <= mod < x
++    y = tmp;                    // y = old x
+   }
+   return y;
+ }
+@@ -85,11 +82,11 @@
+ // negative rotates right.
+ uint32 rotate32( register uint32 x, register int32 cnt )
+ {
+-  if( cnt >= 0 ) {		// Positive rotates left
+-    cnt &= 31;			// Mask off extra shift bits
+-  } else {			// Negative rotates right
+-    cnt = (-cnt)&31;		// Flip sign; mask extra shift bits
+-    cnt = 32-cnt;		// Rotate right by big left rotation
++  if( cnt >= 0 ) {              // Positive rotates left
++    cnt &= 31;                  // Mask off extra shift bits
++  } else {                      // Negative rotates right
++    cnt = (-cnt)&31;            // Flip sign; mask extra shift bits
++    cnt = 32-cnt;               // Rotate right by big left rotation
+   }
+   return (x << cnt) | (x >> (32-cnt));
+ }
+@@ -99,14 +96,14 @@
+    for the existing log2. Keep around until we have
+    verified all uses of log2 do the correct thing!
+ //------------------------------log2-------------------------------------------
+-// Log base 2.  Might also be called 'count leading zeros'.  Log2(x) returns 
++// Log base 2.  Might also be called 'count leading zeros'.  Log2(x) returns
+ // an l such that (1L<<l) <= x < (2L<<l).  log2(x) returns 32.
+ uint log2( uint32 x )
+ {
+-  register uint l = 32;		// Log bits
+-  register int32 sx = x;	// Treat as signed number
+-  while( sx >= 0 )		// While high bit is clear
+-    sx <<= 1, l--;		// Shift bits left, count down log2
++  register uint l = 32;         // Log bits
++  register int32 sx = x;        // Treat as signed number
++  while( sx >= 0 )              // While high bit is clear
++    sx <<= 1, l--;              // Shift bits left, count down log2
+   return l;
+ }
+ */
+diff -ruN openjdk6/hotspot/src/share/vm/libadt/port.hpp openjdk/hotspot/src/share/vm/libadt/port.hpp
+--- openjdk6/hotspot/src/share/vm/libadt/port.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/libadt/port.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)port.hpp	1.44 07/05/05 17:05:41 JVM"
+-#endif
+ /*
+  * Copyright 1997-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #ifndef _PORT_
+@@ -76,7 +73,7 @@
+ 
+ #elif defined(__hpux)
+ 
+-#define INTERFACE 
++#define INTERFACE
+ #define IMPLEMENTATION
+ #define signed
+ #include <strings.h>
+@@ -89,7 +86,7 @@
+ 
+ #elif defined(__MOTO__)
+ // Motorola's mcc
+-#define INTERFACE 
++#define INTERFACE
+ #define IMPLEMENTATION
+ #include <stdlib.h>
+ #include <memory.h>
+@@ -98,7 +95,7 @@
+ 
+ #elif defined(_AIX)
+ // IBM's xlC compiler
+-#define INTERFACE 
++#define INTERFACE
+ #define IMPLEMENTATION
+ #include <stdlib.h>
+ #include <memory.h>
+@@ -107,7 +104,7 @@
+ 
+ #elif defined(_MSC_VER)
+ // Microsoft Visual C++
+-//#define INTERFACE 
++//#define INTERFACE
+ #define IMPLEMENTATION
+ #include <stdlib.h>
+ #undef small
+@@ -117,7 +114,7 @@
+ 
+ #elif defined(SPARC_WORKS)
+ 
+-#define INTERFACE 
++#define INTERFACE
+ #define IMPLEMENTATION
+ 
+ #include <stddef.h>
+@@ -126,7 +123,7 @@
+ 
+ #elif defined(SOLARIS)
+ 
+-#define INTERFACE 
++#define INTERFACE
+ #define IMPLEMENTATION
+ 
+ #include <stddef.h>
+@@ -144,7 +141,7 @@
+ 
+ #elif defined(__cplusplus)
+ // AT&Ts cfront
+-#define INTERFACE 
++#define INTERFACE
+ #define IMPLEMENTATION
+ #include <unistd.h>
+ #define signed
+@@ -185,26 +182,26 @@
+ typedef unsigned char byte;
+ 
+ // All uses of *int16 changed to 32-bit to speed up compiler on Intel
+-//typedef signed short int16;	// Exactly 16bits signed 
+-//typedef unsigned short uint16;	// Exactly 16bits unsigned
++//typedef signed short int16;   // Exactly 16bits signed
++//typedef unsigned short uint16;        // Exactly 16bits unsigned
+ //const unsigned int min_uint16 = 0x0000;    // smallest uint16
+ //const unsigned int max_uint16 = 0xFFFF;    // largest  uint16
+ 
+-typedef unsigned int uint;	// When you need a fast >=16bit unsigned value 
+-/*typedef int int; */	        // When you need a fast >=16bit value 
++typedef unsigned int uint;      // When you need a fast >=16bit unsigned value
++/*typedef int int; */           // When you need a fast >=16bit value
+ const unsigned int max_uint = (uint)-1;
+-typedef int32_t int32;   // Exactly 32bits signed 
+-typedef uint32_t uint32; // Exactly 32bits unsigned 
++typedef int32_t int32;   // Exactly 32bits signed
++typedef uint32_t uint32; // Exactly 32bits unsigned
+ 
+ // Bit-sized floating point and long thingies
+ #ifndef __TANDEM
+ // Do not define these for Tandem, because they conflict with typedefs in softieee.h.
+-typedef float float32;		// 32-bit float
+-typedef double float64;		// 64-bit float
++typedef float float32;          // 32-bit float
++typedef double float64;         // 64-bit float
+ #endif // __TANDEM
+ 
+-typedef jlong int64;		// Java long for my 64-bit type
+-typedef julong uint64;		// Java long for my 64-bit type
++typedef jlong int64;            // Java long for my 64-bit type
++typedef julong uint64;          // Java long for my 64-bit type
+ 
+ //-----------------------------------------------------------------------------
+ // Nice constants
+diff -ruN openjdk6/hotspot/src/share/vm/libadt/set.cpp openjdk/hotspot/src/share/vm/libadt/set.cpp
+--- openjdk6/hotspot/src/share/vm/libadt/set.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/libadt/set.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)set.cpp	1.26 07/05/05 17:05:41 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Sets - An Abstract Data Type
+@@ -69,42 +66,42 @@
+ char *Set::setstr() const
+ {
+   if( !this ) return os::strdup("{no set}");
+-  Set &set = clone();		// Virtually copy the basic set.
+-  set.Sort();			// Sort elements for in-order retrieval
++  Set &set = clone();           // Virtually copy the basic set.
++  set.Sort();                   // Sort elements for in-order retrieval
+ 
+-  uint len = 128;		// Total string space
++  uint len = 128;               // Total string space
+   char *buf = NEW_C_HEAP_ARRAY(char,len);// Some initial string space
+ 
+-  register char *s = buf;	// Current working string pointer
++  register char *s = buf;       // Current working string pointer
+   *s++ = '{';
+   *s = '\0';
+ 
+   // For all elements of the Set
+   uint hi = (uint)-2, lo = (uint)-2;
+   for( SetI i(&set); i.test(); ++i ) {
+-    if( hi+1 == i.elem ) {	  // Moving sequentially thru range?
+-      hi = i.elem;		  // Yes, just update hi end of range
+-    } else {			  // Else range ended
+-      if( buf+len-s < 25 ) {	  // Generous trailing space for upcoming numbers
+-	int offset = (int)(s-buf);// Not enuf space; compute offset into buffer
+-	len <<= 1;	          // Double string size
+-	buf = REALLOC_C_HEAP_ARRAY(char,buf,len); // Reallocate doubled size
+-	s = buf+offset; 	// Get working pointer into new bigger buffer
++    if( hi+1 == i.elem ) {        // Moving sequentially thru range?
++      hi = i.elem;                // Yes, just update hi end of range
++    } else {                      // Else range ended
++      if( buf+len-s < 25 ) {      // Generous trailing space for upcoming numbers
++        int offset = (int)(s-buf);// Not enuf space; compute offset into buffer
++        len <<= 1;                // Double string size
++        buf = REALLOC_C_HEAP_ARRAY(char,buf,len); // Reallocate doubled size
++        s = buf+offset;         // Get working pointer into new bigger buffer
+       }
+-      if( lo != (uint)-2 ) { 	// Startup?  No!  Then print previous range.
+-	if( lo != hi ) sprintf(s,"%d-%d,",lo,hi);
+-	else sprintf(s,"%d,",lo);
+-	s += strlen(s); 	// Advance working string
++      if( lo != (uint)-2 ) {    // Startup?  No!  Then print previous range.
++        if( lo != hi ) sprintf(s,"%d-%d,",lo,hi);
++        else sprintf(s,"%d,",lo);
++        s += strlen(s);         // Advance working string
+       }
+       hi = lo = i.elem;
+     }
+   }
+   if( lo != (uint)-2 ) {
+-    if( buf+len-s < 25 ) {	// Generous trailing space for upcoming numbers
++    if( buf+len-s < 25 ) {      // Generous trailing space for upcoming numbers
+       int offset = (int)(s-buf);// Not enuf space; compute offset into buffer
+-      len <<= 1;		// Double string size
++      len <<= 1;                // Double string size
+       buf = (char*)ReallocateHeap(buf,len); // Reallocate doubled size
+-      s = buf+offset;		// Get working pointer into new bigger buffer
++      s = buf+offset;           // Get working pointer into new bigger buffer
+     }
+     if( lo != hi ) sprintf(s,"%d-%d}",lo,hi);
+     else sprintf(s,"%d}",lo);
+@@ -127,43 +124,43 @@
+ // Set.  Return the amount of text parsed in "len", or zero in "len".
+ int Set::parse(const char *s)
+ {
+-  register char c;		// Parse character
+-  register const char *t = s;	// Save the starting position of s.
+-  do c = *s++;			// Skip characters
++  register char c;              // Parse character
++  register const char *t = s;   // Save the starting position of s.
++  do c = *s++;                  // Skip characters
+   while( c && (c <= ' ') );     // Till no more whitespace or EOS
+   if( c != '{' ) return 0;      // Oops, not a Set openner
+   if( *s == '}' ) return 2;     // The empty Set
+ 
+   // Sets are filled with values of the form "xx," or "xx-yy," with the comma
+   // a "}" at the very end.
+-  while( 1 ) {			// While have elements in the Set
+-    char *u;			// Pointer to character ending parse
+-    uint hi, i;			// Needed for range handling below
++  while( 1 ) {                  // While have elements in the Set
++    char *u;                    // Pointer to character ending parse
++    uint hi, i;                 // Needed for range handling below
+     uint elem = (uint)strtoul(s,&u,10);// Get element
+-    if( u == s ) return 0;	// Bogus crude
+-    s = u;			// Skip over the number
+-    c = *s++;			// Get the number seperator
+-    switch ( c ) {		// Different seperators
++    if( u == s ) return 0;      // Bogus crude
++    s = u;                      // Skip over the number
++    c = *s++;                   // Get the number seperator
++    switch ( c ) {              // Different seperators
+     case '}':                   // Last simple element
+     case ',':                   // Simple element
+-      (*this) <<= elem; 	// Insert the simple element into the Set
+-      break;			// Go get next element
++      (*this) <<= elem;         // Insert the simple element into the Set
++      break;                    // Go get next element
+     case '-':                   // Range
+       hi = (uint)strtoul(s,&u,10); // Get element
+-      if( u == s ) return 0;	// Bogus crude
++      if( u == s ) return 0;    // Bogus crude
+       for( i=elem; i<=hi; i++ )
+-	(*this) <<= i;		// Insert the entire range into the Set
+-      s = u;			// Skip over the number
+-      c = *s++; 		// Get the number seperator
++        (*this) <<= i;          // Insert the entire range into the Set
++      s = u;                    // Skip over the number
++      c = *s++;                 // Get the number seperator
+       break;
+     }
+     if( c == '}' ) break;       // End of the Set
+     if( c != ',' ) return 0;    // Bogus garbage
+   }
+-  return (int)(s-t);		// Return length parsed
++  return (int)(s-t);            // Return length parsed
+ }
+ 
+ //------------------------------Iterator---------------------------------------
+-SetI_::~SetI_() 
++SetI_::~SetI_()
+ {
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/libadt/set.hpp openjdk/hotspot/src/share/vm/libadt/set.hpp
+--- openjdk6/hotspot/src/share/vm/libadt/set.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/libadt/set.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)set.hpp	1.23 07/05/05 17:05:41 JVM"
+-#endif
+ /*
+  * Copyright 1997 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #ifndef _SET_
+@@ -47,56 +44,56 @@
+ // element currently in them.  Basically, they allow a bunch of bits to be
+ // grouped together, tested, set & cleared, intersected, etc.  The basic
+ // Set class is an abstract class, and cannot be constructed.  Instead,
+-// one of VectorSet, SparseSet, or ListSet is created.  Each variation has 
++// one of VectorSet, SparseSet, or ListSet is created.  Each variation has
+ // different asymptotic running times for different operations, and different
+-// constants of proportionality as well.  
++// constants of proportionality as well.
+ // {n = number of elements, N = largest element}
+ 
+-// 		VectorSet	SparseSet	ListSet
+-// Create	O(N)		O(1)		O(1)
+-// Clear	O(N)		O(1)		O(1)
+-// Insert	O(1)		O(1)		O(log n)
+-// Delete	O(1)		O(1)		O(log n)
+-// Member	O(1)		O(1)		O(log n)
+-// Size		O(N)		O(1)		O(1)
+-// Copy		O(N)		O(n)		O(n)
+-// Union	O(N)		O(n)		O(n log n)
+-// Intersect	O(N)		O(n)		O(n log n)
+-// Difference	O(N)		O(n)		O(n log n)
+-// Equal	O(N)		O(n)		O(n log n)
+-// ChooseMember	O(N)		O(1)		O(1)
+-// Sort		O(1)		O(n log n)	O(1)
+-// Forall	O(N)		O(n)		O(n)
+-// Complement	O(1)		O(1)		O(1)
+-
+-// TIME:	N/32		n		8*n	Accesses
+-// SPACE:	N/8		4*N+4*n		8*n	Bytes
+-
+-// Create:	Make an empty set
+-// Clear:	Remove all the elements of a Set
+-// Insert:	Insert an element into a Set; duplicates are ignored
+-// Delete:	Removes an element from a Set
+-// Member:	Tests for membership in a Set
+-// Size:	Returns the number of members of a Set
+-// Copy:	Copy or assign one Set to another
+-// Union:	Union 2 sets together
+-// Intersect:	Intersect 2 sets together
+-// Difference:	Compute A & !B; remove from set A those elements in set B
+-// Equal:	Test for equality between 2 sets
++//              VectorSet       SparseSet       ListSet
++// Create       O(N)            O(1)            O(1)
++// Clear        O(N)            O(1)            O(1)
++// Insert       O(1)            O(1)            O(log n)
++// Delete       O(1)            O(1)            O(log n)
++// Member       O(1)            O(1)            O(log n)
++// Size         O(N)            O(1)            O(1)
++// Copy         O(N)            O(n)            O(n)
++// Union        O(N)            O(n)            O(n log n)
++// Intersect    O(N)            O(n)            O(n log n)
++// Difference   O(N)            O(n)            O(n log n)
++// Equal        O(N)            O(n)            O(n log n)
++// ChooseMember O(N)            O(1)            O(1)
++// Sort         O(1)            O(n log n)      O(1)
++// Forall       O(N)            O(n)            O(n)
++// Complement   O(1)            O(1)            O(1)
++
++// TIME:        N/32            n               8*n     Accesses
++// SPACE:       N/8             4*N+4*n         8*n     Bytes
++
++// Create:      Make an empty set
++// Clear:       Remove all the elements of a Set
++// Insert:      Insert an element into a Set; duplicates are ignored
++// Delete:      Removes an element from a Set
++// Member:      Tests for membership in a Set
++// Size:        Returns the number of members of a Set
++// Copy:        Copy or assign one Set to another
++// Union:       Union 2 sets together
++// Intersect:   Intersect 2 sets together
++// Difference:  Compute A & !B; remove from set A those elements in set B
++// Equal:       Test for equality between 2 sets
+ // ChooseMember Pick a random member
+-// Sort:	If no other operation changes the set membership, a following
+-//		Forall will iterate the members in ascending order.
+-// Forall:	Iterate over the elements of a Set.  Operations that modify
+-//		the set membership during iteration work, but the iterator may
+-//		skip any member or duplicate any member.
+-// Complement:	Only supported in the Co-Set variations.  It adds a small
+-//		constant-time test to every Set operation.
++// Sort:        If no other operation changes the set membership, a following
++//              Forall will iterate the members in ascending order.
++// Forall:      Iterate over the elements of a Set.  Operations that modify
++//              the set membership during iteration work, but the iterator may
++//              skip any member or duplicate any member.
++// Complement:  Only supported in the Co-Set variations.  It adds a small
++//              constant-time test to every Set operation.
+ //
+ // PERFORMANCE ISSUES:
+ // If you "cast away" the specific set variation you are using, and then do
+ // operations on the basic "Set" object you will pay a virtual function call
+ // to get back the specific set variation.  On the other hand, using the
+-// generic Set means you can change underlying implementations by just 
++// generic Set means you can change underlying implementations by just
+ // changing the initial declaration.  Examples:
+ //      void foo(VectorSet vs1, VectorSet vs2) { vs1 |= vs2; }
+ // "foo" must be called with a VectorSet.  The vector set union operation
+@@ -104,16 +101,16 @@
+ //      void foo(Set vs1, Set vs2) { vs1 |= vs2; }
+ // "foo" may be called with *any* kind of sets; suppose it is called with
+ // VectorSets.  Two virtual function calls are used to figure out the that vs1
+-// and vs2 are VectorSets.  In addition, if vs2 is not a VectorSet then a 
++// and vs2 are VectorSets.  In addition, if vs2 is not a VectorSet then a
+ // temporary VectorSet copy of vs2 will be made before the union proceeds.
+-// 
++//
+ // VectorSets have a small constant.  Time and space are proportional to the
+ //   largest element.  Fine for dense sets and largest element < 10,000.
+ // SparseSets have a medium constant.  Time is proportional to the number of
+ //   elements, space is proportional to the largest element.
+ //   Fine (but big) with the largest element < 100,000.
+ // ListSets have a big constant.  Time *and space* are proportional to the
+-//   number of elements.  They work well for a few elements of *any* size 
++//   number of elements.  They work well for a few elements of *any* size
+ //   (i.e. sets of pointers)!
+ 
+ //------------------------------Set--------------------------------------------
+@@ -124,7 +121,7 @@
+   // DO NOT CONSTRUCT A Set.  THIS IS AN ABSTRACT CLASS, FOR INHERITENCE ONLY
+   Set(Arena *arena) : _set_arena(arena) {};
+ 
+-  // Creates a new set from an existing set 
++  // Creates a new set from an existing set
+   // DO NOT CONSTRUCT A Set.  THIS IS AN ABSTRACT CLASS, FOR INHERITENCE ONLY
+   Set(const Set &) {};
+ 
+@@ -136,22 +133,22 @@
+   virtual ~Set() {};
+ 
+   // Add member to set
+-  virtual Set &operator <<=(uint elem)=0; 
++  virtual Set &operator <<=(uint elem)=0;
+   // virtual Set  operator << (uint elem);
+ 
+   // Delete member from set
+-  virtual Set &operator >>=(uint elem)=0; 
++  virtual Set &operator >>=(uint elem)=0;
+   // virtual Set  operator >> (uint elem);
+ 
+   // Membership test.  Result is Zero (absent)/ Non-Zero (present)
+   virtual int operator [](uint elem) const=0;
+ 
+   // Intersect sets
+-  virtual Set &operator &=(const Set &s)=0; 
++  virtual Set &operator &=(const Set &s)=0;
+   // virtual Set  operator & (const Set &s) const;
+ 
+   // Union sets
+-  virtual Set &operator |=(const Set &s)=0; 
++  virtual Set &operator |=(const Set &s)=0;
+   // virtual Set  operator | (const Set &s) const;
+ 
+   // Difference sets
+@@ -184,7 +181,7 @@
+   // inbetween then the iterator will visit the elements in ascending order.
+   virtual void Sort(void)=0;
+ 
+-  // Convert a set to printable string in an allocated buffer.  
++  // Convert a set to printable string in an allocated buffer.
+   // The caller must deallocate the string.
+   virtual char *setstr(void) const;
+ 
+@@ -232,10 +229,10 @@
+ 
+ class SetI_ : public ResourceObj {
+ protected:
+-  friend class SetI; 
++  friend class SetI;
+   virtual ~SetI_();
+   virtual uint next(void)=0;
+-  virtual int test(void)=0; 
++  virtual int test(void)=0;
+ };
+ 
+ class SetI {
+@@ -252,4 +249,3 @@
+ };
+ 
+ #endif // _SET_
+-
+diff -ruN openjdk6/hotspot/src/share/vm/libadt/vectset.cpp openjdk/hotspot/src/share/vm/libadt/vectset.cpp
+--- openjdk6/hotspot/src/share/vm/libadt/vectset.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/libadt/vectset.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)vectset.cpp	1.25 07/05/05 17:05:41 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Vector Sets - An Abstract Data Type
+@@ -67,9 +64,9 @@
+ }
+ 
+ //------------------------------Construct--------------------------------------
+-Set &VectorSet_Construct(Arena *arena) 
+-{ 
+-  return *(new VectorSet(arena)); 
++Set &VectorSet_Construct(Arena *arena)
++{
++  return *(new VectorSet(arena));
+ }
+ 
+ //------------------------------operator=--------------------------------------
+@@ -93,7 +90,7 @@
+ 
+ //------------------------------grow-------------------------------------------
+ // Expand the existing set to a bigger size
+-void VectorSet::grow( uint newsize ) 
++void VectorSet::grow( uint newsize )
+ {
+   newsize = (newsize+31) >> 5;  // Convert to longwords
+   uint x = size;
+@@ -153,14 +150,14 @@
+ VectorSet &VectorSet::operator |= (const VectorSet &s)
+ {
+   // This many words must be unioned
+-  register uint cnt = ((size<s.size)?size:s.size); 
++  register uint cnt = ((size<s.size)?size:s.size);
+   register uint32 *u1 = data;   // Pointer to the destination data
+   register uint32 *u2 = s.data; // Pointer to the source data
+   for( uint i=0; i<cnt; i++)    // Copy and OR the two sets
+     *u1++ |= *u2++;
+   if( size < s.size ) {         // Is set 2 larger than set 1?
+     // Extend result by larger set
+-    grow(s.size*sizeof(uint32)*8); 
++    grow(s.size*sizeof(uint32)*8);
+     memcpy(&data[cnt], u2, (s.size - cnt)*sizeof(uint32));
+   }
+   return *this;                 // Return result set
+@@ -178,7 +175,7 @@
+ VectorSet &VectorSet::operator -= (const VectorSet &s)
+ {
+   // This many words must be unioned
+-  register uint cnt = ((size<s.size)?size:s.size); 
++  register uint cnt = ((size<s.size)?size:s.size);
+   register uint32 *u1 = data;   // Pointer to the destination data
+   register uint32 *u2 = s.data; // Pointer to the source data
+   for( uint i=0; i<cnt; i++ )   // For data in set
+@@ -199,17 +196,17 @@
+ //        X1 --  A is a subset of B
+ //        0X --  B is not a subset of A
+ //        1X --  B is a subset of A
+-int VectorSet::compare (const VectorSet &s) const 
++int VectorSet::compare (const VectorSet &s) const
+ {
+   register uint32 *u1 = data;   // Pointer to the destination data
+   register uint32 *u2 = s.data; // Pointer to the source data
+   register uint32 AnotB = 0, BnotA = 0;
+   // This many words must be unioned
+-  register uint cnt = ((size<s.size)?size:s.size); 
++  register uint cnt = ((size<s.size)?size:s.size);
+ 
+   // Get bits for both sets
+-  uint i;			// Exit value of loop
+-  for( i=0; i<cnt; i++ ) {	// For data in BOTH sets
++  uint i;                       // Exit value of loop
++  for( i=0; i<cnt; i++ ) {      // For data in BOTH sets
+     register uint32 A = *u1++;  // Data from one guy
+     register uint32 B = *u2++;  // Data from other guy
+     AnotB |= (A & ~B);          // Compute bits in A not B
+@@ -249,12 +246,12 @@
+ {
+   // The cast is a virtual function that checks that "set" is a VectorSet.
+   const VectorSet &s = *(set.asVectorSet());
+-  
++
+   // NOTE: The intersection is never any larger than the smallest set.
+-  register uint small = ((size<s.size)?size:s.size); 
++  register uint small = ((size<s.size)?size:s.size);
+   register uint32 *u1 = data;   // Pointer to the destination data
+   register uint32 *u2 = s.data; // Pointer to the source data
+-  for( uint i=0; i<small; i++)	// For data in set
++  for( uint i=0; i<small; i++)  // For data in set
+     if( *u1++ & *u2++ )         // If any elements in common
+       return 0;                 // Then not disjoint
+   return 1;                     // Else disjoint
+@@ -303,12 +300,12 @@
+ // Get any element from the set.
+ uint VectorSet::getelem(void) const
+ {
+-  uint i;			// Exit value of loop
++  uint i;                       // Exit value of loop
+   for( i=0; i<size; i++ )
+     if( data[i] )
+       break;
+   uint32 word = data[i];
+-  int j;			// Exit value of loop
++  int j;                        // Exit value of loop
+   for( j= -1; word; j++, word>>=1 );
+   return (i<<5)+j;
+ }
+@@ -317,9 +314,9 @@
+ // Clear a set
+ void VectorSet::Clear(void)
+ {
+-  if( size > 100 ) {		// Reclaim storage only if huge
++  if( size > 100 ) {            // Reclaim storage only if huge
+     FREE_RESOURCE_ARRAY(uint32,data,size);
+-    size = 2;			// Small initial size
++    size = 2;                   // Small initial size
+     data = NEW_RESOURCE_ARRAY(uint32,size);
+   }
+   memset( data, 0, size*sizeof(uint32) );
+@@ -329,7 +326,7 @@
+ // Return number of elements in a Set
+ uint VectorSet::Size(void) const
+ {
+-  uint sum = 0;                 // Cumulative size so far. 
++  uint sum = 0;                 // Cumulative size so far.
+   uint8 *currByte = (uint8*)data;
+   for( uint32 i = 0; i < (size<<2); i++) // While have bytes to process
+     sum += bitsInByte[*currByte++];      // Add bits in current byte to size.
+@@ -391,5 +388,3 @@
+   } while( i<s->size );
+   return max_juint;             // No element, iterated them all
+ }
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/libadt/vectset.hpp openjdk/hotspot/src/share/vm/libadt/vectset.hpp
+--- openjdk6/hotspot/src/share/vm/libadt/vectset.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/libadt/vectset.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)vectset.hpp	1.22 07/05/05 17:05:41 JVM"
+-#endif
+ /*
+  * Copyright 1997 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #ifndef _VECTOR_SET_
+@@ -44,17 +41,17 @@
+ 
+ //------------------------------VectorSet--------------------------------------
+ class VectorSet : public Set {
+-friend class VectorSetI;	// Friendly iterator class
++friend class VectorSetI;        // Friendly iterator class
+ protected:
+   uint size;                    // Size of data IN LONGWORDS (32bits)
+   uint32 *data;                 // The data, bit packed
+ 
+-  void slamin( const VectorSet& s );	 // Initialize one set with another
++  void slamin( const VectorSet& s );     // Initialize one set with another
+   int compare(const VectorSet &s) const; // Compare set contents
+   void grow(uint newsize);               // Grow vector to required bitsize
+ 
+ public:
+-  VectorSet(Arena *arena);		        // Creates a new, empty set.
++  VectorSet(Arena *arena);                      // Creates a new, empty set.
+   VectorSet(const VectorSet &s) : Set(s._set_arena) {slamin(s);} // Set clone; deep-copy guts
+   Set &operator =(const Set &s);                // Set clone; deep-copy guts
+   VectorSet &operator =(const VectorSet &s)     // Set clone; deep-copy guts
+@@ -94,10 +91,10 @@
+ 
+   int operator [](uint elem) const; // Test for membership
+   uint getelem(void) const;         // Return a random element
+-  void Clear(void);		    // Clear a set
++  void Clear(void);                 // Clear a set
+   uint Size(void) const;            // Number of elements in the Set.
+   void Sort(void);                  // Sort before iterating
+-  int hash() const;		    // Hash function
++  int hash() const;                 // Hash function
+ 
+   /* Removed for MCC BUG
+      operator const VectorSet* (void) const { return this; } */
+@@ -113,7 +110,7 @@
+   // With:
+   //     if( visited.test_set(idx) ) return;
+   //
+-  int test_set( uint elem ) { 
++  int test_set( uint elem ) {
+     uint word = elem >> 5;           // Get the longword offset
+     if( word >= size )               // Beyond the last?
+       return test_set_grow(elem);    // Then grow; set; return 0;
+@@ -129,20 +126,20 @@
+ 
+   // Fast inlined test
+   int test( uint elem ) const {
+-    uint word = elem >> 5;	// Get the longword offset
++    uint word = elem >> 5;      // Get the longword offset
+     if( word >= size ) return 0; // Beyond the last?
+     uint32 mask = 1L << (elem & 31); // Get bit mask
+-    return data[word] & mask;	// Get bit
++    return data[word] & mask;   // Get bit
+   }
+ 
+   // Fast inlined set
+-  void set( uint elem ) { 
+-    uint word = elem >> 5;	// Get the longword offset
+-    if( word >= size ) {	// Beyond the last?
+-      test_set_grow(elem);	// Then grow and set
++  void set( uint elem ) {
++    uint word = elem >> 5;      // Get the longword offset
++    if( word >= size ) {        // Beyond the last?
++      test_set_grow(elem);      // Then grow and set
+     } else {
+       uint32 mask = 1L << (elem & 31); // Get bit mask
+-      data[word] |= mask;	// Set bit
++      data[word] |= mask;       // Set bit
+     }
+   }
+ 
+@@ -177,4 +174,3 @@
+ };
+ 
+ #endif // _VECTOR_SET_
+-
+diff -ruN openjdk6/hotspot/src/share/vm/memory/allocation.cpp openjdk/hotspot/src/share/vm/memory/allocation.cpp
+--- openjdk6/hotspot/src/share/vm/memory/allocation.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/allocation.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)allocation.cpp	1.72 07/05/05 17:05:42 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -47,7 +44,7 @@
+    case C_HEAP:
+     res = (address)AllocateHeap(size, "C_Heap: ResourceOBJ");
+     break;
+-   case RESOURCE_AREA: 
++   case RESOURCE_AREA:
+     res = (address)operator new(size);
+     break;
+    default:
+@@ -82,7 +79,7 @@
+ 
+ // MT-safe pool of chunks to reduce malloc/free thrashing
+ // NB: not using Mutex because pools are used before Threads are initialized
+-class ChunkPool { 
++class ChunkPool {
+   Chunk*       _first;        // first cached Chunk; its first word points to next chunk
+   size_t       _num_chunks;   // number of unused chunks in pool
+   size_t       _num_used;     // number of chunks currently checked out
+@@ -94,23 +91,23 @@
+   static ChunkPool* _small_pool;
+ 
+   // return first element or null
+-  void* get_first() { 
+-    Chunk* c = _first; 
+-    if (_first) { 
+-      _first = _first->next(); 
+-      _num_chunks--; 
++  void* get_first() {
++    Chunk* c = _first;
++    if (_first) {
++      _first = _first->next();
++      _num_chunks--;
+     }
+-    return c; 
++    return c;
+   }
+-  
++
+  public:
+   // All chunks in a ChunkPool has the same size
+    ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }
+-  
++
+   // Allocate a new chunk from the pool (might expand the pool)
+   void* allocate(size_t bytes) {
+     assert(bytes == _size, "bad size");
+-    void* p = NULL;    
++    void* p = NULL;
+     { ThreadCritical tc;
+       _num_used++;
+       p = get_first();
+@@ -129,28 +126,28 @@
+     _num_used--;
+ 
+     // Add chunk to list
+-    chunk->set_next(_first); 
+-    _first = chunk; 
+-    _num_chunks++;     
++    chunk->set_next(_first);
++    _first = chunk;
++    _num_chunks++;
+   }
+ 
+   // Prune the pool
+   void free_all_but(size_t n) {
+-    // if we have more than n chunks, free all of them 
++    // if we have more than n chunks, free all of them
+     ThreadCritical tc;
+     if (_num_chunks > n) {
+       // free chunks at end of queue, for better locality
+       Chunk* cur = _first;
+       for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();
+-      
++
+       if (cur != NULL) {
+-        Chunk* next = cur->next();        
+-        cur->set_next(NULL);              
++        Chunk* next = cur->next();
++        cur->set_next(NULL);
+         cur = next;
+ 
+         // Free all remaining chunks
+         while(cur != NULL) {
+-          next = cur->next();        
++          next = cur->next();
+           os::free(cur);
+           _num_chunks--;
+           cur = next;
+@@ -173,7 +170,7 @@
+ 
+ ChunkPool* ChunkPool::_large_pool  = NULL;
+ ChunkPool* ChunkPool::_medium_pool = NULL;
+-ChunkPool* ChunkPool::_small_pool  = NULL; 
++ChunkPool* ChunkPool::_small_pool  = NULL;
+ 
+ 
+ void chunkpool_init() {
+@@ -202,7 +199,7 @@
+ // Chunk implementation
+ 
+ void* Chunk::operator new(size_t requested_size, size_t length) {
+-  // requested_size is equal to sizeof(Chunk) but in order for the arena 
++  // requested_size is equal to sizeof(Chunk) but in order for the arena
+   // allocations to come out aligned as expected the size must be aligned
+   // to expected arean alignment.
+   // expect requested_size but if sizeof(Chunk) doesn't match isn't proper size we must align it.
+@@ -272,18 +269,18 @@
+   _hwm = _chunk->bottom();      // Save the cached hwm, max
+   _max = _chunk->top();
+   set_size_in_bytes(init_size);
+-} 
++}
+ 
+ Arena::Arena() {
+   _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size);
+   _hwm = _chunk->bottom();      // Save the cached hwm, max
+   _max = _chunk->top();
+   set_size_in_bytes(Chunk::init_size);
+-} 
++}
+ 
+-Arena::Arena(Arena *a) : _chunk(a->_chunk), _hwm(a->_hwm), _max(a->_max), _first(a->_first) { 
++Arena::Arena(Arena *a) : _chunk(a->_chunk), _hwm(a->_hwm), _max(a->_max), _first(a->_first) {
+   set_size_in_bytes(a->size_in_bytes());
+-} 
++}
+ 
+ Arena *Arena::move_contents(Arena *copy) {
+   copy->destruct_contents();
+@@ -307,7 +304,7 @@
+     char* end = _first->next() ? _first->top() : _hwm;
+     free_malloced_objects(_first, _first->bottom(), end, _hwm);
+   }
+-  _first->chop(); 
++  _first->chop();
+   reset();
+ }
+ 
+@@ -347,7 +344,7 @@
+ 
+ 
+ 
+-// Reallocate storage in Arena.  
++// Reallocate storage in Arena.
+ void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size) {
+   assert(new_size >= 0, "bad size");
+   if (new_size == 0) return NULL;
+@@ -407,7 +404,7 @@
+     return false;
+   }
+ #endif
+-  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm ) 
++  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
+     return true;                // Check for in this chunk
+   for (Chunk *c = _first; c; c = c->next()) {
+     if (c == _chunk) continue;  // current chunk has been processed
+@@ -428,14 +425,14 @@
+ }
+ 
+ // for debugging with UseMallocOnly
+-void* Arena::internal_malloc_4(size_t x) { 
++void* Arena::internal_malloc_4(size_t x) {
+   assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
+   if (_hwm + x > _max) {
+     return grow(x);
+   } else {
+-    char *old = _hwm; 
+-    _hwm += x; 
+-    return old; 
++    char *old = _hwm;
++    _hwm += x;
++    return old;
+   }
+ }
+ #endif
+@@ -449,14 +446,14 @@
+ // a memory leak.  Use CHeapObj as the base class of such objects to make it explicit
+ // that they're allocated on the C heap.
+ // Commented out in product version to avoid conflicts with third-party C++ native code.
+-// %% note this is causing a problem on solaris debug build. the global 
++// %% note this is causing a problem on solaris debug build. the global
+ // new is being called from jdk source and causing data corruption.
+ // src/share/native/sun/awt/font/fontmanager/textcache/hsMemory.cpp::hsSoftNew
+ // define CATCH_OPERATOR_NEW_USAGE if you want to use this.
+ #ifdef CATCH_OPERATOR_NEW_USAGE
+ void* operator new(size_t size){
+   static bool warned = false;
+-  if (!warned && warn_new_operator) 
++  if (!warned && warn_new_operator)
+     warning("should not call global (default) operator new");
+   warned = true;
+   return (void *) AllocateHeap(size, "global operator new");
+@@ -484,11 +481,11 @@
+ }
+ 
+ int     AllocStats::num_mallocs() { return os::num_mallocs - start_mallocs; }
+-size_t  AllocStats::alloc_bytes() { return os::alloc_bytes - start_malloc_bytes; } 
+-size_t  AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; } 
+-int     AllocStats::num_frees() { return os::num_frees - start_frees; } 
++size_t  AllocStats::alloc_bytes() { return os::alloc_bytes - start_malloc_bytes; }
++size_t  AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
++int     AllocStats::num_frees() { return os::num_frees - start_frees; }
+ void    AllocStats::print() {
+-  tty->print("%d mallocs (%ldK), %d frees, %ldK resrc", 
++  tty->print("%d mallocs (%ldK), %d frees, %ldK resrc",
+              num_mallocs(), alloc_bytes()/K, num_frees(), resource_bytes()/K);
+ }
+ 
+@@ -540,4 +537,3 @@
+ }
+ 
+ #endif // Non-product
+-
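
The loop being reflowed in the first hunk above is ChunkPool's "free all chunks past the first n" pruning: the head of the free list stays (those chunks are the warmest, hence the locality comment) and the tail is returned to the OS. A minimal standalone sketch of that pruning, with an illustrative Chunk type rather than HotSpot's, assuming n >= 1 as the caller's guard implies:

    #include <cstddef>
    #include <cstdlib>

    struct Chunk {
      Chunk* _next;
      Chunk* next() const       { return _next; }
      void   set_next(Chunk* c) { _next = c; }
    };

    // Keep the first n chunks of a singly-linked free list; free the rest.
    // Precondition: n >= 1 and num_chunks counts the list accurately.
    static void free_all_but(Chunk*& first, size_t& num_chunks, size_t n) {
      if (num_chunks <= n) return;
      Chunk* cur = first;
      for (size_t i = 0; i < n - 1 && cur != NULL; i++) cur = cur->next();
      if (cur != NULL) {
        Chunk* next = cur->next();
        cur->set_next(NULL);          // cut the list after chunk n
        cur = next;
        while (cur != NULL) {         // release the tail
          next = cur->next();
          free(cur);
          num_chunks--;
          cur = next;
        }
      }
    }
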
+diff -ruN openjdk6/hotspot/src/share/vm/memory/allocation.hpp openjdk/hotspot/src/share/vm/memory/allocation.hpp
+--- openjdk6/hotspot/src/share/vm/memory/allocation.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/allocation.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)allocation.hpp	1.77 07/05/05 17:05:42 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #define ARENA_ALIGN_M1 (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1)
+@@ -49,10 +46,10 @@
+ //
+ // The printable subclasses are used for debugging and define virtual
+ // member functions for printing. Classes that avoid allocating the
+-// vtbl entries in the objects should therefore not be the printable 
++// vtbl entries in the objects should therefore not be the printable
+ // subclasses.
+ //
+-// The following macros and function should be used to allocate memory 
++// The following macros and function should be used to allocate memory
+ // directly in the resource area or in the C-heap:
+ //
+ //   NEW_RESOURCE_ARRAY(type,size)
+@@ -73,17 +70,17 @@
+ // a word overhead for empty super classes.
+ 
+ #ifdef PRODUCT
+-#define ALLOCATION_SUPER_CLASS_SPEC 
++#define ALLOCATION_SUPER_CLASS_SPEC
+ #else
+ #define ALLOCATION_SUPER_CLASS_SPEC : public AllocatedObj
+ class AllocatedObj {
+- public:  
++ public:
+   // Printing support
+   void print() const;
+   void print_value() const;
+ 
+   virtual void print_on(outputStream* st) const;
+-  virtual void print_value_on(outputStream* st) const;  
++  virtual void print_value_on(outputStream* st) const;
+ };
+ #endif
+ 
+@@ -147,12 +144,12 @@
+   void  operator delete(void* p);
+   Chunk(size_t length);
+ 
+-  enum { 
++  enum {
+     // default sizes; make them slightly smaller than 2**k to guard against
+     // buddy-system style malloc implementations
+ #ifdef _LP64
+     slack      = 40,            // [RGV] Not sure if this is right, but make it
+-				//       a multiple of 8.
++                                //       a multiple of 8.
+ #else
+     slack      = 20,            // suspected sizeof(Chunk) + internal malloc headers
+ #endif
+@@ -161,7 +158,7 @@
+     medium_size= 10*K  - slack, // Size of medium-sized chunk
+     size       = 32*K  - slack, // Default size of an Arena chunk (following the first)
+     non_pool_size = init_size + 32 // An initial size which is not one of above
+-  };    
++  };
+ 
+   void chop();                  // Chop this chunk
+   void next_chop();             // Chop next chunk
+@@ -205,30 +202,30 @@
+   char* hwm() const             { return _hwm; }
+ 
+   // Fast allocate in the arena.  Common case is: pointer test + increment.
+-  void* Amalloc(size_t x) { 
++  void* Amalloc(size_t x) {
+     assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT) , "should be a power of 2");
+     x = ARENA_ALIGN(x);
+     debug_only(if (UseMallocOnly) return malloc(x);)
+     NOT_PRODUCT(_bytes_allocated += x);
+     if (_hwm + x > _max) {
+-      return grow(x); 
++      return grow(x);
+     } else {
+-      char *old = _hwm; 
+-      _hwm += x; 
+-      return old; 
++      char *old = _hwm;
++      _hwm += x;
++      return old;
+     }
+   }
+   // Further assume size is padded out to words
+-  void *Amalloc_4(size_t x) { 
++  void *Amalloc_4(size_t x) {
+     assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
+     debug_only(if (UseMallocOnly) return malloc(x);)
+     NOT_PRODUCT(_bytes_allocated += x);
+     if (_hwm + x > _max) {
+       return grow(x);
+     } else {
+-      char *old = _hwm; 
+-      _hwm += x; 
+-      return old; 
++      char *old = _hwm;
++      _hwm += x;
++      return old;
+     }
+   }
+ 
+@@ -246,22 +243,22 @@
+     if (_hwm + x > _max) {
+       return grow(x); // grow() returns a result aligned >= 8 bytes.
+     } else {
+-      char *old = _hwm; 
+-      _hwm += x; 
++      char *old = _hwm;
++      _hwm += x;
+ #if defined(SPARC) && !defined(_LP64)
+       old += delta; // align to 8-bytes
+ #endif
+-      return old; 
++      return old;
+     }
+   }
+ 
+   // Fast delete in area.  Common case is: NOP (except for storage reclaimed)
+-  void Afree(void *ptr, size_t size) { 
++  void Afree(void *ptr, size_t size) {
+ #ifdef ASSERT
+     if (ZapResourceArea) memset(ptr, badResourceValue, size); // zap freed memory
+     if (UseMallocOnly) return;
+ #endif
+-    if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr;  
++    if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr;
+   }
+ 
+   void *Arealloc( void *old_ptr, size_t old_size, size_t new_size );
+@@ -275,7 +272,7 @@
+   // Total of all chunks in use (not thread-safe)
+   size_t used() const;
+ 
+-  // Total # of bytes used  
++  // Total # of bytes used
+   size_t size_in_bytes() const         NOT_PRODUCT({  return _size_in_bytes; }) PRODUCT_RETURN0;
+   void set_size_in_bytes(size_t size)  NOT_PRODUCT({ _size_in_bytes = size;  }) PRODUCT_RETURN;
+   static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2)  PRODUCT_RETURN;
+@@ -287,9 +284,9 @@
+     _first = _chunk = NULL;
+     _hwm = _max = NULL;
+   }
+-}; 
++};
+ 
+-// One of the following macros must be used when allocating 
++// One of the following macros must be used when allocating
+ // an array or object from an arena
+ #define NEW_ARENA_ARRAY(arena, type, size)\
+   (type*) arena->Amalloc((size) * sizeof(type))
+@@ -387,11 +384,11 @@
+   size_t start_malloc_bytes, start_res_bytes;
+  public:
+   AllocStats();
+-  
++
+   int    num_mallocs();    // since creation of receiver
+-  size_t alloc_bytes(); 
+-  size_t resource_bytes(); 
+-  int    num_frees(); 
++  size_t alloc_bytes();
++  size_t resource_bytes();
++  int    num_frees();
+   void   print();
+ };
+ #endif
+@@ -410,4 +407,3 @@
+   ReallocMark()   PRODUCT_RETURN;
+   void check()    PRODUCT_RETURN;
+ };
+-
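
Amalloc, reindented above, is deliberately just "pointer test + increment", with chunk sizes picked slightly under powers of two (the slack entries) so buddy-system mallocs don't round a chunk up to the next size class. A freestanding sketch of the fast path and the ARENA_ALIGN-style round-up; TinyArena and the word alignment are illustrative, not HotSpot's types:

    #include <cassert>
    #include <cstddef>

    // Round x up to a multiple of align, which must be a power of two.
    static inline size_t align_up(size_t x, size_t align) {
      assert((align & (align - 1)) == 0 && "align must be a power of two");
      return (x + align - 1) & ~(align - 1);
    }

    struct TinyArena {
      char* _hwm;   // high-water mark: next free byte in the current chunk
      char* _max;   // end of the current chunk
      void* amalloc(size_t x) {
        x = align_up(x, sizeof(void*));
        if (_hwm + x > _max) return NULL;  // the real Arena grows a chunk here
        char* old = _hwm;
        _hwm += x;
        return old;
      }
    };
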
+diff -ruN openjdk6/hotspot/src/share/vm/memory/allocation.inline.hpp openjdk/hotspot/src/share/vm/memory/allocation.inline.hpp
+--- openjdk6/hotspot/src/share/vm/memory/allocation.inline.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/allocation.inline.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)allocation.inline.hpp	1.22 07/05/05 17:05:42 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Explicit C-heap memory management
+diff -ruN openjdk6/hotspot/src/share/vm/memory/allocationStats.cpp openjdk/hotspot/src/share/vm/memory/allocationStats.cpp
+--- openjdk6/hotspot/src/share/vm/memory/allocationStats.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/allocationStats.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)allocationStats.cpp	1.6 07/05/05 17:05:42 JVM"
+-#endif
+ /*
+  * Copyright 2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -31,4 +28,3 @@
+ // Technically this should be derived from machine speed, and
+ // ideally it would be dynamically adjusted.
+ float AllocationStats::_threshold = ((float)CMS_SweepTimerThresholdMillis)/1000;
+-
+diff -ruN openjdk6/hotspot/src/share/vm/memory/allocationStats.hpp openjdk/hotspot/src/share/vm/memory/allocationStats.hpp
+--- openjdk6/hotspot/src/share/vm/memory/allocationStats.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/allocationStats.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)allocationStats.hpp	1.19 07/05/05 17:05:41 JVM"
+-#endif
+ /*
+  * Copyright 2001-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class AllocationStats VALUE_OBJ_CLASS_SPEC {
+@@ -43,20 +40,20 @@
+   // this size is then simply computed as the product of these two
+   // estimates.
+   AdaptivePaddedAverage _demand_rate_estimate;
+-     
++
+   ssize_t     _desired;          // Estimate computed as described above
+-  ssize_t     _coalDesired;   	// desired +/- small-percent for tuning coalescing
++  ssize_t     _coalDesired;     // desired +/- small-percent for tuning coalescing
+ 
+-  ssize_t     _surplus;       	// count - (desired +/- small-percent), 
+-				// used to tune splitting in best fit
+-  ssize_t     _bfrSurp;     	// surplus at start of current sweep
+-  ssize_t     _prevSweep;     	// count from end of previous sweep
+-  ssize_t     _beforeSweep;   	// count from before current sweep
+-  ssize_t     _coalBirths;    	// additional chunks from coalescing
+-  ssize_t     _coalDeaths;    	// loss from coalescing
+-  ssize_t     _splitBirths;   	// additional chunks from splitting
+-  ssize_t     _splitDeaths;   	// loss from splitting
+-  size_t     _returnedBytes;	// number of bytes returned to list.
++  ssize_t     _surplus;         // count - (desired +/- small-percent),
++                                // used to tune splitting in best fit
++  ssize_t     _bfrSurp;         // surplus at start of current sweep
++  ssize_t     _prevSweep;       // count from end of previous sweep
++  ssize_t     _beforeSweep;     // count from before current sweep
++  ssize_t     _coalBirths;      // additional chunks from coalescing
++  ssize_t     _coalDeaths;      // loss from coalescing
++  ssize_t     _splitBirths;     // additional chunks from splitting
++  ssize_t     _splitDeaths;     // loss from splitting
++  size_t     _returnedBytes;    // number of bytes returned to list.
+  public:
+   void initialize() {
+     AdaptivePaddedAverage* dummy =
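
The fields above drive free-list tuning: _desired is derived from a padded, exponentially decaying demand-rate estimate, and _surplus compares the live count against it to bias splitting and coalescing. A rough stand-in for such an estimator; the weights and padding are assumptions, not AdaptivePaddedAverage's actual internals:

    // Decaying average plus a pad proportional to observed deviation.
    struct PaddedAverage {
      float _avg;      // decaying average of samples
      float _dev;      // decaying average of |sample - _avg|
      float _weight;   // assumed 0.25f: how fast new samples dominate
      float _pad;      // assumed 2.0f: safety margin in deviations

      PaddedAverage(float w, float p)
        : _avg(0.0f), _dev(0.0f), _weight(w), _pad(p) {}

      void sample(float v) {
        float err = v - _avg;
        _avg += _weight * err;
        _dev += _weight * ((err < 0.0f ? -err : err) - _dev);
      }
      // The "desired" figure reads the average padded upward.
      float padded_average() const { return _avg + _pad * _dev; }
    };
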
+diff -ruN openjdk6/hotspot/src/share/vm/memory/barrierSet.hpp openjdk/hotspot/src/share/vm/memory/barrierSet.hpp
+--- openjdk6/hotspot/src/share/vm/memory/barrierSet.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/barrierSet.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)barrierSet.hpp	1.18 07/05/05 17:05:43 JVM"
+-#endif
+ /*
+  * Copyright 2000-2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This class provides the interface between a barrier implementation and
+@@ -42,7 +39,7 @@
+ protected:
+   int _max_covered_regions;
+   Name _kind;
+-  
++
+ public:
+ 
+   // To get around prohibition on RTTI.
+@@ -76,7 +73,7 @@
+   // Invoke the barrier, if any, necessary when writing "new_val" into the
+   // ref field at "offset" in "obj".
+   // (For efficiency reasons, this operation is specialized for certain
+-  // barrier types.  Semantically, it should be thought of as a call to the 
++  // barrier types.  Semantically, it should be thought of as a call to the
+   // virtual "_work" function below, which must implement the barrier.)
+   inline void write_ref_field(oop* field, oop new_val);
+ protected:
+@@ -86,7 +83,7 @@
+   // Invoke the barrier, if any, necessary when writing the "bytes"-byte
+   // value(s) "val1" (and "val2") into the primitive "field".
+   virtual void write_prim_field(HeapWord* field, size_t bytes,
+-				juint val1, juint val2) = 0;
++                                juint val1, juint val2) = 0;
+ 
+   // Operations on arrays, or general regions (e.g., for "clone") may be
+   // optimized by some barriers.
+@@ -106,7 +103,7 @@
+   // barrier for an array whose elements are all in the given memory region.
+   virtual void read_ref_array(MemRegion mr) = 0;
+   virtual void read_prim_array(MemRegion mr) = 0;
+-  
++
+   inline void write_ref_array(MemRegion mr);
+ protected:
+   virtual void write_ref_array_work(MemRegion mr) = 0;
+@@ -116,7 +113,7 @@
+   virtual void read_region(MemRegion mr) = 0;
+ 
+   // (For efficiency reasons, this operation is specialized for certain
+-  // barrier types.  Semantically, it should be thought of as a call to the 
++  // barrier types.  Semantically, it should be thought of as a call to the
+   // virtual "_work" function below, which must implement the barrier.)
+   inline void write_region(MemRegion mr);
+ protected:
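
write_ref_field above is the hook through which a collector sees every reference store; the card-table implementation later in this patch reduces it to one shift and one byte store. A toy sketch of that flavor, where the 512-byte card size and the pre-biased table pointer are assumptions that mirror, rather than quote, the real code:

    #include <cstdint>

    static const int      kCardShift = 9;    // log2 of a 512-byte card
    static unsigned char* card_table_base;   // assumed biased so heap
                                             // addresses index the map
    // Post-write barrier: after the store, dirty the card covering the
    // field so the next collection rescans that 512-byte window.
    static inline void write_ref_field(void** field, void* new_val) {
      *field = new_val;
      card_table_base[(uintptr_t)field >> kCardShift] = 0;  // 0 == dirty
    }
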
+diff -ruN openjdk6/hotspot/src/share/vm/memory/barrierSet.inline.hpp openjdk/hotspot/src/share/vm/memory/barrierSet.inline.hpp
+--- openjdk6/hotspot/src/share/vm/memory/barrierSet.inline.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/barrierSet.inline.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)barrierSet.inline.hpp	1.12 07/05/05 17:05:43 JVM"
+-#endif
+ /*
+  * Copyright 2001-2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Inline functions of BarrierSet, which de-virtualize certain
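
The "de-virtualize" remark above names a standard trick: stamp each barrier with its concrete kind (the _kind field seen in barrierSet.hpp) so hot paths test the tag once and call a non-virtual, inlinable fast path instead of paying a virtual dispatch per store. A self-contained sketch of the pattern with illustrative types:

    struct Barrier {
      enum Kind { Other, CardTable };
      Kind _kind;
      virtual void write_ref_work(void** f, void* v) = 0;
      virtual ~Barrier() {}
    };

    struct CardBarrier : Barrier {
      CardBarrier() { _kind = CardTable; }
      void inline_write_ref(void** f, void* v) { *f = v; /* + dirty card */ }
      virtual void write_ref_work(void** f, void* v) { inline_write_ref(f, v); }
    };

    // One tag test, then a direct call the compiler can inline.
    inline void write_ref(Barrier* bs, void** f, void* v) {
      if (bs->_kind == Barrier::CardTable)
        static_cast<CardBarrier*>(bs)->inline_write_ref(f, v);
      else
        bs->write_ref_work(f, v);
    }
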
+diff -ruN openjdk6/hotspot/src/share/vm/memory/blockOffsetTable.cpp openjdk/hotspot/src/share/vm/memory/blockOffsetTable.cpp
+--- openjdk6/hotspot/src/share/vm/memory/blockOffsetTable.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/blockOffsetTable.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)blockOffsetTable.cpp	1.82 07/05/05 17:05:42 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -33,7 +30,7 @@
+ //////////////////////////////////////////////////////////////////////
+ 
+ BlockOffsetSharedArray::BlockOffsetSharedArray(MemRegion reserved,
+-					       size_t init_word_size):
++                                               size_t init_word_size):
+   _reserved(reserved), _end(NULL)
+ {
+   size_t size = compute_size(reserved.word_size());
+@@ -56,7 +53,7 @@
+     gclog_or_tty->print_cr("  "
+                   "  _vs.low_boundary(): " INTPTR_FORMAT
+                   "  _vs.high_boundary(): " INTPTR_FORMAT,
+-                  _vs.low_boundary(), 
++                  _vs.low_boundary(),
+                   _vs.high_boundary());
+   }
+ }
+@@ -106,7 +103,7 @@
+ //////////////////////////////////////////////////////////////////////
+ 
+ BlockOffsetArray::BlockOffsetArray(BlockOffsetSharedArray* array,
+-				   MemRegion mr, bool init_to_zero) :
++                                   MemRegion mr, bool init_to_zero) :
+   BlockOffsetTable(mr.start(), mr.end()),
+   _array(array),
+   _init_to_zero(init_to_zero)
+@@ -133,14 +130,14 @@
+     return;
+   }
+ 
+-  // Write the backskip value for each region.  
++  // Write the backskip value for each region.
+   //
+-  //	offset
++  //    offset
+   //    card             2nd                       3rd
+   //     | +- 1st        |                         |
+   //     v v             v                         v
+   //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
+-  //    |x|0|0|0|0|0|0|0|1|1|1|1|1|1| ... |1|1|1|1|2|2|2|2|2|2| ... 
++  //    |x|0|0|0|0|0|0|0|1|1|1|1|1|1| ... |1|1|1|1|2|2|2|2|2|2| ...
+   //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
+   //    11              19                        75
+   //      12
+@@ -154,7 +151,7 @@
+   //    3rd - start of third logarithmic region
+   //      2 corresponds to logarithmic value N_words + 2 and 2**(3 * 2) = 64
+   //
+-  //    integer below the block offset entry is an example of 
++  //    integer below the block offset entry is an example of
+   //    the index of the entry
+   //
+   //    Given an address,
+@@ -163,7 +160,7 @@
+   //      Convert the entry to a back slide
+   //        (e.g., with today's, offset = 0x81 =>
+   //          back slip = 2**(3*(0x81 - N_words)) = 2**3) = 8
+-  //      Move back N (e.g., 8) entries and repeat with the 
++  //      Move back N (e.g., 8) entries and repeat with the
+   //        value of the new entry
+   //
+   size_t start_card = _array->index_for(start);
+@@ -209,7 +206,7 @@
+ // is an expensive check -- use with care and only under protection of
+ // suitable flag.
+ void BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const {
+-  
++
+   if (end_card < start_card) {
+     return;
+   }
+@@ -277,7 +274,7 @@
+     assert(boundary <= (HeapWord*)boundary_before_end, "tautology");
+     switch (action) {
+       case Action_mark: {
+-	if (init_to_zero()) {
++        if (init_to_zero()) {
+           _array->set_offset_array(start_index, boundary, blk_start);
+           break;
+         } // Else fall through to the next case
+@@ -287,10 +284,10 @@
+         // We have finished marking the "offset card". We need to now
+         // mark the subsequent cards that this blk spans.
+         if (start_index < end_index) {
+-	  HeapWord* rem_st = _array->address_for_index(start_index) + N_words;
+-	  HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
++          HeapWord* rem_st = _array->address_for_index(start_index) + N_words;
++          HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
+           set_remainder_to_point_to_start(rem_st, rem_end);
+-	}
++        }
+         break;
+       }
+       case Action_check: {
+@@ -344,14 +341,14 @@
+     // First check if the start is an allocated block and only
+     // then if it is a valid object.
+     oop o = oop(start);
+-    assert(!Universe::is_fully_initialized() || 
+-	   _sp->is_free_block(start) ||
+-	   o->is_oop_or_null(), "Bad object was found");
++    assert(!Universe::is_fully_initialized() ||
++           _sp->is_free_block(start) ||
++           o->is_oop_or_null(), "Bad object was found");
+     next_index++;
+     last_p = p;
+     last_start = start;
+     last_o = o;
+-  }  
++  }
+ }
+ 
+ //////////////////////////////////////////////////////////////////////
+@@ -376,9 +373,9 @@
+ // Adjust BOT to show that a previously whole block has been split
+ // into two.  We verify the BOT for the first part (prefix) and
+ // update the  BOT for the second part (suffix).
+-// 	blk is the start of the block
+-//	blk_size is the size of the original block
+-//	left_blk_size is the size of the first part of the split
++//      blk is the start of the block
++//      blk_size is the size of the original block
++//      left_blk_size is the size of the first part of the split
+ void BlockOffsetArrayNonContigSpace::split_block(HeapWord* blk,
+                                                  size_t blk_size,
+                                                  size_t left_blk_size) {
+@@ -427,7 +424,7 @@
+ 
+   // Calculate the # cards that the prefix and suffix affect.
+   size_t num_pref_cards = suff_index - pref_index;
+-  
++
+   size_t num_suff_cards = end_index  - suff_index;
+   // Change the cards that need changing
+   if (num_suff_cards > 0) {
+@@ -445,7 +442,7 @@
+         // the "offset card" in the suffix block.
+         set_remainder_to_point_to_start_incl(suff_index + 1,
+           suff_index + num_pref_cards - 1);
+-        // Fix the appropriate cards in the remainder of the 
++        // Fix the appropriate cards in the remainder of the
+         // suffix block -- these are the last num_pref_cards
+         // cards in each power block of the "new" range plumbed
+         // from suff_addr.
+@@ -526,7 +523,7 @@
+   size_t index = _array->index_for(addr);
+   HeapWord* q = _array->address_for_index(index);
+ 
+-  uint offset = _array->offset_array(index);	// Extend u_char to uint.
++  uint offset = _array->offset_array(index);    // Extend u_char to uint.
+   while (offset >= N_words) {
+     // The excess of the offset from N_words indicates a power of Base
+     // to go back by.
+@@ -598,7 +595,7 @@
+   if (VerifyBlockOffsetArray) {
+     do_block_internal(blk_start, blk_end, Action_check);
+   }
+-} 
++}
+ 
+ void BlockOffsetArrayNonContigSpace::verify_single_block(
+   HeapWord* blk, size_t size) {
+@@ -644,7 +641,7 @@
+   index = MIN2(index, _next_offset_index-1);
+   HeapWord* q = _array->address_for_index(index);
+ 
+-  uint offset = _array->offset_array(index);	// Extend u_char to uint.
++  uint offset = _array->offset_array(index);    // Extend u_char to uint.
+   while (offset > N_words) {
+     // The excess of the offset from N_words indicates a power of Base
+     // to go back by.
+@@ -674,34 +671,34 @@
+   return q;
+ }
+ 
+-//		
+-//		_next_offset_threshold
+-// 		|   _next_offset_index
+-//	    	v   v
+-//	+-------+-------+-------+-------+-------+
+-//	| i-1	|   i	| i+1	| i+2	| i+3	|
+-//	+-------+-------+-------+-------+-------+
+-//	 ( ^    ]
+-//	   block-start
+-//		
++//
++//              _next_offset_threshold
++//              |   _next_offset_index
++//              v   v
++//      +-------+-------+-------+-------+-------+
++//      | i-1   |   i   | i+1   | i+2   | i+3   |
++//      +-------+-------+-------+-------+-------+
++//       ( ^    ]
++//         block-start
++//
+ 
+ void BlockOffsetArrayContigSpace::alloc_block_work(HeapWord* blk_start,
+-					HeapWord* blk_end) {
++                                        HeapWord* blk_end) {
+   assert(blk_start != NULL && blk_end > blk_start,
+          "phantom block");
+   assert(blk_end > _next_offset_threshold,
+-	 "should be past threshold");
++         "should be past threshold");
+   assert(blk_start <= _next_offset_threshold,
+-	 "blk_start should be at or before threshold")
++         "blk_start should be at or before threshold")
+   assert(pointer_delta(_next_offset_threshold, blk_start) <= N_words,
+-	 "offset should be <= BlockOffsetSharedArray::N");
++         "offset should be <= BlockOffsetSharedArray::N");
+   assert(Universe::heap()->is_in_reserved(blk_start),
+-	 "reference must be into the heap");
++         "reference must be into the heap");
+   assert(Universe::heap()->is_in_reserved(blk_end-1),
+-	 "limit must be within the heap");
++         "limit must be within the heap");
+   assert(_next_offset_threshold ==
+-	 _array->_reserved.start() + _next_offset_index*N_words,
+-	 "index must agree with threshold");
++         _array->_reserved.start() + _next_offset_index*N_words,
++         "index must agree with threshold");
+ 
+   debug_only(size_t orig_next_offset_index = _next_offset_index;)
+ 
+@@ -733,21 +730,21 @@
+   _next_offset_threshold = _array->address_for_index(end_index) +
+     N_words;
+   assert(_next_offset_threshold >= blk_end, "Incorrect offset threshold");
+-  
++
+ #ifdef ASSERT
+   // The offset can be 0 if the block starts on a boundary.  That
+   // is checked by an assertion above.
+   size_t start_index = _array->index_for(blk_start);
+   HeapWord* boundary    = _array->address_for_index(start_index);
+   assert((_array->offset_array(orig_next_offset_index) == 0 &&
+-	  blk_start == boundary) ||
+-	  (_array->offset_array(orig_next_offset_index) > 0 &&
+-	 _array->offset_array(orig_next_offset_index) <= N_words),
++          blk_start == boundary) ||
++          (_array->offset_array(orig_next_offset_index) > 0 &&
++         _array->offset_array(orig_next_offset_index) <= N_words),
+          "offset array should have been set");
+   for (size_t j = orig_next_offset_index + 1; j <= end_index; j++) {
+-    assert(_array->offset_array(j) > 0 && 
+-  	   _array->offset_array(j) <= (u_char) (N_words+N_powers-1), 
+-	   "offset array should have been set");
++    assert(_array->offset_array(j) > 0 &&
++           _array->offset_array(j) <= (u_char) (N_words+N_powers-1),
++           "offset array should have been set");
+   }
+ #endif
+ }
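
The diagram and comments reindented above describe the table's logarithmic encoding: an entry below N_words is a direct word offset back to the block start, while an entry of N_words + k means "skip back Base**k cards and look again". A standalone decoder sketch; N_words and Base here are illustrative stand-ins, not the build's actual constants:

    #include <cstddef>

    static const size_t N_words = 64;   // words per card (illustrative)
    static const size_t LogBase = 3;    // Base == 8

    // Walk backskip entries until a direct offset is found, then return
    // the word index of the block start within the covered region.
    static size_t block_start_word(const unsigned char* offsets, size_t card) {
      size_t entry = offsets[card];
      while (entry >= N_words) {
        card  -= (size_t)1 << (LogBase * (entry - N_words));  // backskip
        entry  = offsets[card];
      }
      // entry is now the distance, in words, from this card's first
      // word back to the start of the block that covers it.
      return card * N_words - entry;
    }
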
+diff -ruN openjdk6/hotspot/src/share/vm/memory/blockOffsetTable.hpp openjdk/hotspot/src/share/vm/memory/blockOffsetTable.hpp
+--- openjdk6/hotspot/src/share/vm/memory/blockOffsetTable.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/blockOffsetTable.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)blockOffsetTable.hpp	1.57 07/05/05 17:05:43 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The CollectedHeap type requires subtypes to implement a method
+@@ -35,7 +32,7 @@
+ //   - BlockOffsetArray (abstract)
+ //     - BlockOffsetArrayNonContigSpace
+ //     - BlockOffsetArrayContigSpace
+-//      
++//
+ 
+ class ContiguousSpace;
+ class SerializeOopClosure;
+@@ -49,8 +46,8 @@
+   // These members describe the region covered by the table.
+ 
+   // The space this table is covering.
+-  HeapWord* _bottom;	// == reserved.start
+-  HeapWord* _end;	// End of currently allocated region.
++  HeapWord* _bottom;    // == reserved.start
++  HeapWord* _end;       // End of currently allocated region.
+ 
+ public:
+   // Initialize the table to cover the given space.
+@@ -119,7 +116,7 @@
+   // Array for keeping offsets for retrieving object start fast given an
+   // address.
+   VirtualSpace _vs;
+-  u_char* _offset_array;	  // byte array keeping backwards offsets
++  u_char* _offset_array;          // byte array keeping backwards offsets
+ 
+  protected:
+   // Bounds checking accessors:
+@@ -157,24 +154,24 @@
+     assert(index < _vs.committed_size(), "index out of range");
+     assert(high >= low, "addresses out of order");
+     assert(pointer_delta(high, low) <= N_words, "offset too large");
+-    assert(_offset_array[index] == pointer_delta(high, low), 
++    assert(_offset_array[index] == pointer_delta(high, low),
+            "Wrong offset");
+   }
+ 
+   bool is_card_boundary(HeapWord* p) const;
+ 
+-  // Return the number of slots needed for an offset array 
++  // Return the number of slots needed for an offset array
+   // that covers mem_region_words words.
+-  // We always add an extra slot because if an object 
+-  // ends on a card boundary we put a 0 in the next 
+-  // offset array slot, so we want that slot always 
++  // We always add an extra slot because if an object
++  // ends on a card boundary we put a 0 in the next
++  // offset array slot, so we want that slot always
+   // to be reserved.
+- 
++
+   size_t compute_size(size_t mem_region_words) {
+     size_t number_of_slots = (mem_region_words / N_words) + 1;
+     return ReservedSpace::allocation_align_size_up(number_of_slots);
+   }
+-  
++
+ public:
+   // Initialize the table to cover from "base" to (at least)
+   // "base + init_word_size".  In the future, the table may be expanded
+@@ -198,7 +195,7 @@
+   // Return the appropriate index into "_offset_array" for "p".
+   size_t index_for(const void* p) const;
+ 
+-  // Return the address indicating the start of the region corresponding to 
++  // Return the address indicating the start of the region corresponding to
+   // "index" in "_offset_array".
+   HeapWord* address_for_index(size_t index) const;
+ 
+@@ -380,7 +377,7 @@
+   }
+ 
+   // The following methods are useful and optimized for a
+-  // non-contiguous space. 
++  // non-contiguous space.
+ 
+   // Given a block [blk_start, blk_start + full_blk_size), and
+   // a left_blk_size < full_blk_size, adjust the BOT to show two
+@@ -406,10 +403,10 @@
+   // verified in the non-product VM) that the BOT is correct for
+   // the given block.
+   void allocated(HeapWord* blk_start, HeapWord* blk_end) {
+-    // Verify that the BOT shows [blk, blk + blk_size) to be one block. 
+-    verify_single_block(blk_start, blk_end); 
++    // Verify that the BOT shows [blk, blk + blk_size) to be one block.
++    verify_single_block(blk_start, blk_end);
+     if (BlockOffsetArrayUseUnallocatedBlock) {
+-      _unallocated_block = MAX2(_unallocated_block, blk_end); 
++      _unallocated_block = MAX2(_unallocated_block, blk_end);
+     }
+   }
+ 
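
compute_size above reserves one offset-array slot per card plus one spare, because a block that ends exactly on a card boundary writes a 0 into the next slot. Restated as a sketch with an illustrative N_words:

    #include <cstddef>

    static const size_t N_words = 64;   // words per card (illustrative)

    // One slot per card, plus the spare for boundary-ending blocks.
    static size_t offset_array_slots(size_t mem_region_words) {
      return mem_region_words / N_words + 1;
    }
    // e.g. a region of 131072 words (1 MiB of 8-byte words) needs
    // 131072 / 64 + 1 == 2049 slots before the final page round-up.
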
+diff -ruN openjdk6/hotspot/src/share/vm/memory/blockOffsetTable.inline.hpp openjdk/hotspot/src/share/vm/memory/blockOffsetTable.inline.hpp
+--- openjdk6/hotspot/src/share/vm/memory/blockOffsetTable.inline.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/blockOffsetTable.inline.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)blockOffsetTable.inline.hpp	1.20 07/05/05 17:05:43 JVM"
+-#endif
+ /*
+  * Copyright 2000-2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //////////////////////////////////////////////////////////////////////////
+@@ -42,8 +39,8 @@
+ inline size_t BlockOffsetSharedArray::index_for(const void* p) const {
+   char* pc = (char*)p;
+   assert(pc >= (char*)_reserved.start() &&
+-	 pc <  (char*)_reserved.end(),
+-	 "p not in range.");
++         pc <  (char*)_reserved.end(),
++         "p not in range.");
+   size_t delta = pointer_delta(pc, _reserved.start(), sizeof(char));
+   size_t result = delta >> LogN;
+   assert(result < _vs.committed_size(), "bad index from address");
+@@ -57,7 +54,7 @@
+          "bad address from index");
+   return result;
+ }
+-    
++
+ 
+ //////////////////////////////////////////////////////////////////////////
+ // BlockOffsetArrayNonContigSpace inlines
+diff -ruN openjdk6/hotspot/src/share/vm/memory/cardTableModRefBS.cpp openjdk/hotspot/src/share/vm/memory/cardTableModRefBS.cpp
+--- openjdk6/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)cardTableModRefBS.cpp	1.58 07/05/29 09:44:14 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This kind of "BarrierSet" allows a "CollectedHeap" to detect and
+@@ -32,9 +29,30 @@
+ # include "incls/_precompiled.incl"
+ # include "incls/_cardTableModRefBS.cpp.incl"
+ 
++size_t CardTableModRefBS::cards_required(size_t covered_words)
++{
++  // Add one for a guard card, used to detect errors.
++  const size_t words = align_size_up(covered_words, card_size_in_words);
++  return words / card_size_in_words + 1;
++}
++
++size_t CardTableModRefBS::compute_byte_map_size()
++{
++  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
++                                        "uninitialized, check declaration order");
++  assert(_page_size != 0, "uninitialized, check declaration order");
++  const size_t granularity = os::vm_allocation_granularity();
++  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
++}
++
+ CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
+-				     int max_covered_regions) :
+-  ModRefBarrierSet(max_covered_regions), _whole_heap(whole_heap)
++                                     int max_covered_regions):
++  ModRefBarrierSet(max_covered_regions),
++  _whole_heap(whole_heap),
++  _guard_index(cards_required(whole_heap.word_size()) - 1),
++  _last_valid_index(_guard_index - 1),
++  _page_size(os::page_size_for_region(_guard_index + 1, _guard_index + 1, 1)),
++  _byte_map_size(compute_byte_map_size())
+ {
+   _kind = BarrierSet::CardTableModRef;
+ 
+@@ -43,14 +61,7 @@
+   assert((uintptr_t(low_bound)  & (card_size - 1))  == 0, "heap must start at card boundary");
+   assert((uintptr_t(high_bound) & (card_size - 1))  == 0, "heap must end at card boundary");
+ 
+-  assert(card_size <= 512, "card_size must be less than 512");
+-  size_t heap_size_in_words = _whole_heap.word_size();
+-  // Add one for the last_card, treated as a guard card
+-  _byte_map_size = ReservedSpace::allocation_align_size_up((heap_size_in_words / 
+-                                                      card_size_in_words) + 1);
+-  // A couple of useful indicies
+-  _guard_index      = _byte_map_size - 1;
+-  _last_valid_index = _byte_map_size - 2;
++  assert(card_size <= 512, "card_size must be at most 512"); // why?
+ 
+   _covered   = new MemRegion[max_covered_regions];
+   _committed = new MemRegion[max_covered_regions];
+@@ -63,13 +74,19 @@
+   }
+   _cur_covered_regions = 0;
+ 
+-  ReservedSpace heap_rs(_byte_map_size);
++  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
++    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
++  ReservedSpace heap_rs(_byte_map_size, rs_align, false);
++  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
++                       _page_size, heap_rs.base(), heap_rs.size());
+   if (!heap_rs.is_reserved()) {
+-    vm_exit_during_initialization("Could not reserve enough space for card marking array");
++    vm_exit_during_initialization("Could not reserve enough space for the "
++                                  "card marking array");
+   }
+-  // The assember store_check code will do an unsigned shift of the oop, 
++
++  // The assembler store_check code will do an unsigned shift of the oop,
+   // then add it to byte_map_base, i.e.
+-  // 
++  //
+   //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
+   _byte_map = (jbyte*) heap_rs.base();
+   byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
+@@ -77,11 +94,11 @@
+   assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
+ 
+   jbyte* guard_card = &_byte_map[_guard_index];
+-  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, os::vm_page_size());
+-  _guard_region = MemRegion((HeapWord*)guard_page, os::vm_page_size());
+-  if (!os::commit_memory((char*)guard_page, os::vm_page_size())) {
++  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
++  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
++  if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) {
+     // Do better than this for Merlin
+-    vm_exit_out_of_memory(os::vm_page_size(), "card table last card");
++    vm_exit_out_of_memory(_page_size, "card table last card");
+   }
+   *guard_card = last_card;
+ 
+@@ -94,8 +111,8 @@
+   _last_LNC_resizing_collection =
+     NEW_C_HEAP_ARRAY(int, max_covered_regions);
+   if (_lowest_non_clean == NULL
+-      || _lowest_non_clean_chunk_size == NULL 
+-      || _lowest_non_clean_base_chunk_index == NULL 
++      || _lowest_non_clean_chunk_size == NULL
++      || _lowest_non_clean_base_chunk_index == NULL
+       || _last_LNC_resizing_collection == NULL)
+     vm_exit_during_initialization("couldn't allocate an LNC array.");
+   for (i = 0; i < max_covered_regions; i++) {
+@@ -125,7 +142,7 @@
+   }
+   // If we didn't find it, create a new one.
+   assert(_cur_covered_regions < _max_covered_regions,
+-	 "too many covered regions");
++         "too many covered regions");
+   // Move the ones above up, to maintain sorted order.
+   for (int j = _cur_covered_regions; j > i; j--) {
+     _covered[j] = _covered[j-1];
+@@ -136,8 +153,7 @@
+   _covered[res].set_start(base);
+   _covered[res].set_word_size(0);
+   jbyte* ct_start = byte_for(base);
+-  uintptr_t ct_start_aligned =
+-    align_size_down((uintptr_t)ct_start, os::vm_page_size());
++  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
+   _committed[res].set_start((HeapWord*)ct_start_aligned);
+   _committed[res].set_word_size(0);
+   return res;
+@@ -162,7 +178,7 @@
+   return max_end;
+ }
+ 
+-MemRegion CardTableModRefBS::committed_unique_to_self(int self, 
++MemRegion CardTableModRefBS::committed_unique_to_self(int self,
+                                                       MemRegion mr) const {
+   MemRegion result = mr;
+   for (int r = 0; r < _cur_covered_regions; r += 1) {
+@@ -177,8 +193,8 @@
+ 
+ void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
+   // We don't change the start of a region, only the end.
+-  assert(_whole_heap.contains(new_region), 
+-	   "attempt to cover area not in reserved area");
++  assert(_whole_heap.contains(new_region),
++           "attempt to cover area not in reserved area");
+   debug_only(verify_guard();)
+   int ind = find_covering_region_by_base(new_region.start());
+   MemRegion old_region = _covered[ind];
+@@ -186,7 +202,7 @@
+   if (new_region.word_size() != old_region.word_size()) {
+     // Commit new or uncommit old pages, if necessary.
+     MemRegion cur_committed = _committed[ind];
+-    // Extend the end of this _commited region 
++    // Extend the end of this _committed region
+     // to cover the end of any lower _committed regions.
+     // This forms overlapping regions, but never interior regions.
+     HeapWord* max_prev_end = largest_prev_committed_end(ind);
+@@ -196,7 +212,7 @@
+     // Align the end up to a page size (starts are already aligned).
+     jbyte* new_end = byte_after(new_region.last());
+     HeapWord* new_end_aligned =
+-      (HeapWord*)align_size_up((uintptr_t)new_end, os::vm_page_size());
++      (HeapWord*)align_size_up((uintptr_t)new_end, _page_size);
+     assert(new_end_aligned >= (HeapWord*) new_end,
+            "align up, but less");
+     // The guard page is always committed and should not be committed over.
+@@ -204,25 +220,25 @@
+     if (new_end_for_commit > cur_committed.end()) {
+       // Must commit new pages.
+       MemRegion new_committed =
+-	MemRegion(cur_committed.end(), new_end_for_commit);
++        MemRegion(cur_committed.end(), new_end_for_commit);
+ 
+       assert(!new_committed.is_empty(), "Region should not be empty here");
+       if (!os::commit_memory((char*)new_committed.start(),
+-	                     new_committed.byte_size())) {
++                             new_committed.byte_size(), _page_size)) {
+         // Do better than this for Merlin
+         vm_exit_out_of_memory(new_committed.byte_size(),
+-	        "card table expansion");
++                "card table expansion");
+       }
+     // Use new_end_aligned (as opposed to new_end_for_commit) because
+     // the cur_committed region may include the guard region.
+     } else if (new_end_aligned < cur_committed.end()) {
+       // Must uncommit pages.
+-      MemRegion uncommit_region = 
++      MemRegion uncommit_region =
+         committed_unique_to_self(ind, MemRegion(new_end_aligned,
+                                                 cur_committed.end()));
+       if (!uncommit_region.is_empty()) {
+         if (!os::uncommit_memory((char*)uncommit_region.start(),
+-			         uncommit_region.byte_size())) {
++                                 uncommit_region.byte_size())) {
+           // Do better than this for Merlin
+           vm_exit_out_of_memory(uncommit_region.byte_size(),
+             "card table contraction");
+@@ -254,7 +270,7 @@
+     gclog_or_tty->print_cr("  "
+                   "  _covered[%d].start(): " INTPTR_FORMAT
+                   "  _covered[%d].last(): " INTPTR_FORMAT,
+-                  ind, _covered[ind].start(), 
++                  ind, _covered[ind].start(),
+                   ind, _covered[ind].last());
+     gclog_or_tty->print_cr("  "
+                   "  _committed[%d].start(): " INTPTR_FORMAT
+@@ -284,10 +300,10 @@
+ 
+ 
+ void CardTableModRefBS::non_clean_card_iterate(Space* sp,
+-					       MemRegion mr,
+-					       DirtyCardToOopClosure* dcto_cl,
+-					       MemRegionClosure* cl,
+-					       bool clear) {
++                                               MemRegion mr,
++                                               DirtyCardToOopClosure* dcto_cl,
++                                               MemRegionClosure* cl,
++                                               bool clear) {
+   if (!mr.is_empty()) {
+     int n_threads = SharedHeap::heap()->n_par_threads();
+     if (n_threads > 0) {
+@@ -308,8 +324,8 @@
+ // cards (and miss those marked precleaned). In that sense,
+ // the name precleaned is currently somewhat of a misnomer.
+ void CardTableModRefBS::non_clean_card_iterate_work(MemRegion mr,
+-						    MemRegionClosure* cl,
+-						    bool clear) {
++                                                    MemRegionClosure* cl,
++                                                    bool clear) {
+   // Figure out whether we have to worry about parallelism.
+   bool is_par = (SharedHeap::heap()->n_par_threads() > 1);
+   for (int i = 0; i < _cur_covered_regions; i++) {
+@@ -319,39 +335,39 @@
+       jbyte* limit = byte_for(mri.start());
+       while (cur_entry >= limit) {
+         jbyte* next_entry = cur_entry - 1;
+-	if (*cur_entry != clean_card) {
+-	  size_t non_clean_cards = 1;
+-	  // Should the next card be included in this range of dirty cards.
++        if (*cur_entry != clean_card) {
++          size_t non_clean_cards = 1;
++          // Should the next card be included in this range of dirty cards.
+           while (next_entry >= limit && *next_entry != clean_card) {
+-	    non_clean_cards++; 
+-	    cur_entry = next_entry;
+-	    next_entry--;
+-	  }
+-	  // The memory region may not be on a card boundary.  So that
+-	  // objects beyond the end of the region are not processed, make
+-	  // cur_cards precise with regard to the end of the memory region.
+-	  MemRegion cur_cards(addr_for(cur_entry), 
+-			      non_clean_cards * card_size_in_words);
+-	  MemRegion dirty_region = cur_cards.intersection(mri);
+-	  if (clear) {
++            non_clean_cards++;
++            cur_entry = next_entry;
++            next_entry--;
++          }
++          // The memory region may not be on a card boundary.  So that
++          // objects beyond the end of the region are not processed, make
++          // cur_cards precise with regard to the end of the memory region.
++          MemRegion cur_cards(addr_for(cur_entry),
++                              non_clean_cards * card_size_in_words);
++          MemRegion dirty_region = cur_cards.intersection(mri);
++          if (clear) {
+             for (size_t i = 0; i < non_clean_cards; i++) {
+-	      // Clean the dirty cards (but leave the other non-clean
+-	      // alone.)  If parallel, do the cleaning atomically.
+-	      jbyte cur_entry_val = cur_entry[i];
+-	      if (card_is_dirty_wrt_gen_iter(cur_entry_val)) {
+-		if (is_par) {
+-		  jbyte res = Atomic::cmpxchg(clean_card, &cur_entry[i], cur_entry_val);
+-		  assert(res != clean_card,
+-			 "Dirty card mysteriously cleaned");
+-		} else {
+-		  cur_entry[i] = clean_card;
+-		}
+-	      }
++              // Clean the dirty cards (but leave the other non-clean
++              // alone.)  If parallel, do the cleaning atomically.
++              jbyte cur_entry_val = cur_entry[i];
++              if (card_is_dirty_wrt_gen_iter(cur_entry_val)) {
++                if (is_par) {
++                  jbyte res = Atomic::cmpxchg(clean_card, &cur_entry[i], cur_entry_val);
++                  assert(res != clean_card,
++                         "Dirty card mysteriously cleaned");
++                } else {
++                  cur_entry[i] = clean_card;
++                }
++              }
+             }
+           }
+-	  cl->do_MemRegion(dirty_region);
+-	}
+-	cur_entry = next_entry;
++          cl->do_MemRegion(dirty_region);
++        }
++        cur_entry = next_entry;
+       }
+     }
+   }
+@@ -360,7 +376,7 @@
+ void CardTableModRefBS::mod_oop_in_space_iterate(Space* sp,
+                                                  OopClosure* cl,
+                                                  bool clear,
+-						 bool before_save_marks) {
++                                                 bool before_save_marks) {
+   // Note that dcto_cl is resource-allocated, so there is no
+   // corresponding "delete".
+   DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision());
+@@ -507,7 +523,7 @@
+   void do_MemRegion(MemRegion mr) {
+     jbyte* entry = _ct->byte_for(mr.start());
+     guarantee(*entry != CardTableModRefBS::clean_card,
+-	      "Dirty card in region that should be clean");
++              "Dirty card in region that should be clean");
+   }
+ };
+ 
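
Unlike most files in this patch, cardTableModRefBS.cpp picks up real changes: cards_required and compute_byte_map_size pull the sizing arithmetic out of the constructor so the byte map can be reserved and committed with a page size larger than the default. That arithmetic restated standalone; the card size and the simplified alignment (the real code also folds in the allocation granularity) are assumptions:

    #include <cstddef>

    static const size_t card_size_in_words = 64;  // illustrative

    static size_t align_up_sz(size_t x, size_t a) {  // a: power of two
      return (x + a - 1) & ~(a - 1);
    }

    // One byte per card, plus one guard card used to catch walks off
    // the end of the table.
    static size_t cards_required(size_t covered_words) {
      size_t words = align_up_sz(covered_words, card_size_in_words);
      return words / card_size_in_words + 1;
    }

    // Whole map rounded up to the chosen mapping page size.
    static size_t byte_map_size(size_t covered_words, size_t page_size) {
      size_t guard_index = cards_required(covered_words) - 1;
      return align_up_sz(guard_index + 1, page_size);
    }
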
+diff -ruN openjdk6/hotspot/src/share/vm/memory/cardTableModRefBS.hpp openjdk/hotspot/src/share/vm/memory/cardTableModRefBS.hpp
+--- openjdk6/hotspot/src/share/vm/memory/cardTableModRefBS.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/cardTableModRefBS.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)cardTableModRefBS.hpp	1.51 07/05/29 09:44:14 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This kind of "BarrierSet" allows a "CollectedHeap" to detect and
+@@ -43,9 +40,7 @@
+ 
+ class CardTableModRefBS: public ModRefBarrierSet {
+   // Some classes get to look at some private stuff.
+-#ifdef CC_INTERP
+-  friend class cInterpreter;
+-#endif
++  friend class BytecodeInterpreter;
+   friend class VMStructs;
+   friend class CardTableRS;
+   friend class CheckForUnmarkedOops; // Needs access to raw card bytes.
+@@ -75,22 +70,22 @@
+     return CardTableModRefBS::card_is_dirty_wrt_gen_iter(cv);
+   }
+ 
+-  // Returns "true" iff the value "cv" may have represented a dirty card at 
++  // Returns "true" iff the value "cv" may have represented a dirty card at
+   // some point.
+   virtual bool card_may_have_been_dirty(jbyte cv) {
+     return card_is_dirty_wrt_gen_iter(cv);
+   }
+ 
+-  // The region that the card table can cover.
+-  MemRegion    _whole_heap;
+-
+-  jbyte*       _byte_map;      // the card marking array
+-  size_t       _byte_map_size;  // In bytes.
+-
+-  size_t       _last_valid_index;  // index of last valid element in card table
+-  size_t       _guard_index;       // index of very last element in card table;
+-                                   //  it is set to a guard value "last_card" and 
+-                                   // should never be modified
++  // The declaration order of these const fields is important; see the
++  // constructor before changing.
++  const MemRegion _whole_heap;       // the region covered by the card table
++  const size_t    _guard_index;      // index of very last element in the card
++                                     // table; it is set to a guard value
++                                     // (last_card) and should never be modified
++  const size_t    _last_valid_index; // index of the last valid element
++  const size_t    _page_size;        // page size used when mapping _byte_map
++  const size_t    _byte_map_size;    // in bytes
++  jbyte*          _byte_map;         // the card marking array
+ 
+   int _cur_covered_regions;
+   // The covered regions should be in address order.
+@@ -108,6 +103,12 @@
+   // uncommit the MemRegion for that page.
+   MemRegion _guard_region;
+ 
++ protected:
++  // Initialization utilities; covered_words is the size of the covered region
++  // in, um, words.
++  inline size_t cards_required(size_t covered_words);
++  inline size_t compute_byte_map_size();
++
+   // Finds and return the index of the region, if any, to which the given
+   // region would be contiguous.  If none exists, assign a new region and
+   // returns its index.  Requires that no more than the maximum number of
+@@ -122,18 +123,18 @@
+   void resize_covered_region(MemRegion new_region);
+ 
+   // Returns the leftmost end of a committed region corresponding to a
+-  // covered region before covered region "ind", or else "NULL" if "ind" is 
++  // covered region before covered region "ind", or else "NULL" if "ind" is
+   // the first covered region.
+   HeapWord* largest_prev_committed_end(int ind) const;
+ 
+-  // Returns the part of the region mr that doesn't intersect with 
+-  // any committed region other than self.  Used to prevent uncommitting 
++  // Returns the part of the region mr that doesn't intersect with
++  // any committed region other than self.  Used to prevent uncommitting
+   // regions that are also committed by other regions.  Also protects
+   // against uncommitting the guard region.
+   MemRegion committed_unique_to_self(int self, MemRegion mr) const;
+ 
+   // Mapping from address to card marking array entry
+-  jbyte* byte_for(const void* p) const { 
++  jbyte* byte_for(const void* p) const {
+     assert(_whole_heap.contains(p),
+            "out of bounds access to card marking array");
+     jbyte* result = &byte_map_base[uintptr_t(p) >> card_shift];
+@@ -150,9 +151,9 @@
+   }
+ 
+   // Mapping from card marking array entry to address of first word
+-  HeapWord* addr_for(const jbyte* p) const { 
++  HeapWord* addr_for(const jbyte* p) const {
+     assert(p >= _byte_map && p < _byte_map + _byte_map_size,
+-	   "out of bounds access to card marking array");
++           "out of bounds access to card marking array");
+     size_t delta = pointer_delta(p, byte_map_base, sizeof(jbyte));
+     HeapWord* result = (HeapWord*) (delta << card_shift);
+     assert(_whole_heap.contains(result),
+@@ -167,13 +168,13 @@
+   // may be modified. Note that this function will operate in a parallel
+   // mode if worker threads are available.
+   void non_clean_card_iterate(Space* sp, MemRegion mr,
+-			      DirtyCardToOopClosure* dcto_cl,
+-			      MemRegionClosure* cl,
+-			      bool clear);
++                              DirtyCardToOopClosure* dcto_cl,
++                              MemRegionClosure* cl,
++                              bool clear);
+ 
+   // Utility function used to implement the other versions below.
+   void non_clean_card_iterate_work(MemRegion mr, MemRegionClosure* cl,
+-				   bool clear);
++                                   bool clear);
+ 
+   void par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
+                                        DirtyCardToOopClosure* dcto_cl,
+@@ -212,16 +213,16 @@
+   // covering "sp", and "lowest_non_clean_base_chunk_index" to the chunk
+   // index of the corresponding to the first element of that array.
+   // Ensures that these arrays are of sufficient size, allocating if necessary.
+-  // May be called by several threads concurrently.  
++  // May be called by several threads concurrently.
+   void get_LNC_array_for_space(Space* sp,
+-			       jbyte**& lowest_non_clean, 
+-			       uintptr_t& lowest_non_clean_base_chunk_index,
+-			       size_t& lowest_non_clean_chunk_size);
++                               jbyte**& lowest_non_clean,
++                               uintptr_t& lowest_non_clean_base_chunk_index,
++                               size_t& lowest_non_clean_chunk_size);
+ 
+   // Returns the number of chunks necessary to cover "mr".
+   size_t chunks_to_cover(MemRegion mr) {
+     return (size_t)(addr_to_chunk_index(mr.last()) -
+-  		    addr_to_chunk_index(mr.start()) + 1);
++                    addr_to_chunk_index(mr.start()) + 1);
+   }
+ 
+   // Returns the index of the chunk in a stride which
+@@ -234,25 +235,25 @@
+   // Apply cl, which must either itself apply dcto_cl or be dcto_cl,
+   // to the cards in the stride (of n_strides) within the given space.
+   void process_stride(Space* sp,
+-		      MemRegion used,
+-		      jint stride, int n_strides,
+-		      DirtyCardToOopClosure* dcto_cl,
+-		      MemRegionClosure* cl,
+-		      bool clear,
+-		      jbyte** lowest_non_clean,
+-		      uintptr_t lowest_non_clean_base_chunk_index,
+-		      size_t lowest_non_clean_chunk_size);
++                      MemRegion used,
++                      jint stride, int n_strides,
++                      DirtyCardToOopClosure* dcto_cl,
++                      MemRegionClosure* cl,
++                      bool clear,
++                      jbyte** lowest_non_clean,
++                      uintptr_t lowest_non_clean_base_chunk_index,
++                      size_t lowest_non_clean_chunk_size);
+ 
+   // Makes sure that chunk boundaries are handled appropriately, by
+   // adjusting the min_done of dcto_cl, and by using a special card-table
+   // value to indicate how min_done should be set.
+   void process_chunk_boundaries(Space* sp,
+-				DirtyCardToOopClosure* dcto_cl,
+-				MemRegion chunk_mr,
+-				MemRegion used,
+-				jbyte** lowest_non_clean,
+-				uintptr_t lowest_non_clean_base_chunk_index,
+-				size_t    lowest_non_clean_chunk_size);
++                                DirtyCardToOopClosure* dcto_cl,
++                                MemRegion chunk_mr,
++                                MemRegion used,
++                                jbyte** lowest_non_clean,
++                                uintptr_t lowest_non_clean_base_chunk_index,
++                                size_t    lowest_non_clean_chunk_size);
+ 
+ public:
+   // Constants
+@@ -268,7 +269,7 @@
+     return bsn == BarrierSet::CardTableModRef || bsn == BarrierSet::ModRef;
+   }
+ 
+-  CardTableModRefBS(MemRegion whole_heap, int max_covered_regions); 
++  CardTableModRefBS(MemRegion whole_heap, int max_covered_regions);
+ 
+   // *** Barrier set functions.
+ 
+@@ -279,8 +280,8 @@
+   }
+ 
+   // Record a reference update. Note that these versions are precise!
+-  // The scanning code has to handle the fact that the write barrier may be 
+-  // either precise or imprecise. We make non-virtual inline variants of 
++  // The scanning code has to handle the fact that the write barrier may be
++  // either precise or imprecise. We make non-virtual inline variants of
+   // these functions here for performance.
+ protected:
+   void write_ref_field_work(oop obj, size_t offset, oop newVal);
+@@ -346,9 +347,9 @@
+   void invalidate(MemRegion mr);
+   void clear(MemRegion mr);
+   void mod_oop_in_space_iterate(Space* sp, OopClosure* cl,
+-				bool clear = false,
+-				bool before_save_marks = false);
+- 
++                                bool clear = false,
++                                bool before_save_marks = false);
++
+   // *** Card-table-RemSet-specific things.
+ 
+   // Invoke "cl.do_MemRegion" on a set of MemRegions that collectively
+@@ -368,7 +369,7 @@
+   // for cards within the MemRegion "mr" (which is required to be
+   // card-aligned and sized.)
+   void mod_card_iterate(MemRegion mr, MemRegionClosure* cl,
+-			bool clear = false) {
++                        bool clear = false) {
+     non_clean_card_iterate_work(mr, cl, clear);
+   }
+ 
+@@ -389,7 +390,7 @@
+   // Mapping from address to card marking array index.
+   int index_for(void* p) {
+     assert(_whole_heap.contains(p),
+-	   "out of bounds access to card marking array");
++           "out of bounds access to card marking array");
+     return byte_for(p) - _byte_map;
+   }
+ 
+@@ -413,8 +414,8 @@
+   bool card_may_have_been_dirty(jbyte cv);
+ public:
+   CardTableModRefBSForCTRS(MemRegion whole_heap,
+-			   int max_covered_regions) :
++                           int max_covered_regions) :
+     CardTableModRefBS(whole_heap, max_covered_regions) {}
+-    
++
+   void set_CTRS(CardTableRS* rs) { _rs = rs; }
+ };
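
The byte_for/index_for pair above is the heart of the card table: every 2^card_shift bytes of heap map to one mark byte. A minimal standalone sketch of that mapping, assuming HotSpot's usual 512-byte cards (card_shift = 9); the unbiased byte_map here is a simplification of the VM's biased _byte_map_base, and all names are illustrative:

    // Sketch only: 512-byte cards, one mark byte per card.
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct CardTableSketch {
      static const int card_shift = 9;      // 2^9 = 512 bytes per card
      uintptr_t heap_start, heap_end;       // covered heap region
      uint8_t*  byte_map;                   // one mark byte per card

      // Card entry covering address p (cf. byte_for).
      uint8_t* byte_for(const void* p) const {
        uintptr_t a = (uintptr_t)p;
        assert(a >= heap_start && a < heap_end &&
               "out of bounds access to card marking array");
        return &byte_map[(a - heap_start) >> card_shift];
      }

      // Index into the marking array (cf. index_for above).
      ptrdiff_t index_for(void* p) const { return byte_for(p) - byte_map; }
    };
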
+diff -ruN openjdk6/hotspot/src/share/vm/memory/cardTableRS.cpp openjdk/hotspot/src/share/vm/memory/cardTableRS.cpp
+--- openjdk6/hotspot/src/share/vm/memory/cardTableRS.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/cardTableRS.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)cardTableRS.cpp	1.45 07/05/25 12:54:50 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,14 +19,14 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_cardTableRS.cpp.incl"
+ 
+ CardTableRS::CardTableRS(MemRegion whole_heap,
+-			 int max_covered_regions) :
++                         int max_covered_regions) :
+   GenRemSet(&_ct_bs),
+   _ct_bs(whole_heap, max_covered_regions),
+   _cur_youngergen_card_val(youngergenP1_card)
+@@ -56,8 +53,8 @@
+     bool seen = false;
+     for (int g = 0; g < gch->n_gens()+1; g++) {
+       if (_last_cur_val_in_gen[g] == v) {
+-	seen = true;
+-	break;
++        seen = true;
++        break;
+       }
+     }
+     if (!seen) return v;
+@@ -82,7 +79,7 @@
+ }
+ 
+ void CardTableRS::younger_refs_iterate(Generation* g,
+-				       OopsInGenClosure* blk) {
++                                       OopsInGenClosure* blk) {
+   _last_cur_val_in_gen[g->level()+1] = cur_youngergen_card_val();
+   g->younger_refs_iterate(blk);
+ }
+@@ -92,50 +89,50 @@
+   CardTableRS* _ct;
+   bool _is_par;
+ private:
+-  // Clears the given card, return true if the corresponding card should be 
++  // Clears the given card, returning true if the corresponding card should be
+   // processed.
+   bool clear_card(jbyte* entry) {
+     if (_is_par) {
+       while (true) {
+-	// In the parallel case, we may have to do this several times.
+-	jbyte entry_val = *entry;
+-	assert(entry_val != CardTableRS::clean_card_val(),
+-	       "We shouldn't be looking at clean cards, and this should "
+-	       "be the only place they get cleaned.");
+-	if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
+-	    || _ct->is_prev_youngergen_card_val(entry_val)) {
+-	  jbyte res =
+-	    Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val);
+-	  if (res == entry_val) {
+-	    break;
+-	  } else {
+-	    assert(res == CardTableRS::cur_youngergen_and_prev_nonclean_card,
+-		   "The CAS above should only fail if another thread did "
+-		   "a GC write barrier.");
+-	  }
+-	} else if (entry_val ==
+-		   CardTableRS::cur_youngergen_and_prev_nonclean_card) {
+-	  // Parallelism shouldn't matter in this case.  Only the thread
+-	  // assigned to scan the card should change this value.
+-	  *entry = _ct->cur_youngergen_card_val();
+-	  break;
+-	} else {
+-	  assert(entry_val == _ct->cur_youngergen_card_val(),
+-		 "Should be the only possibility.");
+-	  // In this case, the card was clean before, and become
+-	  // cur_youngergen only because of processing of a promoted object.
+-	  // We don't have to look at the card.
+-	  return false;
+-	}
++        // In the parallel case, we may have to do this several times.
++        jbyte entry_val = *entry;
++        assert(entry_val != CardTableRS::clean_card_val(),
++               "We shouldn't be looking at clean cards, and this should "
++               "be the only place they get cleaned.");
++        if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
++            || _ct->is_prev_youngergen_card_val(entry_val)) {
++          jbyte res =
++            Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val);
++          if (res == entry_val) {
++            break;
++          } else {
++            assert(res == CardTableRS::cur_youngergen_and_prev_nonclean_card,
++                   "The CAS above should only fail if another thread did "
++                   "a GC write barrier.");
++          }
++        } else if (entry_val ==
++                   CardTableRS::cur_youngergen_and_prev_nonclean_card) {
++          // Parallelism shouldn't matter in this case.  Only the thread
++          // assigned to scan the card should change this value.
++          *entry = _ct->cur_youngergen_card_val();
++          break;
++        } else {
++          assert(entry_val == _ct->cur_youngergen_card_val(),
++                 "Should be the only possibility.");
++          // In this case, the card was clean before, and became
++          // cur_youngergen only because of processing of a promoted object.
++          // We don't have to look at the card.
++          return false;
++        }
+       }
+       return true;
+     } else {
+       jbyte entry_val = *entry;
+       assert(entry_val != CardTableRS::clean_card_val(),
+-	     "We shouldn't be looking at clean cards, and this should "
+-	     "be the only place they get cleaned.");
++             "We shouldn't be looking at clean cards, and this should "
++             "be the only place they get cleaned.");
+       assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card,
+-	     "This should be possible in the sequential case.");
++             "This should be possible in the sequential case.");
+       *entry = CardTableRS::clean_card_val();
+       return true;
+     }
+@@ -143,7 +140,7 @@
+ 
+ public:
+   ClearNoncleanCardWrapper(MemRegionClosure* dirty_card_closure,
+-			   CardTableRS* ct) :
++                           CardTableRS* ct) :
+     _dirty_card_closure(dirty_card_closure), _ct(ct) {
+     _is_par = (SharedHeap::heap()->n_par_threads() > 0);
+   }
+@@ -190,7 +187,7 @@
+ };
+ // clean (by dirty->clean before) ==> cur_younger_gen
+ // dirty                          ==> cur_youngergen_and_prev_nonclean_card
+-// precleaned			  ==> cur_youngergen_and_prev_nonclean_card
++// precleaned                     ==> cur_youngergen_and_prev_nonclean_card
+ // prev-younger-gen               ==> cur_youngergen_and_prev_nonclean_card
+ // cur-younger-gen                ==> cur_younger_gen
+ // cur_youngergen_and_prev_nonclean_card ==> no change.
+@@ -204,7 +201,7 @@
+       *entry = cur_youngergen_card_val();
+       return;
+     } else if (card_is_dirty_wrt_gen_iter(entry_val)
+-	       || is_prev_youngergen_card_val(entry_val)) {
++               || is_prev_youngergen_card_val(entry_val)) {
+       // Mark it as both cur and prev youngergen; card cleaning thread will
+       // eventually remove the previous stuff.
+       jbyte new_val = cur_youngergen_and_prev_nonclean_card;
+@@ -215,15 +212,15 @@
+       continue;
+     } else {
+       assert(entry_val == cur_youngergen_and_prev_nonclean_card
+-	     || entry_val == cur_youngergen_card_val(),
+-	     "should be only possibilities.");
++             || entry_val == cur_youngergen_card_val(),
++             "should be only possibilities.");
+       return;
+     }
+   } while (true);
+ }
+ 
+-void CardTableRS::younger_refs_in_space_iterate(Space* sp, 
+-						OopsInGenClosure* cl) {
++void CardTableRS::younger_refs_in_space_iterate(Space* sp,
++                                                OopsInGenClosure* cl) {
+   DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, _ct_bs.precision(),
+                                                    cl->gen_boundary());
+   ClearNoncleanCardWrapper clear_cl(dcto_cl, this);
+@@ -300,8 +297,8 @@
+     HeapWord* jp = (HeapWord*)p;
+     if (jp >= begin && jp < end) {
+       guarantee(*p == NULL || (HeapWord*)p < boundary
+-		|| (HeapWord*)(*p) >= boundary,
+-		"pointer on clean card crosses boundary");
++                || (HeapWord*)(*p) >= boundary,
++                "pointer on clean card crosses boundary");
+     }
+   }
+   VerifyCleanCardClosure(HeapWord* b, HeapWord* _begin, HeapWord* _end) :
+@@ -341,8 +338,8 @@
+     if (*cur_entry == CardTableModRefBS::clean_card) {
+       jbyte* first_dirty = cur_entry+1;
+       while (first_dirty < limit &&
+-	     *first_dirty == CardTableModRefBS::clean_card) {
+-	first_dirty++;
++             *first_dirty == CardTableModRefBS::clean_card) {
++        first_dirty++;
+       }
+       // If the first object is a regular object, and it has a
+       // young-to-old field, that would mark the previous card.
+@@ -352,27 +349,27 @@
+       HeapWord* begin = boundary;             // Until proven otherwise.
+       HeapWord* start_block = boundary_block; // Until proven otherwise.
+       if (boundary_block < boundary) {
+-	if (s->block_is_obj(boundary_block) && s->obj_is_alive(boundary_block)) {
+-	  oop boundary_obj = oop(boundary_block);
+-	  if (!boundary_obj->is_objArray() &&
+-	      !boundary_obj->is_typeArray()) {
+-	    guarantee(cur_entry > byte_for(used.start()),
+-		      "else boundary would be boundary_block");
+-	    if (*byte_for(boundary_block) != CardTableModRefBS::clean_card) {
+-	      begin = boundary_block + s->block_size(boundary_block);
+-	      start_block = begin;
+-	    }
+-	  }
+-	}
++        if (s->block_is_obj(boundary_block) && s->obj_is_alive(boundary_block)) {
++          oop boundary_obj = oop(boundary_block);
++          if (!boundary_obj->is_objArray() &&
++              !boundary_obj->is_typeArray()) {
++            guarantee(cur_entry > byte_for(used.start()),
++                      "else boundary would be boundary_block");
++            if (*byte_for(boundary_block) != CardTableModRefBS::clean_card) {
++              begin = boundary_block + s->block_size(boundary_block);
++              start_block = begin;
++            }
++          }
++        }
+       }
+       // Now traverse objects until end.
+       HeapWord* cur = start_block;
+       VerifyCleanCardClosure verify_blk(gen_boundary, begin, end);
+       while (cur < end) {
+-	if (s->block_is_obj(cur) && s->obj_is_alive(cur)) {
+-	  oop(cur)->oop_iterate(&verify_blk);
+-	}
+-	cur += s->block_size(cur);
++        if (s->block_is_obj(cur) && s->obj_is_alive(cur)) {
++          oop(cur)->oop_iterate(&verify_blk);
++        }
++        cur += s->block_size(cur);
+       }
+       cur_entry = first_dirty;
+     } else {
+@@ -380,7 +377,7 @@
+       // is a transient value, that cannot be in the card table
+       // except during GC, and thus assert that:
+       // guarantee(*cur_entry != cur_youngergen_and_prev_nonclean_card,
+-      //	"Illegal CT value");
++      //        "Illegal CT value");
+       // That however, need not hold, as will become clear in the
+       // following...
+ 
+@@ -389,8 +386,8 @@
+       // from the current value) in the card table, and so we'd like to
+       // assert that:
+       // guarantee(cur_youngergen_card_val() == youngergen_card
+-      //	   || !is_prev_youngergen_card_val(*cur_entry),
+-      //	   "Illegal CT value");
++      //           || !is_prev_youngergen_card_val(*cur_entry),
++      //           "Illegal CT value");
+       // That, however, may not hold occasionally, because of
+       // CMS or MSC in the old gen. To wit, consider the
+       // following two simple illustrative scenarios:
+@@ -445,7 +442,7 @@
+       // "derivative" case to consider, where we have a stale
+       // "cur_younger_gen_and_prev_non_clean" value, as will become
+       // apparent in the case analysis below.
+-      // o Case 1. If the stale value corresponds to a younger_gen_n 
++      // o Case 1. If the stale value corresponds to a younger_gen_n
+       //   value other than the cur_younger_gen value then the code
+       //   treats this as being tantamount to a prev_younger_gen
+       //   card. This means that the card may be unnecessarily scanned.
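
The trickiest code in this file is ClearNoncleanCardWrapper::clear_card, whose parallel branch must clear a card without losing a concurrent re-dirtying by a mutator's GC write barrier. A condensed standalone sketch of that retry loop, using std::atomic in place of HotSpot's Atomic::cmpxchg, collapsing the several dirty/prev-youngergen values into one, and with illustrative card values:

    #include <atomic>
    #include <cstdint>

    // Illustrative card values; the real ones come from CardTableRS.
    enum : int8_t { clean_card = 0, dirty_card = 1,
                    cur_youngergen_and_prev_nonclean = 2 };

    // Mirrors the parallel branch of clear_card: returns true if the
    // caller should go on and scan the card's region.
    bool clear_card_par(std::atomic<int8_t>& entry, int8_t cur_youngergen_val) {
      for (;;) {
        int8_t v = entry.load();
        if (v == dirty_card) {
          // Try to clean it; a racing write barrier may install the
          // combined "cur + prev nonclean" value first, so retry on failure.
          if (entry.compare_exchange_strong(v, clean_card)) return true;
        } else if (v == cur_youngergen_and_prev_nonclean) {
          // Only the thread assigned to this card reaches here, so a
          // plain store suffices.
          entry.store(cur_youngergen_val);
          return true;
        } else {
          // Already cur_youngergen: set while processing a promoted
          // object; the card needs no scanning.
          return false;
        }
      }
    }
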
+diff -ruN openjdk6/hotspot/src/share/vm/memory/cardTableRS.hpp openjdk/hotspot/src/share/vm/memory/cardTableRS.hpp
+--- openjdk6/hotspot/src/share/vm/memory/cardTableRS.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/cardTableRS.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)cardTableRS.hpp	1.29 07/05/05 17:05:44 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,20 +19,20 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class Space;
+ class OopsInGenClosure;
+ class DirtyCardToOopClosure;
+ 
+-// This kind of "GenRemSet" uses a card table both as shared data structure 
++// This kind of "GenRemSet" uses a card table both as shared data structure
+ // for a mod ref barrier set and for the rem set information.
+ 
+ class CardTableRS: public GenRemSet {
+   friend class VMStructs;
+   // Below are private classes used in impl.
+-  friend class VerifyCTSpaceClosure;  
++  friend class VerifyCTSpaceClosure;
+   friend class ClearNoncleanCardWrapper;
+ 
+   static jbyte clean_card_val() {
+@@ -56,9 +53,9 @@
+   enum ExtendedCardValue {
+     youngergen_card   = CardTableModRefBS::CT_MR_BS_last_reserved + 1,
+     // These are for parallel collection.
+-    // There are three P (parallel) youngergen card values.  In general, this 
++    // There are three P (parallel) youngergen card values.  In general, this
+     // needs to be more than the number of generations (including the perm
+-    // gen) that might have younger_refs_do invoked on them separately.  So 
++    // gen) that might have younger_refs_do invoked on them separately.  So
+     // if we add more gens, we have to add more values.
+     youngergenP1_card  = CardTableModRefBS::CT_MR_BS_last_reserved + 2,
+     youngergenP2_card  = CardTableModRefBS::CT_MR_BS_last_reserved + 3,
+@@ -67,13 +64,13 @@
+       CardTableModRefBS::CT_MR_BS_last_reserved + 5
+   };
+ 
+-  // An array that contains, for each generation, the card table value last 
++  // An array that contains, for each generation, the card table value last
+   // used as the current value for a younger_refs_do iteration of that
+   // portion of the table.  (The perm gen is index 0; other gens are at
+   // their level plus 1.  The youngest gen is in the table, but will
+   // always have the value "clean_card".)
+   jbyte* _last_cur_val_in_gen;
+-  
++
+   jbyte _cur_youngergen_card_val;
+ 
+   jbyte cur_youngergen_card_val() {
+@@ -132,7 +129,7 @@
+   void verify_empty(MemRegion mr);
+ 
+   void clear(MemRegion mr) { _ct_bs.clear(mr); }
+-  void clear_into_younger(Generation* gen, bool clear_perm); 
++  void clear_into_younger(Generation* gen, bool clear_perm);
+ 
+   void invalidate(MemRegion mr) { _ct_bs.invalidate(mr); }
+   void invalidate_or_clear(Generation* gen, bool younger, bool perm);
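
The three youngergenP card values in the ExtendedCardValue enum above exist so that successive parallel younger_refs_do passes never reuse a value that is still "current" for some generation. A small sketch of the selection loop shown earlier in cardTableRS.cpp, with the values and generation count made explicit; the concrete numbers are illustrative:

    #include <cstdint>

    // The three parallel youngergen values (cf. ExtendedCardValue).
    const int8_t youngergenP[3] = { 2, 3, 4 };

    // Pick a value that no generation used as its "current" value on its
    // last younger_refs_do pass (cf. _last_cur_val_in_gen above).
    int8_t choose_youngergen_card_val(const int8_t* last_val_in_gen, int n_gens) {
      for (int8_t v : youngergenP) {
        bool seen = false;
        for (int g = 0; g < n_gens; g++)
          if (last_val_in_gen[g] == v) { seen = true; break; }
        if (!seen) return v;
      }
      return youngergenP[0];  // unreachable while values outnumber gens in flight
    }
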
+diff -ruN openjdk6/hotspot/src/share/vm/memory/classify.cpp openjdk/hotspot/src/share/vm/memory/classify.cpp
+--- openjdk6/hotspot/src/share/vm/memory/classify.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/classify.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)classify.cpp	1.9 07/05/05 17:05:44 JVM"
+-#endif
+ /*
+  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/memory/classify.hpp openjdk/hotspot/src/share/vm/memory/classify.hpp
+--- openjdk6/hotspot/src/share/vm/memory/classify.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/classify.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)classify.hpp	1.10 07/05/05 17:05:42 JVM"
+-#endif
+ /*
+  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ typedef enum oop_type {
+diff -ruN openjdk6/hotspot/src/share/vm/memory/collectorPolicy.cpp openjdk/hotspot/src/share/vm/memory/collectorPolicy.cpp
+--- openjdk6/hotspot/src/share/vm/memory/collectorPolicy.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/collectorPolicy.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)collectorPolicy.cpp	1.89 07/06/12 09:41:19 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -59,9 +56,9 @@
+ void CollectorPolicy::initialize_size_info() {
+   // User inputs from -mx and ms are aligned
+   _initial_heap_byte_size = align_size_up(Arguments::initial_heap_size(),
+-					  min_alignment());
++                                          min_alignment());
+   _min_heap_byte_size = align_size_up(Arguments::min_heap_size(),
+-					  min_alignment());
++                                          min_alignment());
+   _max_heap_byte_size = align_size_up(MaxHeapSize, max_alignment());
+ 
+   // Check validity of heap parameters from launcher
+@@ -104,7 +101,7 @@
+ void CollectorPolicy::initialize_perm_generation(PermGen::Name pgnm) {
+   _permanent_generation =
+     new PermanentGenerationSpec(pgnm, PermSize, MaxPermSize,
+-				SharedReadOnlySize,
++                                SharedReadOnlySize,
+                                 SharedReadWriteSize,
+                                 SharedMiscDataSize,
+                                 SharedMiscCodeSize);
+@@ -115,10 +112,10 @@
+ 
+ 
+ GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
+-					   int max_covered_regions) {
++                                           int max_covered_regions) {
+   switch (rem_set_name()) {
+   case GenRemSet::CardTable: {
+-    if (barrier_set_name() != BarrierSet::CardTableModRef) 
++    if (barrier_set_name() != BarrierSet::CardTableModRef)
+       vm_exit_during_initialization("Mismatch between RS and BS.");
+     CardTableRS* res = new CardTableRS(whole_heap, max_covered_regions);
+     return res;
+@@ -132,14 +129,14 @@
+ // GenCollectorPolicy methods.
+ 
+ void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
+-						size_t init_promo_size,
+-						size_t init_survivor_size) {
++                                                size_t init_promo_size,
++                                                size_t init_survivor_size) {
+   double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
+   _size_policy = new AdaptiveSizePolicy(init_eden_size,
+-					init_promo_size,
+-					init_survivor_size,
+-					max_gc_minor_pause_sec,
+-					GCTimeRatio);
++                                        init_promo_size,
++                                        init_survivor_size,
++                                        max_gc_minor_pause_sec,
++                                        GCTimeRatio);
+ }
+ 
+ size_t GenCollectorPolicy::compute_max_alignment() {
+@@ -149,9 +146,14 @@
+   // byte entry and the os page size is 4096, the maximum heap size should
+   // be 512*4096 = 2MB aligned.
+   size_t alignment = GenRemSet::max_alignment_constraint(rem_set_name());
+-  if (UseLargePages) {
+-      // in presence of large pages we have to make sure that our 
+-      // alignment is large page aware 
++
++  // Parallel GC does its own alignment of the generations to avoid requiring a
++  // large page (256M on some platforms) for the permanent generation.  The
++  // other collectors should also be updated to do their own alignment and then
++  // this use of lcm() should be removed.
++  if (UseLargePages && !UseParallelGC) {
++      // in presence of large pages we have to make sure that our
++      // alignment is large page aware
+       alignment = lcm(os::large_page_size(), alignment);
+   }
+ 
+@@ -162,14 +164,14 @@
+   // All sizes must be multiples of the generation granularity.
+   set_min_alignment((uintx) Generation::GenGrain);
+   set_max_alignment(compute_max_alignment());
+-  assert(max_alignment() >= min_alignment() && 
+-	 max_alignment() % min_alignment() == 0, 
+-	 "invalid alignment constraints");
++  assert(max_alignment() >= min_alignment() &&
++         max_alignment() % min_alignment() == 0,
++         "invalid alignment constraints");
+ 
+   CollectorPolicy::initialize_flags();
+ 
+   // All generational heaps have a youngest gen; handle those flags here.
+-  
++
+   // Adjust max size parameters
+   if (NewSize > MaxNewSize) {
+     MaxNewSize = NewSize;
+@@ -217,33 +219,33 @@
+     _min_gen0_size = NewSize;
+   } else {
+     _min_gen0_size = align_size_down(_min_heap_byte_size / (NewRatio+1),
+-				     min_alignment());
++                                     min_alignment());
+     // We bound the minimum size by NewSize below (since it historically
+     // would have been NewSize and because the NewRatio calculation could
+     // yield a size that is too small) and bound it by MaxNewSize above.
+     // This is not always best.  The NewSize calculated by CMS (which has
+     // a fixed minimum of 16m) can sometimes be "too" large.  Consider
+     // the case where -Xmx32m.  The CMS calculated NewSize would be about
+-    // half the entire heap which seems too large.  But the counter 
++    // half the entire heap which seems too large.  But the counter
+     // example is seen when the client defaults for NewRatio are used.
+-    // An initial young generation size of 640k was observed 
++    // An initial young generation size of 640k was observed
+     // with -Xmx128m -XX:MaxNewSize=32m when NewSize was not used
+     // as a lower bound as with
+     // _min_gen0_size = MIN2(_min_gen0_size, MaxNewSize);
+     // and 640k seemed too small a young generation.
+     _min_gen0_size = MIN2(MAX2(_min_gen0_size, NewSize), MaxNewSize);
+-  }	
++  }
+ 
+   // Parameters are valid, compute area sizes.
+   size_t max_new_size = align_size_down(_max_heap_byte_size / (NewRatio+1),
+-					min_alignment());
++                                        min_alignment());
+   max_new_size = MIN2(MAX2(max_new_size, _min_gen0_size), MaxNewSize);
+ 
+   // desired_new_size is used to set the initial size.  The
+   // initial size must be greater than the minimum size.
+-  size_t desired_new_size = 
++  size_t desired_new_size =
+     align_size_down(_initial_heap_byte_size / (NewRatio+1),
+-		  min_alignment());
++                  min_alignment());
+ 
+   size_t new_size = MIN2(MAX2(desired_new_size, _min_gen0_size), max_new_size);
+ 
+@@ -253,7 +255,7 @@
+ 
+ void TwoGenerationCollectorPolicy::initialize_size_info() {
+   GenCollectorPolicy::initialize_size_info();
+-  
++
+   // Minimum sizes of the generations may be different than
+   // the initial sizes.  An inconsistency is permitted here
+   // in the total size that can be specified explicitly by
+@@ -264,11 +266,11 @@
+     _min_gen1_size = OldSize;
+     // The generation minimums and the overall heap minimum should
+     // be within one heap alignment.
+-    if ((_min_gen1_size + _min_gen0_size + max_alignment()) < 
+-	 _min_heap_byte_size) {
++    if ((_min_gen1_size + _min_gen0_size + max_alignment()) <
++         _min_heap_byte_size) {
+       warning("Inconsistency between minimum heap size and minimum "
+-	"generation sizes: using min heap = " SIZE_FORMAT, 
+-	_min_heap_byte_size);
++        "generation sizes: using min heap = " SIZE_FORMAT,
++        _min_heap_byte_size);
+     }
+   } else {
+     _min_gen1_size = _min_heap_byte_size - _min_gen0_size;
+@@ -278,9 +280,9 @@
+   _max_gen1_size = _max_heap_byte_size - _max_gen0_size;
+ }
+ 
+-HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size, 
+-					bool is_tlab,
+-					bool* gc_overhead_limit_was_exceeded) {
++HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
++                                        bool is_tlab,
++                                        bool* gc_overhead_limit_was_exceeded) {
+   GenCollectedHeap *gch = GenCollectedHeap::heap();
+ 
+   debug_only(gch->check_for_valid_allocation_state());
+@@ -375,7 +377,7 @@
+       size_policy()->set_gc_time_limit_exceeded(false);
+       return NULL;
+     }
+-      
++
+     VM_GenCollectForAllocation op(size,
+                                   is_tlab,
+                                   gc_count_before);
+@@ -387,7 +389,7 @@
+          continue;  // retry and/or stall as necessary
+       }
+       assert(result == NULL || gch->is_in_reserved(result),
+-	     "result not in heap");
++             "result not in heap");
+       return result;
+     }
+ 
+@@ -395,13 +397,13 @@
+     if ((QueuedAllocationWarningCount > 0) &&
+         (try_count % QueuedAllocationWarningCount == 0)) {
+           warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t"
+-		  " size=%d %s", try_count, size, is_tlab ? "(TLAB)" : "");
++                  " size=%d %s", try_count, size, is_tlab ? "(TLAB)" : "");
+     }
+   }
+ }
+ 
+ HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
+-						       bool   is_tlab) {
++                                                       bool   is_tlab) {
+   GenCollectedHeap *gch = GenCollectedHeap::heap();
+   HeapWord* result = NULL;
+   for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) {
+@@ -415,7 +417,7 @@
+ }
+ 
+ HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
+-							bool   is_tlab) {
++                                                        bool   is_tlab) {
+   GenCollectedHeap *gch = GenCollectedHeap::heap();
+   GCCauseSetter x(gch, GCCause::_allocation_failure);
+   HeapWord* result = NULL;
+@@ -430,7 +432,7 @@
+     return result;   // could be null if we are out of space
+   } else if (!gch->incremental_collection_will_fail()) {
+     // The gc_prologues have not executed yet.  The value
+-    // for incremental_collection_will_fail() is the remanent 
++    // for incremental_collection_will_fail() is the remnant
+     // of the last collection.
+     // Do an incremental collection.
+     gch->do_collection(false            /* full */,
+@@ -443,26 +445,26 @@
+     // for the original code and why this has been simplified
+     // with from-space allocation criteria modified and
+     // such allocation moved out of the safepoint path.
+-    gch->do_collection(true             /* full */, 
+-                       false            /* clear_all_soft_refs */, 
+-                       size             /* size */, 
++    gch->do_collection(true             /* full */,
++                       false            /* clear_all_soft_refs */,
++                       size             /* size */,
+                        is_tlab          /* is_tlab */,
+                        number_of_generations() - 1 /* max_level */);
+   }
+-  
++
+   result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);
+-  
++
+   if (result != NULL) {
+     assert(gch->is_in_reserved(result), "result not in heap");
+     return result;
+   }
+-  
++
+   // OK, collection failed, try expansion.
+   result = expand_heap_and_allocate(size, is_tlab);
+   if (result != NULL) {
+     return result;
+   }
+-  
++
+   // If we reach this point, we're really out of memory. Try every trick
+   // we can to reclaim memory. Force collection of soft references. Force
+   // a complete compaction of the heap. Any additional methods for finding
+@@ -483,7 +485,7 @@
+     assert(gch->is_in_reserved(result), "result not in heap");
+     return result;
+   }
+-  
++
+   // What else?  We might try synchronous finalization later.  If the total
+   // space available is large enough for the allocation, then a more
+   // complete compaction phase than we've tried so far might be
+@@ -502,7 +504,7 @@
+ //   was a full collection because a partial collection (would
+ //   have) failed and is likely to fail again
+ bool GenCollectorPolicy::should_try_older_generation_allocation(
+-	size_t word_size) const {
++        size_t word_size) const {
+   GenCollectedHeap* gch = GenCollectedHeap::heap();
+   size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
+   return    (word_size > heap_word_size(gen0_capacity))
+@@ -525,7 +527,7 @@
+   _generations = new GenerationSpecPtr[number_of_generations()];
+   if (_generations == NULL)
+     vm_exit_during_initialization("Unable to allocate gen spec");
+-  
++
+   if (UseParNewGC && ParallelGCThreads > 0) {
+     _generations[0] = new GenerationSpec(Generation::ParNew, _initial_gen0_size, _max_gen0_size);
+   } else {
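
Two calculations above reward a closer look: initialize_size_info carves the minimum young generation out of the heap as the aligned fraction 1/(NewRatio+1), and compute_max_alignment now combines the card-table constraint with the large-page size via lcm, except under ParallelGC, which aligns its generations itself. A worked sketch with made-up sizes, not the VM's defaults:

    #include <cstdio>
    #include <cstddef>

    static size_t align_down(size_t x, size_t a) { return x - (x % a); }
    static size_t gcd(size_t a, size_t b) { while (b) { size_t t = a % b; a = b; b = t; } return a; }
    static size_t lcm(size_t a, size_t b) { return a / gcd(a, b) * b; }

    int main() {
      // Illustrative values: 64M min heap, NewRatio=2, 64K min alignment.
      size_t min_heap = 64u * 1024 * 1024, new_ratio = 2, min_align = 64 * 1024;
      size_t min_gen0 = align_down(min_heap / (new_ratio + 1), min_align);
      printf("min gen0 = %zuK\n", min_gen0 / 1024);   // prints 21824K

      // compute_max_alignment with large pages: a 2M card-table
      // constraint (512 cards * 4K os page) combined with a 4M large page.
      size_t card_constraint = 2u * 1024 * 1024, large_page = 4u * 1024 * 1024;
      printf("alignment = %zuM\n",
             lcm(large_page, card_constraint) / (1024 * 1024));  // prints 4M
      return 0;
    }
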
+diff -ruN openjdk6/hotspot/src/share/vm/memory/collectorPolicy.hpp openjdk/hotspot/src/share/vm/memory/collectorPolicy.hpp
+--- openjdk6/hotspot/src/share/vm/memory/collectorPolicy.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/collectorPolicy.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)collectorPolicy.hpp	1.41 07/05/29 09:44:14 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,13 +19,13 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This class (or more correctly, subtypes of this class)
+ // are used to define global garbage collector attributes.
+ // This includes initialization of generations and any other
+-// shared resources they may need. 
++// shared resources they may need.
+ //
+ // In general, all flag adjustment and validation should be
+ // done in initialize_flags(), which is called prior to
+@@ -37,7 +34,7 @@
+ // This class is not fully developed yet. As more collector(s)
+ // are added, it is expected that we will come across further
+ // behavior that requires global attention. The correct place
+-// to deal with those issues is this class. 
++// to deal with those issues is this class.
+ 
+ // Forward declarations.
+ class GenCollectorPolicy;
+@@ -102,7 +99,7 @@
+   virtual MarkSweepPolicy*              as_mark_sweep_policy()            { return NULL; }
+ #ifndef SERIALGC
+   virtual ConcurrentMarkSweepPolicy*    as_concurrent_mark_sweep_policy() { return NULL; }
+-#endif // SERIALGC 
++#endif // SERIALGC
+   // Note that these are not virtual.
+   bool is_generation_policy()            { return as_generation_policy() != NULL; }
+   bool is_two_generation_policy()        { return as_two_generation_policy() != NULL; }
+@@ -124,7 +121,7 @@
+   // Create the remembered set (to cover the given reserved region,
+   // allowing breaking up into at most "max_covered_regions").
+   virtual GenRemSet* create_rem_set(MemRegion reserved,
+-				    int max_covered_regions);
++                                    int max_covered_regions);
+ 
+   // This method controls how a collector satisfies a request
+   // for a block of memory.  "gc_time_limit_was_exceeded" will
+@@ -135,7 +132,7 @@
+   virtual HeapWord* mem_allocate_work(size_t size,
+                                       bool is_tlab,
+                                       bool* gc_overhead_limit_was_exceeded) = 0;
+-  
++
+   // This method controls how a collector handles one or more
+   // of its generations being fully allocated.
+   virtual HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab) = 0;
+@@ -145,18 +142,18 @@
+   // Create the jstat counters for the GC policy.  By default, policies
+   // don't have associated counters, and we complain if this is invoked.
+   virtual void initialize_gc_policy_counters() {
+-    ShouldNotReachHere(); 
++    ShouldNotReachHere();
+   }
+ 
+-  virtual CollectorPolicy::Name kind() { 
+-    return CollectorPolicy::CollectorPolicyKind; 
++  virtual CollectorPolicy::Name kind() {
++    return CollectorPolicy::CollectorPolicyKind;
+   }
+-  
++
+   // Returns true if a collector has eden space with soft end.
+   virtual bool has_soft_ended_eden() {
+     return false;
+   }
+-  
++
+ };
+ 
+ class GenCollectorPolicy : public CollectorPolicy {
+@@ -181,7 +178,7 @@
+ 
+   // Try to allocate space by expanding the heap.
+   virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
+-    
++
+   // compute max heap alignment
+   size_t compute_max_alignment();
+ 
+@@ -216,9 +213,9 @@
+   // Adaptive size policy
+   AdaptiveSizePolicy* size_policy() { return _size_policy; }
+   virtual void initialize_size_policy(size_t init_eden_size,
+-				      size_t init_promo_size,
+-				      size_t init_survivor_size);
+-  
++                                      size_t init_promo_size,
++                                      size_t init_survivor_size);
++
+ };
+ 
+ 
+@@ -246,8 +243,8 @@
+   BarrierSet::Name barrier_set_name()          { return BarrierSet::CardTableModRef; }
+   GenRemSet::Name rem_set_name()               { return GenRemSet::CardTable; }
+ 
+-  virtual CollectorPolicy::Name kind() { 
+-    return CollectorPolicy::TwoGenerationCollectorPolicyKind; 
++  virtual CollectorPolicy::Name kind() {
++    return CollectorPolicy::TwoGenerationCollectorPolicyKind;
+   }
+ };
+ 
+@@ -262,4 +259,3 @@
+ 
+   void initialize_gc_policy_counters();
+ };
+-
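
The as_*_policy()/is_*_policy() pairs above implement a checked-downcast idiom that avoids RTTI: the base class answers NULL from each virtual as_X() hook, the matching subclass overrides it to return this, and the non-virtual is_X() queries reduce to NULL tests. A minimal sketch of the pattern, with hypothetical names rather than the real policy hierarchy:

    #include <cstddef>

    class TwoGenPolicy;   // hypothetical subclass

    class Policy {
    public:
      virtual ~Policy() {}
      // Base class answers NULL; only the matching subclass overrides.
      virtual TwoGenPolicy* as_two_gen_policy() { return NULL; }
      // Non-virtual query built on the hook, as in CollectorPolicy.
      bool is_two_gen_policy() { return as_two_gen_policy() != NULL; }
    };

    class TwoGenPolicy : public Policy {
    public:
      TwoGenPolicy* as_two_gen_policy() { return this; }
    };
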
+diff -ruN openjdk6/hotspot/src/share/vm/memory/compactingPermGenGen.cpp openjdk/hotspot/src/share/vm/memory/compactingPermGenGen.cpp
+--- openjdk6/hotspot/src/share/vm/memory/compactingPermGenGen.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/compactingPermGenGen.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)compactingPermGenGen.cpp	1.20 07/05/05 17:05:45 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -233,7 +230,7 @@
+           (!mapinfo->map_space(mc, mc_rs, NULL))      ||
+           // check the alignment constraints
+           (ch == NULL || ch->kind() != CollectedHeap::GenCollectedHeap ||
+-           image_alignment != 
++           image_alignment !=
+            ((GenCollectedHeap*)ch)->gen_policy()->max_alignment())) {
+         // Base addresses didn't match; skip sharing, but continue
+         shared_rs.release();
+@@ -341,7 +338,7 @@
+ 
+ // References from the perm gen to the younger generation objects may
+ // occur in static fields in Java classes or in constant pool references
+-// to String objects. 
++// to String objects.
+ 
+ void CompactingPermGenGen::younger_refs_iterate(OopsInGenClosure* blk) {
+   OneContigSpaceCardGeneration::younger_refs_iterate(blk);
+@@ -394,7 +391,7 @@
+ void CompactingPermGenGen::grow_to_reserved() {
+   // Don't allow _virtual_size to expand into shared spaces.
+   if (_virtual_space.uncommitted_size() > _shared_space_size) {
+-    size_t remaining_bytes = 
++    size_t remaining_bytes =
+       _virtual_space.uncommitted_size() - _shared_space_size;
+     bool success = OneContigSpaceCardGeneration::grow_by(remaining_bytes);
+     DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
+diff -ruN openjdk6/hotspot/src/share/vm/memory/compactingPermGenGen.hpp openjdk/hotspot/src/share/vm/memory/compactingPermGenGen.hpp
+--- openjdk6/hotspot/src/share/vm/memory/compactingPermGenGen.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/compactingPermGenGen.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)compactingPermGenGen.hpp	1.20 07/05/05 17:05:45 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // All heaps contains a "permanent generation," containing permanent
+@@ -59,13 +56,13 @@
+   // variable is initialized for only the unshared part but is
+   // later extended to include the shared part during initialization
+   // if shared spaces are being used.
+-  // The reserved size for the _virtual_space for CompactingPermGenGen 
++  // The reserved size for the _virtual_space for CompactingPermGenGen
+   // is the size of the space for the permanent generation including the
+   // shared spaces.  This can be seen by the use of MaxPermSize
+-  // in the allocation of PermanentGenerationSpec.  The space for the 
++  // in the allocation of PermanentGenerationSpec.  The space for the
+   // shared spaces is committed separately (???).
+   // In general at initialization only a part of the
+-  // space for the unshared part of the permanent generation is 
++  // space for the unshared part of the permanent generation is
+   // committed and more is committed as the permanent generation is
+   // grown.  In growing the permanent generation the capacity() and
+   // max_capacity() of the generation are used.  For the permanent
+@@ -117,13 +114,13 @@
+ 
+   CompactingPermGenGen(ReservedSpace rs, ReservedSpace shared_rs,
+                        size_t initial_byte_size, int level, GenRemSet* remset,
+-		       ContiguousSpace* space,
++                       ContiguousSpace* space,
+                        PermanentGenerationSpec* perm_spec);
+ 
+   const char* name() const {
+     return "compacting perm gen";
+   }
+-  
++
+   const char* short_name() const {
+     return "Perm";
+   }
+@@ -165,15 +162,14 @@
+ 
+   bool is_in_unshared(const void* p) const {
+     return OneContigSpaceCardGeneration::is_in(p);
+-  } 
++  }
+ 
+   bool is_in_shared(const void* p) const {
+    return p >= shared_bottom && p < shared_end;
+-   } 
++   }
+ 
+   inline bool is_in(const void* p) const {
+-    if (!is_in_unshared(p) && !is_in_shared(p)) return false;
+-    return true;
++    return is_in_unshared(p) || is_in_shared(p);
+   }
+ 
+   inline PermanentGenerationSpec* spec() const { return _spec; }
+@@ -237,7 +233,7 @@
+   void verify(bool allow_dirty);
+ 
+   // Serialization
+-  static void initialize_oops();
++  static void initialize_oops() KERNEL_RETURN;
+   static void serialize_oops(SerializeOopClosure* soc);
+   void serialize_bts(SerializeOopClosure* soc);
+ 
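
The one semantic change in this header is initialize_oops() gaining a KERNEL_RETURN annotation. In HotSpot that macro (from utilities/macros.hpp) expands to an empty inline body in kernel builds and to nothing otherwise, so the same declaration yields either a stub or an ordinary out-of-line definition. A sketch of the mechanism, assuming that expansion:

    // Sketch of the KERNEL_RETURN idiom; KERNEL stands for the stripped
    // "kernel VM" build configuration.
    #ifdef KERNEL
    #define KERNEL_RETURN {}      // method becomes an empty inline stub
    #else
    #define KERNEL_RETURN         // ordinary declaration; body in the .cpp
    #endif

    struct PermGenSketch {
      static void initialize_oops() KERNEL_RETURN;
    };

    #ifndef KERNEL
    void PermGenSketch::initialize_oops() { /* real initialization */ }
    #endif
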
+diff -ruN openjdk6/hotspot/src/share/vm/memory/compactPermGen.hpp openjdk/hotspot/src/share/vm/memory/compactPermGen.hpp
+--- openjdk6/hotspot/src/share/vm/memory/compactPermGen.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/compactPermGen.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)compactPermGen.hpp	1.17 07/05/05 17:05:45 JVM"
+-#endif
+ /*
+  * Copyright 2000-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class ContigPermSpace;
+@@ -41,9 +38,9 @@
+   CompactingPermGen(ReservedSpace rs, ReservedSpace shared_rs,
+                     size_t initial_byte_size, GenRemSet* remset,
+                     PermanentGenerationSpec* perm_spec);
+-	
++
+   HeapWord* mem_allocate(size_t size);
+-		   
++
+   void compute_new_size();
+ 
+   Generation* as_gen() const { return _gen; }
+diff -ruN openjdk6/hotspot/src/share/vm/memory/defNewGeneration.cpp openjdk/hotspot/src/share/vm/memory/defNewGeneration.cpp
+--- openjdk6/hotspot/src/share/vm/memory/defNewGeneration.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/defNewGeneration.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)defNewGeneration.cpp	1.73 07/05/22 17:24:57 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -63,7 +60,7 @@
+   // were discovering weak references. While we might not need
+   // to take a special action to keep this reference alive, we
+   // will need to dirty a card as the field was modified.
+-  //  
++  //
+   // Alternatively, we could create a method which iterates through
+   // each generation, allowing them in turn to examine the modified
+   // field.
+@@ -98,7 +95,7 @@
+ 
+ DefNewGeneration::EvacuateFollowersClosure::
+ EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
+-			 ScanClosure* cur, ScanClosure* older) :
++                         ScanClosure* cur, ScanClosure* older) :
+   _gch(gch), _level(level),
+   _scan_cur_or_nonheap(cur), _scan_older(older)
+ {}
+@@ -112,8 +109,8 @@
+ 
+ DefNewGeneration::FastEvacuateFollowersClosure::
+ FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
+-			     DefNewGeneration* gen,
+-			     FastScanClosure* cur, FastScanClosure* older) :
++                             DefNewGeneration* gen,
++                             FastScanClosure* cur, FastScanClosure* older) :
+   _gch(gch), _level(level), _gen(gen),
+   _scan_cur_or_nonheap(cur), _scan_older(older)
+ {}
+@@ -124,18 +121,18 @@
+                                        _scan_older);
+   } while (!_gch->no_allocs_since_save_marks(_level));
+   guarantee(_gen->promo_failure_scan_stack() == NULL
+-	    || _gen->promo_failure_scan_stack()->length() == 0,
+-	    "Failed to finish scan");
++            || _gen->promo_failure_scan_stack()->length() == 0,
++            "Failed to finish scan");
+ }
+ 
+-ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) : 
++ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
+   OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
+ {
+   assert(_g->level() == 0, "Optimized for youngest generation");
+   _boundary = _g->reserved().end();
+ }
+ 
+-FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) : 
++FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
+   OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
+ {
+   assert(_g->level() == 0, "Optimized for youngest generation");
+@@ -151,18 +148,18 @@
+ 
+ 
+ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
+-				   size_t initial_size,
+-				   int level,
+-				   const char* policy)
++                                   size_t initial_size,
++                                   int level,
++                                   const char* policy)
+   : Generation(rs, initial_size, level),
+-    _objs_with_preserved_marks(NULL), 
+-    _preserved_marks_of_objs(NULL), 
++    _objs_with_preserved_marks(NULL),
++    _preserved_marks_of_objs(NULL),
+     _promo_failure_scan_stack(NULL),
+     _promo_failure_drain_in_progress(false),
+     _should_allocate_from_space(false)
+ {
+   MemRegion cmr((HeapWord*)_virtual_space.low(),
+-		(HeapWord*)_virtual_space.high());
++                (HeapWord*)_virtual_space.high());
+   Universe::heap()->barrier_set()->resize_covered_region(cmr);
+ 
+   if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
+@@ -217,7 +214,7 @@
+     // May happen due to 64Kb rounding, if so adjust eden size back up
+     minimum_eden_size = align_size_up(minimum_eden_size, alignment);
+     uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
+-    uintx unaligned_survivor_size = 
++    uintx unaligned_survivor_size =
+       align_size_down(maximum_survivor_size, alignment);
+     survivor_size = MAX2(unaligned_survivor_size, alignment);
+     eden_size = size - (2*survivor_size);
+@@ -307,16 +304,16 @@
+   int next_level = level() + 1;
+   GenCollectedHeap* gch = GenCollectedHeap::heap();
+   assert(next_level < gch->_n_gens,
+-	 "DefNewGeneration cannot be an oldest gen");
+-    
++         "DefNewGeneration cannot be an oldest gen");
++
+   Generation* next_gen = gch->_gens[next_level];
+   size_t old_size = next_gen->capacity();
+   size_t new_size_before = _virtual_space.committed_size();
+   size_t min_new_size = spec()->init_size();
+   size_t max_new_size = reserved().byte_size();
+   assert(min_new_size <= new_size_before &&
+-	 new_size_before <= max_new_size,
+-	 "just checking");
++         new_size_before <= max_new_size,
++         "just checking");
+   // All space sizes must be multiples of Generation::GenGrain.
+   size_t alignment = Generation::GenGrain;
+ 
+@@ -340,7 +337,7 @@
+     }
+     // If the heap failed to expand to the desired size,
+     // "changed" will be false.  If the expansion failed
+-    // (and at this point it was expected to succeed), 
++    // (and at this point it was expected to succeed),
+     // ignore the failure (leaving "changed" as false).
+   }
+   if (desired_new_size < new_size_before && eden()->is_empty()) {
+@@ -358,11 +355,11 @@
+       size_t new_size_after  = _virtual_space.committed_size();
+       size_t eden_size_after = eden()->capacity();
+       size_t survivor_size_after = from()->capacity();
+-      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" 
+-        SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]", 
++      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden="
++        SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
+         new_size_before/K, new_size_after/K, eden_size_after/K, survivor_size_after/K);
+       if (WizardMode) {
+-        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]", 
++        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
+           thread_increase_size/K, threads_count);
+       }
+       gclog_or_tty->cr();
+@@ -426,31 +423,31 @@
+ 
+ 
+ void DefNewGeneration::space_iterate(SpaceClosure* blk,
+-				     bool usedOnly) {
++                                     bool usedOnly) {
+   blk->do_space(eden());
+   blk->do_space(from());
+   blk->do_space(to());
+ }
+ 
+-// The last collection bailed out, we are running out of heap space, 
++// The last collection bailed out, we are running out of heap space,
+ // so we try to allocate the from-space, too.
+ HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
+   HeapWord* result = NULL;
+   if (PrintGC && Verbose) {
+     gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):"
+                   "  will_fail: %s"
+-		  "  heap_lock: %s"
++                  "  heap_lock: %s"
+                   "  free: " SIZE_FORMAT,
+                   size,
+                GenCollectedHeap::heap()->incremental_collection_will_fail() ? "true" : "false",
+-	       Heap_lock->is_locked() ? "locked" : "unlocked",
++               Heap_lock->is_locked() ? "locked" : "unlocked",
+                from()->free());
+     }
+   if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
+     if (Heap_lock->owned_by_self() ||
+         (SafepointSynchronize::is_at_safepoint() &&
+          Thread::current()->is_VM_thread())) {
+-      // If the Heap_lock is not locked by this thread, this will be called 
++      // If the Heap_lock is not locked by this thread, this will be called
+       // again later with the Heap_lock held.
+       result = from()->allocate(size);
+     } else if (PrintGC && Verbose) {
+@@ -467,7 +464,7 @@
+ 
+ HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
+                                                 bool   is_tlab,
+-						bool   parallel) {
++                                                bool   parallel) {
+   // We don't attempt to expand the young generation (but perhaps we should.)
+   return allocate(size, is_tlab);
+ }
+@@ -475,12 +472,12 @@
+ 
+ void DefNewGeneration::collect(bool   full,
+                                bool   clear_all_soft_refs,
+-			       size_t size,
++                               size_t size,
+                                bool   is_tlab) {
+   assert(full || size > 0, "otherwise we don't want to collect");
+   GenCollectedHeap* gch = GenCollectedHeap::heap();
+   _next_gen = gch->next_gen(this);
+-  assert(_next_gen != NULL, 
++  assert(_next_gen != NULL,
+     "This must be the youngest gen, and not the only gen");
+ 
+   // If the next generation is too full to accommodate promotion
+@@ -509,17 +506,17 @@
+ 
+   gch->rem_set()->prepare_for_younger_refs_iterate(false);
+ 
+-  assert(gch->no_allocs_since_save_marks(0), 
+-	 "save marks have not been newly set.");
++  assert(gch->no_allocs_since_save_marks(0),
++         "save marks have not been newly set.");
+ 
+   // Weak refs.
+   // FIXME: Are these storage leaks, or are they resource objects?
+ #ifdef COMPILER2
+   ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy();
+-#else 
++#else
+   ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy();
+ #endif // COMPILER2
+-      
++
+   // Not very pretty.
+   CollectorPolicy* cp = gch->collector_policy();
+ 
+@@ -535,12 +532,12 @@
+          "save marks have not been newly set.");
+ 
+   gch->gen_process_strong_roots(_level,
+-				true, // Process younger gens, if any, as
+-				      // strong roots.
+-				false,// not collecting permanent generation.
+-				SharedHeap::SO_AllClasses,
+-				&fsc_with_gc_barrier,
+-				&fsc_with_no_gc_barrier);
++                                true, // Process younger gens, if any, as
++                                      // strong roots.
++                                false,// not collecting permanent generation.
++                                SharedHeap::SO_AllClasses,
++                                &fsc_with_gc_barrier,
++                                &fsc_with_no_gc_barrier);
+ 
+   // "evacuate followers".
+   evacuate_followers.do_void();
+@@ -553,7 +550,7 @@
+     eden()->clear();
+     from()->clear();
+     swap_spaces();
+-  
++
+     assert(to()->is_empty(), "to space should be empty now");
+ 
+     // Set the desired survivor size to half the real survivor space
+@@ -564,9 +561,9 @@
+       gch->print_heap_change(gch_prev_used);
+     }
+   } else {
+-    assert(HandlePromotionFailure, 
++    assert(HandlePromotionFailure,
+       "Should not be here unless promotion failure handling is on");
+-    assert(_promo_failure_scan_stack != NULL && 
++    assert(_promo_failure_scan_stack != NULL &&
+       _promo_failure_scan_stack->length() == 0, "post condition");
+ 
+     // deallocate stack and its elements
+@@ -633,9 +630,9 @@
+   if (m->must_be_preserved_for_promotion_failure(obj)) {
+     if (_objs_with_preserved_marks == NULL) {
+       assert(_preserved_marks_of_objs == NULL, "Both or none.");
+-      _objs_with_preserved_marks = new (ResourceObj::C_HEAP) 
++      _objs_with_preserved_marks = new (ResourceObj::C_HEAP)
+         GrowableArray<oop>(PreserveMarkStackSize, true);
+-      _preserved_marks_of_objs = new (ResourceObj::C_HEAP) 
++      _preserved_marks_of_objs = new (ResourceObj::C_HEAP)
+         GrowableArray<markOop>(PreserveMarkStackSize, true);
+     }
+     _objs_with_preserved_marks->push(obj);
+@@ -661,10 +658,10 @@
+ 
+ oop DefNewGeneration::copy_to_survivor_space(oop old, oop* from) {
+   assert(is_in_reserved(old) && !old->is_forwarded(),
+-	 "shouldn't be scavenging this oop"); 
++         "shouldn't be scavenging this oop");
+   size_t s = old->size();
+   oop obj = NULL;
+-  
++
+   // Try allocating obj in to-space (unless too old)
+   if (old->age() < tenuring_threshold()) {
+     obj = (oop) to()->allocate(s);
+@@ -692,7 +689,7 @@
+     Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);
+ 
+     // Increment age if obj still in new generation
+-    obj->incr_age(); 
++    obj->incr_age();
+     age_table()->add(obj, s);
+   }
+ 
+@@ -720,14 +717,14 @@
+   }
+ }
+ 
+-void DefNewGeneration::save_marks() { 
++void DefNewGeneration::save_marks() {
+   eden()->set_saved_mark();
+   to()->set_saved_mark();
+   from()->set_saved_mark();
+ }
+ 
+ 
+-void DefNewGeneration::reset_saved_marks() { 
++void DefNewGeneration::reset_saved_marks() {
+   eden()->reset_saved_mark();
+   to()->reset_saved_mark();
+   from()->reset_saved_mark();
+@@ -740,16 +737,16 @@
+   return to()->saved_mark_at_top();
+ }
+ 
+-#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)	\
+-								\
+-void DefNewGeneration::						\
+-oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {	\
+-  cl->set_generation(this);					\
+-  eden()->oop_since_save_marks_iterate##nv_suffix(cl);		\
+-  to()->oop_since_save_marks_iterate##nv_suffix(cl);		\
+-  from()->oop_since_save_marks_iterate##nv_suffix(cl);		\
+-  cl->reset_generation();					\
+-  save_marks();							\
++#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
++                                                                \
++void DefNewGeneration::                                         \
++oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
++  cl->set_generation(this);                                     \
++  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
++  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
++  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
++  cl->reset_generation();                                       \
++  save_marks();                                                 \
+ }
+ 
+ ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)
+@@ -757,7 +754,7 @@
+ #undef DefNew_SINCE_SAVE_MARKS_DEFN
+ 
+ void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
+-					 size_t max_alloc_words) {
++                                         size_t max_alloc_words) {
+   if (requestor == this || _promotion_failed) return;
+   assert(requestor->level() > level(), "DefNewGeneration must be youngest");
+ 
+@@ -785,16 +782,16 @@
+   if (_next_gen == NULL) {
+     GenCollectedHeap* gch = GenCollectedHeap::heap();
+     _next_gen = gch->next_gen(this);
+-    assert(_next_gen != NULL, 
++    assert(_next_gen != NULL,
+            "This must be the youngest gen, and not the only gen");
+   }
+ 
+   // Decide if there's enough room for a full promotion
+-  // When using extremely large edens, we effectively lose a 
+-  // large amount of old space.  Use the "MaxLiveObjectEvacuationRatio" 
+-  // flag to reduce the minimum evacuation space requirements. If 
+-  // there is not enough space to evacuate eden during a scavenge, 
+-  // the VM will immediately exit with an out of memory error. 
++  // When using extremely large edens, we effectively lose a
++  // large amount of old space.  Use the "MaxLiveObjectEvacuationRatio"
++  // flag to reduce the minimum evacuation space requirements. If
++  // there is not enough space to evacuate eden during a scavenge,
++  // the VM will immediately exit with an out of memory error.
+   // This flag has not been tested
+   // with collectors other than simple mark & sweep.
+   //
+@@ -812,7 +809,7 @@
+   size_t worst_case_evacuation = (size_t)(used() * evacuation_ratio);
+ 
+   return _next_gen->promotion_attempt_is_safe(worst_case_evacuation,
+-					      HandlePromotionFailure);
++                                              HandlePromotionFailure);
+ }
+ 
+ void DefNewGeneration::gc_epilogue(bool full) {
+@@ -830,7 +827,7 @@
+       set_should_allocate_from_space();
+     }
+   }
+-  
++
+   // update the generation and space performance counters
+   update_counters();
+   gch->collector_policy()->counters()->update_counters();
+diff -ruN openjdk6/hotspot/src/share/vm/memory/defNewGeneration.hpp openjdk/hotspot/src/share/vm/memory/defNewGeneration.hpp
+--- openjdk6/hotspot/src/share/vm/memory/defNewGeneration.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/defNewGeneration.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)defNewGeneration.hpp	1.40 07/05/17 15:54:44 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class EdenSpace;
+@@ -39,15 +36,15 @@
+   int         _tenuring_threshold;   // Tenuring threshold for next collection.
+   ageTable    _age_table;
+   // Size of object to pretenure in words; command line provides bytes
+-  size_t        _pretenure_size_threshold_words; 
++  size_t        _pretenure_size_threshold_words;
+ 
+   ageTable*   age_table() { return &_age_table; }
+-  // Initialize state to optimistically assume no promotion failure will 
+-  // happen. 
+-  void   init_assuming_no_promotion_failure(); 
+-  // True iff a promotion has failed in the current collection. 
+-  bool   _promotion_failed; 
+-  bool   promotion_failed() { return _promotion_failed; } 
++  // Initialize state to optimistically assume no promotion failure will
++  // happen.
++  void   init_assuming_no_promotion_failure();
++  // True iff a promotion has failed in the current collection.
++  bool   _promotion_failed;
++  bool   promotion_failed() { return _promotion_failed; }
+ 
+   // Handling promotion failure.  A young generation collection
+   // can fail if a live object cannot be copied out of its
+@@ -67,22 +64,22 @@
+   //     All objects in the young generation are unmarked.
+   //     Eden, from-space, and to-space will all be collected by
+   //       the full collection.
+-  void handle_promotion_failure(oop); 
++  void handle_promotion_failure(oop);
+ 
+   // In the absence of promotion failure, we wouldn't look at "from-space"
+   // objects after a young-gen collection.  When promotion fails, however,
+   // the subsequent full collection will look at from-space objects:
+   // therefore we must remove their forwarding pointers.
+-  void remove_forwarding_pointers(); 
++  void remove_forwarding_pointers();
+ 
+-  // Preserve the mark of "obj", if necessary, in preparation for its mark 
+-  // word being overwritten with a self-forwarding-pointer. 
+-  void   preserve_mark_if_necessary(oop obj, markOop m); 
+-
+-  // When one is non-null, so is the other.  Together, they each pair is 
+-  // an object with a preserved mark, and its mark value. 
+-  GrowableArray<oop>*     _objs_with_preserved_marks; 
+-  GrowableArray<markOop>* _preserved_marks_of_objs; 
++  // Preserve the mark of "obj", if necessary, in preparation for its mark
++  // word being overwritten with a self-forwarding-pointer.
++  void   preserve_mark_if_necessary(oop obj, markOop m);
++
++  // When one is non-null, so is the other.  Together, they each pair is
++  // an object with a preserved mark, and its mark value.
++  GrowableArray<oop>*     _objs_with_preserved_marks;
++  GrowableArray<markOop>* _preserved_marks_of_objs;
+ 
+   // Returns true if the collection can be safely attempted.
+   // If this method returns false, a collection is not
+@@ -178,7 +175,7 @@
+     ScanClosure* _scan_older;
+   public:
+     EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
+-			     ScanClosure* cur, ScanClosure* older);
++                             ScanClosure* cur, ScanClosure* older);
+     void do_void();
+   };
+ 
+@@ -192,9 +189,9 @@
+     FastScanClosure* _scan_older;
+   public:
+     FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
+-				 DefNewGeneration* gen,
+-				 FastScanClosure* cur,
+-				 FastScanClosure* older);
++                                 DefNewGeneration* gen,
++                                 FastScanClosure* cur,
++                                 FastScanClosure* older);
+     void do_void();
+   };
+ 
+@@ -260,9 +257,9 @@
+     const bool check_too_big = _pretenure_size_threshold_words > 0;
+     const bool not_too_big   = word_size < _pretenure_size_threshold_words;
+     const bool size_ok       = is_tlab || !check_too_big || not_too_big;
+-      
++
+     bool result = !overflows &&
+-                  non_zero   && 
++                  non_zero   &&
+                   size_ok;
+ 
+     return result;
+@@ -288,27 +285,27 @@
+   // Need to declare the full complement of closures, whether we'll
+   // override them or not, or get message from the compiler:
+   //   oop_since_save_marks_iterate_nv hides virtual function...
+-#define DefNew_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)	\
++#define DefNew_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
+   void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
+ 
+   ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DECL)
+ 
+ #undef DefNew_SINCE_SAVE_MARKS_DECL
+-  
++
+   // For non-youngest collection, the DefNewGeneration can contribute
+   // "to-space".
+   void contribute_scratch(ScratchBlock*& list, Generation* requestor,
+-			  size_t max_alloc_words);
++                          size_t max_alloc_words);
+ 
+   // GC support
+   virtual void compute_new_size();
+   virtual void collect(bool   full,
+                        bool   clear_all_soft_refs,
+-                       size_t size, 
++                       size_t size,
+                        bool   is_tlab);
+   HeapWord* expand_and_allocate(size_t size,
+-				bool is_tlab,
+-				bool parallel = false);
++                                bool is_tlab,
++                                bool parallel = false);
+ 
+   oop copy_to_survivor_space(oop old, oop* from);
+   int tenuring_threshold() { return _tenuring_threshold; }
+diff -ruN openjdk6/hotspot/src/share/vm/memory/defNewGeneration.inline.hpp openjdk/hotspot/src/share/vm/memory/defNewGeneration.inline.hpp
+--- openjdk6/hotspot/src/share/vm/memory/defNewGeneration.inline.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/defNewGeneration.inline.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)defNewGeneration.inline.hpp	1.18 07/05/05 17:05:46 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ CompactibleSpace* DefNewGeneration::first_compaction_space() const {
+@@ -44,7 +41,7 @@
+     HeapWord* old_limit = eden()->soft_end();
+     if (old_limit < eden()->end()) {
+       // Tell the next generation we reached a limit.
+-      HeapWord* new_limit = 
++      HeapWord* new_limit =
+         next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
+       if (new_limit != NULL) {
+         Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
+@@ -53,7 +50,7 @@
+                "invalid state after allocation_limit_reached returned null");
+       }
+     } else {
+-      // The allocation failed and the soft limit is equal to the hard limit, 
++      // The allocation failed and the soft limit is equal to the hard limit,
+       // there are no reasons to do an attempt to allocate
+       assert(old_limit == eden()->end(), "sanity check");
+       break;
+@@ -73,7 +70,7 @@
+ }
+ 
+ HeapWord* DefNewGeneration::par_allocate(size_t word_size,
+-					 bool is_tlab) {
++                                         bool is_tlab) {
+   return eden()->par_allocate(word_size);
+ }
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/memory/dump.cpp openjdk/hotspot/src/share/vm/memory/dump.cpp
+--- openjdk6/hotspot/src/share/vm/memory/dump.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/dump.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)dump.cpp	1.33 07/05/23 10:53:38 JVM"
+-#endif
+ /*
+  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -116,7 +113,7 @@
+     obj->set_mark(markOopDesc::prototype()->set_marked());
+     return true;
+   }
+-  
++
+   return false;
+ }
+ 
+@@ -459,7 +456,7 @@
+         mark_and_move_for_policy(OP_favor_startup, k->klass_part()->name(), _move_ro);
+         do_object(k);
+       }
+-      
++
+       objArrayOop methods = ik->methods();
+       for(i = 0; i < methods->length(); i++) {
+         methodOop m = methodOop(methods->obj_at(i));
+@@ -478,7 +475,7 @@
+ 
+       mark_and_move_for_policy(OP_favor_startup, ik->transitive_interfaces(), _move_ro);
+       mark_and_move_for_policy(OP_favor_startup, ik->fields(), _move_ro);
+-      
++
+       mark_and_move_for_policy(OP_favor_runtime, ik->secondary_supers(),  _move_ro);
+       mark_and_move_for_policy(OP_favor_runtime, ik->method_ordering(),   _move_ro);
+       mark_and_move_for_policy(OP_favor_runtime, ik->class_annotations(), _move_ro);
+@@ -503,7 +500,7 @@
+     if (obj->is_klass() && obj->blueprint()->oop_is_instanceKlass()) {
+       instanceKlass* ik = instanceKlass::cast((klassOop)obj);
+       int i;
+-      
++
+       mark_and_move_for_policy(OP_favor_startup, ik->as_klassOop(), _move_rw);
+ 
+       if (ik->super() != NULL) {
+@@ -575,14 +572,14 @@
+                               ik->methods_parameter_annotations(),
+                               ik->methods_default_annotations(),
+                               true /* idempotent, slow */);
+-  
++
+   // Itable indices are calculated based on methods array order
+   // (see klassItable::compute_itable_index()).  Must reinitialize.
+   // We assume that since checkconstraints is false, this method
+   // cannot throw an exception.  An exception here would be
+   // problematic since this is the VMThread, not a JavaThread.
+   ik->itable()->initialize_itable(false, THREAD);
+-}  
++}
+ 
+ // Sort methods if the oop is an instanceKlass.
+ 
+@@ -777,7 +774,7 @@
+     tty->cr(); tty->print_cr("ReadWrite space:");
+     gen->rw_space()->object_iterate(&coc);
+     coc.print();
+-  
++
+     // Reset counters
+ 
+     ClearAllocCountClosure cacc;
+@@ -831,7 +828,7 @@
+     _md_vs = md_vs;
+     _klass_objects = new GrowableArray<klassOop>();
+   }
+-  
++
+ 
+   void do_object(oop obj) {
+     if (obj->is_klass()) {
+@@ -882,7 +879,7 @@
+     _md_vs = md_vs;
+     _mc_vs = mc_vs;
+   }
+-  
++
+   VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
+   void doit() {
+     Thread* THREAD = VMThread::vm_thread();
+@@ -921,14 +918,14 @@
+     MarkStringObjects mark_strings;
+     MoveMarkedObjects move_ro(_ro_space, true);
+     MoveMarkedObjects move_rw(_rw_space, false);
+-    
++
+     // The SharedOptimizeColdStart VM option governs the new layout
+     // algorithm for promoting classes into the shared archive.
+     // The general idea is to minimize cold start time by laying
+     // out the objects in the order they are accessed at startup time.
+     // By doing this we are trying to eliminate out-of-order accesses
+     // in the shared archive.  This benefits cold startup time by making
+-    // disk reads as sequential as possible during class loading and 
++    // disk reads as sequential as possible during class loading and
+     // bootstrapping activities.  There may also be a small secondary
+     // effect of better "packing" of more commonly used data on a smaller
+     // number of pages, although no direct benefit has been measured from
+@@ -936,7 +933,7 @@
+     //
+     // At the class level of granularity, the promotion order is dictated
+     // by the classlist file whose generation is discussed elsewhere.
+-    // 
++    //
+     // At smaller granularity, optimal ordering was determined by an
+     // offline analysis of object access order in the shared archive.
+     // The dbx watchpoint facility, combined with SA post-processing,
+@@ -1026,7 +1023,7 @@
+         mark_and_move_ordered_rw.do_object(obj);
+       }
+       tty->print_cr("done. ");
+-    } 
++    }
+     tty->print("Moving read-write objects to shared space at " PTR_FORMAT " ... ",
+                _rw_space->top());
+     Universe::oops_do(&mark_all, true);
+@@ -1091,7 +1088,7 @@
+                                                   &mc_top, mc_end);
+ 
+     // Fix (forward) all of the references in these shared objects (which
+-    // are required to point ONLY to objects in the shared spaces). 
++    // are required to point ONLY to objects in the shared spaces).
+     // Also, create a list of all objects which might later contain a
+     // reference to a younger generation object.
+ 
+@@ -1111,7 +1108,7 @@
+     // Now, we reorder methods as a separate step after ALL forwarding
+     // pointer resolution, so that methods can be promoted in any order
+     // with respect to their holder classes.
+-    
++
+     SortMethodsClosure sort(THREAD);
+     gen->ro_space()->object_iterate(&sort);
+     gen->rw_space()->object_iterate(&sort);
+@@ -1171,7 +1168,7 @@
+     }
+ 
+     // Write the oop data to the output array.
+-    
++
+     WriteClosure wc(md_top, md_end);
+     CompactingPermGenGen::serialize_oops(&wc);
+     md_top = wc.get_top();
+@@ -1198,10 +1195,10 @@
+     _ro_space->set_saved_mark();
+     mapinfo->write_space(CompactingPermGenGen::rw, _rw_space, false);
+     _rw_space->set_saved_mark();
+-    mapinfo->write_region(CompactingPermGenGen::md, _md_vs->low(), 
++    mapinfo->write_region(CompactingPermGenGen::md, _md_vs->low(),
+                           md_top - _md_vs->low(), SharedMiscDataSize,
+                           false, false);
+-    mapinfo->write_region(CompactingPermGenGen::mc, _mc_vs->low(), 
++    mapinfo->write_region(CompactingPermGenGen::mc, _mc_vs->low(),
+                           mc_top - _mc_vs->low(), SharedMiscCodeSize,
+                           true, true);
+ 
+@@ -1210,10 +1207,10 @@
+     mapinfo->write_header();
+     mapinfo->write_space(CompactingPermGenGen::ro, _ro_space, true);
+     mapinfo->write_space(CompactingPermGenGen::rw, _rw_space, false);
+-    mapinfo->write_region(CompactingPermGenGen::md, _md_vs->low(), 
++    mapinfo->write_region(CompactingPermGenGen::md, _md_vs->low(),
+                           md_top - _md_vs->low(), SharedMiscDataSize,
+                           false, false);
+-    mapinfo->write_region(CompactingPermGenGen::mc, _mc_vs->low(), 
++    mapinfo->write_region(CompactingPermGenGen::mc, _mc_vs->low(),
+                           mc_top - _mc_vs->low(), SharedMiscCodeSize,
+                           true, true);
+     mapinfo->close();
+@@ -1285,12 +1282,12 @@
+     jlong h = start;
+     char *p = (char *)buf, *e = p + len;
+     while (p < e) {
+-	char c = *p++;
+-	if (c <= ' ') {
+-	    /* Skip spaces and control characters */
+-	    continue;
+-	}
+-	h = 31 * h + c;
++        char c = *p++;
++        if (c <= ' ') {
++            /* Skip spaces and control characters */
++            continue;
++        }
++        h = 31 * h + c;
+     }
+     return h;
+ }
+@@ -1344,8 +1341,8 @@
+     StringTable::intern("([Ljava/lang/String;)V", THREAD);
+     StringTable::intern("Ljava/lang/Class;", THREAD);
+ 
+-    StringTable::intern("I", THREAD);	// Needed for StringBuffer persistence?
+-    StringTable::intern("Z", THREAD);	// Needed for StringBuffer persistence?
++    StringTable::intern("I", THREAD);   // Needed for StringBuffer persistence?
++    StringTable::intern("Z", THREAD);   // Needed for StringBuffer persistence?
+ 
+     // sun.io.Converters
+     static const char obj_array_sig[] = "[[Ljava/lang/Object;";
+@@ -1362,7 +1359,7 @@
+         jint fsh, fsl;
+         if (sscanf(class_name, "# %8x%8x\n", &fsh, &fsl) == 2) {
+           file_jsum = ((jlong)(fsh) << 32) | (fsl & 0xffffffff);
+-        }        
++        }
+ 
+         continue;
+       }
+diff -ruN openjdk6/hotspot/src/share/vm/memory/filemap.cpp openjdk/hotspot/src/share/vm/memory/filemap.cpp
+--- openjdk6/hotspot/src/share/vm/memory/filemap.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/filemap.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)filemap.cpp	1.25 07/05/05 17:05:41 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -49,15 +46,15 @@
+               " shared archive file.\n");
+   jio_vfprintf(defaultStream::error_stream(), msg, ap);
+   jio_fprintf(defaultStream::error_stream(), "\n");
+-  vm_exit_during_initialization("Unable to use shared archive.", NULL);	
++  vm_exit_during_initialization("Unable to use shared archive.", NULL);
+ }
+ 
+ 
+ void FileMapInfo::fail_stop(const char *msg, ...) {
+-	va_list ap;
++        va_list ap;
+   va_start(ap, msg);
+-  fail(msg, ap);	// Never returns.
+-  va_end(ap);		// for completeness.
++  fail(msg, ap);        // Never returns.
++  va_end(ap);           // for completeness.
+ }
+ 
+ 
+@@ -282,7 +279,7 @@
+ 
+ // Close the shared archive file.  This does NOT unmap mapped regions.
+ 
+-void FileMapInfo::close() { 
++void FileMapInfo::close() {
+   if (_file_open) {
+     if (::close(_fd) < 0) {
+       fail_stop("Unable to close the shared archive file.");
+@@ -508,8 +505,8 @@
+ }
+ 
+ // The following method is provided to see whether a given pointer
+-// falls in the mapped shared space. 
+-// Param: 
++// falls in the mapped shared space.
++// Param:
+ // p, The given pointer
+ // Return:
+ // True if the p is within the mapped shared space, otherwise, false.
+diff -ruN openjdk6/hotspot/src/share/vm/memory/filemap.hpp openjdk/hotspot/src/share/vm/memory/filemap.hpp
+--- openjdk6/hotspot/src/share/vm/memory/filemap.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/filemap.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)filemap.hpp	1.16 07/05/05 17:05:47 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Layout of the file:
+diff -ruN openjdk6/hotspot/src/share/vm/memory/gcLocker.cpp openjdk/hotspot/src/share/vm/memory/gcLocker.cpp
+--- openjdk6/hotspot/src/share/vm/memory/gcLocker.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/gcLocker.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)gcLocker.cpp	1.52 07/05/17 15:54:45 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -126,7 +123,7 @@
+ }
+ 
+ 
+-// JRT_LEAF rules: 
++// JRT_LEAF rules:
+ // A JRT_LEAF method may not interfere with safepointing by
+ //   1) acquiring or blocking on a Mutex or JavaLock - checked
+ //   2) allocating heap memory - checked
+diff -ruN openjdk6/hotspot/src/share/vm/memory/gcLocker.hpp openjdk/hotspot/src/share/vm/memory/gcLocker.hpp
+--- openjdk6/hotspot/src/share/vm/memory/gcLocker.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/gcLocker.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)gcLocker.hpp	1.60 07/05/17 15:54:47 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The direct lock/unlock calls do not force a collection if an unlock
+@@ -37,38 +34,38 @@
+   static volatile bool _doing_gc;        // unlock_critical() is doing a GC
+ 
+   // Accessors
+-  static bool is_jni_active() { 
+-    return _jni_lock_count > 0; 
++  static bool is_jni_active() {
++    return _jni_lock_count > 0;
+   }
+ 
+-  static void set_needs_gc() { 
++  static void set_needs_gc() {
+     assert(SafepointSynchronize::is_at_safepoint(),
+       "needs_gc is only set at a safepoint");
+     _needs_gc = true;
+   }
+ 
+-  static void clear_needs_gc() { 
++  static void clear_needs_gc() {
+     assert_lock_strong(JNICritical_lock);
+     _needs_gc = false;
+   }
+ 
+-  static void jni_lock() { 
++  static void jni_lock() {
+     Atomic::inc(&_jni_lock_count);
+     CHECK_UNHANDLED_OOPS_ONLY(
+       if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count++; })
+-    assert(Universe::heap() == NULL || !Universe::heap()->is_gc_active(), 
+-           "locking failed");                
++    assert(Universe::heap() == NULL || !Universe::heap()->is_gc_active(),
++           "locking failed");
+   }
+ 
+-  static void jni_unlock() { 
+-    Atomic::dec(&_jni_lock_count);           
++  static void jni_unlock() {
++    Atomic::dec(&_jni_lock_count);
+     CHECK_UNHANDLED_OOPS_ONLY(
+       if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count--; })
+   }
+ 
+   static void jni_lock_slow();
+   static void jni_unlock_slow();
+-    
++
+  public:
+   // Accessors
+   static bool is_active();
+@@ -90,7 +87,7 @@
+   // Non-structured GC locking: currently needed for JNI. Use with care!
+   static void lock();
+   static void unlock();
+-  
++
+   // The following two methods are used for JNI critical regions.
+   // If we find that we failed to perform a GC because the GC_locker
+   // was active, arrange for one as soon as possible by allowing
+@@ -139,13 +136,13 @@
+  friend class Pause_No_GC_Verifier;
+ 
+  protected:
+-  bool _verifygc;  
++  bool _verifygc;
+   unsigned int _old_invocations;
+ 
+  public:
+ #ifdef ASSERT
+   No_GC_Verifier(bool verifygc = true);
+-  ~No_GC_Verifier();   
++  ~No_GC_Verifier();
+ #else
+   No_GC_Verifier(bool verifygc = true) {}
+   ~No_GC_Verifier() {}
+@@ -164,7 +161,7 @@
+  public:
+ #ifdef ASSERT
+   Pause_No_GC_Verifier(No_GC_Verifier * ngcv);
+-  ~Pause_No_GC_Verifier();   
++  ~Pause_No_GC_Verifier();
+ #else
+   Pause_No_GC_Verifier(No_GC_Verifier * ngcv) {}
+   ~Pause_No_GC_Verifier() {}
+@@ -182,12 +179,12 @@
+ class No_Safepoint_Verifier : public No_GC_Verifier {
+  friend class Pause_No_Safepoint_Verifier;
+ 
+- private:  
++ private:
+   bool _activated;
+   Thread *_thread;
+  public:
+ #ifdef ASSERT
+-  No_Safepoint_Verifier(bool activated = true, bool verifygc = true ) : No_GC_Verifier(verifygc) {      
++  No_Safepoint_Verifier(bool activated = true, bool verifygc = true ) : No_GC_Verifier(verifygc) {
+     _thread = Thread::current();
+     if (_activated) {
+       _thread->_allow_allocation_count++;
+@@ -215,7 +212,7 @@
+ // something to do for the underlying No_GC_Verifier object.
+ 
+ class Pause_No_Safepoint_Verifier : public Pause_No_GC_Verifier {
+- private:  
++ private:
+   No_Safepoint_Verifier * _nsv;
+ 
+  public:
+@@ -273,7 +270,7 @@
+ 
+  public:
+ #ifdef ASSERT
+-  No_Alloc_Verifier(bool activated = true) { 
++  No_Alloc_Verifier(bool activated = true) {
+     _activated = activated;
+     if (_activated) Thread::current()->_allow_allocation_count++;
+   }
+@@ -286,4 +283,3 @@
+   ~No_Alloc_Verifier() {}
+ #endif
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/memory/gcLocker.inline.hpp openjdk/hotspot/src/share/vm/memory/gcLocker.inline.hpp
+--- openjdk6/hotspot/src/share/vm/memory/gcLocker.inline.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/gcLocker.inline.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)gcLocker.inline.hpp	1.21 07/05/05 17:05:49 JVM"
+-#endif
+ /*
+  * Copyright 2000-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ inline bool GC_locker::is_active() {
+@@ -42,7 +39,7 @@
+   CHECK_UNHANDLED_OOPS_ONLY(
+     if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count++; })
+   assert(Universe::heap() == NULL ||
+-	 !Universe::heap()->is_gc_active(), "locking failed");
++         !Universe::heap()->is_gc_active(), "locking failed");
+ }
+ 
+ inline void GC_locker::unlock() {
+@@ -73,4 +70,3 @@
+     }
+   }
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/memory/genCollectedHeap.cpp openjdk/hotspot/src/share/vm/memory/genCollectedHeap.cpp
+--- openjdk6/hotspot/src/share/vm/memory/genCollectedHeap.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/genCollectedHeap.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)genCollectedHeap.cpp	1.189 07/06/12 09:41:51 JVM"
+-#endif
+ /*
+  * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -116,11 +113,11 @@
+   if (!heap_rs.is_reserved()) {
+     vm_shutdown_during_initialization(
+       "Could not reserve enough space for object heap");
+-    return JNI_ENOMEM;                                    
++    return JNI_ENOMEM;
+   }
+ 
+   _reserved = MemRegion((HeapWord*)heap_rs.base(),
+-			(HeapWord*)(heap_rs.base() + heap_rs.size()));
++                        (HeapWord*)(heap_rs.base() + heap_rs.size()));
+ 
+   // It is important to do this in a way such that concurrent readers can't
+   // temporarily think somethings in the heap.  (Seen this happen in asserts.)
+@@ -160,7 +157,7 @@
+ 
+ char* GenCollectedHeap::allocate(size_t alignment,
+                                  PermanentGenerationSpec* perm_gen_spec,
+-                                 size_t* _total_reserved, 
++                                 size_t* _total_reserved,
+                                  int* _n_covered_regions,
+                                  ReservedSpace* heap_rs){
+   const char overflow_msg[] = "The size of the object heap + VM data exceeds "
+@@ -169,11 +166,11 @@
+   // Now figure out the total size.
+   size_t total_reserved = 0;
+   int n_covered_regions = 0;
+-  const size_t pageSize = UseLargePages ? 
++  const size_t pageSize = UseLargePages ?
+       os::large_page_size() : os::vm_page_size();
+ 
+-  for (int i = 0; i < _n_gens; i++) {     
+-    total_reserved += _gen_specs[i]->max_size();    
++  for (int i = 0; i < _n_gens; i++) {
++    total_reserved += _gen_specs[i]->max_size();
+     if (total_reserved < _gen_specs[i]->max_size()) {
+       vm_exit_during_initialization(overflow_msg);
+     }
+@@ -182,7 +179,7 @@
+   assert(total_reserved % pageSize == 0, "Gen size");
+   total_reserved += perm_gen_spec->max_size();
+   assert(total_reserved % pageSize == 0, "Perm Gen size");
+-    
++
+   if (total_reserved < perm_gen_spec->max_size()) {
+     vm_exit_during_initialization(overflow_msg);
+   }
+@@ -191,19 +188,19 @@
+   // Add the size of the data area which shares the same reserved area
+   // as the heap, but which is not actually part of the heap.
+   size_t s = perm_gen_spec->misc_data_size() + perm_gen_spec->misc_code_size();
+-  
++
+   total_reserved += s;
+   if (total_reserved < s) {
+     vm_exit_during_initialization(overflow_msg);
+   }
+ 
+-  if (UseLargePages) { 
++  if (UseLargePages) {
+     assert(total_reserved != 0, "total_reserved cannot be 0");
+-    total_reserved = round_to(total_reserved, os::large_page_size()); 
++    total_reserved = round_to(total_reserved, os::large_page_size());
+     if (total_reserved < os::large_page_size()) {
+       vm_exit_during_initialization(overflow_msg);
+     }
+-  } 
++  }
+ 
+   // Calculate the address at which the heap must reside in order for
+   // the shared data to be at the required address.
+@@ -226,15 +223,15 @@
+   *_total_reserved = total_reserved;
+   *_n_covered_regions = n_covered_regions;
+   *heap_rs = ReservedSpace(total_reserved, alignment,
+-                           UseLargePages, heap_address); 
+-  
++                           UseLargePages, heap_address);
++
+   return heap_address;
+ }
+ 
+ 
+ void GenCollectedHeap::post_initialize() {
+   SharedHeap::post_initialize();
+-  TwoGenerationCollectorPolicy *policy = 
++  TwoGenerationCollectorPolicy *policy =
+     (TwoGenerationCollectorPolicy *)collector_policy();
+   guarantee(policy->is_two_generation_policy(), "Illegal policy type");
+   DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
+@@ -242,15 +239,15 @@
+          def_new_gen->kind() == Generation::ParNew ||
+          def_new_gen->kind() == Generation::ASParNew,
+          "Wrong generation kind");
+-  
++
+   Generation* old_gen = get_gen(1);
+   assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
+-	 old_gen->kind() == Generation::ASConcurrentMarkSweep ||
+-	 old_gen->kind() == Generation::MarkSweepCompact,
++         old_gen->kind() == Generation::ASConcurrentMarkSweep ||
++         old_gen->kind() == Generation::MarkSweepCompact,
+     "Wrong generation kind");
+ 
+   policy->initialize_size_policy(def_new_gen->eden()->capacity(),
+-				 old_gen->capacity(),
++                                 old_gen->capacity(),
+                                  def_new_gen->from()->capacity());
+   policy->initialize_gc_policy_counters();
+ }
+@@ -344,7 +341,7 @@
+ void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
+                                                          size_t size) {
+   if (CheckMemoryInitialization && ZapUnusedHeapArea) {
+-    // We are asked to check a size in HeapWords, 
++    // We are asked to check a size in HeapWords,
+     // but the memory is mangled in juint words.
+     juint* start = (juint*) (addr + skip_header_HeapWords());
+     juint* end   = (juint*) (addr + size);
+@@ -354,11 +351,11 @@
+     }
+   }
+ }
+-#endif 
++#endif
+ 
+ HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
+                                                bool is_tlab,
+-					       bool first_only) {
++                                               bool first_only) {
+   HeapWord* res;
+   for (int i = 0; i < _n_gens; i++) {
+     if (_gens[i]->should_allocate(size, is_tlab)) {
+@@ -374,10 +371,10 @@
+ HeapWord* GenCollectedHeap::mem_allocate(size_t size,
+                                          bool is_large_noref,
+                                          bool is_tlab,
+-					 bool* gc_overhead_limit_was_exceeded) {
+-  return collector_policy()->mem_allocate_work(size, 
+-					       is_tlab, 
+-					       gc_overhead_limit_was_exceeded);
++                                         bool* gc_overhead_limit_was_exceeded) {
++  return collector_policy()->mem_allocate_work(size,
++                                               is_tlab,
++                                               gc_overhead_limit_was_exceeded);
+ }
+ 
+ bool GenCollectedHeap::must_clear_all_soft_refs() {
+@@ -394,7 +391,7 @@
+                                      bool   clear_all_soft_refs,
+                                      size_t size,
+                                      bool   is_tlab,
+-				     int    max_level) {
++                                     int    max_level) {
+   bool prepared_for_verification = false;
+   ResourceMark rm;
+   DEBUG_ONLY(Thread* my_thread = Thread::current();)
+@@ -419,7 +416,7 @@
+       gclog_or_tty->print_cr("GC Cause: %s", GCCause::to_string(gc_cause()));
+     }
+   }
+-  
++
+   {
+     FlagSetting fl(_is_gc_active, true);
+ 
+@@ -428,9 +425,9 @@
+     if (complete) {
+       GCCause::Cause cause = gc_cause();
+       if (cause == GCCause::_java_lang_system_gc) {
+-	gc_cause_str = "Full GC (System) ";
++        gc_cause_str = "Full GC (System) ";
+       } else {
+-	gc_cause_str = "Full GC ";
++        gc_cause_str = "Full GC ";
+       }
+     }
+     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
+@@ -444,7 +441,7 @@
+ 
+     int starting_level = 0;
+     if (full) {
+-      // Search for the oldest generation which will collect all younger 
++      // Search for the oldest generation which will collect all younger
+       // generations, and start collection loop there.
+       for (int i = max_level; i >= 0; i--) {
+         if (_gens[i]->full_collects_younger_generations()) {
+@@ -475,15 +472,15 @@
+                      size*HeapWordSize);
+         }
+ 
+-        if (VerifyBeforeGC && i >= VerifyGCLevel && 
++        if (VerifyBeforeGC && i >= VerifyGCLevel &&
+             total_collections() >= VerifyGCStartAt) {
+           HandleMark hm;  // Discard invalid handles created during verification
+-	  if (!prepared_for_verification) {
+-	    prepare_for_verify(); 
++          if (!prepared_for_verification) {
++            prepare_for_verify();
+             prepared_for_verification = true;
+-	  }
+-	  gclog_or_tty->print(" VerifyBeforeGC:");
+-	  Universe::verify(true);
++          }
++          gclog_or_tty->print(" VerifyBeforeGC:");
++          Universe::verify(true);
+         }
+         COMPILER2_PRESENT(DerivedPointerTable::clear());
+ 
+@@ -511,7 +508,7 @@
+ 
+           HandleMark hm;  // Discard invalid handles created during gc
+           save_marks();   // save marks for all gens
+-          // We want to discover references, but not process them yet. 
++          // We want to discover references, but not process them yet.
+           // This mode is disabled in process_discovered_references if the
+           // generation does some collection work, or in
+           // enqueue_discovered_references if the generation returns
+@@ -530,11 +527,11 @@
+           if (!rp->enqueuing_is_done()) {
+             rp->enqueue_discovered_references();
+           } else {
+-	    rp->set_enqueuing_is_done(false);
++            rp->set_enqueuing_is_done(false);
+           }
+           rp->verify_no_references_recorded();
+         }
+-	max_level_collected = i;
++        max_level_collected = i;
+ 
+         // Determine if allocation request was met.
+         if (size > 0) {
+@@ -546,16 +543,16 @@
+         }
+ 
+         COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
+-  
++
+         _gens[i]->stat_record()->accumulated_time.stop();
+ 
+-	update_gc_stats(i, full);
+-  
++        update_gc_stats(i, full);
++
+         if (VerifyAfterGC && i >= VerifyGCLevel &&
+             total_collections() >= VerifyGCStartAt) {
+           HandleMark hm;  // Discard invalid handles created during verification
+-	  gclog_or_tty->print(" VerifyAfterGC:");
+-	  Universe::verify(false);
++          gclog_or_tty->print(" VerifyAfterGC:");
++          Universe::verify(false);
+         }
+ 
+         if (PrintGCDetails) {
+@@ -565,11 +562,16 @@
+       }
+     }
+ 
++    // Update "complete" boolean wrt what actually transpired --
++    // for instance, a promotion failure could have led to
++    // a whole heap collection.
++    complete = complete || (max_level_collected == n_gens() - 1);
++
+     if (PrintGCDetails) {
+       print_heap_change(gch_prev_used);
+-        
++
+       // Print perm gen info for full GC with PrintGCDetails flag.
+-      if (full && max_level == n_gens() - 1) {
++      if (complete) {
+         print_perm_heap_change(perm_prev_used);
+       }
+     }
+@@ -578,6 +580,7 @@
+       // Adjust generation sizes.
+       _gens[j]->compute_new_size();
+     }
++
+     if (complete) {
+       // Ask the permanent generation to adjust size for full collections
+       perm()->compute_new_size();
+@@ -606,7 +609,7 @@
+     vm_exit(-1);
+   }
+ }
+-    
++
+ HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
+   return collector_policy()->satisfy_failed_allocation(size, is_tlab);
+ }
+@@ -626,14 +629,14 @@
+ 
+ void GenCollectedHeap::
+ gen_process_strong_roots(int level,
+-			 bool younger_gens_as_roots,
+-			 bool collecting_perm_gen,
+-			 SharedHeap::ScanningOption so,
+-			 OopsInGenClosure* older_gens,
+-			 OopsInGenClosure* not_older_gens) {
++                         bool younger_gens_as_roots,
++                         bool collecting_perm_gen,
++                         SharedHeap::ScanningOption so,
++                         OopsInGenClosure* older_gens,
++                         OopsInGenClosure* not_older_gens) {
+   // General strong roots.
+   SharedHeap::process_strong_roots(collecting_perm_gen, so,
+-				   not_older_gens, older_gens);
++                                   not_older_gens, older_gens);
+ 
+   if (younger_gens_as_roots) {
+     if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
+@@ -656,7 +659,7 @@
+ }
+ 
+ void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
+-					      OopClosure* non_root_closure) {
++                                              OopClosure* non_root_closure) {
+   SharedHeap::process_weak_roots(root_closure, non_root_closure);
+   // "Local" "weak" refs
+   for (int i = 0; i < _n_gens; i++) {
+@@ -664,16 +667,16 @@
+   }
+ }
+ 
+-#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)	\
+-void GenCollectedHeap::							\
+-oop_since_save_marks_iterate(int level,					\
+-			     OopClosureType* cur,			\
+-			     OopClosureType* older) {			\
+-  _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);		\
+-  for (int i = level+1; i < n_gens(); i++) {				\
+-    _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);		\
+-  }									\
+-  perm_gen()->oop_since_save_marks_iterate##nv_suffix(older);		\
++#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
++void GenCollectedHeap::                                                 \
++oop_since_save_marks_iterate(int level,                                 \
++                             OopClosureType* cur,                       \
++                             OopClosureType* older) {                   \
++  _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);           \
++  for (int i = level+1; i < n_gens(); i++) {                            \
++    _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);           \
++  }                                                                     \
++  perm_gen()->oop_since_save_marks_iterate##nv_suffix(older);           \
+ }
+ 
+ ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
+@@ -745,7 +748,7 @@
+   assert(Heap_lock->is_locked(), "Precondition#2");
+   GCCauseSetter gcs(this, cause);
+   switch (cause) {
+-    case GCCause::_heap_inspection: 
++    case GCCause::_heap_inspection:
+     case GCCause::_heap_dump: {
+       HandleMark hm;
+       do_full_collection(false,         // don't clear all soft refs
+@@ -788,7 +791,7 @@
+ bool GenCollectedHeap::create_cms_collector() {
+ 
+   assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
+-	 (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)) &&
++         (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)) &&
+          _perm_gen->as_gen()->kind() == Generation::ConcurrentMarkSweep,
+          "Unexpected generation kinds");
+   // Skip two header words in the block content verification
+@@ -796,7 +799,7 @@
+   CMSCollector* collector = new CMSCollector(
+     (ConcurrentMarkSweepGeneration*)_gens[1],
+     (ConcurrentMarkSweepGeneration*)_perm_gen->as_gen(),
+-    _rem_set->as_CardTableRS(), 
++    _rem_set->as_CardTableRS(),
+     (ConcurrentMarkSweepPolicy*) collector_policy());
+ 
+   if (collector == NULL || !collector->completed_initialization()) {
+@@ -826,7 +829,7 @@
+ 
+ 
+ void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
+-					  int max_level) {
++                                          int max_level) {
+   int local_max_level;
+   if (!incremental_collection_will_fail() &&
+       gc_cause() == GCCause::_gc_locker) {
+@@ -861,10 +864,10 @@
+ // Returns "TRUE" iff "p" points into the allocated area of the heap.
+ bool GenCollectedHeap::is_in(const void* p) const {
+   #ifndef ASSERT
+-  guarantee(VerifyBeforeGC   || 
+-            VerifyDuringGC   || 
++  guarantee(VerifyBeforeGC   ||
++            VerifyDuringGC   ||
+             VerifyBeforeExit ||
+-            VerifyAfterGC, "too expensive"); 
++            VerifyAfterGC, "too expensive");
+   #endif
+   // This might be sped up with a cache of the last generation that
+   // answered yes.
+@@ -1004,7 +1007,7 @@
+   HeapWord* result = mem_allocate(size   /* size */,
+                                   false  /* is_large_noref */,
+                                   true   /* is_tlab */,
+-				  &gc_overhead_limit_was_exceeded);
++                                  &gc_overhead_limit_was_exceeded);
+   return result;
+ }
+ 
+@@ -1045,7 +1048,7 @@
+ }
+ 
+ ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
+-					       size_t max_alloc_words) {
++                                               size_t max_alloc_words) {
+   ScratchBlock* res = NULL;
+   for (int i = 0; i < _n_gens; i++) {
+     _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
+@@ -1073,7 +1076,7 @@
+ 
+ 
+ void GenCollectedHeap::generation_iterate(GenClosure* cl,
+-					  bool old_to_young) {
++                                          bool old_to_young) {
+   if (old_to_young) {
+     for (int i = _n_gens-1; i >= 0; i--) {
+       cl->do_generation(_gens[i]);
+@@ -1124,7 +1127,7 @@
+ void GenCollectedHeap::prepare_for_compaction() {
+   Generation* scanning_gen = _gens[_n_gens-1];
+   // Start by compacting into same gen.
+-  CompactPoint cp(scanning_gen, NULL, NULL); 
++  CompactPoint cp(scanning_gen, NULL, NULL);
+   while (scanning_gen != NULL) {
+     scanning_gen->prepare_for_compaction(&cp);
+     scanning_gen = prev_gen(scanning_gen);
+@@ -1137,7 +1140,7 @@
+ 
+ void GenCollectedHeap::verify(bool allow_dirty, bool silent) {
+   if (!silent) {
+-    gclog_or_tty->print("permgen ");     
++    gclog_or_tty->print("permgen ");
+   }
+   perm_gen()->verify(allow_dirty);
+   for (int i = _n_gens-1; i >= 0; i--) {
+@@ -1221,8 +1224,8 @@
+ class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
+  private:
+   bool _full;
+- public: 
+-  void do_generation(Generation* gen) { 
++ public:
++  void do_generation(Generation* gen) {
+     gen->gc_prologue(_full);
+   }
+   GenGCPrologueClosure(bool full) : _full(full) {};
+@@ -1247,8 +1250,8 @@
+ class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
+  private:
+   bool _full;
+- public: 
+-  void do_generation(Generation* gen) { 
++ public:
++  void do_generation(Generation* gen) {
+     gen->gc_epilogue(_full);
+   }
+   GenGCEpilogueClosure(bool full) : _full(full) {};
+@@ -1296,9 +1299,9 @@
+ }
+ 
+ oop GenCollectedHeap::handle_failed_promotion(Generation* gen,
+-					      oop obj,
+-					      size_t obj_size,
+-					      oop* ref) {
++                                              oop obj,
++                                              size_t obj_size,
++                                              oop* ref) {
+   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
+   HeapWord* result = NULL;
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/memory/genCollectedHeap.hpp openjdk/hotspot/src/share/vm/memory/genCollectedHeap.hpp
+--- openjdk6/hotspot/src/share/vm/memory/genCollectedHeap.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/genCollectedHeap.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)genCollectedHeap.hpp	1.104 07/05/29 09:44:15 JVM"
+-#endif
+ /*
+  * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class SubTasksDone;
+@@ -41,6 +38,7 @@
+   friend class VM_GenCollectFull;
+   friend class VM_GenCollectFullConcurrent;
+   friend class VM_GC_HeapInspection;
++  friend class VM_HeapDumper;
+   friend class HeapInspection;
+   friend class GCCauseSetter;
+   friend class VMStructs;
+@@ -63,13 +61,13 @@
+   // The generational collector policy.
+   GenCollectorPolicy* _gen_policy;
+ 
+-  // If a generation would bail out of an incremental collection, 
+-  // it sets this flag.  If the flag is set, satisfy_failed_allocation 
++  // If a generation would bail out of an incremental collection,
++  // it sets this flag.  If the flag is set, satisfy_failed_allocation
+   // will attempt allocating in all generations before doing a full GC.
+   bool _incremental_collection_will_fail;
+   bool _last_incremental_collection_failed;
+ 
+-  // In support of ExplicitGCInvokesConcurrent functionality 
++  // In support of ExplicitGCInvokesConcurrent functionality
+   unsigned int _full_collections_completed;
+ 
+   // Data structure for claiming the (potentially) parallel tasks in
+@@ -99,7 +97,7 @@
+                      bool   clear_all_soft_refs,
+                      size_t size,
+                      bool   is_tlab,
+-		     int    max_level);
++                     int    max_level);
+ 
+   // Callback from VM_GenCollectForAllocation operation.
+   // This function does everything necessary/possible to satisfy an
+@@ -125,7 +123,7 @@
+   char* allocate(size_t alignment, PermanentGenerationSpec* perm_gen_spec,
+                  size_t* _total_reserved, int* _n_covered_regions,
+                  ReservedSpace* heap_rs);
+-  
++
+   // Does operations required after initialization has been done.
+   void post_initialize();
+ 
+@@ -140,7 +138,7 @@
+   GenCollectorPolicy* gen_policy() const { return _gen_policy; }
+ 
+   // Adaptive size policy
+-  virtual AdaptiveSizePolicy* size_policy() { 
++  virtual AdaptiveSizePolicy* size_policy() {
+     return gen_policy()->size_policy();
+   }
+ 
+@@ -156,7 +154,7 @@
+   HeapWord* mem_allocate(size_t size,
+                          bool   is_large_noref,
+                          bool   is_tlab,
+-			 bool*  gc_overhead_limit_was_exceeded);
++                         bool*  gc_overhead_limit_was_exceeded);
+ 
+   // We may support a shared contiguous allocation area, if the youngest
+   // generation does.
+@@ -225,13 +223,13 @@
+   // possible to find its size, and thus to progress forward to the next
+   // block.  (Blocks may be of different sizes.)  Thus, blocks may
+   // represent Java objects, or they might be free blocks in a
+-  // free-list-based heap (or subheap), as long as the two kinds are 
++  // free-list-based heap (or subheap), as long as the two kinds are
+   // distinguishable and the size of each is determinable.
+ 
+   // Returns the address of the start of the "block" that contains the
+   // address "addr".  We say "blocks" instead of "object" since some heaps
+   // may not pack objects densely; a chunk may either be an object or a
+-  // non-object. 
++  // non-object.
+   virtual HeapWord* block_start(const void* addr) const;
+ 
+   // Requires "addr" to be the start of a chunk, and returns its size.
+@@ -253,7 +251,7 @@
+   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
+   virtual HeapWord* allocate_new_tlab(size_t size);
+ 
+-  // The "requestor" generation is performing some garbage collection 
++  // The "requestor" generation is performing some garbage collection
+   // action for which it would be useful to have scratch space.  The
+   // requestor promises to allocate no more than "max_alloc_words" in any
+   // older generation (via promotion say.)   Any blocks of space that can
+@@ -339,7 +337,7 @@
+   // maximal committed limit that they can reach, without a garbage
+   // collection.
+   virtual bool is_maximal_no_gc() const;
+-  
++
+   // Return the generation before "gen", or else NULL.
+   Generation* prev_gen(Generation* gen) const {
+     int l = gen->level();
+@@ -371,7 +369,7 @@
+   static GenCollectedHeap* heap();
+ 
+   void set_par_threads(int t);
+-  
++
+ 
+   // Invoke the "do_oop" method of one of the closures "not_older_gens"
+   // or "older_gens" on root locations for the generation at
+@@ -391,16 +389,16 @@
+   // "SO_Symbols_and_Strings" applies the closure to all entries in
+   // SymbolsTable and StringTable.
+   void gen_process_strong_roots(int level, bool younger_gens_as_roots,
+-				bool collecting_perm_gen,
+-				SharedHeap::ScanningOption so,
+-				OopsInGenClosure* older_gens,
+-				OopsInGenClosure* not_older_gens);
++                                bool collecting_perm_gen,
++                                SharedHeap::ScanningOption so,
++                                OopsInGenClosure* older_gens,
++                                OopsInGenClosure* not_older_gens);
+ 
+   // Apply "blk" to all the weak roots of the system.  These include
+   // JNI weak roots, the code cache, system dictionary, symbol table,
+-  // string table, and referents of reachable weak refs. 
++  // string table, and referents of reachable weak refs.
+   void gen_process_weak_roots(OopClosure* root_closure,
+-			      OopClosure* non_root_closure);
++                              OopClosure* non_root_closure);
+ 
+   // Set the saved marks of generations, if that makes sense.
+   // In particular, if any generation might iterate over the oops
+@@ -412,10 +410,10 @@
+   // "level" (including the permanent generation.)  The "cur" closure is
+   // applied to references in the generation at "level", and the "older"
+   // closure to older (and permanent) generations.
+-#define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix)	\
+-  void oop_since_save_marks_iterate(int level,				\
+-			            OopClosureType* cur,		\
+-				    OopClosureType* older);
++#define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix)    \
++  void oop_since_save_marks_iterate(int level,                          \
++                                    OopClosureType* cur,                \
++                                    OopClosureType* older);
+ 
+   ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL)
+ 
+@@ -426,7 +424,7 @@
+   // call to "save_marks".
+   bool no_allocs_since_save_marks(int level);
+ 
+-  // If a generation bails out of an incremental collection, 
++  // If a generation bails out of an incremental collection,
+   // it sets this flag.
+   bool incremental_collection_will_fail() {
+     return _incremental_collection_will_fail;
+@@ -453,9 +451,9 @@
+   // Otherwise, try expand-and-allocate for obj in each generation starting at
+   // gen; return the new location of obj if successful.  Otherwise, return NULL.
+   oop handle_failed_promotion(Generation* gen,
+-			      oop obj,
+-			      size_t obj_size,
+-			      oop* ref);
++                              oop obj,
++                              size_t obj_size,
++                              oop* ref);
+ 
+ private:
+   // Accessor for memory state verification support
+@@ -487,7 +485,7 @@
+ protected:
+   virtual void gc_prologue(bool full);
+   virtual void gc_epilogue(bool full);
+-  
++
+ public:
+-  virtual void preload_and_dump(TRAPS);
++  virtual void preload_and_dump(TRAPS) KERNEL_RETURN;
+ };
+diff -ruN openjdk6/hotspot/src/share/vm/memory/generation.cpp openjdk/hotspot/src/share/vm/memory/generation.cpp
+--- openjdk6/hotspot/src/share/vm/memory/generation.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/generation.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)generation.cpp	1.245 07/05/05 17:05:51 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -33,10 +30,10 @@
+   _ref_processor(NULL) {
+   if (!_virtual_space.initialize(rs, initial_size)) {
+     vm_exit_during_initialization("Could not reserve enough space for "
+-		    "object heap");
++                    "object heap");
+   }
+   _reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(),
+-	  (HeapWord*)_virtual_space.high_boundary());
++          (HeapWord*)_virtual_space.high_boundary());
+ }
+ 
+ GenerationSpec* Generation::spec() {
+@@ -81,7 +78,7 @@
+ 
+ void Generation::print_on(outputStream* st)  const {
+   st->print(" %-20s", name());
+-  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", 
++  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
+              capacity()/K, used()/K);
+   st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
+               _virtual_space.low_boundary(),
+@@ -95,9 +92,9 @@
+   StatRecord* sr = stat_record();
+   double time = sr->accumulated_time.seconds();
+   st->print_cr("[Accumulated GC generation %d time %3.7f secs, "
+-	       "%d GC's, avg GC time %3.7f]", 
+-	       level(), time, sr->invocations,
+-	       sr->invocations > 0 ? time / sr->invocations : 0.0);
++               "%d GC's, avg GC time %3.7f]",
++               level(), time, sr->invocations,
++               sr->invocations > 0 ? time / sr->invocations : 0.0);
+ }
+ 
+ // Utility iterator classes
+@@ -136,7 +133,7 @@
+   assert((kind() == Generation::DefNew) ||
+          (kind() == Generation::ParNew) ||
+          (kind() == Generation::ASParNew),
+-    "Wrong youngest generation type"); 
++    "Wrong youngest generation type");
+   return (DefNewGeneration*) this;
+ }
+ 
+@@ -163,7 +160,7 @@
+ }
+ 
+ bool Generation::promotion_attempt_is_safe(size_t promotion_in_bytes,
+-				           bool not_used) const {
++                                           bool not_used) const {
+   if (PrintGC && Verbose) {
+     gclog_or_tty->print_cr("Generation::promotion_attempt_is_safe"
+                 " contiguous_available: " SIZE_FORMAT
+@@ -177,11 +174,11 @@
+ oop Generation::promote(oop obj, size_t obj_size, oop* ref) {
+   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
+ 
+-#ifndef	PRODUCT
++#ifndef PRODUCT
+   if (Universe::heap()->promotion_should_fail()) {
+     return NULL;
+   }
+-#endif	// #ifndef PRODUCT
++#endif  // #ifndef PRODUCT
+ 
+   HeapWord* result = allocate(obj_size, false);
+   if (result != NULL) {
+@@ -194,14 +191,14 @@
+ }
+ 
+ oop Generation::par_promote(int thread_num,
+-			    oop obj, markOop m, size_t word_sz) {
++                            oop obj, markOop m, size_t word_sz) {
+   // Could do a bad general impl here that gets a lock.  But no.
+   ShouldNotCallThis();
+   return NULL;
+ }
+ 
+ void Generation::par_promote_alloc_undo(int thread_num,
+-					HeapWord* obj, size_t word_sz) {
++                                        HeapWord* obj, size_t word_sz) {
+   // Could do a bad general impl here that gets a lock.  But no.
+   guarantee(false, "No good general implementation.");
+ }
+@@ -296,7 +293,7 @@
+ }
+ 
+ void Generation::younger_refs_in_space_iterate(Space* sp,
+-					       OopsInGenClosure* cl) {
++                                               OopsInGenClosure* cl) {
+   GenRemSet* rs = SharedHeap::heap()->rem_set();
+   rs->younger_refs_in_space_iterate(sp, cl);
+ }
+@@ -327,8 +324,8 @@
+ 
+ class AdjustPointersClosure: public SpaceClosure {
+  public:
+-  void do_space(Space* sp) { 
+-    sp->adjust_pointers(); 
++  void do_space(Space* sp) {
++    sp->adjust_pointers();
+   }
+ };
+ 
+@@ -348,8 +345,8 @@
+ }
+ 
+ CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
+-			       int level,
+-			       GenRemSet* remset) :
++                               int level,
++                               GenRemSet* remset) :
+   Generation(rs, initial_byte_size, level), _rs(remset)
+ {
+   HeapWord* start = (HeapWord*)rs.base();
+@@ -396,11 +393,11 @@
+ 
+ void OneContigSpaceCardGeneration::collect(bool   full,
+                                            bool   clear_all_soft_refs,
+-					   size_t size,
++                                           size_t size,
+                                            bool   is_tlab) {
+   SpecializationStats::clear();
+   // Temporarily expand the span of our ref processor, so
+-  // refs discovery is over the entire heap, not just this generation 
++  // refs discovery is over the entire heap, not just this generation
+   ReferenceProcessorSpanMutator
+     x(ref_processor(), GenCollectedHeap::heap()->reserved_region());
+   GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);
+@@ -410,7 +407,7 @@
+ HeapWord*
+ OneContigSpaceCardGeneration::expand_and_allocate(size_t word_size,
+                                                   bool is_tlab,
+-						  bool parallel) {
++                                                  bool parallel) {
+   assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
+   if (parallel) {
+     MutexLocker x(ParGCRareEvent_lock);
+@@ -419,7 +416,7 @@
+     while (true) {
+       expand(byte_size, _min_heap_delta_bytes);
+       if (GCExpandToAllocateDelayMillis > 0) {
+-	os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
++        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
+       }
+       result = _the_space->par_allocate(word_size);
+       if ( result != NULL) {
+@@ -522,7 +519,7 @@
+       size_t new_mem_size = _virtual_space.committed_size();
+       size_t old_mem_size = new_mem_size - bytes;
+       gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
+-                      SIZE_FORMAT "K to " SIZE_FORMAT "K", 
++                      SIZE_FORMAT "K to " SIZE_FORMAT "K",
+                       name(), old_mem_size/K, bytes/K, new_mem_size/K);
+     }
+   }
+@@ -558,7 +555,7 @@
+     size_t new_mem_size = _virtual_space.committed_size();
+     size_t old_mem_size = new_mem_size + bytes;
+     gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
+-	     	  name(), old_mem_size/K, new_mem_size/K);
++                  name(), old_mem_size/K, new_mem_size/K);
+   }
+ }
+ 
+@@ -571,7 +568,7 @@
+ }
+ 
+ void OneContigSpaceCardGeneration::space_iterate(SpaceClosure* blk,
+-						 bool usedOnly) {
++                                                 bool usedOnly) {
+   blk->do_space(_the_space);
+ }
+ 
+@@ -591,12 +588,12 @@
+   blk->reset_generation();
+ }
+ 
+-void OneContigSpaceCardGeneration::save_marks() { 
++void OneContigSpaceCardGeneration::save_marks() {
+   _the_space->set_saved_mark();
+ }
+ 
+ 
+-void OneContigSpaceCardGeneration::reset_saved_marks() { 
++void OneContigSpaceCardGeneration::reset_saved_marks() {
+   _the_space->reset_saved_mark();
+ }
+ 
+@@ -605,14 +602,14 @@
+   return _the_space->saved_mark_at_top();
+ }
+ 
+-#define OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)	\
+-										\
+-void OneContigSpaceCardGeneration::						\
+-oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {			\
+-  blk->set_generation(this);							\
+-  _the_space->oop_since_save_marks_iterate##nv_suffix(blk);			\
+-  blk->reset_generation();							\
+-  save_marks();									\
++#define OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)      \
++                                                                                \
++void OneContigSpaceCardGeneration::                                             \
++oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {                  \
++  blk->set_generation(this);                                                    \
++  _the_space->oop_since_save_marks_iterate##nv_suffix(blk);                     \
++  blk->reset_generation();                                                      \
++  save_marks();                                                                 \
+ }
+ 
+ ALL_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN)
+diff -ruN openjdk6/hotspot/src/share/vm/memory/generation.hpp openjdk/hotspot/src/share/vm/memory/generation.hpp
+--- openjdk6/hotspot/src/share/vm/memory/generation.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/generation.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)generation.hpp	1.195 07/05/17 15:55:02 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A Generation models a heap area for similarly-aged objects.
+@@ -72,7 +69,7 @@
+   ScratchBlock* next;
+   size_t num_words;
+   HeapWord scratch_space[1];  // Actually, of size "num_words-2" (assuming
+-			      // first two fields are word-sized.)
++                              // first two fields are word-sized.)
+ };
+ 
+ 
+@@ -90,7 +87,7 @@
+   // other generations.
+   MemRegion _reserved;
+ 
+-  // Memory area reserved for generation 
++  // Memory area reserved for generation
+   VirtualSpace _virtual_space;
+ 
+   // Level in the generation hierarchy.
+@@ -137,7 +134,7 @@
+     LogOfGenGrain = 16,
+     GenGrain = 1 << LogOfGenGrain
+   };
+-  
++
+   // allocate and initialize ("weak") refs processing support
+   virtual void ref_processor_init();
+   void set_ref_processor(ReferenceProcessor* rp) {
+@@ -175,10 +172,10 @@
+   virtual size_t max_contiguous_available() const;
+ 
+   // Returns true if promotions of the specified amount can
+-  // be attempted safely (without a vm failure). 
++  // be attempted safely (without a vm failure).
+   // Promotion of the full amount is not guaranteed but
+-  // can be attempted.  
+-  //   younger_handles_promotion_failure 
++  // can be attempted.
++  //   younger_handles_promotion_failure
+   // is true if the younger generation handles a promotion
+   // failure.
+   virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
+@@ -258,7 +255,7 @@
+   // the next older gen.  The return value is a new limit, or NULL if none.  The
+   // caller must do the necessary locking.
+   virtual HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
+-					     size_t word_size) {
++                                             size_t word_size) {
+     return NULL;
+   }
+ 
+@@ -290,19 +287,19 @@
+     guarantee(false, "Generation doesn't support thread local allocation buffers");
+     return 0;
+   }
+-  
++
+   // "obj" is the address of an object in a younger generation.  Allocate space
+   // for "obj" in the current (or some higher) generation, and copy "obj" into
+   // the newly allocated space, if possible, returning the result (or NULL if
+   // the allocation failed).
+-  // 
++  //
+   // The "obj_size" argument is just obj->size(), passed along so the caller can
+   // avoid repeating the virtual call to retrieve it.
+-  // 
++  //
+   // The "ref" argument, if non-NULL, is the address of some reference to "obj"
+   // (that is "*ref == obj"); some generations may use this information to, for
+   // example, influence placement decisions.
+-  // 
++  //
+   // The default implementation ignores "ref" and calls allocate().
+   virtual oop promote(oop obj, size_t obj_size, oop* ref);
+ 
+@@ -314,12 +311,12 @@
+   // also taking care to copy the klass pointer *last*.  Returns the new
+   // object if successful, or else NULL.
+   virtual oop par_promote(int thread_num,
+-			  oop obj, markOop m, size_t word_sz);
++                          oop obj, markOop m, size_t word_sz);
+ 
+   // Undo, if possible, the most recent par_promote_alloc allocation by
+   // "thread_num" ("obj", of "word_sz").
+   virtual void par_promote_alloc_undo(int thread_num,
+-				      HeapWord* obj, size_t word_sz);
++                                      HeapWord* obj, size_t word_sz);
+ 
+   // Informs the current generation that all par_promote_alloc's in the
+   // collection have been completed; any supporting data structures can be
+@@ -328,7 +325,7 @@
+ 
+   // Informs the current generation that all oop_since_save_marks_iterates
+   // performed by "thread_num" in the current collection, if any, have been
+-  // completed; any supporting data structures can be reset.  Default is to 
++  // completed; any supporting data structures can be reset.  Default is to
+   // do nothing.
+   virtual void par_oop_since_save_marks_iterate_done(int thread_num) {}
+ 
+@@ -344,7 +341,7 @@
+   virtual bool performs_in_place_marking() const { return true; }
+ 
+   // Returns "true" iff collect() should subsequently be called on this
+-  // this generation. See comment below. 
++  // this generation. See comment below.
+   // This is a generic implementation which can be overridden.
+   //
+   // Note: in the current (1.4) implementation, when genCollectedHeap's
+@@ -361,12 +358,12 @@
+ 
+   // Perform a garbage collection.
+   // If full is true attempt a full garbage collection of this generation.
+-  // Otherwise, attempting to (at least) free enough space to support an 
++  // Otherwise, attempting to (at least) free enough space to support an
+   // allocation of the given "word_size".
+   virtual void collect(bool   full,
+                        bool   clear_all_soft_refs,
+                        size_t word_size,
+-		       bool   is_tlab) = 0;
++                       bool   is_tlab) = 0;
+ 
+   // Perform a heap collection, attempting to create (at least) enough
+   // space to support an allocation of the given "word_size".  If
+@@ -374,8 +371,8 @@
+   // "oop" (initializing the allocated block). If the allocation is
+   // still unsuccessful, return "NULL".
+   virtual HeapWord* expand_and_allocate(size_t word_size,
+-					bool is_tlab,
+-					bool parallel = false) = 0;
++                                        bool is_tlab,
++                                        bool parallel = false) = 0;
+ 
+   // Some generations may require some cleanup or preparation actions before
+   // allowing a collection.  The default is to do nothing.
+@@ -410,7 +407,7 @@
+   // the level of the collection that has most recently
+   // occurred.  This allows the generation to decide what
+   // statistics are valid to collect.  For example, the
+-  // generation can decide to gather the amount of promoted data 
++  // generation can decide to gather the amount of promoted data
+   // if the collection of the younger generations has completed.
+   GCStats* gc_stats() const { return _gc_stats; }
+   virtual void update_gc_stats(int current_level, bool full) {}
+@@ -465,15 +462,15 @@
+   // implemention of the _nv versions call the virtual version.
+   // Note that the _nv suffix is not really semantically necessary,
+   // but it avoids some not-so-useful warnings on Solaris.)
+-#define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)		\
+-  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {	\
+-    oop_since_save_marks_iterate_v((OopsInGenClosure*)cl);			\
++#define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)             \
++  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {    \
++    oop_since_save_marks_iterate_v((OopsInGenClosure*)cl);                      \
+   }
+   SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(Generation_SINCE_SAVE_MARKS_DECL)
+ 
+ #undef Generation_SINCE_SAVE_MARKS_DECL
+ 
+-  // The "requestor" generation is performing some garbage collection 
++  // The "requestor" generation is performing some garbage collection
+   // action for which it would be useful to have scratch space.  If
+   // the target is not the requestor, no gc actions will be required
+   // of the target.  The requestor promises to allocate no more than
+@@ -483,7 +480,7 @@
+   // it to "list", leaving "list" pointing to the head of the
+   // augmented list.  The default is to offer no space.
+   virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
+-				  size_t max_alloc_words) {}
++                                  size_t max_alloc_words) {}
+ 
+   // When an older generation has been collected, and perhaps resized,
+   // this method will be invoked on all younger generations (from older to
+@@ -546,7 +543,7 @@
+   // Returns the address of the start of the "block" that contains the
+   // address "addr".  We say "blocks" instead of "object" since some heaps
+   // may not pack objects densely; a chunk may either be an object or a
+-  // non-object. 
++  // non-object.
+   virtual HeapWord* block_start(const void* addr) const;
+ 
+   // Requires "addr" to be the start of a chunk, and returns its size.
+@@ -604,8 +601,8 @@
+   BlockOffsetSharedArray* _bts;
+ 
+   CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
+-		 GenRemSet* remset);
+-  
++                 GenRemSet* remset);
++
+  public:
+ 
+   virtual void clear_remembered_set();
+@@ -623,14 +620,14 @@
+ class OneContigSpaceCardGeneration: public CardGeneration {
+   friend class VMStructs;
+   // Abstractly, this is a subtype that gets access to protected fields.
+-  friend class CompactingPermGen;  
++  friend class CompactingPermGen;
+   friend class VM_PopulateDumpSharedSpace;
+ 
+  protected:
+   size_t     _min_heap_delta_bytes;   // Minimum amount to expand.
+   ContiguousSpace*  _the_space;       // actual space holding objects
+   WaterMark  _last_gc;                // watermark between objects allocated before
+-			              // and after last GC.
++                                      // and after last GC.
+ 
+   // Grow generation with specified size (returns false if unable to grow)
+   bool grow_by(size_t bytes);
+@@ -648,9 +645,9 @@
+ 
+  public:
+   OneContigSpaceCardGeneration(ReservedSpace rs, size_t initial_byte_size,
+-			       size_t min_heap_delta_bytes,
+-			       int level, GenRemSet* remset,
+-			       ContiguousSpace* space) :
++                               size_t min_heap_delta_bytes,
++                               int level, GenRemSet* remset,
++                               ContiguousSpace* space) :
+     CardGeneration(rs, initial_byte_size, level, remset),
+     _the_space(space), _min_heap_delta_bytes(min_heap_delta_bytes)
+   {}
+@@ -683,7 +680,7 @@
+   inline WaterMark top_mark();
+   inline WaterMark bottom_mark();
+ 
+-#define OneContig_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)	\
++#define OneContig_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)      \
+   void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
+   OneContig_SINCE_SAVE_MARKS_DECL(OopsInGenClosure,_v)
+   SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_DECL)
+@@ -695,14 +692,14 @@
+   inline size_t block_size(const HeapWord* addr) const;
+ 
+   inline bool block_is_obj(const HeapWord* addr) const;
+-  
++
+   virtual void collect(bool full,
+                        bool clear_all_soft_refs,
+-                       size_t size, 
++                       size_t size,
+                        bool is_tlab);
+   HeapWord* expand_and_allocate(size_t size,
+-				bool is_tlab,
+-				bool parallel = false);
++                                bool is_tlab,
++                                bool parallel = false);
+ 
+   virtual void prepare_for_verify();
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/memory/generation.inline.hpp openjdk/hotspot/src/share/vm/memory/generation.inline.hpp
+--- openjdk6/hotspot/src/share/vm/memory/generation.inline.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/generation.inline.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)generation.inline.hpp	1.38 07/05/05 17:05:50 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ bool OneContigSpaceCardGeneration::is_in(const void* p) const {
+@@ -46,7 +43,7 @@
+ }
+ 
+ HeapWord* OneContigSpaceCardGeneration::par_allocate(size_t word_size,
+-						     bool is_tlab) {
++                                                     bool is_tlab) {
+   assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
+   return the_space()->par_allocate(word_size);
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/memory/generationSpec.cpp openjdk/hotspot/src/share/vm/memory/generationSpec.cpp
+--- openjdk6/hotspot/src/share/vm/memory/generationSpec.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/generationSpec.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)generationSpec.cpp	1.29 07/05/29 09:44:15 JVM"
+-#endif
+ /*
+  * Copyright 2001-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,36 +19,36 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_generationSpec.cpp.incl"
+ 
+ Generation* GenerationSpec::init(ReservedSpace rs, int level,
+-				 GenRemSet* remset) {
++                                 GenRemSet* remset) {
+   switch (name()) {
+     case Generation::DefNew:
+       return new DefNewGeneration(rs, init_size(), level);
+ 
+     case Generation::MarkSweepCompact:
+       return new TenuredGeneration(rs, init_size(), level, remset);
+-    
++
+ #ifndef SERIALGC
+     case Generation::ParNew:
+       return new ParNewGeneration(rs, init_size(), level);
+ 
+     case Generation::ASParNew:
+-      return new ASParNewGeneration(rs, 
+-				    init_size(), 
+-				    init_size() /* min size */,
+-				    level);
++      return new ASParNewGeneration(rs,
++                                    init_size(),
++                                    init_size() /* min size */,
++                                    level);
+ 
+     case Generation::ConcurrentMarkSweep: {
+       assert(UseConcMarkSweepGC, "UseConcMarkSweepGC should be set");
+       CardTableRS* ctrs = remset->as_CardTableRS();
+       if (ctrs == NULL) {
+-	vm_exit_during_initialization("Rem set incompatibility.");
++        vm_exit_during_initialization("Rem set incompatibility.");
+       }
+       // Otherwise
+       // The constructor creates the CMSCollector if needed,
+@@ -71,7 +68,7 @@
+       assert(UseConcMarkSweepGC, "UseConcMarkSweepGC should be set");
+       CardTableRS* ctrs = remset->as_CardTableRS();
+       if (ctrs == NULL) {
+-	vm_exit_during_initialization("Rem set incompatibility.");
++        vm_exit_during_initialization("Rem set incompatibility.");
+       }
+       // Otherwise
+       // The constructor creates the CMSCollector if needed,
+@@ -153,12 +150,12 @@
+   switch (name()) {
+     case PermGen::MarkSweepCompact:
+       return new CompactingPermGen(perm_rs, shared_rs, init_size, remset, this);
+-      
++
+ #ifndef SERIALGC
+     case PermGen::MarkSweep:
+       guarantee(false, "NYI");
+       return NULL;
+-      
++
+     case PermGen::ConcurrentMarkSweep: {
+       assert(UseConcMarkSweepGC, "UseConcMarkSweepGC should be set");
+       CardTableRS* ctrs = remset->as_CardTableRS();
+diff -ruN openjdk6/hotspot/src/share/vm/memory/generationSpec.hpp openjdk/hotspot/src/share/vm/memory/generationSpec.hpp
+--- openjdk6/hotspot/src/share/vm/memory/generationSpec.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/generationSpec.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)generationSpec.hpp	1.17 07/05/05 17:05:50 JVM"
+-#endif
+ /*
+  * Copyright 2001-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The specification of a generation.  This class also encapsulates
+diff -ruN openjdk6/hotspot/src/share/vm/memory/genMarkSweep.cpp openjdk/hotspot/src/share/vm/memory/genMarkSweep.cpp
+--- openjdk6/hotspot/src/share/vm/memory/genMarkSweep.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/genMarkSweep.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)genMarkSweep.cpp	1.40 07/05/17 15:54:55 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -44,8 +41,8 @@
+   CodeCache::gc_prologue();
+   Threads::gc_prologue();
+ 
+-  // Increment the invocation count for the permanent generation, since it is 
+-  // implicitly collected whenever we do a full mark sweep collection. 
++  // Increment the invocation count for the permanent generation, since it is
++  // implicitly collected whenever we do a full mark sweep collection.
+   GenCollectedHeap* gch = GenCollectedHeap::heap();
+   gch->perm_gen()->stat_record()->invocations++;
+ 
+@@ -71,7 +68,7 @@
+   // Don't add any more derived pointers during phase3
+   COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
+   COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
+-    
++
+   mark_sweep_phase3(level);
+ 
+   VALIDATE_MARK_SWEEP_ONLY(
+@@ -100,7 +97,7 @@
+ 
+   // If compaction completely evacuated all generations younger than this
+   // one, then we can clear the card table.  Otherwise, we must invalidate
+-  // it (consider all cards dirty).  In the future, we might consider doing 
++  // it (consider all cards dirty).  In the future, we might consider doing
+   // compaction within generations only, and doing card-table sliding.
+   bool all_empty = true;
+   for (int i = 0; all_empty && i < level; i++) {
+@@ -149,12 +146,12 @@
+ 
+   // $$$ To cut a corner, we'll only use the first scratch block, and then
+   // revert to malloc.
+-  if (scratch != NULL) { 
++  if (scratch != NULL) {
+     _preserved_count_max =
+-      scratch->num_words * HeapWordSize / sizeof(PreservedMark); 
+-  } else { 
+-    _preserved_count_max = 0; 
+-  } 
++      scratch->num_words * HeapWordSize / sizeof(PreservedMark);
++  } else {
++    _preserved_count_max = 0;
++  }
+ 
+   _preserved_marks = (PreservedMark*)scratch;
+   _preserved_count = 0;
+@@ -218,8 +215,8 @@
+ #endif
+ }
+ 
+-void GenMarkSweep::mark_sweep_phase1(int level, 
+-				  bool clear_all_softrefs) {
++void GenMarkSweep::mark_sweep_phase1(int level,
++                                  bool clear_all_softrefs) {
+   // Recursively traverse all live objects and mark them
+   EventMark m("1 mark object");
+   TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
+@@ -236,10 +233,10 @@
+   follow_root_closure.set_orig_generation(gch->get_gen(level));
+ 
+   gch->gen_process_strong_roots(level,
+-				false, // Younger gens are not roots.
+-				true,  // Collecting permanent generation.
+-				SharedHeap::SO_SystemClasses,
+-				&follow_root_closure, &follow_root_closure);
++                                false, // Younger gens are not roots.
++                                true,  // Collecting permanent generation.
++                                SharedHeap::SO_SystemClasses,
++                                &follow_root_closure, &follow_root_closure);
+ 
+   // Process reference objects found during marking
+   {
+@@ -286,7 +283,7 @@
+   // array. If perm_gen is not traversed last a klassOop may get
+   // overwritten. This is fine since it is dead, but if the class has dead
+   // instances we have to skip them, and in order to find their size we
+-  // need the klassOop! 
++  // need the klassOop!
+   //
+   // It is not required that we traverse spaces in the same order in
+   // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
+@@ -294,7 +291,7 @@
+ 
+   GenCollectedHeap* gch = GenCollectedHeap::heap();
+   Generation* pg = gch->perm_gen();
+-  
++
+   EventMark m("2 compute new addresses");
+   TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
+   trace("2");
+@@ -337,16 +334,16 @@
+   adjust_pointer_closure.set_orig_generation(gch->get_gen(level));
+ 
+   gch->gen_process_strong_roots(level,
+-				false, // Younger gens are not roots.
+-				true,  // Collecting permanent generation.
+-				SharedHeap::SO_AllClasses,
+-				&adjust_root_pointer_closure,
+-				&adjust_root_pointer_closure);
++                                false, // Younger gens are not roots.
++                                true,  // Collecting permanent generation.
++                                SharedHeap::SO_AllClasses,
++                                &adjust_root_pointer_closure,
++                                &adjust_root_pointer_closure);
+ 
+   // Now adjust pointers in remaining weak roots.  (All of which should
+   // have been cleared if they pointed to non-surviving objects.)
+   gch->gen_process_weak_roots(&adjust_root_pointer_closure,
+-			      &adjust_pointer_closure);
++                              &adjust_pointer_closure);
+ 
+   adjust_marks();
+   GenAdjustPointersClosure blk;
+@@ -383,7 +380,7 @@
+   VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(true));
+ 
+   pg->compact();
+-  
++
+   VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(false));
+ 
+   GenCompactClosure blk;
+diff -ruN openjdk6/hotspot/src/share/vm/memory/genMarkSweep.hpp openjdk/hotspot/src/share/vm/memory/genMarkSweep.hpp
+--- openjdk6/hotspot/src/share/vm/memory/genMarkSweep.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/genMarkSweep.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)genMarkSweep.hpp	1.11 07/05/05 17:05:49 JVM"
+-#endif
+ /*
+  * Copyright 2001-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class GenMarkSweep : public MarkSweep {
+diff -ruN openjdk6/hotspot/src/share/vm/memory/genOopClosures.hpp openjdk/hotspot/src/share/vm/memory/genOopClosures.hpp
+--- openjdk6/hotspot/src/share/vm/memory/genOopClosures.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/genOopClosures.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)genOopClosures.hpp	1.64 07/05/29 09:44:15 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class Generation;
+@@ -49,7 +46,7 @@
+   // For assertions
+   Generation* generation() { return _gen; }
+   CardTableRS* rs() { return _rs; }
+- 
++
+   // Derived classes that modify oops so that they might be old-to-young
+   // pointers must call the method below.
+   void do_barrier(oop* p);
+@@ -65,8 +62,8 @@
+ 
+   // Problem with static closures: must have _gen_boundary set at some point,
+   // but cannot do this until after the heap is initialized.
+-  void set_orig_generation(Generation* gen) { 
+-    _orig_gen = gen; 
++  void set_orig_generation(Generation* gen) {
++    _orig_gen = gen;
+     set_generation(gen);
+   }
+ 
+@@ -89,7 +86,7 @@
+   bool do_header() { return false; }
+   Prefetch::style prefetch_style() {
+     return Prefetch::do_write;
+-  } 
++  }
+ };
+ 
+ // Closure for scanning DefNewGeneration.
+@@ -109,7 +106,7 @@
+   bool do_header() { return false; }
+   Prefetch::style prefetch_style() {
+     return Prefetch::do_write;
+-  } 
++  }
+ };
+ 
+ class FilteringClosure: public OopClosure {
+@@ -144,7 +141,7 @@
+ };
+ 
+ class VerifyOopClosure: public OopClosure {
+-public: 
++public:
+   void do_oop(oop* p) {
+     guarantee((*p)->is_oop_or_null(), "invalid oop");
+   }
+diff -ruN openjdk6/hotspot/src/share/vm/memory/genOopClosures.inline.hpp openjdk/hotspot/src/share/vm/memory/genOopClosures.inline.hpp
+--- openjdk6/hotspot/src/share/vm/memory/genOopClosures.inline.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/genOopClosures.inline.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)genOopClosures.inline.hpp	1.40 07/05/29 09:44:15 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ inline OopsInGenClosure::OopsInGenClosure(Generation* gen) :
+@@ -61,7 +58,7 @@
+       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
+       if (obj->is_forwarded()) {
+         *p = obj->forwardee();
+-      } else {        
++      } else {
+         *p = _g->copy_to_survivor_space(obj, p);
+       }
+     }
+@@ -86,7 +83,7 @@
+       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
+       if (obj->is_forwarded()) {
+         *p = obj->forwardee();
+-      } else {        
++      } else {
+         *p = _g->copy_to_survivor_space(obj, p);
+       }
+       if (_gc_barrier) {
+@@ -111,7 +108,7 @@
+   if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) {
+     if (obj->is_forwarded()) {
+       *p = obj->forwardee();
+-    } else {        
++    } else {
+       *p = _g->copy_to_survivor_space(obj, p);
+     }
+   }
+diff -ruN openjdk6/hotspot/src/share/vm/memory/genRemSet.cpp openjdk/hotspot/src/share/vm/memory/genRemSet.cpp
+--- openjdk6/hotspot/src/share/vm/memory/genRemSet.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/genRemSet.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)genRemSet.cpp	1.11 07/05/05 17:05:50 JVM"
+-#endif
+ /*
+  * Copyright 2001-2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This kind of "BarrierSet" allows a "CollectedHeap" to detect and
+diff -ruN openjdk6/hotspot/src/share/vm/memory/genRemSet.hpp openjdk/hotspot/src/share/vm/memory/genRemSet.hpp
+--- openjdk6/hotspot/src/share/vm/memory/genRemSet.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/genRemSet.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)genRemSet.hpp	1.23 07/05/05 17:05:50 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A GenRemSet provides ways of iterating over pointers accross generations.
+@@ -37,7 +34,7 @@
+   friend class Generation;
+ 
+   BarrierSet* _bs;
+-  
++
+ public:
+   enum Name {
+     CardTable,
+@@ -67,7 +64,7 @@
+   virtual void younger_refs_iterate(Generation* g, OopsInGenClosure* blk) = 0;
+ 
+   virtual void younger_refs_in_space_iterate(Space* sp,
+-					     OopsInGenClosure* cl) = 0;
++                                             OopsInGenClosure* cl) = 0;
+ 
+   // This method is used to notify the remembered set that "new_val" has
+   // been written into "field" by the garbage collector.
+@@ -96,7 +93,7 @@
+   // Verify that the remembered set has no entries for
+   // the heap interval denoted by mr.
+   virtual void verify_empty(MemRegion mr) = 0;
+-  
++
+   // If appropriate, print some information about the remset on "tty".
+   virtual void print() {}
+ 
+@@ -113,7 +110,7 @@
+   // Informs the RS that refs in the given "mr" may have changed
+   // arbitrarily, and therefore may contain old-to-young pointers.
+   virtual void invalidate(MemRegion mr) = 0;
+-  
++
+   // Informs the RS that refs in this generation
+   // may have changed arbitrarily, and therefore may contain
+   // old-to-young pointers in arbitrary locations. The parameter
+diff -ruN openjdk6/hotspot/src/share/vm/memory/genRemSet.inline.hpp openjdk/hotspot/src/share/vm/memory/genRemSet.inline.hpp
+--- openjdk6/hotspot/src/share/vm/memory/genRemSet.inline.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/genRemSet.inline.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)genRemSet.inline.hpp	1.10 07/05/05 17:05:50 JVM"
+-#endif
+ /*
+  * Copyright 2001 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Inline functions of GenRemSet, which de-virtualize this
+diff -ruN openjdk6/hotspot/src/share/vm/memory/heap.cpp openjdk/hotspot/src/share/vm/memory/heap.cpp
+--- openjdk6/hotspot/src/share/vm/memory/heap.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/heap.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)heap.cpp	1.54 07/05/05 17:05:46 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -88,41 +85,52 @@
+ 
+ 
+ void CodeHeap::on_code_mapping(char* base, size_t size) {
+-#ifdef LINUX  
++#ifdef LINUX
+   extern void linux_wrap_code(char* base, size_t size);
+   linux_wrap_code(base, size);
+ #endif
+ }
+ 
+ 
+-bool CodeHeap::reserve(size_t reserved_size, size_t committed_size, size_t segment_size) {
++bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
++                       size_t segment_size) {
++  assert(reserved_size >= committed_size, "reserved < committed");
+   assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
+   assert(is_power_of_2(segment_size), "segment_size must be a power of 2");
++
+   _segment_size      = segment_size;
+   _log2_segment_size = exact_log2(segment_size);
+-  // reserve space for _memory
+-  assert(reserved_size >= committed_size, "reserved size must be >= committed size");
+-  if (!_memory.initialize(
+-        ReservedSpace(align_to_allocation_size(reserved_size),
+-                      // On Solaris using MPSS only, try for large
+-                      // page allocation of the code cache
+-                      SOLARIS_ONLY(UseMPSS ? os::large_page_size() : 0) NOT_SOLARIS(0),
+-                      SOLARIS_ONLY(UseMPSS)                             NOT_SOLARIS(false),
+-                      NULL),
+-        align_to_allocation_size(committed_size))) {
++
++  // Reserve and initialize space for _memory.
++  const size_t page_size = os::page_size_for_region(committed_size,
++                                                    reserved_size, 8);
++  const size_t granularity = os::vm_allocation_granularity();
++  const size_t r_align = MAX2(page_size, granularity);
++  const size_t r_size = align_size_up(reserved_size, r_align);
++  const size_t c_size = align_size_up(committed_size, page_size);
++
++  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
++    MAX2(page_size, granularity);
++  ReservedSpace rs(r_size, rs_align, false);
++  os::trace_page_sizes("code heap", committed_size, reserved_size, page_size,
++                       rs.base(), rs.size());
++  if (!_memory.initialize(rs, c_size)) {
+     return false;
+   }
++
+   on_code_mapping(_memory.low(), _memory.committed_size());
+   _number_of_committed_segments = number_of_segments(_memory.committed_size());
+   _number_of_reserved_segments  = number_of_segments(_memory.reserved_size());
+   assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
++
+   // reserve space for _segmap
+   if (!_segmap.initialize(align_to_page_size(_number_of_reserved_segments), align_to_page_size(_number_of_committed_segments))) {
+-    return false;  
++    return false;
+   }
+   assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit  enough space for segment map");
+   assert(_segmap.reserved_size()  >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map");
+   assert(_segmap.reserved_size()  >= _segmap.committed_size()     , "just checking");
++
+   // initialize remaining instance variables
+   clear();
+   return true;
+@@ -172,14 +180,14 @@
+ void* CodeHeap::allocate(size_t size) {
+   size_t length = number_of_segments(size + sizeof(HeapBlock));
+   assert(length *_segment_size >= sizeof(FreeBlock), "not enough room for FreeList");
+-    
++
+   // First check if we can satify request from freelist
+   debug_only(verify());
+   HeapBlock* block = search_freelist(length);
+   debug_only(if (VerifyCodeCacheOften) verify());
+   if (block != NULL) {
+-    assert(block->length() >= length && block->length() < length + CodeCacheMinBlockLength, "sanity check");    
+-    assert(!block->free(), "must be marked free");    
++    assert(block->length() >= length && block->length() < length + CodeCacheMinBlockLength, "sanity check");
++    assert(!block->free(), "must be marked free");
+ #ifdef ASSERT
+     memset((void *)block->allocated_space(), badCodeHeapNewVal, size);
+ #endif
+@@ -192,7 +200,7 @@
+   if (_next_segment + length <= _number_of_committed_segments) {
+     mark_segmap_as_used(_next_segment, _next_segment + length);
+     HeapBlock* b =  block_at(_next_segment);
+-    b->initialize(length);    
++    b->initialize(length);
+     _next_segment += length;
+ #ifdef ASSERT
+     memset((void *)b->allocated_space(), badCodeHeapNewVal, size);
+@@ -210,8 +218,8 @@
+   HeapBlock* b = (((HeapBlock *)p) - 1);
+   assert(b->allocated_space() == p, "sanity check");
+ #ifdef ASSERT
+-  memset((void *)b->allocated_space(), 
+-         badCodeHeapFreeVal,  
++  memset((void *)b->allocated_space(),
++         badCodeHeapFreeVal,
+          size(b->length()) - sizeof(HeapBlock));
+ #endif
+   add_to_freelist(b);
+@@ -273,8 +281,8 @@
+ }
+ 
+ // Returns the next Heap block an offset into one
+-HeapBlock* CodeHeap::next_block(HeapBlock *b) const {  
+-  if (b == NULL) return NULL;  
++HeapBlock* CodeHeap::next_block(HeapBlock *b) const {
++  if (b == NULL) return NULL;
+   size_t i = segment_for(b) + b->length();
+   if (i < _next_segment)
+     return block_at(i);
+@@ -306,14 +314,14 @@
+ 
+ // Free list management
+ 
+-FreeBlock *CodeHeap::following_block(FreeBlock *b) { 
+-  return (FreeBlock*)(((address)b) + _segment_size * b->length()); 
++FreeBlock *CodeHeap::following_block(FreeBlock *b) {
++  return (FreeBlock*)(((address)b) + _segment_size * b->length());
+ }
+ 
+ // Inserts block b after a
+ void CodeHeap::insert_after(FreeBlock* a, FreeBlock* b) {
+-  assert(a != NULL && b != NULL, "must be real pointers");  
+-  
++  assert(a != NULL && b != NULL, "must be real pointers");
++
+   // Link b into the list after a
+   b->set_link(a->link());
+   a->set_link(b);
+@@ -328,16 +336,16 @@
+   assert(a->free(), "must be a free block");
+   if (following_block(a) == a->link()) {
+     assert(a->link() != NULL && a->link()->free(), "must be free too");
+-    // Update block a to include the following block    
+-    a->set_length(a->length() + a->link()->length()); 
++    // Update block a to include the following block
++    a->set_length(a->length() + a->link()->length());
+     a->set_link(a->link()->link());
+     // Update find_start map
+     size_t beg = segment_for(a);
+-    mark_segmap_as_used(beg, beg + a->length());    
++    mark_segmap_as_used(beg, beg + a->length());
+   }
+ }
+ 
+-void CodeHeap::add_to_freelist(HeapBlock *a) {  
++void CodeHeap::add_to_freelist(HeapBlock *a) {
+   FreeBlock* b = (FreeBlock*)a;
+   assert(b != _freelist, "cannot be removed twice");
+ 
+@@ -350,26 +358,26 @@
+     _freelist = b;
+     b->set_link(NULL);
+     return;
+-  }  
+-      
++  }
++
+   // Scan for right place to put into list. List
+   // is sorted by increasing addresseses
+   FreeBlock* prev = NULL;
+-  FreeBlock* cur  = _freelist;    
+-  while(cur != NULL && cur < b) { 
++  FreeBlock* cur  = _freelist;
++  while(cur != NULL && cur < b) {
+     assert(prev == NULL || prev < cur, "must be ordered");
+     prev = cur;
+     cur  = cur->link();
+   }
+-    
+-  assert( (prev == NULL && b < _freelist) ||          
++
++  assert( (prev == NULL && b < _freelist) ||
+           (prev < b && (cur == NULL || b < cur)), "list must be ordered");
+-  
++
+   if (prev == NULL) {
+     // Insert first in list
+     b->set_link(_freelist);
+-    _freelist = b;    
+-    merge_right(_freelist); 
++    _freelist = b;
++    merge_right(_freelist);
+   } else {
+     insert_after(prev, b);
+   }
+@@ -394,7 +402,7 @@
+       best_length = best_block->length();
+     }
+ 
+-    // Next element in list    
++    // Next element in list
+     prev = cur;
+     cur  = cur->link();
+   }
+@@ -404,33 +412,33 @@
+     return NULL;
+   }
+ 
+-  assert((best_prev == NULL && _freelist == best_block ) || 
++  assert((best_prev == NULL && _freelist == best_block ) ||
+          (best_prev != NULL && best_prev->link() == best_block), "sanity check");
+ 
+   // Exact (or at least good enough) fit. Remove from list.
+   // Don't leave anything on the freelist smaller than CodeCacheMinBlockLength.
+-  if (best_length < length + CodeCacheMinBlockLength) {    
++  if (best_length < length + CodeCacheMinBlockLength) {
+     length = best_length;
+     if (best_prev == NULL) {
+       assert(_freelist == best_block, "sanity check");
+-      _freelist = _freelist->link();            
++      _freelist = _freelist->link();
+     } else {
+       // Unmap element
+       best_prev->set_link(best_block->link());
+-    }        
++    }
+   } else {
+     // Truncate block and return a pointer to the following block
+-    best_block->set_length(best_length - length);  
++    best_block->set_length(best_length - length);
+     best_block = following_block(best_block);
+-    // Set used bit and length on new block 
++    // Set used bit and length on new block
+     size_t beg = segment_for(best_block);
+     mark_segmap_as_used(beg, beg + length);
+-    best_block->set_length(length); 
++    best_block->set_length(length);
+   }
+ 
+   best_block->set_used();
+   _free_segments -= length;
+-  return best_block;  
++  return best_block;
+ }
+ 
+ //----------------------------------------------------------------------------
+@@ -445,7 +453,7 @@
+ #endif
+ 
+ void CodeHeap::verify() {
+-  // Count the number of blocks on the freelist, and the amount of space 
++  // Count the number of blocks on the freelist, and the amount of space
+   // represented.
+   int count = 0;
+   size_t len = 0;
+@@ -455,12 +463,12 @@
+   }
+ 
+   // Verify that freelist contains the right amount of free space
+-  guarantee(len == _free_segments, "wrong freelist");  
++  guarantee(len == _free_segments, "wrong freelist");
+ 
+   // Verify that the number of free blocks is not out of hand.
+   static int free_block_threshold = 10000;
+   if (count > free_block_threshold) {
+-    warning("CodeHeap: # of free blocks > %d", free_block_threshold);  
++    warning("CodeHeap: # of free blocks > %d", free_block_threshold);
+     // Double the warning limit
+     free_block_threshold *= 2;
+   }
+@@ -470,6 +478,5 @@
+   for(HeapBlock *h = first_block(); h != NULL; h = next_block(h)) {
+     if (h->free()) count--;
+   }
+-  guarantee(count == 0, "missing free blocks");  
++  guarantee(count == 0, "missing free blocks");
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/memory/heap.hpp openjdk/hotspot/src/share/vm/memory/heap.hpp
+--- openjdk6/hotspot/src/share/vm/memory/heap.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/heap.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)heap.hpp	1.43 07/05/05 17:05:47 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Blocks
+@@ -32,7 +29,7 @@
+ 
+  public:
+   struct Header {
+-    size_t  _length;                             // the length in segments    
++    size_t  _length;                             // the length in segments
+     bool    _used;                               // Used bit
+   };
+ 
+@@ -49,7 +46,7 @@
+ 
+   // Accessors
+   void* allocated_space() const                  { return (void*)(this + 1); }
+-  size_t length() const                          { return _header._length; }  
++  size_t length() const                          { return _header._length; }
+ 
+   // Used/free
+   void set_used()                                { _header._used = true; }
+@@ -59,19 +56,19 @@
+ 
+ class FreeBlock: public HeapBlock {
+   friend class VMStructs;
+- protected:  
+-  FreeBlock* _link; 
++ protected:
++  FreeBlock* _link;
+ 
+  public:
+   // Initialization
+   void initialize(size_t length)             { HeapBlock::initialize(length); _link= NULL; }
+ 
+-  // Merging    
++  // Merging
+   void set_length(size_t l)                  { _header._length = l; }
+-  
+-  // Accessors    
+-  FreeBlock* link() const                    { return _link; }  
+-  void set_link(FreeBlock* link)             { _link = link; }  
++
++  // Accessors
++  FreeBlock* link() const                    { return _link; }
++  void set_link(FreeBlock* link)             { _link = link; }
+ };
+ 
+ class CodeHeap : public CHeapObj {
+@@ -87,7 +84,7 @@
+ 
+   size_t       _next_segment;
+ 
+-  FreeBlock*   _freelist; 
++  FreeBlock*   _freelist;
+   size_t       _free_segments;                   // No. of segments in freelist
+ 
+   // Helper functions
+@@ -100,11 +97,11 @@
+   void  mark_segmap_as_free(size_t beg, size_t end);
+   void  mark_segmap_as_used(size_t beg, size_t end);
+ 
+-  // Freelist management helpers      
++  // Freelist management helpers
+   FreeBlock* following_block(FreeBlock *b);
+   void insert_after(FreeBlock* a, FreeBlock* b);
+   void merge_right (FreeBlock* a);
+-  
++
+   // Toplevel freelist management
+   void add_to_freelist(HeapBlock *b);
+   FreeBlock* search_freelist(size_t length);
+@@ -146,18 +143,18 @@
+   char *high_boundary() const                    { return _memory.high_boundary(); }
+ 
+   // Iteration
+-  
++
+   // returns the first block or NULL
+   void* first() const       { return next_free(first_block()); }
+   // returns the next block given a block p or NULL
+   void* next(void* p) const { return next_free(next_block(block_start(p))); }
+ 
+   // Statistics
+-  size_t capacity() const;  
++  size_t capacity() const;
+   size_t max_capacity() const;
+   size_t allocated_capacity() const;
+   size_t unallocated_capacity() const            { return max_capacity() - allocated_capacity(); }
+-  
++
+   // Debugging
+   void verify();
+   void print()  PRODUCT_RETURN;
+diff -ruN openjdk6/hotspot/src/share/vm/memory/heapInspection.cpp openjdk/hotspot/src/share/vm/memory/heapInspection.cpp
+--- openjdk6/hotspot/src/share/vm/memory/heapInspection.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/heapInspection.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)heapInspection.cpp	1.21 07/05/29 09:44:16 JVM"
+-#endif
+ /*
+  * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -151,7 +148,7 @@
+ int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
+   return (*e1)->compare(*e1,*e2);
+ }
+-  
++
+ KlassInfoHisto::KlassInfoHisto(const char* title, int estimatedCount) :
+   _title(title) {
+   _elements = new (ResourceObj::C_HEAP) GrowableArray<KlassInfoEntry*>(estimatedCount,true);
+@@ -208,7 +205,7 @@
+   void do_object(oop obj) {
+     _cit->record_instance(obj);
+   }
+-};
++};
+ 
+ void HeapInspection::heap_inspection(outputStream* st) {
+   ResourceMark rm;
+@@ -229,7 +226,7 @@
+       break;
+     }
+ #endif // SERIALGC
+-    default: 
++    default:
+       ShouldNotReachHere(); // Unexpected heap kind for this op
+   }
+   // Collect klass instance info
+@@ -277,7 +274,7 @@
+ 
+   // Ensure that the heap is parsable
+   Universe::heap()->ensure_parsability(false);  // no need to retire TALBs
+- 
++
+   // Iterate over objects in the heap
+   FindInstanceClosure fic(k, result);
+   Universe::heap()->object_iterate(&fic);
+diff -ruN openjdk6/hotspot/src/share/vm/memory/heapInspection.hpp openjdk/hotspot/src/share/vm/memory/heapInspection.hpp
+--- openjdk6/hotspot/src/share/vm/memory/heapInspection.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/heapInspection.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)heapInspection.hpp	1.15 07/05/05 17:05:51 JVM"
+-#endif
+ /*
+  * Copyright 2002-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,15 +19,18 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
++#ifndef SERVICES_KERNEL
++
++
+ // HeapInspection
+ 
+ // KlassInfoTable is a bucket hash table that
+ // maps klassOops to extra information:
+ //    instance count and instance word size.
+-// 
++//
+ // A KlassInfoBucket is the head of a link list
+ // of KlassInfoEntry's
+ //
+@@ -122,8 +122,10 @@
+   void sort();
+ };
+ 
++#endif // SERVICES_KERNEL
++
+ class HeapInspection : public AllStatic {
+  public:
+-  static void heap_inspection(outputStream* st);             
+-  static void find_instances_at_safepoint(klassOop k, GrowableArray<oop>* result);
++  static void heap_inspection(outputStream* st) KERNEL_RETURN;
++  static void find_instances_at_safepoint(klassOop k, GrowableArray<oop>* result) KERNEL_RETURN;
+ };
+diff -ruN openjdk6/hotspot/src/share/vm/memory/iterator.cpp openjdk/hotspot/src/share/vm/memory/iterator.cpp
+--- openjdk6/hotspot/src/share/vm/memory/iterator.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/iterator.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)iterator.cpp	1.18 07/05/05 17:05:50 JVM"
+-#endif
+ /*
+  * Copyright 1997-2001 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -35,4 +32,3 @@
+ void VoidClosure::do_void() {
+   ShouldNotCallThis();
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/memory/iterator.hpp openjdk/hotspot/src/share/vm/memory/iterator.hpp
+--- openjdk6/hotspot/src/share/vm/memory/iterator.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/iterator.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)iterator.hpp	1.38 07/05/05 17:05:52 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The following classes are C++ `closures` for iterating over objects, roots and spaces
+@@ -55,7 +52,7 @@
+   // Controls how prefetching is done for invocations of this closure.
+   Prefetch::style prefetch_style() { // Note that this is non-virtual.
+     return Prefetch::do_none;
+-  } 
++  }
+ };
+ 
+ // ObjectClosure is used for iterating through an object space
+diff -ruN openjdk6/hotspot/src/share/vm/memory/memRegion.cpp openjdk/hotspot/src/share/vm/memory/memRegion.cpp
+--- openjdk6/hotspot/src/share/vm/memory/memRegion.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/memRegion.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)memRegion.cpp	1.23 07/05/05 17:05:52 JVM"
+-#endif
+ /*
+  * Copyright 2000-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A very simple data structure representing a contigous word-aligned
+@@ -49,7 +46,7 @@
+ 
+   // Otherwise, regions must overlap or be adjacent
+   assert(((start() <= mr2.start()) && (end() >= mr2.start())) ||
+-         ((mr2.start() <= start()) && (mr2.end() >= start())), 
++         ((mr2.start() <= start()) && (mr2.end() >= start())),
+              "non-adjacent or overlapping regions");
+   MemRegion res;
+   HeapWord* res_start = MIN2(start(), mr2.start());
+@@ -68,9 +65,9 @@
+   //                        |overlap ending|
+   //                                   |strictly above|
+   //              |completely overlapping|
+-  // We can't deal with an interior case because it would 
++  // We can't deal with an interior case because it would
+   // produce two disjoint regions as a result.
+-  // We aren't trying to be optimal in the number of tests below, 
++  // We aren't trying to be optimal in the number of tests below,
+   // but the order is important to distinguish the strictly cases
+   // from the overlapping cases.
+   if (mr2.end() <= start()) {
+diff -ruN openjdk6/hotspot/src/share/vm/memory/memRegion.hpp openjdk/hotspot/src/share/vm/memory/memRegion.hpp
+--- openjdk6/hotspot/src/share/vm/memory/memRegion.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/memRegion.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)memRegion.hpp	1.27 07/05/05 17:05:53 JVM"
+-#endif
+ /*
+  * Copyright 2000-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A very simple data structure representing a contigous region
+@@ -46,13 +43,13 @@
+     _start(start), _word_size(pointer_delta(end, start)) {
+     assert(end >= start, "incorrect constructor arguments");
+   }
+-  
++
+   MemRegion(const MemRegion& mr): _start(mr._start), _word_size(mr._word_size) {}
+-    
++
+   MemRegion intersection(const MemRegion mr2) const;
+   // regions must overlap or be adjacent
+   MemRegion _union(const MemRegion mr2) const;
+-  // minus will fail a guarantee if mr2 is interior to this, 
++  // minus will fail a guarantee if mr2 is interior to this,
+   // since there's no way to return 2 disjoint regions.
+   MemRegion minus(const MemRegion mr2) const;
+ 
+@@ -96,13 +93,13 @@
+ class MemRegionClosureRO: public MemRegionClosure {
+ public:
+   void* operator new(size_t size, ResourceObj::allocation_type type) {
+-	return ResourceObj::operator new(size, type);
++        return ResourceObj::operator new(size, type);
+   }
+   void* operator new(size_t size, Arena *arena) {
+-	return ResourceObj::operator new(size, arena);
++        return ResourceObj::operator new(size, arena);
+   }
+   void* operator new(size_t size) {
+-	return ResourceObj::operator new(size);
++        return ResourceObj::operator new(size);
+   }
+ 
+   void  operator delete(void* p) {} // nothing to do
+diff -ruN openjdk6/hotspot/src/share/vm/memory/modRefBarrierSet.hpp openjdk/hotspot/src/share/vm/memory/modRefBarrierSet.hpp
+--- openjdk6/hotspot/src/share/vm/memory/modRefBarrierSet.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/modRefBarrierSet.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)modRefBarrierSet.hpp	1.16 07/05/05 17:05:53 JVM"
+-#endif
+ /*
+  * Copyright 2000-2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This kind of "BarrierSet" allows a "CollectedHeap" to detect and
+@@ -39,15 +36,15 @@
+   bool has_read_prim_barrier() { return false; }
+   bool has_write_ref_barrier() { return true; }
+   bool has_write_prim_barrier() { return false; }
+-  
++
+   bool read_ref_needs_barrier(oop* field) { return false; }
+   bool read_prim_needs_barrier(HeapWord* field, size_t bytes) { return false; }
+   virtual bool write_ref_needs_barrier(oop* field, oop new_val) = 0;
+   bool write_prim_needs_barrier(HeapWord* field, size_t bytes,
+-				juint val1, juint val2) { return false; }
++                                juint val1, juint val2) { return false; }
+ 
+   void write_prim_field(oop obj, size_t offset, size_t bytes,
+-			juint val1, juint val2) {}
++                        juint val1, juint val2) {}
+ 
+   void read_ref_field(oop* field) {}
+   void read_prim_field(HeapWord* field, size_t bytes) {}
+@@ -55,7 +52,7 @@
+   virtual void write_ref_field_work(oop* field, oop new_val) = 0;
+ public:
+   void write_prim_field(HeapWord* field, size_t bytes,
+-			juint val1, juint val2) {}
++                        juint val1, juint val2) {}
+ 
+   bool has_read_ref_array_opt() { return false; }
+   bool has_read_prim_array_opt() { return false; }
+@@ -91,7 +88,7 @@
+   // Causes all refs in "mr" to be assumed to be modified.
+   virtual void invalidate(MemRegion mr) = 0;
+ 
+-  // The caller guarantees that "mr" contains no references.  (Perhaps it's 
++  // The caller guarantees that "mr" contains no references.  (Perhaps its
+   // objects have been moved elsewhere.)
+   virtual void clear(MemRegion mr) = 0;
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/memory/oopFactory.cpp openjdk/hotspot/src/share/vm/memory/oopFactory.cpp
+--- openjdk6/hotspot/src/share/vm/memory/oopFactory.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/oopFactory.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)oopFactory.cpp	1.83 07/05/05 17:05:53 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -97,7 +94,7 @@
+ }
+ 
+ 
+-klassOop oopFactory::new_instanceKlass(int vtable_len, int itable_len, int static_field_size, 
++klassOop oopFactory::new_instanceKlass(int vtable_len, int itable_len, int static_field_size,
+                                        int nonstatic_oop_map_size, ReferenceType rt, TRAPS) {
+   instanceKlassKlass* ikk = instanceKlassKlass::cast(Universe::instanceKlassKlassObj());
+   return ikk->allocate_instance_klass(vtable_len, itable_len, static_field_size, nonstatic_oop_map_size, rt, CHECK_NULL);
+@@ -146,4 +143,3 @@
+   c->set_holder_klass(klass());
+   return c;
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/memory/oopFactory.hpp openjdk/hotspot/src/share/vm/memory/oopFactory.hpp
+--- openjdk6/hotspot/src/share/vm/memory/oopFactory.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/oopFactory.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)oopFactory.hpp	1.61 07/05/05 17:05:53 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // oopFactory is a class used for creating new objects.
+@@ -52,7 +49,7 @@
+   static typeArrayOop    new_permanent_byteArray (int length, TRAPS);  // used for class file structures
+   static typeArrayOop    new_permanent_shortArray(int length, TRAPS);  // used for class file structures
+   static typeArrayOop    new_permanent_intArray  (int length, TRAPS);  // used for class file structures
+-  
++
+   static typeArrayOop    new_typeArray(BasicType type, int length, TRAPS);
+ 
+   // Symbols
+@@ -88,7 +85,7 @@
+   static constantPoolCacheOop new_constantPoolCache(int length, TRAPS);
+ 
+   // Instance classes
+-  static klassOop        new_instanceKlass(int vtable_len, int itable_len, int static_field_size, 
++  static klassOop        new_instanceKlass(int vtable_len, int itable_len, int static_field_size,
+                                            int nonstatic_oop_map_size, ReferenceType rt, TRAPS);
+ 
+   // Methods
+diff -ruN openjdk6/hotspot/src/share/vm/memory/permGen.cpp openjdk/hotspot/src/share/vm/memory/permGen.cpp
+--- openjdk6/hotspot/src/share/vm/memory/permGen.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/permGen.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)permGen.cpp	1.54 07/05/29 09:44:16 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -30,8 +27,8 @@
+ 
+ CompactingPermGen::CompactingPermGen(ReservedSpace rs,
+                                      ReservedSpace shared_rs,
+-				     size_t initial_byte_size,
+-				     GenRemSet* remset,
++                                     size_t initial_byte_size,
++                                     GenRemSet* remset,
+                                      PermanentGenerationSpec* perm_spec)
+ {
+   CompactingPermGenGen* g =
+diff -ruN openjdk6/hotspot/src/share/vm/memory/permGen.hpp openjdk/hotspot/src/share/vm/memory/permGen.hpp
+--- openjdk6/hotspot/src/share/vm/memory/permGen.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/permGen.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)permGen.hpp	1.38 07/05/29 09:44:16 JVM"
+-#endif
+ /*
+  * Copyright 2000-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // All heaps contains a "permanent generation," containing permanent
+@@ -33,20 +30,20 @@
+ class GenRemSet;
+ class CSpaceCounters;
+ 
+-// PermGen models the part of the heap 
++// PermGen models the part of the heap
+ 
+ class PermGen : public CHeapObj {
+   friend class VMStructs;
+  protected:
+   size_t _capacity_expansion_limit;  // maximum expansion allowed without a
+-				     // full gc occuring
++                                     // full gc occurring
+ 
+  public:
+   enum Name {
+     MarkSweepCompact, MarkSweep, ConcurrentMarkSweep
+   };
+ 
+-  // Permanent allocation (initialized)  
++  // Permanent allocation (initialized)
+   virtual HeapWord* mem_allocate(size_t size) = 0;
+ 
+   // Mark sweep support
+@@ -74,4 +71,3 @@
+     g->update_counters();
+   }
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/memory/referencePolicy.cpp openjdk/hotspot/src/share/vm/memory/referencePolicy.cpp
+--- openjdk6/hotspot/src/share/vm/memory/referencePolicy.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/referencePolicy.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)referencePolicy.cpp	1.12 07/05/05 17:05:54 JVM"
+-#endif
+ /*
+  * Copyright 2000-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -71,4 +68,3 @@
+ 
+   return true;
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/memory/referencePolicy.hpp openjdk/hotspot/src/share/vm/memory/referencePolicy.hpp
+--- openjdk6/hotspot/src/share/vm/memory/referencePolicy.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/referencePolicy.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)referencePolicy.hpp	1.11 07/05/05 17:05:54 JVM"
+-#endif
+ /*
+  * Copyright 2000 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // referencePolicy is used to determine when soft reference objects
+diff -ruN openjdk6/hotspot/src/share/vm/memory/referenceProcessor.cpp openjdk/hotspot/src/share/vm/memory/referenceProcessor.cpp
+--- openjdk6/hotspot/src/share/vm/memory/referenceProcessor.cpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/referenceProcessor.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)referenceProcessor.cpp	1.55 07/05/17 15:55:08 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -42,7 +39,7 @@
+   size_t _len;
+   oop   _head;
+ };
+-  
++
+ oop  ReferenceProcessor::_sentinelRef = NULL;
+ 
+ const int subclasses_of_ref = REF_PHANTOM - REF_OTHER;
+@@ -74,11 +71,11 @@
+ 
+ 
+ ReferenceProcessor* ReferenceProcessor::create_ref_processor(
+-    MemRegion          span, 
++    MemRegion          span,
+     bool               atomic_discovery,
+     bool               mt_discovery,
+     BoolObjectClosure* is_alive_non_header,
+-    int                parallel_gc_threads, 
++    int                parallel_gc_threads,
+     bool               mt_processing)
+ {
+   int mt_degree = 1;
+@@ -98,7 +95,7 @@
+ 
+ 
+ ReferenceProcessor::ReferenceProcessor(MemRegion span,
+-  bool atomic_discovery, bool mt_discovery, int mt_degree, 
++  bool atomic_discovery, bool mt_discovery, int mt_degree,
+   bool mt_processing) :
+   _discovering_refs(false),
+   _enqueuing_is_done(false),
+@@ -120,7 +117,7 @@
+   assert(_sentinelRef != NULL, "_sentinelRef is NULL");
+   // Initialized all entries to _sentinelRef
+   for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
+-	_discoveredSoftRefs[i].set_head(_sentinelRef);
++        _discoveredSoftRefs[i].set_head(_sentinelRef);
+     _discoveredSoftRefs[i].set_length(0);
+   }
+ }
+@@ -185,30 +182,30 @@
+   // Soft references
+   {
+     TraceTime tt("SoftReference", trace_time, false, gclog_or_tty);
+-    process_discovered_reflist(_discoveredSoftRefs, policy, true, 
++    process_discovered_reflist(_discoveredSoftRefs, policy, true,
+                                is_alive, keep_alive, complete_gc, task_executor);
+   }
+ 
+   update_soft_ref_master_clock();
+-  
++
+   // Weak references
+   {
+     TraceTime tt("WeakReference", trace_time, false, gclog_or_tty);
+-    process_discovered_reflist(_discoveredWeakRefs, NULL, true, 
++    process_discovered_reflist(_discoveredWeakRefs, NULL, true,
+                                is_alive, keep_alive, complete_gc, task_executor);
+   }
+ 
+   // Final references
+   {
+     TraceTime tt("FinalReference", trace_time, false, gclog_or_tty);
+-    process_discovered_reflist(_discoveredFinalRefs, NULL, false, 
++    process_discovered_reflist(_discoveredFinalRefs, NULL, false,
+                                is_alive, keep_alive, complete_gc, task_executor);
+   }
+ 
+   // Phantom references
+   {
+     TraceTime tt("PhantomReference", trace_time, false, gclog_or_tty);
+-    process_discovered_reflist(_discoveredPhantomRefs, NULL, false, 
++    process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
+                                is_alive, keep_alive, complete_gc, task_executor);
+   }
+ 
+@@ -217,7 +214,7 @@
+   // that is not how the JDK1.2 specification is. See #4126360. Native code can
+   // thus use JNI weak references to circumvent the phantom references and
+   // resurrect a "post-mortem" object.
+-  { 
++  {
+     TraceTime tt("JNI Weak Reference", trace_time, false, gclog_or_tty);
+     if (task_executor != NULL) {
+       task_executor->set_single_threaded_mode();
+@@ -312,10 +309,10 @@
+     assert(obj->is_instanceRef(), "should be reference object");
+     oop next = java_lang_ref_Reference::discovered(obj);
+     if (TraceReferenceGC && PrintGCDetails) {
+-      gclog_or_tty->print_cr("	obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
++      gclog_or_tty->print_cr("  obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
+                              (oopDesc*) obj, (oopDesc*) next);
+     }
+-    assert(*java_lang_ref_Reference::next_addr(obj) == NULL, 
++    assert(*java_lang_ref_Reference::next_addr(obj) == NULL,
+       "The reference should not be enqueued");
+     if (next == _sentinelRef) {  // obj is last
+       // Swap refs_list into pendling_list_addr and
+@@ -345,12 +342,12 @@
+   RefProcEnqueueTask(ReferenceProcessor& ref_processor,
+                      DiscoveredList      discovered_refs[],
+                      oop*                pending_list_addr,
+-                     oop                 sentinel_ref, 
++                     oop                 sentinel_ref,
+                      int                 n_queues)
+     : EnqueueTask(ref_processor, discovered_refs,
+                   pending_list_addr, sentinel_ref, n_queues)
+   { }
+-    
++
+   virtual void work(unsigned int work_id)
+   {
+     assert(work_id < (unsigned int)_ref_processor.num_q(), "Index out-of-bounds");
+@@ -366,7 +363,7 @@
+ };
+ 
+ // Enqueue references that are not made active again
+-void ReferenceProcessor::enqueue_discovered_reflists(oop* pending_list_addr, 
++void ReferenceProcessor::enqueue_discovered_reflists(oop* pending_list_addr,
+   AbstractRefProcTaskExecutor* task_executor) {
+   if (_processing_is_mt && task_executor != NULL) {
+     // Parallel code
+@@ -386,41 +383,45 @@
+ // Iterator for the list of discovered references.
+ class DiscoveredListIterator {
+ public:
+-  inline DiscoveredListIterator(DiscoveredList&    refs_list, 
++  inline DiscoveredListIterator(DiscoveredList&    refs_list,
+                                 OopClosure*        keep_alive,
+                                 BoolObjectClosure* is_alive);
+-  
++
+   // End Of List.
+-  inline bool has_next() const 
++  inline bool has_next() const
+   { return _next != ReferenceProcessor::_sentinelRef; }
+- 
++
+   // Get oop to the Reference object.
+   inline oop  obj() const { return _ref; }
+ 
+   // Get oop to the referent object.
+   inline oop  referent() const { return _referent; }
+-  
++
+   // Returns true if referent is alive.
+   inline bool is_referent_alive() const;
+-  
++
+   // Loads data for the current reference.
+-  inline void load_ptrs();
+-  
++  // The "allow_null_referent" argument tells us to allow for the possibility
++  // of a NULL referent in the discovered Reference object. This typically
++  // happens in the case of concurrent collectors that may have done the
++  // discovery concurrently or interleaved with mutator execution.
++  inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent));
++
+   // Move to the next discovered reference.
+   inline void next();
+-  
++
+   // Remove the current reference from the list and move to the next.
+   inline void remove();
+-  
++
+   // Make the Reference object active again.
+   inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); }
+-  
++
+   // Make the referent alive.
+   inline void make_referent_alive() { _keep_alive->do_oop(_referent_addr); }
+-  
++
+   // Update the discovered field.
+   inline void update_discovered() { _keep_alive->do_oop(_prev_next); }
+-  
++
+   // NULL out referent pointer.
+   inline void clear_referent() { *_referent_addr = NULL; }
+ 
+@@ -429,10 +430,10 @@
+   inline size_t processed() const { return _processed; }
+   inline size_t removed() const   { return _removed; }
+   )
+-  
++
+ private:
+   inline void move_to_next();
+-  
++
+ private:
+   DiscoveredList&    _refs_list;
+   oop*               _prev_next;
+@@ -475,7 +476,7 @@
+   return _is_alive->do_object_b(_referent);
+ }
+ 
+-inline void DiscoveredListIterator::load_ptrs()
++inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent))
+ {
+   _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
+   assert(_discovered_addr && (*_discovered_addr)->is_oop_or_null(),
+@@ -485,7 +486,10 @@
+   _referent = *_referent_addr;
+   assert(Universe::heap()->is_in_reserved_or_null(_referent),
+          "Wrong oop found in java.lang.Reference object");
+-  assert(_referent->is_oop(), "bad referent");
++  assert(allow_null_referent ?
++             _referent->is_oop_or_null()
++           : _referent->is_oop(),
++         "bad referent");
+ }
+ 
+ inline void DiscoveredListIterator::next()
+@@ -536,12 +540,12 @@
+   DiscoveredListIterator iter(refs_list_addr, keep_alive, is_alive);
+   // Decide which softly reachable refs should be kept alive.
+   while (iter.has_next()) {
+-    iter.load_ptrs();
+-    bool referent_is_dead = !iter.is_referent_alive();
++    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
++    bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
+     if (referent_is_dead && !policy->should_clear_reference(iter.obj())) {
+       if (TraceReferenceGC) {
+-        gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s"  ") by policy",  
+-                               (address)iter.obj(), iter.obj()->blueprint()->internal_name()); 
++        gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s"  ") by policy",
++                               (address)iter.obj(), iter.obj()->blueprint()->internal_name());
+       }
+       // Make the Reference object active again
+       iter.make_active();
+@@ -573,13 +577,13 @@
+   assert(discovery_is_atomic(), "Error");
+   DiscoveredListIterator iter(refs_list_addr, keep_alive, is_alive);
+   while (iter.has_next()) {
+-    iter.load_ptrs();
++    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
+     DEBUG_ONLY(oop* next_addr = java_lang_ref_Reference::next_addr(iter.obj());)
+     assert(*next_addr == NULL, "Should not discover inactive Reference");
+     if (iter.is_referent_alive()) {
+       if (TraceReferenceGC) {
+-        gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",  
+-                               (address)iter.obj(), iter.obj()->blueprint()->internal_name()); 
++        gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
++                               (address)iter.obj(), iter.obj()->blueprint()->internal_name());
+       }
+       // The referent is reachable after all.
+       // Update the referent pointer as necessary: Note that this
+@@ -610,7 +614,7 @@
+   assert(!discovery_is_atomic(), "Error");
+   DiscoveredListIterator iter(refs_list_addr, keep_alive, is_alive);
+   while (iter.has_next()) {
+-    iter.load_ptrs();
++    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
+     oop* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
+     if ((iter.referent() == NULL || iter.is_referent_alive() ||
+          *next_addr != NULL)) {
+@@ -646,7 +650,7 @@
+   DiscoveredListIterator iter(refs_list_addr, keep_alive, is_alive);
+   while (iter.has_next()) {
+     iter.update_discovered();
+-    iter.load_ptrs();
++    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
+     if (clear_referent) {
+       // NULL out referent pointer
+       iter.clear_referent();
+@@ -655,9 +659,9 @@
+       iter.make_referent_alive();
+     }
+     if (TraceReferenceGC) {
+-      gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",  
+-                             clear_referent ? "cleared " : "",  
+-                             (address)iter.obj(), iter.obj()->blueprint()->internal_name()); 
++      gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
++                             clear_referent ? "cleared " : "",
++                             (address)iter.obj(), iter.obj()->blueprint()->internal_name());
+     }
+     assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
+     // If discovery is concurrent, we may have objects with null referents,
+@@ -704,9 +708,9 @@
+   { }
+   virtual void work(unsigned int i, BoolObjectClosure& is_alive,
+                     OopClosure& keep_alive,
+-                    VoidClosure& complete_gc) 
++                    VoidClosure& complete_gc)
+   {
+-    _ref_processor.process_phase1(_refs_lists[i], _policy, 
++    _ref_processor.process_phase1(_refs_lists[i], _policy,
+                                   &is_alive, &keep_alive, &complete_gc);
+   }
+ private:
+@@ -718,13 +722,13 @@
+   RefProcPhase2Task(ReferenceProcessor& ref_processor,
+                     DiscoveredList      refs_lists[],
+                     bool                marks_oops_alive)
+-    : ProcessTask(ref_processor, refs_lists, marks_oops_alive) 
++    : ProcessTask(ref_processor, refs_lists, marks_oops_alive)
+   { }
+   virtual void work(unsigned int i, BoolObjectClosure& is_alive,
+                     OopClosure& keep_alive,
+-                    VoidClosure& complete_gc) 
++                    VoidClosure& complete_gc)
+   {
+-    _ref_processor.process_phase2(_refs_lists[i], 
++    _ref_processor.process_phase2(_refs_lists[i],
+                                   &is_alive, &keep_alive, &complete_gc);
+   }
+ };
+@@ -764,8 +768,8 @@
+       assert(to_idx < _num_q, "Sanity Check!");
+       if (ref_lists[to_idx].length() < avg_refs) {
+         // move superfluous refs
+-        size_t refs_to_move = 
+-          MIN2(ref_lists[from_idx].length() - avg_refs, 
++        size_t refs_to_move =
++          MIN2(ref_lists[from_idx].length() - avg_refs,
+                avg_refs - ref_lists[to_idx].length());
+         oop move_head = ref_lists[from_idx].head();
+         oop move_tail = move_head;
+@@ -795,7 +799,7 @@
+   BoolObjectClosure*           is_alive,
+   OopClosure*                  keep_alive,
+   VoidClosure*                 complete_gc,
+-  AbstractRefProcTaskExecutor* task_executor) 
++  AbstractRefProcTaskExecutor* task_executor)
+ {
+   bool mt = task_executor != NULL && _processing_is_mt;
+   if (mt && ParallelRefProcBalancingEnabled) {
+@@ -808,7 +812,7 @@
+     }
+     gclog_or_tty->print(", %u refs", total);
+   }
+-                            
++
+   // Phase 1 (soft refs only):
+   // . Traverse the list and remove any SoftReferences whose
+   //   referents are not alive, but that should be kept alive for
+@@ -825,7 +829,7 @@
+       }
+     }
+   } else { // policy == NULL
+-    assert(refs_lists != _discoveredSoftRefs, 
++    assert(refs_lists != _discoveredSoftRefs,
+            "Policy must be specified for soft references.");
+   }
+ 
+@@ -839,7 +843,7 @@
+       process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
+     }
+   }
+-  
++
+   // Phase 3:
+   // . Traverse the list and process referents as appropriate.
+   if (mt) {
+@@ -870,19 +874,19 @@
+   DiscoveredListIterator iter(refs_list, NULL, NULL);
+   size_t length = refs_list.length();
+   while (iter.has_next()) {
+-    iter.load_ptrs();
++    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
+     oop* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
+     assert((*next_addr)->is_oop_or_null(), "bad next field");
+     // If referent has been cleared or Reference is not active,
+     // drop it.
+     if (iter.referent() == NULL || *next_addr != NULL) {
+       debug_only(
+-	if (PrintGCDetails && TraceReferenceGC) {
+-	  gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: " 
+-	    INTPTR_FORMAT " with next field: " INTPTR_FORMAT 
+-	    " and referent: " INTPTR_FORMAT, 
+-	    (address)iter.obj(), (address)*next_addr, (address)iter.referent());
+-	}
++        if (PrintGCDetails && TraceReferenceGC) {
++          gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
++            INTPTR_FORMAT " with next field: " INTPTR_FORMAT
++            " and referent: " INTPTR_FORMAT,
++            (address)iter.obj(), (address)*next_addr, (address)iter.referent());
++        }
+       )
+       // Remove Reference object from list
+       iter.remove();
+@@ -895,8 +899,8 @@
+   NOT_PRODUCT(
+     if (PrintGCDetails && TraceReferenceGC) {
+       gclog_or_tty->print(
+-	" Removed %d Refs with NULL referents out of %d discovered Refs", 
+-	iter.removed(), iter.processed());
++        " Removed %d Refs with NULL referents out of %d discovered Refs",
++        iter.removed(), iter.processed());
+     }
+   )
+ }
+@@ -962,8 +966,8 @@
+     // If retest was non NULL, another thread beat us to it:
+     // The reference has already been discovered...
+     if (TraceReferenceGC) {
+-      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",  
+-                             obj, obj->blueprint()->internal_name()); 
++      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
++                             obj, obj->blueprint()->internal_name());
+     }
+   }
+ }
+@@ -1036,8 +1040,8 @@
+   if (*discovered_addr != NULL) {
+     // The reference has already been discovered...
+     if (TraceReferenceGC) {
+-      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",  
+-                             (oopDesc*)obj, obj->blueprint()->internal_name()); 
++      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
++                             (oopDesc*)obj, obj->blueprint()->internal_name());
+     }
+     if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
+       // assumes that an object is not processed twice;
+@@ -1080,7 +1084,7 @@
+   }
+ 
+   // We do a raw store here, the field will be visited later when
+-  // processing the discovered references. 
++  // processing the discovered references.
+   if (_discovery_is_mt) {
+     add_to_discovered_list_mt(*list, obj, discovered_addr);
+   } else {
+@@ -1101,8 +1105,8 @@
+   if (TraceReferenceGC) {
+     oop referent = java_lang_ref_Reference::referent(obj);
+     if (PrintGCDetails) {
+-      gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",  
+-                             (oopDesc*) obj, obj->blueprint()->internal_name()); 
++      gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
++                             (oopDesc*) obj, obj->blueprint()->internal_name());
+     }
+     assert(referent->is_oop(), "Enqueued a bad referent");
+   }
+@@ -1121,7 +1125,7 @@
+   YieldClosure* yield) {
+ 
+   NOT_PRODUCT(verify_ok_to_handle_reflists());
+-  
++
+   // Soft references
+   {
+     TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
+@@ -1184,15 +1188,15 @@
+   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+   size_t length = refs_list.length();
+   while (iter.has_next()) {
+-    iter.load_ptrs();
++    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
+     oop* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
+-    if (iter.referent() == NULL || iter.is_referent_alive() || 
++    if (iter.referent() == NULL || iter.is_referent_alive() ||
+         *next_addr != NULL) {
+       // The referent has been cleared, or is alive, or the Reference is not
+       // active; we need to trace and mark its cohort.
+       if (TraceReferenceGC) {
+-        gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",  
+-                               iter.obj(), iter.obj()->blueprint()->internal_name()); 
++        gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
++                               iter.obj(), iter.obj()->blueprint()->internal_name());
+       }
+       // Remove Reference object from list
+       iter.remove();
+@@ -1205,7 +1209,7 @@
+     }
+   }
+   refs_list.set_length(length);
+-  
++
+   // Close the reachable set
+   complete_gc->do_void();
+ 
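+
+The recurring load_ptrs(DEBUG_ONLY(...)) change above threads an
+allow_null_referent flag through debug builds only, so that concurrent
+collectors may discover Reference objects whose referents were cleared in
+between. A minimal, self-contained sketch of the DEBUG_ONLY parameter idiom
+(assuming HotSpot's usual definition of the macro; load_example is an
+illustrative name, not code from this patch):
+
+    #ifdef ASSERT
+    #define DEBUG_ONLY(code) code
+    #else
+    #define DEBUG_ONLY(code)
+    #endif
+
+    // The extra parameter -- and any check that reads it -- exists only in
+    // debug builds; in product builds both expand to nothing.
+    void load_example(DEBUG_ONLY(bool allow_null)) {
+      DEBUG_ONLY(if (!allow_null) { /* assert a non-null referent here */ })
+    }
+
+    // Call sites mirror the declaration:
+    //   load_example(DEBUG_ONLY(true /* allow_null */));
+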
+diff -ruN openjdk6/hotspot/src/share/vm/memory/referenceProcessor.hpp openjdk/hotspot/src/share/vm/memory/referenceProcessor.hpp
+--- openjdk6/hotspot/src/share/vm/memory/referenceProcessor.hpp	2008-02-28 05:02:37.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/referenceProcessor.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)referenceProcessor.hpp	1.43 07/05/05 17:05:54 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ReferenceProcessor class encapsulates the per-"collector" processing
+@@ -57,9 +54,9 @@
+                      // subject to wkref discovery
+   bool        _discovering_refs;      // true when discovery enabled
+   bool        _discovery_is_atomic;   // if discovery is atomic wrt
+-				      // other collectors in configuration
++                                      // other collectors in configuration
+   bool        _discovery_is_mt;       // true if reference discovery is MT.
+-  bool	      _enqueuing_is_done;     // true if all weak references enqueued
++  bool        _enqueuing_is_done;     // true if all weak references enqueued
+   bool        _processing_is_mt;      // true during phases when
+                                       // reference processing is MT.
+   int         _next_id;               // round-robin counter in
+@@ -93,7 +90,7 @@
+                                   OopClosure*                  keep_alive,
+                                   VoidClosure*                 complete_gc,
+                                   AbstractRefProcTaskExecutor* task_executor);
+-                                          
++
+   void process_phaseJNI(BoolObjectClosure* is_alive,
+                         OopClosure*        keep_alive,
+                         VoidClosure*       complete_gc);
+@@ -110,7 +107,7 @@
+   // reachable.
+   inline void process_phase2(DiscoveredList&    refs_list_addr,
+                              BoolObjectClosure* is_alive,
+-                             OopClosure*        keep_alive, 
++                             OopClosure*        keep_alive,
+                              VoidClosure*       complete_gc) {
+     if (discovery_is_atomic()) {
+       // complete_gc is ignored in this case for this phase
+@@ -140,7 +137,7 @@
+ 
+   // Enqueue references with a certain reachability level
+   void enqueue_discovered_reflist(DiscoveredList& refs_list, oop* pending_list_addr);
+-                                  
++
+   // "Preclean" all the discovered reference lists
+   // by removing references with strongly reachable referents.
+   // The first argument is a predicate on an oop that indicates
+@@ -154,7 +151,7 @@
+                                       OopClosure*        keep_alive,
+                                       VoidClosure*       complete_gc,
+                                       YieldClosure*      yield);
+-                                      
++
+   // Delete entries in the discovered lists that have
+   // either a null referent or are not active. Such
+   // Reference objects can result from the clearing
+@@ -163,12 +160,12 @@
+   // For a definition of "active" see java.lang.ref.Reference;
+   // Refs are born active, become inactive when enqueued,
+   // and never become active again. The state of being
+-  // active is encoded as follows: A Ref is active 
++  // active is encoded as follows: A Ref is active
+   // if and only if its "next" field is NULL.
+   void clean_up_discovered_references();
+   void clean_up_discovered_reflist(DiscoveredList& refs_list);
+ 
+-  // Returns the name of the discovered reference list 
++  // Returns the name of the discovered reference list
+   // occupying the i / _num_q slot.
+   const char* list_name(int i);
+ 
+@@ -181,7 +178,7 @@
+                                    OopClosure*        keep_alive,
+                                    VoidClosure*       complete_gc,
+                                    YieldClosure*      yield);
+-                                   
++
+   void enqueue_discovered_reflists(oop* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
+   int next_id() {
+     int id = _next_id;
+@@ -200,13 +197,13 @@
+ 
+   // Calculate the number of jni handles.
+   unsigned int count_jni_refs();
+-  
++
+   // Balances reference queues.
+   void balance_queues(DiscoveredList ref_lists[]);
+-  
++
+   // Update (advance) the soft ref master clock field.
+   void update_soft_ref_master_clock();
+-  
++
+  public:
+   // constructor
+   ReferenceProcessor():
+@@ -224,16 +221,16 @@
+   {}
+ 
+   ReferenceProcessor(MemRegion span, bool atomic_discovery,
+-                     bool mt_discovery, int mt_degree = 1, 
++                     bool mt_discovery, int mt_degree = 1,
+                      bool mt_processing = false);
+-   
++
+   // Allocates and initializes a reference processor.
+   static ReferenceProcessor* create_ref_processor(
+-    MemRegion          span, 
++    MemRegion          span,
+     bool               atomic_discovery,
+     bool               mt_discovery,
+     BoolObjectClosure* is_alive_non_header = NULL,
+-    int                parallel_gc_threads = 1, 
++    int                parallel_gc_threads = 1,
+     bool               mt_processing = false);
+ 
+   // RefDiscoveryPolicy values
+@@ -440,11 +437,11 @@
+   // Abstract tasks to execute.
+   class ProcessTask;
+   class EnqueueTask;
+-  
+-  // Executes a task using worker threads.  
++
++  // Executes a task using worker threads.
+   virtual void execute(ProcessTask& task) = 0;
+   virtual void execute(EnqueueTask& task) = 0;
+-  
++
+   // Switch to single threaded mode.
+   virtual void set_single_threaded_mode() { };
+ };
+@@ -459,16 +456,16 @@
+       _refs_lists(refs_lists),
+       _marks_oops_alive(marks_oops_alive)
+   { }
+-    
++
+ public:
+   virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
+                     OopClosure& keep_alive,
+                     VoidClosure& complete_gc) = 0;
+-    
++
+   // Returns true if a task marks some oops as alive.
+   bool marks_oops_alive() const
+   { return _marks_oops_alive; }
+-    
++
+ protected:
+   ReferenceProcessor& _ref_processor;
+   DiscoveredList*     _refs_lists;
+@@ -481,7 +478,7 @@
+   EnqueueTask(ReferenceProcessor& ref_processor,
+               DiscoveredList      refs_lists[],
+               oop*                pending_list_addr,
+-              oop                 sentinel_ref, 
++              oop                 sentinel_ref,
+               int                 n_queues)
+     : _ref_processor(ref_processor),
+       _refs_lists(refs_lists),
+@@ -489,15 +486,14 @@
+       _sentinel_ref(sentinel_ref),
+       _n_queues(n_queues)
+   { }
+-    
++
+ public:
+   virtual void work(unsigned int work_id) = 0;
+-    
++
+ protected:
+   ReferenceProcessor& _ref_processor;
+   DiscoveredList*     _refs_lists;
+   oop*                _pending_list_addr;
+-  oop                 _sentinel_ref; 
++  oop                 _sentinel_ref;
+   int                 _n_queues;
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/memory/resourceArea.cpp openjdk/hotspot/src/share/vm/memory/resourceArea.cpp
+--- openjdk6/hotspot/src/share/vm/memory/resourceArea.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/resourceArea.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)resourceArea.cpp	1.57 07/05/05 17:05:55 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -33,7 +30,7 @@
+ 
+ // The following routines are declared in allocation.hpp and used everywhere:
+ 
+-// Allocation in thread-local resource area 
++// Allocation in thread-local resource area
+ extern char* resource_allocate_bytes(size_t size) {
+   return Thread::current()->resource_area()->allocate_bytes(size);
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/memory/resourceArea.hpp openjdk/hotspot/src/share/vm/memory/resourceArea.hpp
+--- openjdk6/hotspot/src/share/vm/memory/resourceArea.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/resourceArea.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)resourceArea.hpp	1.45 07/05/05 17:05:55 JVM"
+-#endif
+ /*
+  * Copyright 1997-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The resource area holds temporary data structures in the VM.
+@@ -42,20 +39,20 @@
+   friend class ResourceMark;
+   friend class DeoptResourceMark;
+   debug_only(int _nesting;)             // current # of nested ResourceMarks
+-  debug_only(static int _warned;)  	// to suppress multiple warnings
+-  	
++  debug_only(static int _warned;)       // to suppress multiple warnings
++
+ public:
+   ResourceArea() {
+-    debug_only(_nesting = 0;)    
++    debug_only(_nesting = 0;)
+   }
+ 
+   ResourceArea(size_t init_size) : Arena(init_size) {
+-    debug_only(_nesting = 0;);    
++    debug_only(_nesting = 0;);
+   }
+ 
+   char* allocate_bytes(size_t size) {
+ #ifdef ASSERT
+-    if (_nesting < 1 && !_warned++) 
++    if (_nesting < 1 && !_warned++)
+       fatal("memory leak: allocating without ResourceMark");
+     if (UseMallocOnly) {
+       // use malloc, but save pointer in res. area for later freeing
+@@ -66,7 +63,7 @@
+     return (char*)Amalloc(size);
+   }
+ 
+-  debug_only(int nesting() const { return _nesting; });  
++  debug_only(int nesting() const { return _nesting; });
+ };
+ 
+ 
+@@ -75,8 +72,8 @@
+ // when the destructor is called.  Typically used as a local variable.
+ class ResourceMark: public StackObj {
+ protected:
+-  ResourceArea *_area;		// Resource area to stack allocate
+-  Chunk *_chunk;		// saved arena chunk
++  ResourceArea *_area;          // Resource area to stack allocate
++  Chunk *_chunk;                // saved arena chunk
+   char *_hwm, *_max;
+   NOT_PRODUCT(size_t _size_in_bytes;)
+ 
+@@ -87,7 +84,7 @@
+     _max= _area->_max;
+     NOT_PRODUCT(_size_in_bytes = _area->size_in_bytes();)
+     debug_only(_area->_nesting++;)
+-    assert( _area->_nesting > 0, "must stack allocate RMs" ); 
++    assert( _area->_nesting > 0, "must stack allocate RMs" );
+   }
+ 
+  public:
+@@ -103,19 +100,19 @@
+ 
+   ResourceMark()               { initialize(Thread::current()); }
+ 
+-  ResourceMark( ResourceArea *r ) : 
++  ResourceMark( ResourceArea *r ) :
+     _area(r), _chunk(r->_chunk), _hwm(r->_hwm), _max(r->_max) {
+     NOT_PRODUCT(_size_in_bytes = _area->size_in_bytes();)
+     debug_only(_area->_nesting++;)
+-    assert( _area->_nesting > 0, "must stack allocate RMs" ); 
++    assert( _area->_nesting > 0, "must stack allocate RMs" );
+   }
+ 
+-  void reset_to_mark() { 
++  void reset_to_mark() {
+     if (UseMallocOnly) free_malloced_objects();
+ 
+-    if( _chunk->next() ) 	// Delete later chunks
++    if( _chunk->next() )        // Delete later chunks
+       _chunk->next_chop();
+-    _area->_chunk = _chunk;	// Roll back arena to saved chunk
++    _area->_chunk = _chunk;     // Roll back arena to saved chunk
+     _area->_hwm = _hwm;
+     _area->_max = _max;
+ 
+@@ -124,13 +121,13 @@
+     _area->set_size_in_bytes(size_in_bytes());
+   }
+ 
+-  ~ResourceMark() { 
++  ~ResourceMark() {
+     assert( _area->_nesting > 0, "must stack allocate RMs" );
+     debug_only(_area->_nesting--;)
+-    reset_to_mark(); 
++    reset_to_mark();
+   }
+ 
+-  
++
+  private:
+   void free_malloced_objects()                                         PRODUCT_RETURN;
+   size_t size_in_bytes()       NOT_PRODUCT({ return _size_in_bytes; }) PRODUCT_RETURN0;
+@@ -167,8 +164,8 @@
+ 
+ class DeoptResourceMark: public CHeapObj {
+ protected:
+-  ResourceArea *_area;		// Resource area to stack allocate
+-  Chunk *_chunk;		// saved arena chunk
++  ResourceArea *_area;          // Resource area to stack allocate
++  Chunk *_chunk;                // saved arena chunk
+   char *_hwm, *_max;
+   NOT_PRODUCT(size_t _size_in_bytes;)
+ 
+@@ -179,7 +176,7 @@
+     _max= _area->_max;
+     NOT_PRODUCT(_size_in_bytes = _area->size_in_bytes();)
+     debug_only(_area->_nesting++;)
+-    assert( _area->_nesting > 0, "must stack allocate RMs" ); 
++    assert( _area->_nesting > 0, "must stack allocate RMs" );
+   }
+ 
+  public:
+@@ -195,19 +192,19 @@
+ 
+   DeoptResourceMark()               { initialize(Thread::current()); }
+ 
+-  DeoptResourceMark( ResourceArea *r ) : 
++  DeoptResourceMark( ResourceArea *r ) :
+     _area(r), _chunk(r->_chunk), _hwm(r->_hwm), _max(r->_max) {
+     NOT_PRODUCT(_size_in_bytes = _area->size_in_bytes();)
+     debug_only(_area->_nesting++;)
+-    assert( _area->_nesting > 0, "must stack allocate RMs" ); 
++    assert( _area->_nesting > 0, "must stack allocate RMs" );
+   }
+ 
+-  void reset_to_mark() { 
++  void reset_to_mark() {
+     if (UseMallocOnly) free_malloced_objects();
+ 
+-    if( _chunk->next() ) 	// Delete later chunks
++    if( _chunk->next() )        // Delete later chunks
+       _chunk->next_chop();
+-    _area->_chunk = _chunk;	// Roll back arena to saved chunk
++    _area->_chunk = _chunk;     // Roll back arena to saved chunk
+     _area->_hwm = _hwm;
+     _area->_max = _max;
+ 
+@@ -216,13 +213,13 @@
+     _area->set_size_in_bytes(size_in_bytes());
+   }
+ 
+-  ~DeoptResourceMark() { 
++  ~DeoptResourceMark() {
+     assert( _area->_nesting > 0, "must stack allocate RMs" );
+     debug_only(_area->_nesting--;)
+-    reset_to_mark(); 
++    reset_to_mark();
+   }
+ 
+-  
++
+  private:
+   void free_malloced_objects()                                         PRODUCT_RETURN;
+   size_t size_in_bytes()       NOT_PRODUCT({ return _size_in_bytes; }) PRODUCT_RETURN0;
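+
+The ResourceMark hunks above are whitespace cleanups, but the pattern the
+constructor/destructor bodies describe is worth a short usage sketch.
+Assuming the usual HotSpot allocation macro NEW_RESOURCE_ARRAY, which bottoms
+out in the resource_allocate_bytes shown in the resourceArea.cpp hunk above:
+
+    void example() {
+      ResourceMark rm;   // snapshots _chunk/_hwm/_max of the resource area
+      // Arena allocation: no explicit free; valid until rm goes out of scope.
+      char* buf = NEW_RESOURCE_ARRAY(char, 256);
+      buf[0] = '\0';
+      // ... use buf ...
+    }  // ~ResourceMark() calls reset_to_mark(), rolling the arena back
+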
+diff -ruN openjdk6/hotspot/src/share/vm/memory/restore.cpp openjdk/hotspot/src/share/vm/memory/restore.cpp
+--- openjdk6/hotspot/src/share/vm/memory/restore.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/restore.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)restore.cpp	1.14 07/05/05 17:05:44 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -173,23 +170,23 @@
+   // are always added at the beginning of the linked lists, THESE LINKED
+   // LIST ELEMENTS ARE READ-ONLY.
+ 
+-  int len = *(intptr_t*)buffer;	// skip over symbol table entries
++  int len = *(intptr_t*)buffer; // skip over symbol table entries
+   buffer += sizeof(intptr_t);
+   buffer += len;
+ 
+-  len = *(intptr_t*)buffer;	// skip over string table entries
++  len = *(intptr_t*)buffer;     // skip over string table entries
+   buffer += sizeof(intptr_t);
+   buffer += len;
+ 
+-  len = *(intptr_t*)buffer;	// skip over shared dictionary entries
++  len = *(intptr_t*)buffer;     // skip over shared dictionary entries
+   buffer += sizeof(intptr_t);
+   buffer += len;
+ 
+-  len = *(intptr_t*)buffer;	// skip over package info table entries
++  len = *(intptr_t*)buffer;     // skip over package info table entries
+   buffer += sizeof(intptr_t);
+   buffer += len;
+ 
+-  len = *(intptr_t*)buffer;	// skip over package info table char[] arrays.
++  len = *(intptr_t*)buffer;     // skip over package info table char[] arrays.
+   buffer += sizeof(intptr_t);
+   buffer += len;
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/memory/serialize.cpp openjdk/hotspot/src/share/vm/memory/serialize.cpp
+--- openjdk6/hotspot/src/share/vm/memory/serialize.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/serialize.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)serialize.cpp	1.9 07/05/05 17:05:55 JVM"
+-#endif
+ /*
+  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/memory/sharedHeap.cpp openjdk/hotspot/src/share/vm/memory/sharedHeap.cpp
+--- openjdk6/hotspot/src/share/vm/memory/sharedHeap.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/sharedHeap.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)sharedHeap.cpp	1.59 07/05/17 15:55:10 JVM"
+-#endif
+ /*
+  * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -86,17 +83,17 @@
+ void SharedHeap::change_strong_roots_parity() {
+   // Also set the new collection parity.
+   assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
+-	 "Not in range.");
++         "Not in range.");
+   _strong_roots_parity++;
+   if (_strong_roots_parity == 3) _strong_roots_parity = 1;
+   assert(_strong_roots_parity >= 1 && _strong_roots_parity <= 2,
+-	 "Not in range.");
++         "Not in range.");
+ }
+ 
+ void SharedHeap::process_strong_roots(bool collecting_perm_gen,
+-				      ScanningOption so,
+-				      OopClosure* roots,
+-				      OopsInGenClosure* perm_blk) {
++                                      ScanningOption so,
++                                      OopClosure* roots,
++                                      OopsInGenClosure* perm_blk) {
+   // General strong roots.
+   if (n_par_threads() == 0) change_strong_roots_parity();
+   if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
+@@ -121,7 +118,7 @@
+   if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
+     Management::oops_do(roots);
+   if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
+-    JvmtiExport::oops_do(roots); 
++    JvmtiExport::oops_do(roots);
+ 
+   if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
+     if (so & SO_AllClasses) {
+@@ -166,7 +163,7 @@
+     }
+     if (blk != NULL) {
+       if (!_process_strong_tasks->is_task_claimed(SH_PS_vmSymbols_oops_do))
+-	vmSymbols::oops_do(blk);
++        vmSymbols::oops_do(blk);
+     }
+   }
+ 
+@@ -212,7 +209,7 @@
+ // just skip adjusting any shared entries in the string table.
+ 
+ void SharedHeap::process_weak_roots(OopClosure* root_closure,
+-				    OopClosure* non_root_closure) {
++                                    OopClosure* non_root_closure) {
+   // Global (weak) JNI handles
+   JNIHandles::weak_oops_do(&always_true, root_closure);
+ 
+@@ -244,8 +241,8 @@
+   // Disable the posting of JVMTI VMObjectAlloc events as we
+   // don't want the filling of tlabs with filler arrays to be
+   // reported to the profiler.
+-  NoJvmtiVMObjectAllocMark njm;    
+-  
++  NoJvmtiVMObjectAllocMark njm;
++
+   // Disable low memory detector because there is no real allocation.
+   LowMemoryDetectorDisabler lmd_dis;
+ 
+@@ -265,9 +262,9 @@
+     const size_t array_length_words =
+       array_length * (HeapWordSize/sizeof(jint));
+     post_allocation_setup_array(Universe::intArrayKlassObj(),
+-				mr.start(),
+-				mr.word_size(),
+-				(int)array_length_words);
++                                mr.start(),
++                                mr.word_size(),
++                                (int)array_length_words);
+ #ifdef ASSERT
+     HeapWord* elt_words = (mr.start() + typeArrayOopDesc::header_size(T_INT));
+     Copy::fill_to_words(elt_words, array_length, 0xDEAFBABE);
+@@ -275,20 +272,20 @@
+   } else {
+     assert(word_size == (size_t)oopDesc::header_size(), "Unaligned?");
+     post_allocation_setup_obj(SystemDictionary::object_klass(),
+-			      mr.start(),
+-			      mr.word_size());
++                              mr.start(),
++                              mr.word_size());
+   }
+ }
+ 
+ // Some utilities.
+ void SharedHeap::print_size_transition(size_t bytes_before,
+-				       size_t bytes_after,
+-				       size_t capacity) {
++                                       size_t bytes_after,
++                                       size_t capacity) {
+   tty->print(" %d%s->%d%s(%d%s)",
+-	     byte_size_in_proper_unit(bytes_before),
+-	     proper_unit_for_byte_size(bytes_before),
+-	     byte_size_in_proper_unit(bytes_after),
+-	     proper_unit_for_byte_size(bytes_after),
+-	     byte_size_in_proper_unit(capacity),
+-	     proper_unit_for_byte_size(capacity));  
++             byte_size_in_proper_unit(bytes_before),
++             proper_unit_for_byte_size(bytes_before),
++             byte_size_in_proper_unit(bytes_after),
++             proper_unit_for_byte_size(bytes_after),
++             byte_size_in_proper_unit(capacity),
++             proper_unit_for_byte_size(capacity));
+ }
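The hunk above re-indents SharedHeap's filler-array code: a free gap measured in whole heap words is described as an int[] whose length field counts jints, which is why the payload is scaled by jints-per-word. A minimal sketch of that length arithmetic, using plain C++ stand-ins for the HotSpot types (filler_array_length and header_words are illustrative names, not HotSpot's):

#include <cstddef>

// Number of jint elements needed so that header + payload exactly covers
// a gap of 'gap_words' heap words. 'header_words' stands in for
// typeArrayOopDesc::header_size(T_INT); sizeof(void*)/sizeof(int) stands
// in for HeapWordSize/sizeof(jint).
size_t filler_array_length(size_t gap_words, size_t header_words) {
    const size_t ints_per_word = sizeof(void*) / sizeof(int);
    return (gap_words - header_words) * ints_per_word;
}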
+diff -ruN openjdk6/hotspot/src/share/vm/memory/sharedHeap.hpp openjdk/hotspot/src/share/vm/memory/sharedHeap.hpp
+--- openjdk6/hotspot/src/share/vm/memory/sharedHeap.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/sharedHeap.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)sharedHeap.hpp	1.56 07/05/05 17:05:55 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A "SharedHeap" is an implementation of a java heap for HotSpot.  This
+@@ -144,20 +141,20 @@
+   // Such a call will involve claiming some fine-grained tasks, such as
+   // scanning of threads.  To make this process simpler, we provide the
+   // "strong_roots_parity()" method.  Collectors that start parallel tasks
+-  // whose threads invoke "process_strong_roots" must 
++  // whose threads invoke "process_strong_roots" must
+   // call "change_strong_roots_parity" in sequential code starting such a
+   // task.  (This also means that a parallel thread may only call
+   // process_strong_roots once.)
+-  // 
++  //
+   // For calls to process_strong_roots by sequential code, the parity is
+   // updated automatically.
+-  // 
++  //
+   // The idea is that objects representing fine-grained tasks, such as
+-  // threads, will contain a "parity" field.  A task will is claimed in the 
++  // threads, will contain a "parity" field.  A task is claimed in the
+   // current "process_strong_roots" call only if its parity field is the
+   // same as the "strong_roots_parity"; task claiming is accomplished by
+   // updating the parity field to the strong_roots_parity with a CAS.
+-  // 
++  //
+   // If the client meets this spec, then strong_roots_parity() will have
+   // the following properties:
+   //   a) to return a different value than was returned before the last
+@@ -199,16 +196,16 @@
+   // "SO_Strings" applies the closure to all entries in StringTable;
+   // "SO_CodeCache" applies the closure to all elements of the CodeCache.
+   void process_strong_roots(bool collecting_perm_gen,
+-			    ScanningOption so,
+-			    OopClosure* roots,
+-			    OopsInGenClosure* perm_blk);
++                            ScanningOption so,
++                            OopClosure* roots,
++                            OopsInGenClosure* perm_blk);
+ 
+   // Apply "blk" to all the weak roots of the system.  These include
+   // JNI weak roots, the code cache, system dictionary, symbol table,
+   // string table.
+   void process_weak_roots(OopClosure* root_closure,
+-			  OopClosure* non_root_closure);
+-			  
++                          OopClosure* non_root_closure);
++
+ 
+   // Like CollectedHeap::collect, but assume that the caller holds the Heap_lock.
+   virtual void collect_locked(GCCause::Cause cause) = 0;
+@@ -245,7 +242,7 @@
+ 
+   // Different from is_in_permanent in that is_in_permanent
+   // only checks if p is in the reserved area of the heap
+-  // and this checks to see if it in the commited area. 
++  // and this checks to see if it is in the committed area.
+   // This is typically used by things like the forte stackwalker
+   // during verification of suspicious frame values.
+   bool is_permanent(const void *p) const {
+@@ -270,8 +267,6 @@
+ 
+   // Some utilities.
+   void print_size_transition(size_t bytes_before,
+-			     size_t bytes_after,
+-			     size_t capacity);
++                             size_t bytes_after,
++                             size_t capacity);
+ };
+-
+-
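The parity comments above compress a lot. A minimal sketch of the claiming protocol they describe, using std::atomic in place of HotSpot's Atomic and with Task, try_claim, and next_parity as assumed names:

#include <atomic>

struct Task {
    std::atomic<int> parity{0};        // 0 = never claimed

    // Claim succeeds only for the one thread that flips the field to the
    // current global parity; a failed CAS means another thread owns it.
    bool try_claim(int strong_roots_parity) {
        int seen = parity.load(std::memory_order_relaxed);
        return seen != strong_roots_parity &&
               parity.compare_exchange_strong(seen, strong_roots_parity);
    }
};

// The global parity cycles 1 -> 2 -> 1, mirroring change_strong_roots_parity
// above: a task claimed in the previous round becomes claimable again in the
// next round without any per-task reset.
int next_parity(int p) { return p == 2 ? 1 : p + 1; }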
+diff -ruN openjdk6/hotspot/src/share/vm/memory/space.cpp openjdk/hotspot/src/share/vm/memory/space.cpp
+--- openjdk6/hotspot/src/share/vm/memory/space.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/space.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)space.cpp	1.217 07/05/29 09:44:13 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,30 +19,30 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_space.cpp.incl"
+ 
+ HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
+-						HeapWord* top_obj) {
++                                                HeapWord* top_obj) {
+   if (top_obj != NULL) {
+     if (_sp->block_is_obj(top_obj)) {
+       if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
+-	if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
+-	  // An arrayOop is starting on the dirty card - since we do exact
+-	  // store checks for objArrays we are done.
+-	} else {
+-	  // Otherwise, it is possible that the object starting on the dirty
+-	  // card spans the entire card, and that the store happened on a
+-	  // later card.  Figure out where the object ends.
+-          // Use the block_size() method of the space over which 
+-          // the iteration is being done.  That space (e.g. CMS) may have 
+-          // specific requirements on object sizes which will 
+-          // be reflected in the block_size() method. 
+-	  top = top_obj + oop(top_obj)->size();
+-	}
++        if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
++          // An arrayOop is starting on the dirty card - since we do exact
++          // store checks for objArrays we are done.
++        } else {
++          // Otherwise, it is possible that the object starting on the dirty
++          // card spans the entire card, and that the store happened on a
++          // later card.  Figure out where the object ends.
++          // Use the block_size() method of the space over which
++          // the iteration is being done.  That space (e.g. CMS) may have
++          // specific requirements on object sizes which will
++          // be reflected in the block_size() method.
++          top = top_obj + oop(top_obj)->size();
++        }
+       }
+     } else {
+       top = top_obj;
+@@ -57,8 +54,8 @@
+ }
+ 
+ void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
+-					    HeapWord* bottom,
+-					    HeapWord* top) {
++                                            HeapWord* bottom,
++                                            HeapWord* top) {
+   // 1. Blocks may or may not be objects.
+   // 2. Even when a block_is_obj(), it may not entirely
+   //    occupy the block if the block quantum is larger than
+@@ -75,7 +72,7 @@
+     // "adjust the object size" (for instance pad it up to its
+     // block alignment or minimum block size restrictions. XXX
+     if (_sp->block_is_obj(bottom) &&
+-	!_sp->obj_allocated_since_save_marks(oop(bottom))) {
++        !_sp->obj_allocated_since_save_marks(oop(bottom))) {
+       oop(bottom)->oop_iterate(_cl, mr);
+     }
+   }
+@@ -103,13 +100,13 @@
+   assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
+          _precision == CardTableModRefBS::Precise,
+          "Only ones we deal with for now.");
+-  
++
+   assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
+-	 _last_bottom == NULL ||
+-	 top <= _last_bottom,
+-	 "Not decreasing");
++         _last_bottom == NULL ||
++         top <= _last_bottom,
++         "Not decreasing");
+   NOT_PRODUCT(_last_bottom = mr.start());
+-  
++
+   bottom_obj = _sp->block_start(bottom);
+   top_obj    = _sp->block_start(last);
+ 
+@@ -120,24 +117,24 @@
+   // the start of the object at the top, get the actual
+   // value of the top.
+   top = get_actual_top(top, top_obj);
+-  
++
+   // If the previous call did some part of this region, don't redo.
+   if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
+       _min_done != NULL &&
+       _min_done < top) {
+     top = _min_done;
+   }
+-  
++
+   // Top may have been reset, and in fact may be below bottom,
+   // e.g. the dirty card region is entirely in a now free object
+   // -- something that could happen with a concurrent sweeper.
+   bottom = MIN2(bottom, top);
+   mr     = MemRegion(bottom, top);
+   assert(bottom <= top &&
+-	 (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
+-	  _min_done == NULL ||
+-	  top <= _min_done),
+-	 "overlap!");
++         (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
++          _min_done == NULL ||
++          top <= _min_done),
++         "overlap!");
+ 
+   // Walk the region if it is not empty; otherwise there is nothing to do.
+   if (!mr.is_empty()) {
+@@ -148,8 +145,8 @@
+ }
+ 
+ DirtyCardToOopClosure* Space::new_dcto_cl(OopClosure* cl,
+-					  CardTableModRefBS::PrecisionStyle precision,
+-					  HeapWord* boundary) {
++                                          CardTableModRefBS::PrecisionStyle precision,
++                                          HeapWord* boundary) {
+   return new DirtyCardToOopClosure(this, cl, precision, boundary);
+ }
+ 
+@@ -158,19 +155,19 @@
+ }
+ 
+ HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
+-					       HeapWord* top_obj) {
++                                               HeapWord* top_obj) {
+   if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
+     if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
+       if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
+-	// An arrayOop is starting on the dirty card - since we do exact
+-	// store checks for objArrays we are done.
++        // An arrayOop is starting on the dirty card - since we do exact
++        // store checks for objArrays we are done.
+       } else {
+-	// Otherwise, it is possible that the object starting on the dirty
+-	// card spans the entire card, and that the store happened on a
+-	// later card.  Figure out where the object ends.
+-	assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(), 
+-	  "Block size and object size mismatch");
+-	top = top_obj + oop(top_obj)->size();
++        // Otherwise, it is possible that the object starting on the dirty
++        // card spans the entire card, and that the store happened on a
++        // later card.  Figure out where the object ends.
++        assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
++          "Block size and object size mismatch");
++        top = top_obj + oop(top_obj)->size();
+       }
+     }
+   } else {
+@@ -180,8 +177,8 @@
+ }
+ 
+ void Filtering_DCTOC::walk_mem_region(MemRegion mr,
+-				      HeapWord* bottom,
+-				      HeapWord* top) {
++                                      HeapWord* bottom,
++                                      HeapWord* top) {
+   // Note that this assumption won't hold if we have a concurrent
+   // collector in this space, which may have freed up objects after
+   // they were dirtied and before the stop-the-world GC that is
+@@ -203,24 +200,24 @@
+ 
+ // We must replicate this so that the static type of "FilteringClosure"
+ // (see above) is apparent at the oop_iterate calls.
+-#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType)	\
+-void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,	\
+-						   HeapWord* bottom,	\
+-						   HeapWord* top,	\
+-						   ClosureType* cl) {	\
+-  bottom += oop(bottom)->oop_iterate(cl, mr);				\
+-  if (bottom < top) {							\
+-    HeapWord* next_obj = bottom + oop(bottom)->size();			\
+-    while (next_obj < top) {						\
+-      /* Bottom lies entirely below top, so we can call the */		\
+-      /* non-memRegion version of oop_iterate below. */			\
+-      oop(bottom)->oop_iterate(cl);					\
+-      bottom = next_obj;						\
+-      next_obj = bottom + oop(bottom)->size();				\
+-    }									\
+-    /* Last object. */							\
+-    oop(bottom)->oop_iterate(cl, mr);					\
+-  }									\
++#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
++void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,        \
++                                                   HeapWord* bottom,    \
++                                                   HeapWord* top,       \
++                                                   ClosureType* cl) {   \
++  bottom += oop(bottom)->oop_iterate(cl, mr);                           \
++  if (bottom < top) {                                                   \
++    HeapWord* next_obj = bottom + oop(bottom)->size();                  \
++    while (next_obj < top) {                                            \
++      /* Bottom lies entirely below top, so we can call the */          \
++      /* non-memRegion version of oop_iterate below. */                 \
++      oop(bottom)->oop_iterate(cl);                                     \
++      bottom = next_obj;                                                \
++      next_obj = bottom + oop(bottom)->size();                          \
++    }                                                                   \
++    /* Last object. */                                                  \
++    oop(bottom)->oop_iterate(cl, mr);                                   \
++  }                                                                     \
+ }
+ 
+ // (There are only two of these, rather than N, because the split is due
+@@ -231,8 +228,8 @@
+ 
+ DirtyCardToOopClosure*
+ ContiguousSpace::new_dcto_cl(OopClosure* cl,
+-			     CardTableModRefBS::PrecisionStyle precision,
+-			     HeapWord* boundary) {
++                             CardTableModRefBS::PrecisionStyle precision,
++                             HeapWord* boundary) {
+   return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
+ }
+ 
+@@ -307,8 +304,8 @@
+   _next_compaction_space = NULL;
+ }
+ 
+-HeapWord* CompactibleSpace::forward(oop q, size_t size, 
+-				    CompactPoint* cp, HeapWord* compact_top) {
++HeapWord* CompactibleSpace::forward(oop q, size_t size,
++                                    CompactPoint* cp, HeapWord* compact_top) {
+   // q is alive
+   // First check if we should switch compaction space
+   assert(this == cp->space, "'this' should be current compaction space.");
+@@ -335,7 +332,7 @@
+     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
+   } else {
+     // if the object isn't moving we can just set the mark to the default
+-    // mark and handle it specially later on.  
++    // mark and handle it specially later on.
+     q->init_mark();
+     assert(q->forwardee() == NULL, "should be forwarded to NULL");
+   }
+@@ -354,7 +351,7 @@
+ 
+ 
+ bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
+-					HeapWord* q, size_t deadlength) {
++                                        HeapWord* q, size_t deadlength) {
+   if (allowed_deadspace_words >= deadlength) {
+     allowed_deadspace_words -= deadlength;
+     oop(q)->set_mark(markOopDesc::prototype()->set_marked());
+@@ -365,11 +362,11 @@
+                                             * (HeapWordSize/sizeof(jint))));
+     } else {
+       assert((int) deadlength == instanceOopDesc::header_size(),
+-	     "size for smallest fake dead object doesn't match");
++             "size for smallest fake dead object doesn't match");
+       oop(q)->set_klass(SystemDictionary::object_klass());
+     }
+     assert((int) deadlength == oop(q)->size(),
+-	   "make sure size for fake dead object match");
++           "make sure size for fake dead object match");
+     // Recall that we required "q == compaction_top".
+     return true;
+   } else {
+@@ -408,15 +405,15 @@
+   while (q < t) {
+     if (oop(q)->is_gc_marked()) {
+       // q is alive
+-	
++
+       debug_only(MarkSweep::track_interior_pointers(oop(q)));
+       // point all the oops to the new location
+       size_t size = oop(q)->adjust_pointers();
+       debug_only(MarkSweep::check_interior_pointers());
+-      
++
+       debug_only(prev_q = q);
+       debug_only(MarkSweep::validate_live_oop(oop(q), size));
+-	
++
+       q += size;
+     } else {
+       // q is not a live object.  But we're not in a compactible space,
+@@ -445,7 +442,7 @@
+ void Space::print_short() const { print_short_on(tty); }
+ 
+ void Space::print_short_on(outputStream* st) const {
+-  st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K, 
++  st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
+               (int) ((double) used() * 100 / capacity()));
+ }
+ 
+@@ -456,13 +453,13 @@
+   st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
+                 bottom(), end());
+ }
+-  
++
+ void ContiguousSpace::print_on(outputStream* st) const {
+   print_short_on(st);
+   st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
+                 bottom(), top(), end());
+ }
+-  
++
+ void OffsetTableContigSpace::print_on(outputStream* st) const {
+   print_short_on(st);
+   st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
+@@ -482,8 +479,8 @@
+   guarantee(p == top(), "end of last object must match end of space");
+   if (top() != end()) {
+     guarantee(top() == block_start(end()-1) &&
+-              top() == block_start(top()), 
+-	      "top should be start of unallocated block, if it exists");
++              top() == block_start(top()),
++              "top should be start of unallocated block, if it exists");
+   }
+ }
+ 
+@@ -706,7 +703,7 @@
+     }                                                                     \
+   } while (t < top());                                                    \
+                                                                           \
+-  set_saved_mark_word(p);           	                                  \
++  set_saved_mark_word(p);                                                 \
+ }
+ 
+ ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN)
+@@ -733,19 +730,19 @@
+ size_t ContiguousSpace::block_size(const HeapWord* p) const {
+   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
+   HeapWord* current_top = top();
+-  assert(p <= current_top, "p is not a block start"); 
+-  assert(p == current_top || oop(p)->is_oop(), "p is not a block start"); 
+-  if (p < current_top) 
++  assert(p <= current_top, "p is not a block start");
++  assert(p == current_top || oop(p)->is_oop(), "p is not a block start");
++  if (p < current_top)
+     return oop(p)->size();
+   else {
+-    assert(p == current_top, "just checking"); 
++    assert(p == current_top, "just checking");
+     return pointer_delta(end(), (HeapWord*) p);
+   }
+ }
+ 
+ // This version requires locking.
+ inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
+-						HeapWord* const end_value) {
++                                                HeapWord* const end_value) {
+   assert(Heap_lock->owned_by_self() ||
+          (SafepointSynchronize::is_at_safepoint() &&
+           Thread::current()->is_VM_thread()),
+@@ -763,7 +760,7 @@
+ 
+ // This version is lock-free.
+ inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size,
+-						    HeapWord* const end_value) {
++                                                    HeapWord* const end_value) {
+   do {
+     HeapWord* obj = top();
+     if (pointer_delta(end_value, obj) >= size) {
+@@ -773,8 +770,8 @@
+       //  the old top value: the exchange succeeded
+       //  otherwise: the new value of the top is returned.
+       if (result == obj) {
+-	assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
+-	return obj;
++        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
++        return obj;
+       }
+     } else {
+       return NULL;
+@@ -793,7 +790,7 @@
+ }
+ 
+ void ContiguousSpace::allocate_temporary_filler(int factor) {
+-  // allocate temporary type array decreasing free size with factor 'factor' 
++  // allocate temporary type array decreasing free size with factor 'factor'
+   assert(factor >= 0, "just checking");
+   size_t size = pointer_delta(end(), top());
+ 
+@@ -856,8 +853,8 @@
+       //  the old top value: the exchange succeeded
+       //  otherwise: the new value of the top is returned.
+       if (result == obj) {
+-	assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
+-	return obj;
++        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
++        return obj;
+       }
+     } else {
+       return NULL;
+@@ -876,7 +873,7 @@
+ }
+ 
+ OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
+-					       MemRegion mr) :
++                                               MemRegion mr) :
+   _offsets(sharedOffsetArray, mr),
+   _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
+ {
+@@ -900,7 +897,7 @@
+ void OffsetTableContigSpace::verify(bool allow_dirty) const {
+   HeapWord* p = bottom();
+   HeapWord* prev_p = NULL;
+-  VerifyOldOopClosure blk; 	// Does this do anything?
++  VerifyOldOopClosure blk;      // Does this do anything?
+   blk.allow_dirty = allow_dirty;
+   int objs = 0;
+   int blocks = 0;
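ContiguousSpace::par_allocate_impl in the hunk above is the classic lock-free bump-pointer loop. A self-contained sketch of the same shape, with char* standing in for HeapWord* and par_allocate as an assumed name:

#include <atomic>
#include <cstddef>

// Bump 'top' with a CAS; retry on contention, fail only when the space
// between top and the hard end is too small.
char* par_allocate(std::atomic<char*>& top, char* end, size_t bytes) {
    for (;;) {
        char* obj = top.load(std::memory_order_relaxed);
        if (static_cast<size_t>(end - obj) < bytes)
            return nullptr;                     // space exhausted
        char* new_top = obj + bytes;
        // If 'top' is still 'obj' the exchange succeeds and 'obj' is ours;
        // otherwise another thread won and we retry with the fresh top.
        if (top.compare_exchange_weak(obj, new_top))
            return obj;
    }
}

A successful compare-exchange hands the old top to exactly one thread; every loser observes the updated top and retries, so no lock is needed on the fast path.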
+diff -ruN openjdk6/hotspot/src/share/vm/memory/space.hpp openjdk/hotspot/src/share/vm/memory/space.hpp
+--- openjdk6/hotspot/src/share/vm/memory/space.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/space.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)space.hpp	1.149 07/05/29 09:44:14 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A space is an abstraction for the "storage units" backing
+@@ -38,7 +35,7 @@
+ //     - ContiguousSpace -- a compactible space in which all free space
+ //                          is contiguous
+ //       - EdenSpace     -- contiguous space used as nursery
+-//         - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation 
++//         - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
+ //       - OffsetTableContigSpace -- contiguous space with a block offset array
+ //                          that allows "fast" block_start calls
+ //         - TenuredSpace -- (used for TenuredGeneration)
+@@ -76,7 +73,7 @@
+ // Space supports allocation, size computation and GC support is provided.
+ //
+ // Invariant: bottom() and end() are on page_size boundaries and
+-// bottom() <= top() <= end() 
++// bottom() <= top() <= end()
+ // top() is inclusive and end() is exclusive.
+ 
+ class Space: public CHeapObj {
+@@ -119,7 +116,7 @@
+   // the space.
+   virtual MemRegion used_region() const { return MemRegion(bottom(), end()); }
+ 
+-  // Returns a region that is guaranteed to contain (at least) all objects 
++  // Returns a region that is guaranteed to contain (at least) all objects
+   // allocated at the time of the last call to "save_marks".  If the space
+   // initializes its DirtyCardToOopClosure's specifying the "contig" option
+   // (that is, if the space is contiguous), then this region must contain only
+@@ -280,13 +277,13 @@
+   OopClosure* _cl;
+   Space* _sp;
+   CardTableModRefBS::PrecisionStyle _precision;
+-  HeapWord* _boundary;		// If non-NULL, process only non-NULL oops 
++  HeapWord* _boundary;          // If non-NULL, process only non-NULL oops
+                                 // pointing below boundary.
+-  HeapWord* _min_done;		// ObjHeadPreciseArray precision requires
+-				// a downwards traversal; this is the
+-				// lowest location already done (or,
+-				// alternatively, the lowest address that
+-				// shouldn't be done again.  NULL means infinity.)
++  HeapWord* _min_done;          // ObjHeadPreciseArray precision requires
++                                // a downwards traversal; this is the
++                                // lowest location already done (or,
++                                // alternatively, the lowest address that
++                                // shouldn't be done again.  NULL means infinity.)
+   NOT_PRODUCT(HeapWord* _last_bottom;)
+ 
+   // Get the actual top of the area on which the closure will
+@@ -304,11 +301,11 @@
+   // classes should override this to provide more accurate
+   // or possibly more efficient walking.
+   virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);
+-  
++
+ public:
+   DirtyCardToOopClosure(Space* sp, OopClosure* cl,
+-			CardTableModRefBS::PrecisionStyle precision,
+-			HeapWord* boundary) :
++                        CardTableModRefBS::PrecisionStyle precision,
++                        HeapWord* boundary) :
+     _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
+     _min_done(NULL) {
+     NOT_PRODUCT(_last_bottom = NULL;)
+@@ -401,7 +398,7 @@
+   // The maximum percentage of objects that can be dead in the compacted
+   // live part of a compacted space ("deadwood" support.)
+   virtual int allowed_dead_ratio() const { return 0; };
+-  
++
+   // Some contiguous spaces may maintain some data structures that should
+   // be updated whenever an allocation crosses a boundary.  This function
+   // returns the first such boundary.
+@@ -422,7 +419,7 @@
+   // function of the then-current compaction space, and updates "cp->threshold
+   // accordingly".
+   virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
+-		    HeapWord* compact_top); 
++                    HeapWord* compact_top);
+ 
+   // Return a size with adjustments as required of the space.
+   virtual size_t adjust_object_size_v(size_t size) const { return size; }
+@@ -430,7 +427,7 @@
+ protected:
+   // Used during compaction.
+   HeapWord* _first_dead;
+-  HeapWord* _end_of_live; 
++  HeapWord* _end_of_live;
+ 
+   // Minimum size of a free block.
+   virtual size_t minimum_free_block_size() const = 0;
+@@ -450,7 +447,7 @@
+   // "allowed_deadspace_words" to reflect the number of available deadspace
+   // words remaining after this operation.
+   bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
+-			size_t word_len);
++                        size_t word_len);
+ };
+ 
+ #define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) {            \
+@@ -494,24 +491,24 @@
+   HeapWord* t = scan_limit();                                                \
+                                                                              \
+   HeapWord*  end_of_live= q;    /* One byte beyond the last byte of the last \
+-				   live object. */                           \
++                                   live object. */                           \
+   HeapWord*  first_dead = end();/* The first dead object. */                 \
+   LiveRange* liveRange  = NULL; /* The current live range, recorded in the   \
+-				   first header of preceding free area. */   \
++                                   first header of preceding free area. */   \
+   _first_dead = first_dead;                                                  \
+                                                                              \
+   const intx interval = PrefetchScanIntervalInBytes;                         \
+                                                                              \
+   while (q < t) {                                                            \
+     assert(!block_is_obj(q) ||                                               \
+-	   oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||   \
++           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||   \
+            oop(q)->mark()->has_bias_pattern(),                               \
+-	   "these are the only valid states during a mark sweep");           \
++           "these are the only valid states during a mark sweep");           \
+     if (block_is_obj(q) && oop(q)->is_gc_marked()) {                         \
+       /* prefetch beyond q */                                                \
+       Prefetch::write(q, interval);                                          \
+       /* size_t size = oop(q)->size();  changing this for cms for perm gen */\
+-      size_t size = block_size(q);					     \
++      size_t size = block_size(q);                                           \
+       compact_top = cp->space->forward(oop(q), size, cp, compact_top);       \
+       q += size;                                                             \
+       end_of_live = q;                                                       \
+@@ -521,27 +518,27 @@
+       do {                                                                   \
+         /* prefetch beyond end */                                            \
+         Prefetch::write(end, interval);                                      \
+-	end += block_size(end);                                              \
++        end += block_size(end);                                              \
+       } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
+                                                                              \
+       /* see if we might want to pretend this object is alive so that        \
+        * we don't have to compact quite as often.                            \
+        */                                                                    \
+       if (allowed_deadspace > 0 && q == compact_top) {                       \
+-	size_t sz = pointer_delta(end, q);                                   \
+-	if (insert_deadspace(allowed_deadspace, q, sz)) {                    \
+-	  compact_top = cp->space->forward(oop(q), sz, cp, compact_top);     \
+-	  q = end;                                                           \
+-	  end_of_live = end;                                                 \
+-	  continue;                                                          \
+-	}                                                                    \
++        size_t sz = pointer_delta(end, q);                                   \
++        if (insert_deadspace(allowed_deadspace, q, sz)) {                    \
++          compact_top = cp->space->forward(oop(q), sz, cp, compact_top);     \
++          q = end;                                                           \
++          end_of_live = end;                                                 \
++          continue;                                                          \
++        }                                                                    \
+       }                                                                      \
+                                                                              \
+       /* otherwise, it really is a free region. */                           \
+                                                                              \
+       /* for the previous LiveRange, record the end of the live objects. */  \
+       if (liveRange) {                                                       \
+-	liveRange->set_end(q);                                               \
++        liveRange->set_end(q);                                               \
+       }                                                                      \
+                                                                              \
+       /* record the current LiveRange object.                                \
+@@ -553,7 +550,7 @@
+                                                                              \
+       /* see if this is the first dead region. */                            \
+       if (q < first_dead) {                                                  \
+-	first_dead = q;                                                      \
++        first_dead = q;                                                      \
+       }                                                                      \
+                                                                              \
+       /* move on to the next object */                                       \
+@@ -575,147 +572,147 @@
+   cp->space->set_compaction_top(compact_top);                                \
+ }
+ 
+-#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) {				\
+-  /* adjust all the interior pointers to point at the new locations of objects	\
+-   * Used by MarkSweep::mark_sweep_phase3() */					\
+-										\
+-  HeapWord* q = bottom();							\
+-  HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */	\
+-										\
+-  assert(_first_dead <= _end_of_live, "Stands to reason, no?");			\
+-										\
+-  if (q < t && _first_dead > q &&						\
+-      !oop(q)->is_gc_marked()) {				                \
+-    /* we have a chunk of the space which hasn't moved and we've		\
+-     * reinitialized the mark word during the previous pass, so we can't	\
+-     * use is_gc_marked for the traversal. */					\
+-    HeapWord* end = _first_dead;						\
+-										\
+-    while (q < end) {								\
+-      /* I originally tried to conjoin "block_start(q) == q" to the		\
+-       * assertion below, but that doesn't work, because you can't		\
+-       * accurately traverse previous objects to get to the current one		\
+-       * after their pointers (including pointers into permGen) have been	\
+-       * updated, until the actual compaction is done.  dld, 4/00 */		\
+-      assert(block_is_obj(q),							\
+-	     "should be at block boundaries, and should be looking at objs");	\
+-										\
+-      debug_only(MarkSweep::track_interior_pointers(oop(q)));			\
+-										\
+-      /* point all the oops to the new location */				\
+-      size_t size = oop(q)->adjust_pointers();					\
+-      size = adjust_obj_size(size);						\
+-										\
+-      debug_only(MarkSweep::check_interior_pointers());				\
+-      										\
+-      debug_only(MarkSweep::validate_live_oop(oop(q), size));			\
+-      										\
+-      q += size;								\
+-    }										\
+-										\
+-    if (_first_dead == t) {							\
+-      q = t;									\
+-    } else {									\
+-      /* $$$ This is funky.  Using this to read the previously written		\
+-       * LiveRange.  See also use below. */					\
+-      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer();		\
+-    }										\
+-  }										\
+-										\
++#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) {                             \
++  /* adjust all the interior pointers to point at the new locations of objects  \
++   * Used by MarkSweep::mark_sweep_phase3() */                                  \
++                                                                                \
++  HeapWord* q = bottom();                                                       \
++  HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */   \
++                                                                                \
++  assert(_first_dead <= _end_of_live, "Stands to reason, no?");                 \
++                                                                                \
++  if (q < t && _first_dead > q &&                                               \
++      !oop(q)->is_gc_marked()) {                                                \
++    /* we have a chunk of the space which hasn't moved and we've                \
++     * reinitialized the mark word during the previous pass, so we can't        \
++     * use is_gc_marked for the traversal. */                                   \
++    HeapWord* end = _first_dead;                                                \
++                                                                                \
++    while (q < end) {                                                           \
++      /* I originally tried to conjoin "block_start(q) == q" to the             \
++       * assertion below, but that doesn't work, because you can't              \
++       * accurately traverse previous objects to get to the current one         \
++       * after their pointers (including pointers into permGen) have been       \
++       * updated, until the actual compaction is done.  dld, 4/00 */            \
++      assert(block_is_obj(q),                                                   \
++             "should be at block boundaries, and should be looking at objs");   \
++                                                                                \
++      debug_only(MarkSweep::track_interior_pointers(oop(q)));                   \
++                                                                                \
++      /* point all the oops to the new location */                              \
++      size_t size = oop(q)->adjust_pointers();                                  \
++      size = adjust_obj_size(size);                                             \
++                                                                                \
++      debug_only(MarkSweep::check_interior_pointers());                         \
++                                                                                \
++      debug_only(MarkSweep::validate_live_oop(oop(q), size));                   \
++                                                                                \
++      q += size;                                                                \
++    }                                                                           \
++                                                                                \
++    if (_first_dead == t) {                                                     \
++      q = t;                                                                    \
++    } else {                                                                    \
++      /* $$$ This is funky.  Using this to read the previously written          \
++       * LiveRange.  See also use below. */                                     \
++      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer();                \
++    }                                                                           \
++  }                                                                             \
++                                                                                \
+   const intx interval = PrefetchScanIntervalInBytes;                            \
+-										\
+-  debug_only(HeapWord* prev_q = NULL);						\
+-  while (q < t) {								\
+-    /* prefetch beyond q */							\
++                                                                                \
++  debug_only(HeapWord* prev_q = NULL);                                          \
++  while (q < t) {                                                               \
++    /* prefetch beyond q */                                                     \
+     Prefetch::write(q, interval);                                               \
+-    if (oop(q)->is_gc_marked()) {						\
+-      /* q is alive */								\
+-      debug_only(MarkSweep::track_interior_pointers(oop(q)));			\
+-      /* point all the oops to the new location */				\
+-      size_t size = oop(q)->adjust_pointers();					\
+-      size = adjust_obj_size(size);						\
+-      debug_only(MarkSweep::check_interior_pointers());				\
+-      debug_only(MarkSweep::validate_live_oop(oop(q), size));			\
+-      debug_only(prev_q = q);							\
+-      q += size;								\
+-    } else {									\
+-      /* q is not a live object, so its mark should point at the next		\
+-       * live object */								\
+-      debug_only(prev_q = q);							\
+-      q = (HeapWord*) oop(q)->mark()->decode_pointer();				\
+-      assert(q > prev_q, "we should be moving forward through memory");		\
+-    }										\
+-  }										\
+-										\
+-  assert(q == t, "just checking");						\
++    if (oop(q)->is_gc_marked()) {                                               \
++      /* q is alive */                                                          \
++      debug_only(MarkSweep::track_interior_pointers(oop(q)));                   \
++      /* point all the oops to the new location */                              \
++      size_t size = oop(q)->adjust_pointers();                                  \
++      size = adjust_obj_size(size);                                             \
++      debug_only(MarkSweep::check_interior_pointers());                         \
++      debug_only(MarkSweep::validate_live_oop(oop(q), size));                   \
++      debug_only(prev_q = q);                                                   \
++      q += size;                                                                \
++    } else {                                                                    \
++      /* q is not a live object, so its mark should point at the next           \
++       * live object */                                                         \
++      debug_only(prev_q = q);                                                   \
++      q = (HeapWord*) oop(q)->mark()->decode_pointer();                         \
++      assert(q > prev_q, "we should be moving forward through memory");         \
++    }                                                                           \
++  }                                                                             \
++                                                                                \
++  assert(q == t, "just checking");                                              \
+ }
+ 
+-#define SCAN_AND_COMPACT(obj_size) {						\
+-  /* Copy all live objects to their new location				\
+-   * Used by MarkSweep::mark_sweep_phase4() */					\
+-										\
+-  HeapWord*       q = bottom();							\
+-  HeapWord* const t = _end_of_live;						\
+-  debug_only(HeapWord* prev_q = NULL);						\
+-										\
+-  if (q < t && _first_dead > q &&						\
+-      !oop(q)->is_gc_marked()) {				                \
+-    debug_only(									\
+-    /* we have a chunk of the space which hasn't moved and we've reinitialized the		\
+-     * mark word during the previous pass, so we can't use is_gc_marked for the	\
+-     * traversal. */								\
+-    HeapWord* const end = _first_dead;						\
+-      										\
+-    while (q < end) {								\
+-      size_t size = obj_size(q);						\
++#define SCAN_AND_COMPACT(obj_size) {                                            \
++  /* Copy all live objects to their new location                                \
++   * Used by MarkSweep::mark_sweep_phase4() */                                  \
++                                                                                \
++  HeapWord*       q = bottom();                                                 \
++  HeapWord* const t = _end_of_live;                                             \
++  debug_only(HeapWord* prev_q = NULL);                                          \
++                                                                                \
++  if (q < t && _first_dead > q &&                                               \
++      !oop(q)->is_gc_marked()) {                                                \
++    debug_only(                                                                 \
++    /* we have a chunk of the space which hasn't moved and we've reinitialized the              \
++     * mark word during the previous pass, so we can't use is_gc_marked for the \
++     * traversal. */                                                            \
++    HeapWord* const end = _first_dead;                                          \
++                                                                                \
++    while (q < end) {                                                           \
++      size_t size = obj_size(q);                                                \
+       assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)"); \
+-      debug_only(MarkSweep::live_oop_moved_to(q, size, q));			\
+-      debug_only(prev_q = q);							\
+-      q += size;								\
+-    }										\
+-    )  /* debug_only */								\
+-      										\
+-    if (_first_dead == t) {							\
+-      q = t;									\
+-    } else {									\
+-      /* $$$ Funky */ 								\
+-      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();		\
+-    }										\
+-  }										\
+-										\
+-  const intx scan_interval = PrefetchScanIntervalInBytes;			\
+-  const intx copy_interval = PrefetchCopyIntervalInBytes;			\
+-  while (q < t) {								\
+-    if (!oop(q)->is_gc_marked()) {						\
+-      /* mark is pointer to next marked oop */					\
+-      debug_only(prev_q = q);							\
+-      q = (HeapWord*) oop(q)->mark()->decode_pointer();				\
+-      assert(q > prev_q, "we should be moving forward through memory");		\
+-    } else {									\
+-      /* prefetch beyond q */							\
++      debug_only(MarkSweep::live_oop_moved_to(q, size, q));                     \
++      debug_only(prev_q = q);                                                   \
++      q += size;                                                                \
++    }                                                                           \
++    )  /* debug_only */                                                         \
++                                                                                \
++    if (_first_dead == t) {                                                     \
++      q = t;                                                                    \
++    } else {                                                                    \
++      /* $$$ Funky */                                                           \
++      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();               \
++    }                                                                           \
++  }                                                                             \
++                                                                                \
++  const intx scan_interval = PrefetchScanIntervalInBytes;                       \
++  const intx copy_interval = PrefetchCopyIntervalInBytes;                       \
++  while (q < t) {                                                               \
++    if (!oop(q)->is_gc_marked()) {                                              \
++      /* mark is pointer to next marked oop */                                  \
++      debug_only(prev_q = q);                                                   \
++      q = (HeapWord*) oop(q)->mark()->decode_pointer();                         \
++      assert(q > prev_q, "we should be moving forward through memory");         \
++    } else {                                                                    \
++      /* prefetch beyond q */                                                   \
+       Prefetch::read(q, scan_interval);                                         \
+-										\
+-      /* size and destination */						\
+-      size_t size = obj_size(q);						\
+-      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();		\
+-										\
+-      /* prefetch beyond compaction_top */					\
++                                                                                \
++      /* size and destination */                                                \
++      size_t size = obj_size(q);                                                \
++      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();                \
++                                                                                \
++      /* prefetch beyond compaction_top */                                      \
+       Prefetch::write(compaction_top, copy_interval);                           \
+-										\
+-      /* copy object and reinit its mark */					\
+-      debug_only(MarkSweep::live_oop_moved_to(q, size, compaction_top));	\
+-      assert(q != compaction_top, "everything in this pass should be moving");	\
+-      Copy::aligned_conjoint_words(q, compaction_top, size);		        \
+-      oop(compaction_top)->init_mark();						\
+-      assert(oop(compaction_top)->klass() != NULL, "should have a class");	\
+-										\
+-      debug_only(prev_q = q);							\
+-      q += size;								\
+-    }										\
+-  }										\
+-										\
+-  /* Reset space after compaction is complete */				\
+-  reset_after_compaction();							\
++                                                                                \
++      /* copy object and reinit its mark */                                     \
++      debug_only(MarkSweep::live_oop_moved_to(q, size, compaction_top));        \
++      assert(q != compaction_top, "everything in this pass should be moving");  \
++      Copy::aligned_conjoint_words(q, compaction_top, size);                    \
++      oop(compaction_top)->init_mark();                                         \
++      assert(oop(compaction_top)->klass() != NULL, "should have a class");      \
++                                                                                \
++      debug_only(prev_q = q);                                                   \
++      q += size;                                                                \
++    }                                                                           \
++  }                                                                             \
++                                                                                \
++  /* Reset space after compaction is complete */                                \
++  reset_after_compaction();                                                     \
+   /* We do this clear, below, since it has overloaded meanings for some */      \
+   /* space subtypes.  For example, OffsetTableContigSpace's that were   */      \
+   /* compacted into will have had their offset table thresholds updated */      \
+@@ -743,7 +740,7 @@
+ 
+  public:
+   virtual void initialize(MemRegion mr, bool clear_space);
+-  
++
+   // Accessors
+   HeapWord* top() const            { return _top;    }
+   void set_top(HeapWord* value)    { _top = value; }
+@@ -758,7 +755,7 @@
+   WaterMark saved_mark()      { return WaterMark(this, saved_mark_word()); }
+   bool saved_mark_at_top() const { return saved_mark_word() == top(); }
+ 
+-  void mangle_unused_area();  
++  void mangle_unused_area();
+   void mangle_region(MemRegion mr);
+ 
+   // Size computations: sizes in bytes.
+@@ -775,8 +772,8 @@
+   // contain objects.
+   MemRegion used_region() const { return MemRegion(bottom(), top()); }
+ 
+-  MemRegion used_region_at_save_marks() const { 
+-    return MemRegion(bottom(), saved_mark_word()); 
++  MemRegion used_region_at_save_marks() const {
++    return MemRegion(bottom(), saved_mark_word());
+   }
+ 
+   // Allocation (return NULL if full)
+@@ -784,7 +781,7 @@
+   virtual HeapWord* par_allocate(size_t word_size);
+ 
+   virtual bool obj_allocated_since_save_marks(const oop obj) const {
+-    return (HeapWord*)obj >= saved_mark_word(); 
++    return (HeapWord*)obj >= saved_mark_word();
+   }
+ 
+   // Iteration
+@@ -819,8 +816,8 @@
+ 
+   // Override.
+   DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
+-				     CardTableModRefBS::PrecisionStyle precision,
+-				     HeapWord* boundary = NULL);
++                                     CardTableModRefBS::PrecisionStyle precision,
++                                     HeapWord* boundary = NULL);
+ 
+   // Apply "blk->do_oop" to the addresses of all reference fields in objects
+   // starting with the _saved_mark_word, which was noted during a generation's
+@@ -828,7 +825,7 @@
+   // Fields in objects allocated by applications of the closure
+   // *are* included in the iteration.
+   // Updates _saved_mark_word to point to just after the last object
+-  // iterated over.  
++  // iterated over.
+ #define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
+   void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
+ 
+@@ -877,7 +874,7 @@
+ protected:
+   // Override.
+   void walk_mem_region(MemRegion mr,
+-		       HeapWord* bottom, HeapWord* top);
++                       HeapWord* bottom, HeapWord* top);
+ 
+   // Walk the given memory region, from bottom to top, applying
+   // the given oop closure to (possibly) all objects found. The
+@@ -887,16 +884,16 @@
+   // We offer two signatures, so the FilteringClosure static type is
+   // apparent.
+   virtual void walk_mem_region_with_cl(MemRegion mr,
+-				       HeapWord* bottom, HeapWord* top,
+-				       OopClosure* cl) = 0;
++                                       HeapWord* bottom, HeapWord* top,
++                                       OopClosure* cl) = 0;
+   virtual void walk_mem_region_with_cl(MemRegion mr,
+-				       HeapWord* bottom, HeapWord* top,
+-				       FilteringClosure* cl) = 0;
++                                       HeapWord* bottom, HeapWord* top,
++                                       FilteringClosure* cl) = 0;
+ 
+ public:
+   Filtering_DCTOC(Space* sp, OopClosure* cl,
+-		  CardTableModRefBS::PrecisionStyle precision,
+-		  HeapWord* boundary) :
++                  CardTableModRefBS::PrecisionStyle precision,
++                  HeapWord* boundary) :
+     DirtyCardToOopClosure(sp, cl, precision, boundary) {}
+ };
+ 
+@@ -916,16 +913,16 @@
+   HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);
+ 
+   virtual void walk_mem_region_with_cl(MemRegion mr,
+-				       HeapWord* bottom, HeapWord* top,
+-				       OopClosure* cl);
++                                       HeapWord* bottom, HeapWord* top,
++                                       OopClosure* cl);
+   virtual void walk_mem_region_with_cl(MemRegion mr,
+-				       HeapWord* bottom, HeapWord* top,
+-				       FilteringClosure* cl);
++                                       HeapWord* bottom, HeapWord* top,
++                                       FilteringClosure* cl);
+ 
+ public:
+   ContiguousSpaceDCTOC(ContiguousSpace* sp, OopClosure* cl,
+-		       CardTableModRefBS::PrecisionStyle precision,
+-		       HeapWord* boundary) :
++                       CardTableModRefBS::PrecisionStyle precision,
++                       HeapWord* boundary) :
+     Filtering_DCTOC(sp, cl, precision, boundary)
+   {}
+ };
+@@ -944,7 +941,7 @@
+   // reached, the slow-path allocation code can invoke other actions and then
+   // adjust _soft_end up to a new soft limit or to end().
+   HeapWord* _soft_end;
+-  
++
+  public:
+   EdenSpace(DefNewGeneration* gen) : _gen(gen) { _soft_end = NULL; }
+ 
+@@ -973,7 +970,7 @@
+ class ConcEdenSpace : public EdenSpace {
+  public:
+   ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }
+-  
++
+   // Allocation (return NULL if full)
+   HeapWord* par_allocate(size_t word_size);
+ };
+@@ -993,7 +990,7 @@
+  public:
+   // Constructor
+   OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
+-			 MemRegion mr);
++                         MemRegion mr);
+ 
+   void set_bottom(HeapWord* value);
+   void set_end(HeapWord* value);
+@@ -1030,7 +1027,7 @@
+  public:
+   // Constructor
+   TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
+-	       MemRegion mr) :
++               MemRegion mr) :
+     OffsetTableContigSpace(sharedOffsetArray, mr) {}
+ };
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/memory/space.inline.hpp openjdk/hotspot/src/share/vm/memory/space.inline.hpp
+--- openjdk6/hotspot/src/share/vm/memory/space.inline.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/space.inline.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)space.inline.hpp	1.17 07/05/05 17:05:54 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
+@@ -59,7 +56,7 @@
+ 
+ inline HeapWord* ContiguousSpace::concurrent_iteration_safe_limit()
+ {
+-  assert(_concurrent_iteration_safe_limit <= top(), 
++  assert(_concurrent_iteration_safe_limit <= top(),
+          "_concurrent_iteration_safe_limit update missed");
+   return _concurrent_iteration_safe_limit;
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/memory/specialized_oop_closures.cpp openjdk/hotspot/src/share/vm/memory/specialized_oop_closures.cpp
+--- openjdk6/hotspot/src/share/vm/memory/specialized_oop_closures.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/specialized_oop_closures.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)specialized_oop_closures.cpp	1.14 07/05/05 17:05:56 JVM"
+-#endif
+ /*
+  * Copyright 2001-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -59,57 +56,57 @@
+   int all_numCalls_nv =
+     _numCalls_nv[ik] + _numCalls_nv[irk] + _numCalls_nv[oa];
+   gclog_or_tty->print_cr("\nOf %d oop_oop_iterate calls %d (%6.3f%%) are in (ik, irk, oa).",
+-		_numCallsAll, all_numCallsTotal,
+-		100.0 * (float)all_numCallsTotal / (float)_numCallsAll);
++                _numCallsAll, all_numCallsTotal,
++                100.0 * (float)all_numCallsTotal / (float)_numCallsAll);
+   // irk calls are double-counted.
+   int real_ik_numCallsTotal = _numCallsTotal[ik] - _numCallsTotal[irk];
+   int real_ik_numCalls_nv   = _numCalls_nv[ik]   - _numCalls_nv[irk];
+   gclog_or_tty->print_cr("");
+   gclog_or_tty->print_cr(header_format, "oop_oop_iterate:", "calls", "non-virtual", "pct");
+   gclog_or_tty->print_cr(header_format,
+-		"----------",
+-		"----------",
+-		"-----------",
+-		"----------");
+-  gclog_or_tty->print_cr(line_format, "all", 
+-		all_numCallsTotal,
+-		all_numCalls_nv,
+-		100.0 * (float)all_numCalls_nv / (float)all_numCallsTotal);
+-  gclog_or_tty->print_cr(line_format, "ik", 
+-		real_ik_numCallsTotal, real_ik_numCalls_nv,
+-		100.0 * (float)real_ik_numCalls_nv /
+-		(float)real_ik_numCallsTotal);
+-  gclog_or_tty->print_cr(line_format, "irk", 
+-		_numCallsTotal[irk], _numCalls_nv[irk],
+-		100.0 * (float)_numCalls_nv[irk] / (float)_numCallsTotal[irk]);
+-  gclog_or_tty->print_cr(line_format, "oa", 
+-		_numCallsTotal[oa], _numCalls_nv[oa],
+-		100.0 * (float)_numCalls_nv[oa] / (float)_numCallsTotal[oa]);
++                "----------",
++                "----------",
++                "-----------",
++                "----------");
++  gclog_or_tty->print_cr(line_format, "all",
++                all_numCallsTotal,
++                all_numCalls_nv,
++                100.0 * (float)all_numCalls_nv / (float)all_numCallsTotal);
++  gclog_or_tty->print_cr(line_format, "ik",
++                real_ik_numCallsTotal, real_ik_numCalls_nv,
++                100.0 * (float)real_ik_numCalls_nv /
++                (float)real_ik_numCallsTotal);
++  gclog_or_tty->print_cr(line_format, "irk",
++                _numCallsTotal[irk], _numCalls_nv[irk],
++                100.0 * (float)_numCalls_nv[irk] / (float)_numCallsTotal[irk]);
++  gclog_or_tty->print_cr(line_format, "oa",
++                _numCallsTotal[oa], _numCalls_nv[oa],
++                100.0 * (float)_numCalls_nv[oa] / (float)_numCallsTotal[oa]);
+ 
+ 
+   gclog_or_tty->print_cr("");
+   gclog_or_tty->print_cr(header_format, "do_oop:", "calls", "non-virtual", "pct");
+   gclog_or_tty->print_cr(header_format,
+-		"----------",
+-		"----------",
+-		"-----------",
+-		"----------");
++                "----------",
++                "----------",
++                "-----------",
++                "----------");
+   int all_numDoOopCallsTotal =
+     _numDoOopCallsTotal[ik] + _numDoOopCallsTotal[irk] + _numDoOopCallsTotal[oa];
+   int all_numDoOopCalls_nv =
+     _numDoOopCalls_nv[ik] + _numDoOopCalls_nv[irk] + _numDoOopCalls_nv[oa];
+-  gclog_or_tty->print_cr(line_format, "all", 
+-		all_numDoOopCallsTotal, all_numDoOopCalls_nv,
+-		100.0 * (float)all_numDoOopCalls_nv /
+-		(float)all_numDoOopCallsTotal);
++  gclog_or_tty->print_cr(line_format, "all",
++                all_numDoOopCallsTotal, all_numDoOopCalls_nv,
++                100.0 * (float)all_numDoOopCalls_nv /
++                (float)all_numDoOopCallsTotal);
+   const char* kind_names[] = { "ik", "irk", "oa" };
+   for (int k = ik; k < NUM_Kinds; k++) {
+     gclog_or_tty->print_cr(line_format, kind_names[k],
+-		  _numDoOopCallsTotal[k], _numDoOopCalls_nv[k],
+-		  (_numDoOopCallsTotal[k] > 0 ? 
+-		   100.0 * (float)_numDoOopCalls_nv[k] /
+-		   (float)_numDoOopCallsTotal[k]
+-		   : 0.0));
++                  _numDoOopCallsTotal[k], _numDoOopCalls_nv[k],
++                  (_numDoOopCallsTotal[k] > 0 ?
++                   100.0 * (float)_numDoOopCalls_nv[k] /
++                   (float)_numDoOopCallsTotal[k]
++                   : 0.0));
+   }
+ }
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/memory/specialized_oop_closures.hpp openjdk/hotspot/src/share/vm/memory/specialized_oop_closures.hpp
+--- openjdk6/hotspot/src/share/vm/memory/specialized_oop_closures.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/specialized_oop_closures.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)specialized_oop_closures.hpp	1.30 07/05/29 09:44:17 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The following OopClosure types get specialized versions of
+@@ -69,7 +66,7 @@
+ 
+ #ifndef SERIALGC
+ #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_P(f)       \
+-  f(ParScanWithBarrierClosure,_nv)	        	\
++  f(ParScanWithBarrierClosure,_nv)                      \
+   f(ParScanWithoutBarrierClosure,_nv)
+ #else  // SERIALGC
+ #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_P(f)
+@@ -81,11 +78,11 @@
+ 
+ #ifndef SERIALGC
+ #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_3(f)       \
+-  f(MarkRefsIntoAndScanClosure,_nv)			\
+-  f(Par_MarkRefsIntoAndScanClosure,_nv)			\
+-  f(PushAndMarkClosure,_nv)				\
++  f(MarkRefsIntoAndScanClosure,_nv)                     \
++  f(Par_MarkRefsIntoAndScanClosure,_nv)                 \
++  f(PushAndMarkClosure,_nv)                             \
+   f(Par_PushAndMarkClosure,_nv)                         \
+-  f(PushOrMarkClosure,_nv)				\
++  f(PushOrMarkClosure,_nv)                              \
+   f(Par_PushOrMarkClosure,_nv)                          \
+   f(CMSKeepAliveClosure,_nv)                            \
+   f(CMSInnerParMarkAndPushClosure,_nv)
+@@ -129,12 +126,12 @@
+ // "OopClosure" in some applications and "OopsInGenClosure" in others.
+ 
+ #define SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG_S(f) \
+-  f(ScanClosure,_nv)					 \
++  f(ScanClosure,_nv)                                     \
+   f(FastScanClosure,_nv)
+ 
+ #ifndef SERIALGC
+ #define SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG_P(f) \
+-  f(ParScanWithBarrierClosure,_nv)			 \
++  f(ParScanWithBarrierClosure,_nv)                       \
+   f(ParScanWithoutBarrierClosure,_nv)
+ #else  // SERIALGC
+ #define SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG_P(f)
+@@ -200,7 +197,7 @@
+   static inline void record_do_oop_call_nv(Kind k)  PRODUCT_RETURN;
+ 
+   static void print() PRODUCT_RETURN;
+-};  
++};
+ 
+ #ifndef PRODUCT
+ #if ENABLE_SPECIALIZATION_STATS
+diff -ruN openjdk6/hotspot/src/share/vm/memory/tenuredGeneration.cpp openjdk/hotspot/src/share/vm/memory/tenuredGeneration.cpp
+--- openjdk6/hotspot/src/share/vm/memory/tenuredGeneration.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/tenuredGeneration.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)tenuredGeneration.cpp	1.47 07/05/29 09:44:17 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,17 +19,17 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_tenuredGeneration.cpp.incl"
+ 
+ TenuredGeneration::TenuredGeneration(ReservedSpace rs,
+-				     size_t initial_byte_size, int level, 
+-				     GenRemSet* remset) :
++                                     size_t initial_byte_size, int level,
++                                     GenRemSet* remset) :
+   OneContigSpaceCardGeneration(rs, initial_byte_size,
+-			       MinHeapDeltaBytes, level, remset, NULL)
++                               MinHeapDeltaBytes, level, remset, NULL)
+ {
+   HeapWord* bottom = (HeapWord*) _virtual_space.low();
+   HeapWord* end    = (HeapWord*) _virtual_space.high();
+@@ -59,14 +56,14 @@
+   if (UseParNewGC && ParallelGCThreads > 0) {
+     typedef ParGCAllocBufferWithBOT* ParGCAllocBufferWithBOTPtr;
+     _alloc_buffers = NEW_C_HEAP_ARRAY(ParGCAllocBufferWithBOTPtr,
+-				      ParallelGCThreads);
+-    if (_alloc_buffers == NULL) 
++                                      ParallelGCThreads);
++    if (_alloc_buffers == NULL)
+       vm_exit_during_initialization("Could not allocate alloc_buffers");
+     for (uint i = 0; i < ParallelGCThreads; i++) {
+       _alloc_buffers[i] =
+-	new ParGCAllocBufferWithBOT(OldPLABSize, _bts);
+-      if (_alloc_buffers[i] == NULL) 
+-	vm_exit_during_initialization("Could not allocate alloc_buffers");
++        new ParGCAllocBufferWithBOT(OldPLABSize, _bts);
++      if (_alloc_buffers[i] == NULL)
++        vm_exit_during_initialization("Could not allocate alloc_buffers");
+     }
+   } else {
+     _alloc_buffers = NULL;
+@@ -97,7 +94,7 @@
+   size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
+   // Don't shrink less than the initial generation size
+   minimum_desired_capacity = MAX2(minimum_desired_capacity,
+-				  spec()->init_size());
++                                  spec()->init_size());
+   assert(used_after_gc <= minimum_desired_capacity, "sanity check");
+ 
+   if (PrintGC && Verbose) {
+@@ -138,7 +135,7 @@
+                     _min_heap_delta_bytes / (double) K);
+     }
+     return;
+-  } 
++  }
+ 
+   // No expansion, now see if we want to shrink
+   size_t shrink_bytes = 0;
+@@ -151,23 +148,23 @@
+     const double max_tmp = used_after_gc / minimum_used_percentage;
+     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
+     maximum_desired_capacity = MAX2(maximum_desired_capacity,
+-				    spec()->init_size());
++                                    spec()->init_size());
+     if (PrintGC && Verbose) {
+       gclog_or_tty->print_cr("  "
+-			     "  maximum_free_percentage: %6.2f"
+-			     "  minimum_used_percentage: %6.2f",
+-			     maximum_free_percentage,
+-			     minimum_used_percentage);
++                             "  maximum_free_percentage: %6.2f"
++                             "  minimum_used_percentage: %6.2f",
++                             maximum_free_percentage,
++                             minimum_used_percentage);
+       gclog_or_tty->print_cr("  "
+-			     "  _capacity_at_prologue: %6.1fK"
+-			     "  minimum_desired_capacity: %6.1fK"
+-			     "  maximum_desired_capacity: %6.1fK",
+-			     _capacity_at_prologue / (double) K,
+-			     minimum_desired_capacity / (double) K,
+-			     maximum_desired_capacity / (double) K);
++                             "  _capacity_at_prologue: %6.1fK"
++                             "  minimum_desired_capacity: %6.1fK"
++                             "  maximum_desired_capacity: %6.1fK",
++                             _capacity_at_prologue / (double) K,
++                             minimum_desired_capacity / (double) K,
++                             maximum_desired_capacity / (double) K);
+     }
+     assert(minimum_desired_capacity <= maximum_desired_capacity,
+-	   "sanity check");
++           "sanity check");
+ 
+     if (capacity_after_gc > maximum_desired_capacity) {
+       // Capacity too large, compute shrinking size
+@@ -216,15 +213,15 @@
+     assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
+     if (PrintGC && Verbose) {
+       gclog_or_tty->print_cr("  "
+-			     "  aggressive shrinking:"
+-			     "  _capacity_at_prologue: %.1fK"
+-			     "  capacity_after_gc: %.1fK"
+-			     "  expansion_for_promotion: %.1fK"
+-			     "  shrink_bytes: %.1fK",
+-			     capacity_after_gc / (double) K,
+-			     _capacity_at_prologue / (double) K,
+-			     expansion_for_promotion / (double) K,
+-			     shrink_bytes / (double) K);
++                             "  aggressive shrinking:"
++                             "  _capacity_at_prologue: %.1fK"
++                             "  capacity_after_gc: %.1fK"
++                             "  expansion_for_promotion: %.1fK"
++                             "  shrink_bytes: %.1fK",
++                             _capacity_at_prologue / (double) K,
++                             capacity_after_gc / (double) K,
++                             expansion_for_promotion / (double) K,
++                             shrink_bytes / (double) K);
+     }
+   }
+   // Don't shrink unless it's significant
+@@ -232,7 +229,7 @@
+     shrink(shrink_bytes);
+   }
+   assert(used() == used_after_gc && used_after_gc <= capacity(),
+-	 "sanity check");
++         "sanity check");
+ }
+ 
+ void TenuredGeneration::gc_prologue(bool full) {
+@@ -254,9 +251,9 @@
+ bool TenuredGeneration::should_collect(bool  full,
+                                        size_t size,
+                                        bool   is_tlab) {
+-  // This should be one big conditional or (||), but I want to be able to tell 
+-  // why it returns what it returns (without re-evaluating the conditionals 
+-  // in case they aren't idempotent), so I'm doing it this way.  
++  // This should be one big conditional or (||), but I want to be able to tell
++  // why it returns what it returns (without re-evaluating the conditionals
++  // in case they aren't idempotent), so I'm doing it this way.
+   // DeMorgan says it's okay.
+   bool result = false;
+   if (!result && full) {
+@@ -297,16 +294,16 @@
+ }
+ 
+ void TenuredGeneration::collect(bool   full,
+-				bool   clear_all_soft_refs,
+-				size_t size,
+-				bool   is_tlab) {
+-  retire_alloc_buffers_before_full_gc();  
++                                bool   clear_all_soft_refs,
++                                size_t size,
++                                bool   is_tlab) {
++  retire_alloc_buffers_before_full_gc();
+   OneContigSpaceCardGeneration::collect(full, clear_all_soft_refs,
+-					size, is_tlab);
++                                        size, is_tlab);
+ }
+ 
+-void TenuredGeneration::update_gc_stats(int current_level, 
+-					bool full) {
++void TenuredGeneration::update_gc_stats(int current_level,
++                                        bool full) {
+   // If the next lower level(s) has been collected, gather any statistics
+   // that are of interest at this point.
+   if (!full && (current_level + 1) == level()) {
+@@ -335,32 +332,32 @@
+ 
+ #ifndef SERIALGC
+ oop TenuredGeneration::par_promote(int thread_num,
+-				   oop old, markOop m, size_t word_sz) {
++                                   oop old, markOop m, size_t word_sz) {
+ 
+   ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
+   HeapWord* obj_ptr = buf->allocate(word_sz);
+   bool is_lab = true;
+   if (obj_ptr == NULL) {
+-#ifndef	PRODUCT
++#ifndef PRODUCT
+     if (Universe::heap()->promotion_should_fail()) {
+       return NULL;
+     }
+-#endif	// #ifndef PRODUCT
++#endif  // #ifndef PRODUCT
+ 
+     // Slow path:
+     if (word_sz * 100 < ParallelGCBufferWastePct * buf->word_sz()) {
+       // Is small enough; abandon this buffer and start a new one.
+       size_t buf_size = buf->word_sz();
+       HeapWord* buf_space =
+-	TenuredGeneration::par_allocate(buf_size, false);
++        TenuredGeneration::par_allocate(buf_size, false);
+       if (buf_space == NULL) {
+-	buf_space = expand_and_allocate(buf_size, false, true /* parallel*/);
++        buf_space = expand_and_allocate(buf_size, false, true /* parallel*/);
+       }
+       if (buf_space != NULL) {
+-	buf->retire(false, false);
+-	buf->set_buf(buf_space);
+-	obj_ptr = buf->allocate(word_sz);
+-	assert(obj_ptr != NULL, "Buffer was definitely big enough...");
++        buf->retire(false, false);
++        buf->set_buf(buf_space);
++        obj_ptr = buf->allocate(word_sz);
++        assert(obj_ptr != NULL, "Buffer was definitely big enough...");
+       }
+     };
+     // Otherwise, buffer allocation failed; try allocating object
+@@ -368,7 +365,7 @@
+     if (obj_ptr == NULL) {
+       obj_ptr = TenuredGeneration::par_allocate(word_sz, false);
+       if (obj_ptr == NULL) {
+-	obj_ptr = expand_and_allocate(word_sz, false, true /* parallel */);
++        obj_ptr = expand_and_allocate(word_sz, false, true /* parallel */);
+       }
+     }
+     if (obj_ptr == NULL) return NULL;
+@@ -382,12 +379,12 @@
+ }
+ 
+ void TenuredGeneration::par_promote_alloc_undo(int thread_num,
+-					       HeapWord* obj,
+-					       size_t word_sz) {
++                                               HeapWord* obj,
++                                               size_t word_sz) {
+   ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
+   if (buf->contains(obj)) {
+     guarantee(buf->contains(obj + word_sz - 1),
+-	      "should contain whole object");
++              "should contain whole object");
+     buf->undo_allocation(obj, word_sz);
+   } else {
+     SharedHeap::fill_region_with_object(MemRegion(obj, word_sz));
+@@ -428,14 +425,14 @@
+   bool result = max_contiguous_available() >= max_promotion_in_bytes;
+ 
+   if (younger_handles_promotion_failure && !result) {
+-    result = max_contiguous_available() >= 
++    result = max_contiguous_available() >=
+       (size_t) gc_stats()->avg_promoted()->padded_average();
+     if (PrintGC && Verbose && result) {
+       gclog_or_tty->print_cr("TenuredGeneration::promotion_attempt_is_safe"
+                   " contiguous_available: " SIZE_FORMAT
+                   " avg_promoted: " SIZE_FORMAT,
+-                  max_contiguous_available(), 
+-		  gc_stats()->avg_promoted()->padded_average());
++                  max_contiguous_available(),
++                  gc_stats()->avg_promoted()->padded_average());
+     }
+   } else {
+     if (PrintGC && Verbose) {
+diff -ruN openjdk6/hotspot/src/share/vm/memory/tenuredGeneration.hpp openjdk/hotspot/src/share/vm/memory/tenuredGeneration.hpp
+--- openjdk6/hotspot/src/share/vm/memory/tenuredGeneration.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/tenuredGeneration.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)tenuredGeneration.hpp	1.27 07/05/29 09:44:17 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // TenuredGeneration models the heap containing old (promoted/tenured) objects.
+@@ -35,7 +32,7 @@
+   // current shrinking effect: this damps shrinking when the heap gets empty.
+   size_t _shrink_factor;
+   // Some statistics from before gc started.
+-  // These are gathered in the gc_prologue (and should_collect) 
++  // These are gathered in the gc_prologue (and should_collect)
+   // to control growing/shrinking policy in spite of promotions.
+   size_t _capacity_at_prologue;
+   size_t _used_at_prologue;
+@@ -52,13 +49,13 @@
+ 
+   GenerationCounters*   _gen_counters;
+   CSpaceCounters*       _space_counters;
+-  
++
+  public:
+   TenuredGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
+-		    GenRemSet* remset);
+-  
++                    GenRemSet* remset);
++
+   Generation::Name kind() { return Generation::MarkSweepCompact; }
+-  
++
+   // Printing
+   const char* name() const;
+   const char* short_name() const { return "Tenured"; }
+@@ -86,15 +83,15 @@
+ 
+   virtual void collect(bool full,
+                        bool clear_all_soft_refs,
+-                       size_t size, 
++                       size_t size,
+                        bool is_tlab);
+ 
+ #ifndef SERIALGC
+   // Overrides.
+   virtual oop par_promote(int thread_num,
+-			  oop obj, markOop m, size_t word_sz);
++                          oop obj, markOop m, size_t word_sz);
+   virtual void par_promote_alloc_undo(int thread_num,
+-				      HeapWord* obj, size_t word_sz);
++                                      HeapWord* obj, size_t word_sz);
+   virtual void par_promote_alloc_done(int thread_num);
+ #endif // SERIALGC
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/memory/threadLocalAllocBuffer.cpp openjdk/hotspot/src/share/vm/memory/threadLocalAllocBuffer.cpp
+--- openjdk6/hotspot/src/share/vm/memory/threadLocalAllocBuffer.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/threadLocalAllocBuffer.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)threadLocalAllocBuffer.cpp	1.55 07/07/05 17:12:38 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Thread-Local Edens support
+@@ -184,7 +181,7 @@
+   initialize(NULL,                    // start
+              NULL,                    // top
+              NULL);                   // end
+-  
++
+   set_desired_size(initial_desired_size());
+ 
+   // Following check is needed because at startup the main (primordial)
+diff -ruN openjdk6/hotspot/src/share/vm/memory/threadLocalAllocBuffer.hpp openjdk/hotspot/src/share/vm/memory/threadLocalAllocBuffer.hpp
+--- openjdk6/hotspot/src/share/vm/memory/threadLocalAllocBuffer.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/threadLocalAllocBuffer.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)threadLocalAllocBuffer.hpp	1.35 07/07/05 17:12:36 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class GlobalTLABStats;
+@@ -63,7 +60,7 @@
+   void set_refill_waste_limit(size_t waste)      { _refill_waste_limit = waste;  }
+ 
+   size_t initial_refill_waste_limit()            { return desired_size() / TLABRefillWasteFraction; }
+-  
++
+   static int    target_refills()                 { return _target_refills; }
+   size_t initial_desired_size();
+ 
+@@ -130,7 +127,7 @@
+ 
+   // Make an in-use tlab parsable, optionally also retiring it.
+   void make_parsable(bool retire);
+-  
++
+   // Retire in-use tlab before allocation of a new tlab
+   void clear_before_allocation();
+ 
+@@ -150,8 +147,8 @@
+   static ByteSize end_offset()                   { return byte_offset_of(ThreadLocalAllocBuffer, _end  ); }
+   static ByteSize top_offset()                   { return byte_offset_of(ThreadLocalAllocBuffer, _top  ); }
+   static ByteSize pf_top_offset()                { return byte_offset_of(ThreadLocalAllocBuffer, _pf_top  ); }
+-  static ByteSize size_offset()                  { return byte_offset_of(ThreadLocalAllocBuffer, _desired_size ); }  
+-  static ByteSize refill_waste_limit_offset()    { return byte_offset_of(ThreadLocalAllocBuffer, _refill_waste_limit ); }  
++  static ByteSize size_offset()                  { return byte_offset_of(ThreadLocalAllocBuffer, _desired_size ); }
++  static ByteSize refill_waste_limit_offset()    { return byte_offset_of(ThreadLocalAllocBuffer, _refill_waste_limit ); }
+ 
+   static ByteSize number_of_refills_offset()     { return byte_offset_of(ThreadLocalAllocBuffer, _number_of_refills ); }
+   static ByteSize fast_refill_waste_offset()     { return byte_offset_of(ThreadLocalAllocBuffer, _fast_refill_waste ); }
+@@ -178,7 +175,7 @@
+   size_t   _max_fast_refill_waste;
+   unsigned _total_slow_allocations;
+   unsigned _max_slow_allocations;
+-  
++
+   PerfVariable* _perf_allocating_threads;
+   PerfVariable* _perf_total_refills;
+   PerfVariable* _perf_max_refills;
+diff -ruN openjdk6/hotspot/src/share/vm/memory/threadLocalAllocBuffer.inline.hpp openjdk/hotspot/src/share/vm/memory/threadLocalAllocBuffer.inline.hpp
+--- openjdk6/hotspot/src/share/vm/memory/threadLocalAllocBuffer.inline.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/threadLocalAllocBuffer.inline.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)threadLocalAllocBuffer.inline.hpp	1.29 07/05/05 17:05:56 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ inline HeapWord* ThreadLocalAllocBuffer::allocate(size_t size) {
+@@ -30,12 +27,12 @@
+   HeapWord* obj = top();
+   if (pointer_delta(end(), obj) >= size) {
+     // successful thread-local allocation
+-    
++
+     DEBUG_ONLY(Copy::fill_to_words(obj, size, badHeapWordVal));
+     // This addition is safe because we know that top is
+     // at least size below end, so the add can't wrap.
+     set_top(obj + size);
+-    
++
+     invariants();
+     return obj;
+   }
+diff -ruN openjdk6/hotspot/src/share/vm/memory/universe.cpp openjdk/hotspot/src/share/vm/memory/universe.cpp
+--- openjdk6/hotspot/src/share/vm/memory/universe.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/universe.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)universe.cpp	1.359 07/05/29 09:44:16 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,13 +19,13 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_universe.cpp.incl"
+ 
+-// Known objects 
++// Known objects
+ klassOop Universe::_boolArrayKlassObj                 = NULL;
+ klassOop Universe::_byteArrayKlassObj                 = NULL;
+ klassOop Universe::_charArrayKlassObj                 = NULL;
+@@ -52,8 +49,18 @@
+ klassOop Universe::_constantPoolCacheKlassObj         = NULL;
+ klassOop Universe::_compiledICHolderKlassObj          = NULL;
+ klassOop Universe::_systemObjArrayKlassObj            = NULL;
+-oop      Universe::_main_thread_group                 = NULL;
+-oop      Universe::_system_thread_group               = NULL;
++oop Universe::_int_mirror                             =  NULL;
++oop Universe::_float_mirror                           =  NULL;
++oop Universe::_double_mirror                          =  NULL;
++oop Universe::_byte_mirror                            =  NULL;
++oop Universe::_bool_mirror                            =  NULL;
++oop Universe::_char_mirror                            =  NULL;
++oop Universe::_long_mirror                            =  NULL;
++oop Universe::_short_mirror                           =  NULL;
++oop Universe::_void_mirror                            =  NULL;
++oop Universe::_mirrors[T_VOID+1]                      =  { NULL /*, NULL...*/ };
++oop Universe::_main_thread_group                      = NULL;
++oop Universe::_system_thread_group                    = NULL;
+ typeArrayOop Universe::_the_empty_byte_array          = NULL;
+ typeArrayOop Universe::_the_empty_short_array         = NULL;
+ typeArrayOop Universe::_the_empty_int_array           = NULL;
+@@ -81,7 +88,7 @@
+ debug_only(int Universe::_fullgc_alot_dummy_next      = 0;)
+ 
+ 
+-// Heap  
++// Heap
+ int             Universe::_verify_count = 0;
+ 
+ int             Universe::_base_vtable_size = 0;
+@@ -121,11 +128,24 @@
+ }
+ 
+ void Universe::oops_do(OopClosure* f, bool do_all) {
+-  // Although most of the SystemDictionary oops are klasses,
+-  // a few non-klass objects are defined over there.
+-  // They must be treated in all ways like the random objects in Universe.
+-  // So adopt them into Universe, by the following cross-module call:
+-  SystemDictionary::shared_oops_do(f);
++
++  f->do_oop((oop*) &_int_mirror);
++  f->do_oop((oop*) &_float_mirror);
++  f->do_oop((oop*) &_double_mirror);
++  f->do_oop((oop*) &_byte_mirror);
++  f->do_oop((oop*) &_bool_mirror);
++  f->do_oop((oop*) &_char_mirror);
++  f->do_oop((oop*) &_long_mirror);
++  f->do_oop((oop*) &_short_mirror);
++  f->do_oop((oop*) &_void_mirror);
++
++  // It's important to iterate over these guys even if they are null,
++  // since that's how shared heaps are restored.
++  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
++    f->do_oop((oop*) &_mirrors[i]);
++  }
++  assert(_mirrors[0] == NULL && _mirrors[T_BOOLEAN - 1] == NULL, "checking");
++
+   // %%% Consider moving those "shared oops" over here with the others.
+   f->do_oop((oop*)&_boolArrayKlassObj);
+   f->do_oop((oop*)&_byteArrayKlassObj);
+@@ -139,8 +159,8 @@
+   {
+     for (int i = 0; i < T_VOID+1; i++) {
+       if (_typeArrayKlassObjs[i] != NULL) {
+-	assert(i >= T_BOOLEAN, "checking");
+-	f->do_oop((oop*)&_typeArrayKlassObjs[i]);
++        assert(i >= T_BOOLEAN, "checking");
++        f->do_oop((oop*)&_typeArrayKlassObjs[i]);
+       } else if (do_all) {
+         f->do_oop((oop*)&_typeArrayKlassObjs[i]);
+       }
+@@ -162,9 +182,9 @@
+   f->do_oop((oop*)&_the_empty_byte_array);
+   f->do_oop((oop*)&_the_empty_short_array);
+   f->do_oop((oop*)&_the_empty_int_array);
+-  f->do_oop((oop*)&_the_empty_system_obj_array);    
+-  f->do_oop((oop*)&_the_empty_class_klass_array);    
+-  f->do_oop((oop*)&_the_array_interfaces_array);    
++  f->do_oop((oop*)&_the_empty_system_obj_array);
++  f->do_oop((oop*)&_the_empty_class_klass_array);
++  f->do_oop((oop*)&_the_array_interfaces_array);
+   _finalizer_register_cache->oops_do(f);
+   _loader_addClass_cache->oops_do(f);
+   _reflect_invoke_cache->oops_do(f);
+@@ -172,9 +192,9 @@
+   f->do_oop((oop*)&_out_of_memory_error_perm_gen);
+   f->do_oop((oop*)&_out_of_memory_error_array_size);
+   f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit);
+-  if (_preallocated_out_of_memory_error_array != (oop)NULL) {	// NULL when DumpSharedSpaces
++  if (_preallocated_out_of_memory_error_array != (oop)NULL) {   // NULL when DumpSharedSpaces
+     f->do_oop((oop*)&_preallocated_out_of_memory_error_array);
+-  } 
++  }
+   f->do_oop((oop*)&_null_ptr_exception_instance);
+   f->do_oop((oop*)&_arithmetic_exception_instance);
+   f->do_oop((oop*)&_virtual_machine_error_instance);
+@@ -200,7 +220,7 @@
+ void Universe::genesis(TRAPS) {
+   ResourceMark rm;
+   { FlagSetting fs(_bootstrapping, true);
+-      
++
+     { MutexLocker mc(Compile_lock);
+ 
+       // determine base vtable size; without that we cannot create the array klasses
+@@ -257,7 +277,7 @@
+         FileMapInfo *mapinfo = FileMapInfo::current_info();
+         char* buffer = mapinfo->region_base(CompactingPermGenGen::md);
+         void** vtbl_list = (void**)buffer;
+-        init_self_patching_vtbl_list(vtbl_list, 
++        init_self_patching_vtbl_list(vtbl_list,
+                                      CompactingPermGenGen::vtbl_list_size);
+       }
+     }
+@@ -342,7 +362,7 @@
+   // its vtable is initialized after core bootstrapping is completed.
+   Klass::cast(_objectArrayKlassObj)->append_to_sibling_list();
+ 
+-  // Compute is_jdk version flags. 
++  // Compute is_jdk version flags.
+   // Only 1.3 or later has the java.lang.Shutdown class.
+   // Only 1.4 or later has the java.lang.CharSequence interface.
+   // Only 1.5 or later has the java.lang.management.MemoryUsage class.
+@@ -412,7 +432,7 @@
+     assert(i == _fullgc_alot_dummy_array->length(), "just checking");
+   }
+   #endif
+-}    
++}
+ 
+ 
+ static inline void add_vtable(void** list, int* n, Klass* o, int count) {
+@@ -462,6 +482,45 @@
+   }
+ };
+ 
++void Universe::initialize_basic_type_mirrors(TRAPS) {
++  if (UseSharedSpaces) {
++    assert(_int_mirror != NULL, "already loaded");
++    assert(_void_mirror == _mirrors[T_VOID], "consistently loaded");
++  } else {
++
++    assert(_int_mirror==NULL, "basic type mirrors already initialized");
++    _int_mirror     =
++      java_lang_Class::create_basic_type_mirror("int",    T_INT, CHECK);
++    _float_mirror   =
++      java_lang_Class::create_basic_type_mirror("float",  T_FLOAT,   CHECK);
++    _double_mirror  =
++      java_lang_Class::create_basic_type_mirror("double", T_DOUBLE,  CHECK);
++    _byte_mirror    =
++      java_lang_Class::create_basic_type_mirror("byte",   T_BYTE, CHECK);
++    _bool_mirror    =
++      java_lang_Class::create_basic_type_mirror("boolean",T_BOOLEAN, CHECK);
++    _char_mirror    =
++      java_lang_Class::create_basic_type_mirror("char",   T_CHAR, CHECK);
++    _long_mirror    =
++      java_lang_Class::create_basic_type_mirror("long",   T_LONG, CHECK);
++    _short_mirror   =
++      java_lang_Class::create_basic_type_mirror("short",  T_SHORT,   CHECK);
++    _void_mirror    =
++      java_lang_Class::create_basic_type_mirror("void",   T_VOID, CHECK);
++
++    _mirrors[T_INT]     = _int_mirror;
++    _mirrors[T_FLOAT]   = _float_mirror;
++    _mirrors[T_DOUBLE]  = _double_mirror;
++    _mirrors[T_BYTE]    = _byte_mirror;
++    _mirrors[T_BOOLEAN] = _bool_mirror;
++    _mirrors[T_CHAR]    = _char_mirror;
++    _mirrors[T_LONG]    = _long_mirror;
++    _mirrors[T_SHORT]   = _short_mirror;
++    _mirrors[T_VOID]    = _void_mirror;
++    //_mirrors[T_OBJECT]  = instanceKlass::cast(_object_klass)->java_mirror();
++    //_mirrors[T_ARRAY]   = instanceKlass::cast(_object_klass)->java_mirror();
++  }
++}
+ 
+ void Universe::fixup_mirrors(TRAPS) {
+   // Bootstrap problem: all classes get a mirror (java.lang.Class instance) assigned eagerly,
+@@ -482,14 +541,14 @@
+ 
+   // Called on VM exit. This ought to be run in a separate thread.
+   if (TraceReferenceGC) tty->print_cr("Callback to run finalizers on exit");
+-  { 
++  {
+     PRESERVE_EXCEPTION_MARK;
+     KlassHandle finalizer_klass(THREAD, SystemDictionary::finalizer_klass());
+     JavaValue result(T_VOID);
+     JavaCalls::call_static(
+-      &result, 
+-      finalizer_klass, 
+-      vmSymbolHandles::run_finalizers_on_exit_name(), 
++      &result,
++      finalizer_klass,
++      vmSymbolHandles::run_finalizers_on_exit_name(),
+       vmSymbolHandles::void_method_signature(),
+       THREAD
+     );
+@@ -503,7 +562,7 @@
+ // 1) we specified true to initialize_vtable and
+ // 2) this ran after gc was enabled
+ // In case those ever change we use handles for oops
+-void Universe::reinitialize_vtable_of(KlassHandle k_h, TRAPS) {  
++void Universe::reinitialize_vtable_of(KlassHandle k_h, TRAPS) {
+   // init vtable of k and all subclasses
+   Klass* ko = k_h()->klass_part();
+   klassVtable* vt = ko->vtable();
+@@ -518,7 +577,7 @@
+ 
+ 
+ void initialize_itable_for_klass(klassOop k, TRAPS) {
+-  instanceKlass::cast(k)->itable()->initialize_itable(false, CHECK);        
++  instanceKlass::cast(k)->itable()->initialize_itable(false, CHECK);
+ }
+ 
+ 
+@@ -542,7 +601,7 @@
+   return ((throwable() != Universe::_out_of_memory_error_java_heap) &&
+           (throwable() != Universe::_out_of_memory_error_perm_gen)  &&
+           (throwable() != Universe::_out_of_memory_error_array_size) &&
+-	  (throwable() != Universe::_out_of_memory_error_gc_overhead_limit));
++          (throwable() != Universe::_out_of_memory_error_gc_overhead_limit));
+ }
+ 
+ 
+@@ -572,7 +631,7 @@
+ 
+     // use the message from the default error
+     oop msg = java_lang_Throwable::message(default_err);
+-    assert(msg != NULL, "no message"); 
++    assert(msg != NULL, "no message");
+     java_lang_Throwable::set_message(exc, msg);
+ 
+     // populate the stack trace and return it.
+@@ -605,10 +664,10 @@
+ jint universe_init() {
+   assert(!Universe::_fully_initialized, "called after initialize_vtables");
+   guarantee(1 << LogHeapWordSize == sizeof(HeapWord),
+-	 "LogHeapWordSize is incorrect.");
++         "LogHeapWordSize is incorrect.");
+   guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?");
+   guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
+-	 "oop size is not not a multiple of HeapWord size");
++         "oop size is not a multiple of HeapWord size");
+   TraceTime timer("Genesis", TraceStartupTime);
+   GC_locker::lock();  // do not allow gc during bootstrapping
+   JavaClasses::compute_hard_coded_offsets();
+@@ -682,9 +741,9 @@
+     } else if (UseConcMarkSweepGC) {
+ #ifndef SERIALGC
+       if (UseAdaptiveSizePolicy) {
+-	gc_policy = new ASConcurrentMarkSweepPolicy();
++        gc_policy = new ASConcurrentMarkSweepPolicy();
+       } else {
+-	gc_policy = new ConcurrentMarkSweepPolicy();
++        gc_policy = new ConcurrentMarkSweepPolicy();
+       }
+ #else   // SERIALGC
+     fatal("UseConcMarkSweepGC not supported in java kernel vm.");
+@@ -692,7 +751,7 @@
+     } else { // default old generation
+       gc_policy = new MarkSweepPolicy();
+     }
+-    
++
+     Universe::_collectedHeap = new GenCollectedHeap(gc_policy);
+   }
+ 
+@@ -729,7 +788,7 @@
+   // itself to the threads list (so, using current interfaces
+   // we can't "fill" its TLAB), unless TLABs are disabled.
+   if (VerifyBeforeGC && !UseTLAB &&
+-      Universe::heap()->total_collections() >= VerifyGCStartAt) { 
++      Universe::heap()->total_collections() >= VerifyGCStartAt) {
+      Universe::heap()->prepare_for_verify();
+      Universe::verify();   // make sure we're starting with a clean slate
+   }
+@@ -762,7 +821,7 @@
+     Universe::_out_of_memory_error_java_heap = k_h->allocate_permanent_instance(CHECK_false);
+     Universe::_out_of_memory_error_perm_gen = k_h->allocate_permanent_instance(CHECK_false);
+     Universe::_out_of_memory_error_array_size = k_h->allocate_permanent_instance(CHECK_false);
+-    Universe::_out_of_memory_error_gc_overhead_limit = 
++    Universe::_out_of_memory_error_gc_overhead_limit =
+       k_h->allocate_permanent_instance(CHECK_false);
+ 
+     // Setup preallocated NullPointerException
+@@ -781,7 +840,7 @@
+       tty->print_cr("Unable to link/verify VirtualMachineError class");
+       return false; // initialization failed
+     }
+-    Universe::_virtual_machine_error_instance = 
++    Universe::_virtual_machine_error_instance =
+       instanceKlass::cast(k)->allocate_permanent_instance(CHECK_false);
+   }
+   if (!DumpSharedSpaces) {
+@@ -815,43 +874,43 @@
+       java_lang_Throwable::allocate_backtrace(err_h, CHECK_false);
+       Universe::preallocated_out_of_memory_errors()->obj_at_put(i, err_h());
+     }
+-    Universe::_preallocated_out_of_memory_error_avail_count = (jint)len;    
++    Universe::_preallocated_out_of_memory_error_avail_count = (jint)len;
+   }
+ 
+-  
++
+   // Setup static method for registering finalizers
+   // The finalizer klass must be linked before looking up the method, in
+   // case it needs to get rewritten.
+   instanceKlass::cast(SystemDictionary::finalizer_klass())->link_class(CHECK_false);
+   methodOop m = instanceKlass::cast(SystemDictionary::finalizer_klass())->find_method(
+-                                  vmSymbols::register_method_name(), 
++                                  vmSymbols::register_method_name(),
+                                   vmSymbols::register_method_signature());
+   if (m == NULL || !m->is_static()) {
+-    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(), 
++    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
+       "java.lang.ref.Finalizer.register", false);
+   }
+   Universe::_finalizer_register_cache->init(
+     SystemDictionary::finalizer_klass(), m, CHECK_false);
+ 
+-  // Resolve on first use and initialize class. 
++  // Resolve on first use and initialize class.
+   // Note: No race-condition here, since a resolve will always return the same result
+ 
+-  // Setup method for security checks 
+-  k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_reflect_Method(), true, CHECK_false);  
++  // Setup method for security checks
++  k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_reflect_Method(), true, CHECK_false);
+   k_h = instanceKlassHandle(THREAD, k);
+   k_h->link_class(CHECK_false);
+   m = k_h->find_method(vmSymbols::invoke_name(), vmSymbols::object_array_object_object_signature());
+   if (m == NULL || m->is_static()) {
+-    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(), 
++    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
+       "java.lang.reflect.Method.invoke", false);
+   }
+   Universe::_reflect_invoke_cache->init(k_h(), m, CHECK_false);
+ 
+-  // Setup method for registering loaded classes in class loader vector 
++  // Setup method for registering loaded classes in class loader vector
+   instanceKlass::cast(SystemDictionary::classloader_klass())->link_class(CHECK_false);
+   m = instanceKlass::cast(SystemDictionary::classloader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
+   if (m == NULL || m->is_static()) {
+-    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(), 
++    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
+       "java.lang.ClassLoader.addClass", false);
+   }
+   Universe::_loader_addClass_cache->init(
+@@ -881,43 +940,25 @@
+ }
+ 
+ 
++// %%% The Universe::flush_foo methods belong in CodeCache.
++
+ // Flushes compiled methods dependent on dependee.
+-void Universe::flush_dependents_on(instanceKlassHandle dependee) {  
+-  assert_lock_strong(Compile_lock);  
++void Universe::flush_dependents_on(instanceKlassHandle dependee) {
++  assert_lock_strong(Compile_lock);
++
+   if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
+ 
+   // CodeCache can only be updated by a thread_in_VM and they will all be
+   // stopped during the safepoint so CodeCache will be safe to update without
+   // holding the CodeCache_lock.
+-  
+-  // Mark all dependee and all its superclasses
+-  for (klassOop d = dependee(); d != NULL; d = instanceKlass::cast(d)->super()) {
+-    assert(!instanceKlass::cast(d)->is_marked_dependent(), "checking");
+-    instanceKlass::cast(d)->set_is_marked_dependent(true);
+-  }
+-  // Mark transitive interfaces
+-  int i;
+-  for (i = 0; i < dependee->transitive_interfaces()->length(); i++) {
+-    instanceKlass* klass = instanceKlass::cast((klassOop)dependee->transitive_interfaces()->obj_at(i));
+-    assert(!klass->is_marked_dependent(), "checking");
+-    klass->set_is_marked_dependent(true);
+-  }
+ 
+-  // Compute the dependent nmethods
+-  if (CodeCache::mark_for_deoptimization(dependee()) > 0) {
+-    // At least one nmethod has been marked for deoptimization 
+-    VM_Deoptimize op;  
+-    VMThread::execute(&op);    
+-  }
++  DepChange changes(dependee);
+ 
+-  // Unmark all dependee and all its superclasses
+-  for (klassOop e = dependee(); e != NULL; e = instanceKlass::cast(e)->super()) {
+-    instanceKlass::cast(e)->set_is_marked_dependent(false);
+-  }
+-  // Unmark transitive interfaces
+-  for (i = 0; i < dependee->transitive_interfaces()->length(); i++) {
+-    instanceKlass* klass = instanceKlass::cast((klassOop)dependee->transitive_interfaces()->obj_at(i));
+-    klass->set_is_marked_dependent(false);
++  // Compute the dependent nmethods
++  if (CodeCache::mark_for_deoptimization(changes) > 0) {
++    // At least one nmethod has been marked for deoptimization
++    VM_Deoptimize op;
++    VMThread::execute(&op);
+   }
+ }
+ 
+@@ -931,11 +972,11 @@
+   // CodeCache can only be updated by a thread_in_VM and they will all be
+   // stopped during the safepoint so CodeCache will be safe to update without
+   // holding the CodeCache_lock.
+-  
++
+   // Compute the dependent nmethods
+   if (CodeCache::mark_for_evol_deoptimization(ev_k_h) > 0) {
+-    // At least one nmethod has been marked for deoptimization 
+-    
++    // At least one nmethod has been marked for deoptimization
++
+     // All this already happens inside a VM_Operation, so we'll do all the work here.
+     // Stuff copied from VM_Deoptimize and modified slightly.
+ 
+@@ -943,11 +984,11 @@
+     ResourceMark rm;
+     DeoptimizationMarker dm;
+ 
+-    // Deoptimize all activations depending on marked nmethods  
++    // Deoptimize all activations depending on marked nmethods
+     Deoptimization::deoptimize_dependents();
+ 
+     // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
+-    CodeCache::make_marked_nmethods_not_entrant(); 
++    CodeCache::make_marked_nmethods_not_entrant();
+   }
+ }
+ #endif // HOTSWAP
+@@ -961,11 +1002,11 @@
+   // CodeCache can only be updated by a thread_in_VM and they will all be
+   // stopped during the safepoint so CodeCache will be safe to update without
+   // holding the CodeCache_lock.
+-  
++
+   // Compute the dependent nmethods
+   if (CodeCache::mark_for_deoptimization(m_h()) > 0) {
+-    // At least one nmethod has been marked for deoptimization 
+-    
++    // At least one nmethod has been marked for deoptimization
++
+     // All this already happens inside a VM_Operation, so we'll do all the work here.
+     // Stuff copied from VM_Deoptimize and modified slightly.
+ 
+@@ -973,11 +1014,11 @@
+     ResourceMark rm;
+     DeoptimizationMarker dm;
+ 
+-    // Deoptimize all activations depending on marked nmethods  
++    // Deoptimize all activations depending on marked nmethods
+     Deoptimization::deoptimize_dependents();
+ 
+     // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
+-    CodeCache::make_marked_nmethods_not_entrant(); 
++    CodeCache::make_marked_nmethods_not_entrant();
+   }
+ }
+ 
+@@ -997,17 +1038,17 @@
+   }
+ }
+ 
+-void Universe::print_heap_before_gc(outputStream* st) {  
++void Universe::print_heap_before_gc(outputStream* st) {
+   st->print_cr("{Heap before GC invocations=%u (full %u):",
+-	       heap()->total_collections(),
+-	       heap()->total_full_collections());
++               heap()->total_collections(),
++               heap()->total_full_collections());
+   heap()->print_on(st);
+ }
+ 
+ void Universe::print_heap_after_gc(outputStream* st) {
+   st->print_cr("Heap after GC invocations=%u (full %u):",
+-	       heap()->total_collections(),
+-	       heap()->total_full_collections());
++               heap()->total_collections(),
++               heap()->total_full_collections());
+   heap()->print_on(st);
+   st->print_cr("}");
+ }
+@@ -1034,24 +1075,24 @@
+   _verify_count++;
+ 
+   if (!silent) gclog_or_tty->print("[Verifying ");
+-  if (!silent) gclog_or_tty->print("threads ");     
++  if (!silent) gclog_or_tty->print("threads ");
+   Threads::verify();
+   heap()->verify(allow_dirty, silent);
+ 
+-  if (!silent) gclog_or_tty->print("syms ");        
++  if (!silent) gclog_or_tty->print("syms ");
+   SymbolTable::verify();
+-  if (!silent) gclog_or_tty->print("strs ");        
++  if (!silent) gclog_or_tty->print("strs ");
+   StringTable::verify();
+   {
+     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+-    if (!silent) gclog_or_tty->print("zone ");      
++    if (!silent) gclog_or_tty->print("zone ");
+     CodeCache::verify();
+   }
+-  if (!silent) gclog_or_tty->print("dict ");        
++  if (!silent) gclog_or_tty->print("dict ");
+   SystemDictionary::verify();
+-  if (!silent) gclog_or_tty->print("hand ");        
++  if (!silent) gclog_or_tty->print("hand ");
+   JNIHandles::verify();
+-  if (!silent) gclog_or_tty->print("C-heap ");      
++  if (!silent) gclog_or_tty->print("C-heap ");
+   os::check_heap();
+   if (!silent) gclog_or_tty->print_cr("]");
+ 
+@@ -1065,8 +1106,8 @@
+ 
+ 
+ static void calculate_verify_data(uintptr_t verify_data[2],
+-				  HeapWord* low_boundary,
+-				  HeapWord* high_boundary) {
++                                  HeapWord* low_boundary,
++                                  HeapWord* high_boundary) {
+   assert(low_boundary < high_boundary, "bad interval");
+ 
+   // decide which low-order bits we require to be clear:
+@@ -1105,8 +1146,8 @@
+ uintptr_t Universe::verify_oop_mask() {
+   MemRegion m = heap()->reserved_region();
+   calculate_verify_data(_verify_oop_data,
+-			m.start(),
+-			m.end());
++                        m.start(),
++                        m.end());
+   return _verify_oop_data[0];
+ }
+ 
+@@ -1127,9 +1168,9 @@
+   size_t min_new_size = Universe::new_size();   // in bytes
+   size_t min_old_size = Universe::old_size();   // in bytes
+   calculate_verify_data(_verify_klass_data,
+-	  (HeapWord*)((uintptr_t)_new_gen->low_boundary + min_new_size + min_old_size),
+-	  _perm_gen->high_boundary);
+-			*/
++          (HeapWord*)((uintptr_t)_new_gen->low_boundary + min_new_size + min_old_size),
++          _perm_gen->high_boundary);
++                        */
+   // Why doesn't the above just say that klass's always live in the perm
+   // gen?  I'll see if that seems to work...
+   MemRegion permanent_reserved;
+@@ -1152,9 +1193,9 @@
+ #endif // SERIALGC
+   }
+   calculate_verify_data(_verify_klass_data,
+-                        permanent_reserved.start(), 
++                        permanent_reserved.start(),
+                         permanent_reserved.end());
+-  
++
+   return _verify_klass_data[0];
+ }
+ 
+@@ -1256,7 +1297,7 @@
+       // do anything special with the index.
+       continue;  // robustness
+     }
+-      
++
+     methodOop m = (methodOop)JNIHandles::resolve(method_ref);
+     if (m == NULL) {
+       // this method entry has been GC'ed so remove it
+diff -ruN openjdk6/hotspot/src/share/vm/memory/universe.hpp openjdk/hotspot/src/share/vm/memory/universe.hpp
+--- openjdk6/hotspot/src/share/vm/memory/universe.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/universe.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)universe.hpp	1.182 07/05/17 15:55:24 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,13 +19,13 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Universe is a name space holding known system classes and objects in the VM.
+-// 
++//
+ // Loaded classes are accessible through the SystemDictionary.
+-// 
++//
+ // The object heap is allocated and accessed through Universe, and various allocation
+ // support is provided. Allocation by the interpreter and compiled code is done inline
+ // and bails out to Scavenge::invoke_and_allocate.
+@@ -119,7 +116,7 @@
+   static klassOop _singleArrayKlassObj;
+   static klassOop _doubleArrayKlassObj;
+   static klassOop _typeArrayKlassObjs[T_VOID+1];
+-  
++
+   static klassOop _objectArrayKlassObj;
+ 
+   static klassOop _symbolKlassObj;
+@@ -137,6 +134,18 @@
+   static klassOop _systemObjArrayKlassObj;
+ 
+   // Known objects in the VM
++
++  // Primitive objects
++  static oop _int_mirror;
++  static oop _float_mirror;
++  static oop _double_mirror;
++  static oop _byte_mirror;
++  static oop _bool_mirror;
++  static oop _char_mirror;
++  static oop _long_mirror;
++  static oop _short_mirror;
++  static oop _void_mirror;
++
+   static oop          _main_thread_group;             // Reference to the main thread group object
+   static oop          _system_thread_group;           // Reference to the system thread group object
+ 
+@@ -175,7 +184,7 @@
+   // array of dummy objects used with +FullGCAlot
+   debug_only(static objArrayOop _fullgc_alot_dummy_array;)
+  // index of next entry to clear
+-  debug_only(static int         _fullgc_alot_dummy_next;) 
++  debug_only(static int         _fullgc_alot_dummy_next;)
+ 
+   // Compiler/dispatch support
+   static int  _base_vtable_size;                      // Java vtbl size of klass Object (in words)
+@@ -189,13 +198,14 @@
+ 
+   // generate an out of memory error; if possible using an error with preallocated backtrace;
+   // otherwise return the given default error.
+-  static oop	    gen_out_of_memory_error(oop default_err);
++  static oop        gen_out_of_memory_error(oop default_err);
+ 
+   // Historic gc information
+   static size_t _heap_capacity_at_last_gc;
+   static size_t _heap_used_at_last_gc;
+ 
+   static jint initialize_heap();
++  static void initialize_basic_type_mirrors(TRAPS);
+   static void fixup_mirrors(TRAPS);
+ 
+   static void reinitialize_vtable_of(KlassHandle h_k, TRAPS);
+@@ -204,10 +214,16 @@
+ 
+   static void genesis(TRAPS);                         // Create the initial world
+ 
++  // Mirrors for primitive classes (created eagerly)
++  static oop check_mirror(oop m) {
++    assert(m != NULL, "mirror not initialized");
++    return m;
++  }
++
+   // Debugging
+   static int _verify_count;                           // number of verifies done
+   // True during call to verify().  Should only be set/cleared in verify().
+-  static bool _verify_in_progress;		      
++  static bool _verify_in_progress;
+ 
+   static void compute_verify_oop_data();
+ 
+@@ -225,7 +241,7 @@
+   static klassOop objectArrayKlassObj() {
+     return _objectArrayKlassObj;
+   }
+-  
++
+   static klassOop typeArrayKlassObj(BasicType t) {
+     assert((uint)t < T_VOID+1, "range check");
+     assert(_typeArrayKlassObjs[t] != NULL, "domain check");
+@@ -247,6 +263,24 @@
+   static klassOop systemObjArrayKlassObj()            { return _systemObjArrayKlassObj;    }
+ 
+   // Known objects in the VM
++  static oop int_mirror()                   { return check_mirror(_int_mirror);
++}
++  static oop float_mirror()                 { return check_mirror(_float_mirror); }
++  static oop double_mirror()                { return check_mirror(_double_mirror); }
++  static oop byte_mirror()                  { return check_mirror(_byte_mirror); }
++  static oop bool_mirror()                  { return check_mirror(_bool_mirror); }
++  static oop char_mirror()                  { return check_mirror(_char_mirror); }
++  static oop long_mirror()                  { return check_mirror(_long_mirror); }
++  static oop short_mirror()                 { return check_mirror(_short_mirror); }
++  static oop void_mirror()                  { return check_mirror(_void_mirror); }
++
++  // table of same
++  static oop _mirrors[T_VOID+1];
++
++  static oop java_mirror(BasicType t) {
++    assert((uint)t < T_VOID+1, "range check");
++    return check_mirror(_mirrors[t]);
++  }
+   static oop      main_thread_group()                 { return _main_thread_group; }
+   static void set_main_thread_group(oop group)        { _main_thread_group = group;}
+ 
+@@ -256,9 +290,9 @@
+   static typeArrayOop the_empty_byte_array()          { return _the_empty_byte_array;          }
+   static typeArrayOop the_empty_short_array()         { return _the_empty_short_array;         }
+   static typeArrayOop the_empty_int_array()           { return _the_empty_int_array;           }
+-  static objArrayOop  the_empty_system_obj_array ()   { return _the_empty_system_obj_array;    }  
+-  static objArrayOop  the_empty_class_klass_array ()  { return _the_empty_class_klass_array;   }  
+-  static objArrayOop  the_array_interfaces_array()    { return _the_array_interfaces_array;    }  
++  static objArrayOop  the_empty_system_obj_array ()   { return _the_empty_system_obj_array;    }
++  static objArrayOop  the_empty_class_klass_array ()  { return _the_empty_class_klass_array;   }
++  static objArrayOop  the_array_interfaces_array()    { return _the_array_interfaces_array;    }
+   static methodOop    finalizer_register_method()     { return _finalizer_register_cache->get_methodOop(); }
+   static methodOop    loader_addClass_method()        { return _loader_addClass_cache->get_methodOop(); }
+   static ActiveMethodOopsCache* reflect_invoke_cache() { return _reflect_invoke_cache; }
+@@ -268,12 +302,12 @@
+   static oop          vm_exception()                  { return _vm_exception; }
+   static oop          emptySymbol()                   { return _emptySymbol; }
+ 
+-  // OutOfMemoryError support. Returns an error with the required message. The returned error 
++  // OutOfMemoryError support. Returns an error with the required message. The returned error
+   // may or may not have a backtrace. If error has a backtrace then the stack trace is already
+   // filled in.
+-  static oop out_of_memory_error_java_heap()	      { return gen_out_of_memory_error(_out_of_memory_error_java_heap);  }
+-  static oop out_of_memory_error_perm_gen()	      { return gen_out_of_memory_error(_out_of_memory_error_perm_gen);   }
+-  static oop out_of_memory_error_array_size()	      { return gen_out_of_memory_error(_out_of_memory_error_array_size); }
++  static oop out_of_memory_error_java_heap()          { return gen_out_of_memory_error(_out_of_memory_error_java_heap);  }
++  static oop out_of_memory_error_perm_gen()           { return gen_out_of_memory_error(_out_of_memory_error_perm_gen);   }
++  static oop out_of_memory_error_array_size()         { return gen_out_of_memory_error(_out_of_memory_error_array_size); }
+   static oop out_of_memory_error_gc_overhead_limit()  { return gen_out_of_memory_error(_out_of_memory_error_gc_overhead_limit);  }
+ 
+   // Accessors needed for fast allocation
+@@ -317,7 +351,7 @@
+   // Apply "f" to all klasses for basic types (classes not present in
+   // SystemDictionary).
+   static void basic_type_classes_do(void f(klassOop));
+-  
++
+   // Apply "f" to all system klasses (classes not present in SystemDictionary).
+   static void system_classes_do(void f(klassOop));
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/memory/universe.inline.hpp openjdk/hotspot/src/share/vm/memory/universe.inline.hpp
+--- openjdk6/hotspot/src/share/vm/memory/universe.inline.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/universe.inline.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)universe.inline.hpp	1.47 07/05/05 17:05:57 JVM"
+-#endif
+ /*
+  * Copyright 1997-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Check whether an element of a typeArrayOop with the given type must be
+@@ -38,6 +35,3 @@
+ inline bool Universe::field_type_should_be_aligned(BasicType type) {
+   return type == T_DOUBLE || type == T_LONG;
+ }
+-
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/memory/watermark.hpp openjdk/hotspot/src/share/vm/memory/watermark.hpp
+--- openjdk6/hotspot/src/share/vm/memory/watermark.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/memory/watermark.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)watermark.hpp	1.14 07/05/05 17:05:56 JVM"
+-#endif
+ /*
+  * Copyright 2000-2001 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A water mark points into a space and is used during GC to keep track of
+@@ -54,4 +51,3 @@
+ inline bool operator!=(const WaterMark& x, const WaterMark& y) {
+   return !(x == y);
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/arrayKlass.cpp openjdk/hotspot/src/share/vm/oops/arrayKlass.cpp
+--- openjdk6/hotspot/src/share/vm/oops/arrayKlass.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/arrayKlass.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)arrayKlass.cpp	1.95 07/05/05 17:05:59 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -97,7 +94,7 @@
+   assert(k()->is_parsable(), "should be parsable here.");
+   // Make sure size calculation is right
+   assert(k()->size() == align_object_size(header_size + vtable_size), "wrong size for object");
+-  
++
+   return k;
+ }
+ 
+@@ -105,9 +102,9 @@
+ // Initialization of vtables and mirror object is done separatly from base_create_array_klass,
+ // since a GC can happen. At this point all instance variables of the arrayKlass must be setup.
+ void arrayKlass::complete_create_array_klass(arrayKlassHandle k, KlassHandle super_klass, TRAPS) {
+-  ResourceMark rm(THREAD);    
++  ResourceMark rm(THREAD);
+   k->initialize_supers(super_klass(), CHECK);
+-  k->vtable()->initialize_vtable(false, CHECK);  
++  k->vtable()->initialize_vtable(false, CHECK);
+   java_lang_Class::create_mirror(k, CHECK);
+ }
+ 
+@@ -128,7 +125,7 @@
+ 
+ inline intptr_t* arrayKlass::start_of_vtable() const {
+   // all vtables start at the same place, that's why we use instanceKlass::header_size here
+-  return ((intptr_t*)as_klassOop()) + instanceKlass::header_size(); 
++  return ((intptr_t*)as_klassOop()) + instanceKlass::header_size();
+ }
+ 
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/oops/arrayKlass.hpp openjdk/hotspot/src/share/vm/oops/arrayKlass.hpp
+--- openjdk6/hotspot/src/share/vm/oops/arrayKlass.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/arrayKlass.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)arrayKlass.hpp	1.66 07/05/05 17:05:58 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // arrayKlass is the abstract baseclass for all array classes
+@@ -33,7 +30,7 @@
+   int      _dimension;         // This is n'th-dimensional array.
+   klassOop _higher_dimension;  // Refers the (n+1)'th-dimensional array (if present).
+   klassOop _lower_dimension;   // Refers the (n-1)'th-dimensional array (if present).
+-  int      _vtable_len;        // size of vtable for this klass  
++  int      _vtable_len;        // size of vtable for this klass
+   juint    _alloc_size;        // allocation profiling support
+   oop      _component_mirror;  // component type, as a java/lang/Class
+ 
+@@ -135,4 +132,3 @@
+   // Verification
+   void oop_verify_on(oop obj, outputStream* st);
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/arrayKlassKlass.cpp openjdk/hotspot/src/share/vm/oops/arrayKlassKlass.cpp
+--- openjdk6/hotspot/src/share/vm/oops/arrayKlassKlass.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/arrayKlassKlass.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)arrayKlassKlass.cpp	1.56 07/05/29 09:44:17 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -60,7 +57,7 @@
+ 
+ #ifndef SERIALGC
+ void arrayKlassKlass::oop_follow_contents(ParCompactionManager* cm,
+-					  oop obj) {
++                                          oop obj) {
+   assert(obj->is_klass(), "must be klass");
+   arrayKlass* ak = arrayKlass::cast(klassOop(obj));
+   PSParallelCompact::mark_and_push(cm, ak->adr_component_mirror());
+@@ -134,7 +131,7 @@
+ 
+ int
+ arrayKlassKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
+-				     HeapWord* beg_addr, HeapWord* end_addr) {
++                                     HeapWord* beg_addr, HeapWord* end_addr) {
+   assert(obj->is_klass(), "must be klass");
+   arrayKlass* ak = arrayKlass::cast(klassOop(obj));
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/oops/arrayKlassKlass.hpp openjdk/hotspot/src/share/vm/oops/arrayKlassKlass.hpp
+--- openjdk6/hotspot/src/share/vm/oops/arrayKlassKlass.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/arrayKlassKlass.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)arrayKlassKlass.hpp	1.39 07/05/29 09:44:18 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // arrayKlassKlass is the abstract baseclass for all array class classes
+@@ -35,11 +32,11 @@
+   // Allocation
+   DEFINE_ALLOCATE_PERMANENT(arrayKlassKlass);
+   static klassOop create_klass(TRAPS);
+- 
++
+   // Casting from klassOop
+   static arrayKlassKlass* cast(klassOop k) {
+     assert(k->klass_part()->oop_is_klass(), "cast to arrayKlassKlass");
+-    return (arrayKlassKlass*) k->klass_part(); 
++    return (arrayKlassKlass*) k->klass_part();
+   }
+ 
+   // Sizing
+@@ -70,4 +67,3 @@
+   const char* internal_name() const;
+   void oop_verify_on(oop obj, outputStream* st);
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/arrayOop.cpp openjdk/hotspot/src/share/vm/oops/arrayOop.cpp
+--- openjdk6/hotspot/src/share/vm/oops/arrayOop.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/arrayOop.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)arrayOop.cpp	1.18 07/05/05 17:05:58 JVM"
+-#endif
+ /*
+  * Copyright 1997 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,11 +19,10 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_arrayOop.cpp.incl"
+ 
+ // <<this page is intentionally left blank>>
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/arrayOop.hpp openjdk/hotspot/src/share/vm/oops/arrayOop.hpp
+--- openjdk6/hotspot/src/share/vm/oops/arrayOop.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/arrayOop.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)arrayOop.hpp	1.35 07/05/05 17:06:00 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // arrayOopDesc is the abstract baseclass for all arrays.
+@@ -41,31 +38,31 @@
+   void* base(BasicType type) const              { return (void*) (((intptr_t) this) + base_offset_in_bytes(type)); }
+ 
+   // Tells whether index is within bounds.
+-  bool is_within_bounds(int index) const	{ return 0 <= index && index < length(); }
++  bool is_within_bounds(int index) const        { return 0 <= index && index < length(); }
+ 
+   // Accessores for instance variable
+-  int length() const				{ return _length;   }
+-  void set_length(int length)			{ _length = length; }
++  int length() const                            { return _length;   }
++  void set_length(int length)                   { _length = length; }
+ 
+-  // Header size computation. 
++  // Header size computation.
+   // Should only be called with constants as argument (will not constant fold otherwise)
+   static int header_size(BasicType type) {
+-    return Universe::element_type_should_be_aligned(type) 
+-      ? align_object_size(sizeof(arrayOopDesc)/HeapWordSize) 
+-      : sizeof(arrayOopDesc)/HeapWordSize; 
++    return Universe::element_type_should_be_aligned(type)
++      ? align_object_size(sizeof(arrayOopDesc)/HeapWordSize)
++      : sizeof(arrayOopDesc)/HeapWordSize;
+   }
+ 
+   // This method returns the  maximum length that can passed into
+   // typeArrayOop::object_size(scale, length, header_size) without causing an
+   // overflow. We substract an extra 2*wordSize to guard against double word
+   // alignments.  It gets the scale from the type2aelembytes array.
+-  static int32_t max_array_length(BasicType type) { 
++  static int32_t max_array_length(BasicType type) {
+     assert(type >= 0 && type < T_CONFLICT, "wrong type");
+     assert(type2aelembytes[type] != 0, "wrong type");
+     // We use max_jint, since object_size is internally represented by an 'int'
+     // This gives us an upper bound of max_jint words for the size of the oop.
+     int32_t max_words = (max_jint - header_size(type) - 2);
+-    int elembytes = (type == T_OBJECT) ? T_OBJECT_aelem_bytes : type2aelembytes[type]; 
++    int elembytes = (type == T_OBJECT) ? T_OBJECT_aelem_bytes : type2aelembytes[type];
+     jlong len = ((jlong)max_words * HeapWordSize) / elembytes;
+     return (len > max_jint) ? max_jint : (int32_t)len;
+   }
+diff -ruN openjdk6/hotspot/src/share/vm/oops/compiledICHolderKlass.cpp openjdk/hotspot/src/share/vm/oops/compiledICHolderKlass.cpp
+--- openjdk6/hotspot/src/share/vm/oops/compiledICHolderKlass.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/compiledICHolderKlass.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)compiledICHolderKlass.cpp	1.41 07/05/29 09:44:18 JVM"
+-#endif
+ /*
+  * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -30,7 +27,7 @@
+ 
+ klassOop compiledICHolderKlass::create_klass(TRAPS) {
+   compiledICHolderKlass o;
+-  KlassHandle h_this_klass(THREAD, Universe::klassKlassObj());  
++  KlassHandle h_this_klass(THREAD, Universe::klassKlassObj());
+   KlassHandle k = base_create_klass(h_this_klass, header_size(), o.vtbl_value(), CHECK_NULL);
+   // Make sure size calculation is right
+   assert(k()->size() == align_object_size(header_size()), "wrong size for object");
+@@ -66,7 +63,7 @@
+ 
+ #ifndef SERIALGC
+ void compiledICHolderKlass::oop_follow_contents(ParCompactionManager* cm,
+-						oop obj) {
++                                                oop obj) {
+   assert(obj->is_compiledICHolder(), "must be compiledICHolder");
+   compiledICHolderOop c = compiledICHolderOop(obj);
+ 
+@@ -91,7 +88,7 @@
+ }
+ 
+ int compiledICHolderKlass::oop_oop_iterate_m(oop obj, OopClosure* blk,
+-					      MemRegion mr) {
++                                              MemRegion mr) {
+   assert(obj->is_compiledICHolder(), "must be compiledICHolder");
+   compiledICHolderOop c = compiledICHolderOop(obj);
+   // Get size before changing pointers.
+@@ -132,7 +129,7 @@
+ }
+ 
+ int compiledICHolderKlass::oop_update_pointers(ParCompactionManager* cm,
+-					       oop obj) {
++                                               oop obj) {
+   assert(obj->is_compiledICHolder(), "must be compiledICHolder");
+   compiledICHolderOop c = compiledICHolderOop(obj);
+ 
+@@ -142,9 +139,9 @@
+ }
+ 
+ int compiledICHolderKlass::oop_update_pointers(ParCompactionManager* cm,
+-					       oop obj,
+-					       HeapWord* beg_addr,
+-					       HeapWord* end_addr) {
++                                               oop obj,
++                                               HeapWord* beg_addr,
++                                               HeapWord* end_addr) {
+   assert(obj->is_compiledICHolder(), "must be compiledICHolder");
+   compiledICHolderOop c = compiledICHolderOop(obj);
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/oops/compiledICHolderKlass.hpp openjdk/hotspot/src/share/vm/oops/compiledICHolderKlass.hpp
+--- openjdk6/hotspot/src/share/vm/oops/compiledICHolderKlass.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/compiledICHolderKlass.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)compiledICHolderKlass.hpp	1.32 07/05/29 09:44:18 JVM"
+-#endif
+ /*
+  * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class CMSIsAliveClosure;
+@@ -49,7 +46,7 @@
+   // Casting from klassOop
+   static compiledICHolderKlass* cast(klassOop k) {
+     assert(k->klass_part()->oop_is_compiledICHolder(), "cast to compiledICHolderKlass");
+-    return (compiledICHolderKlass*) k->klass_part(); 
++    return (compiledICHolderKlass*) k->klass_part();
+   }
+ 
+   // Sizing
+@@ -84,4 +81,3 @@
+   void oop_verify_on(oop obj, outputStream* st);
+ 
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/compiledICHolderOop.cpp openjdk/hotspot/src/share/vm/oops/compiledICHolderOop.cpp
+--- openjdk6/hotspot/src/share/vm/oops/compiledICHolderOop.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/compiledICHolderOop.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)compiledICHolderOop.cpp	1.12 07/05/05 17:05:59 JVM"
+-#endif
+ /*
+  * Copyright 1998 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,11 +19,10 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_compiledICHolderOop.cpp.incl"
+ 
+ // <<this page is intentionally left blank>>
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/compiledICHolderOop.hpp openjdk/hotspot/src/share/vm/oops/compiledICHolderOop.hpp
+--- openjdk6/hotspot/src/share/vm/oops/compiledICHolderOop.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/compiledICHolderOop.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)compiledICHolderOop.hpp	1.20 07/05/05 17:05:59 JVM"
+-#endif
+ /*
+  * Copyright 1998-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A compiledICHolderOop is a helper object for the inline cache implementation.
+@@ -45,7 +42,7 @@
+ 
+   void set_holder_method(methodOop m) { oop_store_without_check((oop*)&_holder_method, (oop)m); }
+   void set_holder_klass(klassOop k)   { oop_store_without_check((oop*)&_holder_klass, (oop)k); }
+-  
++
+   static int header_size()            { return sizeof(compiledICHolderOopDesc)/HeapWordSize; }
+   static int object_size()            { return align_object_size(header_size()); }
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/oops/constantPoolKlass.cpp openjdk/hotspot/src/share/vm/oops/constantPoolKlass.cpp
+--- openjdk6/hotspot/src/share/vm/oops/constantPoolKlass.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/constantPoolKlass.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)constantPoolKlass.cpp	1.105 07/05/29 09:44:18 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -31,7 +28,7 @@
+ constantPoolOop constantPoolKlass::allocate(int length, TRAPS) {
+   int size = constantPoolOopDesc::object_size(length);
+   KlassHandle klass (THREAD, as_klassOop());
+-  constantPoolOop c = 
++  constantPoolOop c =
+     (constantPoolOop)CollectedHeap::permanent_array_allocate(klass, size, length, CHECK_NULL);
+ 
+   c->set_tags(NULL);
+@@ -57,7 +54,7 @@
+ 
+ klassOop constantPoolKlass::create_klass(TRAPS) {
+   constantPoolKlass o;
+-  KlassHandle klassklass(THREAD, Universe::arrayKlassKlassObj());  
++  KlassHandle klassklass(THREAD, Universe::arrayKlassKlassObj());
+   arrayKlassHandle k = base_create_array_klass(o.vtbl_value(), header_size(), klassklass, CHECK_NULL);
+   arrayKlassHandle super (THREAD, k->super());
+   complete_create_array_klass(k, super, CHECK_NULL);
+@@ -65,7 +62,7 @@
+ }
+ 
+ 
+-int constantPoolKlass::oop_size(oop obj) const { 
++int constantPoolKlass::oop_size(oop obj) const {
+   assert(obj->is_constantPool(), "must be constantPool");
+   return constantPoolOop(obj)->object_size();
+ }
+@@ -74,7 +71,7 @@
+ void constantPoolKlass::oop_follow_contents(oop obj) {
+   assert (obj->is_constantPool(), "obj must be constant pool");
+   constantPoolOop cp = (constantPoolOop) obj;
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::constantPoolKlassObj never moves.
+ 
+   // If the tags array is null we are in the middle of allocating this constant pool
+@@ -83,8 +80,8 @@
+     oop* base = (oop*)cp->base();
+     for (int i = 0; i < cp->length(); i++) {
+       if (cp->is_pointer_entry(i)) {
+-        if (*base != NULL) MarkSweep::mark_and_push(base); 
+-      } 
++        if (*base != NULL) MarkSweep::mark_and_push(base);
++      }
+       base++;
+     }
+     // gc of constant pool instance variables
+@@ -96,10 +93,10 @@
+ 
+ #ifndef SERIALGC
+ void constantPoolKlass::oop_follow_contents(ParCompactionManager* cm,
+-					    oop obj) {
++                                            oop obj) {
+   assert (obj->is_constantPool(), "obj must be constant pool");
+   constantPoolOop cp = (constantPoolOop) obj;
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::constantPoolKlassObj never moves.
+ 
+   // If the tags array is null we are in the middle of allocating this constant
+@@ -109,8 +106,8 @@
+     oop* base = (oop*)cp->base();
+     for (int i = 0; i < cp->length(); i++) {
+       if (cp->is_pointer_entry(i)) {
+-        if (*base != NULL) PSParallelCompact::mark_and_push(cm, base); 
+-      } 
++        if (*base != NULL) PSParallelCompact::mark_and_push(cm, base);
++      }
+       base++;
+     }
+     // gc of constant pool instance variables
+@@ -125,10 +122,10 @@
+ int constantPoolKlass::oop_adjust_pointers(oop obj) {
+   assert (obj->is_constantPool(), "obj must be constant pool");
+   constantPoolOop cp = (constantPoolOop) obj;
+-  // Get size before changing pointers. 
++  // Get size before changing pointers.
+   // Don't call size() or oop_size() since that is a virtual call.
+   int size = cp->object_size();
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::constantPoolKlassObj never moves.
+ 
+   // If the tags array is null we are in the middle of allocating this constant
+@@ -137,8 +134,8 @@
+     oop* base = (oop*)cp->base();
+     for (int i = 0; i< cp->length();  i++) {
+       if (cp->is_pointer_entry(i)) {
+-        MarkSweep::adjust_pointer(base); 
+-      } 
++        MarkSweep::adjust_pointer(base);
++      }
+       base++;
+     }
+   }
+@@ -151,10 +148,10 @@
+ 
+ int constantPoolKlass::oop_oop_iterate(oop obj, OopClosure* blk) {
+   assert (obj->is_constantPool(), "obj must be constant pool");
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::constantPoolKlassObj never moves.
+   constantPoolOop cp = (constantPoolOop) obj;
+-  // Get size before changing pointers. 
++  // Get size before changing pointers.
+   // Don't call size() or oop_size() since that is a virtual call.
+   int size = cp->object_size();
+ 
+@@ -164,8 +161,8 @@
+     oop* base = (oop*)cp->base();
+     for (int i = 0; i < cp->length(); i++) {
+       if (cp->is_pointer_entry(i)) {
+-        blk->do_oop(base); 
+-      } 
++        blk->do_oop(base);
++      }
+       base++;
+     }
+   }
+@@ -178,10 +175,10 @@
+ 
+ int constantPoolKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) {
+   assert (obj->is_constantPool(), "obj must be constant pool");
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::constantPoolKlassObj never moves.
+   constantPoolOop cp = (constantPoolOop) obj;
+-  // Get size before changing pointers. 
++  // Get size before changing pointers.
+   // Don't call size() or oop_size() since that is a virtual call.
+   int size = cp->object_size();
+ 
+@@ -192,7 +189,7 @@
+     for (int i = 0; i < cp->length(); i++) {
+       if (mr.contains(base)) {
+         if (cp->is_pointer_entry(i)) {
+-          blk->do_oop(base); 
++          blk->do_oop(base);
+         }
+       }
+       base++;
+@@ -219,7 +216,7 @@
+     oop* base = (oop*)cp->base();
+     for (int i = 0; i < cp->length(); ++i, ++base) {
+       if (cp->is_pointer_entry(i)) {
+-        PSParallelCompact::adjust_pointer(base); 
++        PSParallelCompact::adjust_pointer(base);
+       }
+     }
+   }
+@@ -231,7 +228,7 @@
+ 
+ int
+ constantPoolKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
+-				       HeapWord* beg_addr, HeapWord* end_addr) {
++                                       HeapWord* beg_addr, HeapWord* end_addr) {
+   assert (obj->is_constantPool(), "obj must be constant pool");
+   constantPoolOop cp = (constantPoolOop) obj;
+ 
+@@ -279,7 +276,7 @@
+   oop anObj;
+   assert(obj->is_constantPool(), "must be constantPool");
+   arrayKlass::oop_print_on(obj, st);
+-  constantPoolOop cp = constantPoolOop(obj);  
++  constantPoolOop cp = constantPoolOop(obj);
+ 
+   // Temp. remove cache so we can do lookups with original indicies.
+   constantPoolCacheHandle cache (THREAD, cp->cache());
+@@ -298,9 +295,9 @@
+         break;
+       case JVM_CONSTANT_Fieldref :
+       case JVM_CONSTANT_Methodref :
+-      case JVM_CONSTANT_InterfaceMethodref :        
++      case JVM_CONSTANT_InterfaceMethodref :
+         st->print("klass_index=%d", cp->klass_ref_index_at(index));
+-        st->print(" name_and_type_index=%d", cp->name_and_type_ref_index_at(index));        
++        st->print(" name_and_type_index=%d", cp->name_and_type_ref_index_at(index));
+         break;
+       case JVM_CONSTANT_UnresolvedString :
+       case JVM_CONSTANT_String :
+@@ -322,14 +319,14 @@
+         st->print("%lf", cp->double_at(index));
+         index++;   // Skip entry following eigth-byte constant
+         break;
+-      case JVM_CONSTANT_NameAndType :        
++      case JVM_CONSTANT_NameAndType :
+         st->print("name_index=%d", cp->name_ref_index_at(index));
+         st->print(" signature_index=%d", cp->signature_ref_index_at(index));
+         break;
+       case JVM_CONSTANT_Utf8 :
+         cp->symbol_at(index)->print_value_on(st);
+         break;
+-      case JVM_CONSTANT_UnresolvedClass :		// fall-through
++      case JVM_CONSTANT_UnresolvedClass :               // fall-through
+       case JVM_CONSTANT_UnresolvedClassInError: {
+         // unresolved_klass_at requires lock or safe world.
+         oop entry = *cp->obj_at_addr(index);
+@@ -360,7 +357,7 @@
+ void constantPoolKlass::oop_verify_on(oop obj, outputStream* st) {
+   Klass::oop_verify_on(obj, st);
+   guarantee(obj->is_constantPool(), "object must be constant pool");
+-  constantPoolOop cp = constantPoolOop(obj);  
++  constantPoolOop cp = constantPoolOop(obj);
+   guarantee(cp->is_perm(), "should be in permspace");
+   if (!cp->partially_loaded()) {
+     oop* base = (oop*)cp->base();
+@@ -424,18 +421,18 @@
+ // CompileTheWorld support. Preload all classes loaded references in the passed in constantpool
+ void constantPoolKlass::preload_and_initialize_all_classes(oop obj, TRAPS) {
+   guarantee(obj->is_constantPool(), "object must be constant pool");
+-  constantPoolHandle cp(THREAD, (constantPoolOop)obj);  
++  constantPoolHandle cp(THREAD, (constantPoolOop)obj);
+   guarantee(!cp->partially_loaded(), "must be fully loaded");
+-    
+-  for (int i = 0; i< cp->length();  i++) {    
++
++  for (int i = 0; i< cp->length();  i++) {
+     if (cp->tag_at(i).is_unresolved_klass()) {
+       // This will force loading of the class
+       klassOop klass = cp->klass_at(i, CHECK);
+       if (klass->is_instance()) {
+         // Force initialization of class
+         instanceKlass::cast(klass)->initialize(CHECK);
+-      } 
+-    }    
++      }
++    }
+   }
+ }
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/oops/constantPoolKlass.hpp openjdk/hotspot/src/share/vm/oops/constantPoolKlass.hpp
+--- openjdk6/hotspot/src/share/vm/oops/constantPoolKlass.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/constantPoolKlass.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)constantPoolKlass.hpp	1.51 07/05/29 09:44:18 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A constantPoolKlass is the klass of a constantPoolOop
+@@ -36,13 +33,13 @@
+ 
+   // Allocation
+   DEFINE_ALLOCATE_PERMANENT(constantPoolKlass);
+-  constantPoolOop allocate(int length, TRAPS); 
++  constantPoolOop allocate(int length, TRAPS);
+   static klassOop create_klass(TRAPS);
+ 
+   // Casting from klassOop
+   static constantPoolKlass* cast(klassOop k) {
+     assert(k->klass_part()->oop_is_constantPool(), "cast to constantPoolKlass");
+-    return (constantPoolKlass*) k->klass_part(); 
++    return (constantPoolKlass*) k->klass_part();
+   }
+ 
+   // Sizing
+@@ -78,4 +75,3 @@
+   static void preload_and_initialize_all_classes(oop constant_pool, TRAPS);
+ #endif
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/constantPoolOop.cpp openjdk/hotspot/src/share/vm/oops/constantPoolOop.cpp
+--- openjdk6/hotspot/src/share/vm/oops/constantPoolOop.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/constantPoolOop.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)constantPoolOop.cpp	1.104 07/05/05 17:06:01 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,21 +19,21 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_constantPoolOop.cpp.incl"
+ 
+-klassOop constantPoolOopDesc::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS) {   
+-  // A resolved constantPool entry will contain a klassOop, otherwise a symbolOop. 
++klassOop constantPoolOopDesc::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS) {
++  // A resolved constantPool entry will contain a klassOop, otherwise a symbolOop.
+   // It is not safe to rely on the tag bit's here, since we don't have a lock, and the entry and
+-  // tag is not updated atomicly.  
++  // tag is not updated atomically.
+   oop entry = *(this_oop->obj_at_addr(which));
+   if (entry->is_klass()) {
+     // Already resolved - return entry.
+     return (klassOop)entry;
+-  }  
++  }
+ 
+   // Acquire lock on constant oop while doing update. After we get the lock, we check if another object
+   // already has updated the object
+@@ -65,11 +62,11 @@
+   if (in_error) {
+     symbolOop error = SystemDictionary::find_resolution_error(this_oop, which);
+     guarantee(error != (symbolOop)NULL, "tag mismatch with resolution error table");
+-    ResourceMark rm;   
++    ResourceMark rm;
+     // exception text will be the class name
+     const char* className = this_oop->unresolved_klass_at(which)->as_C_string();
+     THROW_MSG_0(error, className);
+-  }    
++  }
+ 
+   if (do_resolve) {
+     // this_oop must be unlocked during resolve_or_fail
+@@ -77,7 +74,7 @@
+     Handle h_prot (THREAD, protection_domain);
+     klassOop k_oop = SystemDictionary::resolve_or_fail(name, loader, h_prot, true, THREAD);
+     KlassHandle k;
+-    if (!HAS_PENDING_EXCEPTION) {   
++    if (!HAS_PENDING_EXCEPTION) {
+       k = KlassHandle(THREAD, k_oop);
+       // Do access check for klasses
+       verify_constant_pool_resolve(this_oop, k, THREAD);
+@@ -90,7 +87,7 @@
+       symbolHandle error(PENDING_EXCEPTION->klass()->klass_part()->name());
+ 
+       bool throw_orig_error = false;
+-      {      
++      {
+         ObjectLocker ol (this_oop, THREAD);
+ 
+         // some other thread has beaten us and has resolved the class.
+@@ -106,8 +103,8 @@
+           // being loaded due to virtual machine errors like StackOverflow
+           // and OutOfMemoryError, etc, or if the thread was hit by stop()
+           // Needs clarification to section 5.4.3 of the VM spec (see 6308271)
+-        } 
+-	else if (!this_oop->tag_at(which).is_unresolved_klass_in_error()) {
++        }
++        else if (!this_oop->tag_at(which).is_unresolved_klass_in_error()) {
+           SystemDictionary::add_resolution_error(this_oop, which, error);
+           this_oop->tag_at_put(which, JVM_CONSTANT_UnresolvedClassInError);
+         } else {
+@@ -120,14 +117,14 @@
+ 
+       if (throw_orig_error) {
+         CLEAR_PENDING_EXCEPTION;
+-        ResourceMark rm;   
++        ResourceMark rm;
+         const char* className = this_oop->unresolved_klass_at(which)->as_C_string();
+         THROW_MSG_0(error, className);
+       }
+ 
+       return 0;
+     }
+-    
++
+     if (TraceClassResolution && !k()->klass_part()->oop_is_array()) {
+       // skip resolving the constant pool so that this code get's
+       // called the next time some bytecodes refer to this class.
+@@ -163,11 +160,11 @@
+       // Only updated constant pool - if it is resolved.
+       do_resolve = this_oop->tag_at(which).is_unresolved_klass();
+       if (do_resolve) {
+-        this_oop->klass_at_put(which, k());       
++        this_oop->klass_at_put(which, k());
+       }
+     }
+   }
+- 
++
+   entry = this_oop->resolved_klass_at(which);
+   assert(entry->is_klass(), "must be resolved at this point");
+   return (klassOop)entry;
+@@ -178,7 +175,7 @@
+ // by compiler and exception handling.  Also used to avoid classloads for
+ // instanceof operations. Returns NULL if the class has not been loaded or
+ // if the verification of constant pool failed
+-klassOop constantPoolOopDesc::klass_at_if_loaded(constantPoolHandle this_oop, int which) {  
++klassOop constantPoolOopDesc::klass_at_if_loaded(constantPoolHandle this_oop, int which) {
+   oop entry = *this_oop->obj_at_addr(which);
+   if (entry->is_klass()) {
+     return (klassOop)entry;
+@@ -190,7 +187,7 @@
+     oop protection_domain = Klass::cast(this_oop->pool_holder())->protection_domain();
+     Handle h_prot (thread, protection_domain);
+     Handle h_loader (thread, loader);
+-    klassOop k = SystemDictionary::find(name, h_loader, h_prot, thread);    
++    klassOop k = SystemDictionary::find(name, h_loader, h_prot, thread);
+ 
+     if (k != NULL) {
+       // Make sure that resolving is legal
+@@ -232,7 +229,7 @@
+     Handle h_loader(THREAD, loader);
+     Handle h_prot  (THREAD, protection_domain);
+     KlassHandle k(THREAD, SystemDictionary::find(name, h_loader, h_prot, THREAD));
+-  
++
+     // Do access check for klasses
+     if( k.not_null() ) verify_constant_pool_resolve(this_oop, k, CHECK_NULL);
+     return k();
+@@ -267,16 +264,16 @@
+ 
+ 
+ void constantPoolOopDesc::verify_constant_pool_resolve(constantPoolHandle this_oop, KlassHandle k, TRAPS) {
+- if (k->oop_is_instance() || k->oop_is_objArray()) {        
++ if (k->oop_is_instance() || k->oop_is_objArray()) {
+     instanceKlassHandle holder (THREAD, this_oop->pool_holder());
+     klassOop elem_oop = k->oop_is_instance() ? k() : objArrayKlass::cast(k())->bottom_klass();
+     KlassHandle element (THREAD, elem_oop);
+-    
++
+     // The element type could be a typeArray - we only need the access check if it is
+     // an reference to another class
+     if (element->oop_is_instance()) {
+       LinkResolver::check_klass_accessability(holder, element, CHECK);
+-    }        
++    }
+   }
+ }
+ 
+@@ -313,9 +310,9 @@
+ symbolOop constantPoolOopDesc::klass_name_at(int which) {
+   assert(tag_at(which).is_unresolved_klass() || tag_at(which).is_klass(),
+          "Corrupted constant pool");
+-  // A resolved constantPool entry will contain a klassOop, otherwise a symbolOop. 
++  // A resolved constantPool entry will contain a klassOop, otherwise a symbolOop.
+   // It is not safe to rely on the tag bit's here, since we don't have a lock, and the entry and
+-  // tag is not updated atomicly.  
++  // tag is not updated atomically.
+   oop entry = *(obj_at_addr(which));
+   if (entry->is_klass()) {
+     // Already resolved - return entry's name.
+@@ -328,7 +325,7 @@
+ 
+ symbolOop constantPoolOopDesc::klass_ref_at_noresolve(int which) {
+   jint ref_index = klass_ref_index_at(which);
+-  return klass_at_noresolve(ref_index);  
++  return klass_at_noresolve(ref_index);
+ }
+ 
+ char* constantPoolOopDesc::string_at_noresolve(int which) {
+@@ -388,7 +385,7 @@
+ }
+ 
+ 
+-bool constantPoolOopDesc::klass_name_at_matches(instanceKlassHandle k, 
++bool constantPoolOopDesc::klass_name_at_matches(instanceKlassHandle k,
+                                                 int which) {
+   // Names are interned, so we can compare symbolOops directly
+   symbolOop cp_name = klass_name_at(which);
+@@ -543,7 +540,7 @@
+     // From the constantPoolOop API point of view, this is correct
+     // behavior. See constantPoolKlass::merge() to see how this plays
+     // out in the context of constantPoolOop merging.
+-    return false;    
++    return false;
+   }
+ 
+   switch (t1) {
+@@ -819,7 +816,7 @@
+   {
+     symbolOop k = unresolved_klass_at(from_i);
+     to_cp->unresolved_klass_at_put(to_i, k);
+-    to_cp->tag_at_put(to_i, JVM_CONSTANT_UnresolvedClassInError);    
++    to_cp->tag_at_put(to_i, JVM_CONSTANT_UnresolvedClassInError);
+   } break;
+ 
+ 
+@@ -1072,15 +1069,15 @@
+       case JVM_CONSTANT_Utf8: {
+         symbolOop sym = symbol_at(idx);
+         symmap->add_entry(sym, idx);
+-        DBG(printf("adding symbol entry %s = %d\n", sym->as_utf8(), idx)); 
++        DBG(printf("adding symbol entry %s = %d\n", sym->as_utf8(), idx));
+         break;
+       }
+-      case JVM_CONSTANT_Class:          
++      case JVM_CONSTANT_Class:
+       case JVM_CONSTANT_UnresolvedClass:
+       case JVM_CONSTANT_UnresolvedClassInError: {
+         symbolOop sym = klass_name_at(idx);
+         classmap->add_entry(sym, idx);
+-        DBG(printf("adding class entry %s = %d\n", sym->as_utf8(), idx)); 
++        DBG(printf("adding class entry %s = %d\n", sym->as_utf8(), idx));
+         break;
+       }
+       case JVM_CONSTANT_Long:
+@@ -1159,7 +1156,7 @@
+         idx++;             // Double takes two cpool slots
+         break;
+       }
+-      case JVM_CONSTANT_Class:          
++      case JVM_CONSTANT_Class:
+       case JVM_CONSTANT_UnresolvedClass:
+       case JVM_CONSTANT_UnresolvedClassInError: {
+         *bytes = JVM_CONSTANT_Class;
+@@ -1190,8 +1187,8 @@
+         DBG(printf("JVM_CONSTANT_UnresolvedString: idx=#%03hd, %s", idx1, str));
+         break;
+       }
+-      case JVM_CONSTANT_Fieldref:       
+-      case JVM_CONSTANT_Methodref:      
++      case JVM_CONSTANT_Fieldref:
++      case JVM_CONSTANT_Methodref:
+       case JVM_CONSTANT_InterfaceMethodref: {
+         idx1 = uncached_klass_ref_index_at(idx);
+         idx2 = uncached_name_and_type_ref_index_at(idx);
+diff -ruN openjdk6/hotspot/src/share/vm/oops/constantPoolOop.hpp openjdk/hotspot/src/share/vm/oops/constantPoolOop.hpp
+--- openjdk6/hotspot/src/share/vm/oops/constantPoolOop.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/constantPoolOop.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)constantPoolOop.hpp	1.104 07/05/17 15:55:26 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A constantPool is an array containing class constants as described in the
+@@ -35,17 +32,11 @@
+ // the entry in the constant pool is a klass or String object and
+ // not a symbolOop.
+ 
+-#ifdef CC_INTERP
+-class cInterpreter;
+-#endif /* CC_INTERP */
+-
+ class SymbolHashMap;
+ 
+ class constantPoolOopDesc : public arrayOopDesc {
+   friend class VMStructs;
+-#ifdef CC_INTERP
+-  friend class cInterpreter;  // Directly extracts an oop in the pool for fast instanceof/checkcast
+-#endif /* CC_INTERP */
++  friend class BytecodeInterpreter;  // Directly extracts an oop in the pool for fast instanceof/checkcast
+  private:
+   typeArrayOop         _tags; // the tag array describing the constant pool's contents
+   constantPoolCacheOop _cache;         // the cache holding interpreter runtime information
+@@ -59,7 +50,7 @@
+ 
+  private:
+   intptr_t* base() const { return (intptr_t*) (((char*) this) + sizeof(constantPoolOopDesc)); }
+-  oop* tags_addr()	 { return (oop*)&_tags; }
++  oop* tags_addr()       { return (oop*)&_tags; }
+   oop* cache_addr()      { return (oop*)&_cache; }
+ 
+   oop* obj_at_addr(int which) const {
+@@ -106,7 +97,7 @@
+ 
+   // Storing constants
+ 
+-  void klass_at_put(int which, klassOop k) { 
++  void klass_at_put(int which, klassOop k) {
+     oop_store_without_check((volatile oop *)obj_at_addr(which), oop(k));
+     // The interpreter assumes when the tag is stored, the klass is resolved
+     // and the klassOop is a klass rather than a symbolOop, so we need
+@@ -122,7 +113,7 @@
+   // For temporary use while constructing constant pool
+   void klass_index_at_put(int which, int name_index) {
+     tag_at_put(which, JVM_CONSTANT_ClassIndex);
+-    *int_at_addr(which) = name_index; 
++    *int_at_addr(which) = name_index;
+   }
+ 
+   // Temporary until actual use
+@@ -142,25 +133,25 @@
+     oop_store_without_check(obj_at_addr(which), oop(s));
+   }
+ 
+-  void int_at_put(int which, jint i) { 
++  void int_at_put(int which, jint i) {
+     tag_at_put(which, JVM_CONSTANT_Integer);
+-    *int_at_addr(which) = i; 
++    *int_at_addr(which) = i;
+   }
+-  
+-  void long_at_put(int which, jlong l) { 
++
++  void long_at_put(int which, jlong l) {
+     tag_at_put(which, JVM_CONSTANT_Long);
+-    // *long_at_addr(which) = l; 
++    // *long_at_addr(which) = l;
+     Bytes::put_native_u8((address)long_at_addr(which), *((u8*) &l));
+   }
+ 
+-  void float_at_put(int which, jfloat f) { 
++  void float_at_put(int which, jfloat f) {
+     tag_at_put(which, JVM_CONSTANT_Float);
+-    *float_at_addr(which) = f; 
++    *float_at_addr(which) = f;
+   }
+ 
+-  void double_at_put(int which, jdouble d) { 
++  void double_at_put(int which, jdouble d) {
+     tag_at_put(which, JVM_CONSTANT_Double);
+-    // *double_at_addr(which) = d; 
++    // *double_at_addr(which) = d;
+     // u8 temp = *(u8*) &d;
+     Bytes::put_native_u8((address) double_at_addr(which), *((u8*) &d));
+   }
+@@ -183,7 +174,7 @@
+   // For temporary use while constructing constant pool
+   void string_index_at_put(int which, int string_index) {
+     tag_at_put(which, JVM_CONSTANT_StringIndex);
+-    *int_at_addr(which) = string_index; 
++    *int_at_addr(which) = string_index;
+   }
+ 
+   void field_at_put(int which, int class_index, int name_and_type_index) {
+@@ -205,7 +196,7 @@
+     tag_at_put(which, JVM_CONSTANT_NameAndType);
+     *int_at_addr(which) = ((jint) signature_index<<16) | name_index;  // Not so nice
+   }
+-  
++
+   // Tag query
+ 
+   constantTag tag_at(int which) const { return (constantTag)tags()->byte_at_acquire(which); }
+@@ -222,14 +213,14 @@
+ 
+   // Fetching constants
+ 
+-  klassOop klass_at(int which, TRAPS) { 
++  klassOop klass_at(int which, TRAPS) {
+     constantPoolHandle h_this(THREAD, this);
+-    return klass_at_impl(h_this, which, CHECK_NULL); 
+-  }  
+-    
++    return klass_at_impl(h_this, which, CHECK_NULL);
++  }
++
+   symbolOop klass_name_at(int which);  // Returns the name, w/o resolving.
+ 
+-  klassOop resolved_klass_at(int which) {  // Used by Compiler 
++  klassOop resolved_klass_at(int which) {  // Used by Compiler
+     guarantee(tag_at(which).is_klass(), "Corrupted constant pool");
+     // Must do an acquire here in case another thread resolved the klass
+     // behind our back, lest we later load stale values thru the oop.
+@@ -277,7 +268,7 @@
+ 
+   oop string_at(int which, TRAPS) {
+     constantPoolHandle h_this(THREAD, this);
+-    return string_at_impl(h_this, which, CHECK_NULL); 
++    return string_at_impl(h_this, which, CHECK_NULL);
+   }
+ 
+   // only called when we are sure a string entry is already resolved (via an
+@@ -319,7 +310,7 @@
+   symbolOop klass_ref_at_noresolve(int which);
+   symbolOop name_ref_at(int which);
+   symbolOop signature_ref_at(int which);    // the type descriptor
+-  
++
+   int klass_ref_index_at(int which);
+   int name_and_type_ref_index_at(int which);
+ 
+@@ -332,7 +323,7 @@
+   // Resolve string constants (to prevent allocation during compilation)
+   void resolve_string_constants(TRAPS) {
+     constantPoolHandle h_this(THREAD, this);
+-    resolve_string_constants_impl(h_this, CHECK); 
++    resolve_string_constants_impl(h_this, CHECK);
+   }
+ 
+   // Klass name matches name at offset
+@@ -347,11 +338,11 @@
+   friend class ClassFileParser;
+   friend class SystemDictionary;
+ 
+-  // Used by compiler to prevent classloading. 
+-  static klassOop klass_at_if_loaded          (constantPoolHandle this_oop, int which);  
++  // Used by compiler to prevent classloading.
++  static klassOop klass_at_if_loaded          (constantPoolHandle this_oop, int which);
+   static klassOop klass_ref_at_if_loaded      (constantPoolHandle this_oop, int which);
+   // Same as above - but does LinkResolving.
+-  static klassOop klass_ref_at_if_loaded_check(constantPoolHandle this_oop, int which, TRAPS);  
++  static klassOop klass_ref_at_if_loaded_check(constantPoolHandle this_oop, int which, TRAPS);
+ 
+   // Routines currently used for annotations (only called by jvm.cpp) but which might be used in the
+   // future by other Java code. These take constant pool indices rather than possibly-byte-swapped
+@@ -401,7 +392,7 @@
+   // Performs the LinkResolver checks
+   static void verify_constant_pool_resolve(constantPoolHandle this_oop, KlassHandle klass, TRAPS);
+ 
+-  // Implementation of methods that needs an exposed 'this' pointer, in order to 
++  // Implementation of methods that needs an exposed 'this' pointer, in order to
+   // handle GC while executing the method
+   static klassOop klass_at_impl(constantPoolHandle this_oop, int which, TRAPS);
+   static oop string_at_impl(constantPoolHandle this_oop, int which, TRAPS);
+@@ -529,7 +520,7 @@
+     for (int i = 0; i < _table_size; i++) {
+       for (SymbolHashMapEntry* cur = bucket(i); cur != NULL; cur = next) {
+         next = cur->next();
+-        delete(cur); 
++        delete(cur);
+       }
+     }
+     delete _buckets;
+diff -ruN openjdk6/hotspot/src/share/vm/oops/constMethodKlass.cpp openjdk/hotspot/src/share/vm/oops/constMethodKlass.cpp
+--- openjdk6/hotspot/src/share/vm/oops/constMethodKlass.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/constMethodKlass.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)constMethodKlass.cpp	1.24 07/05/29 09:44:18 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -31,7 +28,7 @@
+ 
+ klassOop constMethodKlass::create_klass(TRAPS) {
+   constMethodKlass o;
+-  KlassHandle h_this_klass(THREAD, Universe::klassKlassObj());  
++  KlassHandle h_this_klass(THREAD, Universe::klassKlassObj());
+   KlassHandle k = base_create_klass(h_this_klass, header_size(),
+                                     o.vtbl_value(), CHECK_NULL);
+   // Make sure size calculation is right
+@@ -67,7 +64,7 @@
+     CollectedHeap::permanent_obj_allocate(h_k, size, CHECK_NULL);
+   assert(!cm->is_parsable(), "Not yet safely parsable");
+   No_Safepoint_Verifier no_safepoint;
+-  cm->set_interpreter_kind(AbstractInterpreter::invalid);
++  cm->set_interpreter_kind(Interpreter::invalid);
+   cm->init_fingerprint();
+   cm->set_method(NULL);
+   cm->set_stackmap_data(NULL);
+@@ -78,7 +75,7 @@
+                                 compressed_line_number_size,
+                                 localvariable_table_length);
+   assert(cm->size() == size, "wrong size for object");
+-  cm->set_partially_loaded();     
++  cm->set_partially_loaded();
+   assert(cm->is_parsable(), "Is safely parsable by gc");
+   return cm;
+ }
+@@ -89,19 +86,19 @@
+   MarkSweep::mark_and_push(cm->adr_method());
+   MarkSweep::mark_and_push(cm->adr_stackmap_data());
+   MarkSweep::mark_and_push(cm->adr_exception_table());
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::constMethodKlassObj never moves.
+ }
+ 
+ #ifndef SERIALGC
+ void constMethodKlass::oop_follow_contents(ParCompactionManager* cm,
+-					   oop obj) {
++                                           oop obj) {
+   assert (obj->is_constMethod(), "object must be constMethod");
+   constMethodOop cm_oop = constMethodOop(obj);
+   PSParallelCompact::mark_and_push(cm, cm_oop->adr_method());
+   PSParallelCompact::mark_and_push(cm, cm_oop->adr_stackmap_data());
+   PSParallelCompact::mark_and_push(cm, cm_oop->adr_exception_table());
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::constMethodKlassObj never moves.
+ }
+ #endif // SERIALGC
+@@ -112,9 +109,9 @@
+   blk->do_oop(cm->adr_method());
+   blk->do_oop(cm->adr_stackmap_data());
+   blk->do_oop(cm->adr_exception_table());
+-  // Get size before changing pointers. 
++  // Get size before changing pointers.
+   // Don't call size() or oop_size() since that is a virtual call.
+-  int size = cm->object_size();  
++  int size = cm->object_size();
+   return size;
+ }
+ 
+@@ -131,8 +128,8 @@
+   if (mr.contains(adr)) blk->do_oop(adr);
+   // Get size before changing pointers.
+   // Don't call size() or oop_size() since that is a virtual call.
+-  int size = cm->object_size();  
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  int size = cm->object_size();
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::constMethodKlassObj never moves.
+   return size;
+ }
+@@ -146,8 +143,8 @@
+   MarkSweep::adjust_pointer(cm->adr_exception_table());
+   // Get size before changing pointers.
+   // Don't call size() or oop_size() since that is a virtual call.
+-  int size = cm->object_size();  
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  int size = cm->object_size();
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::constMethodKlassObj never moves.
+   return size;
+ }
+@@ -178,8 +175,8 @@
+ }
+ 
+ int constMethodKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
+-					  HeapWord* beg_addr,
+-					  HeapWord* end_addr) {
++                                          HeapWord* beg_addr,
++                                          HeapWord* end_addr) {
+   assert(obj->is_constMethod(), "should be constMethod");
+   constMethodOop cm_oop = constMethodOop(obj);
+ 
+@@ -207,7 +204,7 @@
+   st->print(" - exceptions:   " INTPTR_FORMAT "\n", (address)m->exception_table());
+   if (m->has_stackmap_table()) {
+     st->print(" - stackmap data:       ");
+-    m->stackmap_data()->print_value_on(st); 
++    m->stackmap_data()->print_value_on(st);
+     st->cr();
+   }
+ }
+@@ -237,7 +234,7 @@
+   constMethodOop m = constMethodOop(obj);
+   guarantee(m->is_perm(),                            "should be in permspace");
+ 
+-  // Verification can occur during oop construction before the method or 
++  // Verification can occur during oop construction before the method or
+   // other fields have been initialized.
+   if (!obj->partially_loaded()) {
+     guarantee(m->method()->is_perm(), "should be in permspace");
+@@ -291,7 +288,7 @@
+   assert(obj->is_constMethod(), "object must be klass");
+   constMethodOop m = constMethodOop(obj);
+   // check whether exception_table points to self (flag for partially loaded)
+-  return m->exception_table() == (typeArrayOop)obj; 
++  return m->exception_table() == (typeArrayOop)obj;
+ }
+ 
+ 
+@@ -302,4 +299,3 @@
+   // Temporarily set exception_table to point to self
+   m->set_exception_table((typeArrayOop)obj);
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/constMethodKlass.hpp openjdk/hotspot/src/share/vm/oops/constMethodKlass.hpp
+--- openjdk6/hotspot/src/share/vm/oops/constMethodKlass.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/constMethodKlass.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)constMethodKlass.hpp	1.15 07/05/29 09:44:18 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A constMethodKlass is the klass of a constMethodOop
+@@ -50,7 +47,7 @@
+   // Casting from klassOop
+   static constMethodKlass* cast(klassOop k) {
+     assert(k->klass_part()->oop_is_constMethod(), "cast to constMethodKlass");
+-    return (constMethodKlass*) k->klass_part(); 
++    return (constMethodKlass*) k->klass_part();
+   }
+ 
+   // Sizing
+@@ -91,4 +88,3 @@
+   bool oop_partially_loaded(oop obj) const;
+   void oop_set_partially_loaded(oop obj);
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/constMethodOop.cpp openjdk/hotspot/src/share/vm/oops/constMethodOop.cpp
+--- openjdk6/hotspot/src/share/vm/oops/constMethodOop.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/constMethodOop.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)constMethodOop.cpp	1.11 07/05/05 17:06:00 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -98,7 +95,7 @@
+     _flags |= _has_checked_exceptions;
+     *(checked_exceptions_length_addr()) = checked_exceptions_len;
+   }
+-  if (localvariable_table_len > 0) {    
++  if (localvariable_table_len > 0) {
+     _flags |= _has_localvariable_table;
+     *(localvariable_table_length_addr()) = localvariable_table_len;
+   }
+diff -ruN openjdk6/hotspot/src/share/vm/oops/constMethodOop.hpp openjdk/hotspot/src/share/vm/oops/constMethodOop.hpp
+--- openjdk6/hotspot/src/share/vm/oops/constMethodOop.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/constMethodOop.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)constMethodOop.hpp	1.24 07/05/05 17:05:59 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,11 +19,11 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A constMethodOop represents portions of a Java method which
+-// do not vary. 
++// do not vary.
+ //
+ // Memory layout (each line represents a word). Note that most
+ // applications load thousands of methods, so keeping the size of this
+@@ -113,9 +110,9 @@
+   oop* oop_block_end() const { return adr_exception_table() + 1; }
+ 
+ private:
+-  // 
++  //
+   // The oop block.  See comment in klass.hpp before making changes.
+-  // 
++  //
+ 
+   // Backpointer to non-const methodOop (needed for some JVMTI operations)
+   methodOop         _method;
+@@ -126,7 +123,7 @@
+   // The exception handler table. 4-tuples of ints [start_pc, end_pc,
+   // handler_pc, catch_type index] For methods with no exceptions the
+   // table is pointing to Universe::the_empty_int_array
+-  typeArrayOop      _exception_table;            
++  typeArrayOop      _exception_table;
+ 
+   //
+   // End of the oop block.
+@@ -167,11 +164,11 @@
+   methodOop method() const                 { return _method; }
+   void set_method(methodOop m)             { oop_store_without_check((oop*)&_method, (oop) m); }
+ 
+-  
++
+   // stackmap table data
+   typeArrayOop stackmap_data() const { return _stackmap_data; }
+-  void set_stackmap_data(typeArrayOop sd) { 
+-    oop_store_without_check((oop*)&_stackmap_data, (oop)sd); 
++  void set_stackmap_data(typeArrayOop sd) {
++    oop_store_without_check((oop*)&_stackmap_data, (oop)sd);
+   }
+   bool has_stackmap_table() const { return _stackmap_data != NULL; }
+ 
+@@ -180,7 +177,7 @@
+   void set_exception_table(typeArrayOop e)       { oop_store_without_check((oop*) &_exception_table, (oop) e); }
+   bool has_exception_handler() const             { return exception_table() != NULL && exception_table()->length() > 0; }
+ 
+-  void init_fingerprint() { 
++  void init_fingerprint() {
+     const uint64_t initval = CONST64(0x8000000000000000);
+     _fingerprint = initval;
+   }
+diff -ruN openjdk6/hotspot/src/share/vm/oops/cpCacheKlass.cpp openjdk/hotspot/src/share/vm/oops/cpCacheKlass.cpp
+--- openjdk6/hotspot/src/share/vm/oops/cpCacheKlass.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/cpCacheKlass.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)cpCacheKlass.cpp	1.46 07/05/29 09:44:18 JVM"
+-#endif
+ /*
+  * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,14 +19,14 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+ #include "incls/_cpCacheKlass.cpp.incl"
+ 
+ 
+-int constantPoolCacheKlass::oop_size(oop obj) const { 
++int constantPoolCacheKlass::oop_size(oop obj) const {
+   assert(obj->is_constantPoolCache(), "must be constantPool");
+   return constantPoolCacheOop(obj)->object_size();
+ }
+@@ -48,7 +45,7 @@
+ 
+ klassOop constantPoolCacheKlass::create_klass(TRAPS) {
+   constantPoolCacheKlass o;
+-  KlassHandle klassklass(THREAD, Universe::arrayKlassKlassObj());  
++  KlassHandle klassklass(THREAD, Universe::arrayKlassKlassObj());
+   arrayKlassHandle k = base_create_array_klass(o.vtbl_value(), header_size(), klassklass, CHECK_NULL);
+   KlassHandle super (THREAD, k->super());
+   complete_create_array_klass(k, super, CHECK_NULL);
+@@ -59,7 +56,7 @@
+ void constantPoolCacheKlass::oop_follow_contents(oop obj) {
+   assert(obj->is_constantPoolCache(), "obj must be constant pool cache");
+   constantPoolCacheOop cache = (constantPoolCacheOop)obj;
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::constantPoolCacheKlassObj never moves.
+   // gc of constant pool cache instance variables
+   MarkSweep::mark_and_push((oop*)cache->constant_pool_addr());
+@@ -70,10 +67,10 @@
+ 
+ #ifndef SERIALGC
+ void constantPoolCacheKlass::oop_follow_contents(ParCompactionManager* cm,
+-						 oop obj) {
++                                                 oop obj) {
+   assert(obj->is_constantPoolCache(), "obj must be constant pool cache");
+   constantPoolCacheOop cache = (constantPoolCacheOop)obj;
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::constantPoolCacheKlassObj never moves.
+   // gc of constant pool cache instance variables
+   PSParallelCompact::mark_and_push(cm, (oop*)cache->constant_pool_addr());
+@@ -89,8 +86,8 @@
+   constantPoolCacheOop cache = (constantPoolCacheOop)obj;
+   // Get size before changing pointers.
+   // Don't call size() or oop_size() since that is a virtual call.
+-  int size = cache->object_size();  
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  int size = cache->object_size();
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::constantPoolCacheKlassObj never moves.
+   // iteration over constant pool cache instance variables
+   blk->do_oop((oop*)cache->constant_pool_addr());
+@@ -105,8 +102,8 @@
+   constantPoolCacheOop cache = (constantPoolCacheOop)obj;
+   // Get size before changing pointers.
+   // Don't call size() or oop_size() since that is a virtual call.
+-  int size = cache->object_size();  
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  int size = cache->object_size();
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::constantPoolCacheKlassObj never moves.
+   // iteration over constant pool cache instance variables
+   oop* addr = (oop*)cache->constant_pool_addr();
+@@ -122,8 +119,8 @@
+   constantPoolCacheOop cache = (constantPoolCacheOop)obj;
+   // Get size before changing pointers.
+   // Don't call size() or oop_size() since that is a virtual call.
+-  int size = cache->object_size();  
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  int size = cache->object_size();
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::constantPoolCacheKlassObj never moves.
+   // Iteration over constant pool cache instance variables
+   MarkSweep::adjust_pointer((oop*)cache->constant_pool_addr());
+@@ -134,13 +131,13 @@
+ }
+ 
+ #ifndef SERIALGC
+-void constantPoolCacheKlass::oop_copy_contents(PSPromotionManager* pm, 
+-					       oop obj) {
++void constantPoolCacheKlass::oop_copy_contents(PSPromotionManager* pm,
++                                               oop obj) {
+   assert(obj->is_constantPoolCache(), "should be constant pool");
+ }
+ 
+-void constantPoolCacheKlass::oop_push_contents(PSPromotionManager* pm, 
+-					       oop obj) {
++void constantPoolCacheKlass::oop_push_contents(PSPromotionManager* pm,
++                                               oop obj) {
+   assert(obj->is_constantPoolCache(), "should be constant pool");
+ }
+ 
+@@ -162,8 +159,8 @@
+ 
+ int
+ constantPoolCacheKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
+-					    HeapWord* beg_addr,
+-					    HeapWord* end_addr) {
++                                            HeapWord* beg_addr,
++                                            HeapWord* end_addr) {
+   assert(obj->is_constantPoolCache(), "obj must be constant pool cache");
+   constantPoolCacheOop cache = (constantPoolCacheOop)obj;
+ 
+@@ -203,7 +200,6 @@
+ }
+ 
+ 
+-const char* constantPoolCacheKlass::internal_name() const { 
+-  return "{constant pool cache}"; 
++const char* constantPoolCacheKlass::internal_name() const {
++  return "{constant pool cache}";
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/cpCacheKlass.hpp openjdk/hotspot/src/share/vm/oops/cpCacheKlass.hpp
+--- openjdk6/hotspot/src/share/vm/oops/cpCacheKlass.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/cpCacheKlass.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)cpCacheKlass.hpp	1.33 07/05/29 09:44:19 JVM"
+-#endif
+ /*
+  * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class constantPoolCacheKlass: public arrayKlass {
+@@ -34,13 +31,13 @@
+ 
+   // Allocation
+   DEFINE_ALLOCATE_PERMANENT(constantPoolCacheKlass);
+-  constantPoolCacheOop allocate(int length, TRAPS); 
++  constantPoolCacheOop allocate(int length, TRAPS);
+   static klassOop create_klass(TRAPS);
+ 
+   // Casting from klassOop
+   static constantPoolCacheKlass* cast(klassOop k) {
+     assert(k->klass_part()->oop_is_constantPoolCache(), "cast to constantPoolCacheKlass");
+-    return (constantPoolCacheKlass*)k->klass_part(); 
++    return (constantPoolCacheKlass*)k->klass_part();
+   }
+ 
+   // Sizing
+@@ -69,4 +66,3 @@
+   const char* internal_name() const;
+   void oop_verify_on(oop obj, outputStream* st);
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/cpCacheOop.cpp openjdk/hotspot/src/share/vm/oops/cpCacheOop.cpp
+--- openjdk6/hotspot/src/share/vm/oops/cpCacheOop.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/cpCacheOop.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)cpCacheOop.cpp	1.79 07/05/29 09:44:19 JVM"
+-#endif
+ /*
+  * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -91,8 +88,8 @@
+ // case.
+ bool ConstantPoolCacheEntry::same_methodOop(oop cur_f1, oop f1) {
+   return (cur_f1 == f1 || ((methodOop)cur_f1)->name() ==
+-	 ((methodOop)f1)->name() || ((methodOop)cur_f1)->signature() == 
+-	 ((methodOop)f1)->signature());
++         ((methodOop)f1)->name() || ((methodOop)cur_f1)->signature() ==
++         ((methodOop)f1)->signature());
+ }
+ #endif
+ 
+@@ -101,12 +98,12 @@
+ // the damaged entry.  More seriously, the memory synchronization is needed
+ // to flush other fields (f1, f2) completely to memory before the bytecodes
+ // are updated, lest other processors see a non-zero bytecode but zero f1/f2.
+-void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code, 
++void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
+                                        Bytecodes::Code put_code,
+-                                       KlassHandle field_holder, 
+-                                       int orig_field_index, 
+-                                       int field_offset, 
+-                                       TosState field_type, 
++                                       KlassHandle field_holder,
++                                       int orig_field_index,
++                                       int field_offset,
++                                       TosState field_type,
+                                        bool is_final,
+                                        bool is_volatile) {
+   set_f1(field_holder());
+@@ -143,7 +140,7 @@
+     case Bytecodes::_invokeinterface: {
+         if (method->can_be_statically_bound()) {
+           set_f2((intptr_t)method());
+-	  needs_vfinal_flag = true;
++          needs_vfinal_flag = true;
+         } else {
+           assert(vtable_index >= 0, "valid index");
+           set_f2(vtable_index);
+@@ -166,9 +163,9 @@
+   }
+ 
+   set_flags(as_flags(as_TosState(method->result_type()),
+-                     method->is_final_method(), 
+-                     needs_vfinal_flag, 
+-                     false, 
++                     method->is_final_method(),
++                     needs_vfinal_flag,
++                     false,
+                      change_to_virtual,
+                      true)|
+             method()->size_of_parameters());
+@@ -181,7 +178,7 @@
+       // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
+       //
+       // Workaround for the case where we encounter an invokeinterface, but we
+-      // should really have an _invokevirtual since the resolved method is a 
++      // should really have an _invokevirtual since the resolved method is a
+       // virtual method in java.lang.Object. This is a corner case in the spec
+       // but is presumably legal. javac does not generate this code.
+       //
+@@ -210,7 +207,7 @@
+   set_f1(interf);
+   set_f2(index);
+   set_flags(as_flags(as_TosState(method->result_type()), method->is_final_method(), false, false, false, true) | method()->size_of_parameters());
+-  set_bytecode_1(Bytecodes::_invokeinterface);  
++  set_bytecode_1(Bytecodes::_invokeinterface);
+ }
+ 
+ 
+@@ -290,7 +287,7 @@
+ }
+ 
+ void ConstantPoolCacheEntry::update_pointers(HeapWord* beg_addr,
+-					     HeapWord* end_addr) {
++                                             HeapWord* end_addr) {
+   assert(in_words(size()) == 4, "check code below - may need adjustment");
+   // field[1] is always oop or NULL
+   PSParallelCompact::adjust_pointer((oop*)&_f1, beg_addr, end_addr);
+diff -ruN openjdk6/hotspot/src/share/vm/oops/cpCacheOop.hpp openjdk/hotspot/src/share/vm/oops/cpCacheOop.hpp
+--- openjdk6/hotspot/src/share/vm/oops/cpCacheOop.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/cpCacheOop.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)cpCacheOop.hpp	1.74 07/05/29 09:44:19 JVM"
+-#endif
+ /*
+  * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A ConstantPoolCacheEntry describes an individual entry of the constant
+@@ -143,14 +140,14 @@
+     volatileField = 25,
+     vfinalMethod  = 26,
+     finalField    = 27
+-  }; 
++  };
+ 
+   enum { field_index_mask = 0xFFFF };
+ 
+   // start of type bits in flags
+   // Note: the interpreter knows this layout!
+   enum FlagValues {
+-    tosBits      = 28 
++    tosBits      = 28
+   };
+ 
+   // Initialization
+@@ -162,9 +159,9 @@
+     KlassHandle     field_holder,                // the object/klass holding the field
+     int             orig_field_index,            // the original field index in the field holder
+     int             field_offset,                // the field offset in words in the field holder
+-    TosState        field_type,                  // the (machine) field type    
+-    bool            is_final,                     // the field is final 
+-    bool            is_volatile                  // the field is volatile 
++    TosState        field_type,                  // the (machine) field type
++    bool            is_final,                     // the field is final
++    bool            is_volatile                  // the field is volatile
+   );
+ 
+   void set_method(                               // sets entry to resolved method entry
+@@ -174,12 +171,12 @@
+   );
+ 
+   void set_interface_call(
+-    methodHandle method,                         // Resolved method    
++    methodHandle method,                         // Resolved method
+     int index                                    // Method index into interface
+-  );               
++  );
+ 
+   void set_parameter_size(int value) {
+-    assert(parameter_size() == 0 || parameter_size() == value, 
++    assert(parameter_size() == 0 || parameter_size() == value,
+            "size must not change");
+     // Setting the parameter size by itself is only safe if the
+     // current value of _flags is 0, otherwise another thread may have
+@@ -326,4 +323,3 @@
+   void adjust_method_entries(methodOop* old_methods, methodOop* new_methods,
+                              int methods_length, bool * trace_name_printed);
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/generateOopMap.cpp openjdk/hotspot/src/share/vm/oops/generateOopMap.cpp
+--- openjdk6/hotspot/src/share/vm/oops/generateOopMap.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/generateOopMap.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)generateOopMap.cpp	1.141 07/05/05 17:06:03 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,20 +19,20 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+ //
+ // Compute stack layouts for each instruction in method.
+-// 
+-//  Problems: 
+-//  - What to do about jsr with different types of local vars?  
++//
++//  Problems:
++//  - What to do about jsr with different types of local vars?
+ //  Need maps that are conditional on jsr path?
+ //  - Jsr and exceptions should be done more efficiently (the retAddr stuff)
+ //
+-//  Alternative: 
+-//  - Could extend verifier to provide this information. 
++//  Alternative:
++//  - Could extend verifier to provide this information.
+ //    For: one fewer abstract interpreter to maintain. Against: the verifier
+ //    solves a bigger problem so slower (undesirable to force verification of
+ //    everything?).
+@@ -44,7 +41,7 @@
+ //    Partition bytecodes into basic blocks
+ //    For each basic block: store entry state (vars, stack). For instructions
+ //    inside basic blocks we do not store any state (instead we recompute it
+-//    from state produced by previous instruction). 
++//    from state produced by previous instruction).
+ //
+ //    Perform abstract interpretation of bytecodes over this lattice:
+ //
+@@ -83,7 +80,7 @@
+ //  To be able to process "ret" bytecodes, we keep track of these return
+ //  PC's in a 'retAddrs' structure in abstract interpreter context (when
+ //  processing a "ret" bytecodes, it is not sufficient to know that it gets
+-//  an argument of the right type 'p'; we need to know which address it 
++//  an argument of the right type 'p'; we need to know which address it
+ //  returns to).
+ //
+ // (Note this comment is borrowed from the original author of the algorithm)
+@@ -98,29 +95,29 @@
+ class ComputeCallStack : public SignatureIterator {
+   CellTypeState *_effect;
+   int _idx;
+-  
++
+   void setup();
+   void set(CellTypeState state)         { _effect[_idx++] = state; }
+   int  length()                         { return _idx; };
+-  
++
+   virtual void do_bool  ()              { set(CellTypeState::value); };
+   virtual void do_char  ()              { set(CellTypeState::value); };
+-  virtual void do_float ()              { set(CellTypeState::value); };  
++  virtual void do_float ()              { set(CellTypeState::value); };
+   virtual void do_byte  ()              { set(CellTypeState::value); };
+   virtual void do_short ()              { set(CellTypeState::value); };
+-  virtual void do_int   ()              { set(CellTypeState::value); };  
++  virtual void do_int   ()              { set(CellTypeState::value); };
+   virtual void do_void  ()              { set(CellTypeState::bottom);};
+   virtual void do_object(int begin, int end)  { set(CellTypeState::ref); };
+   virtual void do_array (int begin, int end)  { set(CellTypeState::ref); };
+ 
+-  void do_double()                      { set(CellTypeState::value); 
++  void do_double()                      { set(CellTypeState::value);
+                                           set(CellTypeState::value); }
+-  void do_long  ()                      { set(CellTypeState::value); 
++  void do_long  ()                      { set(CellTypeState::value);
+                                            set(CellTypeState::value); }
+ 
+ public:
+   ComputeCallStack(symbolOop signature) : SignatureIterator(signature) {};
+-  
++
+   // Compute methods
+   int compute_for_parameters(bool is_static, CellTypeState *effect) {
+     _idx    = 0;
+@@ -130,7 +127,7 @@
+       effect[_idx++] = CellTypeState::ref;
+ 
+     iterate_parameters();
+-  
++
+     return length();
+   };
+ 
+@@ -152,29 +149,29 @@
+ class ComputeEntryStack : public SignatureIterator {
+   CellTypeState *_effect;
+   int _idx;
+-  
++
+   void setup();
+   void set(CellTypeState state)         { _effect[_idx++] = state; }
+   int  length()                         { return _idx; };
+-  
++
+   virtual void do_bool  ()              { set(CellTypeState::value); };
+   virtual void do_char  ()              { set(CellTypeState::value); };
+-  virtual void do_float ()              { set(CellTypeState::value); };  
++  virtual void do_float ()              { set(CellTypeState::value); };
+   virtual void do_byte  ()              { set(CellTypeState::value); };
+   virtual void do_short ()              { set(CellTypeState::value); };
+-  virtual void do_int   ()              { set(CellTypeState::value); };  
++  virtual void do_int   ()              { set(CellTypeState::value); };
+   virtual void do_void  ()              { set(CellTypeState::bottom);};
+   virtual void do_object(int begin, int end)  { set(CellTypeState::make_slot_ref(_idx)); }
+   virtual void do_array (int begin, int end)  { set(CellTypeState::make_slot_ref(_idx)); }
+ 
+-  void do_double()                      { set(CellTypeState::value); 
++  void do_double()                      { set(CellTypeState::value);
+                                           set(CellTypeState::value); }
+-  void do_long  ()                      { set(CellTypeState::value); 
++  void do_long  ()                      { set(CellTypeState::value);
+                                           set(CellTypeState::value); }
+ 
+ public:
+   ComputeEntryStack(symbolOop signature) : SignatureIterator(signature) {};
+-  
++
+   // Compute methods
+   int compute_for_parameters(bool is_static, CellTypeState *effect) {
+     _idx    = 0;
+@@ -184,7 +181,7 @@
+       effect[_idx++] = CellTypeState::make_slot_ref(0);
+ 
+     iterate_parameters();
+-  
++
+     return length();
+   };
+ 
+@@ -203,12 +200,12 @@
+ // Implementation of RetTable/RetTableEntry
+ //
+ // Contains a function to iterate through all bytecodes
+-// and find all return entry points 
+-// 
++// and find all return entry points
++//
+ int RetTable::_init_nof_entries = 10;
+ int RetTableEntry::_init_nof_jsrs = 5;
+ 
+-void RetTableEntry::add_delta(int bci, int delta) {  
++void RetTableEntry::add_delta(int bci, int delta) {
+   if (_target_bci > bci) _target_bci += delta;
+ 
+   for (int k = 0; k < _jsrs->length(); k++) {
+@@ -225,32 +222,32 @@
+     switch (bytecode) {
+       case Bytecodes::_jsr:
+         add_jsr(i.next_bci(), i.dest());
+-	break;
++        break;
+       case Bytecodes::_jsr_w:
+-	add_jsr(i.next_bci(), i.dest_w());
+-        break;	    
+-    } 
+-  }  
++        add_jsr(i.next_bci(), i.dest_w());
++        break;
++    }
++  }
+ }
+ 
+-void RetTable::add_jsr(int return_bci, int target_bci) { 
++void RetTable::add_jsr(int return_bci, int target_bci) {
+   RetTableEntry* entry = _first;
+-    
+-  // Scan table for entry 
++
++  // Scan table for entry
+   for (;entry && entry->target_bci() != target_bci; entry = entry->next());
+-  
++
+   if (!entry) {
+-    // Allocate new entry and put in list 
+-    entry = new RetTableEntry(target_bci, _first);    
++    // Allocate new entry and put in list
++    entry = new RetTableEntry(target_bci, _first);
+     _first = entry;
+   }
+ 
+   // Now "entry" is set.  Make sure that the entry is initialized
+-  // and has room for the new jsr.     
++  // and has room for the new jsr.
+   entry->add_jsr(return_bci);
+ }
+ 
+-RetTableEntry* RetTable::find_jsrs_for_target(int targBci) {    
++RetTableEntry* RetTable::find_jsrs_for_target(int targBci) {
+   RetTableEntry *cur = _first;
+ 
+   while(cur) {
+@@ -284,7 +281,7 @@
+ CellTypeState CellTypeState::addr        = CellTypeState::make_any(addr_conflict);
+ 
+ // Commonly used constants
+-static CellTypeState epsilonCTS[1] = { CellTypeState::bottom }; 
++static CellTypeState epsilonCTS[1] = { CellTypeState::bottom };
+ static CellTypeState   refCTS   = CellTypeState::ref;
+ static CellTypeState   valCTS   = CellTypeState::value;
+ static CellTypeState    vCTS[2] = { CellTypeState::value, CellTypeState::bottom };
+@@ -372,12 +369,12 @@
+ 
+ void GenerateOopMap ::initialize_bb() {
+   _gc_points = 0;
+-  _bb_count  = 0;    
++  _bb_count  = 0;
+   int size = binsToHold(method()->code_size());
+   _bb_hdr_bits = NEW_RESOURCE_ARRAY(uintptr_t,size);
+   memset(_bb_hdr_bits, 0, size*sizeof(uintptr_t));
+ }
+- 
++
+ void GenerateOopMap ::set_bbmark_bit(int bci) {
+   int idx  = bci >> LogBitsPerWord;
+   uintptr_t bit = (uintptr_t)1 << (bci & (BitsPerWord-1));
+@@ -390,23 +387,23 @@
+   _bb_hdr_bits[idx] &= (~bit);
+ }
+ 
+-void GenerateOopMap::bb_mark_fct(GenerateOopMap *c, int bci, int *data) {  
++void GenerateOopMap::bb_mark_fct(GenerateOopMap *c, int bci, int *data) {
+   assert(bci>= 0 && bci < c->method()->code_size(), "index out of bounds");
+-  if (c->is_bb_header(bci)) 
++  if (c->is_bb_header(bci))
+      return;
+-  
++
+   if (TraceNewOopMapGeneration) {
+      tty->print_cr("Basicblock#%d begins at: %d", c->_bb_count, bci);
+   }
+-  c->set_bbmark_bit(bci);  
+-  c->_bb_count++;  
++  c->set_bbmark_bit(bci);
++  c->_bb_count++;
+ }
+ 
+ 
+ void GenerateOopMap::mark_bbheaders_and_count_gc_points() {
+   initialize_bb();
+-  
+-  bool fellThrough = false;  // False to get first BB marked.   
++
++  bool fellThrough = false;  // False to get first BB marked.
+ 
+   // First mark all exception handlers as start of a basic-block
+   typeArrayOop excps = method()->exception_table();
+@@ -422,77 +419,77 @@
+   while( (bytecode = bcs.next()) >= 0) {
+     int bci = bcs.bci();
+ 
+-    if (!fellThrough) 
++    if (!fellThrough)
+         bb_mark_fct(this, bci, NULL);
+ 
+     fellThrough = jump_targets_do(&bcs, &GenerateOopMap::bb_mark_fct, NULL);
+-        
++
+      /* We will also mark successors of jsr's as basic block headers. */
+     switch (bytecode) {
+       case Bytecodes::_jsr:
+-	assert(!fellThrough, "should not happen");
+-        bb_mark_fct(this, bci + Bytecodes::length_for(bytecode), NULL); 
++        assert(!fellThrough, "should not happen");
++        bb_mark_fct(this, bci + Bytecodes::length_for(bytecode), NULL);
+         break;
+       case Bytecodes::_jsr_w:
+-	assert(!fellThrough, "should not happen");
+-        bb_mark_fct(this, bci + Bytecodes::length_for(bytecode), NULL); 
+-        break;	      
++        assert(!fellThrough, "should not happen");
++        bb_mark_fct(this, bci + Bytecodes::length_for(bytecode), NULL);
++        break;
+     }
+-      
++
+     if (possible_gc_point(&bcs))
+-      _gc_points++;        
++      _gc_points++;
+   }
+ }
+ 
+-void GenerateOopMap::reachable_basicblock(GenerateOopMap *c, int bci, int *data) {  
++void GenerateOopMap::reachable_basicblock(GenerateOopMap *c, int bci, int *data) {
+   assert(bci>= 0 && bci < c->method()->code_size(), "index out of bounds");
+-  BasicBlock* bb = c->get_basic_block_at(bci);  
++  BasicBlock* bb = c->get_basic_block_at(bci);
+   if (bb->is_dead()) {
+-    bb->mark_as_alive();    
++    bb->mark_as_alive();
+     *data = 1; // Mark basicblock as changed
+-  }  
++  }
+ }
+ 
+ 
+-void GenerateOopMap::mark_reachable_code() {  
++void GenerateOopMap::mark_reachable_code() {
+   int change = 1; // int to get function pointers to work
+-  
++
+   // Mark entry basic block as alive and all exception handlers
+-  _basic_blocks[0].mark_as_alive();  
++  _basic_blocks[0].mark_as_alive();
+   typeArrayOop excps = method()->exception_table();
+   for(int i = 0; i < excps->length(); i += 4) {
+     int handler_pc_idx = i+2;
+     BasicBlock *bb = get_basic_block_at(excps->int_at(handler_pc_idx));
+-    // If block is not already alive (due to multiple exception handlers to same bb), then 
++    // If block is not already alive (due to multiple exception handlers to same bb), then
+     // make it alive
+     if (bb->is_dead()) bb->mark_as_alive();
+   }
+ 
+-  BytecodeStream bcs(_method);  
++  BytecodeStream bcs(_method);
+ 
+   // Iterate through all basic blocks until we reach a fixpoint
+   while (change) {
+     change = 0;
+ 
+-    for (int i = 0; i < _bb_count; i++) {      
++    for (int i = 0; i < _bb_count; i++) {
+       BasicBlock *bb = &_basic_blocks[i];
+-      if (bb->is_alive()) {         
++      if (bb->is_alive()) {
+         // Position bytecodestream at last bytecode in basicblock
+-        bcs.set_start(bb->_end_bci); 
++        bcs.set_start(bb->_end_bci);
+         bcs.next();
+         Bytecodes::Code bytecode = bcs.code();
+         int bci = bcs.bci();
+         assert(bci == bb->_end_bci, "wrong bci");
+ 
+         bool fell_through = jump_targets_do(&bcs, &GenerateOopMap::reachable_basicblock, &change);
+-                
++
+         // We will also mark successors of jsr's as alive.
+         switch (bytecode) {
+           case Bytecodes::_jsr:
+           case Bytecodes::_jsr_w:
+-	    assert(!fell_through, "should not happen");
+-            reachable_basicblock(this, bci + Bytecodes::length_for(bytecode), &change); 
+-            break;          
++            assert(!fell_through, "should not happen");
++            reachable_basicblock(this, bci + Bytecodes::length_for(bytecode), &change);
++            break;
+         }
+         if (fell_through) {
+           // Mark successor as alive
+@@ -517,7 +514,7 @@
+   int bci = bcs->bci();
+ 
+   switch (bcs->code()) {
+-    case Bytecodes::_ifeq: 
++    case Bytecodes::_ifeq:
+     case Bytecodes::_ifne:
+     case Bytecodes::_iflt:
+     case Bytecodes::_ifge:
+@@ -531,52 +528,52 @@
+     case Bytecodes::_if_icmple:
+     case Bytecodes::_if_acmpeq:
+     case Bytecodes::_if_acmpne:
+-    case Bytecodes::_ifnull:   
+-    case Bytecodes::_ifnonnull:      
+-      (*jmpFct)(this, bcs->dest(), data); 
+-      (*jmpFct)(this, bci + 3, data);            
++    case Bytecodes::_ifnull:
++    case Bytecodes::_ifnonnull:
++      (*jmpFct)(this, bcs->dest(), data);
++      (*jmpFct)(this, bci + 3, data);
+       break;
+ 
+-    case Bytecodes::_goto:           
+-      (*jmpFct)(this, bcs->dest(), data);  
++    case Bytecodes::_goto:
++      (*jmpFct)(this, bcs->dest(), data);
+       break;
+-    case Bytecodes::_goto_w:         
+-      (*jmpFct)(this, bcs->dest_w(), data);    
++    case Bytecodes::_goto_w:
++      (*jmpFct)(this, bcs->dest_w(), data);
+       break;
+-    case Bytecodes::_tableswitch:  
++    case Bytecodes::_tableswitch:
+       { Bytecode_tableswitch *tableswitch = Bytecode_tableswitch_at(bcs->bcp());
+-        int len = tableswitch->length();        
+-        
++        int len = tableswitch->length();
++
+         (*jmpFct)(this, bci + tableswitch->default_offset(), data); /* Default. jump address */
+         while (--len >= 0) {
+           (*jmpFct)(this, bci + tableswitch->dest_offset_at(len), data);
+         }
+-        break; 
++        break;
+       }
+-      
++
+     case Bytecodes::_lookupswitch:
+-      { Bytecode_lookupswitch *lookupswitch = Bytecode_lookupswitch_at(bcs->bcp());        
+-        int npairs = lookupswitch->number_of_pairs(); 
++      { Bytecode_lookupswitch *lookupswitch = Bytecode_lookupswitch_at(bcs->bcp());
++        int npairs = lookupswitch->number_of_pairs();
+         (*jmpFct)(this, bci + lookupswitch->default_offset(), data); /* Default. */
+         while(--npairs >= 0) {
+           LookupswitchPair *pair = lookupswitch->pair_at(npairs);
+           (*jmpFct)(this, bci + pair->offset(), data);
+-        }        
+-        break; 
+-      }          
++        }
++        break;
++      }
+     case Bytecodes::_jsr:
+-      assert(bcs->is_wide()==false, "sanity check");      
+-      (*jmpFct)(this, bcs->dest(), data);             
++      assert(bcs->is_wide()==false, "sanity check");
++      (*jmpFct)(this, bcs->dest(), data);
++
+ 
+-    
+ 
+-      break;     
+-    case Bytecodes::_jsr_w:      
+-      (*jmpFct)(this, bcs->dest_w(), data); 
+-      break;          
+-    case Bytecodes::_wide:           
++      break;
++    case Bytecodes::_jsr_w:
++      (*jmpFct)(this, bcs->dest_w(), data);
++      break;
++    case Bytecodes::_wide:
+       ShouldNotReachHere();
+-      return true;    
++      return true;
+       break;
+     case Bytecodes::_athrow:
+     case Bytecodes::_ireturn:
+@@ -584,10 +581,10 @@
+     case Bytecodes::_freturn:
+     case Bytecodes::_dreturn:
+     case Bytecodes::_areturn:
+-    case Bytecodes::_return:         
++    case Bytecodes::_return:
+     case Bytecodes::_ret:
+-      break;    
+-    default:                 
++      break;
++    default:
+       return true;
+   }
+   return false;
+@@ -603,7 +600,7 @@
+ 
+ // Requires "pc" to be the start of an instruction; returns the basic
+ //   block containing that instruction.
+-BasicBlock  *GenerateOopMap::get_basic_block_containing(int bci) const {  
++BasicBlock  *GenerateOopMap::get_basic_block_containing(int bci) const {
+   BasicBlock *bbs = _basic_blocks;
+   int lo = 0, hi = _bb_count - 1;
+ 
+@@ -611,7 +608,7 @@
+     int m = (lo + hi) / 2;
+     int mbci = bbs[m]._bci;
+     int nbci;
+-	
++
+     if ( m == _bb_count-1) {
+       assert( bci >= mbci && bci < method()->code_size(), "sanity check failed");
+       return bbs+m;
+@@ -629,22 +626,22 @@
+     }
+   }
+ 
+-  fatal("should have found BB");  
++  fatal("should have found BB");
+   return NULL;
+ }
+ 
+ void GenerateOopMap::restore_state(BasicBlock *bb)
+-{  
++{
+   memcpy(_state, bb->_state, _state_len*sizeof(CellTypeState));
+-  _stack_top = bb->_stack_top;  
+-  _monitor_top = bb->_monitor_top;    
++  _stack_top = bb->_stack_top;
++  _monitor_top = bb->_monitor_top;
+ }
+ 
+ int GenerateOopMap::next_bb_start_pc(BasicBlock *bb) {
+  int bbNum = bb - _basic_blocks + 1;
+- if (bbNum == _bb_count) 
++ if (bbNum == _bb_count)
+     return method()->code_size();
+-    
++
+  return _basic_blocks[bbNum]._bci;
+ }
+ 
+@@ -652,7 +649,7 @@
+ // CellType handling methods
+ //
+ 
+-void GenerateOopMap::init_state() {  
++void GenerateOopMap::init_state() {
+   _state_len     = _max_locals + _max_stack + _max_monitors;
+   _state         = NEW_RESOURCE_ARRAY(CellTypeState, _state_len);
+   memset(_state, 0, _state_len * sizeof(CellTypeState));
+@@ -661,15 +658,15 @@
+ 
+ void GenerateOopMap::make_context_uninitialized() {
+   CellTypeState* vs = vars();
+-    
+-  for (int i = 0; i < _max_locals; i++) 
++
++  for (int i = 0; i < _max_locals; i++)
+       vs[i] = CellTypeState::uninit;
+ 
+   _stack_top = 0;
+   _monitor_top = 0;
+ }
+ 
+-int GenerateOopMap::methodsig_to_effect(symbolOop signature, bool is_static, CellTypeState* effect) {  
++int GenerateOopMap::methodsig_to_effect(symbolOop signature, bool is_static, CellTypeState* effect) {
+   ComputeEntryStack ces(signature);
+   return ces.compute_for_parameters(is_static, effect);
+ }
+@@ -743,7 +740,7 @@
+   return change;
+ }
+ 
+-void GenerateOopMap::copy_state(CellTypeState *dst, CellTypeState *src) { 
++void GenerateOopMap::copy_state(CellTypeState *dst, CellTypeState *src) {
+   int len = _max_locals + _stack_top;
+   for (int i = 0; i < len; i++) {
+     if (src[i].is_nonlock_reference()) {
+@@ -802,7 +799,7 @@
+     copy_state(bb->_state, _state);
+     bb->_stack_top = _stack_top;
+     bb->_monitor_top = _monitor_top;
+-    bb->set_changed(true);  
++    bb->set_changed(true);
+   } else {
+     verify_error("stack height conflict: %d vs. %d",  _stack_top, bb->_stack_top);
+   }
+@@ -812,7 +809,7 @@
+    gom->merge_state_into_bb(gom->get_basic_block_at(bci));
+ }
+ 
+-void GenerateOopMap::set_var(int localNo, CellTypeState cts) {  
++void GenerateOopMap::set_var(int localNo, CellTypeState cts) {
+   assert(cts.is_reference() || cts.is_value() || cts.is_address(),
+          "wrong celltypestate");
+   if (localNo < 0 || localNo > _max_locals) {
+@@ -831,7 +828,7 @@
+   return vars()[localNo];
+ }
+ 
+-CellTypeState GenerateOopMap::pop() {  
++CellTypeState GenerateOopMap::pop() {
+   if ( _stack_top <= 0) {
+     verify_error("stack underflow");
+     return valCTS; // just to pick something
+@@ -880,12 +877,12 @@
+ 
+ //
+ // Interpretation handling methods
+-// 
++//
+ 
+ void GenerateOopMap::do_interpretation()
+ {
+   // "i" is just for debugging, so we can detect cases where this loop is
+-  // iterated more than once.   
++  // iterated more than once.
+   int i = 0;
+   do {
+ #ifndef PRODUCT
+@@ -931,29 +928,29 @@
+       monitor_count++;
+     }
+ 
+-    int bci = j.bci();        
+-    if (is_bb_header(bci)) {                 
++    int bci = j.bci();
++    if (is_bb_header(bci)) {
+       // Initialize the basicblock structure
+       BasicBlock *bb   = _basic_blocks + bbNo;
+-      bb->_bci         = bci;      
++      bb->_bci         = bci;
+       bb->_max_locals  = _max_locals;
+       bb->_max_stack   = _max_stack;
+-      bb->set_changed(false);      
++      bb->set_changed(false);
+       bb->_stack_top   = BasicBlock::_dead_basic_block; // Initially all basic blocks are dead.
+-      bb->_monitor_top = bad_monitors;      
+-      
+-      if (bbNo > 0) {        
++      bb->_monitor_top = bad_monitors;
++
++      if (bbNo > 0) {
+         _basic_blocks[bbNo - 1]._end_bci = prev_bci;
+-      }      
++      }
+ 
+       bbNo++;
+     }
+     // Remember previous bci.
+     prev_bci = bci;
+   }
+-  // Set 
++  // Set
+   _basic_blocks[bbNo-1]._end_bci = prev_bci;
+-  
++
+ 
+   _max_monitors = monitor_count;
+ 
+@@ -986,7 +983,7 @@
+     int bc_len = Bytecodes::java_length_at(bcp);
+     assert(bb->_end_bci + bc_len == _method->code_size(), "wrong end bci");
+   }
+-#endif  
++#endif
+ 
+   // Check that the correct number of basicblocks was found
+   if (bbNo !=_bb_count) {
+@@ -1007,12 +1004,12 @@
+ 
+     // Initialize all locals to 'uninit' and set stack-height to 0
+     make_context_uninitialized();
+-  
+-    // Initialize CellState type of arguments 
++
++    // Initialize CellState type of arguments
+     methodsig_to_effect(method()->signature(), method()->is_static(), vars());
+ 
+     // If some references must be pre-assigned to null, then set that up
+-    initialize_vars(); 
++    initialize_vars();
+ 
+     // This is the start state
+     merge_state_into_bb(&_basic_blocks[0]);
+@@ -1020,10 +1017,10 @@
+     assert(_basic_blocks[0].changed(), "we are not getting off the ground");
+ }
+ 
+-// The instruction at bci is changing size by "delta".  Update the basic blocks. 
++// The instruction at bci is changing size by "delta".  Update the basic blocks.
+ void GenerateOopMap::update_basic_blocks(int bci, int delta,
+                                          int new_method_size) {
+-  assert(new_method_size >= method()->code_size() + delta, 
++  assert(new_method_size >= method()->code_size() + delta,
+          "new method size is too small");
+   int newWords = binsToHold(new_method_size);
+ 
+@@ -1031,14 +1028,14 @@
+ 
+   BitMap bb_bits(new_bb_hdr_bits, new_method_size);
+   bb_bits.clear();
+-    
+-  for(int k = 0; k < _bb_count; k++) {        
+-    if (_basic_blocks[k]._bci > bci) {      
+-      _basic_blocks[k]._bci     += delta;    
+-      _basic_blocks[k]._end_bci += delta;    
+-    }    
++
++  for(int k = 0; k < _bb_count; k++) {
++    if (_basic_blocks[k]._bci > bci) {
++      _basic_blocks[k]._bci     += delta;
++      _basic_blocks[k]._end_bci += delta;
++    }
+     bb_bits.at_put(_basic_blocks[k]._bci, true);
+-  }  
++  }
+   _bb_hdr_bits = new_bb_hdr_bits ;
+ }
+ 
+@@ -1046,10 +1043,10 @@
+ // Initvars handling
+ //
+ 
+-void GenerateOopMap::initialize_vars() {  
++void GenerateOopMap::initialize_vars() {
+   for (int k = 0; k < _init_vars->length(); k++)
+-    _state[_init_vars->at(k)] = CellTypeState::make_slot_ref(k);    
+-} 
++    _state[_init_vars->at(k)] = CellTypeState::make_slot_ref(k);
++}
+ 
+ void GenerateOopMap::add_to_ref_init_set(int localNo) {
+ 
+@@ -1059,13 +1056,13 @@
+   // Is it already in the set?
+   if (_init_vars->contains(localNo) )
+     return;
+-  
+-   _init_vars->append(localNo);    
++
++   _init_vars->append(localNo);
+ }
+ 
+ //
+ // Interpretation code
+-// 
++//
+ 
+ void GenerateOopMap::interp_all() {
+   bool change = true;
+@@ -1085,23 +1082,23 @@
+ }
+ 
+ void GenerateOopMap::interp_bb(BasicBlock *bb) {
+-  
++
+   // We do not want to do anything in case the basic-block has not been initialized. This
+   // will happen when there is dead code hanging around in a method.
+-  assert(bb->is_reachable(), "should be reachable or deadcode exist");  
++  assert(bb->is_reachable(), "should be reachable or deadcode exist");
+   restore_state(bb);
+ 
+-  BytecodeStream itr(_method);  
+-  
++  BytecodeStream itr(_method);
++
+   // Set iterator interval to be the current basicblock
+   int lim_bci = next_bb_start_pc(bb);
+-  itr.set_interval(bb->_bci, lim_bci); 
++  itr.set_interval(bb->_bci, lim_bci);
+   assert(lim_bci != bb->_bci, "must be at least one instruction in a basicblock");
+   itr.next(); // read first instruction
+ 
+   // Iterates through all bytecodes except the last in a basic block.
+   // We handle the last one special, since there is controlflow change.
+-  while(itr.next_bci() < lim_bci && !_got_error) {      
++  while(itr.next_bci() < lim_bci && !_got_error) {
+     if (_has_exceptions || _monitor_top != 0) {
+       // We do not need to interpret the results of exceptional
+       // continuation from this instruction when the method has no
+       // exception handlers and the monitor stack is currently
+       // empty.
+       // empty.
+       do_exception_edge(&itr);
+     }
+-    interp1(&itr);      
++    interp1(&itr);
+     itr.next();
+-  }        
++  }
+ 
+-  // Handle last instruction.  
++  // Handle last instruction.
+   if (!_got_error) {
+-    assert(itr.next_bci() == lim_bci, "must point to end");    
++    assert(itr.next_bci() == lim_bci, "must point to end");
+     if (_has_exceptions || _monitor_top != 0) {
+       do_exception_edge(&itr);
+     }
+     interp1(&itr);
+-    
++
+     bool fall_through = jump_targets_do(&itr, GenerateOopMap::merge_state, NULL);
+     if (_got_error)  return;
+-    
++
+     if (itr.code() == Bytecodes::_ret) {
+       assert(!fall_through, "cannot be set if ret instruction");
+       // Automatically handles 'wide' ret indices
+-      ret_jump_targets_do(&itr, GenerateOopMap::merge_state, itr.get_index(), NULL);  
++      ret_jump_targets_do(&itr, GenerateOopMap::merge_state, itr.get_index(), NULL);
+     } else if (fall_through) {
+      // Hit end of BB, but the instr. was a fall-through instruction,
+-     // so perform transition as if the BB ended in a "jump".      
++     // so perform transition as if the BB ended in a "jump".
+      if (lim_bci != bb[1]._bci) {
+        verify_error("bytecodes fell through last instruction");
+        return;
+      }
+-     merge_state_into_bb(bb + 1);        
++     merge_state_into_bb(bb + 1);
+     }
+   }
+ }
+ 
+-void GenerateOopMap::do_exception_edge(BytecodeStream* itr) {  
++void GenerateOopMap::do_exception_edge(BytecodeStream* itr) {
+   // Only check exception edge, if bytecode can trap
+   if (!Bytecodes::can_trap(itr->code())) return;
+   switch (itr->code()) {
+@@ -1186,32 +1183,32 @@
+       int catch_type = exct->int_at(i+3);
+ 
+       if (start_pc <= bci && bci < end_pc) {
+-	BasicBlock *excBB = get_basic_block_at(handler_pc);
+-	CellTypeState *excStk = excBB->stack();
+-	CellTypeState *cOpStck = stack();
+-	CellTypeState cOpStck_0 = cOpStck[0];
+-	int cOpStackTop = _stack_top;
+-
+-	// Exception stacks are always the same.
+-	assert(method()->max_stack() > 0, "sanity check");
+-
+-	// We remembered the size and first element of "cOpStck"
+-	// above; now we temporarily set them to the appropriate
+-	// values for an exception handler. */
+-	cOpStck[0] = CellTypeState::make_slot_ref(_max_locals);
+-	_stack_top = 1;
+-
+-	merge_state_into_bb(excBB);
+-
+-	// Now undo the temporary change.
+-	cOpStck[0] = cOpStck_0;
+-	_stack_top = cOpStackTop;
+-
+-	// If this is a "catch all" handler, then we do not need to
+-	// consider any additional handlers.
+-	if (catch_type == 0) {
+-	  return;
+-	}
++        BasicBlock *excBB = get_basic_block_at(handler_pc);
++        CellTypeState *excStk = excBB->stack();
++        CellTypeState *cOpStck = stack();
++        CellTypeState cOpStck_0 = cOpStck[0];
++        int cOpStackTop = _stack_top;
++
++        // Exception stacks are always the same.
++        assert(method()->max_stack() > 0, "sanity check");
++
++        // We remembered the size and first element of "cOpStck"
++        // above; now we temporarily set them to the appropriate
++        // values for an exception handler.
++        cOpStck[0] = CellTypeState::make_slot_ref(_max_locals);
++        _stack_top = 1;
++
++        merge_state_into_bb(excBB);
++
++        // Now undo the temporary change.
++        cOpStck[0] = cOpStck_0;
++        _stack_top = cOpStackTop;
++
++        // If this is a "catch all" handler, then we do not need to
++        // consider any additional handlers.
++        if (catch_type == 0) {
++          return;
++        }
+       }
+     }
+   }
+@@ -1229,7 +1226,7 @@
+ 
+   // We don't set _monitor_top to bad_monitors because there are no successors
+   // to this exceptional exit.
+- 
++
+   if (TraceMonitorMismatch && _monitor_safe) {
+     // We check _monitor_safe so that we only report the first mismatched
+     // exceptional exit.
+@@ -1265,9 +1262,9 @@
+     os->print("    %s", Bytecodes::name(currentBC->code()));
+     switch(currentBC->code()) {
+       case Bytecodes::_invokevirtual:
+-      case Bytecodes::_invokespecial:     
+-      case Bytecodes::_invokestatic:      
+-      case Bytecodes::_invokeinterface:   
++      case Bytecodes::_invokespecial:
++      case Bytecodes::_invokestatic:
++      case Bytecodes::_invokeinterface:
+         int idx = currentBC->get_index_big();
+         constantPoolOop cp    = method()->constants();
+         int nameAndTypeIdx    = cp->name_and_type_ref_index_at(idx);
+@@ -1296,9 +1293,9 @@
+     }
+     switch(currentBC->code()) {
+       case Bytecodes::_invokevirtual:
+-      case Bytecodes::_invokespecial:     
+-      case Bytecodes::_invokestatic:      
+-      case Bytecodes::_invokeinterface:   
++      case Bytecodes::_invokespecial:
++      case Bytecodes::_invokestatic:
++      case Bytecodes::_invokeinterface:
+         int idx = currentBC->get_index_big();
+         constantPoolOop cp    = method()->constants();
+         int nameAndTypeIdx    = cp->name_and_type_ref_index_at(idx);
+@@ -1312,7 +1309,7 @@
+ 
+ // Sets the current state to be the state after executing the
+ // current instruction, starting in the current state.
+-void GenerateOopMap::interp1(BytecodeStream *itr) {  
++void GenerateOopMap::interp1(BytecodeStream *itr) {
+   if (TraceNewOopMapGeneration) {
+     print_current_state(tty, itr, TraceNewOopMapGenerationDetailed);
+   }
+@@ -1323,59 +1320,59 @@
+   if (_report_result == true) {
+     switch(itr->code()) {
+       case Bytecodes::_invokevirtual:
+-      case Bytecodes::_invokespecial:     
+-      case Bytecodes::_invokestatic:      
+-      case Bytecodes::_invokeinterface:   
++      case Bytecodes::_invokespecial:
++      case Bytecodes::_invokestatic:
++      case Bytecodes::_invokeinterface:
+         _itr_send = itr;
+         _report_result_for_send = true;
+-        break;  
++        break;
+       default:
+        fill_stackmap_for_opcodes(itr, vars(), stack(), _stack_top);
+        break;
+     }
+   }
+ 
+-  // abstract interpretation of current opcode   
++  // abstract interpretation of current opcode
+   switch(itr->code()) {
+     case Bytecodes::_nop:                                           break;
+-    case Bytecodes::_goto:                                          break; 
++    case Bytecodes::_goto:                                          break;
+     case Bytecodes::_goto_w:                                        break;
+     case Bytecodes::_iinc:                                          break;
+     case Bytecodes::_return:            do_return_monitor_check();
+                                         break;
+ 
+-    case Bytecodes::_aconst_null:       
++    case Bytecodes::_aconst_null:
+     case Bytecodes::_new:               ppush1(CellTypeState::make_line_ref(itr->bci()));
+-                                        break; 
++                                        break;
+ 
+-    case Bytecodes::_iconst_m1:          
+-    case Bytecodes::_iconst_0:          
+-    case Bytecodes::_iconst_1:          
+-    case Bytecodes::_iconst_2:          
+-    case Bytecodes::_iconst_3:          
+-    case Bytecodes::_iconst_4:          
+-    case Bytecodes::_iconst_5:          
+-    case Bytecodes::_fconst_0:          
+-    case Bytecodes::_fconst_1:          
+-    case Bytecodes::_fconst_2:          
+-    case Bytecodes::_bipush:            
++    case Bytecodes::_iconst_m1:
++    case Bytecodes::_iconst_0:
++    case Bytecodes::_iconst_1:
++    case Bytecodes::_iconst_2:
++    case Bytecodes::_iconst_3:
++    case Bytecodes::_iconst_4:
++    case Bytecodes::_iconst_5:
++    case Bytecodes::_fconst_0:
++    case Bytecodes::_fconst_1:
++    case Bytecodes::_fconst_2:
++    case Bytecodes::_bipush:
+     case Bytecodes::_sipush:            ppush1(valCTS);             break;
+ 
+     case Bytecodes::_lconst_0:
+-    case Bytecodes::_lconst_1:          
++    case Bytecodes::_lconst_1:
+     case Bytecodes::_dconst_0:
+-    case Bytecodes::_dconst_1:          ppush(vvCTS);               break; 
+-    
++    case Bytecodes::_dconst_1:          ppush(vvCTS);               break;
++
+     case Bytecodes::_ldc2_w:            ppush(vvCTS);               break;
+ 
+-    case Bytecodes::_ldc:               do_ldc(itr->get_index(), itr->bci());    break; 
+-    case Bytecodes::_ldc_w:             do_ldc(itr->get_index_big(), itr->bci());break; 
+-    
+-    case Bytecodes::_iload:             
+-    case Bytecodes::_fload:             ppload(vCTS, itr->get_index()); break; 
++    case Bytecodes::_ldc:               do_ldc(itr->get_index(), itr->bci());    break;
++    case Bytecodes::_ldc_w:             do_ldc(itr->get_index_big(), itr->bci());break;
+ 
+-    case Bytecodes::_lload:             
+-    case Bytecodes::_dload:             ppload(vvCTS,itr->get_index()); break; 
++    case Bytecodes::_iload:
++    case Bytecodes::_fload:             ppload(vCTS, itr->get_index()); break;
++
++    case Bytecodes::_lload:
++    case Bytecodes::_dload:             ppload(vvCTS,itr->get_index()); break;
+ 
+     case Bytecodes::_aload:             ppload(rCTS, itr->get_index()); break;
+ 
+@@ -1388,23 +1385,23 @@
+     case Bytecodes::_iload_3:
+     case Bytecodes::_fload_3:           ppload(vCTS, 3);            break;
+ 
+-    case Bytecodes::_lload_0:           
++    case Bytecodes::_lload_0:
+     case Bytecodes::_dload_0:           ppload(vvCTS, 0);           break;
+-    case Bytecodes::_lload_1:           
++    case Bytecodes::_lload_1:
+     case Bytecodes::_dload_1:           ppload(vvCTS, 1);           break;
+-    case Bytecodes::_lload_2:           
++    case Bytecodes::_lload_2:
+     case Bytecodes::_dload_2:           ppload(vvCTS, 2);           break;
+-    case Bytecodes::_lload_3:           
++    case Bytecodes::_lload_3:
+     case Bytecodes::_dload_3:           ppload(vvCTS, 3);           break;
+-             
++
+     case Bytecodes::_aload_0:           ppload(rCTS, 0);            break;
+     case Bytecodes::_aload_1:           ppload(rCTS, 1);            break;
+     case Bytecodes::_aload_2:           ppload(rCTS, 2);            break;
+     case Bytecodes::_aload_3:           ppload(rCTS, 3);            break;
+ 
+-    case Bytecodes::_iaload:            
+-    case Bytecodes::_faload:            
+-    case Bytecodes::_baload:    
++    case Bytecodes::_iaload:
++    case Bytecodes::_faload:
++    case Bytecodes::_baload:
+     case Bytecodes::_caload:
+     case Bytecodes::_saload:            pp(vrCTS, vCTS); break;
+ 
+@@ -1412,47 +1409,47 @@
+     case Bytecodes::_daload:            pp(vrCTS, vvCTS); break;
+ 
+     case Bytecodes::_aaload:            pp_new_ref(vrCTS, itr->bci()); break;
+-    
+-    case Bytecodes::_istore:            
++
++    case Bytecodes::_istore:
+     case Bytecodes::_fstore:            ppstore(vCTS, itr->get_index()); break;
+ 
+-    case Bytecodes::_lstore:            
++    case Bytecodes::_lstore:
+     case Bytecodes::_dstore:            ppstore(vvCTS, itr->get_index()); break;
+ 
+     case Bytecodes::_astore:            do_astore(itr->get_index());     break;
+ 
+-    case Bytecodes::_istore_0:          
++    case Bytecodes::_istore_0:
+     case Bytecodes::_fstore_0:          ppstore(vCTS, 0);           break;
+-    case Bytecodes::_istore_1:          
++    case Bytecodes::_istore_1:
+     case Bytecodes::_fstore_1:          ppstore(vCTS, 1);           break;
+-    case Bytecodes::_istore_2:          
++    case Bytecodes::_istore_2:
+     case Bytecodes::_fstore_2:          ppstore(vCTS, 2);           break;
+-    case Bytecodes::_istore_3:          
++    case Bytecodes::_istore_3:
+     case Bytecodes::_fstore_3:          ppstore(vCTS, 3);           break;
+ 
+-    case Bytecodes::_lstore_0:          
++    case Bytecodes::_lstore_0:
+     case Bytecodes::_dstore_0:          ppstore(vvCTS, 0);          break;
+-    case Bytecodes::_lstore_1:          
++    case Bytecodes::_lstore_1:
+     case Bytecodes::_dstore_1:          ppstore(vvCTS, 1);          break;
+-    case Bytecodes::_lstore_2:          
++    case Bytecodes::_lstore_2:
+     case Bytecodes::_dstore_2:          ppstore(vvCTS, 2);          break;
+-    case Bytecodes::_lstore_3:          
++    case Bytecodes::_lstore_3:
+     case Bytecodes::_dstore_3:          ppstore(vvCTS, 3);          break;
+-            
++
+     case Bytecodes::_astore_0:          do_astore(0);               break;
+     case Bytecodes::_astore_1:          do_astore(1);               break;
+     case Bytecodes::_astore_2:          do_astore(2);               break;
+     case Bytecodes::_astore_3:          do_astore(3);               break;
+ 
+-    case Bytecodes::_iastore:           
+-    case Bytecodes::_fastore:           
++    case Bytecodes::_iastore:
++    case Bytecodes::_fastore:
+     case Bytecodes::_bastore:
+     case Bytecodes::_castore:
+     case Bytecodes::_sastore:           ppop(vvrCTS);               break;
+-    case Bytecodes::_lastore:           
++    case Bytecodes::_lastore:
+     case Bytecodes::_dastore:           ppop(vvvrCTS);              break;
+     case Bytecodes::_aastore:           ppop(rvrCTS);               break;
+-        
++
+     case Bytecodes::_pop:               ppop_any(1);                break;
+     case Bytecodes::_pop2:              ppop_any(2);                break;
+ 
+@@ -1464,66 +1461,66 @@
+     case Bytecodes::_dup2_x2:           ppdupswap(4, "214321");     break;
+     case Bytecodes::_swap:              ppdupswap(2, "12");         break;
+ 
+-    case Bytecodes::_iadd:              
+-    case Bytecodes::_fadd:              
+-    case Bytecodes::_isub:              
+-    case Bytecodes::_fsub:              
+-    case Bytecodes::_imul:              
+-    case Bytecodes::_fmul:              
+-    case Bytecodes::_idiv:              
+-    case Bytecodes::_fdiv:              
+-    case Bytecodes::_irem:              
+-    case Bytecodes::_frem:              
+-    case Bytecodes::_ishl:              
+-    case Bytecodes::_ishr:              
+-    case Bytecodes::_iushr:             
+-    case Bytecodes::_iand:              
+-    case Bytecodes::_ior:               
+-    case Bytecodes::_ixor:              
+-    case Bytecodes::_l2f:               
++    case Bytecodes::_iadd:
++    case Bytecodes::_fadd:
++    case Bytecodes::_isub:
++    case Bytecodes::_fsub:
++    case Bytecodes::_imul:
++    case Bytecodes::_fmul:
++    case Bytecodes::_idiv:
++    case Bytecodes::_fdiv:
++    case Bytecodes::_irem:
++    case Bytecodes::_frem:
++    case Bytecodes::_ishl:
++    case Bytecodes::_ishr:
++    case Bytecodes::_iushr:
++    case Bytecodes::_iand:
++    case Bytecodes::_ior:
++    case Bytecodes::_ixor:
++    case Bytecodes::_l2f:
+     case Bytecodes::_l2i:
+-    case Bytecodes::_d2f:               
+-    case Bytecodes::_d2i:               
++    case Bytecodes::_d2f:
++    case Bytecodes::_d2i:
+     case Bytecodes::_fcmpl:
+     case Bytecodes::_fcmpg:             pp(vvCTS, vCTS); break;
+-    
+-    case Bytecodes::_ladd:              
+-    case Bytecodes::_dadd:              
+-    case Bytecodes::_lsub:              
+-    case Bytecodes::_dsub:              
+-    case Bytecodes::_lmul:              
+-    case Bytecodes::_dmul:              
+-    case Bytecodes::_ldiv:              
+-    case Bytecodes::_ddiv:              
+-    case Bytecodes::_lrem:              
+-    case Bytecodes::_drem:              
+-    case Bytecodes::_land:              
+-    case Bytecodes::_lor:               
++
++    case Bytecodes::_ladd:
++    case Bytecodes::_dadd:
++    case Bytecodes::_lsub:
++    case Bytecodes::_dsub:
++    case Bytecodes::_lmul:
++    case Bytecodes::_dmul:
++    case Bytecodes::_ldiv:
++    case Bytecodes::_ddiv:
++    case Bytecodes::_lrem:
++    case Bytecodes::_drem:
++    case Bytecodes::_land:
++    case Bytecodes::_lor:
+     case Bytecodes::_lxor:              pp(vvvvCTS, vvCTS); break;
+ 
+-    case Bytecodes::_ineg:              
+-    case Bytecodes::_fneg:              
+-    case Bytecodes::_i2f:               
+-    case Bytecodes::_f2i:               
++    case Bytecodes::_ineg:
++    case Bytecodes::_fneg:
++    case Bytecodes::_i2f:
++    case Bytecodes::_f2i:
+     case Bytecodes::_i2c:
+-    case Bytecodes::_i2s:               
++    case Bytecodes::_i2s:
+     case Bytecodes::_i2b:               pp(vCTS, vCTS); break;
+ 
+-    case Bytecodes::_lneg:              
+-    case Bytecodes::_dneg:              
+-    case Bytecodes::_l2d:               
++    case Bytecodes::_lneg:
++    case Bytecodes::_dneg:
++    case Bytecodes::_l2d:
+     case Bytecodes::_d2l:               pp(vvCTS, vvCTS); break;
+-      
+-    case Bytecodes::_lshl:              
+-    case Bytecodes::_lshr:              
+-    case Bytecodes::_lushr:             pp(vvvCTS, vvCTS); break;    
+-      
+-    case Bytecodes::_i2l:               
+-    case Bytecodes::_i2d:               
++
++    case Bytecodes::_lshl:
++    case Bytecodes::_lshr:
++    case Bytecodes::_lushr:             pp(vvvCTS, vvCTS); break;
++
++    case Bytecodes::_i2l:
++    case Bytecodes::_i2d:
+     case Bytecodes::_f2l:
+     case Bytecodes::_f2d:               pp(vCTS, vvCTS); break;
+-            
+-    case Bytecodes::_lcmp:              pp(vvvvCTS, vCTS); break;    
++
++    case Bytecodes::_lcmp:              pp(vvvvCTS, vCTS); break;
+     case Bytecodes::_dcmpl:
+     case Bytecodes::_dcmpg:             pp(vvvvCTS, vCTS); break;
+ 
+@@ -1532,10 +1529,10 @@
+     case Bytecodes::_iflt:
+     case Bytecodes::_ifge:
+     case Bytecodes::_ifgt:
+-    case Bytecodes::_ifle:              
++    case Bytecodes::_ifle:
+     case Bytecodes::_tableswitch:       ppop1(valCTS);
+                                         break;
+-    case Bytecodes::_ireturn:           
++    case Bytecodes::_ireturn:
+     case Bytecodes::_freturn:           do_return_monitor_check();
+                                         ppop1(valCTS);
+                                         break;
+@@ -1557,27 +1554,27 @@
+ 
+     case Bytecodes::_if_acmpeq:
+     case Bytecodes::_if_acmpne:         ppop(rrCTS);                 break;
+-    
++
+     case Bytecodes::_jsr:               do_jsr(itr->dest());         break;
+     case Bytecodes::_jsr_w:             do_jsr(itr->dest_w());       break;
+-    
++
+     case Bytecodes::_getstatic:         do_field(true,  true,
+                                                  itr->get_index_big(),
+                                                  itr->bci()); break;
+-    case Bytecodes::_putstatic:         do_field(false, true,  itr->get_index_big(), itr->bci()); break; 
++    case Bytecodes::_putstatic:         do_field(false, true,  itr->get_index_big(), itr->bci()); break;
+     case Bytecodes::_getfield:          do_field(true,  false, itr->get_index_big(), itr->bci()); break;
+     case Bytecodes::_putfield:          do_field(false, false, itr->get_index_big(), itr->bci()); break;
+ 
+     case Bytecodes::_invokevirtual:
+     case Bytecodes::_invokespecial:     do_method(false, false, itr->get_index_big(), itr->bci()); break;
+     case Bytecodes::_invokestatic:      do_method(true,  false, itr->get_index_big(), itr->bci()); break;
+-    case Bytecodes::_invokeinterface:   do_method(false, true,  itr->get_index_big(), itr->bci()); break;            
++    case Bytecodes::_invokeinterface:   do_method(false, true,  itr->get_index_big(), itr->bci()); break;
+     case Bytecodes::_newarray:
+     case Bytecodes::_anewarray:         pp_new_ref(vCTS, itr->bci()); break;
+     case Bytecodes::_checkcast:         do_checkcast(); break;
+-    case Bytecodes::_arraylength:       
++    case Bytecodes::_arraylength:
+     case Bytecodes::_instanceof:        pp(rCTS, vCTS); break;
+-    case Bytecodes::_monitorenter:      do_monitorenter(itr->bci()); break;  
++    case Bytecodes::_monitorenter:      do_monitorenter(itr->bci()); break;
+     case Bytecodes::_monitorexit:       do_monitorexit(itr->bci()); break;
+ 
+     case Bytecodes::_athrow:            // handled by do_exception_edge() BUT ...
+@@ -1587,21 +1584,21 @@
+                                           _monitor_safe = false;
+                                         }
+                                         break;
+-                                        
++
+     case Bytecodes::_areturn:           do_return_monitor_check();
+                                         ppop1(refCTS);
+                                         break;
+-    case Bytecodes::_ifnull:   
++    case Bytecodes::_ifnull:
+     case Bytecodes::_ifnonnull:         ppop1(refCTS); break;
+-    case Bytecodes::_multianewarray:    do_multianewarray(*(itr->bcp()+3), itr->bci()); break; 
+-            
++    case Bytecodes::_multianewarray:    do_multianewarray(*(itr->bcp()+3), itr->bci()); break;
++
+     case Bytecodes::_wide:              fatal("Iterator should skip this bytecode"); break;
+     case Bytecodes::_ret:                                           break;
+-    
++
+     // Java opcodes
+-    case Bytecodes::_lookupswitch:      ppop1(valCTS);             break;  
++    case Bytecodes::_lookupswitch:      ppop1(valCTS);             break;
+ 
+-    default: 
++    default:
+          tty->print("unexpected opcode: %d\n", itr->code());
+          ShouldNotReachHere();
+     break;
+@@ -1618,7 +1615,7 @@
+   while(!(*in).is_bottom()) {
+     CellTypeState expected =*in++;
+     CellTypeState actual   = pop();
+-    check_type(expected, actual);  
++    check_type(expected, actual);
+     assert(loc_no >= 0, "sanity check");
+     set_var(loc_no++, actual);
+   }
+@@ -1660,7 +1657,7 @@
+     }
+     loc_no++;
+   }
+-} 
++}
+ 
+ void GenerateOopMap::ppdupswap(int poplen, const char *out) {
+   CellTypeState actual[5];
+@@ -1668,7 +1665,7 @@
+ 
+   // pop all arguments
+   for(int i = 0; i < poplen; i++) actual[i] = pop();
+-  
++
+   // put them back
+   char push_ch = *out++;
+   while (push_ch != '\0') {
+@@ -1689,7 +1686,7 @@
+     ppop1(*out++);
+   }
+ }
+- 
++
+ void GenerateOopMap::ppush1(CellTypeState in) {
+   assert(in.is_reference() | in.is_value(), "sanity check");
+   push(in);
+@@ -1834,7 +1831,7 @@
+   }
+ }
+ 
+-void GenerateOopMap::do_jsr(int targ_bci) {  
++void GenerateOopMap::do_jsr(int targ_bci) {
+   push(CellTypeState::make_addr(targ_bci));
+ }
+ 
+@@ -1845,8 +1842,8 @@
+   constantTag tag    = cp->tag_at(idx);
+ 
+   CellTypeState cts = (tag.is_string() || tag.is_unresolved_string() ||
+-                       tag.is_klass()  || tag.is_unresolved_klass()) 
+-                    ? CellTypeState::make_line_ref(bci) : valCTS; 
++                       tag.is_klass()  || tag.is_unresolved_klass())
++                    ? CellTypeState::make_line_ref(bci) : valCTS;
+   ppush1(cts);
+ }
+ 
+@@ -1865,11 +1862,11 @@
+     // really matter (at least for now)
+     verify_error("wrong type on stack (found: %c, expected: {pr})", r_or_p.to_char());
+     return;
+-  }   
++  }
+   set_var(idx, r_or_p);
+ }
+ 
+-// Copies bottom/zero terminated CTS string from "src" into "dst". 
++// Copies bottom/zero terminated CTS string from "src" into "dst".
+ //   Does NOT terminate with a bottom. Returns the number of cells copied.
+ int GenerateOopMap::copy_cts(CellTypeState *dst, CellTypeState *src) {
+   int idx = 0;
+@@ -1893,7 +1890,7 @@
+   char sigch = (char)*(signature->base());
+   CellTypeState temp[4];
+   CellTypeState *eff  = sigchar_to_effect(sigch, bci, temp);
+-  
++
+   CellTypeState in[4];
+   CellTypeState *out;
+   int i =  0;
+@@ -1916,14 +1913,14 @@
+   int nameAndTypeIdx    = cp->name_and_type_ref_index_at(idx);
+   int signatureIdx      = cp->signature_ref_index_at(nameAndTypeIdx);
+   symbolOop signature   = cp->symbol_at(signatureIdx);
+-  
++
+   // Parse method signature
+   CellTypeState out[4];
+-  CellTypeState in[MAXARGSIZE+1];   // Includes result      
++  CellTypeState in[MAXARGSIZE+1];   // Includes result
+   ComputeCallStack cse(signature);
+ 
+   // Compute return type
+-  int res_length=  cse.compute_for_returntype(out);       
++  int res_length=  cse.compute_for_returntype(out);
+ 
+   // Temporary hack.
+   if (out[0].equal(CellTypeState::ref) && out[1].equal(CellTypeState::bottom)) {
+@@ -1941,12 +1938,12 @@
+ 
+   // Report results
+   if (_report_result_for_send == true) {
+-     fill_stackmap_for_opcodes(_itr_send, vars(), stack(), _stack_top); 
++     fill_stackmap_for_opcodes(_itr_send, vars(), stack(), _stack_top);
+      _report_result_for_send = false;
+   }
+ 
+   // Push return address
+-  ppush(out);  
++  ppush(out);
+ }
+ 
+ // This is used to parse the signature for fields, since they are very simple...
+@@ -1969,34 +1966,34 @@
+ // state is valid for that instruction. Furthermore, the ret instruction
+ // must be the last instruction in "bb" (we store information about the
+ // "ret" in "bb").
+-void GenerateOopMap::ret_jump_targets_do(BytecodeStream *bcs, jmpFct_t jmpFct, int varNo, int *data) {    
+-  CellTypeState ra = vars()[varNo];      
++void GenerateOopMap::ret_jump_targets_do(BytecodeStream *bcs, jmpFct_t jmpFct, int varNo, int *data) {
++  CellTypeState ra = vars()[varNo];
+   if (!ra.is_good_address()) {
+     verify_error("ret returns from two jsr subroutines?");
+     return;
+   }
+-  int target = ra.get_info();  
++  int target = ra.get_info();
+ 
+   RetTableEntry* rtEnt = _rt.find_jsrs_for_target(target);
+   int bci = bcs->bci();
+   for (int i = 0; i < rtEnt->nof_jsrs(); i++) {
+-    int target_bci = rtEnt->jsrs(i);    
+-    // Make sure a jrtRet does not set the changed bit for dead basicblock.     
++    int target_bci = rtEnt->jsrs(i);
++    // Make sure a jrtRet does not set the changed bit for dead basicblock.
+     BasicBlock* jsr_bb    = get_basic_block_containing(target_bci - 1);
+     debug_only(BasicBlock* target_bb = &jsr_bb[1];)
+-    assert(target_bb  == get_basic_block_at(target_bci), "wrong calc. of successor basicblock");    
++    assert(target_bb  == get_basic_block_at(target_bci), "wrong calc. of successor basicblock");
+     bool alive = jsr_bb->is_alive();
+     if (TraceNewOopMapGeneration) {
+       tty->print("pc = %d, ret -> %d alive: %s\n", bci, target_bci, alive ? "true" : "false");
+     }
+-    if (alive) jmpFct(this, target_bci, data);        
++    if (alive) jmpFct(this, target_bci, data);
+   }
+ }
+ 
+ //
+ // Debug method
+-// 
+-char* GenerateOopMap::state_vec_to_string(CellTypeState* vec, int len) {  
++//
++char* GenerateOopMap::state_vec_to_string(CellTypeState* vec, int len) {
+ #ifdef ASSERT
+   int checklen = MAX3(_max_locals, _max_stack, _max_monitors) + 1;
+   assert(len < checklen, "state_vec_buf overflow");
+@@ -2014,11 +2011,11 @@
+   GenerateOopMap::_total_byte_count / GenerateOopMap::_total_oopmap_time.seconds());
+ }
+ 
+-// 
++//
+ //  ============ Main Entry Point ===========
+ //
+ GenerateOopMap::GenerateOopMap(methodHandle method) {
+-  // We have to initialize all variables here, that can be queried direcly   
++  // We have to initialize all variables here that can be queried directly
+   _method = method;
+   _max_locals=0;
+   _init_vars = NULL;
+@@ -2044,7 +2041,7 @@
+   TraceTime t_single("oopmap time", TimeOopMap2);
+   TraceTime t_all(NULL, &_total_oopmap_time, TimeOopMap);
+ 
+-  // Initialize values  
++  // Initialize values
+   _got_error      = false;
+   _conflict       = false;
+   _max_locals     = method()->max_locals();
+@@ -2057,24 +2054,24 @@
+   _new_var_map    = NULL;
+   _ret_adr_tos    = new GrowableArray<intptr_t>(5);  // 5 seems like a good number;
+   _did_rewriting  = false;
+-  _did_relocation = false;  
++  _did_relocation = false;
+ 
+-  if (TraceNewOopMapGeneration) {    
+-    tty->print("Method name: %s\n", method()->name()->as_C_string());    
++  if (TraceNewOopMapGeneration) {
++    tty->print("Method name: %s\n", method()->name()->as_C_string());
+     if (Verbose) {
+       _method->print_codes();
+       tty->print_cr("Exception table:");
+       typeArrayOop excps = method()->exception_table();
+       for(int i = 0; i < excps->length(); i += 4) {
+-        tty->print_cr("[%d - %d] -> %d", excps->int_at(i + 0), excps->int_at(i + 1), excps->int_at(i + 2));        
++        tty->print_cr("[%d - %d] -> %d", excps->int_at(i + 0), excps->int_at(i + 1), excps->int_at(i + 2));
+       }
+-    }    
++    }
+   }
+ 
+   // if no code - do nothing
+   // compiler needs info
+-  if (method()->code_size() == 0 || _max_locals + method()->max_stack() == 0) {    
+-    fill_stackmap_prolog(0);    
++  if (method()->code_size() == 0 || _max_locals + method()->max_stack() == 0) {
++    fill_stackmap_prolog(0);
+     fill_stackmap_epilog();
+     return;
+   }
+@@ -2090,9 +2087,9 @@
+   if (!_got_error)
+     do_interpretation();
+ 
+-  // Step 4:Return results  
++  // Step 4:Return results
+   if (!_got_error && report_results())
+-     report_result();  
++     report_result();
+ 
+   if (_got_error) {
+     THROW_HANDLE(_exception);
+@@ -2114,13 +2111,13 @@
+   // Append method name
+   char msg_buffer2[512];
+   jio_snprintf(msg_buffer2, sizeof(msg_buffer2), "%s in method %s", msg_buffer, method()->name()->as_C_string());
+-  _exception = Exceptions::new_exception(Thread::current(), 
+-                vmSymbols::java_lang_LinkageError(), msg_buffer2);  
++  _exception = Exceptions::new_exception(Thread::current(),
++                vmSymbols::java_lang_LinkageError(), msg_buffer2);
+ }
+ 
+ void GenerateOopMap::report_error(const char *format, ...) {
+   va_list ap;
+-  va_start(ap, format);  
++  va_start(ap, format);
+   error_work(format, ap);
+ }
+ 
+@@ -2135,16 +2132,16 @@
+ // Report result opcodes
+ //
+ void GenerateOopMap::report_result() {
+-  
++
+   if (TraceNewOopMapGeneration) tty->print_cr("Report result pass");
+ 
+   // We now want to report the result of the parse
+   _report_result = true;
+-  
++
+   // Prolog code
+-  fill_stackmap_prolog(_gc_points);  
++  fill_stackmap_prolog(_gc_points);
+ 
+-   // Mark everything changed, then do one interpretation pass. 
++   // Mark everything changed, then do one interpretation pass.
+   for (int i = 0; i<_bb_count; i++) {
+     if (_basic_blocks[i].is_reachable()) {
+       _basic_blocks[i].set_changed(true);
+@@ -2163,17 +2160,17 @@
+   _report_result = false;
+ }
+ 
+-void GenerateOopMap::result_for_basicblock(int bci) {  
++void GenerateOopMap::result_for_basicblock(int bci) {
+  if (TraceNewOopMapGeneration) tty->print_cr("Report result pass for basicblock");
+ 
+   // We now want to report the result of the parse
+   _report_result = true;
+- 
++
+   // Find basicblock and report results
+   BasicBlock* bb = get_basic_block_containing(bci);
+   assert(bb->is_reachable(), "getting result from unreachable basicblock");
+   bb->set_changed(true);
+-  interp_bb(bb);  
++  interp_bb(bb);
+ }
+ 
+ //
+@@ -2191,7 +2188,7 @@
+     _new_var_map = NEW_RESOURCE_ARRAY(int, _max_locals);
+     for (int k = 0; k < _max_locals; k++)  _new_var_map[k] = k;
+   }
+-   
++
+   if ( _new_var_map[varNo] == varNo) {
+     // Check if max. number of locals has been reached
+     if (_max_locals + _nof_refval_conflicts >= MAX_LOCAL_VARS) {
+@@ -2215,12 +2212,12 @@
+   if ( _nof_refval_conflicts == 0 )
+      return;
+ 
+-  // Check if rewrites are allowed in this parse. 
++  // Check if rewrites are allowed in this parse.
+   if (!allow_rewrites() && !IgnoreRewrites) {
+     fatal("Rewriting method not allowed at this stage");
+   }
+ 
+- 
++
+   // The following flag is to temporarily suppress rewrites. The locals that might conflict will
+   // all be set to contain values. This is UNSAFE - however, until the rewriting has been completely
+   // tested it is nice to have.
+@@ -2235,7 +2232,7 @@
+        }
+        tty->cr();
+     }
+-     
++
+     // That was that...
+     _new_var_map = NULL;
+     _nof_refval_conflicts = 0;
+@@ -2245,10 +2242,10 @@
+   }
+ 
+   // Tracing flag
+-  _did_rewriting = true;  
+-    
++  _did_rewriting = true;
++
+   if (TraceOopMapRewrites) {
+-    tty->print_cr("ref/value conflict for method %s - bytecodes are getting rewritten", method()->name()->as_C_string());      
++    tty->print_cr("ref/value conflict for method %s - bytecodes are getting rewritten", method()->name()->as_C_string());
+     method()->print();
+     method()->print_codes();
+   }
+@@ -2263,35 +2260,35 @@
+         if (TraceOopMapRewrites) {
+           tty->print_cr("Rewriting: %d -> %d", k, _new_var_map[k]);
+         }
+-        rewrite_refval_conflict(k, _new_var_map[k]);	
++        rewrite_refval_conflict(k, _new_var_map[k]);
+         if (_got_error) return;
+         nof_conflicts++;
+       }
+     }
+   }
+-    
++
+   assert(nof_conflicts == _nof_refval_conflicts, "sanity check");
+ 
+   // Adjust the number of locals
+   method()->set_max_locals(_max_locals+_nof_refval_conflicts);
+   _max_locals += _nof_refval_conflicts;
+-  
++
+   // That was that...
+   _new_var_map = NULL;
+   _nof_refval_conflicts = 0;
+ }
+ 
+ void GenerateOopMap::rewrite_refval_conflict(int from, int to) {
+-  bool startOver;  
++  bool startOver;
+   do {
+-    // Make sure that the BytecodeStream is constructed in the loop, since 
++    // Make sure that the BytecodeStream is constructed in the loop, since
+     // during rewriting a new method oop is going to be used, and the next time
+     // around we want to use that.
+-    BytecodeStream bcs(_method);  
+-    startOver = false;    
++    BytecodeStream bcs(_method);
++    startOver = false;
+ 
+-    while( bcs.next() >=0 && !startOver && !_got_error) {      
+-      startOver = rewrite_refval_conflict_inst(&bcs, from, to);	    
++    while( bcs.next() >=0 && !startOver && !_got_error) {
++      startOver = rewrite_refval_conflict_inst(&bcs, from, to);
+     }
+   } while (startOver && !_got_error);
+ }
+@@ -2300,23 +2297,23 @@
+    in a ref way, change it to use "to". There's a subtle reason why we
+    renumber the ref uses and not the non-ref uses: non-ref uses may be
+    2 slots wide (double, long) which would necessitate keeping track of
+-   whether we should add one or two variables to the method. If the change 
+-   affected the width of some instruction, returns "TRUE"; otherwise, returns "FALSE". 
++   whether we should add one or two variables to the method. If the change
++   affected the width of some instruction, returns "TRUE"; otherwise, returns "FALSE".
+    Another reason for moving ref's value is for solving (addr, ref) conflicts, which
+    both use aload/astore methods.
+-*/   
++*/
+ bool GenerateOopMap::rewrite_refval_conflict_inst(BytecodeStream *itr, int from, int to) {
+   Bytecodes::Code bc = itr->code();
+-  int index;  
+-  int bci = itr->bci();  
++  int index;
++  int bci = itr->bci();
+ 
+   if (is_aload(itr, &index) && index == from) {
+     if (TraceOopMapRewrites) {
+       tty->print_cr("Rewriting aload at bci: %d", bci);
+-    }    
++    }
+     return rewrite_load_or_store(itr, Bytecodes::_aload, Bytecodes::_aload_0, to);
+   }
+-  
++
+   if (is_astore(itr, &index) && index == from) {
+     if (!stack_top_holds_ret_addr(bci)) {
+       if (TraceOopMapRewrites) {
+@@ -2326,9 +2323,9 @@
+     } else {
+       if (TraceOopMapRewrites) {
+         tty->print_cr("Supress rewriting of astore at bci: %d", bci);
+-      }     
++      }
+     }
+-  }  
++  }
+ 
+   return false;
+ }
+@@ -2340,17 +2337,17 @@
+ bool GenerateOopMap::rewrite_load_or_store(BytecodeStream *bcs, Bytecodes::Code bcN, Bytecodes::Code bc0, unsigned int varNo) {
+   assert(bcN == Bytecodes::_astore   || bcN == Bytecodes::_aload,   "wrong argument (bcN)");
+   assert(bc0 == Bytecodes::_astore_0 || bc0 == Bytecodes::_aload_0, "wrong argument (bc0)");
+-  int ilen = Bytecodes::length_at(bcs->bcp());  
++  int ilen = Bytecodes::length_at(bcs->bcp());
+   int newIlen;
+ 
+   if (ilen == 4) {
+     // Original instruction was wide; keep it wide for simplicity
+     newIlen = 4;
+-  } else if (varNo < 4) 
++  } else if (varNo < 4)
+      newIlen = 1;
+-  else if (varNo >= 256) 
++  else if (varNo >= 256)
+      newIlen = 4;
+-  else 
++  else
+      newIlen = 2;
+ 
+   // If we need to relocate in order to patch the byte, we
+@@ -2386,11 +2383,11 @@
+     Bytes::put_Java_u2(bcp+2, varNo);
+   }
+ 
+-  if (newIlen != ilen) {    
++  if (newIlen != ilen) {
+     expand_current_instr(bcs->bci(), ilen, newIlen, inst_buffer);
+   }
+ 
+-  
++
+   return (newIlen != ilen);
+ }
+ 
+@@ -2404,7 +2401,7 @@
+   virtual void relocated(int bci, int delta, int new_code_length) {
+     _gom->update_basic_blocks  (bci, delta, new_code_length);
+     _gom->update_ret_adr_at_TOS(bci, delta);
+-    _gom->_rt.update_ret_table (bci, delta); 
++    _gom->_rt.update_ret_table (bci, delta);
+   }
+ };
+ 
+@@ -2422,16 +2419,16 @@
+ 
+   // Relocator returns a new method oop.
+   _did_relocation = true;
+-  _method = m;   
++  _method = m;
+ }
+ 
+ 
+ bool GenerateOopMap::is_astore(BytecodeStream *itr, int *index) {
+   Bytecodes::Code bc = itr->code();
+   switch(bc) {
+-    case Bytecodes::_astore_0: 
+-    case Bytecodes::_astore_1: 
+-    case Bytecodes::_astore_2: 
++    case Bytecodes::_astore_0:
++    case Bytecodes::_astore_1:
++    case Bytecodes::_astore_2:
+     case Bytecodes::_astore_3:
+       *index = bc - Bytecodes::_astore_0;
+       return true;
+@@ -2445,9 +2442,9 @@
+ bool GenerateOopMap::is_aload(BytecodeStream *itr, int *index) {
+   Bytecodes::Code bc = itr->code();
+   switch(bc) {
+-    case Bytecodes::_aload_0:  
++    case Bytecodes::_aload_0:
+     case Bytecodes::_aload_1:
+-    case Bytecodes::_aload_2:  
++    case Bytecodes::_aload_2:
+     case Bytecodes::_aload_3:
+       *index = bc - Bytecodes::_aload_0;
+       return true;
+@@ -2462,7 +2459,7 @@
+ 
+ // Return true iff the top of the operand stack holds a return address at
+ // the current instruction
+-bool GenerateOopMap::stack_top_holds_ret_addr(int bci) { 
++bool GenerateOopMap::stack_top_holds_ret_addr(int bci) {
+   for(int i = 0; i < _ret_adr_tos->length(); i++) {
+     if (_ret_adr_tos->at(i) == bci)
+       return true;
+@@ -2476,7 +2473,7 @@
+   _ret_adr_tos->clear();
+ 
+   for (int i = 0; i < bb_count(); i++) {
+-    BasicBlock* bb = &_basic_blocks[i]; 
++    BasicBlock* bb = &_basic_blocks[i];
+ 
+     // Make sure to only check basicblocks that are reachable
+     if (bb->is_reachable()) {
+@@ -2493,11 +2490,11 @@
+           _ret_adr_tos->append(bcs.bci());
+           if (TraceNewOopMapGeneration) {
+             tty->print_cr("Ret_adr TOS at bci: %d", bcs.bci());
+-          } 
+-        } 
++          }
++        }
+         interp1(&bcs);
+       }
+-    }   
++    }
+   }
+ }
+ 
+@@ -2505,18 +2502,18 @@
+   for(int i = 0; i < _ret_adr_tos->length(); i++) {
+     int v = _ret_adr_tos->at(i);
+     if (v > bci)  _ret_adr_tos->at_put(i, v + delta);
+-  }  
++  }
+ }
+ 
+ // ===================================================================
+ 
+ #ifndef PRODUCT
+-int ResolveOopMapConflicts::_nof_invocations  = 0; 
++int ResolveOopMapConflicts::_nof_invocations  = 0;
+ int ResolveOopMapConflicts::_nof_rewrites     = 0;
+ int ResolveOopMapConflicts::_nof_relocations  = 0;
+ #endif
+ 
+-methodHandle ResolveOopMapConflicts::do_potential_rewrite(TRAPS) {     
++methodHandle ResolveOopMapConflicts::do_potential_rewrite(TRAPS) {
+   compute_map(CHECK_(methodHandle()));
+ 
+ #ifndef PRODUCT
+@@ -2526,7 +2523,7 @@
+     if (did_rewriting()) {
+       _nof_rewrites++;
+       if (did_relocation()) _nof_relocations++;
+-      tty->print("Method was rewritten %s: ", (did_relocation()) ? "and relocated" : "");      
++      tty->print("Method was rewritten %s: ", (did_relocation()) ? "and relocated" : "");
+       method()->print_value(); tty->cr();
+       tty->print_cr("Cand.: %d rewrts: %d (%d%%) reloc.: %d (%d%%)",
+           _nof_invocations,
+@@ -2537,4 +2534,3 @@
+ #endif
+   return methodHandle(THREAD, method());
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/generateOopMap.hpp openjdk/hotspot/src/share/vm/oops/generateOopMap.hpp
+--- openjdk6/hotspot/src/share/vm/oops/generateOopMap.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/generateOopMap.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)generateOopMap.hpp	1.65 07/05/05 17:06:01 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Forward definition
+@@ -33,7 +30,7 @@
+ class StackMap;
+ 
+ // These two should be removed. But that requires some code to be cleaned up
+-#define MAXARGSIZE      256      // This should be enough                 
++#define MAXARGSIZE      256      // This should be enough
+ #define MAX_LOCAL_VARS  65536    // 16-bit entry
+ 
+ typedef void (*jmpFct_t)(GenerateOopMap *c, int bcpDelta, int* data);
+@@ -46,9 +43,9 @@
+ class RetTableEntry : public ResourceObj {
+  private:
+   static int _init_nof_jsrs;                      // Default size of jsrs list
+-  int _target_bci;                                // Target PC address of jump (bytecode index)  
+-  GrowableArray<intptr_t> * _jsrs;                     // List of return addresses  (bytecode index)  
+-  RetTableEntry *_next;                           // Link to next entry    
++  int _target_bci;                                // Target PC address of jump (bytecode index)
++  GrowableArray<intptr_t> * _jsrs;                     // List of return addresses  (bytecode index)
++  RetTableEntry *_next;                           // Link to next entry
+  public:
+    RetTableEntry(int target, RetTableEntry *next)  { _target_bci=target; _jsrs = new GrowableArray<intptr_t>(_init_nof_jsrs); _next = next;  }
+ 
+@@ -59,7 +56,7 @@
+ 
+   // Update entry
+   void add_jsr    (int return_bci)            { _jsrs->append(return_bci); }
+-  void add_delta  (int bci, int delta);       
++  void add_delta  (int bci, int delta);
+   RetTableEntry * next()  const               { return _next; }
+ };
+ 
+@@ -72,8 +69,8 @@
+   void add_jsr(int return_bci, int target_bci);   // Adds entry to list
+  public:
+   RetTable()                                                  { _first = NULL; }
+-  void compute_ret_table(methodHandle method);  
+-  void update_ret_table(int bci, int delta);  
++  void compute_ret_table(methodHandle method);
++  void update_ret_table(int bci, int delta);
+   RetTableEntry* find_jsrs_for_target(int targBci);
+ };
+ 
+@@ -109,8 +106,8 @@
+          ref_slot_bit         = nth_bit(24),  // 1 if this reference is a "slot" reference,
+                                               // 0 if it is a "line" reference.
+          ref_data_mask        = right_n_bits(24) };
+-  
+-  
++
++
+   // These values are used to initialize commonly used CellTypeState
+   // constants.
+   enum { bottom_value         = 0,
+@@ -155,7 +152,7 @@
+   static CellTypeState make_slot_ref(int slot_num) {
+     assert(slot_num >= 0 && slot_num < ref_data_mask, "slot out of range");
+     return make_any(ref_bit | not_bottom_info_bit | ref_not_lock_bit | ref_slot_bit |
+-                    (slot_num & ref_data_mask)); 
++                    (slot_num & ref_data_mask));
+   }
+ 
+   static CellTypeState make_line_ref(int bci) {
+@@ -218,9 +215,9 @@
+   bool equal_kind(CellTypeState a) const {
+     return (_state & bits_mask) == (a._state & bits_mask);
+   }
+-  
++
+   char to_char() const;
+-  
++
+   // Merge
+   CellTypeState merge (CellTypeState cts, int slot) const;
+ 
+@@ -244,8 +241,8 @@
+ //
+ class BasicBlock: ResourceObj {
+  private:
+-  bool            _changed;                 // Reached a fixpoint or not      
+- public:   
++  bool            _changed;                 // Reached a fixpoint or not
++ public:
+   enum Constants {
+     _dead_basic_block = -2,
+     _unreached        = -1                  // Alive but not yet reached by analysis
+@@ -258,17 +255,17 @@
+   int             _max_stack;               // Determines split between stack and monitors
+   CellTypeState*  _state;                   // State (vars, stack) at entry.
+   int             _stack_top;               // -1 indicates bottom stack value.
+-  int             _monitor_top;             // -1 indicates bottom monitor stack value.    
++  int             _monitor_top;             // -1 indicates bottom monitor stack value.
+ 
+   CellTypeState* vars()                     { return _state; }
+   CellTypeState* stack()                    { return _state + _max_locals; }
+ 
+   bool changed()                            { return _changed; }
+   void set_changed(bool s)                  { _changed = s; }
+-    
++
+   bool is_reachable() const                 { return _stack_top >= 0; }  // Analysis has reached this basicblock
+ 
+-  // All basicblocks that are unreachable are going to have a _stack_top == _dead_basic_block. 
++  // All basicblocks that are unreachable are going to have a _stack_top == _dead_basic_block.
+   // This info. is setup in a pre-parse before the real abstract interpretation starts.
+   bool is_dead() const                      { return _stack_top == _dead_basic_block; }
+   bool is_alive() const                     { return _stack_top != _dead_basic_block; }
+@@ -294,8 +291,8 @@
+   int          _max_locals;                 // Cached value of no. of locals
+   int          _max_stack;                  // Cached value of max. stack depth
+   int          _max_monitors;               // Cached value of max. monitor stack depth
+-  int          _has_exceptions;             // True, if exceptions exist for method    
+-  bool         _got_error;                  // True, if an error occured during interpretation. 
++  int          _has_exceptions;             // True, if exceptions exist for method
++  bool         _got_error;                  // True, if an error occurred during interpretation.
+   Handle       _exception;                  // Exception if got_error is true.
+   bool         _did_rewriting;              // were bytecodes rewritten
+   bool         _did_relocation;             // was relocation necessary
+@@ -319,7 +316,7 @@
+   int             methodsig_to_effect        (symbolOop signature, bool isStatic, CellTypeState* effect);
+   bool            merge_local_state_vectors  (CellTypeState* cts, CellTypeState* bbts);
+   bool            merge_monitor_state_vectors(CellTypeState* cts, CellTypeState* bbts);
+-  void            copy_state                 (CellTypeState *dst, CellTypeState *src);  
++  void            copy_state                 (CellTypeState *dst, CellTypeState *src);
+   void            merge_state_into_bb        (BasicBlock *bb);
+   static void     merge_state                (GenerateOopMap *gom, int bcidelta, int* data);
+   void            set_var                    (int localNo, CellTypeState cts);
+@@ -331,7 +328,7 @@
+   CellTypeState * vars                       ()                                             { return _state; }
+   CellTypeState * stack                      ()                                             { return _state+_max_locals; }
+   CellTypeState * monitors                   ()                                             { return _state+_max_locals+_max_stack; }
+-  
++
+   void            replace_all_CTS_matches    (CellTypeState match,
+                                               CellTypeState replace);
+   void            print_states               (outputStream *os, CellTypeState *vector, int num);
+@@ -353,14 +350,14 @@
+   int           gc_points                   () const                          { return _gc_points; }
+   int           bb_count                    () const                          { return _bb_count; }
+   void          set_bbmark_bit              (int bci);
+-  void          clear_bbmark_bit            (int bci);  
++  void          clear_bbmark_bit            (int bci);
+   BasicBlock *  get_basic_block_at          (int bci) const;
+-  BasicBlock *  get_basic_block_containing  (int bci) const;    
++  BasicBlock *  get_basic_block_containing  (int bci) const;
+   void          interp_bb                   (BasicBlock *bb);
+   void          restore_state               (BasicBlock *bb);
+   int           next_bb_start_pc            (BasicBlock *bb);
+   void          update_basic_blocks         (int bci, int delta, int new_method_size);
+-  static void   bb_mark_fct                 (GenerateOopMap *c, int deltaBci, int *data);    
++  static void   bb_mark_fct                 (GenerateOopMap *c, int deltaBci, int *data);
+ 
+   // Dead code detection
+   void          mark_reachable_code();
+@@ -373,16 +370,16 @@
+   void  interp_all                          ();
+ 
+   // Interpretation methods (secondary)
+-  void  interp1                             (BytecodeStream *itr); 
+-  void  do_exception_edge                   (BytecodeStream *itr);  
++  void  interp1                             (BytecodeStream *itr);
++  void  do_exception_edge                   (BytecodeStream *itr);
+   void  check_type                          (CellTypeState expected, CellTypeState actual);
+   void  ppstore                             (CellTypeState *in,  int loc_no);
+-  void  ppload                              (CellTypeState *out, int loc_no); 
++  void  ppload                              (CellTypeState *out, int loc_no);
+   void  ppush1                              (CellTypeState in);
+-  void  ppush                               (CellTypeState *in);  
++  void  ppush                               (CellTypeState *in);
+   void  ppop1                               (CellTypeState out);
+   void  ppop                                (CellTypeState *out);
+-  void  ppop_any                            (int poplen);  
++  void  ppop_any                            (int poplen);
+   void  pp                                  (CellTypeState *in, CellTypeState *out);
+   void  pp_new_ref                          (CellTypeState *in, int bci);
+   void  ppdupswap                           (int poplen, const char *out);
+@@ -402,27 +399,27 @@
+   // Error handling
+   void  error_work                          (const char *format, va_list ap);
+   void  report_error                        (const char *format, ...);
+-  void  verify_error                        (const char *format, ...);  
++  void  verify_error                        (const char *format, ...);
+   bool  got_error()                         { return _got_error; }
+-  
++
+   // Create result set
+   bool  _report_result;
+   bool  _report_result_for_send;            // Unfortunately, stackmaps for sends are special, so we need some extra
+-  BytecodeStream *_itr_send;                // variables to handle them properly. 
++  BytecodeStream *_itr_send;                // variables to handle them properly.
+ 
+-  void  report_result                       ();  
++  void  report_result                       ();
+ 
+-  // Initvars 
++  // Initvars
+   GrowableArray<intptr_t> * _init_vars;
+-  
++
+   void  initialize_vars                     ();
+   void  add_to_ref_init_set                 (int localNo);
+ 
+-  // Conflicts rewrite logic 
++  // Conflicts rewrite logic
+   bool      _conflict;                      // True, if a conflict occurred during interpretation
+   int       _nof_refval_conflicts;          // No. of conflicts that require rewrites
+-  int *     _new_var_map;                
+-  
++  int *     _new_var_map;
++
+   void record_refval_conflict               (int varNo);
+   void rewrite_refval_conflicts             ();
+   void rewrite_refval_conflict              (int from, int to);
+@@ -432,21 +429,21 @@
+   void expand_current_instr                 (int bci, int ilen, int newIlen, u_char inst_buffer[]);
+   bool is_astore                            (BytecodeStream *itr, int *index);
+   bool is_aload                             (BytecodeStream *itr, int *index);
+-  
++
+   // List of bci's where a return address is on top of the stack
+-  GrowableArray<intptr_t> *_ret_adr_tos;         
++  GrowableArray<intptr_t> *_ret_adr_tos;
+ 
+   bool stack_top_holds_ret_addr             (int bci);
+   void compute_ret_adr_at_TOS               ();
+   void update_ret_adr_at_TOS                (int bci, int delta);
+-    
++
+   int  binsToHold                           (int no)                      { return  ((no+(BitsPerWord-1))/BitsPerWord); }
+   char *state_vec_to_string                 (CellTypeState* vec, int len);
+ 
+   // Helper method. Can be used in subclasses to e.g. calculate gc_points. If the current instruction
+   // is a control transfer, then calls the jmpFct on all possible destinations.
+   void  ret_jump_targets_do                 (BytecodeStream *bcs, jmpFct_t jmpFct, int varNo,int *data);
+-  bool  jump_targets_do                     (BytecodeStream *bcs, jmpFct_t jmpFct, int *data);  
++  bool  jump_targets_do                     (BytecodeStream *bcs, jmpFct_t jmpFct, int *data);
+ 
+   friend class RelocCallback;
+  public:
+@@ -457,7 +454,7 @@
+   void result_for_basicblock(int bci);    // Do a callback on fill_stackmap_for_opcodes for basicblock containing bci
+ 
+   // Query
+-  int max_locals() const                           { return _max_locals; }  
++  int max_locals() const                           { return _max_locals; }
+   methodOop method() const                         { return _method(); }
+   methodHandle method_as_handle() const            { return _method; }
+ 
+@@ -481,7 +478,7 @@
+   //
+   // All these methods are used during a call to: compute_map. Note: None of the return results are valid
+   // after compute_map returns, since all values are allocated as resource objects.
+-  //  
++  //
+   // All virtual methods must be implemented in subclasses
+   virtual bool allow_rewrites             () const                        { return false; }
+   virtual bool report_results             () const                        { return true;  }
+@@ -490,19 +487,19 @@
+   virtual void fill_stackmap_prolog       (int nof_gc_points)             { ShouldNotReachHere(); }
+   virtual void fill_stackmap_epilog       ()                              { ShouldNotReachHere(); }
+   virtual void fill_stackmap_for_opcodes  (BytecodeStream *bcs,
+-                                           CellTypeState* vars, 
+-                                           CellTypeState* stack, 
++                                           CellTypeState* vars,
++                                           CellTypeState* stack,
+                                            int stackTop)                  { ShouldNotReachHere(); }
+   virtual void fill_init_vars             (GrowableArray<intptr_t> *init_vars) { ShouldNotReachHere();; }
+ };
+ 
+ //
+ // Subclass of the GenerateOopMap class that just does rewrites of the method, if needed.
+-// It does not store any oopmaps. 
++// It does not store any oopmaps.
+ //
+ class ResolveOopMapConflicts: public GenerateOopMap {
+- private:  
+-  
++ private:
++
+   bool _must_clear_locals;
+ 
+   virtual bool report_results() const     { return false; }
+@@ -512,9 +509,9 @@
+   virtual void fill_stackmap_prolog       (int nof_gc_points)             {}
+   virtual void fill_stackmap_epilog       ()                              {}
+   virtual void fill_stackmap_for_opcodes  (BytecodeStream *bcs,
+-                                           CellTypeState* vars, 
+-                                           CellTypeState* stack, 
+-                                           int stack_top)                 {}   
++                                           CellTypeState* vars,
++                                           CellTypeState* stack,
++                                           int stack_top)                 {}
+   virtual void fill_init_vars             (GrowableArray<intptr_t> *init_vars) { _must_clear_locals = init_vars->length() > 0; }
+ 
+ #ifndef PRODUCT
+@@ -532,12 +529,12 @@
+ };
+ 
+ 
+-// 
++//
+ // Subclass used by the compiler to generate pairing information
+ //
+ class GeneratePairingInfo: public GenerateOopMap {
+- private:  
+-  
++ private:
++
+   virtual bool report_results() const     { return false; }
+   virtual bool report_init_vars() const   { return false; }
+   virtual bool allow_rewrites() const     { return false;  }
+@@ -545,15 +542,12 @@
+   virtual void fill_stackmap_prolog       (int nof_gc_points)             {}
+   virtual void fill_stackmap_epilog       ()                              {}
+   virtual void fill_stackmap_for_opcodes  (BytecodeStream *bcs,
+-                                           CellTypeState* vars, 
+-                                           CellTypeState* stack, 
+-                                           int stack_top)                 {}   
++                                           CellTypeState* vars,
++                                           CellTypeState* stack,
++                                           int stack_top)                 {}
+   virtual void fill_init_vars             (GrowableArray<intptr_t> *init_vars) {}
+  public:
+   GeneratePairingInfo(methodHandle method) : GenerateOopMap(method)       {};
+ 
+-  // Call compute_map(CHECK) to generate info.  
++  // Call compute_map(CHECK) to generate info.
+ };
+-
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/instanceKlass.cpp openjdk/hotspot/src/share/vm/oops/instanceKlass.cpp
+--- openjdk6/hotspot/src/share/vm/oops/instanceKlass.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/instanceKlass.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)instanceKlass.cpp	1.322 07/05/29 09:44:19 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -82,7 +79,7 @@
+   } else {
+     // linking successful, mark class as initialized
+     this_oop->set_init_state (fully_initialized);
+-    // trace 
++    // trace
+     if (TraceClassInitialization) {
+       ResourceMark rm(THREAD);
+       tty->print_cr("[Initialized %s without side effects]", this_oop->external_name());
+@@ -111,7 +108,7 @@
+ bool instanceKlass::verify_code(
+     instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
+   // 1) Verify the bytecodes
+-  Verifier::Mode mode = 
++  Verifier::Mode mode =
+     throw_verifyerror ? Verifier::ThrowException : Verifier::NoException;
+   return Verifier::verify(this_oop, mode, CHECK_false);
+ }
+@@ -125,10 +122,10 @@
+   _init_state = loaded;
+ }
+ 
+-void instanceKlass::link_class(TRAPS) {    
++void instanceKlass::link_class(TRAPS) {
+   assert(is_loaded(), "must be loaded");
+   if (!is_linked()) {
+-    instanceKlassHandle this_oop(THREAD, this->as_klassOop());  
++    instanceKlassHandle this_oop(THREAD, this->as_klassOop());
+     link_class_impl(this_oop, true, CHECK);
+   }
+ }
+@@ -138,7 +135,7 @@
+ bool instanceKlass::link_class_or_fail(TRAPS) {
+   assert(is_loaded(), "must be loaded");
+   if (!is_linked()) {
+-    instanceKlassHandle this_oop(THREAD, this->as_klassOop());  
++    instanceKlassHandle this_oop(THREAD, this->as_klassOop());
+     link_class_impl(this_oop, false, CHECK_false);
+   }
+   return is_linked();
+@@ -149,8 +146,8 @@
+   // check for error state
+   if (this_oop->is_in_error_state()) {
+     ResourceMark rm(THREAD);
+-    THROW_MSG_(vmSymbols::java_lang_NoClassDefFoundError(), 
+-               this_oop->external_name(), false);  
++    THROW_MSG_(vmSymbols::java_lang_NoClassDefFoundError(),
++               this_oop->external_name(), false);
+   }
+   // return if already verified
+   if (this_oop->is_linked()) {
+@@ -164,13 +161,13 @@
+   PerfTraceTimedEvent vmtimer(ClassLoader::perf_class_link_time(),
+                         ClassLoader::perf_classes_linked(),
+                         jt->get_thread_stat()->class_link_recursion_count_addr());
+-  
++
+   // link super class before linking this class
+   instanceKlassHandle super(THREAD, this_oop->super());
+   if (super.not_null()) {
+     if (super->is_interface()) {  // check if super class is an interface
+       ResourceMark rm(THREAD);
+-      Exceptions::fthrow(  
++      Exceptions::fthrow(
+         THREAD_AND_LOCATION,
+         vmSymbolHandles::java_lang_IncompatibleClassChangeError(),
+         "class %s has interface %s as super class",
+@@ -182,7 +179,7 @@
+ 
+     link_class_impl(super, throw_verifyerror, CHECK_false);
+   }
+-  
++
+   // link all interfaces implemented by this class before linking this class
+   objArrayHandle interfaces (THREAD, this_oop->local_interfaces());
+   int num_interfaces = interfaces->length();
+@@ -191,12 +188,12 @@
+     instanceKlassHandle ih(THREAD, klassOop(interfaces->obj_at(index)));
+     link_class_impl(ih, throw_verifyerror, CHECK_false);
+   }
+-  
++
+   // in case the class is linked in the process of linking its superclasses
+   if (this_oop->is_linked()) {
+     return true;
+   }
+-   
++
+   // verification & rewriting
+   {
+     ObjectLocker ol(this_oop, THREAD);
+@@ -219,7 +216,7 @@
+         }
+ 
+         // Just in case a side-effect of verify linked this class already
+-        // (which can sometimes happen since the verifier loads classes 
++        // (which can sometimes happen since the verifier loads classes
+         // using custom class loaders, which are free to initialize things)
+         if (this_oop->is_linked()) {
+           return true;
+@@ -228,7 +225,7 @@
+         // also sets rewritten
+         this_oop->rewrite_class(CHECK_false);
+       }
+-  
++
+       // Initialize the vtable and interface table after
+       // methods have been rewritten since rewrite may
+       // fabricate new methodOops.
+@@ -253,7 +250,7 @@
+         JvmtiExport::post_class_prepare((JavaThread *) thread, this_oop());
+       }
+     }
+-  }    
++  }
+   return true;
+ }
+ 
+@@ -262,17 +259,17 @@
+ // Three cases:
+ //    During the link of a newly loaded class.
+ //    During the preloading of classes to be written to the shared spaces.
+-//	- Rewrite the methods and update the method entry points.
++//      - Rewrite the methods and update the method entry points.
+ //
+ //    During the link of a class in the shared spaces.
+-//	- The methods were already rewritten, update the metho entry points.
++//      - The methods were already rewritten, update the method entry points.
+ //
+ // The rewriter must be called exactly once. Rewriting must happen after
+ // verification but before the first method of the class is executed.
+ 
+ void instanceKlass::rewrite_class(TRAPS) {
+   assert(is_loaded(), "must be loaded");
+-  instanceKlassHandle this_oop(THREAD, this->as_klassOop());  
++  instanceKlassHandle this_oop(THREAD, this->as_klassOop());
+   if (this_oop->is_rewritten()) {
+     assert(this_oop()->is_shared(), "rewriting an unshared class?");
+     return;
+@@ -294,18 +291,18 @@
+     Thread *self = THREAD; // it's passed the current thread
+ 
+     // Step 2
+-    // If we were to use wait() instead of waitInterruptibly() then 
++    // If we were to use wait() instead of waitInterruptibly() then
+     // we might end up throwing IE from link/symbol resolution sites
+-    // that aren't expected to throw.  This would wreak havoc.  See 6320309.  
+-    while(this_oop->is_being_initialized() && !this_oop->is_reentrant_initialization(self)) {      
+-      ol.waitUninterruptibly(CHECK);      
++    // that aren't expected to throw.  This would wreak havoc.  See 6320309.
++    while(this_oop->is_being_initialized() && !this_oop->is_reentrant_initialization(self)) {
++      ol.waitUninterruptibly(CHECK);
+     }
+ 
+     // Step 3
+     if (this_oop->is_being_initialized() && this_oop->is_reentrant_initialization(self))
+       return;
+ 
+-    // Step 4 
++    // Step 4
+     if (this_oop->is_initialized())
+       return;
+ 
+@@ -318,10 +315,10 @@
+       char* message = NEW_C_HEAP_ARRAY(char, msglen);
+       if (NULL == message) {
+         // Out of memory: can't create detailed error message
+-        THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), className);  
++        THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), className);
+       } else {
+         jio_snprintf(message, msglen, "%s%s", desc, className);
+-        THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), message);  
++        THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), message);
+       }
+     }
+ 
+@@ -329,7 +326,7 @@
+     this_oop->set_init_state(being_initialized);
+     this_oop->set_init_thread(self);
+   }
+-  
++
+   // Step 7
+   klassOop super_klass = this_oop->super();
+   if (super_klass != NULL && !this_oop->is_interface() && Klass::cast(super_klass)->should_be_initialized()) {
+@@ -347,7 +344,7 @@
+     }
+   }
+ 
+-  // Step 8  
++  // Step 8
+   {
+     assert(THREAD->is_Java_thread(), "non-JavaThread in initialize_impl");
+     JavaThread* jt = (JavaThread*)THREAD;
+@@ -360,17 +357,17 @@
+   }
+ 
+   // Step 9
+-  if (!HAS_PENDING_EXCEPTION) {    
++  if (!HAS_PENDING_EXCEPTION) {
+     this_oop->set_initialization_state_and_notify(fully_initialized, CHECK);
+     { ResourceMark rm(THREAD);
+       debug_only(this_oop->vtable()->verify(tty, true);)
+     }
+   }
+-  else {    
++  else {
+     // Step 10 and 11
+     Handle e(THREAD, PENDING_EXCEPTION);
+     CLEAR_PENDING_EXCEPTION;
+-    { 
++    {
+       EXCEPTION_MARK;
+       this_oop->set_initialization_state_and_notify(initialization_error, THREAD);
+       CLEAR_PENDING_EXCEPTION;   // ignore any exception thrown, class initialization error is thrown below
+@@ -381,11 +378,11 @@
+       JavaCallArguments args(e);
+       THROW_ARG(vmSymbolHandles::java_lang_ExceptionInInitializerError(),
+                 vmSymbolHandles::throwable_void_signature(),
+-                &args);      
++                &args);
+     }
+   }
+ }
+-  
++
+ 
+ // Note: implementation moved to static method to expose the this pointer.
+ void instanceKlass::set_initialization_state_and_notify(ClassState state, TRAPS) {
+@@ -393,10 +390,10 @@
+   set_initialization_state_and_notify_impl(kh, state, CHECK);
+ }
+ 
+-void instanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) {    
+-  ObjectLocker ol(this_oop, THREAD);   
++void instanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) {
++  ObjectLocker ol(this_oop, THREAD);
+   this_oop->set_init_state(state);
+-  ol.notify_all(CHECK);  
++  ol.notify_all(CHECK);
+ }
+ 
+ void instanceKlass::add_implementor(klassOop k) {
+@@ -448,7 +445,7 @@
+     assert(local_interfaces()->obj_at(i)->is_klass(), "must be a klass");
+     instanceKlass* interf = instanceKlass::cast(klassOop(local_interfaces()->obj_at(i)));
+     assert(interf->is_interface(), "expected interface");
+-    interf->add_implementor(this_as_oop()); 
++    interf->add_implementor(this_as_oop());
+   }
+ }
+ 
+@@ -519,7 +516,7 @@
+   instanceHandle h_i(THREAD, i);
+   // Pass the handle as argument, JavaCalls::call expects oop as jobjects
+   JavaValue result(T_VOID);
+-  JavaCallArguments args(h_i);  
++  JavaCallArguments args(h_i);
+   methodHandle mh (THREAD, Universe::finalizer_register_method());
+   JavaCalls::call(&result, mh, &args, CHECK_NULL);
+   return h_i();
+@@ -532,7 +529,7 @@
+   KlassHandle h_k(THREAD, as_klassOop());
+ 
+   instanceOop i;
+- 
++
+   i = (instanceOop)CollectedHeap::obj_allocate(h_k, size, CHECK_NULL);
+   if (has_finalizer_flag && !RegisterFinalizersAtInit) {
+     i = register_finalizer(i, CHECK_NULL);
+@@ -571,7 +568,7 @@
+   return array_klass_impl(this_oop, or_null, n, THREAD);
+ }
+ 
+-klassOop instanceKlass::array_klass_impl(instanceKlassHandle this_oop, bool or_null, int n, TRAPS) {    
++klassOop instanceKlass::array_klass_impl(instanceKlassHandle this_oop, bool or_null, int n, TRAPS) {
+   if (this_oop->array_klasses() == NULL) {
+     if (or_null) return NULL;
+ 
+@@ -582,12 +579,12 @@
+       MutexLocker mc(Compile_lock, THREAD);   // for vtables
+       MutexLocker ma(MultiArray_lock, THREAD);
+ 
+-      // Check if update has already taken place    
++      // Check if update has already taken place
+       if (this_oop->array_klasses() == NULL) {
+         objArrayKlassKlass* oakk =
+           (objArrayKlassKlass*)Universe::objArrayKlassKlassObj()->klass_part();
+ 
+-        klassOop  k = oakk->allocate_objArray_klass(1, this_oop, CHECK_NULL);                  
++        klassOop  k = oakk->allocate_objArray_klass(1, this_oop, CHECK_NULL);
+         this_oop->set_array_klasses(k);
+       }
+     }
+@@ -612,10 +609,10 @@
+ static int call_class_initializer_impl_counter = 0;   // for debugging
+ 
+ methodOop instanceKlass::class_initializer() {
+-  return find_method(vmSymbols::class_initializer_name(), vmSymbols::void_method_signature()); 
++  return find_method(vmSymbols::class_initializer_name(), vmSymbols::void_method_signature());
+ }
+ 
+-void instanceKlass::call_class_initializer_impl(instanceKlassHandle this_oop, TRAPS) {  
++void instanceKlass::call_class_initializer_impl(instanceKlassHandle this_oop, TRAPS) {
+   methodHandle h_method(THREAD, this_oop->class_initializer());
+   assert(!this_oop->is_initialized(), "we cannot initialize twice");
+   if (TraceClassInitialization) {
+@@ -623,7 +620,7 @@
+     this_oop->name()->print_value();
+     tty->print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", (address)this_oop());
+   }
+-  if (h_method() != NULL) {  
++  if (h_method() != NULL) {
+     JavaCallArguments args; // No arguments
+     JavaValue result(T_VOID);
+     JavaCalls::call(&result, h_method, &args, CHECK); // Static call (no args)
+@@ -647,7 +644,7 @@
+ }
+ 
+ 
+-bool instanceKlass::find_local_field(symbolOop name, symbolOop sig, fieldDescriptor* fd) const {  
++bool instanceKlass::find_local_field(symbolOop name, symbolOop sig, fieldDescriptor* fd) const {
+   const int n = fields()->length();
+   for (int i = 0; i < n; i += next_offset ) {
+     int name_index = fields()->ushort_at(i + name_index_offset);
+@@ -663,7 +660,7 @@
+ }
+ 
+ 
+-void instanceKlass::field_names_and_sigs_iterate(OopClosure* closure) {  
++void instanceKlass::field_names_and_sigs_iterate(OopClosure* closure) {
+   const int n = fields()->length();
+   for (int i = 0; i < n; i += next_offset ) {
+     int name_index = fields()->ushort_at(i + name_index_offset);
+@@ -735,11 +732,11 @@
+ }
+ 
+ 
+-bool instanceKlass::find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {  
++bool instanceKlass::find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
+   int length = fields()->length();
+   for (int i = 0; i < length; i += next_offset) {
+     if (offset_from_fields( i ) == offset) {
+-      fd->initialize(as_klassOop(), i);      
++      fd->initialize(as_klassOop(), i);
+       if (fd->is_static() == is_static) return true;
+     }
+   }
+@@ -768,13 +765,12 @@
+   }
+ }
+ 
+-
+-void instanceKlass::do_local_static_fields(void f(fieldDescriptor*, oop), oop obj) {  
++void instanceKlass::do_local_static_fields(FieldClosure* cl) {
+   fieldDescriptor fd;
+   int length = fields()->length();
+   for (int i = 0; i < length; i += next_offset) {
+     fd.initialize(as_klassOop(), i);
+-    if (fd.is_static()) f(&fd, obj);
++    if (fd.is_static()) cl->do_field(&fd);
+   }
+ }
+ 
+@@ -783,7 +779,7 @@
+   instanceKlassHandle h_this(THREAD, as_klassOop());
+   do_local_static_fields_impl(h_this, f, CHECK);
+ }
+- 
++
+ 
+ void instanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
+   fieldDescriptor fd;
+@@ -795,17 +791,17 @@
+ }
+ 
+ 
+-void instanceKlass::do_nonstatic_fields(void f(fieldDescriptor*, oop), oop obj) {
++void instanceKlass::do_nonstatic_fields(FieldClosure* cl) {
+   fieldDescriptor fd;
+   instanceKlass* super = superklass();
+   if (super != NULL) {
+-    super->do_nonstatic_fields(f, obj);
++    super->do_nonstatic_fields(cl);
+   }
+   int length = fields()->length();
+   for (int i = 0; i < length; i += next_offset) {
+     fd.initialize(as_klassOop(), i);
+-    if (!(fd.is_static())) f(&fd, obj);
+-  }  
++    if (!(fd.is_static())) cl->do_field(&fd);
++  }
+ }
+ 
+ 
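The two hunks above convert do_local_static_fields() and do_nonstatic_fields() from C-style callbacks (a function pointer plus an oop cookie) to the FieldClosure interface this patch declares in instanceKlass.hpp further below. A minimal standalone sketch of the pattern, with a placeholder Field type instead of HotSpot's fieldDescriptor and the superclass recursion omitted:

    #include <iostream>
    #include <vector>

    struct Field { const char* name; bool is_static; };  // placeholder type

    // Mirrors the FieldClosure interface the patch declares: subclasses
    // override do_field() and carry their own state (stream, target
    // object), so no extra cookie argument or file-scope global is needed.
    class FieldClosure {
     public:
      virtual ~FieldClosure() {}
      virtual void do_field(Field* fd) = 0;
    };

    class NamePrinter : public FieldClosure {
     public:
      void do_field(Field* fd) { std::cout << "   - " << fd->name << '\n'; }
    };

    // Stand-in for do_nonstatic_fields(FieldClosure*): hand every
    // non-static field to the closure, as the hunk above does.
    static void do_nonstatic_fields(std::vector<Field>& fields, FieldClosure* cl) {
      for (size_t i = 0; i < fields.size(); i++)
        if (!fields[i].is_static) cl->do_field(&fields[i]);
    }

    int main() {
      std::vector<Field> fields = { {"x", false}, {"COUNT", true} };
      NamePrinter p;
      do_nonstatic_fields(fields, &p);   // prints only "   - x"
    }

Because the closure object owns whatever state it needs, the callers updated later in this patch (FieldPrinter in the printing code) no longer have to smuggle an output stream through a global.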
+@@ -850,7 +846,7 @@
+     int res = m->name()->fast_compare(name);
+     if (res == 0) {
+       // found matching name; do linear search to find matching signature
+-      // first, quick check for common case 
++      // first, quick check for common case
+       if (m->signature() == signature) return m;
+       // search downwards through overloaded methods
+       int i;
+@@ -884,9 +880,9 @@
+   if (index != -1) fatal1("binary search bug: should have found entry %d", index);
+ #endif
+   return NULL;
+-} 
++}
+ 
+-methodOop instanceKlass::uncached_lookup_method(symbolOop name, symbolOop signature) const {  
++methodOop instanceKlass::uncached_lookup_method(symbolOop name, symbolOop signature) const {
+   klassOop klass = as_klassOop();
+   while (klass != NULL) {
+     methodOop method = instanceKlass::cast(klass)->find_method(name, signature);
+@@ -897,7 +893,7 @@
+ }
+ 
+ // lookup a method in all the interfaces that this class implements
+-methodOop instanceKlass::lookup_method_in_all_interfaces(symbolOop name, 
++methodOop instanceKlass::lookup_method_in_all_interfaces(symbolOop name,
+                                                          symbolOop signature) const {
+   objArrayOop all_ifs = instanceKlass::cast(as_klassOop())->transitive_interfaces();
+   int num_ifs = all_ifs->length();
+@@ -938,7 +934,7 @@
+ 
+ // Lookup or create a jmethodID.
+ // This code can be called by the VM thread.  For this reason it is critical that
+-// there are no blocking operations (safepoints) while the lock is held -- or a 
++// there are no blocking operations (safepoints) while the lock is held -- or a
+ // deadlock can occur.
+ jmethodID instanceKlass::jmethod_id_for_impl(instanceKlassHandle ik_h, methodHandle method_h) {
+   size_t idnum = (size_t)method_h->method_idnum();
+@@ -974,7 +970,7 @@
+       methodHandle current_method_h(current_method == NULL? method_h() : current_method);
+       new_id = JNIHandles::make_jmethod_id(current_method_h);
+     } else {
+-      // It is the current version of the method or an obsolete method, 
++      // It is the current version of the method or an obsolete method,
+       // use the version passed in
+       new_id = JNIHandles::make_jmethod_id(method_h);
+     }
+@@ -1038,7 +1034,7 @@
+   if (indices == NULL ||                         // If there is no index array,
+       ((size_t)indices[0]) <= idnum) {           // or if it is too short
+     // Lock before we allocate the array so we don't leak
+-    MutexLocker ml(JNICachedItableIndex_lock);      
++    MutexLocker ml(JNICachedItableIndex_lock);
+     // Retry lookup after we got the lock
+     indices = methods_cached_itable_indices_acquire();
+     size_t length = 0;
+@@ -1064,7 +1060,7 @@
+     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
+   }
+   // This is a cache, if there is a race to set it, it doesn't matter
+-  indices[idnum+1] = index; 
++  indices[idnum+1] = index;
+ }
+ 
+ 
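The hunks above touch instanceKlass's lazily grown itable-index cache, which follows a check, lock, re-check shape: probe the cache without the lock, take JNICachedItableIndex_lock only on a miss, then look again under the lock before allocating so two racing threads cannot both install an array. A rough self-contained model of that shape (std::mutex stands in for HotSpot's MutexLocker, and copying of old entries is elided as it is in the hunk):

    #include <cstdio>
    #include <mutex>

    static int*       g_cache = nullptr;   // element 0 holds the length
    static std::mutex g_lock;              // stands in for JNICachedItableIndex_lock

    // Return a cache with room for idnum, allocating at most once even
    // if several threads race here (check, lock, re-check).
    int* cache_for(size_t idnum) {
      int* c = g_cache;
      if (c == nullptr || (size_t)c[0] <= idnum) {   // unlocked fast path missed
        std::lock_guard<std::mutex> ml(g_lock);      // lock before we allocate
        c = g_cache;                                 // retry lookup under the lock
        if (c == nullptr || (size_t)c[0] <= idnum) {
          size_t len = idnum + 1;                    // slot 0 stores the length
          int* bigger = new int[len + 1]();          // zero-initialized
          bigger[0] = (int)len;
          // (the real code also copies the old entries across)
          g_cache = bigger;
          c = bigger;
        }
      }
      return c;
    }

    int main() {
      int* c = cache_for(5);
      printf("cache length = %d\n", c[0]);
    }

As the following hunk notes, the write of the cached value itself ("indices[idnum+1] = index") can race benignly: it is a cache, so the last writer simply wins.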
+@@ -1073,7 +1069,7 @@
+   int* indices = methods_cached_itable_indices_acquire();
+   if (indices != NULL && ((size_t)indices[0]) > idnum) {
+      // indices exist and are long enough, retrieve possible cached
+-    return indices[idnum+1]; 
++    return indices[idnum+1];
+   }
+   return -1;
+ }
+@@ -1101,6 +1097,7 @@
+     _next = next;
+     _count = 1;
+   }
++  int count()                             { return _count; }
+   int increment()                         { _count += 1; return _count; }
+   int decrement()                         { _count -= 1; assert(_count >= 0, "don't underflow"); return _count; }
+   nmethodBucket* next()                   { return _next; }
+@@ -1114,7 +1111,7 @@
+ // are dependent on the klassOop that was passed in and mark them for
+ // deoptimization.  Returns the number of nmethods found.
+ //
+-int instanceKlass::mark_dependent_nmethods(klassOop dependee) {
++int instanceKlass::mark_dependent_nmethods(DepChange& changes) {
+   assert_locked_or_safepoint(CodeCache_lock);
+   int found = 0;
+   nmethodBucket* b = _dependencies;
+@@ -1122,11 +1119,12 @@
+     nmethod* nm = b->get_nmethod();
+     // since dependencies aren't removed until an nmethod becomes a zombie,
+     // the dependency list may contain nmethods which aren't alive.
+-    if (nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->is_dependent_on(dependee)) {
++    if (nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
+       if (TraceDependencies) {
+         ResourceMark rm;
+         tty->print_cr("Marked for deoptimization");
+-        tty->print_cr("  dependee = %s", this->external_name());
++        tty->print_cr("  context = %s", this->external_name());
++        changes.print();
+         nm->print();
+         nm->print_dependencies();
+       }
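This hunk widens mark_dependent_nmethods() from a single dependee klassOop to a DepChange description of the class-hierarchy change, so each compiled method is asked nm->check_dependency_on(changes) instead of nm->is_dependent_on(dependee); the matching declaration change appears in instanceKlass.hpp below. A standalone model of the walk, with placeholder types and the marking assumed from the elided remainder of the loop:

    #include <cstdio>

    struct DepChange { const char* what; };   // placeholder change description
    struct nmethod {
      bool alive, marked;
      bool check_dependency_on(DepChange&) const { return true; }  // stub policy
      void mark_for_deoptimization() { marked = true; }
    };
    struct nmethodBucket { nmethod* nm; nmethodBucket* next; };

    // Same shape as the loop above: dependencies stay on the list until an
    // nmethod becomes a zombie, so dead entries are skipped; live, unmarked
    // ones are tested against the change.
    static int mark_dependent_nmethods(nmethodBucket* deps, DepChange& changes) {
      int found = 0;
      for (nmethodBucket* b = deps; b != nullptr; b = b->next) {
        nmethod* nm = b->nm;
        if (nm->alive && !nm->marked && nm->check_dependency_on(changes)) {
          nm->mark_for_deoptimization();   // assumed from the elided lines
          found++;
        }
      }
      return found;
    }

    int main() {
      nmethod live = {true, false}, dead = {false, false};
      nmethodBucket b2 = {&dead, nullptr};
      nmethodBucket b1 = {&live, &b2};
      DepChange dc = {"hierarchy change"};
      printf("marked %d nmethod(s)\n", mark_dependent_nmethods(&b1, dc));  // 1
    }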
+@@ -1160,12 +1158,12 @@
+ }
+ 
+ 
+-// 
++//
+ // Decrement count of the nmethod in the dependency list and remove
+ // the bucket competely when the count goes to 0.  This method must
+ // find a corresponding bucket otherwise there's a bug in the
+ // recording of dependecies.
+-// 
++//
+ void instanceKlass::remove_dependent_nmethod(nmethod* nm) {
+   assert_locked_or_safepoint(CodeCache_lock);
+   nmethodBucket* b = _dependencies;
+@@ -1193,13 +1191,46 @@
+ }
+ 
+ 
++#ifndef PRODUCT
++void instanceKlass::print_dependent_nmethods(bool verbose) {
++  nmethodBucket* b = _dependencies;
++  int idx = 0;
++  while (b != NULL) {
++    nmethod* nm = b->get_nmethod();
++    tty->print("[%d] count=%d { ", idx++, b->count());
++    if (!verbose) {
++      nm->print_on(tty, "nmethod");
++      tty->print_cr(" } ");
++    } else {
++      nm->print();
++      nm->print_dependencies();
++      tty->print_cr("--- } ");
++    }
++    b = b->next();
++  }
++}
++
++
++bool instanceKlass::is_dependent_nmethod(nmethod* nm) {
++  nmethodBucket* b = _dependencies;
++  while (b != NULL) {
++    if (nm == b->get_nmethod()) {
++      return true;
++    }
++    b = b->next();
++  }
++  return false;
++}
++#endif //PRODUCT
++
++
+ void instanceKlass::follow_static_fields() {
+   oop* start = start_of_static_fields();
+   oop* end   = start + static_oop_field_size();
+   while (start < end) {
+     if (*start != NULL) {
+       assert(Universe::heap()->is_in_closed_subset(*start),
+-	     "should be in heap");
++             "should be in heap");
+       MarkSweep::mark_and_push(start);
+     }
+     start++;
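The #ifndef PRODUCT block added above gives debug builds two inspection helpers over the dependency list: print_dependent_nmethods(), which uses the new nmethodBucket::count() accessor, and is_dependent_nmethod(), a plain membership scan. A compilable model of the scan and the accessor (types are stand-ins for the HotSpot originals):

    #include <cassert>

    struct nmethod {};
    struct nmethodBucket {
      nmethod*       _nm;
      nmethodBucket* _next;
      int            _count;
      nmethod*       get_nmethod() { return _nm; }
      nmethodBucket* next()        { return _next; }
      int            count()       { return _count; }  // accessor the patch adds
    };

    // Mirrors is_dependent_nmethod(): linear scan for an exact match.
    static bool is_dependent(nmethodBucket* b, nmethod* nm) {
      for (; b != nullptr; b = b->next())
        if (nm == b->get_nmethod()) return true;
      return false;
    }

    int main() {
      nmethod m1, m2, m3;
      nmethodBucket b2 = {&m2, nullptr, 1};
      nmethodBucket b1 = {&m1, &b2, 2};
      assert(is_dependent(&b1, &m2));
      assert(!is_dependent(&b1, &m3));
    }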
+@@ -1262,7 +1293,7 @@
+     while (start < end) {
+       if (*start != NULL) {
+         assert(Universe::heap()->is_in_closed_subset(*start),
+-	       "should be in heap");
++               "should be in heap");
+         MarkSweep::mark_and_push(start);
+       }
+       start++;
+@@ -1273,7 +1304,7 @@
+ 
+ #ifndef SERIALGC
+ void instanceKlass::oop_follow_contents(ParCompactionManager* cm,
+-					oop obj) {
++                                        oop obj) {
+   assert (obj!=NULL, "can't follow the content of NULL object");
+   obj->follow_header(cm);
+   OopMapBlock* map     = start_of_nonstatic_oop_maps();
+@@ -1533,7 +1564,7 @@
+ }
+ 
+ int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
+-				       HeapWord* beg_addr, HeapWord* end_addr) {
++                                       HeapWord* beg_addr, HeapWord* end_addr) {
+   // Compute oopmap block range.  The common case is nonstatic_oop_map_size==1.
+   OopMapBlock* map           = start_of_nonstatic_oop_maps();
+   OopMapBlock* const end_map = map + nonstatic_oop_map_size();
+@@ -1717,7 +1748,7 @@
+   if (Klass::cast(class2)->oop_is_objArray()) {
+     class2 = objArrayKlass::cast(class2)->bottom_klass();
+   }
+-  oop classloader2;  
++  oop classloader2;
+   if (Klass::cast(class2)->oop_is_instance()) {
+     classloader2 = instanceKlass::cast(class2)->class_loader();
+   } else {
+@@ -1739,9 +1770,9 @@
+                                               classloader2, classname2);
+ }
+ 
+-// return true if two classes are in the same package, classloader 
++// return true if two classes are in the same package, classloader
+ // and classname information is enough to determine a class's package
+-bool instanceKlass::is_same_class_package(oop class_loader1, symbolOop class_name1, 
++bool instanceKlass::is_same_class_package(oop class_loader1, symbolOop class_name1,
+                                           oop class_loader2, symbolOop class_name2) {
+   if (class_loader1 != class_loader2) {
+     return false;
+@@ -1756,11 +1787,11 @@
+ 
+     jbyte *last_slash1 = UTF8::strrchr(name1, class_name1->utf8_length(), '/');
+     jbyte *last_slash2 = UTF8::strrchr(name2, class_name2->utf8_length(), '/');
+-    
++
+     if ((last_slash1 == NULL) || (last_slash2 == NULL)) {
+       // One of the two doesn't have a package.  Only return true
+       // if the other one also doesn't have a package.
+-      return last_slash1 == last_slash2; 
++      return last_slash1 == last_slash2;
+     } else {
+       // Skip over '['s
+       if (*name1 == '[') {
+@@ -1786,7 +1817,7 @@
+       int length1 = last_slash1 - name1;
+       int length2 = last_slash2 - name2;
+ 
+-      return UTF8::equal(name1, length1, name2, length2);      
++      return UTF8::equal(name1, length1, name2, length2);
+     }
+   }
+ }
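The comparison above reduces "same runtime package" to two checks: identical class loader, then identical package prefix of the UTF-8 class names up to the last '/'. A self-contained sketch of just the name comparison, with the loader check and the '['-skipping for array names omitted for brevity:

    #include <cassert>
    #include <cstring>

    // Returns true when two slash-separated class names share a package,
    // mirroring the last-slash logic in the hunk above.
    static bool same_package(const char* n1, const char* n2) {
      const char* s1 = strrchr(n1, '/');
      const char* s2 = strrchr(n2, '/');
      if (s1 == NULL || s2 == NULL)
        return s1 == s2;            // true only if neither has a package
      int len1 = (int)(s1 - n1);
      int len2 = (int)(s2 - n2);
      return len1 == len2 && memcmp(n1, n2, len1) == 0;
    }

    int main() {
      assert( same_package("java/lang/String", "java/lang/Object"));
      assert(!same_package("java/lang/String", "java/util/List"));
      assert(!same_package("Foo", "java/lang/Object"));
    }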
+@@ -1871,18 +1902,18 @@
+ void instanceKlass::add_osr_nmethod(nmethod* n) {
+   // only one compilation can be active
+   NEEDS_CLEANUP
+-  // This is a short non-blocking critical region, so the no safepoint check is ok. 
++  // This is a short non-blocking critical region, so the no safepoint check is ok.
+   OsrList_lock->lock_without_safepoint_check();
+-  assert(n->is_osr_method(), "wrong kind of nmethod");  
++  assert(n->is_osr_method(), "wrong kind of nmethod");
+   n->set_link(osr_nmethods_head());
+-  set_osr_nmethods_head(n);  
++  set_osr_nmethods_head(n);
+   // Remember to unlock again
+   OsrList_lock->unlock();
+ }
+ 
+ 
+ void instanceKlass::remove_osr_nmethod(nmethod* n) {
+-  // This is a short non-blocking critical region, so the no safepoint check is ok. 
++  // This is a short non-blocking critical region, so the no safepoint check is ok.
+   OsrList_lock->lock_without_safepoint_check();
+   assert(n->is_osr_method(), "wrong kind of nmethod");
+   nmethod* last = NULL;
+@@ -1890,33 +1921,33 @@
+   // Search for match
+   while(cur != NULL && cur != n) {
+     last = cur;
+-    cur = cur->link();    
+-  }   
++    cur = cur->link();
++  }
+   if (cur == n) {
+     if (last == NULL) {
+       // Remove first element
+-      set_osr_nmethods_head(osr_nmethods_head()->link());      
++      set_osr_nmethods_head(osr_nmethods_head()->link());
+     } else {
+       last->set_link(cur->link());
+     }
+   }
+-  n->set_link(NULL);  
++  n->set_link(NULL);
+   // Remember to unlock again
+   OsrList_lock->unlock();
+ }
+ 
+ nmethod* instanceKlass::lookup_osr_nmethod(const methodOop m, int bci) const {
+-  // This is a short non-blocking critical region, so the no safepoint check is ok. 
++  // This is a short non-blocking critical region, so the no safepoint check is ok.
+   OsrList_lock->lock_without_safepoint_check();
+   nmethod* osr = osr_nmethods_head();
+   while (osr != NULL) {
+     assert(osr->is_osr_method(), "wrong kind of nmethod found in chain");
+-    if (osr->method() == m && 
++    if (osr->method() == m &&
+         (bci == InvocationEntryBci || osr->osr_entry_bci() == bci)) {
+-      // Found a match - return it.        
++      // Found a match - return it.
+       OsrList_lock->unlock();
+       return osr;
+-    }    
++    }
+     osr = osr->link();
+   }
+   OsrList_lock->unlock();
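The three OSR routines above share one pattern: take OsrList_lock without a safepoint check, do a short non-blocking singly-linked-list operation, and unlock, since any blocking operation inside the critical section could deadlock with a safepoint. A standalone model of the lookup scan (the lock calls are left out; InvocationEntryBci plays its usual wildcard role of "any entry bci"):

    #include <cstdio>

    const int InvocationEntryBci = -1;   // wildcard: match any entry bci

    struct Method {};
    struct nmethod {
      Method*  method;
      int      osr_entry_bci;
      nmethod* link;                     // singly linked OSR list
    };

    // Same scan as lookup_osr_nmethod(); the real version brackets it with
    // OsrList_lock->lock_without_safepoint_check() / unlock().
    static nmethod* lookup_osr(nmethod* head, Method* m, int bci) {
      for (nmethod* osr = head; osr != nullptr; osr = osr->link) {
        if (osr->method == m &&
            (bci == InvocationEntryBci || osr->osr_entry_bci == bci))
          return osr;                    // found a match - return it
      }
      return nullptr;
    }

    int main() {
      Method m;
      nmethod n2 = {&m, 42, nullptr};
      nmethod n1 = {&m, 7, &n2};
      printf("bci 42  -> %p\n", (void*)lookup_osr(&n1, &m, 42));
      printf("any bci -> %p\n", (void*)lookup_osr(&n1, &m, InvocationEntryBci));
    }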
+@@ -1924,26 +1955,19 @@
+ }
+ 
+ // -----------------------------------------------------------------------------------------------------
+-
+ #ifndef PRODUCT
+ 
+ // Printing
+ 
+-static void print_nonstatic_fields(outputStream* st, instanceKlass* ik, oop obj) {
+-  fieldDescriptor fd;
+-  instanceKlass* super = ik->superklass();
+-  if (super != NULL) {
+-    print_nonstatic_fields(st, super, obj);
+-  }
+-  int length = ik->fields()->length();
+-  for (int i = 0; i < length; i += instanceKlass::next_offset) {
+-    fd.initialize(ik->as_klassOop(), i);
+-    if (!(fd.is_static())) {
+-      st->print("   - ");
+-      fd.print_on_for(st, obj);
+-      st->cr();
+-    }
+-  }  
++void FieldPrinter::do_field(fieldDescriptor* fd) {
++   if (fd->is_static() == (_obj == NULL)) {
++     _st->print("   - ");
++     fd->print_on(_st);
++     _st->cr();
++   } else {
++     fd->print_on_for(_st, _obj);
++     _st->cr();
++   }
+ }
+ 
+ 
+@@ -1967,7 +1991,8 @@
+   }
+ 
+   st->print_cr("fields:");
+-  print_nonstatic_fields(st, this, obj);
++  FieldPrinter print_nonstatic_field(st, obj);
++  do_nonstatic_fields(&print_nonstatic_field);
+ 
+   if (as_klassOop() == SystemDictionary::class_klass()) {
+     klassOop mirrored_klass = java_lang_Class::as_klassOop(obj);
+@@ -2056,7 +2081,7 @@
+ 
+ #endif
+ 
+- 
++
+ /* JNIid class for jfieldIDs only */
+  JNIid::JNIid(klassOop holder, int offset, JNIid* next) {
+    _holder = holder;
+@@ -2064,8 +2089,8 @@
+    _next = next;
+    debug_only(_is_static_field_id = false;)
+  }
+- 
+- 
++
++
+  JNIid* JNIid::find(int offset) {
+    JNIid* current = this;
+    while (current != NULL) {
+@@ -2074,13 +2099,13 @@
+    }
+    return NULL;
+  }
+- 
++
+ void JNIid::oops_do(OopClosure* f) {
+   for (JNIid* cur = this; cur != NULL; cur = cur->next()) {
+     f->do_oop(cur->holder_addr());
+   }
+ }
+- 
++
+ void JNIid::deallocate(JNIid* current) {
+    while (current != NULL) {
+      JNIid* next = current->next();
+@@ -2088,13 +2113,13 @@
+      current = next;
+    }
+  }
+- 
+- 
++
++
+  void JNIid::verify(klassOop holder) {
+    int first_field_offset  = instanceKlass::cast(holder)->offset_of_static_fields();
+    int end_field_offset;
+    end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize);
+- 
++
+    JNIid* current = this;
+    while (current != NULL) {
+      guarantee(current->holder() == holder, "Invalid klass in JNIid");
+@@ -2110,11 +2135,11 @@
+ 
+ 
+ #ifdef ASSERT
+-  void instanceKlass::set_init_state(ClassState state) { 
++  void instanceKlass::set_init_state(ClassState state) {
+     bool good_state = as_klassOop()->is_shared() ? (_init_state <= state)
+                                                  : (_init_state < state);
+     assert(good_state || state == allocated, "illegal state transition");
+-    _init_state = state; 
++    _init_state = state;
+   }
+ #endif
+ 
+@@ -2217,7 +2242,7 @@
+           // do anything special with the index.
+           continue;  // robustness
+         }
+-      
++
+         methodOop method = (methodOop)JNIHandles::resolve(method_ref);
+         if (method == NULL || emcp_method_count == 0) {
+           // This method entry has been GC'ed or the current
+@@ -2299,7 +2324,7 @@
+               // have to do anything special with the index.
+               continue;  // robustness
+             }
+-          
++
+             methodOop method = (methodOop)JNIHandles::resolve(method_ref);
+             if (method == NULL) {
+               // this method entry has been GC'ed so skip it
+@@ -2467,7 +2492,7 @@
+     // the instanceKlass did not have any EMCP methods
+     return;
+   }
+-  
++
+   _prev_EMCP_method_handles = new GrowableArray<methodHandle>(10);
+ 
+   int n_methods = method_refs->length();
+diff -ruN openjdk6/hotspot/src/share/vm/oops/instanceKlass.hpp openjdk/hotspot/src/share/vm/oops/instanceKlass.hpp
+--- openjdk6/hotspot/src/share/vm/oops/instanceKlass.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/instanceKlass.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)instanceKlass.hpp	1.199 07/05/29 09:44:20 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,10 +19,10 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+-// An instanceKlass is the VM level representation of a Java class. 
++// An instanceKlass is the VM level representation of a Java class.
+ // It contains all information needed for at class at execution runtime.
+ 
+ //  instanceKlass layout:
+@@ -36,7 +33,7 @@
+ //    [instance size              ] Klass
+ //    [java mirror                ] Klass
+ //    [super                      ] Klass
+-//    [access_flags               ] Klass 
++//    [access_flags               ] Klass
+ //    [name                       ] Klass
+ //    [first subklass             ] Klass
+ //    [next sibling               ] Klass
+@@ -79,14 +76,33 @@
+ class jniIdMapBase;
+ class BreakpointInfo;
+ class fieldDescriptor;
++class DepChange;
+ class nmethodBucket;
+ class PreviousVersionNode;
+ class JvmtiCachedClassFieldMap;
+ 
++// This is used in iterators below.
++class FieldClosure: public StackObj {
++public:
++  virtual void do_field(fieldDescriptor* fd) = 0;
++};
++
++#ifndef PRODUCT
++// Print fields.
++// If "obj" argument to constructor is NULL, prints static fields, otherwise prints non-static fields.
++class FieldPrinter: public FieldClosure {
++   oop _obj;
++   outputStream* _st;
++ public:
++   FieldPrinter(outputStream* st, oop obj = NULL) : _obj(obj), _st(st) {}
++   void do_field(fieldDescriptor* fd);
++};
++#endif  // !PRODUCT
++
+ class instanceKlass: public Klass {
+   friend class VMStructs;
+  public:
+-  // See "The Java Virtual Machine Specification" section 2.16.2-5 for a detailed description 
++  // See "The Java Virtual Machine Specification" section 2.16.2-5 for a detailed description
+   // of the class loading & initialization procedure, and the use of the states.
+   enum ClassState {
+     unparsable_by_gc = 0,               // object is not yet parsable by gc. Value of _init_state at object allocation.
+@@ -107,9 +123,9 @@
+   };
+ 
+  protected:
+-  // 
++  //
+   // The oop block.  See comment in klass.hpp before making changes.
+-  // 
++  //
+ 
+   // Array classes holding elements of this class.
+   klassOop        _array_klasses;
+@@ -152,11 +168,11 @@
+   // Index is the idnum, which is initially the same as the methods array index.
+   objArrayOop     _methods_annotations;
+   // Annotation objects (byte arrays) for methods' parameters, or null if no
+-  // such annotations. 
++  // such annotations.
+   // Index is the idnum, which is initially the same as the methods array index.
+   objArrayOop     _methods_parameter_annotations;
+   // Annotation objects (byte arrays) for methods' default values, or null if no
+-  // such annotations.  
++  // such annotations.
+   // Index is the idnum, which is initially the same as the methods array index.
+   objArrayOop     _methods_default_annotations;
+ 
+@@ -208,13 +224,13 @@
+   // field sizes
+   int nonstatic_field_size() const         { return _nonstatic_field_size; }
+   void set_nonstatic_field_size(int size)  { _nonstatic_field_size = size; }
+-  
++
+   int static_field_size() const            { return _static_field_size; }
+   void set_static_field_size(int size)     { _static_field_size = size; }
+-  
++
+   int static_oop_field_size() const        { return _static_oop_field_size; }
+   void set_static_oop_field_size(int size) { _static_oop_field_size = size; }
+-  
++
+   // Java vtable
+   int  vtable_length() const               { return _vtable_len; }
+   void set_vtable_length(int len)          { _vtable_len = len; }
+@@ -222,7 +238,7 @@
+   // Java itable
+   int  itable_length() const               { return _itable_len; }
+   void set_itable_length(int len)          { _itable_len = len; }
+-  
++
+   // array klasses
+   klassOop array_klasses() const           { return _array_klasses; }
+   void set_array_klasses(klassOop k)       { oop_store_without_check((oop*) &_array_klasses, (oop) k); }
+@@ -244,7 +260,7 @@
+ 
+   // fields
+   // Field info extracted from the class file and stored
+-  // as an array of 7 shorts 
++  // as an array of 7 shorts
+   enum FieldOffset {
+     access_flags_offset    = 0,
+     name_index_offset      = 1,
+@@ -261,7 +277,7 @@
+     return build_int_from_shorts( fields()->ushort_at(index + low_offset),
+                                   fields()->ushort_at(index + high_offset) );
+   }
+- 
++
+   void set_fields(typeArrayOop f)          { oop_store_without_check((oop*) &_fields, (oop) f); }
+ 
+   // inner classes
+@@ -281,8 +297,8 @@
+   bool is_same_class_package(klassOop class2);
+   bool is_same_class_package(oop classloader2, symbolOop classname2);
+   static bool is_same_class_package(oop class_loader1, symbolOop class_name1, oop class_loader2, symbolOop class_name2);
+-  
+-  // initialization state  
++
++  // initialization state
+   bool is_loaded() const                   { return _init_state >= loaded; }
+   bool is_linked() const                   { return _init_state >= linked; }
+   bool is_initialized() const              { return _init_state == fully_initialized; }
+@@ -305,7 +321,7 @@
+   void unlink_class();
+   void rewrite_class(TRAPS);
+   methodOop class_initializer();
+-  
++
+   // set the class to initialized if no static initializer is present
+   void eager_initialize(Thread *thread);
+ 
+@@ -321,16 +337,16 @@
+   klassOop find_field(symbolOop name, symbolOop sig, fieldDescriptor* fd) const;
+   // find instance or static fields according to JVM spec 5.4.3.2, returns the klass in which the field is defined
+   klassOop find_field(symbolOop name, symbolOop sig, bool is_static, fieldDescriptor* fd) const;
+-  
++
+   // find a non-static or static field given its offset within the class.
+-  bool contains_field_offset(int offset) { 
+-      return ((offset/wordSize) >= instanceOopDesc::header_size() && 
+-             (offset/wordSize)-instanceOopDesc::header_size() < nonstatic_field_size()); 
++  bool contains_field_offset(int offset) {
++      return ((offset/wordSize) >= instanceOopDesc::header_size() &&
++             (offset/wordSize)-instanceOopDesc::header_size() < nonstatic_field_size());
+   }
+ 
+   bool find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const;
+   bool find_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const;
+-  
++
+   // find a local method (returns NULL if not found)
+   methodOop find_method(symbolOop name, symbolOop signature) const;
+   static methodOop find_method(objArrayOop methods, symbolOop name, symbolOop signature);
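contains_field_offset(), reformatted in the hunk above, converts a byte offset into words and tests whether it falls after the object header but inside the instance-field area. A worked model under assumed constants (a 4-byte word and a 2-word header; the real values come from wordSize and instanceOopDesc::header_size()):

    #include <cassert>

    const int wordSize    = 4;   // assumed 32-bit word
    const int header_size = 2;   // assumed instanceOopDesc::header_size()

    static bool contains_field_offset(int offset, int nonstatic_field_size) {
      return (offset / wordSize) >= header_size &&
             (offset / wordSize) - header_size < nonstatic_field_size;
    }

    int main() {
      // 3 words of instance fields => byte offsets 8..19 are inside them
      assert(!contains_field_offset(4, 3));    // still in the header
      assert( contains_field_offset(8, 3));    // first field word
      assert( contains_field_offset(19, 3));   // last byte of the last word
      assert(!contains_field_offset(20, 3));   // past the instance fields
    }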
+@@ -341,15 +357,15 @@
+   // lookup a method in all the interfaces that this class implements
+   // (returns NULL if not found)
+   methodOop lookup_method_in_all_interfaces(symbolOop name, symbolOop signature) const;
+-  
++
+   // constant pool
+   constantPoolOop constants() const        { return _constants; }
+   void set_constants(constantPoolOop c)    { oop_store_without_check((oop*) &_constants, (oop) c); }
+- 
++
+   // class loader
+   oop class_loader() const                 { return _class_loader; }
+   void set_class_loader(oop l)             { oop_store((oop*) &_class_loader, l); }
+- 
++
+   // protection domain
+   oop protection_domain()                  { return _protection_domain; }
+   void set_protection_domain(oop pd)       { oop_store((oop*) &_protection_domain, pd); }
+@@ -357,7 +373,7 @@
+   // signers
+   objArrayOop signers() const              { return _signers; }
+   void set_signers(objArrayOop s)          { oop_store((oop*) &_signers, oop(s)); }
+- 
++
+   // source file name
+   symbolOop source_file_name() const       { return _source_file_name; }
+   void set_source_file_name(symbolOop n)   { oop_store_without_check((oop*) &_source_file_name, (oop) n); }
+@@ -394,12 +410,12 @@
+   jint get_cached_class_file_len()                    { return _cached_class_file_len; }
+   unsigned char * get_cached_class_file_bytes()       { return _cached_class_file_bytes; }
+ 
+-  // JVMTI: Support for caching of field indices, types, and offsets 
+-  void set_jvmti_cached_class_field_map(JvmtiCachedClassFieldMap* descriptor) { 
++  // JVMTI: Support for caching of field indices, types, and offsets
++  void set_jvmti_cached_class_field_map(JvmtiCachedClassFieldMap* descriptor) {
+     _jvmti_cached_class_field_map = descriptor;
+   }
+-  JvmtiCachedClassFieldMap* jvmti_cached_class_field_map() const { 
+-    return _jvmti_cached_class_field_map; 
++  JvmtiCachedClassFieldMap* jvmti_cached_class_field_map() const {
++    return _jvmti_cached_class_field_map;
+   }
+ 
+   // for adding methods, constMethodOopDesc::UNSET_IDNUM means no more ids available
+@@ -416,8 +432,8 @@
+                                                         _enclosing_method_method_index = method_index; }
+ 
+   // jmethodID support
+-  static jmethodID jmethod_id_for_impl(instanceKlassHandle ik_h, methodHandle method_h);      
+-  jmethodID jmethod_id_or_null(methodOop method);      
++  static jmethodID jmethod_id_for_impl(instanceKlassHandle ik_h, methodHandle method_h);
++  jmethodID jmethod_id_or_null(methodOop method);
+ 
+   // cached itable index support
+   void set_cached_itable_index(size_t idnum, int index);
+@@ -434,17 +450,17 @@
+   void set_methods_annotations(objArrayOop md)           { set_annotations(md, &_methods_annotations); }
+   void set_methods_parameter_annotations(objArrayOop md) { set_annotations(md, &_methods_parameter_annotations); }
+   void set_methods_default_annotations(objArrayOop md)   { set_annotations(md, &_methods_default_annotations); }
+-  typeArrayOop get_method_annotations_of(int idnum)    
++  typeArrayOop get_method_annotations_of(int idnum)
+                                                 { return get_method_annotations_from(idnum, _methods_annotations); }
+   typeArrayOop get_method_parameter_annotations_of(int idnum)
+                                                 { return get_method_annotations_from(idnum, _methods_parameter_annotations); }
+   typeArrayOop get_method_default_annotations_of(int idnum)
+                                                 { return get_method_annotations_from(idnum, _methods_default_annotations); }
+-  void set_method_annotations_of(int idnum, typeArrayOop anno)          
++  void set_method_annotations_of(int idnum, typeArrayOop anno)
+                                                 { set_methods_annotations_of(idnum, anno, &_methods_annotations); }
+   void set_method_parameter_annotations_of(int idnum, typeArrayOop anno)
+                                                 { set_methods_annotations_of(idnum, anno, &_methods_annotations); }
+-  void set_method_default_annotations_of(int idnum, typeArrayOop anno)  
++  void set_method_default_annotations_of(int idnum, typeArrayOop anno)
+                                                 { set_methods_annotations_of(idnum, anno, &_methods_annotations); }
+ 
+   // allocation
+@@ -478,7 +494,7 @@
+   JNIid* jni_id_for(int offset);
+ 
+   // maintenance of deoptimization dependencies
+-  int mark_dependent_nmethods(klassOop dependee);
++  int mark_dependent_nmethods(DepChange& changes);
+   void add_dependent_nmethod(nmethod* nm);
+   void remove_dependent_nmethod(nmethod* nm);
+ 
+@@ -525,9 +541,10 @@
+   bool oop_is_instance_slow() const        { return true; }
+ 
+   // Iterators
+-  void do_local_static_fields(void f(fieldDescriptor*, oop), oop obj);
++  void do_local_static_fields(FieldClosure* cl);
++  void do_nonstatic_fields(FieldClosure* cl); // including inherited fields
+   void do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS);
+-  void do_nonstatic_fields(void f(fieldDescriptor*, oop), oop obj); // including inherited fields
++
+   void methods_do(void f(methodOop method));
+   void array_klasses_do(void f(klassOop k));
+   void with_array_klasses_do(void f(klassOop k));
+@@ -546,9 +563,9 @@
+   static int vtable_start_offset()    { return header_size(); }
+   static int vtable_length_offset()   { return oopDesc::header_size() + offset_of(instanceKlass, _vtable_len) / HeapWordSize; }
+   static int object_size(int extra)   { return align_object_size(header_size() + extra); }
+-  
++
+   intptr_t* start_of_vtable() const        { return ((intptr_t*)as_klassOop()) + vtable_start_offset(); }
+-  intptr_t* start_of_itable() const        { return start_of_vtable() + align_object_offset(vtable_length()); }  
++  intptr_t* start_of_itable() const        { return start_of_vtable() + align_object_offset(vtable_length()); }
+   int  itable_offset_in_words() const { return start_of_itable() - (intptr_t*)as_klassOop(); }
+ 
+   oop* start_of_static_fields() const { return (oop*)(start_of_itable() + align_object_offset(itable_length())); }
+@@ -634,9 +651,9 @@
+   void iterate_static_fields(OopClosure* closure, MemRegion mr);
+ 
+ private:
+-  // initialization state  
++  // initialization state
+ #ifdef ASSERT
+-  void set_init_state(ClassState state);  
++  void set_init_state(ClassState state);
+ #else
+   void set_init_state(ClassState state) { _init_state = state; }
+ #endif
+@@ -644,12 +661,12 @@
+   void set_init_thread(Thread *thread)  { _init_thread = thread; }
+ 
+   u2 idnum_allocated_count() const      { return _idnum_allocated_count; }
+-  jmethodID* methods_jmethod_ids_acquire() const 
++  jmethodID* methods_jmethod_ids_acquire() const
+          { return (jmethodID*)OrderAccess::load_ptr_acquire(&_methods_jmethod_ids); }
+   void release_set_methods_jmethod_ids(jmethodID* jmeths)
+          { OrderAccess::release_store_ptr(&_methods_jmethod_ids, jmeths); }
+ 
+-  int* methods_cached_itable_indices_acquire() const 
++  int* methods_cached_itable_indices_acquire() const
+          { return (int*)OrderAccess::load_ptr_acquire(&_methods_cached_itable_indices); }
+   void release_set_methods_cached_itable_indices(int* indices)
+          { OrderAccess::release_store_ptr(&_methods_cached_itable_indices, indices); }
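The accessors above pair a release-store when a cache array is published with a load-acquire when it is read back, so a reader that sees the new pointer is guaranteed to see the array's initialized contents; OrderAccess supplies those barriers inside HotSpot. A modern-C++ model of the same publication idiom:

    #include <atomic>
    #include <cstdio>

    static std::atomic<int*> g_indices{nullptr};

    // load_ptr_acquire analogue: pointer and pointee become visible together.
    static int* indices_acquire() {
      return g_indices.load(std::memory_order_acquire);
    }

    // release_store_ptr analogue: initialize first, then publish.
    static void release_set_indices(int* p) {
      g_indices.store(p, std::memory_order_release);
    }

    int main() {
      static int data[4] = {4, 10, 20, 30};   // fill before publishing
      release_set_indices(data);
      int* p = indices_acquire();
      if (p != nullptr) printf("len=%d first=%d\n", p[0], p[1]);
    }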
+@@ -691,13 +708,13 @@
+   static void set_initialization_state_and_notify_impl  (instanceKlassHandle this_oop, ClassState state, TRAPS);
+   static void call_class_initializer_impl               (instanceKlassHandle this_oop, TRAPS);
+   static klassOop array_klass_impl                      (instanceKlassHandle this_oop, bool or_null, int n, TRAPS);
+-  static void do_local_static_fields_impl               (instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS);              
++  static void do_local_static_fields_impl               (instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS);
+   /* jni_id_for_impl for jfieldID only */
+-  static JNIid* jni_id_for_impl                         (instanceKlassHandle this_oop, int offset);      
++  static JNIid* jni_id_for_impl                         (instanceKlassHandle this_oop, int offset);
+ 
+   // Returns the array class for the n'th dimension
+   klassOop array_klass_impl(bool or_null, int n, TRAPS);
+-  
++
+   // Returns the array class with this class as element type
+   klassOop array_klass_impl(bool or_null, TRAPS);
+ 
+@@ -718,13 +735,16 @@
+   // Printing
+   void oop_print_on      (oop obj, outputStream* st);
+   void oop_print_value_on(oop obj, outputStream* st);
++
++  void print_dependent_nmethods(bool verbose = false);
++  bool is_dependent_nmethod(nmethod* nm);
+ #endif
+ 
+  public:
+   // Verification
+   const char* internal_name() const;
+   void oop_verify_on(oop obj, outputStream* st);
+-  
++
+ #ifndef PRODUCT
+   static void verify_class_klass_nonstatic_oop_maps(klassOop k) PRODUCT_RETURN;
+ #endif
+@@ -750,11 +770,11 @@
+ 
+ // for adding methods
+ // UNSET_IDNUM return means no more ids available
+-inline u2 instanceKlass::next_method_idnum() { 
++inline u2 instanceKlass::next_method_idnum() {
+   if (_idnum_allocated_count == constMethodOopDesc::MAX_IDNUM) {
+     return constMethodOopDesc::UNSET_IDNUM; // no more ids available
+   } else {
+-    return _idnum_allocated_count++; 
++    return _idnum_allocated_count++;
+   }
+ }
+ 
+@@ -766,7 +786,7 @@
+   jushort _offset;    // Offset of first oop in oop-map block
+   jushort _length;    // Length of oop-map block
+  public:
+-  // Accessors  
++  // Accessors
+   jushort offset() const          { return _offset; }
+   void set_offset(jushort offset) { _offset = offset; }
+ 
+@@ -775,38 +795,38 @@
+ };
+ 
+ /* JNIid class for jfieldIDs only */
+-class JNIid: public CHeapObj { 
+-  friend class VMStructs; 
+- private: 
+-  klassOop           _holder; 
+-  JNIid*             _next; 
+-  int                _offset; 
+-#ifdef ASSERT 
+-  bool               _is_static_field_id; 
+-#endif 
+-
+- public: 
+-  // Accessors 
+-  klassOop holder() const         { return _holder; } 
+-  int offset() const              { return _offset; } 
+-  JNIid* next()                   { return _next; } 
+-  // Constructor 
+-  JNIid(klassOop holder, int offset, JNIid* next); 
+-  // Identifier lookup 
+-  JNIid* find(int offset); 
+- 
+-  // Garbage collection support 
++class JNIid: public CHeapObj {
++  friend class VMStructs;
++ private:
++  klassOop           _holder;
++  JNIid*             _next;
++  int                _offset;
++#ifdef ASSERT
++  bool               _is_static_field_id;
++#endif
++
++ public:
++  // Accessors
++  klassOop holder() const         { return _holder; }
++  int offset() const              { return _offset; }
++  JNIid* next()                   { return _next; }
++  // Constructor
++  JNIid(klassOop holder, int offset, JNIid* next);
++  // Identifier lookup
++  JNIid* find(int offset);
++
++  // Garbage collection support
+   oop* holder_addr() { return (oop*)&_holder; }
+-  void oops_do(OopClosure* f); 
+-  static void deallocate(JNIid* id); 
+-  // Debugging 
+-#ifdef ASSERT 
+-  bool is_static_field_id() const { return _is_static_field_id; } 
+-  void set_is_static_field_id()   { _is_static_field_id = true; } 
+-#endif 
+-  void verify(klassOop holder); 
+-}; 
+- 
++  void oops_do(OopClosure* f);
++  static void deallocate(JNIid* id);
++  // Debugging
++#ifdef ASSERT
++  bool is_static_field_id() const { return _is_static_field_id; }
++  void set_is_static_field_id()   { _is_static_field_id = true; }
++#endif
++  void verify(klassOop holder);
++};
++
+ 
+ // If breakpoints are more numerous than just JVMTI breakpoints,
+ // consider compressing this data structure.
+diff -ruN openjdk6/hotspot/src/share/vm/oops/instanceKlassKlass.cpp openjdk/hotspot/src/share/vm/oops/instanceKlassKlass.cpp
+--- openjdk6/hotspot/src/share/vm/oops/instanceKlassKlass.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/instanceKlassKlass.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)instanceKlassKlass.cpp	1.156 07/05/29 09:44:20 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -30,7 +27,7 @@
+ 
+ klassOop instanceKlassKlass::create_klass(TRAPS) {
+   instanceKlassKlass o;
+-  KlassHandle h_this_klass(THREAD, Universe::klassKlassObj());  
++  KlassHandle h_this_klass(THREAD, Universe::klassKlassObj());
+   KlassHandle k = base_create_klass(h_this_klass, header_size(), o.vtbl_value(), CHECK_NULL);
+   // Make sure size calculation is right
+   assert(k()->size() == align_object_size(header_size()), "wrong size for object");
+@@ -50,7 +47,7 @@
+ }
+ 
+ void instanceKlassKlass::iterate_c_heap_oops(instanceKlass* ik,
+-					     OopClosure* closure) {
++                                             OopClosure* closure) {
+   if (ik->oop_map_cache() != NULL) {
+     ik->oop_map_cache()->oop_iterate(closure);
+   }
+@@ -102,7 +99,7 @@
+ 
+ #ifndef SERIALGC
+ void instanceKlassKlass::oop_follow_contents(ParCompactionManager* cm,
+-					     oop obj) {
++                                             oop obj) {
+   assert(obj->is_klass(),"must be a klass");
+   assert(klassOop(obj)->klass_part()->oop_is_instance_slow(), "must be instance klass");
+ 
+@@ -183,7 +180,7 @@
+ }
+ 
+ int instanceKlassKlass::oop_oop_iterate_m(oop obj, OopClosure* blk,
+-					   MemRegion mr) {
++                                           MemRegion mr) {
+   assert(obj->is_klass(),"must be a klass");
+   assert(klassOop(obj)->klass_part()->oop_is_instance_slow(), "must be instance klass");
+   instanceKlass* ik = instanceKlass::cast(klassOop(obj));
+@@ -332,7 +329,7 @@
+ int instanceKlassKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
+   assert(obj->is_klass(),"must be a klass");
+   assert(klassOop(obj)->klass_part()->oop_is_instance_slow(),
+-	 "must be instance klass");
++         "must be instance klass");
+ 
+   instanceKlass* ik = instanceKlass::cast(klassOop(obj));
+   ik->update_static_fields();
+@@ -353,11 +350,11 @@
+ }
+ 
+ int instanceKlassKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
+-					    HeapWord* beg_addr,
+-					    HeapWord* end_addr) {
++                                            HeapWord* beg_addr,
++                                            HeapWord* end_addr) {
+   assert(obj->is_klass(),"must be a klass");
+   assert(klassOop(obj)->klass_part()->oop_is_instance_slow(),
+-	 "must be instance klass");
++         "must be instance klass");
+ 
+   instanceKlass* ik = instanceKlass::cast(klassOop(obj));
+   ik->update_static_fields(beg_addr, end_addr);
+@@ -383,13 +380,13 @@
+ }
+ #endif // SERIALGC
+ 
+-klassOop instanceKlassKlass::allocate_instance_klass(int vtable_len, int itable_len, int static_field_size, 
++klassOop instanceKlassKlass::allocate_instance_klass(int vtable_len, int itable_len, int static_field_size,
+                                                      int nonstatic_oop_map_size, ReferenceType rt, TRAPS) {
+ 
+   int size = instanceKlass::object_size(align_object_offset(vtable_len) + align_object_offset(itable_len) + static_field_size + nonstatic_oop_map_size);
+ 
+   // Allocation
+-  KlassHandle h_this_klass(THREAD, as_klassOop());  
++  KlassHandle h_this_klass(THREAD, as_klassOop());
+   KlassHandle k;
+   if (rt == REF_NONE) {
+     // regular klass
+@@ -408,12 +405,12 @@
+     // The sizes of these these three variables are used for determining the
+     // size of the instanceKlassOop. It is critical that these are set to the right
+     // sizes before the first GC, i.e., when we allocate the mirror.
+-    ik->set_vtable_length(vtable_len);  
+-    ik->set_itable_length(itable_len);  
++    ik->set_vtable_length(vtable_len);
++    ik->set_itable_length(itable_len);
+     ik->set_static_field_size(static_field_size);
+     ik->set_nonstatic_oop_map_size(nonstatic_oop_map_size);
+     assert(k()->size() == size, "wrong size for object");
+-  
++
+     ik->set_array_klasses(NULL);
+     ik->set_methods(NULL);
+     ik->set_method_ordering(NULL);
+@@ -427,9 +424,9 @@
+     ik->set_signers(NULL);
+     ik->set_source_file_name(NULL);
+     ik->set_source_debug_extension(NULL);
+-    ik->set_inner_classes(NULL);  
++    ik->set_inner_classes(NULL);
+     ik->set_static_oop_field_size(0);
+-    ik->set_nonstatic_field_size(0);  
++    ik->set_nonstatic_field_size(0);
+     ik->set_is_marked_dependent(false);
+     ik->set_init_state(instanceKlass::allocated);
+     ik->set_init_thread(NULL);
+@@ -451,15 +448,15 @@
+     ik->set_jvmti_cached_class_field_map(NULL);
+     ik->set_initial_method_idnum(0);
+     assert(k()->is_parsable(), "should be parsable here.");
+-  
++
+     // initialize the non-header words to zero
+     intptr_t* p = (intptr_t*)k();
+     for (int index = instanceKlass::header_size(); index < size; index++) {
+       p[index] = NULL_WORD;
+     }
+-  
++
+     // To get verify to work - must be set to partial loaded before first GC point.
+-    k()->set_partially_loaded();     
++    k()->set_partially_loaded();
+   }
+ 
+   // GC can happen here
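allocate_instance_klass() above sizes the instanceKlassOop as the header plus the aligned vtable and itable, the static-field area, and the oop maps, then zero-fills every non-header word before the first GC can see the object. A rough model of the size arithmetic; the alignment and header width here are assumptions standing in for align_object_offset()/align_object_size() and instanceKlass::header_size():

    #include <cstdio>

    const int kAlign      = 8;    // assumed heap-word alignment
    const int header_size = 14;   // assumed instanceKlass::header_size()

    static int align_up(int words) { return (words + kAlign - 1) & ~(kAlign - 1); }
    static int object_size(int extra) { return align_up(header_size + extra); }

    int main() {
      int vtable_len = 5, itable_len = 2;
      int static_field_size = 3, nonstatic_oop_map_size = 1;
      int size = object_size(align_up(vtable_len) + align_up(itable_len) +
                             static_field_size + nonstatic_oop_map_size);
      printf("instanceKlassOop size = %d words\n", size);
      // The patch then zeroes the non-header words, as in the hunk above:
      //   for (int index = header_size; index < size; index++) p[index] = 0;
    }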
+@@ -473,23 +470,6 @@
+ 
+ // Printing
+ 
+-static outputStream* printing_stream = NULL;
+-
+-static void print_nonstatic_field(fieldDescriptor* fd, oop obj) {
+-  assert(printing_stream != NULL, "Invalid printing stream");
+-  printing_stream->print("   - ");
+-  fd->print_on(printing_stream);
+-  printing_stream->cr();
+-}
+-
+-static void print_static_field(fieldDescriptor* fd, oop obj) {
+-  assert(printing_stream != NULL, "Invalid printing stream");
+-  printing_stream->print("   - ");
+-  fd->print_on_for(printing_stream, obj);
+-  printing_stream->cr();
+-}
+-
+-
+ static const char* state_names[] = {
+   "unparseable_by_gc", "allocated", "loaded", "linked", "being_initialized", "fully_initialized", "initialization_error"
+ };
+@@ -506,13 +486,13 @@
+   st->print(" - state:             "); st->print_cr(state_names[ik->_init_state]);
+   st->print(" - name:              "); ik->name()->print_value_on(st);             st->cr();
+   st->print(" - super:             "); ik->super()->print_value_on(st);            st->cr();
+-  st->print(" - sub:               "); 
+-  Klass* sub = ik->subklass(); 
++  st->print(" - sub:               ");
++  Klass* sub = ik->subklass();
+   int n;
+   for (n = 0; sub != NULL; n++, sub = sub->next_sibling()) {
+-    if (n < MaxSubklassPrintSize) {       
+-      sub->as_klassOop()->print_value_on(st); 
+-      st->print("   "); 
++    if (n < MaxSubklassPrintSize) {
++      sub->as_klassOop()->print_value_on(st);
++      st->print("   ");
+     }
+   }
+   if (n >= MaxSubklassPrintSize) st->print("(%d more klasses...)", n - MaxSubklassPrintSize);
+@@ -525,7 +505,7 @@
+       if (ik->implementor(i) != NULL) {
+         if (++print_impl == 1)
+           st->print_cr(" - implementor:    ");
+-        st->print("   "); 
++        st->print("   ");
+         ik->implementor(i)->print_value_on(st);
+       }
+     }
+@@ -548,18 +528,18 @@
+   st->print(" - protection domain: "); ik->protection_domain()->print_value_on(st); st->cr();
+   st->print(" - signers:           "); ik->signers()->print_value_on(st);           st->cr();
+   if (ik->source_file_name() != NULL) {
+-    st->print(" - source file:       "); 
++    st->print(" - source file:       ");
+     ik->source_file_name()->print_value_on(st);
+     st->cr();
+   }
+   if (ik->source_debug_extension() != NULL) {
+-    st->print(" - source debug extension:       "); 
++    st->print(" - source debug extension:       ");
+     ik->source_debug_extension()->print_value_on(st);
+     st->cr();
+   }
+ 
+-  st->print_cr(" - previous version:       "); 
+-  { 
++  st->print_cr(" - previous version:       ");
++  {
+     ResourceMark rm;
+     // PreviousVersionInfo objects returned via PreviousVersionWalker
+     // contain a GrowableArray of handles. We have to clean up the
+@@ -576,19 +556,19 @@
+   } // rm is cleaned up
+ 
+   if (ik->generic_signature() != NULL) {
+-    st->print(" - generic signature:            "); 
++    st->print(" - generic signature:            ");
+     ik->generic_signature()->print_value_on(st);
+   }
+   st->print(" - inner classes:     "); ik->inner_classes()->print_value_on(st);     st->cr();
+   st->print(" - java mirror:       "); ik->java_mirror()->print_value_on(st);       st->cr();
+-  st->print(" - vtable length      %d  (start addr: " INTPTR_FORMAT ")", ik->vtable_length(), ik->start_of_vtable());  st->cr();  
++  st->print(" - vtable length      %d  (start addr: " INTPTR_FORMAT ")", ik->vtable_length(), ik->start_of_vtable());  st->cr();
+   st->print(" - itable length      %d (start addr: " INTPTR_FORMAT ")", ik->itable_length(), ik->start_of_itable()); st->cr();
+   st->print_cr(" - static fields:");
+-  printing_stream = st;
+-  ik->do_local_static_fields(print_static_field, obj);
++  FieldPrinter print_static_field(st);
++  ik->do_local_static_fields(&print_static_field);
+   st->print_cr(" - non-static fields:");
+-  ik->do_nonstatic_fields(print_nonstatic_field, NULL);
+-  printing_stream = NULL;
++  FieldPrinter print_nonstatic_field(st, obj);
++  ik->do_nonstatic_fields(&print_nonstatic_field);
+ 
+   st->print(" - static oop maps:     ");
+   if (ik->static_oop_field_size() > 0) {
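The printing hunks above are the payoff of the FieldClosure change: the file-scoped printing_stream global, and the two static callbacks that asserted it was set, are deleted, and oop_print_on() instead constructs FieldPrinter objects that carry the stream and target object themselves. A tiny runnable illustration of the call-site shape, with FILE* standing in for outputStream*:

    #include <cstdio>

    struct fieldDescriptor { const char* name; };

    // After the patch, the stream rides along inside the closure instead
    // of in a file-scope global handed over before each iteration.
    class FieldPrinter {
      FILE* _st;
     public:
      explicit FieldPrinter(FILE* st) : _st(st) {}
      void do_field(fieldDescriptor* fd) { fprintf(_st, "   - %s\n", fd->name); }
    };

    int main() {
      fieldDescriptor fd = {"value"};
      FieldPrinter print_static_field(stdout);   // st captured at construction
      print_static_field.do_field(&fd);          // no global handoff required
    }

Carrying the state in the object also means two printers aimed at different streams can coexist, which the single printing_stream global could not support.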
+@@ -636,7 +616,7 @@
+   klassKlass::oop_verify_on(obj, st);
+   if (!obj->partially_loaded()) {
+     Thread *thread = Thread::current();
+-    instanceKlass* ik = instanceKlass::cast(klassOop(obj));   
++    instanceKlass* ik = instanceKlass::cast(klassOop(obj));
+ 
+ #ifndef PRODUCT
+     // Avoid redundant verifies
+@@ -650,26 +630,26 @@
+       Handle h_obj(thread, obj);
+       SystemDictionary::verify_obj_klass_present(h_obj, h_name, h_loader);
+     }
+-    
++
+     // Verify static fields
+     VerifyFieldClosure blk;
+     ik->iterate_static_fields(&blk);
+ 
+     // Verify vtables
+     if (ik->is_linked()) {
+-      ResourceMark rm(thread);  
++      ResourceMark rm(thread);
+       // $$$ This used to be done only for m/s collections.  Doing it
+       // always seemed a valid generalization.  (DLD -- 6/00)
+       ik->vtable()->verify(st);
+     }
+-  
++
+     // Verify oop map cache
+     if (ik->oop_map_cache() != NULL) {
+       ik->oop_map_cache()->verify();
+     }
+ 
+     // Verify first subklass
+-    if (ik->subklass_oop() != NULL) { 
++    if (ik->subklass_oop() != NULL) {
+       guarantee(ik->subklass_oop()->is_perm(),  "should be in permspace");
+       guarantee(ik->subklass_oop()->is_klass(), "should be klass");
+     }
+@@ -703,7 +683,7 @@
+       guarantee(im->is_klass(), "should be klass");
+       guarantee(!Klass::cast(klassOop(im))->is_interface(), "implementors cannot be interfaces");
+     }
+-    
++
+     // Verify local interfaces
+     objArrayOop local_interfaces = ik->local_interfaces();
+     guarantee(local_interfaces->is_perm(),          "should be in permspace");
+@@ -735,7 +715,7 @@
+       methodOop m2 = methodOop(methods->obj_at(j + 1));
+       guarantee(m1->name()->fast_compare(m2->name()) <= 0, "methods not sorted correctly");
+     }
+-    
++
+     // Verify method ordering
+     typeArrayOop method_ordering = ik->method_ordering();
+     guarantee(method_ordering->is_perm(),              "should be in permspace");
+@@ -827,4 +807,3 @@
+   assert(ik->transitive_interfaces() == NULL, "just checking");
+   ik->set_transitive_interfaces((objArrayOop) obj);   // Temporarily set transitive_interfaces to point to self
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/instanceKlassKlass.hpp openjdk/hotspot/src/share/vm/oops/instanceKlassKlass.hpp
+--- openjdk6/hotspot/src/share/vm/oops/instanceKlassKlass.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/instanceKlassKlass.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)instanceKlassKlass.hpp	1.54 07/05/29 09:44:21 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // An instanceKlassKlass is the klass of an instanceKlass
+@@ -39,17 +36,17 @@
+   // Allocation
+   DEFINE_ALLOCATE_PERMANENT(instanceKlassKlass);
+   static klassOop create_klass(TRAPS);
+-  klassOop allocate_instance_klass(int vtable_len, 
+-                                   int itable_len, 
+-                                   int static_field_size, 
+-                                   int nonstatic_oop_map_size, 
+-                                   ReferenceType rt, 
++  klassOop allocate_instance_klass(int vtable_len,
++                                   int itable_len,
++                                   int static_field_size,
++                                   int nonstatic_oop_map_size,
++                                   ReferenceType rt,
+                                    TRAPS);
+ 
+   // Casting from klassOop
+   static instanceKlassKlass* cast(klassOop k) {
+     assert(k->klass_part()->oop_is_klass(), "cast to instanceKlassKlass");
+-    return (instanceKlassKlass*) k->klass_part(); 
++    return (instanceKlassKlass*) k->klass_part();
+   }
+ 
+   // Sizing
+@@ -87,4 +84,3 @@
+   bool oop_partially_loaded(oop obj) const;
+   void oop_set_partially_loaded(oop obj);
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/instanceOop.cpp openjdk/hotspot/src/share/vm/oops/instanceOop.cpp
+--- openjdk6/hotspot/src/share/vm/oops/instanceOop.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/instanceOop.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)instanceOop.cpp	1.14 07/05/05 17:06:03 JVM"
+-#endif
+ /*
+  * Copyright 1997 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,11 +19,10 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_instanceOop.cpp.incl"
+ 
+ // <<this page is intentionally left blank>>
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/instanceOop.hpp openjdk/hotspot/src/share/vm/oops/instanceOop.hpp
+--- openjdk6/hotspot/src/share/vm/oops/instanceOop.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/instanceOop.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)instanceOop.hpp	1.15 07/05/05 17:06:04 JVM"
+-#endif
+ /*
+  * Copyright 1997-2000 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // An instanceOop is an instance of a Java Class
+@@ -32,5 +29,3 @@
+  public:
+   static int header_size() { return sizeof(instanceOopDesc)/HeapWordSize; }
+ };
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/instanceRefKlass.cpp openjdk/hotspot/src/share/vm/oops/instanceRefKlass.cpp
+--- openjdk6/hotspot/src/share/vm/oops/instanceRefKlass.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/instanceRefKlass.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)instanceRefKlass.cpp	1.90 07/05/29 09:44:20 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -42,14 +39,14 @@
+           discover_reference(obj, reference_type())) {
+       // reference already enqueued, referent will be traversed later
+       instanceKlass::oop_follow_contents(obj);
+-      debug_only( 
++      debug_only(
+         if(TraceReferenceGC && PrintGCDetails) {
+-          gclog_or_tty->print_cr("       Non NULL enqueued " INTPTR_FORMAT, (address)obj); 
++          gclog_or_tty->print_cr("       Non NULL enqueued " INTPTR_FORMAT, (address)obj);
+         }
+       )
+       return;
+     } else {
+-      // treat referent as normal oop      
++      // treat referent as normal oop
+       debug_only(
+         if(TraceReferenceGC && PrintGCDetails) {
+           gclog_or_tty->print_cr("       Non NULL normal " INTPTR_FORMAT, (address)obj);
+@@ -71,7 +68,7 @@
+ 
+ #ifndef SERIALGC
+ void instanceRefKlass::oop_follow_contents(ParCompactionManager* cm,
+-					   oop obj) {
++                                           oop obj) {
+   oop* referent_addr = java_lang_ref_Reference::referent_addr(obj);
+   oop referent = *referent_addr;
+   debug_only(
+@@ -85,14 +82,14 @@
+           discover_reference(obj, reference_type())) {
+       // reference already enqueued, referent will be traversed later
+       instanceKlass::oop_follow_contents(cm, obj);
+-      debug_only( 
++      debug_only(
+         if(TraceReferenceGC && PrintGCDetails) {
+-          gclog_or_tty->print_cr("       Non NULL enqueued " INTPTR_FORMAT, (address)obj); 
++          gclog_or_tty->print_cr("       Non NULL enqueued " INTPTR_FORMAT, (address)obj);
+         }
+       )
+       return;
+     } else {
+-      // treat referent as normal oop      
++      // treat referent as normal oop
+       debug_only(
+         if(TraceReferenceGC && PrintGCDetails) {
+           gclog_or_tty->print_cr("       Non NULL normal " INTPTR_FORMAT, (address)obj);
+@@ -117,7 +114,7 @@
+ int instanceRefKlass::oop_adjust_pointers(oop obj) {
+   int size = size_helper();
+   instanceKlass::oop_adjust_pointers(obj);
+-  
++
+   oop* referent_addr = java_lang_ref_Reference::referent_addr(obj);
+   MarkSweep::adjust_pointer(referent_addr);
+   oop* next_addr = java_lang_ref_Reference::next_addr(obj);
+@@ -128,16 +125,16 @@
+ #ifdef ASSERT
+   if(TraceReferenceGC && PrintGCDetails) {
+     gclog_or_tty->print_cr("instanceRefKlass::oop_adjust_pointers obj "
+-			   INTPTR_FORMAT, (address)obj);
++                           INTPTR_FORMAT, (address)obj);
+     gclog_or_tty->print_cr("     referent_addr/* " INTPTR_FORMAT " / "
+-			   INTPTR_FORMAT, referent_addr,
+-			   referent_addr ? (address)*referent_addr : NULL);
++                           INTPTR_FORMAT, referent_addr,
++                           referent_addr ? (address)*referent_addr : NULL);
+     gclog_or_tty->print_cr("     next_addr/* " INTPTR_FORMAT " / "
+-			   INTPTR_FORMAT, next_addr,
+-			   next_addr ? (address)*next_addr : NULL);
++                           INTPTR_FORMAT, next_addr,
++                           next_addr ? (address)*next_addr : NULL);
+     gclog_or_tty->print_cr("     discovered_addr/* " INTPTR_FORMAT " / "
+-			   INTPTR_FORMAT, discovered_addr,
+-			   discovered_addr ? (address)*discovered_addr : NULL);
++                           INTPTR_FORMAT, discovered_addr,
++                           discovered_addr ? (address)*discovered_addr : NULL);
+   }
+ #endif
+ 
+@@ -260,7 +257,7 @@
+ 
+ int instanceRefKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
+   instanceKlass::oop_update_pointers(cm, obj);
+-  
++
+   oop* referent_addr = java_lang_ref_Reference::referent_addr(obj);
+   PSParallelCompact::adjust_pointer(referent_addr);
+   oop* next_addr = java_lang_ref_Reference::next_addr(obj);
+@@ -271,16 +268,16 @@
+ #ifdef ASSERT
+   if(TraceReferenceGC && PrintGCDetails) {
+     gclog_or_tty->print_cr("instanceRefKlass::oop_update_pointers obj "
+-			   INTPTR_FORMAT, (oopDesc*) obj);
++                           INTPTR_FORMAT, (oopDesc*) obj);
+     gclog_or_tty->print_cr("     referent_addr/* " INTPTR_FORMAT " / "
+-			   INTPTR_FORMAT, referent_addr,
+-			   referent_addr ? (oopDesc*) *referent_addr : NULL);
++                           INTPTR_FORMAT, referent_addr,
++                           referent_addr ? (oopDesc*) *referent_addr : NULL);
+     gclog_or_tty->print_cr("     next_addr/* " INTPTR_FORMAT " / "
+-			   INTPTR_FORMAT, next_addr,
+-			   next_addr ? (oopDesc*) *next_addr : NULL);
++                           INTPTR_FORMAT, next_addr,
++                           next_addr ? (oopDesc*) *next_addr : NULL);
+     gclog_or_tty->print_cr("     discovered_addr/* " INTPTR_FORMAT " / "
+-		   INTPTR_FORMAT, discovered_addr,
+-		   discovered_addr ? (oopDesc*) *discovered_addr : NULL);
++                   INTPTR_FORMAT, discovered_addr,
++                   discovered_addr ? (oopDesc*) *discovered_addr : NULL);
+   }
+ #endif
+ 
+@@ -289,9 +286,9 @@
+ 
+ int
+ instanceRefKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
+-				      HeapWord* beg_addr, HeapWord* end_addr) {
++                                      HeapWord* beg_addr, HeapWord* end_addr) {
+   instanceKlass::oop_update_pointers(cm, obj, beg_addr, end_addr);
+-  
++
+   oop* p;
+   oop* referent_addr = p = java_lang_ref_Reference::referent_addr(obj);
+   PSParallelCompact::adjust_pointer(p, beg_addr, end_addr);
+@@ -303,16 +300,16 @@
+ #ifdef ASSERT
+   if(TraceReferenceGC && PrintGCDetails) {
+     gclog_or_tty->print_cr("instanceRefKlass::oop_update_pointers obj "
+-			   INTPTR_FORMAT, (oopDesc*) obj);
++                           INTPTR_FORMAT, (oopDesc*) obj);
+     gclog_or_tty->print_cr("     referent_addr/* " INTPTR_FORMAT " / "
+-			   INTPTR_FORMAT, referent_addr,
+-			   referent_addr ? (oopDesc*) *referent_addr : NULL);
++                           INTPTR_FORMAT, referent_addr,
++                           referent_addr ? (oopDesc*) *referent_addr : NULL);
+     gclog_or_tty->print_cr("     next_addr/* " INTPTR_FORMAT " / "
+-			   INTPTR_FORMAT, next_addr,
+-			   next_addr ? (oopDesc*) *next_addr : NULL);
++                           INTPTR_FORMAT, next_addr,
++                           next_addr ? (oopDesc*) *next_addr : NULL);
+     gclog_or_tty->print_cr("     discovered_addr/* " INTPTR_FORMAT " / "
+-		   INTPTR_FORMAT, discovered_addr,
+-		   discovered_addr ? (oopDesc*) *discovered_addr : NULL);
++                   INTPTR_FORMAT, discovered_addr,
++                   discovered_addr ? (oopDesc*) *discovered_addr : NULL);
+   }
+ #endif
+ 
+@@ -322,7 +319,7 @@
+ 
+ void instanceRefKlass::update_nonstatic_oop_maps(klassOop k) {
+   // Clear the nonstatic oop-map entries corresponding to referent
+-  // and nextPending field.  They are treated specially by the 
++  // and nextPending field.  They are treated specially by the
+   // garbage collector.
+   // The discovered field is used only by the garbage collector
+   // and is also treated specially.
+@@ -340,7 +337,7 @@
+   // Check that the current map is (2,4) - currently points at field with
+   // offset 2 (words) and has 4 map entries.
+   debug_only(int offset = java_lang_ref_Reference::referent_offset);
+-  debug_only(int length = ((java_lang_ref_Reference::discovered_offset - 
++  debug_only(int length = ((java_lang_ref_Reference::discovered_offset -
+     java_lang_ref_Reference::referent_offset)/wordSize) + 1);
+ 
+   if (UseSharedSpaces) {
+@@ -368,7 +365,7 @@
+   GenCollectedHeap* gch = NULL;
+   if (Universe::heap()->kind() == CollectedHeap::GenCollectedHeap)
+     gch = GenCollectedHeap::heap();
+-  
++
+   if (referent != NULL) {
+     guarantee(referent->is_oop(), "referent field heap failed");
+     if (gch != NULL && !gch->is_in_youngest(obj))
+@@ -380,7 +377,7 @@
+   // Verify next field
+   oop next = java_lang_ref_Reference::next(obj);
+   if (next != NULL) {
+-    guarantee(next->is_oop(), "next field verify failed");    
++    guarantee(next->is_oop(), "next field verify failed");
+     guarantee(next->is_instanceRef(), "next field verify failed");
+     if (gch != NULL && !gch->is_in_youngest(obj)) {
+       // We do a specific remembered set check here since the next field is
+diff -ruN openjdk6/hotspot/src/share/vm/oops/instanceRefKlass.hpp openjdk/hotspot/src/share/vm/oops/instanceRefKlass.hpp
+--- openjdk6/hotspot/src/share/vm/oops/instanceRefKlass.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/instanceRefKlass.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)instanceRefKlass.hpp	1.62 07/05/29 09:44:21 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,13 +19,13 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+-// An instanceRefKlass is a specialized instanceKlass for Java 
++// An instanceRefKlass is a specialized instanceKlass for Java
+ // classes that are subclasses of java/lang/ref/Reference.
+ //
+-// These classes are used to implement soft/weak/final/phantom 
++// These classes are used to implement soft/weak/final/phantom
+ // references and finalization, and need special treatment by the
+ // garbage collector.
+ //
+@@ -50,7 +47,7 @@
+   // Casting from klassOop
+   static instanceRefKlass* cast(klassOop k) {
+     assert(k->klass_part()->oop_is_instanceRef(), "cast to instanceRefKlass");
+-    return (instanceRefKlass*) k->klass_part(); 
++    return (instanceRefKlass*) k->klass_part();
+   }
+ 
+   // allocation
+diff -ruN openjdk6/hotspot/src/share/vm/oops/klass.cpp openjdk/hotspot/src/share/vm/oops/klass.cpp
+--- openjdk6/hotspot/src/share/vm/oops/klass.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/klass.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)klass.cpp	1.119 07/05/05 17:06:00 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -32,11 +29,11 @@
+ bool Klass::is_subclass_of(klassOop k) const {
+   // Run up the super chain and check
+   klassOop t = as_klassOop();
+-    
++
+   if (t == k) return true;
+   t = Klass::cast(t)->super();
+ 
+-  while (t != NULL) {  
++  while (t != NULL) {
+     if (t == k) return true;
+     t = Klass::cast(t)->super();
+   }
+@@ -89,7 +86,7 @@
+ void Klass::check_valid_for_instantiation(bool throwError, TRAPS) {
+   ResourceMark rm(THREAD);
+   THROW_MSG(throwError ? vmSymbols::java_lang_InstantiationError()
+-	    : vmSymbols::java_lang_InstantiationException(), external_name());
++            : vmSymbols::java_lang_InstantiationException(), external_name());
+ }
+ 
+ 
+@@ -118,7 +115,7 @@
+   return NULL;
+ }
+ 
+-klassOop Klass::base_create_klass_oop(KlassHandle& klass, int size, 
++klassOop Klass::base_create_klass_oop(KlassHandle& klass, int size,
+                                       const Klass_vtbl& vtbl, TRAPS) {
+   size = align_object_size(size);
+   // allocate and initialize vtable
+@@ -161,21 +158,21 @@
+   return KlassHandle(THREAD, ek);
+ }
+ 
+-void Klass_vtbl::post_new_init_klass(KlassHandle& klass, 
+-				     klassOop new_klass,
+-				     int size) const {
++void Klass_vtbl::post_new_init_klass(KlassHandle& klass,
++                                     klassOop new_klass,
++                                     int size) const {
+   assert(!new_klass->klass_part()->null_vtbl(), "Not a complete klass");
+   CollectedHeap::post_allocation_install_obj_klass(klass, new_klass, size);
+ }
+ 
+ void* Klass_vtbl::operator new(size_t ignored, KlassHandle& klass,
+                                int size, TRAPS) {
+-  // The vtable pointer is installed during the execution of 
++  // The vtable pointer is installed during the execution of
+   // constructors in the call to permanent_obj_allocate().  Delay
+   // the installation of the klass pointer into the new klass "k"
+   // until after the vtable pointer has been installed (i.e., until
+   // after the return of permanent_obj_allocate().
+-  klassOop k = 
++  klassOop k =
+     (klassOop) CollectedHeap::permanent_obj_allocate_no_klass_install(klass,
+       size, CHECK_NULL);
+   return k->klass_part();
+@@ -222,7 +219,7 @@
+     assert(super_depth() == 0, "Object must already be initialized properly");
+   } else if (k != super() || k == SystemDictionary::object_klass()) {
+     assert(super() == NULL || super() == SystemDictionary::object_klass(),
+-	   "initialize this only once to a non-trivial value");
++           "initialize this only once to a non-trivial value");
+     set_super(k);
+     Klass* sup = k->klass_part();
+     int sup_depth = sup->super_depth();
+@@ -248,16 +245,16 @@
+       assert(j == my_depth, "computed accessor gets right answer");
+       klassOop t = as_klassOop();
+       while (!Klass::cast(t)->can_be_primary_super()) {
+-	t = Klass::cast(t)->super();
+-	j = Klass::cast(t)->super_depth();
++        t = Klass::cast(t)->super();
++        j = Klass::cast(t)->super_depth();
+       }
+       for (juint j1 = j+1; j1 < primary_super_limit(); j1++) {
+-	assert(primary_super_of_depth(j1) == NULL, "super list padding");
++        assert(primary_super_of_depth(j1) == NULL, "super list padding");
+       }
+       while (t != NULL) {
+-	assert(primary_super_of_depth(j) == t, "super list initialization");
+-	t = Klass::cast(t)->super();
+-	--j;
++        assert(primary_super_of_depth(j) == t, "super list initialization");
++        t = Klass::cast(t)->super();
++        --j;
+       }
+       assert(j == (juint)-1, "correct depth count");
+     }
+@@ -313,7 +310,7 @@
+     if (secondaries() != Universe::the_array_interfaces_array()) {
+       // We must not copy any NULL placeholders left over from bootstrap.
+       for (int j = 0; j < secondaries->length(); j++) {
+-	assert(secondaries->obj_at(j) != NULL, "correct bootstrapping order");
++        assert(secondaries->obj_at(j) != NULL, "correct bootstrapping order");
+       }
+     }
+   #endif
+@@ -379,7 +376,7 @@
+     // first subklass
+     super->set_subklass(_next_sibling);
+   } else {
+-    Klass* sib = super->subklass(); 
++    Klass* sib = super->subklass();
+     while (sib->next_sibling() != this) {
+       sib = sib->next_sibling();
+     };
+@@ -389,8 +386,8 @@
+ 
+ void Klass::follow_weak_klass_links( BoolObjectClosure* is_alive, OopClosure* keep_alive) {
+   // This klass is alive but the subklass and siblings are not followed/updated.
+-  // We update the subklass link and the subklass' sibling links here. 
+-  // Our own sibling link will be updated by our superclass (which must be alive 
++  // We update the subklass link and the subklass' sibling links here.
++  // Our own sibling link will be updated by our superclass (which must be alive
+   // since we are).
+   assert(is_alive->do_object_b(as_klassOop()), "just checking, this should be live");
+   if (ClassUnloading) {
+@@ -427,7 +424,7 @@
+       sub = next;
+     }
+   } else {
+-    // Always follow subklass and sibling link. This will prevent any klasses from 
++    // Always follow subklass and sibling link. This will prevent any klasses from
+     // being unloaded (all classes are transitively linked from java.lang.Object).
+     keep_alive->do_oop(adr_subklass());
+     keep_alive->do_oop(adr_next_sibling());
+@@ -449,7 +446,7 @@
+ 
+ klassOop Klass::array_klass_or_null(int rank) {
+   EXCEPTION_MARK;
+-  // No exception can be thrown by array_klass_impl when called with or_null == true. 
++  // No exception can be thrown by array_klass_impl when called with or_null == true.
+   // (In anycase, the execption mark will fail if it do so)
+   return array_klass_impl(true, rank, THREAD);
+ }
+@@ -457,9 +454,9 @@
+ 
+ klassOop Klass::array_klass_or_null() {
+   EXCEPTION_MARK;
+-  // No exception can be thrown by array_klass_impl when called with or_null == true. 
++  // No exception can be thrown by array_klass_impl when called with or_null == true.
+   // (In anycase, the execption mark will fail if it do so)
+-  return array_klass_impl(true, THREAD); 
++  return array_klass_impl(true, THREAD);
+ }
+ 
+ 
+@@ -512,7 +509,7 @@
+   // print title
+   st->print_cr("%s ", internal_name());
+   obj->print_address_on(st);
+-  
++
+   if (WizardMode) {
+      // print header
+      obj->mark()->print_on(st);
+@@ -527,7 +524,7 @@
+ 
+ void Klass::oop_print_value_on(oop obj, outputStream* st) {
+   // print title
+-  ResourceMark rm;		// Cannot print in debug mode without this
++  ResourceMark rm;              // Cannot print in debug mode without this
+   st->print("%s", internal_name());
+   obj->print_address_on(st);
+ }
+@@ -537,7 +534,7 @@
+ // Verification
+ 
+ void Klass::oop_verify_on(oop obj, outputStream* st) {
+-  guarantee(obj->is_oop(),  "should be oop");  
++  guarantee(obj->is_oop(),  "should be oop");
+   guarantee(obj->klass()->is_perm(),  "should be in permspace");
+   guarantee(obj->klass()->is_klass(), "klass field is not a klass");
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/oops/klass.hpp openjdk/hotspot/src/share/vm/oops/klass.hpp
+--- openjdk6/hotspot/src/share/vm/oops/klass.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/klass.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)klass.hpp	1.142 07/05/29 09:44:17 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,14 +19,14 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A Klass is the part of the klassOop that provides:
+ //  1: language level class object (method dictionary etc.)
+ //  2: provide vm dispatch behavior for the object
+ // Both functions are combined into one C++ class. The toplevel class "Klass"
+-// implements purpose 1 whereas all subclasses provide extra virtual functions 
++// implements purpose 1 whereas all subclasses provide extra virtual functions
+ // for purpose 2.
+ 
+ // One reason for the oop/klass dichotomy in the implementation is
+@@ -141,10 +138,10 @@
+   void* allocate_permanent(KlassHandle& klass_klass, int size, TRAPS) const { \
+     void* result = new(klass_klass, size, THREAD) thisKlass(); \
+     if (HAS_PENDING_EXCEPTION) return NULL;                    \
+-    klassOop new_klass = ((Klass*) result)->as_klassOop();	\
+-    OrderAccess::storestore();	\
+-    post_new_init_klass(klass_klass, new_klass, size);	\
+-    return result;	\
++    klassOop new_klass = ((Klass*) result)->as_klassOop();      \
++    OrderAccess::storestore();  \
++    post_new_init_klass(klass_klass, new_klass, size);  \
++    return result;      \
+   }
+ 
+   bool null_vtbl() { return *(intptr_t*)this == 0; }
+@@ -208,7 +205,7 @@
+   // must remain first and last, unless oop_block_beg() and/or oop_block_end()
+   // are updated.  Grouping the oop fields in a single block simplifies oop
+   // iteration.
+-  // 
++  //
+ 
+   // Cache of last observed secondary supertype
+   klassOop    _secondary_super_cache;
+@@ -230,7 +227,7 @@
+ 
+   //
+   // End of the oop block.
+-  // 
++  //
+ 
+   jint        _modifier_flags;  // Processed access flags, for use by Class.getModifiers.
+   AccessFlags _access_flags;    // Access flags. The class/interface distinction is stored here.
+@@ -338,7 +335,7 @@
+  protected:                                // internal accessors
+   klassOop subklass_oop() const            { return _subklass; }
+   klassOop next_sibling_oop() const        { return _next_sibling; }
+-  void     set_subklass(klassOop s); 
++  void     set_subklass(klassOop s);
+   void     set_next_sibling(klassOop s);
+ 
+   oop* adr_super()           const { return (oop*)&_super;             }
+@@ -460,7 +457,7 @@
+   // subclass check
+   bool is_subclass_of(klassOop k) const;
+   // subtype check: true if is_subclass_of, or if k is interface and receiver implements it
+-  bool is_subtype_of(klassOop k) const { 
++  bool is_subtype_of(klassOop k) const {
+     juint    off = k->klass_part()->super_check_offset();
+     klassOop sup = *(klassOop*)( (address)as_klassOop() + off );
+     const juint secondary_offset = secondary_super_cache_offset_in_bytes() + sizeof(oopDesc);
+@@ -484,7 +481,7 @@
+   // Casting
+   static Klass* cast(klassOop k) {
+     assert(k->is_klass(), "cast to Klass");
+-    return k->klass_part(); 
++    return k->klass_part();
+   }
+ 
+   // array copying
+@@ -497,7 +494,7 @@
+   // lookup operation for MethodLookupCache
+   friend class MethodLookupCache;
+   virtual methodOop uncached_lookup_method(symbolOop name, symbolOop signature) const;
+- public:  
++ public:
+   methodOop lookup_method(symbolOop name, symbolOop signature) const {
+     return uncached_lookup_method(name, signature);
+   }
+@@ -541,10 +538,10 @@
+ 
+   // Returns the Java name for a class (Resource allocated)
+   // For arrays, this returns the name of the element with a leading '['.
+-  // For classes, this returns the name with the package separators 
++  // For classes, this returns the name with the package separators
+   //     turned into '.'s.
+   const char* external_name() const;
+-  // Returns the name for a class (Resource allocated) as the class 
++  // Returns the name for a class (Resource allocated) as the class
+   // would appear in a signature.
+   // For arrays, this returns the name of the element with a leading '['.
+   // For classes, this returns the name with a leading 'L' and a trailing ';'
+@@ -568,7 +565,7 @@
+   virtual bool oop_is_klass()               const { return false; }
+   virtual bool oop_is_thread()              const { return false; }
+   virtual bool oop_is_method()              const { return false; }
+-  virtual bool oop_is_constMethod()	    const { return false; }
++  virtual bool oop_is_constMethod()         const { return false; }
+   virtual bool oop_is_methodData()          const { return false; }
+   virtual bool oop_is_constantPool()        const { return false; }
+   virtual bool oop_is_constantPoolCache()   const { return false; }
+@@ -607,7 +604,7 @@
+                                                     layout_helper_is_typeArray(layout_helper()),
+                                                     oop_is_typeArray_slow()); }
+   #undef assert_same_query
+-          
++
+   // Unless overridden, oop is parsable if it has a klass pointer.
+   virtual bool oop_is_parsable(oop obj) const { return true; }
+ 
+@@ -768,4 +765,3 @@
+   void verify_vtable_index(int index);
+ #endif
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/klass.inline.hpp openjdk/hotspot/src/share/vm/oops/klass.inline.hpp
+--- openjdk6/hotspot/src/share/vm/oops/klass.inline.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/klass.inline.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)klass.inline.hpp	1.5 07/05/05 17:06:04 JVM"
+-#endif
+ /*
+  * Copyright 2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ inline void Klass::set_prototype_header(markOop header) {
+diff -ruN openjdk6/hotspot/src/share/vm/oops/klassKlass.cpp openjdk/hotspot/src/share/vm/oops/klassKlass.cpp
+--- openjdk6/hotspot/src/share/vm/oops/klassKlass.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/klassKlass.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)klassKlass.cpp	1.69 07/05/29 09:44:21 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -54,7 +51,7 @@
+   MarkSweep::mark_and_push(k->adr_secondary_supers());
+   MarkSweep::mark_and_push(k->adr_java_mirror());
+   MarkSweep::mark_and_push(k->adr_name());
+-  // We follow the subklass and sibling links at the end of the 
++  // We follow the subklass and sibling links at the end of the
+   // marking phase, since otherwise following them will prevent
+   // class unloading (all classes are transitively linked from
+   // java.lang.Object).
+@@ -64,7 +61,7 @@
+ 
+ #ifndef SERIALGC
+ void klassKlass::oop_follow_contents(ParCompactionManager* cm,
+-				     oop obj) {
++                                     oop obj) {
+   Klass* k = Klass::cast(klassOop(obj));
+   // If we are alive it is valid to keep our superclass and subtype caches alive
+   PSParallelCompact::mark_and_push(cm, k->adr_super());
+@@ -74,7 +71,7 @@
+   PSParallelCompact::mark_and_push(cm, k->adr_secondary_supers());
+   PSParallelCompact::mark_and_push(cm, k->adr_java_mirror());
+   PSParallelCompact::mark_and_push(cm, k->adr_name());
+-  // We follow the subklass and sibling links at the end of the 
++  // We follow the subklass and sibling links at the end of the
+   // marking phase, since otherwise following them will prevent
+   // class unloading (all classes are transitively linked from
+   // java.lang.Object).
+@@ -97,8 +94,8 @@
+   // The following are in the perm gen and are treated
+   // specially in a later phase of a perm gen collection; ...
+   assert(oop(k)->is_perm(), "should be in perm");
+-  assert(oop(k->adr_subklass())->is_perm(), "should be in perm");
+-  assert(oop(k->adr_next_sibling())->is_perm(), "should be in perm");
++  assert(oop(k->subklass())->is_perm_or_null(), "should be in perm");
++  assert(oop(k->next_sibling())->is_perm_or_null(), "should be in perm");
+   // ... don't scan them normally, but remember this klassKlass
+   // for later (see, for instance, oop_follow_contents above
+   // for what MarkSweep does with it.
+@@ -183,7 +180,7 @@
+ }
+ 
+ int klassKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
+-				    HeapWord* beg_addr, HeapWord* end_addr) {
++                                    HeapWord* beg_addr, HeapWord* end_addr) {
+   Klass* k = Klass::cast(klassOop(obj));
+ 
+   oop* const beg_oop = MAX2((oop*)beg_addr, k->oop_block_beg());
+@@ -249,7 +246,7 @@
+   }
+   if (k->name() != NULL) {
+     guarantee(Universe::heap()->is_in_permanent(k->name()),
+-	      "should be in permspace");
++              "should be in permspace");
+     guarantee(k->name()->is_symbol(), "should be symbol");
+   }
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/oops/klassKlass.hpp openjdk/hotspot/src/share/vm/oops/klassKlass.hpp
+--- openjdk6/hotspot/src/share/vm/oops/klassKlass.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/klassKlass.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)klassKlass.hpp	1.44 07/05/29 09:44:21 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A klassKlass serves as the fix point of the klass chain.
+@@ -48,10 +45,10 @@
+   // Casting from klassOop
+   static klassKlass* cast(klassOop k) {
+     assert(k->klass_part()->oop_is_klass(), "cast to klassKlass");
+-    return (klassKlass*) k->klass_part(); 
++    return (klassKlass*) k->klass_part();
+   }
+ 
+-  // Sizing 
++  // Sizing
+   static int header_size()  { return oopDesc::header_size() + sizeof(klassKlass)/HeapWordSize; }
+   int object_size() const   { return align_object_size(header_size()); }
+ 
+@@ -82,4 +79,3 @@
+   const char* internal_name() const;
+   void oop_verify_on(oop obj, outputStream* st);
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/klassOop.cpp openjdk/hotspot/src/share/vm/oops/klassOop.cpp
+--- openjdk6/hotspot/src/share/vm/oops/klassOop.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/klassOop.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)klassOop.cpp	1.14 07/05/05 17:06:04 JVM"
+-#endif
+ /*
+  * Copyright 1997 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,9 +19,8 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_klassOop.cpp.incl"
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/klassOop.hpp openjdk/hotspot/src/share/vm/oops/klassOop.hpp
+--- openjdk6/hotspot/src/share/vm/oops/klassOop.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/klassOop.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)klassOop.hpp	1.19 07/05/05 17:06:04 JVM"
+-#endif
+ /*
+  * Copyright 1997-2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,11 +19,11 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A klassOop is the C++ equivalent of a Java class.
+-// Part of a klassOopDesc is a Klass which handle the 
++// Part of a klassOopDesc is a Klass which handle the
+ // dispatching for the C++ method calls.
+ 
+ //  klassOop object layout:
+@@ -45,6 +42,3 @@
+   // returns the Klass part containing dispatching behavior
+   Klass* klass_part()                            { return (Klass*)((address)this + klass_part_offset_in_bytes()); }
+ };
+-
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/klassPS.hpp openjdk/hotspot/src/share/vm/oops/klassPS.hpp
+--- openjdk6/hotspot/src/share/vm/oops/klassPS.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/klassPS.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,28 +1,25 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)klassPS.hpp	1.1 07/05/14 06:13:07 JVM"
+-#endif
+ /*
+  * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+- *   
++ *
+  * This code is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License version 2 only, as
+  * published by the Free Software Foundation.
+- *   
++ *
+  * This code is distributed in the hope that it will be useful, but WITHOUT
+  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+  * version 2 for more details (a copy is included in the LICENSE file that
+  * accompanied this code).
+- *  
++ *
+  * You should have received a copy of the GNU General Public License version
+  * 2 along with this work; if not, write to the Free Software Foundation,
+  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+- *   
++ *
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ #ifndef KLASS_PS_H
+ #define KLASS_PS_H
+@@ -42,7 +39,7 @@
+   virtual void oop_follow_contents(ParCompactionManager* cm, oop obj);      \
+   virtual int  oop_update_pointers(ParCompactionManager* cm, oop obj);      \
+   virtual int  oop_update_pointers(ParCompactionManager* cm, oop obj,       \
+-				   HeapWord* beg_addr, HeapWord* end_addr);
++                                   HeapWord* beg_addr, HeapWord* end_addr);
+ 
+ // Pure virtual version for klass.hpp
+ #define PARALLEL_GC_DECLS_PV \
+@@ -51,7 +48,7 @@
+   virtual void oop_follow_contents(ParCompactionManager* cm, oop obj) = 0;  \
+   virtual int  oop_update_pointers(ParCompactionManager* cm, oop obj) = 0;  \
+   virtual int  oop_update_pointers(ParCompactionManager* cm, oop obj,       \
+-				   HeapWord* beg_addr, HeapWord* end_addr) = 0;
++                                   HeapWord* beg_addr, HeapWord* end_addr) = 0;
+ #else  // SERIALGC
+ #define PARALLEL_GC_DECLS
+ #define PARALLEL_GC_DECLS_PV
+diff -ruN openjdk6/hotspot/src/share/vm/oops/klassVtable.cpp openjdk/hotspot/src/share/vm/oops/klassVtable.cpp
+--- openjdk6/hotspot/src/share/vm/oops/klassVtable.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/klassVtable.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)klassVtable.cpp	1.146 07/07/19 12:19:09 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -38,20 +35,20 @@
+ // this function computes the vtable size (including the size needed for miranda
+ // methods) and the number of miranda methods in this class
+ // Note on Miranda methods: Let's say there is a class C that implements
+-// interface I.  Let's say there is a method m in I that neither C nor any 
++// interface I.  Let's say there is a method m in I that neither C nor any
+ // of its super classes implement (i.e there is no method of any access, with
+ // the same name and signature as m), then m is a Miranda method which is
+ // entered as a public abstract method in C's vtable.  From then on it should
+ // treated as any other public method in C for method over-ride purposes.
+ void klassVtable::compute_vtable_size_and_num_mirandas(int &vtable_length,
+-						       int &num_miranda_methods,
+-						       klassOop super,
+-						       objArrayOop methods, 
+-						       AccessFlags class_flags, 
+-						       oop classloader,
+-						       symbolOop classname,
+-						       objArrayOop local_interfaces
+-						       ) {
++                                                       int &num_miranda_methods,
++                                                       klassOop super,
++                                                       objArrayOop methods,
++                                                       AccessFlags class_flags,
++                                                       oop classloader,
++                                                       symbolOop classname,
++                                                       objArrayOop local_interfaces
++                                                       ) {
+ 
+   No_Safepoint_Verifier nsv;
+ 
+@@ -62,34 +59,34 @@
+   // start off with super's vtable length
+   instanceKlass* sk = (instanceKlass*)super->klass_part();
+   vtable_length = super == NULL ? 0 : sk->vtable_length();
+-  
++
+   // go thru each method in the methods table to see if it needs a new entry
+   int len = methods->length();
+   for (int i = 0; i < len; i++) {
+     assert(methods->obj_at(i)->is_method(), "must be a methodOop");
+     methodOop m = methodOop(methods->obj_at(i));
+-    
++
+     if (needs_new_vtable_entry(m, super, classloader, classname, class_flags)) {
+-      vtable_length += vtableEntry::size(); // we need a new entry      
++      vtable_length += vtableEntry::size(); // we need a new entry
+     }
+   }
+-  
++
+   // compute the number of mirandas methods that must be added to the end
+   num_miranda_methods = get_num_mirandas(super, methods, local_interfaces);
+   vtable_length += (num_miranda_methods * vtableEntry::size());
+-  
++
+   if (Universe::is_bootstrapping() && vtable_length == 0) {
+-    // array classes don't have their superclass set correctly during 
++    // array classes don't have their superclass set correctly during
+     // bootstrapping
+     vtable_length = Universe::base_vtable_size();
+   }
+ 
+-  if (super == NULL && !Universe::is_bootstrapping() && 
++  if (super == NULL && !Universe::is_bootstrapping() &&
+       vtable_length != Universe::base_vtable_size()) {
+     // Someone is attempting to redefine java.lang.Object incorrectly.  The
+     // only way this should happen is from
+     // SystemDictionary::resolve_from_stream(), which will detect this later
+-    // and throw a security exception.  So don't assert here to let 
++    // and throw a security exception.  So don't assert here to let
+     // the exception occur.
+     vtable_length = Universe::base_vtable_size();
+   }
+@@ -119,7 +116,7 @@
+ #endif
+     superVtable->copy_vtable_to(table());
+ #ifndef PRODUCT
+-    if (PrintVtables && Verbose) {  
++    if (PrintVtables && Verbose) {
+       tty->print_cr("copy vtable from %s to %s size %d", sk->internal_name(), klass()->internal_name(), _length);
+     }
+ #endif
+@@ -128,17 +125,17 @@
+ }
+ 
+ // Revised lookup semantics   introduced 1.3 (Kestral beta)
+-void klassVtable::initialize_vtable(bool checkconstraints, TRAPS) {  
+-      
++void klassVtable::initialize_vtable(bool checkconstraints, TRAPS) {
++
+   // Note:  Arrays can have intermediate array supers.  Use java_super to skip them.
+   KlassHandle super (THREAD, klass()->java_super());
+   int nofNewEntries = 0;
+- 
++
+ 
+   if (PrintVtables && !klass()->oop_is_array()) {
+-    ResourceMark rm(THREAD);      
+-    tty->print_cr("Initializing: %s", _klass->name()->as_C_string());    
+-  }  
++    ResourceMark rm(THREAD);
++    tty->print_cr("Initializing: %s", _klass->name()->as_C_string());
++  }
+ 
+ #ifdef ASSERT
+   oop* end_of_obj = (oop*)_klass() + _klass()->size();
+@@ -146,7 +143,7 @@
+   assert(end_of_vtable <= end_of_obj, "vtable extends beyond end");
+ #endif
+ 
+-  if (Universe::is_bootstrapping()) {    
++  if (Universe::is_bootstrapping()) {
+     // just clear everything
+     for (int i = 0; i < _length; i++) table()[i].clear();
+     return;
+@@ -157,33 +154,33 @@
+     assert(super_vtable_len == _length, "arrays shouldn't introduce new methods");
+   } else {
+     assert(_klass->oop_is_instance(), "must be instanceKlass");
+-    
+-    objArrayHandle methods(THREAD, ik()->methods());    
++
++    objArrayHandle methods(THREAD, ik()->methods());
+     int len = methods()->length();
+-    int initialized = super_vtable_len;    
++    int initialized = super_vtable_len;
+ 
+     // update_super_vtable can stop for gc - ensure using handles
+     for (int i = 0; i < len; i++) {
+       HandleMark hm(THREAD);
+-      assert(methods()->obj_at(i)->is_method(), "must be a methodOop");    
++      assert(methods()->obj_at(i)->is_method(), "must be a methodOop");
+       methodHandle mh(THREAD, (methodOop)methods()->obj_at(i));
+ 
+       bool needs_new_entry = update_super_vtable(ik(), mh, super_vtable_len, checkconstraints, CHECK);
+ 
+       if (needs_new_entry) {
+         put_method_at(mh(), initialized);
+-	mh()->set_vtable_index(initialized); // set primary vtable index
+-	initialized++;        
+-      } 
++        mh()->set_vtable_index(initialized); // set primary vtable index
++        initialized++;
++      }
+     }
+ 
+     // add miranda methods; it will also update the value of initialized
+     fill_in_mirandas(initialized);
+-  
+-    // In class hierachies where the accesibility is not increasing (i.e., going from private -> 
++
++    // In class hierachies where the accesibility is not increasing (i.e., going from private ->
+     // package_private -> publicprotected), the vtable might actually be smaller than our initial
+-    // calculation. 
+-    assert(initialized <= _length, "vtable initialization failed");    
++    // calculation.
++    assert(initialized <= _length, "vtable initialization failed");
+     for(;initialized < _length; initialized++) {
+       put_method_at(NULL, initialized);
+     }
+@@ -197,18 +194,18 @@
+ klassVtable::AccessType klassVtable::vtable_accessibility_at(int i) {
+   // This vtable is not implementing the specific method
+   if (i >= length()) return acc_private;
+-      
++
+   // Compute AccessType for current method. public or protected we are done.
+   methodOop m = method_at(i);
+   if (m->is_protected() || m->is_public()) return acc_publicprotected;
+-  
++
+   AccessType acc = m->is_package_private() ? acc_package_private : acc_private;
+ 
+   // Compute AccessType for method in super classes
+-  klassOop super = klass()->super();  
++  klassOop super = klass()->super();
+   AccessType super_acc = (super != NULL) ? instanceKlass::cast(klass()->super())->vtable()->vtable_accessibility_at(i)
+                                          : acc_private;
+-  
++
+   // Merge
+   return (AccessType)MAX2((int)acc, (int)super_acc);
+ }
+@@ -218,7 +215,7 @@
+ // OR return true if a new vtable entry is required
+ // Only called for instanceKlass's, i.e. not for arrays
+ // If that changed, could not use _klass as handle for klass
+-bool klassVtable::update_super_vtable(instanceKlass* klass, methodHandle target_method, int super_vtable_len, bool checkconstraints, TRAPS) {    
++bool klassVtable::update_super_vtable(instanceKlass* klass, methodHandle target_method, int super_vtable_len, bool checkconstraints, TRAPS) {
+   ResourceMark rm;
+   bool allocate_new = true;
+   assert(klass->oop_is_instance(), "must be instanceKlass");
+@@ -227,7 +224,7 @@
+   // If we allocate a vtable entry, we will update it to a non-negative number.
+   target_method()->set_vtable_index(methodOopDesc::nonvirtual_vtable_index);
+ 
+-  // Static and <init> methods are never in 
++  // Static and <init> methods are never in
+   if (target_method()->is_static() || target_method()->name() ==  vmSymbols::object_initializer_name()) {
+     return false;
+   }
+@@ -235,9 +232,9 @@
+   if (klass->is_final() || target_method()->is_final()) {
+     // a final method never needs a new entry; final methods can be statically
+     // resolved and they have to be present in the vtable only if they override
+-    // a super's method, in which case they re-use its entry          
++    // a super's method, in which case they re-use its entry
+     allocate_new = false;
+-  }      
++  }
+ 
+   // we need a new entry if there is no superclass
+   if (klass->super() == NULL) {
+@@ -248,25 +245,25 @@
+   if (target_method()->is_private()) {
+     return allocate_new;
+   }
+-  
++
+   // search through the vtable and update overridden entries
+   // Since check_signature_loaders acquires SystemDictionary_lock
+   // which can block for gc, once we are in this loop, use handles, not
+   // unhandled oops unless they are reinitialized for each loop
+   // handles for name, signature, klass, target_method
+   // not for match_method, holder
+-  
++
+   symbolHandle name(THREAD,target_method()->name());
+-  symbolHandle signature(THREAD,target_method()->signature());    
++  symbolHandle signature(THREAD,target_method()->signature());
+   for(int i = 0; i < super_vtable_len; i++) {
+     methodOop match_method = method_at(i);
+     // Check if method name matches
+     if (match_method->name() == name() && match_method->signature() == signature()) {
+-      
++
+       instanceKlass* holder = instanceKlass::cast(match_method->method_holder());
+ 
+       // Check if the match_method is accessable from current class
+-      
++
+       bool same_package_init = false;
+       bool same_package_flag = false;
+       bool simple_match = match_method->is_public()  || match_method->is_protected();
+@@ -277,27 +274,27 @@
+         simple_match = match_method->is_package_private() && same_package_flag;
+       }
+       // match_method is the superclass' method. Note we can't override
+-      // and shouldn't access superclass' ACC_PRIVATE methods 
++      // and shouldn't access superclass' ACC_PRIVATE methods
+       // (although they have been copied into our vtable)
+       // A simple form of this statement is:
+       // if ( (match_method->is_public()  || match_method->is_protected()) ||
+       //    (match_method->is_package_private() && holder->is_same_class_package(klass->class_loader(), klass->name()))) {
+       //
+       // The complexity is introduced it avoid recomputing 'is_same_class_package' which is expensive.
+-      if (simple_match) {      
++      if (simple_match) {
+         // Check if target_method and match_method has same level of accessibility. The accesibility of the
+         // match method is the "most-general" visibility of all entries at it's particular vtable index for
+         // all superclasses. This check must be done before we override the current entry in the vtable.
+-        AccessType at = vtable_accessibility_at(i);        
++        AccessType at = vtable_accessibility_at(i);
+         bool same_access = false;
+ 
+-        if (  (at == acc_publicprotected && (target_method()->is_public() || target_method()->is_protected()) 
++        if (  (at == acc_publicprotected && (target_method()->is_public() || target_method()->is_protected())
+            || (at == acc_package_private && (target_method()->is_package_private() &&
+                                             (( same_package_init && same_package_flag) ||
+                                              (!same_package_init && holder->is_same_class_package(_klass->class_loader(), _klass->name()))))))) {
+            same_access = true;
+         }
+-        
++
+         if (checkconstraints) {
+         // Override vtable entry if passes loader constraint check
+         // if loader constraint checking requested
+@@ -311,65 +308,65 @@
+           Handle super_loader(THREAD, super_klass->class_loader());
+ 
+           if (this_loader() != super_loader()) {
+-	    ResourceMark rm(THREAD);
+-            char* failed_type_name = 
+-	      SystemDictionary::check_signature_loaders(signature, this_loader,
+-							super_loader, true, 
+-							CHECK_(false));
+-	    if (failed_type_name != NULL) {
+-	      const char* msg = "loader constraint violation: when resolving "
+-		"overridden method \"%s\" the class loader (instance"
+-		" of %s) of the current class, %s, and its superclass loader "
+-		"(instance of %s), have different Class objects for the type "
+-		"%s used in the signature";
+-	      char* sig = target_method()->name_and_sig_as_C_string();
+-	      const char* loader1 = SystemDictionary::loader_name(this_loader());
+-	      char* current = _klass->name()->as_C_string();
+-	      const char* loader2 = SystemDictionary::loader_name(super_loader());
+-	      size_t buflen = strlen(msg) + strlen(sig) + strlen(loader1) + 
+-		strlen(current) + strlen(loader2) + strlen(failed_type_name);
+-	      char* buf = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, buflen);
+-	      jio_snprintf(buf, buflen, msg, sig, loader1, current, loader2,
+-			   failed_type_name);
+-	      THROW_MSG_(vmSymbols::java_lang_LinkageError(), buf, false);
+-	    }
++            ResourceMark rm(THREAD);
++            char* failed_type_name =
++              SystemDictionary::check_signature_loaders(signature, this_loader,
++                                                        super_loader, true,
++                                                        CHECK_(false));
++            if (failed_type_name != NULL) {
++              const char* msg = "loader constraint violation: when resolving "
++                "overridden method \"%s\" the class loader (instance"
++                " of %s) of the current class, %s, and its superclass loader "
++                "(instance of %s), have different Class objects for the type "
++                "%s used in the signature";
++              char* sig = target_method()->name_and_sig_as_C_string();
++              const char* loader1 = SystemDictionary::loader_name(this_loader());
++              char* current = _klass->name()->as_C_string();
++              const char* loader2 = SystemDictionary::loader_name(super_loader());
++              size_t buflen = strlen(msg) + strlen(sig) + strlen(loader1) +
++                strlen(current) + strlen(loader2) + strlen(failed_type_name);
++              char* buf = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, buflen);
++              jio_snprintf(buf, buflen, msg, sig, loader1, current, loader2,
++                           failed_type_name);
++              THROW_MSG_(vmSymbols::java_lang_LinkageError(), buf, false);
++            }
+           }
+         }
+         put_method_at(target_method(), i);
+ 
+ 
+         if (same_access) {
+-          // target and match has same accessiblity - share entry  
++          // target and match has same accessiblity - share entry
+           allocate_new = false;
+           target_method()->set_vtable_index(i);
+ #ifndef PRODUCT
+-          if (PrintVtables && Verbose) {  
+-            AccessType targetacc; 
+-            if (target_method()->is_protected() || 
++          if (PrintVtables && Verbose) {
++            AccessType targetacc;
++            if (target_method()->is_protected() ||
+                  target_method()->is_public()) {
+                targetacc =  acc_publicprotected;
+             } else {
+               targetacc = target_method()->is_package_private() ? acc_package_private : acc_private;
+             }
+-            tty->print_cr("overriding with %s::%s index %d, original flags: %x overriders flags: %x", 
+-             _klass->internal_name(), (target_method() != NULL) ? 
++            tty->print_cr("overriding with %s::%s index %d, original flags: %x overriders flags: %x",
++             _klass->internal_name(), (target_method() != NULL) ?
+              target_method()->name()->as_C_string() : "<NULL>", i,
+              at, targetacc);
+           }
+ #endif /*PRODUCT*/
+         } else {
+ #ifndef PRODUCT
+-          if (PrintVtables && Verbose) {  
+-            AccessType targetacc; 
+-            if (target_method()->is_protected() || 
++          if (PrintVtables && Verbose) {
++            AccessType targetacc;
++            if (target_method()->is_protected() ||
+                  target_method()->is_public()) {
+                targetacc =  acc_publicprotected;
+             } else {
+               targetacc = target_method()->is_package_private() ? acc_package_private : acc_private;
+             }
+-            tty->print_cr("override %s %s::%s at index %d, original flags: %x overriders flags: %x", 
++            tty->print_cr("override %s %s::%s at index %d, original flags: %x overriders flags: %x",
+             allocate_new ? "+ new" : "only",
+-            _klass->internal_name(), (target_method() != NULL) ? 
++            _klass->internal_name(), (target_method() != NULL) ?
+             target_method()->name()->as_C_string() : "<NULL>", i,
+             at, targetacc);
+            }
+@@ -380,14 +377,14 @@
+   }
+   return allocate_new;
+ }
+-      
++
+ 
+ 
+ void klassVtable::put_method_at(methodOop m, int index) {
+   assert(m->is_oop_or_null(), "Not an oop or null");
+ #ifndef PRODUCT
+-  if (PrintVtables && Verbose) {  
+-    tty->print_cr("adding %s::%s at index %d", _klass->internal_name(), 
++  if (PrintVtables && Verbose) {
++    tty->print_cr("adding %s::%s at index %d", _klass->internal_name(),
+       (m != NULL) ? m->name()->as_C_string() : "<NULL>", index);
+   }
+   assert(unchecked_method_at(index)->is_oop_or_null(), "Not an oop or null");
+@@ -395,17 +392,17 @@
+   table()[index].set(m);
+ }
+ 
+-// Find out if a method "m" with superclass "super", loader "classloader" and 
++// Find out if a method "m" with superclass "super", loader "classloader" and
+ // name "classname" needs a new vtable entry.  Let P be a class package defined
+ // by "classloader" and "classname".
+ // NOTE: The logic used here is very similar to the one used for computing
+ // the vtables indices for a method. We cannot directly use that function because,
+ // when the Universe is boostrapping, a super's vtable might not be initialized.
+-bool klassVtable::needs_new_vtable_entry(methodOop target_method, 
+-					 klassOop super, 
+-					 oop classloader,
+-					 symbolOop classname,
+-					 AccessFlags class_flags) {
++bool klassVtable::needs_new_vtable_entry(methodOop target_method,
++                                         klassOop super,
++                                         oop classloader,
++                                         symbolOop classname,
++                                         AccessFlags class_flags) {
+   if ((class_flags.is_final() || target_method->is_final()) ||
+       // a final method never needs a new entry; final methods can be statically
+       // resolved and they have to be present in the vtable only if they override
+@@ -446,33 +443,33 @@
+ 
+     if (!match_method->is_static()) { // we want only instance method matches
+       if ((target_method->is_public() || target_method->is_protected()) &&
+-	  (match_method->is_public()  || match_method->is_protected())) {
+-	// target and match are public/protected; we do not need a new entry
++          (match_method->is_public()  || match_method->is_protected())) {
++        // target and match are public/protected; we do not need a new entry
+         return false;
+       }
+ 
+       if (target_method->is_package_private() &&
+-	  match_method->is_package_private() &&
+-	  holder->is_same_class_package(classloader, classname)) {
+-	// target and match are P private; we do not need a new entry
++          match_method->is_package_private() &&
++          holder->is_same_class_package(classloader, classname)) {
++        // target and match are P private; we do not need a new entry
+         return false;
+       }
+     }
+-    
++
+     k = holder->super(); // haven't found a match yet; continue to look
+   }
+-  
++
+   // if the target method is public or protected it may have a matching
+   // miranda method in the super, whose entry it should re-use.
+   if (target_method->is_public() || target_method->is_protected()) {
+     instanceKlass *sk = instanceKlass::cast(super);
+     if (sk->has_miranda_methods()) {
+       if (sk->lookup_method_in_all_interfaces(name, signature) != NULL) {
+-	return false;  // found a matching miranda; we do not need a new entry
++        return false;  // found a matching miranda; we do not need a new entry
+       }
+     }
+   }
+-  
++
+   return true; // found no match; we need a new entry
+ }
+ 
+@@ -496,9 +493,9 @@
+   methodOop m = method_at(i);
+   klassOop method_holder = m->method_holder();
+   instanceKlass *mhk = instanceKlass::cast(method_holder);
+-  
++
+   // miranda methods are interface methods in a class's vtable
+-  if (mhk->is_interface()) {  
++  if (mhk->is_interface()) {
+     assert(m->is_public() && m->is_abstract(), "should be public and abstract");
+     assert(ik()->implements_interface(method_holder) , "this class should implement the interface");
+     assert(is_miranda(m, ik()->methods(), ik()->super()), "should be a miranda_method");
+@@ -519,18 +516,18 @@
+       return true;
+     } else {
+       if (instanceKlass::cast(super)->lookup_method(name, signature) == NULL) {
+-	// super class hierarchy does not implement it
+-	return true;
++        // super class hierarchy does not implement it
++        return true;
+       }
+-    } 
++    }
+   }
+-  return false; 
++  return false;
+ }
+ 
+ void klassVtable::add_new_mirandas_to_list(GrowableArray<methodOop>* list_of_current_mirandas,
+-					   objArrayOop current_interface_methods,
+-					   objArrayOop class_methods,
+-					   klassOop super) {
++                                           objArrayOop current_interface_methods,
++                                           objArrayOop class_methods,
++                                           klassOop super) {
+   // iterate thru the current interface's method to see if it a miranda
+   int num_methods = current_interface_methods->length();
+   for (int i = 0; i < num_methods; i++) {
+@@ -541,29 +538,29 @@
+     for (int j = 0; j < num_of_current_mirandas; j++) {
+       methodOop miranda = list_of_current_mirandas->at(j);
+       if ((im->name() == miranda->name()) &&
+-	  (im->signature() == miranda->signature())) {
+-	is_duplicate = true;
+-	break;
++          (im->signature() == miranda->signature())) {
++        is_duplicate = true;
++        break;
+       }
+     }
+-    
++
+     if (!is_duplicate) { // we don't want duplicate miranda entries in the vtable
+       if (is_miranda(im, class_methods, super)) { // is it a miranda at all?
+-	instanceKlass *sk = instanceKlass::cast(super);
+-	// check if it is a duplicate of a super's miranda
+-	if (sk->lookup_method_in_all_interfaces(im->name(), im->signature()) == NULL) {
+-	  list_of_current_mirandas->append(im);
+-	}
++        instanceKlass *sk = instanceKlass::cast(super);
++        // check if it is a duplicate of a super's miranda
++        if (sk->lookup_method_in_all_interfaces(im->name(), im->signature()) == NULL) {
++          list_of_current_mirandas->append(im);
++        }
+       }
+     }
+   }
+ }
+ 
+ void klassVtable::get_mirandas(GrowableArray<methodOop>* mirandas,
+-			       klassOop super, objArrayOop class_methods, 
+-			       objArrayOop local_interfaces) {
++                               klassOop super, objArrayOop class_methods,
++                               objArrayOop local_interfaces) {
+   assert((mirandas->length() == 0) , "current mirandas must be 0");
+-  
++
+   // iterate thru the local interfaces looking for a miranda
+   int num_local_ifs = local_interfaces->length();
+   for (int i = 0; i < num_local_ifs; i++) {
+@@ -671,7 +668,7 @@
+ }
+ 
+ void klassVtable::oop_update_pointers(ParCompactionManager* cm,
+-				      HeapWord* beg_addr, HeapWord* end_addr) {
++                                      HeapWord* beg_addr, HeapWord* end_addr) {
+   const int n = length();
+   const int entry_size = vtableEntry::size();
+ 
+@@ -704,7 +701,7 @@
+   for (i = 0; i < len; i++) {
+     if ((HeapWord*)adr_method_at(i) >= mr.start()) break;
+   }
+-  for (; i < len; i++) {  
++  for (; i < len; i++) {
+     oop* adr = adr_method_at(i);
+     if ((HeapWord*)adr < mr.end()) blk->do_oop(adr);
+   }
+@@ -729,16 +726,16 @@
+       // First offset entry points to the first method_entry
+       intptr_t* method_entry  = (intptr_t *)(((address)klass->as_klassOop()) + offset_entry->offset());
+       intptr_t* end         = klass->end_of_itable();
+-  
++
+       _table_offset      = (intptr_t*)offset_entry - (intptr_t*)klass->as_klassOop();
+       _size_offset_table = (method_entry - ((intptr_t*)offset_entry)) / itableOffsetEntry::size();
+       _size_method_table = (end - method_entry)                  / itableMethodEntry::size();
+       assert(_table_offset >= 0 && _size_offset_table >= 0 && _size_method_table >= 0, "wrong computation");
+       return;
+-    }    
++    }
+   }
+ 
+-  // The length of the itable was either zero, or it has not yet been initialized.
++  // The length of the itable was either zero, or it has not yet been initialized.
+   _table_offset      = 0;
+   _size_offset_table = 0;
+   _size_method_table = 0;
+@@ -814,7 +811,7 @@
+ }
+ 
+ void klassItable::oop_update_pointers(ParCompactionManager* cm,
+-				      HeapWord* beg_addr, HeapWord* end_addr) {
++                                      HeapWord* beg_addr, HeapWord* end_addr) {
+   // offset table
+   itableOffsetEntry* ioe = offset_entry(0);
+   for(int i = 0; i < _size_offset_table; i++) {
+@@ -873,33 +870,32 @@
+ 
+ // Initialization
+ void klassItable::initialize_itable(bool checkconstraints, TRAPS) {
+-  // Cannot be setup doing bootstrapping, interfaces don't have
+-  // itables, and klass with only ones entry have empty itables
+-  if (Universe::is_bootstrapping() || 
+-      _klass->is_interface() ||
+-      _klass->itable_length() == itableOffsetEntry::size()) return;
+-
+-  // There's alway an extra itable entry so we can null-terminate it.
+-  guarantee(size_offset_table() >= 1, "too small");
+-  int num_interfaces = size_offset_table() - 1;
++  // Cannot be set up during bootstrapping
++  if (Universe::is_bootstrapping()) return;
++
++  int num_interfaces = nof_interfaces();
+   if (num_interfaces > 0) {
+-    if (TraceItables) tty->print_cr("%3d: Initializing itables for %s", ++initialize_count,
+-                                    _klass->name()->as_C_string());
+-    
++    if (TraceItables) tty->print_cr("%3d: Initializing itables for %s", ++initialize_count, _klass->name()->as_C_string());
++
++    // In debug mode, we get an extra NULL/NULL entry
++    debug_only(num_interfaces--);
++    assert(num_interfaces > 0, "to few interfaces in offset itable");
+ 
+     // Iterate through all interfaces
+     int i;
+     for(i = 0; i < num_interfaces; i++) {
+       itableOffsetEntry* ioe = offset_entry(i);
+       KlassHandle interf_h (THREAD, ioe->interface_klass());
+-      assert(interf_h() != NULL && ioe->offset() != 0, "bad offset entry in itable");      
+-      initialize_itable_for_interface(ioe->offset(), interf_h, checkconstraints, CHECK);      
+-    } 
+-
+-  }  
+-  // Check that the last entry is empty
+-  itableOffsetEntry* ioe = offset_entry(size_offset_table() - 1);
+-  guarantee(ioe->interface_klass() == NULL && ioe->offset() == 0, "terminator entry missing");
++      assert(interf_h() != NULL && ioe->offset() != 0, "bad offset entry in itable");
++      initialize_itable_for_interface(ioe->offset(), interf_h, checkconstraints, CHECK);
++    }
++
++#ifdef ASSERT
++    // Check that the last entry is empty
++    itableOffsetEntry* ioe = offset_entry(i);
++    assert(ioe->interface_klass() == NULL && ioe->offset() == 0, "terminator entry missing");
++#endif
++  }
+ }
+ 
+ 
+@@ -919,72 +915,72 @@
+   // don't need preserving across check_signature_loaders call
+   // methods needs a handle in case of gc from check_signature_loaders
+   for(; i < nof_methods; i++) {
+-    methodOop m = (methodOop)methods()->obj_at(i);    
++    methodOop m = (methodOop)methods()->obj_at(i);
+     symbolOop method_name = m->name();
+     symbolOop method_signature = m->signature();
+-      
++
+     // This is the same code as in LinkResolver::lookup_instance_method_in_klasses
+     methodOop target = klass->uncached_lookup_method(method_name, method_signature);
+     while (target != NULL && target->is_static()) {
+       // continue with recursive lookup through the superclass
+       klassOop super = Klass::cast(target->method_holder())->super();
+       target = (super == NULL) ? methodOop(NULL) : Klass::cast(super)->uncached_lookup_method(method_name, method_signature);
+-    }      
+-    if (target == NULL || !target->is_public() || target->is_abstract()) {        
++    }
++    if (target == NULL || !target->is_public() || target->is_abstract()) {
+       // Entry does not resolve. Leave it empty
+-    } else {            
++    } else {
+       // Entry did resolve, check loader constraints before initializing
+       // if checkconstraints requested
+       methodHandle  target_h (THREAD, target); // preserve across gc
+       if (checkconstraints) {
+         Handle method_holder_loader (THREAD, instanceKlass::cast(target->method_holder())->class_loader());
+         if (method_holder_loader() != interface_loader()) {
+-	  ResourceMark rm(THREAD);
+-	  char* failed_type_name =
+-	    SystemDictionary::check_signature_loaders(method_signature, 
+-						      method_holder_loader, 
+-						      interface_loader, 
+-						      true, CHECK);
+-	  if (failed_type_name != NULL) {
+-	    const char* msg = "loader constraint violation in interface "
+-	      "itable initialization: when resolving method \"%s\" the class"
+-	      " loader (instance of %s) of the current class, %s, "
+-	      "and the class loader (instance of %s) for interface "
+-	      "%s have different Class objects for the type %s "
+-	      "used in the signature";
+-	    char* sig = target_h()->name_and_sig_as_C_string();
+-	    const char* loader1 = SystemDictionary::loader_name(method_holder_loader());
+-	    char* current = klass->name()->as_C_string();
+-	    const char* loader2 = SystemDictionary::loader_name(interface_loader());
+-	    char* iface = instanceKlass::cast(interf_h())->name()->as_C_string();
+-	    size_t buflen = strlen(msg) + strlen(sig) + strlen(loader1) + 
+-	      strlen(current) + strlen(loader2) + strlen(iface) + 
+-	      strlen(failed_type_name);
+-	    char* buf = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, buflen);
+-	    jio_snprintf(buf, buflen, msg, sig, loader1, current, loader2,
+-			 iface, failed_type_name);
+-	    THROW_MSG(vmSymbols::java_lang_LinkageError(), buf);
+-	  }
++          ResourceMark rm(THREAD);
++          char* failed_type_name =
++            SystemDictionary::check_signature_loaders(method_signature,
++                                                      method_holder_loader,
++                                                      interface_loader,
++                                                      true, CHECK);
++          if (failed_type_name != NULL) {
++            const char* msg = "loader constraint violation in interface "
++              "itable initialization: when resolving method \"%s\" the class"
++              " loader (instance of %s) of the current class, %s, "
++              "and the class loader (instance of %s) for interface "
++              "%s have different Class objects for the type %s "
++              "used in the signature";
++            char* sig = target_h()->name_and_sig_as_C_string();
++            const char* loader1 = SystemDictionary::loader_name(method_holder_loader());
++            char* current = klass->name()->as_C_string();
++            const char* loader2 = SystemDictionary::loader_name(interface_loader());
++            char* iface = instanceKlass::cast(interf_h())->name()->as_C_string();
++            size_t buflen = strlen(msg) + strlen(sig) + strlen(loader1) +
++              strlen(current) + strlen(loader2) + strlen(iface) +
++              strlen(failed_type_name);
++            char* buf = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, buflen);
++            jio_snprintf(buf, buflen, msg, sig, loader1, current, loader2,
++                         iface, failed_type_name);
++            THROW_MSG(vmSymbols::java_lang_LinkageError(), buf);
++          }
+         }
+       }
+-      
++
+       // ime may have moved during GC so recalculate address
+       itableOffsetEntry::method_entry(_klass(), method_table_offset)[ime_num].initialize(target_h());
+     }
+     // Progress to next entry
+     ime_num++;
+-  }  
++  }
+ }
+ 
+-// Update entry for specific methodOop
++// Update entry for specific methodOop
+ void klassItable::initialize_with_method(methodOop m) {
+   itableMethodEntry* ime = method_entry(0);
+-  for(int i = 0; i < _size_method_table; i++) {    
+-    if (ime->method() == m) {      
+-      ime->initialize(m);      
++  for(int i = 0; i < _size_method_table; i++) {
++    if (ime->method() == m) {
++      ime->initialize(m);
+     }
+     ime++;
+-  }  
++  }
+ }
+ 
+ void klassItable::adjust_method_entries(methodOop* old_methods, methodOop* new_methods,
+@@ -1015,7 +1011,7 @@
+       }
+       ime++;
+     }
+-  } 
++  }
+ }
+ 
+ 
+@@ -1026,14 +1022,14 @@
+ };
+ 
+ // Visit all interfaces with at least one method (excluding <clinit>)
+-void visit_all_interfaces(objArrayOop transitive_intf, InterfaceVisiterClosure *blk) {  
+-  // Handle array argument  
++void visit_all_interfaces(objArrayOop transitive_intf, InterfaceVisiterClosure *blk) {
++  // Handle array argument
+   for(int i = 0; i < transitive_intf->length(); i++) {
+     klassOop intf = (klassOop)transitive_intf->obj_at(i);
+     assert(Klass::cast(intf)->is_interface(), "sanity check");
+-    
++
+     // Find no. of methods excluding a <clinit>
+-    int method_count = instanceKlass::cast(intf)->methods()->length();    
++    int method_count = instanceKlass::cast(intf)->methods()->length();
+     if (method_count > 0) {
+       methodOop m = (methodOop)instanceKlass::cast(intf)->methods()->obj_at(0);
+       assert(m != NULL && m->is_method(), "sanity check");
+@@ -1044,8 +1040,8 @@
+ 
+     // Only count interfaces with at least one method
+     if (method_count > 0) {
+-      blk->doit(intf, method_count);       
+-    }    
++      blk->doit(intf, method_count);
++    }
+   }
+ }
+ 
+@@ -1071,48 +1067,52 @@
+   SetupItableClosure(address klass_begin, itableOffsetEntry* offset_entry, itableMethodEntry* method_entry) {
+     _klass_begin  = klass_begin;
+     _offset_entry = offset_entry;
+-    _method_entry = method_entry;    
++    _method_entry = method_entry;
+   }
+ 
+   itableMethodEntry* method_entry() const { return _method_entry; }
+ 
+   void doit(klassOop intf, int method_count) {
+     int offset = ((address)_method_entry) - _klass_begin;
+-    _offset_entry->initialize(intf, offset);      
++    _offset_entry->initialize(intf, offset);
+     _offset_entry++;
+     _method_entry += method_count;
+   }
+ };
+ 
+-int klassItable::compute_itable_size(objArrayHandle transitive_interfaces) {    
++int klassItable::compute_itable_size(objArrayHandle transitive_interfaces) {
+   // Count no of interfaces and total number of interface methods
+-  CountInterfacesClosure cic;  
++  CountInterfacesClosure cic;
+   visit_all_interfaces(transitive_interfaces(), &cic);
+-    
+-  // There's alway an extra itable entry so we can null-terminate it.
+-  int itable_size = calc_itable_size(cic.nof_interfaces() + 1, cic.nof_methods()); 
++
++  // Add one extra entry in debug mode, so we can null-terminate the table
++  int nof_methods    = cic.nof_methods();
++  int nof_interfaces = cic.nof_interfaces();
++  debug_only(if (nof_interfaces > 0) nof_interfaces++);
++
++  int itable_size = calc_itable_size(nof_interfaces, nof_methods);
+ 
+   // Statistics
+-  update_stats(itable_size * HeapWordSize);  
+-  
++  update_stats(itable_size * HeapWordSize);
++
+   return itable_size;
+ }
+ 
+ 
+ // Fill out offset table and interface klasses into the itable space
+-void klassItable::setup_itable_offset_table(instanceKlassHandle klass) {  
++void klassItable::setup_itable_offset_table(instanceKlassHandle klass) {
+   if (klass->itable_length() == 0) return;
+   assert(!klass->is_interface(), "Should have zero length itable");
+-    
+-  // Count no of interfaces and total number of interface methods  
+-  CountInterfacesClosure cic;  
++
++  // Count no of interfaces and total number of interface methods
++  CountInterfacesClosure cic;
+   visit_all_interfaces(klass->transitive_interfaces(), &cic);
+   int nof_methods    = cic.nof_methods();
+   int nof_interfaces = cic.nof_interfaces();
+-  
+-  // Add one extra entry so we can null-terminate the table
+-  nof_interfaces++;
+-  
++
++  // Add one extra entry in debug mode, so we can null-terminate the table
++  debug_only(if (nof_interfaces > 0) nof_interfaces++);
++
+   assert(compute_itable_size(objArrayHandle(klass->transitive_interfaces())) ==
+          calc_itable_size(nof_interfaces, nof_methods),
+          "mismatch calculation of itable size");
+@@ -1123,29 +1123,29 @@
+   intptr_t* end               = klass->end_of_itable();
+   assert((oop*)(ime + nof_methods) <= klass->start_of_static_fields(), "wrong offset calculation (1)");
+   assert((oop*)(end) == (oop*)(ime + nof_methods),                     "wrong offset calculation (2)");
+-  
++
+   // Visit all interfaces and initialize itable offset table
+   SetupItableClosure sic((address)klass->as_klassOop(), ioe, ime);
+   visit_all_interfaces(klass->transitive_interfaces(), &sic);
+-    
++
+ #ifdef ASSERT
+   ime  = sic.method_entry();
+   oop* v = (oop*) klass->end_of_itable();
+   assert( (oop*)(ime) == v, "wrong offset calculation (2)");
+-#endif 
++#endif
+ }
+ 
+ 
+ // m must be a method in an interface
+-int klassItable::compute_itable_index(methodOop m) {  
++int klassItable::compute_itable_index(methodOop m) {
+   klassOop intf = m->method_holder();
+   assert(instanceKlass::cast(intf)->is_interface(), "sanity check");
+-  objArrayOop methods = instanceKlass::cast(intf)->methods();  
+-  int index = 0;  
++  objArrayOop methods = instanceKlass::cast(intf)->methods();
++  int index = 0;
+   while(methods->obj_at(index) != m) {
+     index++;
+     assert(index < methods->length(), "should find index for resolve_invoke");
+-  }  
++  }
+   // Adjust for <clinit>, which is left out of table if first method
+   if (methods->length() > 0 && ((methodOop)methods->obj_at(0))->name() == vmSymbols::class_initializer_name()) {
+     index--;
+@@ -1197,17 +1197,17 @@
+     tty->cr();
+   }
+ }
+-#endif    
++#endif
+ 
+ void vtableEntry::verify(klassVtable* vt, outputStream* st) {
+   NOT_PRODUCT(FlagSetting fs(IgnoreLockingAssertions, true));
+   assert(method() != NULL, "must have set method");
+   method()->verify();
+   // we sub_type, because it could be a miranda method
+-  if (!vt->klass()->is_subtype_of(method()->method_holder())) { 
++  if (!vt->klass()->is_subtype_of(method()->method_holder())) {
+ #ifndef PRODUCT
+     print();
+-#endif    
++#endif
+     fatal1("vtableEntry %#lx: method is from subclass", this);
+   }
+ }
+@@ -1260,13 +1260,13 @@
+   }
+ };
+ 
+-int VtableStats::no_klasses = 0;         
+-int VtableStats::no_array_klasses = 0;   
++int VtableStats::no_klasses = 0;
++int VtableStats::no_array_klasses = 0;
+ int VtableStats::no_instance_klasses = 0;
+-int VtableStats::sum_of_vtable_len = 0; 
+-int VtableStats::sum_of_array_vtable_len = 0; 
+-int VtableStats::fixed = 0;     
+-int VtableStats::filler = 0; 
++int VtableStats::sum_of_vtable_len = 0;
++int VtableStats::sum_of_array_vtable_len = 0;
++int VtableStats::fixed = 0;
++int VtableStats::filler = 0;
+ int VtableStats::entries = 0;
+ int VtableStats::array_entries = 0;
+ 
+@@ -1316,9 +1316,7 @@
+ void klassItable::print_statistics() {
+  tty->print_cr("itable statistics:");
+  tty->print_cr("%6d classes with itables", _total_classes);
+- tty->print_cr("%6d K uses for itables (average by class: %d bytes)", _total_size / K, _total_size / _total_classes); 
++ tty->print_cr("%6d K uses for itables (average by class: %d bytes)", _total_size / K, _total_size / _total_classes);
+ }
+ 
+ #endif // PRODUCT
+-
+-
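
Beyond whitespace cleanup, the substantive change in the klassVtable.cpp hunks above is that 7b24 reserves the null-terminating itableOffsetEntry only in debug builds, where 6b06 always reserved it and guaranteed the terminator at runtime. A minimal sketch of the two sizing schemes, with made-up entry sizes standing in for itableOffsetEntry::size() and itableMethodEntry::size():

    // Illustration only, not HotSpot code; entry sizes are assumed.
    const int offset_entry_words = 2;   // interface klassOop + byte offset
    const int method_entry_words = 1;   // methodOop per interface method

    int calc_itable_size(int num_interfaces, int num_methods) {
      return num_interfaces * offset_entry_words
           + num_methods    * method_entry_words;
    }

    int itable_size_6b06(int nof_interfaces, int nof_methods) {
      // old scheme: always one extra offset entry as a terminator
      return calc_itable_size(nof_interfaces + 1, nof_methods);
    }

    int itable_size_7b24(int nof_interfaces, int nof_methods, bool debug_build) {
      // new scheme: terminator only in debug builds, and only if non-empty
      if (debug_build && nof_interfaces > 0) nof_interfaces++;
      return calc_itable_size(nof_interfaces, nof_methods);
    }
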
+diff -ruN openjdk6/hotspot/src/share/vm/oops/klassVtable.hpp openjdk/hotspot/src/share/vm/oops/klassVtable.hpp
+--- openjdk6/hotspot/src/share/vm/oops/klassVtable.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/klassVtable.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)klassVtable.hpp	1.62 07/07/19 12:19:09 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A klassVtable abstracts the variable-length vtable that is embedded in instanceKlass
+@@ -37,7 +34,7 @@
+ class vtableEntry;
+ 
+ class klassVtable : public ResourceObj {
+-  KlassHandle  _klass;            // my klass 
++  KlassHandle  _klass;            // my klass
+   int          _tableOffset;      // offset of start of vtable data within klass
+   int          _length;           // length of vtable (number of entries)
+ #ifndef PRODUCT
+@@ -69,12 +66,12 @@
+   int index_of_miranda(symbolOop name, symbolOop signature);
+ 
+   void initialize_vtable(bool checkconstraints, TRAPS);   // initialize vtable of a new klass
+-  
++
+   // computes vtable length (in words) and the number of miranda methods
+   static void compute_vtable_size_and_num_mirandas(int &vtable_length, int &num_miranda_methods,
+-						   klassOop super, objArrayOop methods, 
+-						   AccessFlags class_flags, oop classloader,
+-						   symbolOop classname, objArrayOop local_interfaces);
++                                                   klassOop super, objArrayOop methods,
++                                                   AccessFlags class_flags, oop classloader,
++                                                   symbolOop classname, objArrayOop local_interfaces);
+ 
+   // RedefineClasses() API support:
+   // If any entry of this vtable points to any of old_methods,
+@@ -94,7 +91,7 @@
+   void oop_follow_contents(ParCompactionManager* cm);
+   void oop_update_pointers(ParCompactionManager* cm);
+   void oop_update_pointers(ParCompactionManager* cm,
+-			   HeapWord* beg_addr, HeapWord* end_addr);
++                           HeapWord* beg_addr, HeapWord* end_addr);
+ #endif // SERIALGC
+ 
+   // Iterators
+@@ -130,10 +127,10 @@
+   static void add_new_mirandas_to_list(GrowableArray<methodOop>* list_of_current_mirandas, objArrayOop current_interface_methods, objArrayOop class_methods, klassOop super);
+   static void get_mirandas(GrowableArray<methodOop>* mirandas, klassOop super, objArrayOop class_methods, objArrayOop local_interfaces);
+   static int get_num_mirandas(klassOop super, objArrayOop class_methods, objArrayOop local_interfaces);
+-    
+-  
++
++
+   void verify_against(outputStream* st, klassVtable* vt, int index);
+-  inline instanceKlass* ik() const;    
++  inline instanceKlass* ik() const;
+ };
+ 
+ 
+@@ -165,19 +162,19 @@
+ };
+ 
+ 
+-inline methodOop klassVtable::method_at(int i) const { 
++inline methodOop klassVtable::method_at(int i) const {
+   assert(i >= 0 && i < _length, "index out of bounds");
+   assert(table()[i].method() != NULL, "should not be null");
+   assert(oop(table()[i].method())->is_method(), "should be method");
+   return table()[i].method();
+ }
+ 
+-inline methodOop klassVtable::unchecked_method_at(int i) const { 
++inline methodOop klassVtable::unchecked_method_at(int i) const {
+   assert(i >= 0 && i < _length, "index out of bounds");
+   return table()[i].method();
+ }
+ 
+-inline oop* klassVtable::adr_method_at(int i) const { 
++inline oop* klassVtable::adr_method_at(int i) const {
+   // Allow one past the last entry to be referenced; useful for loop bounds.
+   assert(i >= 0 && i <= _length, "index out of bounds");
+   return (oop*)(address(table() + i) + vtableEntry::method_offset_in_bytes());
+@@ -209,7 +206,7 @@
+ };
+ 
+ 
+-class itableMethodEntry VALUE_OBJ_CLASS_SPEC { 
++class itableMethodEntry VALUE_OBJ_CLASS_SPEC {
+  private:
+   methodOop _method;
+ 
+@@ -218,7 +215,7 @@
+ 
+   void clear()             { _method = NULL; }
+ 
+-  void initialize(methodOop method); 
++  void initialize(methodOop method);
+ 
+   // Static size and offset accessors
+   static int size()                         { return sizeof(itableMethodEntry) / HeapWordSize; }  // size in words
+@@ -237,17 +234,17 @@
+ //    klassOop of interface n             \
+ //    offset to vtable from start of oop  / offset table entry
+ //    --- vtable for interface 1 ---
+-//    methodOop                           \ 
++//    methodOop                           \
+ //    compiler entry point                / method table entry
+ //    ...
+-//    methodOop                           \ 
++//    methodOop                           \
+ //    compiler entry point                / method table entry
+ //    -- vtable for interface 2 ---
+ //    ...
+-//      
++//
+ class klassItable : public ResourceObj {
+  private:
+-  instanceKlassHandle  _klass;             // my klass 
++  instanceKlassHandle  _klass;             // my klass
+   int                  _table_offset;      // offset of start of itable data within klass (in words)
+   int                  _size_offset_table; // size of offset table (in itableOffset entries)
+   int                  _size_method_table; // size of methodtable (in itableMethodEntry entries)
+@@ -261,11 +258,11 @@
+ 
+   itableMethodEntry* method_entry(int i) { assert(0 <= i && i <= _size_method_table, "index out of bounds");
+                                            return &((itableMethodEntry*)method_start())[i]; }
+-  
+-  int size_offset_table()                { return _size_offset_table; }
++
++  int nof_interfaces()                   { return _size_offset_table; }
+ 
+   // Initialization
+-  void initialize_itable(bool checkconstraints, TRAPS);    
++  void initialize_itable(bool checkconstraints, TRAPS);
+ 
+   // Updates
+   void initialize_with_method(methodOop m);
+@@ -288,7 +285,7 @@
+   void oop_follow_contents(ParCompactionManager* cm);
+   void oop_update_pointers(ParCompactionManager* cm);
+   void oop_update_pointers(ParCompactionManager* cm,
+-			   HeapWord* beg_addr, HeapWord* end_addr);
++                           HeapWord* beg_addr, HeapWord* end_addr);
+ #endif // SERIALGC
+ 
+   // Iterators
+@@ -304,11 +301,11 @@
+ 
+   // Debugging/Statistics
+   static void print_statistics() PRODUCT_RETURN;
+- private:  
++ private:
+   intptr_t* vtable_start() const { return ((intptr_t*)_klass()) + _table_offset; }
+   intptr_t* method_start() const { return vtable_start() + _size_offset_table * itableOffsetEntry::size(); }
+ 
+-  // Helper methods  
++  // Helper methods
+   static int  calc_itable_size(int num_interfaces, int num_methods) { return (num_interfaces * itableOffsetEntry::size()) + (num_methods * itableMethodEntry::size()); }
+ 
+   // Statistics
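
The layout comment in klassItable above describes an offset table (one itableOffsetEntry per interface) followed by per-interface method tables. A hedged sketch of how a lookup walks that structure; OffsetEntry and the raw pointers are simplifications of itableOffsetEntry/klassOop, not the HotSpot types:

    #include <cstddef>

    struct OffsetEntry { const void* interface_klass; int offset; };

    // offset is in bytes from the start of the containing klass object,
    // which is exactly how SetupItableClosure::doit() above stores it.
    const void* find_method_table(const char* klass_begin,
                                  const OffsetEntry* offsets, int nof_interfaces,
                                  const void* wanted_interface) {
      for (int i = 0; i < nof_interfaces; i++) {
        if (offsets[i].interface_klass == wanted_interface)
          return klass_begin + offsets[i].offset;  // that interface's method table
      }
      return NULL;  // interface not implemented by this klass
    }
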
+diff -ruN openjdk6/hotspot/src/share/vm/oops/markOop.cpp openjdk/hotspot/src/share/vm/oops/markOop.cpp
+--- openjdk6/hotspot/src/share/vm/oops/markOop.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/markOop.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)markOop.cpp	1.28 07/05/05 17:06:05 JVM"
+-#endif
+ /*
+  * Copyright 1997-1999 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/oops/markOop.hpp openjdk/hotspot/src/share/vm/oops/markOop.hpp
+--- openjdk6/hotspot/src/share/vm/oops/markOop.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/markOop.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)markOop.hpp	1.64 07/05/05 17:06:02 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,17 +19,17 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The markOop describes the header of an object.
+ //
+-// Note that the mark is not a real oop but just a word. 
++// Note that the mark is not a real oop but just a word.
+ // It is placed in the oop hierarchy for historical reasons.
+ //
+ // Bit-format of an object header (most significant first):
+ //
+-//  
++//
+ //  unused:0/25 hash:25/31 age:4 biased_lock:1 lock:2 = 32/64 bits
+ //
+ //  - hash contains the identity hash value: largest value is
+@@ -134,8 +131,8 @@
+ 
+ #ifdef _WIN64
+     // These values are too big for Win64
+-    const static uintptr_t hash_mask = right_n_bits(hash_bits); 
+-    const static uintptr_t hash_mask_in_place  = 
++    const static uintptr_t hash_mask = right_n_bits(hash_bits);
++    const static uintptr_t hash_mask_in_place  =
+                             (address_word)hash_mask << hash_shift;
+ #endif
+ 
+@@ -208,14 +205,14 @@
+ 
+   // Special temporary state of the markOop while being inflated.
+   // Code that looks at mark outside a lock needs to take this into account.
+-  bool is_being_inflated() const { return (value() == 0); } 
++  bool is_being_inflated() const { return (value() == 0); }
+ 
+-  // Distinguished markword value - used when inflating over 
++  // Distinguished markword value - used when inflating over
+   // an existing stacklock.  0 indicates the markword is "BUSY".
+   // Lockword mutators that use a LD...CAS idiom should always
+   // check for and avoid overwriting a 0 value installed by some
+   // other thread.  (They should spin or block instead.  The 0 value
+-  // is transient and *should* be short-lived). 
++  // is transient and *should* be short-lived).
+   static markOop INFLATING() { return (markOop) 0; }    // inflate-in-progress
+ 
+   // Should this header be preserved during GC?
+@@ -261,7 +258,7 @@
+   }
+   inline bool must_be_preserved_with_bias_for_cms_scavenge(klassOop klass_of_obj_containing_mark) const;
+ 
+-  // WARNING: The following routines are used EXCLUSIVELY by 
++  // WARNING: The following routines are used EXCLUSIVELY by
+   // synchronization functions. They are not really gc safe.
+   // They must get updated if markOop layout get changed.
+   markOop set_unlocked() const {
+@@ -300,7 +297,7 @@
+     tmp |= ((hash & hash_mask) << hash_shift);
+     return (markOop)tmp;
+   }
+-  // it is only used to be stored into BasicLock as the 
++  // it is only used to be stored into BasicLock as the
+   // indicator that the lock is using heavyweight monitor
+   static markOop unused_mark() {
+     return (markOop) marked_value;
+@@ -321,9 +318,9 @@
+     assert(bias_epoch <= max_bias_epoch, "bias epoch too large");
+     return (markOop) (tmp | (bias_epoch << epoch_shift) | (age << age_shift) | biased_lock_pattern);
+   }
+-  
++
+   // used to encode pointers during GC
+-  markOop clear_lock_bits() { return markOop(value() & ~lock_mask_in_place); }  
++  markOop clear_lock_bits() { return markOop(value() & ~lock_mask_in_place); }
+ 
+   // age operations
+   markOop set_marked()   { return markOop((value() & ~lock_mask_in_place) | marked_value); }
+@@ -336,12 +333,12 @@
+   markOop incr_age()          const { return age() == max_age ? markOop(this) : set_age(age() + 1); }
+ 
+   // hash operations
+-  intptr_t hash() const {     
++  intptr_t hash() const {
+     return mask_bits(value() >> hash_shift, hash_mask);
+   }
+-  
+-  bool has_no_hash() const { 
+-    return hash() == no_hash; 
++
++  bool has_no_hash() const {
++    return hash() == no_hash;
+   }
+ 
+   // Prototype mark for initialization
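
The markOop comment above packs lock state, age, and identity hash into a single header word. A self-contained sketch of the same mask-and-shift extraction for the 32-bit layout (the real shifts and masks live in markOopDesc; these constants merely follow the "hash:25 age:4 biased_lock:1 lock:2" comment):

    #include <cstdint>
    #include <cstdio>

    typedef uintptr_t markWord;   // stand-in for markOop's underlying word

    // Assumed 32-bit layout, least significant bits first:
    // lock:2  biased_lock:1  age:4  hash:25
    const int lock_bits = 2, biased_lock_bits = 1, age_bits = 4, hash_bits = 25;
    const int age_shift  = lock_bits + biased_lock_bits;   // 3
    const int hash_shift = age_shift + age_bits;           // 7

    int lock_value(markWord m) { return (int)(m & ((1u << lock_bits) - 1)); }
    int age(markWord m)        { return (int)((m >> age_shift) & ((1u << age_bits) - 1)); }
    unsigned long hash(markWord m) {
      return (unsigned long)((m >> hash_shift) & ((1u << hash_bits) - 1));
    }

    int main() {
      markWord m = (0x1234u << hash_shift) | (3u << age_shift) | 1u;
      printf("lock=%d age=%d hash=0x%lx\n", lock_value(m), age(m), hash(m));
      return 0;
    }
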
+diff -ruN openjdk6/hotspot/src/share/vm/oops/markOop.inline.hpp openjdk/hotspot/src/share/vm/oops/markOop.inline.hpp
+--- openjdk6/hotspot/src/share/vm/oops/markOop.inline.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/markOop.inline.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)markOop.inline.hpp	1.7 07/05/05 17:06:04 JVM"
+-#endif
+ /*
+  * Copyright 2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Should this header be preserved during GC?
+diff -ruN openjdk6/hotspot/src/share/vm/oops/methodDataKlass.cpp openjdk/hotspot/src/share/vm/oops/methodDataKlass.cpp
+--- openjdk6/hotspot/src/share/vm/oops/methodDataKlass.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/methodDataKlass.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)methodDataKlass.cpp	1.36 07/05/29 09:44:22 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -30,12 +27,12 @@
+ 
+ klassOop methodDataKlass::create_klass(TRAPS) {
+   methodDataKlass o;
+-  KlassHandle h_this_klass(THREAD, Universe::klassKlassObj());  
++  KlassHandle h_this_klass(THREAD, Universe::klassKlassObj());
+   KlassHandle k = base_create_klass(h_this_klass, header_size(),
+-				    o.vtbl_value(), CHECK_NULL);
++                                    o.vtbl_value(), CHECK_NULL);
+   // Make sure size calculation is right
+   assert(k()->size() == align_object_size(header_size()),
+-	 "wrong size for object");
++         "wrong size for object");
+   return k();
+ }
+ 
+@@ -74,8 +71,8 @@
+   obj->follow_header();
+   MarkSweep::mark_and_push(m->adr_method());
+   ResourceMark rm;
+-  for (ProfileData* data = m->first_data(); 
+-       m->is_valid(data); 
++  for (ProfileData* data = m->first_data();
++       m->is_valid(data);
+        data = m->next_data(data)) {
+     data->follow_contents();
+   }
+@@ -83,15 +80,15 @@
+ 
+ #ifndef SERIALGC
+ void methodDataKlass::oop_follow_contents(ParCompactionManager* cm,
+-					  oop obj) {
++                                          oop obj) {
+   assert (obj->is_methodData(), "object must be method data");
+   methodDataOop m = methodDataOop(obj);
+ 
+   obj->follow_header(cm);
+   PSParallelCompact::mark_and_push(cm, m->adr_method());
+   ResourceMark rm;
+-  for (ProfileData* data = m->first_data(); 
+-       m->is_valid(data); 
++  for (ProfileData* data = m->first_data();
++       m->is_valid(data);
+        data = m->next_data(data)) {
+     data->follow_contents(cm);
+   }
+@@ -108,7 +105,7 @@
+   obj->oop_iterate_header(blk);
+   blk->do_oop(m->adr_method());
+   ResourceMark rm;
+-  for (ProfileData* data = m->first_data(); 
++  for (ProfileData* data = m->first_data();
+        m->is_valid(data);
+        data = m->next_data(data)) {
+     data->oop_iterate(blk);
+@@ -130,7 +127,7 @@
+     blk->do_oop(m->adr_method());
+   }
+   ResourceMark rm;
+-  for (ProfileData* data = m->first_data(); 
++  for (ProfileData* data = m->first_data();
+        m->is_valid(data);
+        data = m->next_data(data)) {
+     data->oop_iterate_m(blk, mr);
+@@ -159,14 +156,14 @@
+ #ifndef SERIALGC
+ void methodDataKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
+   assert (obj->is_methodData(), "object must be method data");
+-  methodDataOop m = methodDataOop(obj);  
++  methodDataOop m = methodDataOop(obj);
+   // This should never point into the young gen.
+   assert(!PSScavenge::should_scavenge(oop(*m->adr_method())), "Sanity");
+ }
+ 
+ void methodDataKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
+   assert (obj->is_methodData(), "object must be method data");
+-  methodDataOop m = methodDataOop(obj);  
++  methodDataOop m = methodDataOop(obj);
+   // This should never point into the young gen.
+   assert(!PSScavenge::should_scavenge(oop(*m->adr_method())), "Sanity");
+ }
+@@ -187,7 +184,7 @@
+ 
+ int
+ methodDataKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
+-				     HeapWord* beg_addr, HeapWord* end_addr) {
++                                     HeapWord* beg_addr, HeapWord* end_addr) {
+   assert(obj->is_methodData(), "should be method data");
+ 
+   oop* p;
+@@ -239,4 +236,3 @@
+   guarantee(m->is_perm(), "should be in permspace");
+   m->verify_data_on(st);
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/methodDataKlass.hpp openjdk/hotspot/src/share/vm/oops/methodDataKlass.hpp
+--- openjdk6/hotspot/src/share/vm/oops/methodDataKlass.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/methodDataKlass.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)methodDataKlass.hpp	1.30 07/05/29 09:44:23 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // a methodDataKlass is the klass of a methodDataOop
+@@ -34,7 +31,7 @@
+  public:
+   // Testing
+   bool oop_is_methodData() const { return true; }
+-  
++
+   // Allocation
+   DEFINE_ALLOCATE_PERMANENT(methodDataKlass);
+   methodDataOop allocate(methodHandle method, TRAPS);
+@@ -43,7 +40,7 @@
+   // Sizing
+   int oop_size(oop obj) const;
+   int klass_oop_size() const { return object_size(); }
+-  
++
+   // Casting from klassOop
+   static methodDataKlass* cast(klassOop k) {
+     assert(k->klass_part()->oop_is_methodData(), "cast to methodDataKlass");
+@@ -69,11 +66,11 @@
+   // Allocation profiling support
+   juint alloc_size() const { return _alloc_size; }
+   void  set_alloc_size(juint n) { _alloc_size = n; }
+-  
++
+   // Iterators
+   int oop_oop_iterate(oop obj, OopClosure* blk);
+   int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
+-  
++
+ #ifndef PRODUCT
+  public:
+   // Printing
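
Every GC iterator in methodDataKlass.cpp above steps through the profiling records with the same first_data()/is_valid()/next_data() idiom, because ProfileData entries are variable-sized and laid out back to back. A sketch of that traversal pattern over a flat buffer, with an invented DataHeader standing in for DataLayout:

    #include <cstddef>

    struct DataHeader { int tag; int size_in_bytes; };  // stand-in for DataLayout

    // Walk heterogeneous, variable-sized records laid out back to back,
    // the way the methodDataOop iterators step from one ProfileData to the next.
    template <typename Fn>
    void for_each_record(const char* base, size_t total_bytes, Fn fn) {
      size_t off = 0;
      while (off < total_bytes) {                            // is_valid(data)
        const DataHeader* d = reinterpret_cast<const DataHeader*>(base + off);
        fn(d);                                               // visit this record
        off += d->size_in_bytes;                             // next_data(data)
      }
    }
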
+diff -ruN openjdk6/hotspot/src/share/vm/oops/methodDataOop.cpp openjdk/hotspot/src/share/vm/oops/methodDataOop.cpp
+--- openjdk6/hotspot/src/share/vm/oops/methodDataOop.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/methodDataOop.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)methodDataOop.cpp	1.51 07/05/29 09:44:22 JVM"
+-#endif
+ /*
+  * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -171,18 +168,18 @@
+       blk->do_oop(adr_receiver(row));
+     }
+   }
+-}  
++}
+ 
+ void ReceiverTypeData::oop_iterate_m(OopClosure* blk, MemRegion mr) {
+   for (uint row = 0; row < row_limit(); row++) {
+     if (receiver(row) != NULL) {
+       oop* adr = adr_receiver(row);
+       if (mr.contains(adr)) {
+-	blk->do_oop(adr);
++        blk->do_oop(adr);
+       }
+     }
+   }
+-}  
++}
+ 
+ void ReceiverTypeData::adjust_pointers() {
+   for (uint row = 0; row < row_limit(); row++) {
+@@ -247,7 +244,7 @@
+ // been executed, followed by a series of triples of the form
+ // (bci, count, di) which count the number of times that some bci was the
+ // target of the ret and cache a corresponding displacement.
+-  
++
+ void RetData::post_initialize(BytecodeStream* stream, methodDataOop mdo) {
+   for (uint row = 0; row < row_limit(); row++) {
+     set_bci_displacement(row, -1);
+@@ -297,7 +294,7 @@
+     if (bci(row) != no_bci) {
+       tab(st);
+       st->print_cr("bci(%d: count(%u) displacement(%d))",
+-		   bci(row), bci_count(row), bci_displacement(row));
++                   bci(row), bci_count(row), bci_displacement(row));
+     }
+   }
+ }
+@@ -323,7 +320,7 @@
+ void BranchData::print_data_on(outputStream* st) {
+   print_shared(st, "BranchData");
+   st->print_cr("taken(%u) displacement(%d)",
+-	       taken(), displacement());
++               taken(), displacement());
+   tab(st);
+   st->print_cr("not taken(%u)", not_taken());
+ }
+@@ -350,7 +347,7 @@
+ }
+ 
+ void MultiBranchData::post_initialize(BytecodeStream* stream,
+-				      methodDataOop mdo) {
++                                      methodDataOop mdo) {
+   assert(stream->bci() == bci(), "wrong pos");
+   int target;
+   int my_di;
+@@ -366,7 +363,7 @@
+       target_di = mdo->bci_to_di(target);
+       offset = target_di - my_di;
+       set_displacement_at(count, offset);
+-    }         
++    }
+     target = sw->default_offset() + bci();
+     my_di = mdo->dp_to_di(dp());
+     target_di = mdo->bci_to_di(target);
+@@ -397,12 +394,12 @@
+ void MultiBranchData::print_data_on(outputStream* st) {
+   print_shared(st, "MultiBranchData");
+   st->print_cr("default_count(%u) displacement(%d)",
+-	       default_count(), default_displacement());
++               default_count(), default_displacement());
+   int cases = number_of_cases();
+   for (int i = 0; i < cases; i++) {
+     tab(st);
+     st->print_cr("count(%u) displacement(%d)",
+-		 count_at(i), displacement_at(i));
++                 count_at(i), displacement_at(i));
+   }
+ }
+ #endif
+@@ -423,15 +420,15 @@
+     } else {
+       return BitData::static_cell_count();
+     }
+-  case Bytecodes::_invokespecial: 
++  case Bytecodes::_invokespecial:
+   case Bytecodes::_invokestatic:
+     return CounterData::static_cell_count();
+-  case Bytecodes::_goto: 
+-  case Bytecodes::_goto_w: 
++  case Bytecodes::_goto:
++  case Bytecodes::_goto_w:
+   case Bytecodes::_jsr:
+   case Bytecodes::_jsr_w:
+     return JumpData::static_cell_count();
+-  case Bytecodes::_invokevirtual: 
++  case Bytecodes::_invokevirtual:
+   case Bytecodes::_invokeinterface:
+     return VirtualCallData::static_cell_count();
+   case Bytecodes::_ret:
+@@ -525,7 +522,7 @@
+ // Initialize an individual data segment.  Returns the size of
+ // the segment in bytes.
+ int methodDataOopDesc::initialize_data(BytecodeStream* stream,
+-				       int data_index) {
++                                       int data_index) {
+   int cell_count = -1;
+   int tag = DataLayout::no_tag;
+   DataLayout* data_layout = data_layout_at(data_index);
+@@ -607,7 +604,7 @@
+     return NULL;
+   }
+   DataLayout* data_layout = data_layout_at(data_index);
+-  
++
+   switch (data_layout->tag()) {
+   case DataLayout::no_tag:
+   default:
+@@ -701,14 +698,14 @@
+ int methodDataOopDesc::mileage_of(methodOop method) {
+   int mileage = 0;
+   int iic = method->interpreter_invocation_count();
+-  if (mileage < iic)  mileage = iic; 
++  if (mileage < iic)  mileage = iic;
+ 
+   InvocationCounter* ic = method->invocation_counter();
+   InvocationCounter* bc = method->backedge_counter();
+ 
+   int icval = ic->count();
+   if (ic->carry()) icval += CompileThreshold;
+-  if (mileage < icval)  mileage = icval; 
++  if (mileage < icval)  mileage = icval;
+   int bcval = bc->count();
+   if (bc->carry()) bcval += CompileThreshold;
+   if (mileage < bcval)  mileage = bcval;
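
mileage_of() in the final hunk above reduces three counters to a single hotness figure: the maximum of the interpreter invocation count and the two profiled counters, where a counter whose carry (overflow) bit is set is padded by CompileThreshold. The same reduction with the counter plumbing stubbed out (CompileThreshold's value here is an assumption; in HotSpot it is a product flag):

    const int CompileThreshold = 10000;  // assumed; a tunable flag in HotSpot

    struct Counter { int count; bool carry; };  // carry set when counter overflowed

    int mileage(int interpreter_invocations, Counter invocation, Counter backedge) {
      int mileage = interpreter_invocations;
      int icval = invocation.count + (invocation.carry ? CompileThreshold : 0);
      if (mileage < icval) mileage = icval;
      int bcval = backedge.count + (backedge.carry ? CompileThreshold : 0);
      if (mileage < bcval) mileage = bcval;
      return mileage;
    }
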
+diff -ruN openjdk6/hotspot/src/share/vm/oops/methodDataOop.hpp openjdk/hotspot/src/share/vm/oops/methodDataOop.hpp
+--- openjdk6/hotspot/src/share/vm/oops/methodDataOop.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/methodDataOop.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)methodDataOop.hpp	1.53 07/05/29 09:44:23 JVM"
+-#endif
+ /*
+  * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class BytecodeStream;
+@@ -189,6 +186,9 @@
+   intptr_t cell_at(int index) {
+     return _cells[index];
+   }
++  intptr_t* adr_cell_at(int index) {
++    return &_cells[index];
++  }
+   oop* adr_oop_at(int index) {
+     return (oop*)&(_cells[index]);
+   }
+@@ -201,7 +201,7 @@
+     assert(flag_number < flag_limit, "oob");
+     return (_header._struct._flags & (0x1 << flag_number)) != 0;
+   }
+-  
++
+   // Low-level support for code generation.
+   static ByteSize header_offset() {
+     return byte_offset_of(DataLayout, _header);
+@@ -407,7 +407,7 @@
+   }
+   ArrayData* as_ArrayData() {
+     assert(is_ArrayData(), "wrong type");
+-    return is_ArrayData()       ? (ArrayData*)      this : NULL;        
++    return is_ArrayData()       ? (ArrayData*)      this : NULL;
+   }
+   MultiBranchData* as_MultiBranchData() {
+     assert(is_MultiBranchData(), "wrong type");
+@@ -431,10 +431,10 @@
+   virtual void update_pointers(HeapWord* beg_addr, HeapWord* end_addr) {}
+ #endif // SERIALGC
+ 
+-  // CI translation: ProfileData can represent both MethodDataOop data 
+-  // as well as CIMethodData data. This function is provided for translating 
+-  // an oop in a ProfileData to the ci equivalent. Generally speaking, 
+-  // most ProfileData don't require any translation, so we provide the null 
++  // CI translation: ProfileData can represent both MethodDataOop data
++  // as well as CIMethodData data. This function is provided for translating
++  // an oop in a ProfileData to the ci equivalent. Generally speaking,
++  // most ProfileData don't require any translation, so we provide the null
+   // translation here, and the required translators are in the ci subclasses.
+   virtual void translate_from(ProfileData* data) {}
+ 
+@@ -458,7 +458,7 @@
+     //  saw a null operand (cast/aastore/instanceof)
+     null_seen_flag              = DataLayout::first_flag + 0
+   };
+-  enum { bit_cell_count = 0 };  // no additional data fields needed. 
++  enum { bit_cell_count = 0 };  // no additional data fields needed.
+ public:
+   BitData(DataLayout* layout) : ProfileData(layout) {
+   }
+@@ -468,19 +468,19 @@
+   static int static_cell_count() {
+     return bit_cell_count;
+   }
+-  
++
+   virtual int cell_count() {
+     return static_cell_count();
+   }
+-  
++
+   // Accessor
+ 
+   // The null_seen flag bit is specially known to the interpreter.
+   // Consulting it allows the compiler to avoid setting up null_check traps.
+   bool null_seen()     { return flag_at(null_seen_flag); }
+   void set_null_seen()    { set_flag_at(null_seen_flag); }
+-  
+-  
++
++
+   // Code generation support
+   static int null_seen_byte_constant() {
+     return flag_number_to_byte_constant(null_seen_flag);
+@@ -489,7 +489,7 @@
+   static ByteSize bit_data_size() {
+     return cell_offset(bit_cell_count);
+   }
+-  
++
+ #ifndef PRODUCT
+   void print_data_on(outputStream* st);
+ #endif
+@@ -512,16 +512,16 @@
+   static int static_cell_count() {
+     return counter_cell_count;
+   }
+-  
++
+   virtual int cell_count() {
+     return static_cell_count();
+   }
+-  
++
+   // Direct accessor
+   uint count() {
+     return uint_at(count_off);
+   }
+-  
++
+   // Code generation support
+   static ByteSize count_offset() {
+     return cell_offset(count_off);
+@@ -529,12 +529,12 @@
+   static ByteSize counter_data_size() {
+     return cell_offset(counter_cell_count);
+   }
+-  
++
+ #ifndef PRODUCT
+   void print_data_on(outputStream* st);
+ #endif
+ };
+-  
++
+ // JumpData
+ //
+ // A JumpData is used to access profiling information for a direct
+@@ -552,7 +552,7 @@
+   void set_displacement(int displacement) {
+     set_int_at(displacement_off_set, displacement);
+   }
+-  
++
+ public:
+   JumpData(DataLayout* layout) : ProfileData(layout) {
+     assert(layout->tag() == DataLayout::jump_data_tag ||
+@@ -564,20 +564,28 @@
+   static int static_cell_count() {
+     return jump_cell_count;
+   }
+-  
++
+   virtual int cell_count() {
+     return static_cell_count();
+   }
+-  
++
+   // Direct accessor
+   uint taken() {
+     return uint_at(taken_off_set);
+   }
+-  
++  // Saturating counter
++  uint inc_taken() {
++    uint cnt = taken() + 1;
++    // Did we wrap? Will compiler screw us??
++    if (cnt == 0) cnt--;
++    set_uint_at(taken_off_set, cnt);
++    return cnt;
++  }
++
+   int displacement() {
+     return int_at(displacement_off_set);
+   }
+-  
++
+   // Code generation support
+   static ByteSize taken_offset() {
+     return cell_offset(taken_off_set);
+@@ -589,7 +597,7 @@
+ 
+   // Specific initialization.
+   void post_initialize(BytecodeStream* stream, methodDataOop mdo);
+-  
++
+ #ifndef PRODUCT
+   void print_data_on(outputStream* st);
+ #endif
+@@ -620,11 +628,11 @@
+   static int static_cell_count() {
+     return counter_cell_count + (uint) TypeProfileWidth * receiver_type_row_cell_count;
+   }
+-  
++
+   virtual int cell_count() {
+     return static_cell_count();
+   }
+-  
++
+   // Direct accessors
+   static uint row_limit() {
+     return TypeProfileWidth;
+@@ -656,7 +664,7 @@
+     assert(row < row_limit(), "oob");
+     return uint_at(receiver_count_cell_index(row));
+   }
+-  
++
+   // Code generation support
+   static ByteSize receiver_offset(uint row) {
+     return cell_offset(receiver_cell_index(row));
+@@ -667,7 +675,7 @@
+   static ByteSize receiver_type_data_size() {
+     return cell_offset(static_cell_count());
+   }
+-  
++
+   // GC support
+   virtual void follow_contents();
+   virtual void oop_iterate(OopClosure* blk);
+@@ -712,12 +720,12 @@
+   virtual int cell_count() {
+     return static_cell_count();
+   }
+-  
++
+   // Direct accessors
+   static ByteSize virtual_call_data_size() {
+     return cell_offset(static_cell_count());
+   }
+-  
++
+ #ifndef PRODUCT
+   void print_data_on(outputStream* st);
+ #endif
+@@ -771,11 +779,11 @@
+   static int static_cell_count() {
+     return counter_cell_count + (uint) BciProfileWidth * ret_row_cell_count;
+   }
+-  
++
+   virtual int cell_count() {
+     return static_cell_count();
+   }
+-  
++
+   static uint row_limit() {
+     return BciProfileWidth;
+   }
+@@ -799,7 +807,7 @@
+   int bci_displacement(uint row) {
+     return int_at(bci_displacement_cell_index(row));
+   }
+-  
++
+   // Interpreter Runtime support
+   address fixup_ret(int return_bci, methodDataHandle mdo);
+ 
+@@ -816,7 +824,7 @@
+ 
+   // Specific initialization.
+   void post_initialize(BytecodeStream* stream, methodDataOop mdo);
+-  
++
+ #ifndef PRODUCT
+   void print_data_on(outputStream* st);
+ #endif
+@@ -837,7 +845,7 @@
+   void set_displacement(int displacement) {
+     set_int_at(displacement_off_set, displacement);
+   }
+-  
++
+ public:
+   BranchData(DataLayout* layout) : JumpData(layout) {
+     assert(layout->tag() == DataLayout::branch_data_tag, "wrong type");
+@@ -848,16 +856,24 @@
+   static int static_cell_count() {
+     return branch_cell_count;
+   }
+-  
++
+   virtual int cell_count() {
+     return static_cell_count();
+   }
+-  
++
+   // Direct accessor
+   uint not_taken() {
+     return uint_at(not_taken_off_set);
+   }
+-  
++
++  uint inc_not_taken() {
++    uint cnt = not_taken() + 1;
++    // Did we wrap? Will compiler screw us??
++    if (cnt == 0) cnt--;
++    set_uint_at(not_taken_off_set, cnt);
++    return cnt;
++  }
++
+   // Code generation support
+   static ByteSize not_taken_offset() {
+     return cell_offset(not_taken_off_set);
+@@ -865,10 +881,10 @@
+   static ByteSize branch_data_size() {
+     return cell_offset(branch_cell_count);
+   }
+-  
++
+   // Specific initialization.
+   void post_initialize(BytecodeStream* stream, methodDataOop mdo);
+-  
++
+ #ifndef PRODUCT
+   void print_data_on(outputStream* st);
+ #endif
+@@ -904,7 +920,7 @@
+     int aindex = index + array_start_off_set;
+     set_int_at(aindex, value);
+   }
+-  
++
+   // Code generation support for subclasses.
+   static ByteSize array_element_offset(int index) {
+     return cell_offset(array_start_off_set + index);
+@@ -922,7 +938,7 @@
+   int array_len() {
+     return int_at_unchecked(array_len_off_set);
+   }
+-  
++
+   virtual int cell_count() {
+     return array_len() + 1;
+   }
+@@ -989,13 +1005,13 @@
+ 
+   uint count_at(int index) {
+     return array_uint_at(case_array_start +
+-			 index * per_case_cell_count +
+-			 relative_count_off_set);
++                         index * per_case_cell_count +
++                         relative_count_off_set);
+   }
+   int displacement_at(int index) {
+     return array_int_at(case_array_start +
+-			index * per_case_cell_count +
+-			relative_displacement_off_set);
++                        index * per_case_cell_count +
++                        relative_displacement_off_set);
+   }
+ 
+   // Code generation support
+@@ -1006,7 +1022,7 @@
+     return array_element_offset(default_disaplacement_off_set);
+   }
+   static ByteSize case_count_offset(int index) {
+-    return case_array_offset() + 
++    return case_array_offset() +
+            (per_case_size() * index) +
+            relative_count_offset();
+   }
+@@ -1045,23 +1061,23 @@
+ // -----------------------------
+ // | Data entries...           |
+ // |   (variable size)         |
+-// |			       |
+-// .			       .
+-// .			       .
+-// .			       .
+-// |   			       |
++// |                           |
++// .                           .
++// .                           .
++// .                           .
++// |                           |
+ // -----------------------------
+ //
+-// The data entry area is a heterogeneous array of DataLayouts. Each 
+-// DataLayout in the array corresponds to a specific bytecode in the 
+-// method.  The entries in the array are sorted by the corresponding 
++// The data entry area is a heterogeneous array of DataLayouts. Each
++// DataLayout in the array corresponds to a specific bytecode in the
++// method.  The entries in the array are sorted by the corresponding
+ // bytecode.  Access to the data is via resource-allocated ProfileData,
+ // which point to the underlying blocks of DataLayout structures.
+ //
+ // During interpretation, if profiling is enabled, the interpreter
+ // maintains a method data pointer (mdp), which points at the entry
+-// in the array corresponding to the current bci.  In the course of 
+-// intepretation, when a bytecode is encountered that has profile data 
++// in the array corresponding to the current bci.  In the course of
++// interpretation, when a bytecode is encountered that has profile data
+ // associated with it, the entry pointed to by mdp is updated, then the
+ // mdp is adjusted to point to the next appropriate DataLayout.  If mdp
+ // is NULL to begin with, the interpreter assumes that the current method
+@@ -1070,9 +1086,9 @@
+ // In methodDataOop parlance, "dp" is a "data pointer", the actual address
+ // of a DataLayout element.  A "di" is a "data index", the offset in bytes
+ // from the base of the data entry array.  A "displacement" is the byte offset
+-// in certain ProfileData objects that indicate the amount the mdp must be 
++// in certain ProfileData objects that indicate the amount the mdp must be
+ // adjusted in the event of a change in control flow.
+-// 
++//
+ 
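
The comment block above introduces three related quantities: dp (the address of a DataLayout), di (its byte offset from the base of the data entry array), and displacement (the byte delta applied to the mdp on a control-flow change). Their relationships in a trivial sketch, with assumed helper names:

    #include <cstddef>

    typedef unsigned char* dp_t;  // "data pointer": address of a DataLayout

    std::ptrdiff_t dp_to_di(dp_t dp, dp_t data_base) { return dp - data_base; }
    dp_t di_to_dp(std::ptrdiff_t di, dp_t data_base) { return data_base + di; }

    // e.g. on a taken branch: the mdp moves by the displacement cached in
    // the current BranchData/JumpData record
    dp_t adjust_mdp(dp_t mdp, int displacement)      { return mdp + displacement; }
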
+ class methodDataOopDesc : public oopDesc {
+   friend class VMStructs;
+@@ -1081,7 +1097,7 @@
+ 
+   // Back pointer to the methodOop
+   methodOop _method;
+-  
++
+   // Size of this oop in bytes
+   int _size;
+ 
+@@ -1090,7 +1106,7 @@
+ 
+   // Whole-method sticky bits and flags
+ public:
+-  enum { 
++  enum {
+     _trap_hist_limit    = 16,   // decoupled from Deoptimization::Reason_LIMIT
+     _trap_hist_mask     = max_jubyte,
+     _extra_data_count   = 4     // extra DataLayout headers, for trap history
+@@ -1147,9 +1163,9 @@
+ 
+   // hint accessors
+   int      hint_di() const  { return _hint_di; }
+-  void set_hint_di(int di)  { 
++  void set_hint_di(int di)  {
+     assert(!out_of_bounds(di), "hint_di out of bounds");
+-    _hint_di = di; 
++    _hint_di = di;
+   }
+   ProfileData* data_before(int bci) {
+     // avoid SEGV on this edge case
+@@ -1307,7 +1323,7 @@
+ 
+   // Support for code generation
+   static ByteSize data_offset() {
+-    return byte_offset_of(methodDataOopDesc, _data[0]); 
++    return byte_offset_of(methodDataOopDesc, _data[0]);
+   }
+ 
+   // GC support
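
inc_taken() and inc_not_taken(), added to JumpData and BranchData by this patch, are saturating counters: an increment that wraps to zero is undone, so a very hot branch pins at the maximum count rather than suddenly looking cold. The idiom in isolation (a sketch, not the accessor itself):

    #include <cstdint>

    // Saturating increment, mirroring JumpData::inc_taken() above:
    // if +1 wrapped around to zero, step back so the counter sticks at max.
    uint32_t saturating_inc(uint32_t counter) {
      uint32_t cnt = counter + 1;
      if (cnt == 0) cnt--;    // wrapped: pin at UINT32_MAX
      return cnt;
    }
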
+diff -ruN openjdk6/hotspot/src/share/vm/oops/methodKlass.cpp openjdk/hotspot/src/share/vm/oops/methodKlass.cpp
+--- openjdk6/hotspot/src/share/vm/oops/methodKlass.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/methodKlass.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)methodKlass.cpp	1.120 07/05/29 09:44:23 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -30,7 +27,7 @@
+ 
+ klassOop methodKlass::create_klass(TRAPS) {
+   methodKlass o;
+-  KlassHandle h_this_klass(THREAD, Universe::klassKlassObj());  
++  KlassHandle h_this_klass(THREAD, Universe::klassKlassObj());
+   KlassHandle k = base_create_klass(h_this_klass, header_size(), o.vtbl_value(), CHECK_NULL);
+   // Make sure size calculation is right
+   assert(k()->size() == align_object_size(header_size()), "wrong size for object");
+@@ -74,12 +71,12 @@
+   m->clear_intrinsic_id_cache();
+   m->set_method_data(NULL);
+   m->set_interpreter_throwout_count(0);
+-  m->set_vtable_index(methodOopDesc::garbage_vtable_index);  
++  m->set_vtable_index(methodOopDesc::garbage_vtable_index);
+ 
+-  // Fix and bury in methodOop 
++  // Fix and bury in methodOop
+   m->set_interpreter_entry(NULL); // sets i2i entry and from_int
+   m->set_highest_tier_compile(CompLevel_none);
+-  m->set_adapter_entry(NULL); 
++  m->set_adapter_entry(NULL);
+   m->clear_code(); // from_c/from_i get set to c2i/i2i
+ 
+   if (access_flags.is_native()) {
+@@ -106,7 +103,7 @@
+ void methodKlass::oop_follow_contents(oop obj) {
+   assert (obj->is_method(), "object must be method");
+   methodOop m = methodOop(obj);
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::methodKlassObj never moves.
+   MarkSweep::mark_and_push(m->adr_constMethod());
+   MarkSweep::mark_and_push(m->adr_constants());
+@@ -117,10 +114,10 @@
+ 
+ #ifndef SERIALGC
+ void methodKlass::oop_follow_contents(ParCompactionManager* cm,
+-				      oop obj) {
++                                      oop obj) {
+   assert (obj->is_method(), "object must be method");
+   methodOop m = methodOop(obj);
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::methodKlassObj never moves.
+   PSParallelCompact::mark_and_push(cm, m->adr_constMethod());
+   PSParallelCompact::mark_and_push(cm, m->adr_constants());
+@@ -135,10 +132,10 @@
+ int methodKlass::oop_oop_iterate(oop obj, OopClosure* blk) {
+   assert (obj->is_method(), "object must be method");
+   methodOop m = methodOop(obj);
+-  // Get size before changing pointers. 
++  // Get size before changing pointers.
+   // Don't call size() or oop_size() since that is a virtual call.
+-  int size = m->object_size();  
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  int size = m->object_size();
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::methodKlassObj never moves
+   blk->do_oop(m->adr_constMethod());
+   blk->do_oop(m->adr_constants());
+@@ -154,8 +151,8 @@
+   methodOop m = methodOop(obj);
+   // Get size before changing pointers.
+   // Don't call size() or oop_size() since that is a virtual call.
+-  int size = m->object_size();  
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  int size = m->object_size();
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::methodKlassObj never moves.
+   oop* adr;
+   adr = m->adr_constMethod();
+@@ -175,8 +172,8 @@
+   methodOop m = methodOop(obj);
+   // Get size before changing pointers.
+   // Don't call size() or oop_size() since that is a virtual call.
+-  int size = m->object_size();  
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  int size = m->object_size();
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::methodKlassObj never moves.
+   MarkSweep::adjust_pointer(m->adr_constMethod());
+   MarkSweep::adjust_pointer(m->adr_constants());
+@@ -209,7 +206,7 @@
+ }
+ 
+ int methodKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
+-				     HeapWord* beg_addr, HeapWord* end_addr) {
++                                     HeapWord* beg_addr, HeapWord* end_addr) {
+   assert(obj->is_method(), "should be method");
+ 
+   oop* p;
+@@ -337,9 +334,9 @@
+     guarantee(m->constMethod()->is_constMethod(), "should be constMethodOop");
+     guarantee(m->constMethod()->is_perm(), "should be in permspace");
+     methodDataOop method_data = m->method_data();
+-    guarantee(method_data == NULL || 
++    guarantee(method_data == NULL ||
+               method_data->is_perm(), "should be in permspace");
+-    guarantee(method_data == NULL || 
++    guarantee(method_data == NULL ||
+               method_data->is_methodData(), "should be method data");
+   }
+ }
+@@ -362,4 +359,3 @@
+   constMethodKlass* ck = constMethodKlass::cast(xconst->klass());
+   ck->oop_set_partially_loaded(xconst);
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/methodKlass.hpp openjdk/hotspot/src/share/vm/oops/methodKlass.hpp
+--- openjdk6/hotspot/src/share/vm/oops/methodKlass.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/methodKlass.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)methodKlass.hpp	1.49 07/05/29 09:44:23 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // a methodKlass is the klass of a methodOop
+@@ -48,7 +45,7 @@
+   // Casting from klassOop
+   static methodKlass* cast(klassOop k) {
+     assert(k->klass_part()->oop_is_method(), "cast to methodKlass");
+-    return (methodKlass*) k->klass_part(); 
++    return (methodKlass*) k->klass_part();
+   }
+ 
+   // Sizing
+@@ -85,4 +82,3 @@
+   bool oop_partially_loaded(oop obj) const;
+   void oop_set_partially_loaded(oop obj);
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/methodOop.cpp openjdk/hotspot/src/share/vm/oops/methodOop.cpp
+--- openjdk6/hotspot/src/share/vm/oops/methodOop.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/methodOop.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)methodOop.cpp	1.312 07/06/08 15:21:45 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -114,8 +111,8 @@
+         // we know the exception class => get the constraint class
+         // this may require loading of the constraint class; if verification
+         // fails or some other exception occurs, return handler_bci
+-	klassOop k = pool->klass_at(klass_index, CHECK_(handler_bci));
+-	KlassHandle klass = KlassHandle(THREAD, k);
++        klassOop k = pool->klass_at(klass_index, CHECK_(handler_bci));
++        KlassHandle klass = KlassHandle(THREAD, k);
+         assert(klass.not_null(), "klass not loaded");
+         if (ex_klass->is_subtype_of(klass())) {
+           return handler_bci;
+@@ -139,7 +136,7 @@
+ 
+ 
+ void methodOopDesc::mask_for(int bci, InterpreterOopMap* mask) {
+-    
++
+   Thread* myThread    = Thread::current();
+   methodHandle h_this(myThread, this);
+ #ifdef ASSERT
+@@ -151,7 +148,7 @@
+     if (!VerifyStack && !VerifyLastFrame) {
+       // verify stack calls this outside VM thread
+       warning("oopmap should only be accessed by the "
+-              "VM, GC task or CMS threads (or during debugging)");  
++              "VM, GC task or CMS threads (or during debugging)");
+       InterpreterOopMap local_mask;
+       instanceKlass::cast(method_holder())->mask_for(h_this, bci, &local_mask);
+       local_mask.print();
+@@ -163,7 +160,7 @@
+ }
+ 
+ 
+-int methodOopDesc::bci_from(address bcp) const {  
++int methodOopDesc::bci_from(address bcp) const {
+   assert(is_native() && bcp == code_base() || contains(bcp), "bcp doesn't belong to this method");
+   return bcp - code_base();
+ }
+@@ -218,8 +215,8 @@
+ 
+ 
+ void methodOopDesc::set_interpreter_kind() {
+-  int kind = AbstractInterpreter::method_kind(methodOop(this));
+-  assert(kind != AbstractInterpreter::invalid,
++  int kind = Interpreter::method_kind(methodOop(this));
++  assert(kind != Interpreter::invalid,
+          "interpreter entry must be valid");
+   set_interpreter_kind(kind);
+ }
+@@ -314,8 +311,8 @@
+ }
+ 
+ #ifdef CC_INTERP
+-void methodOopDesc::set_result_index(BasicType type)          { 
+-  _result_index = AbstractInterpreter::BasicType_as_index(type); 
++void methodOopDesc::set_result_index(BasicType type)          {
++  _result_index = Interpreter::BasicType_as_index(type);
+ }
+ #endif
+ 
+@@ -340,7 +337,7 @@
+   //   invokespecial
+   //   indexbyte1
+   //   indexbyte2
+-  // 
++  //
+   // followed by an (optional) sequence of:
+   //
+   //   aload_0
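For reference, a standalone sketch of matching the bytecode prefix described above; the opcode values are the JVM-specified ones, but the matcher itself is illustrative, not HotSpot's:

    #include <cstdint>
    #include <cstdio>

    // Standalone sketch: recognize the simplest "vanilla constructor" shape,
    // aload_0; invokespecial #idx; return. Not the actual HotSpot matcher.
    static const uint8_t ALOAD_0       = 0x2a;
    static const uint8_t INVOKESPECIAL = 0xb7;
    static const uint8_t RETURN        = 0xb1;

    bool starts_like_vanilla_ctor(const uint8_t* code, int len) {
      return len >= 5 &&
             code[0] == ALOAD_0 &&
             code[1] == INVOKESPECIAL && // followed by indexbyte1, indexbyte2
             code[4] == RETURN;          // simplest form: nothing else follows
    }

    int main() {
      const uint8_t ctor[] = { 0x2a, 0xb7, 0x00, 0x01, 0xb1 };
      std::printf("%s\n", starts_like_vanilla_ctor(ctor, 5) ? "vanilla" : "no");
      return 0;
    }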
+@@ -374,41 +371,41 @@
+ }
+ 
+ 
+-bool methodOopDesc::compute_has_loops_flag() {  
++bool methodOopDesc::compute_has_loops_flag() {
+   BytecodeStream bcs(methodOop(this));
+   Bytecodes::Code bc;
+-    
+-  while ((bc = bcs.next()) >= 0) {    
+-    switch( bc ) {        
+-      case Bytecodes::_ifeq: 
+-      case Bytecodes::_ifnull: 
+-      case Bytecodes::_iflt: 
+-      case Bytecodes::_ifle: 
+-      case Bytecodes::_ifne: 
+-      case Bytecodes::_ifnonnull: 
+-      case Bytecodes::_ifgt: 
+-      case Bytecodes::_ifge: 
+-      case Bytecodes::_if_icmpeq: 
+-      case Bytecodes::_if_icmpne: 
+-      case Bytecodes::_if_icmplt: 
+-      case Bytecodes::_if_icmpgt: 
+-      case Bytecodes::_if_icmple: 
+-      case Bytecodes::_if_icmpge: 
++
++  while ((bc = bcs.next()) >= 0) {
++    switch( bc ) {
++      case Bytecodes::_ifeq:
++      case Bytecodes::_ifnull:
++      case Bytecodes::_iflt:
++      case Bytecodes::_ifle:
++      case Bytecodes::_ifne:
++      case Bytecodes::_ifnonnull:
++      case Bytecodes::_ifgt:
++      case Bytecodes::_ifge:
++      case Bytecodes::_if_icmpeq:
++      case Bytecodes::_if_icmpne:
++      case Bytecodes::_if_icmplt:
++      case Bytecodes::_if_icmpgt:
++      case Bytecodes::_if_icmple:
++      case Bytecodes::_if_icmpge:
+       case Bytecodes::_if_acmpeq:
+       case Bytecodes::_if_acmpne:
+-      case Bytecodes::_goto: 
+-      case Bytecodes::_jsr:     
++      case Bytecodes::_goto:
++      case Bytecodes::_jsr:
+         if( bcs.dest() < bcs.next_bci() ) _access_flags.set_has_loops();
+         break;
+ 
+-      case Bytecodes::_goto_w:       
+-      case Bytecodes::_jsr_w:        
+-        if( bcs.dest_w() < bcs.next_bci() ) _access_flags.set_has_loops(); 
++      case Bytecodes::_goto_w:
++      case Bytecodes::_jsr_w:
++        if( bcs.dest_w() < bcs.next_bci() ) _access_flags.set_has_loops();
+         break;
+-    }  
++    }
+   }
+   _access_flags.set_loops_flag_init();
+-  return _access_flags.has_loops(); 
++  return _access_flags.has_loops();
+ }
+ 
+ 
+@@ -509,9 +506,9 @@
+ bool methodOopDesc::is_klass_loaded(int refinfo_index, bool must_be_resolved) const {
+   int klass_index = _constants->klass_ref_index_at(refinfo_index);
+   if (must_be_resolved) {
+-    // Make sure klass is resolved in constantpool.   
++    // Make sure klass is resolved in constantpool.
+     if (constants()->tag_at(klass_index).is_unresolved_klass()) return false;
+-  }  
++  }
+   return is_klass_loaded_by_klass_index(klass_index);
+ }
+ 
+@@ -521,7 +518,7 @@
+   address* native_function = native_function_addr();
+ 
+   // We can see racers trying to place the same native function into place. Once
+-  // is plenty. 
++  // is plenty.
+   address current = *native_function;
+   if (current == function) return;
+   if (post_event_flag && JvmtiExport::should_post_native_method_bind() &&
+@@ -542,7 +539,7 @@
+   nmethod* nm = code(); // Put it into local variable to guard against concurrent updates
+   if (nm != NULL) {
+     nm->make_not_entrant();
+-  }  
++  }
+ }
+ 
+ 
+@@ -560,7 +557,7 @@
+ }
+ 
+ 
+-void methodOopDesc::set_signature_handler(address handler) { 
++void methodOopDesc::set_signature_handler(address handler) {
+   address* signature_handler =  signature_handler_addr();
+   *signature_handler = handler;
+ }
+@@ -605,8 +602,8 @@
+ }
+ 
+ // Revert to using the interpreter and clear out the nmethod
+-void methodOopDesc::clear_code() { 
+-  
++void methodOopDesc::clear_code() {
++
+   // this may be NULL if c2i adapters have not been made yet
+   // Only should happen at allocate time.
+   if (_adapter == NULL) {
+@@ -650,7 +647,7 @@
+ 
+   // Setup interpreter entrypoint
+   assert(this == h_method(), "wrong h_method()" );
+-  address entry = AbstractInterpreter::entry_for_method(h_method);
++  address entry = Interpreter::entry_for_method(h_method);
+   assert(entry != NULL, "interpreter entry must be non-null");
+   // Sets both _i2i_entry and _from_interpreted_entry
+   set_interpreter_entry(entry);
+@@ -674,7 +671,7 @@
+ 
+ }
+ 
+-address methodOopDesc::make_adapters(methodHandle mh, TRAPS) { 
++address methodOopDesc::make_adapters(methodHandle mh, TRAPS) {
+   // If running -Xint we need no adapters.
+   if (Arguments::mode() == Arguments::_int) return NULL;
+ 
+@@ -710,11 +707,11 @@
+ bool methodOopDesc::check_code() const {
+   // cached in a register or local.  There's a race on the value of the field.
+   nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code);
+-  return code == NULL || (code->method() == NULL) || (code->method() == (methodOop)this && !code->is_osr_method()); 
++  return code == NULL || (code->method() == NULL) || (code->method() == (methodOop)this && !code->is_osr_method());
+ }
+ 
+ // Install compiled code.  Instantly it can execute.
+-void methodOopDesc::set_code(methodHandle mh, nmethod *code) { 
++void methodOopDesc::set_code(methodHandle mh, nmethod *code) {
+   assert( code, "use clear_code to remove code" );
+   assert( mh->check_code(), "" );
+ 
+@@ -723,7 +720,7 @@
+   // These writes must happen in this order, because the interpreter will
+   // directly jump to from_interpreted_entry which jumps to an i2c adapter
+   // which jumps to _from_compiled_entry.
+-  mh->_code = code;		// Assign before allowing compiled code to exec
++  mh->_code = code;             // Assign before allowing compiled code to exec
+ 
+   int comp_level = code->comp_level();
+   // In theory there could be a race here. In practice it is unlikely
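The ordering comment above ("Assign before allowing compiled code to exec") is the usual publication pattern. A standalone model using std::atomic, with release/acquire standing in for HotSpot's OrderAccess primitives:

    #include <atomic>

    // Standalone sketch of the publication ordering set_code() relies on:
    // the entry point must be visible before _code is assigned, because
    // readers jump through the entry without rechecking.
    struct Method {
      std::atomic<void*> code{nullptr};
      void* entry = nullptr;

      void set_code(void* nm, void* entry_point) {
        entry = entry_point;                         // write the entry first
        code.store(nm, std::memory_order_release);   // then publish the code
      }
      void* published_code() {
        return code.load(std::memory_order_acquire); // pairs with the release
      }
    };

    int main() { Method m; m.set_code(&m, &m); return m.published_code() ? 0 : 1; }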
+@@ -755,7 +752,7 @@
+       return false;
+     }
+     return true;
+-  }  
++  }
+ 
+   assert(ik->is_subclass_of(method_holder()), "should be subklass");
+   assert(ik->vtable() != NULL, "vtable should exist");
+@@ -768,7 +765,7 @@
+ }
+ 
+ 
+-methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length, 
++methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length,
+                                                 u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS) {
+   // Code below does not work for native methods - they should never get rewritten anyway
+   assert(!m->is_native(), "cannot rewrite native methods");
+@@ -891,7 +888,7 @@
+   for(SignatureStream ss(signature); !ss.is_done(); ss.next()) {
+     if (ss.type() == T_OBJECT) {
+       symbolHandle name(THREAD, ss.as_symbol_or_null());
+-      if (name() == NULL) return true;   
++      if (name() == NULL) return true;
+       klassOop klass = SystemDictionary::find(name, class_loader, protection_domain, THREAD);
+       if (klass == NULL) return true;
+     }
+@@ -996,7 +993,7 @@
+       compareFn compare = (compareFn) (idempotent ? method_compare_idempotent : method_compare);
+       qsort(methods->obj_at_addr(0), length, oopSize, compare);
+     }
+-    
++
+     // Sort annotations if necessary
+     assert(methods_annotations == NULL           || methods_annotations->length() == methods->length(), "");
+     assert(methods_parameter_annotations == NULL || methods_parameter_annotations->length() == methods->length(), "");
+@@ -1058,18 +1055,18 @@
+ }
+ 
+ 
+-void methodOopDesc::print_codes() const {
+-  print_codes(0, code_size());
++void methodOopDesc::print_codes_on(outputStream* st) const {
++  print_codes_on(0, code_size(), st);
+ }
+ 
+-void methodOopDesc::print_codes(int from, int to) const {
++void methodOopDesc::print_codes_on(int from, int to, outputStream* st) const {
+   Thread *thread = Thread::current();
+   ResourceMark rm(thread);
+   methodHandle mh (thread, (methodOop)this);
+   BytecodeStream s(mh);
+   s.set_interval(from, to);
+   BytecodeTracer::set_closure(BytecodeTracer::std_closure());
+-  while (s.next() >= 0) BytecodeTracer::trace(mh, s.bcp());
++  while (s.next() >= 0) BytecodeTracer::trace(mh, s.bcp(), st);
+ }
+ #endif // not PRODUCT
+ 
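The print_codes change in the hunks above follows a common refactoring pattern: keep the no-argument convenience form and move the logic into a *_on(outputStream*) worker so callers can redirect output. A standalone sketch with invented stand-in types:

    #include <cstdio>

    // Standalone sketch: a convenience entry point that defaults to the
    // global stream, delegating to an explicit-stream worker.
    struct Stream { void print(const char* s) { std::fputs(s, stdout); } };
    static Stream global_tty;

    struct Printer {
      void print_codes() { print_codes_on(&global_tty); }      // convenience form
      void print_codes_on(Stream* st) { st->print("codes\n"); } // explicit form
    };

    int main() { Printer p; p.print_codes(); return 0; }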
+diff -ruN openjdk6/hotspot/src/share/vm/oops/methodOop.hpp openjdk/hotspot/src/share/vm/oops/methodOop.hpp
+--- openjdk6/hotspot/src/share/vm/oops/methodOop.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/methodOop.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)methodOop.hpp	1.219 07/05/17 15:57:04 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,10 +19,10 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+-// A methodOop represents a Java method. 
++// A methodOop represents a Java method.
+ //
+ // Memory layout (each line represents a word). Note that most applications load thousands of methods,
+ // so keeping the size of this structure small has a big impact on footprint.
+@@ -39,10 +36,10 @@
+ // Accessing the checked exceptions table is used by reflection, so we put that last to make access
+ // to it fast.
+ //
+-// The line number table is compressed and inlined following the byte codes. It is found as the first 
+-// byte following the byte codes. The checked exceptions table and the local variable table are inlined 
++// The line number table is compressed and inlined following the byte codes. It is found as the first
++// byte following the byte codes. The checked exceptions table and the local variable table are inlined
+ // after the line number table, and indexed from the end of the method. We do not compress the checked
+-// exceptions table since the average length is less than 2, and do not bother to compress the local 
++// exceptions table since the average length is less than 2, and do not bother to compress the local
+ // variable table either since it is mostly absent.
+ //
+ // Note that native_function and signature_handler have to be at fixed offsets (required by the interpreter)
+@@ -54,7 +51,7 @@
+ // | constMethodOop                 (oop)                 |
+ // | constants                      (oop)                 |
+ // |------------------------------------------------------|
+-// | methodData                     (oop)                 | 
++// | methodData                     (oop)                 |
+ // | interp_invocation_count                              |
+ // |------------------------------------------------------|
+ // | access_flags                                         |
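A standalone sketch of the "inlined, indexed from the end" layout the comment above describes, with invented sizes; it only illustrates the back-from-the-end addressing, not the real constMethodOop layout:

    #include <cstdint>
    #include <cstdio>

    // Standalone sketch: variable-sized sections follow the bytecodes, and
    // tables that must be reached quickly are addressed back from the end.
    struct FlatMethod {
      uint8_t buf[64];
      int     total;            // total size in bytes (illustrative)
      int     exc_table_bytes;  // checked-exceptions table size (illustrative)

      uint8_t* code_base()       { return buf; }
      uint8_t* exc_table_start() { return buf + total - exc_table_bytes; }
    };

    int main() {
      FlatMethod m{};
      m.total = 64; m.exc_table_bytes = 8;
      std::printf("table offset: %td\n", m.exc_table_start() - m.code_base());
      return 0;
    }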
+@@ -117,12 +114,12 @@
+   int               _compiled_invocation_count;  // Number of nmethod invocations so far (for perf. debugging)
+ #endif
+   // Entry point for calling both from and to the interpreter.
+-  address _i2i_entry;		// All-args-on-stack calling convention
++  address _i2i_entry;           // All-args-on-stack calling convention
+   // Adapter blob (i2c/c2i) for this methodOop. Set once when method is linked.
+   AdapterHandlerEntry* _adapter;
+   // Entry point for calling from compiled code, to compiled code if it exists
+   // or else the interpreter.
+-  volatile address _from_compiled_entry;	// Cache of: _code ? _code->entry_point() : _adapter->c2i_entry()
++  volatile address _from_compiled_entry;        // Cache of: _code ? _code->entry_point() : _adapter->c2i_entry()
+   // The entry point for calling both from and to compiled code is
+   // "_code->entry_point()".  Because of tiered compilation and de-opt, this
+   // field can come and go.  It can transition from NULL to not-null at any
+@@ -226,11 +223,11 @@
+   void clear_intrinsic_id_cache() { _intrinsic_id_cache = 0; }
+ 
+   // Count of times method was exited via exception while interpreting
+-  void interpreter_throwout_increment() { 
++  void interpreter_throwout_increment() {
+     if (_interpreter_throwout_count < 65534) {
+       _interpreter_throwout_count++;
+     }
+-  }   
++  }
+ 
+   int  interpreter_throwout_count() const        { return _interpreter_throwout_count; }
+   void set_interpreter_throwout_count(int count) { _interpreter_throwout_count = count; }
+@@ -238,11 +235,11 @@
+   // size of parameters
+   int  size_of_parameters() const                { return _size_of_parameters; }
+ 
+-  bool has_stackmap_table() const { 
+-    return constMethod()->has_stackmap_table(); 
++  bool has_stackmap_table() const {
++    return constMethod()->has_stackmap_table();
+   }
+ 
+-  typeArrayOop stackmap_data() const { 
++  typeArrayOop stackmap_data() const {
+     return constMethod()->stackmap_data();
+   }
+ 
+@@ -259,9 +256,9 @@
+   // for ex_klass indicates that the exception klass is not known; in
+   // this case it matches any constraint class. Returns -1 if the
+   // exception cannot be handled in this method. The handler
+-  // constraint classes are loaded if necessary. Note that this may 
++  // constraint classes are loaded if necessary. Note that this may
+   // throw an exception if loading of the constraint classes causes
+-  // an IllegalAccessError (bugid 4307310) or an OutOfMemoryError. 
++  // an IllegalAccessError (bugid 4307310) or an OutOfMemoryError.
+   // If an exception is thrown, returns the bci of the
+   // exception handler which caused the exception to be thrown, which
+   // is needed for proper retries. See, for example,
+@@ -269,10 +266,10 @@
+   int fast_exception_handler_bci_for(KlassHandle ex_klass, int throw_bci, TRAPS);
+ 
+   // method data access
+-  methodDataOop method_data() const              { 
++  methodDataOop method_data() const              {
+     return _method_data;
+   }
+-  void set_method_data(methodDataOop data)       { 
++  void set_method_data(methodDataOop data)       {
+     oop_store_without_check((oop*)&_method_data, (oop)data);
+   }
+ 
+@@ -301,7 +298,7 @@
+ 
+   // nmethod/verified compiler entry
+   address verified_code_entry();
+-  bool check_code() const;	// Not inline to avoid circular ref
++  bool check_code() const;      // Not inline to avoid circular ref
+   nmethod* volatile code() const                 { assert( check_code(), "" ); return (nmethod *)OrderAccess::load_ptr_acquire(&_code); }
+   void clear_code();            // Clear out any compiled code
+   void set_code(methodHandle mh, nmethod* code);
+@@ -368,8 +365,10 @@
+   address code_base() const           { return constMethod()->code_base(); }
+   bool    contains(address bcp) const { return constMethod()->contains(bcp); }
+ 
+-  void print_codes() const                       PRODUCT_RETURN; // prints byte codes
+-  void print_codes(int from, int to) const       PRODUCT_RETURN;
++  // prints byte codes
++  void print_codes() const            { print_codes_on(tty); }
++  void print_codes_on(outputStream* st) const                      PRODUCT_RETURN;
++  void print_codes_on(int from, int to, outputStream* st) const    PRODUCT_RETURN;
+ 
+   // checked exceptions
+   int checked_exceptions_length() const
+@@ -396,6 +395,7 @@
+   void compute_size_of_parameters(Thread *thread); // word size of parameters (receiver if any + arguments)
+   symbolOop klass_name() const;                  // returns the name of the method holder
+   BasicType result_type() const;                 // type of the method result
++  int result_type_index() const;                 // type index of the method result
+   bool is_returning_oop() const                  { BasicType r = result_type(); return (r == T_OBJECT || r == T_ARRAY); }
+   bool is_returning_fp() const                   { BasicType r = result_type(); return (r == T_FLOAT || r == T_DOUBLE); }
+ 
+@@ -414,7 +414,7 @@
+   bool is_abstract() const                       { return access_flags().is_abstract();    }
+   bool is_strict() const                         { return access_flags().is_strict();      }
+   bool is_synthetic() const                      { return access_flags().is_synthetic();   }
+-  
++
+   // returns true if contains only return operation
+   bool is_empty_method() const;
+ 
+@@ -429,13 +429,13 @@
+   bool can_be_statically_bound() const;
+ 
+   // returns true if the method has any backward branches.
+-  bool has_loops() { 
+-    return access_flags().loops_flag_init() ? access_flags().has_loops() : compute_has_loops_flag(); 
++  bool has_loops() {
++    return access_flags().loops_flag_init() ? access_flags().has_loops() : compute_has_loops_flag();
+   };
+ 
+   bool compute_has_loops_flag();
+-  
+-  bool has_jsrs() { 
++
++  bool has_jsrs() {
+     return access_flags().has_jsrs();
+   };
+   void set_has_jsrs() {
+@@ -443,11 +443,11 @@
+   }
+ 
+   // returns true if the method has any monitors.
+-  bool has_monitors() const                      { return is_synchronized() || access_flags().has_monitor_bytecodes(); } 
++  bool has_monitors() const                      { return is_synchronized() || access_flags().has_monitor_bytecodes(); }
+   bool has_monitor_bytecodes() const             { return access_flags().has_monitor_bytecodes(); }
+ 
+   void set_has_monitor_bytecodes()               { _access_flags.set_has_monitor_bytecodes(); }
+-  
++
+   // monitor matching. This returns a conservative estimate of whether the monitorenter/monitorexit bytecodes
+   // properly nest in the method. It might return false, even though they actually nest properly, since the info
+   // has not been computed yet.
+@@ -482,10 +482,10 @@
+   static ByteSize size_of_locals_offset()        { return byte_offset_of(methodOopDesc, _max_locals        ); }
+   static ByteSize size_of_parameters_offset()    { return byte_offset_of(methodOopDesc, _size_of_parameters); }
+   static ByteSize from_compiled_offset()         { return byte_offset_of(methodOopDesc, _from_compiled_entry); }
+-  static ByteSize code_offset()                  { return byte_offset_of(methodOopDesc, _code); }  
++  static ByteSize code_offset()                  { return byte_offset_of(methodOopDesc, _code); }
+   static ByteSize invocation_counter_offset()    { return byte_offset_of(methodOopDesc, _invocation_counter); }
+   static ByteSize backedge_counter_offset()      { return byte_offset_of(methodOopDesc, _backedge_counter); }
+-  static ByteSize method_data_offset()           { 
++  static ByteSize method_data_offset()           {
+     return byte_offset_of(methodOopDesc, _method_data);
+   }
+   static ByteSize interpreter_invocation_counter_offset() { return byte_offset_of(methodOopDesc, _interpreter_invocation_count); }
+@@ -496,11 +496,11 @@
+   static ByteSize from_interpreted_offset()      { return byte_offset_of(methodOopDesc, _from_interpreted_entry ); }
+   static ByteSize interpreter_entry_offset()     { return byte_offset_of(methodOopDesc, _i2i_entry ); }
+   static ByteSize signature_handler_offset()     { return in_ByteSize(sizeof(methodOopDesc) + wordSize);      }
+-  static ByteSize max_stack_offset()             { return byte_offset_of(methodOopDesc, _max_stack         ); } 
++  static ByteSize max_stack_offset()             { return byte_offset_of(methodOopDesc, _max_stack         ); }
+ 
+   // for code generation
+   static int method_data_offset_in_bytes()       { return offset_of(methodOopDesc, _method_data); }
+-  static int interpreter_invocation_counter_offset_in_bytes()       
++  static int interpreter_invocation_counter_offset_in_bytes()
+                                                  { return offset_of(methodOopDesc, _interpreter_invocation_count); }
+ 
+   // Static methods that are used to implement member methods where an exposed this pointer
+@@ -530,7 +530,7 @@
+   void set_is_prefixed_native()                     { _access_flags.set_is_prefixed_native(); }
+ 
+   // Rewriting support
+-  static methodHandle clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length, 
++  static methodHandle clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length,
+                                           u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS);
+ 
+   // Get this method's jmethodID -- allocate if it doesn't exist
+@@ -562,7 +562,7 @@
+     }
+   }
+ 
+-  // On-stack replacement support   
++  // On-stack replacement support
+   bool has_osr_nmethod()                         { return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, InvocationEntryBci) != NULL; }
+   nmethod* lookup_osr_nmethod_for(int bci)       { return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, bci); }
+ 
+@@ -737,5 +737,3 @@
+   void set(methodOop method);
+   void clear(methodOop method);
+ };
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/objArrayKlass.cpp openjdk/hotspot/src/share/vm/oops/objArrayKlass.cpp
+--- openjdk6/hotspot/src/share/vm/oops/objArrayKlass.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/objArrayKlass.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)objArrayKlass.cpp	1.147 07/05/29 09:44:23 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -33,7 +30,7 @@
+   return objArrayOop(obj)->object_size();
+ }
+ 
+-objArrayOop objArrayKlass::allocate(int length, TRAPS) {  
++objArrayOop objArrayKlass::allocate(int length, TRAPS) {
+   if (length >= 0) {
+     if (length <= arrayOopDesc::max_array_length(T_OBJECT)) {
+       int size = objArrayOopDesc::object_size(length);
+@@ -51,7 +48,7 @@
+ 
+ static int multi_alloc_counter = 0;
+ 
+-oop objArrayKlass::multi_allocate(int rank, jint* sizes, TRAPS) { 
++oop objArrayKlass::multi_allocate(int rank, jint* sizes, TRAPS) {
+   int length = *sizes;
+   // Call to lower_dimension uses this pointer, so it must be called before a
+   // possible GC
+@@ -62,7 +59,7 @@
+   objArrayHandle h_array (THREAD, array);
+   if (rank > 1) {
+     if (length != 0) {
+-      for (int index = 0; index < length; index++) {  
++      for (int index = 0; index < length; index++) {
+         arrayKlass* ak = arrayKlass::cast(h_lower_dimension());
+         oop sub_array = ak->multi_allocate(rank-1, &sizes[1], CHECK_NULL);
+         assert(sub_array->is_parsable(), "Don't publish until parsable");
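A standalone model of the multi_allocate() recursion above: allocate the outer dimension, then recurse with rank-1 and &sizes[1] for each element (plain C++ containers stand in for oops):

    #include <vector>
    #include <cstdio>

    // Standalone sketch of the rank/sizes recursion; memory is deliberately
    // left to the process to reclaim, since allocation policy is out of scope.
    struct Arr {
      int length;
      std::vector<Arr*> elems;
    };

    Arr* multi_allocate(int rank, const int* sizes) {
      Arr* a = new Arr{ sizes[0], {} };        // allocate this dimension
      if (rank > 1) {
        for (int i = 0; i < a->length; i++)    // recurse for each element
          a->elems.push_back(multi_allocate(rank - 1, sizes + 1));
      }
      return a;
    }

    int main() {
      int sizes[] = { 2, 3 };                  // models new Object[2][3]
      Arr* a = multi_allocate(2, sizes);
      std::printf("%d x %d\n", a->length, a->elems[0]->length);
      return 0;
    }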
+@@ -119,7 +116,7 @@
+   assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
+ 
+   if (s == d) {
+-    // since source and destination are equal we do not need conversion checks. 
++    // since source and destination are equal we do not need conversion checks.
+     assert(length > 0, "sanity check");
+     Copy::conjoint_oops_atomic(src, dst, length);
+   } else {
+@@ -139,7 +136,7 @@
+         if (element == NULL || Klass::cast(element->klass())->is_subtype_of(bound)) {
+           *p = element;
+         } else {
+-	  // We must do a barrier to cover the partial copy.
++          // We must do a barrier to cover the partial copy.
+           const size_t done_word_len = pointer_delta(p, dst, oopSize) *
+                                        HeapWordsPerOop;
+           bs->write_ref_array(MemRegion((HeapWord*)dst, done_word_len));
+@@ -159,15 +156,15 @@
+ }
+ 
+ 
+-klassOop objArrayKlass::array_klass_impl(objArrayKlassHandle this_oop, bool or_null, int n, TRAPS) {  
+-  
++klassOop objArrayKlass::array_klass_impl(objArrayKlassHandle this_oop, bool or_null, int n, TRAPS) {
++
+   assert(this_oop->dimension() <= n, "check order of chain");
+   int dimension = this_oop->dimension();
+-  if (dimension == n) 
++  if (dimension == n)
+     return this_oop();
+ 
+   objArrayKlassHandle ak (THREAD, this_oop->higher_dimension());
+-  if (ak.is_null()) {    
++  if (ak.is_null()) {
+     if (or_null)  return NULL;
+ 
+     ResourceMark rm;
+@@ -182,11 +179,11 @@
+       if( ak.is_null() ) {
+ 
+         // Create multi-dim klass object and link them together
+-        klassOop new_klass = 
++        klassOop new_klass =
+           objArrayKlassKlass::cast(Universe::objArrayKlassKlassObj())->
+           allocate_objArray_klass(dimension + 1, this_oop, CHECK_NULL);
+         ak = objArrayKlassHandle(THREAD, new_klass);
+-        this_oop->set_higher_dimension(ak());    
++        this_oop->set_higher_dimension(ak());
+         ak->set_lower_dimension(this_oop());
+         assert(ak->oop_is_objArray(), "incorrect initialization of objArrayKlass");
+       }
+@@ -258,27 +255,27 @@
+   oop* base      = (oop*)a->base(T_OBJECT);
+   oop* const end = base + a->length();
+   while (base < end) {
+-    if (*base != NULL) 
++    if (*base != NULL)
+       // we call mark_and_follow here to avoid excessive marking stack usage
+-      MarkSweep::mark_and_follow(base); 
++      MarkSweep::mark_and_follow(base);
+     base++;
+-  }  
++  }
+ }
+ 
+ #ifndef SERIALGC
+ void objArrayKlass::oop_follow_contents(ParCompactionManager* cm,
+-					oop obj) {
++                                        oop obj) {
+   assert (obj->is_array(), "obj must be array");
+   arrayOop a = arrayOop(obj);
+   a->follow_header(cm);
+   oop* base      = (oop*)a->base(T_OBJECT);
+   oop* const end = base + a->length();
+   while (base < end) {
+-    if (*base != NULL) 
++    if (*base != NULL)
+       // we call mark_and_follow here to avoid excessive marking stack usage
+-      PSParallelCompact::mark_and_follow(cm, base); 
++      PSParallelCompact::mark_and_follow(cm, base);
+     base++;
+-  }  
++  }
+ }
+ #endif // SERIALGC
+ 
+@@ -376,7 +373,7 @@
+   while (base < end) {
+     MarkSweep::adjust_pointer(base);
+     base++;
+-  }  
++  }
+   return size;
+ }
+ 
+@@ -389,7 +386,7 @@
+   oop* end = curr + objArrayOop(obj)->length();
+   //  assert(align_object_size(end - (oop*)obj) == oop_size(obj), "checking size");
+   assert(align_object_size(pointer_delta(end, obj, sizeof(oop*)))
+-	                          == oop_size(obj), "checking size");
++                                  == oop_size(obj), "checking size");
+ 
+   // Iterate over oops
+   while (curr < end) {
+@@ -408,7 +405,7 @@
+   oop* end = curr + objArrayOop(obj)->length();
+   //  assert(align_object_size(end - (oop*)obj) == oop_size(obj), "checking size");
+   assert(align_object_size(pointer_delta(end, obj, sizeof(oop*)))
+-	                          == oop_size(obj), "checking size");
++                                  == oop_size(obj), "checking size");
+ 
+   // Iterate over oops
+   while (curr < end) {
+@@ -433,7 +430,7 @@
+ }
+ 
+ int objArrayKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
+-				       HeapWord* beg_addr, HeapWord* end_addr) {
++                                       HeapWord* beg_addr, HeapWord* end_addr) {
+   assert (obj->is_objArray(), "obj must be obj array");
+   objArrayOop a = objArrayOop(obj);
+ 
+@@ -456,7 +453,7 @@
+     return JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC;
+   }
+   // Recurse down the element list
+-  jint element_flags = Klass::cast(element_klass())->compute_modifier_flags(CHECK_0);  
++  jint element_flags = Klass::cast(element_klass())->compute_modifier_flags(CHECK_0);
+ 
+   return (element_flags & (JVM_ACC_PUBLIC | JVM_ACC_PRIVATE | JVM_ACC_PROTECTED))
+                         | (JVM_ACC_ABSTRACT | JVM_ACC_FINAL);
+diff -ruN openjdk6/hotspot/src/share/vm/oops/objArrayKlass.hpp openjdk/hotspot/src/share/vm/oops/objArrayKlass.hpp
+--- openjdk6/hotspot/src/share/vm/oops/objArrayKlass.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/objArrayKlass.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)objArrayKlass.hpp	1.87 07/05/29 09:44:23 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // objArrayKlass is the klass for objArrays
+@@ -77,7 +74,7 @@
+   // Casting from klassOop
+   static objArrayKlass* cast(klassOop k) {
+     assert(k->klass_part()->oop_is_objArray_slow(), "cast to objArrayKlass");
+-    return (objArrayKlass*) k->klass_part(); 
++    return (objArrayKlass*) k->klass_part();
+   }
+ 
+   // Sizing
+@@ -129,4 +126,3 @@
+   void oop_verify_old_oop(oop obj, oop* p, bool allow_dirty);
+ 
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/objArrayKlassKlass.cpp openjdk/hotspot/src/share/vm/oops/objArrayKlassKlass.cpp
+--- openjdk6/hotspot/src/share/vm/oops/objArrayKlassKlass.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/objArrayKlassKlass.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)objArrayKlassKlass.cpp	1.79 07/05/29 09:44:24 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -30,7 +27,7 @@
+ 
+ klassOop objArrayKlassKlass::create_klass(TRAPS) {
+   objArrayKlassKlass o;
+-  KlassHandle h_this_klass(THREAD, Universe::klassKlassObj());  
++  KlassHandle h_this_klass(THREAD, Universe::klassKlassObj());
+   KlassHandle k = base_create_klass(h_this_klass, header_size(), o.vtbl_value(), CHECK_0);
+   assert(k()->size() == align_object_size(header_size()), "wrong size for object");
+   java_lang_Class::create_mirror(k, CHECK_0); // Allocate mirror
+@@ -53,7 +50,7 @@
+   return allocate_objArray_klass_impl(this_oop, n, element_klass, THREAD);
+ }
+ 
+-klassOop objArrayKlassKlass::allocate_objArray_klass_impl(objArrayKlassKlassHandle this_oop, 
++klassOop objArrayKlassKlass::allocate_objArray_klass_impl(objArrayKlassKlassHandle this_oop,
+                                                           int n, KlassHandle element_klass, TRAPS) {
+ 
+   // Eagerly allocate the direct array supertype.
+@@ -66,32 +63,32 @@
+       bool supers_exist = super_klass.not_null();
+       // Also, see if the element has secondary supertypes.
+       // We need an array type for each.
+-      objArrayHandle element_supers = objArrayHandle(THREAD, 
++      objArrayHandle element_supers = objArrayHandle(THREAD,
+                                             element_klass->secondary_supers());
+       for( int i = element_supers->length()-1; i >= 0; i-- ) {
+-	klassOop elem_super = (klassOop) element_supers->obj_at(i);
+-	if (Klass::cast(elem_super)->array_klass_or_null() == NULL) {
+-	  supers_exist = false;
+-	  break;
+-	}
++        klassOop elem_super = (klassOop) element_supers->obj_at(i);
++        if (Klass::cast(elem_super)->array_klass_or_null() == NULL) {
++          supers_exist = false;
++          break;
++        }
+       }
+       if (!supers_exist) {
+-	// Oops.  Not allocated yet.  Back out, allocate it, and retry.
++        // Oops.  Not allocated yet.  Back out, allocate it, and retry.
+ #ifndef PRODUCT
+-	if (WizardMode) {
++        if (WizardMode) {
+           tty->print_cr("Must retry array klass creation for depth %d",n);
+         }
+ #endif
+         KlassHandle ek;
+         {
+-	  MutexUnlocker mu(MultiArray_lock);
+-	  MutexUnlocker mc(Compile_lock);   // for vtables
+-	  klassOop sk = element_super->array_klass(CHECK_0);
+-	  super_klass = KlassHandle(THREAD, sk);
+-	  for( int i = element_supers->length()-1; i >= 0; i-- ) {
+-	    KlassHandle elem_super (THREAD, element_supers->obj_at(i));
+-	    elem_super->array_klass(CHECK_0);
+-	  }
++          MutexUnlocker mu(MultiArray_lock);
++          MutexUnlocker mc(Compile_lock);   // for vtables
++          klassOop sk = element_super->array_klass(CHECK_0);
++          super_klass = KlassHandle(THREAD, sk);
++          for( int i = element_supers->length()-1; i >= 0; i-- ) {
++            KlassHandle elem_super (THREAD, element_supers->obj_at(i));
++            elem_super->array_klass(CHECK_0);
++          }
+           // Now retry from the beginning
+           klassOop klass_oop = element_klass->array_klass(n, CHECK_0);
+           // Create a handle because the enclosing brace, when locking
+@@ -112,7 +109,7 @@
+   // get a handle to the new objArrayKlass we want to construct.  We cannot
+   // block while holding a handling to a partly initialized object.
+   symbolHandle name = symbolHandle();
+-  
++
+   if (!element_klass->oop_is_symbol()) {
+     ResourceMark rm(THREAD);
+     char *name_str = element_klass->name()->as_C_string();
+@@ -122,24 +119,24 @@
+     new_str[idx++] = '[';
+     if (element_klass->oop_is_instance()) { // it could be an array or simple type
+       new_str[idx++] = 'L';
+-    } 
++    }
+     memcpy(&new_str[idx], name_str, len * sizeof(char));
+     idx += len;
+     if (element_klass->oop_is_instance()) {
+       new_str[idx++] = ';';
+     }
+     new_str[idx++] = '\0';
+-    name = oopFactory::new_symbol_handle(new_str, CHECK_0);    
+-  } 
++    name = oopFactory::new_symbol_handle(new_str, CHECK_0);
++  }
+ 
+-  objArrayKlass o;  
+-  arrayKlassHandle k = arrayKlass::base_create_array_klass(o.vtbl_value(), 
+-                                                           objArrayKlass::header_size(), 
+-                                                          this_oop, 
++  objArrayKlass o;
++  arrayKlassHandle k = arrayKlass::base_create_array_klass(o.vtbl_value(),
++                                                           objArrayKlass::header_size(),
++                                                          this_oop,
+                                                            CHECK_0);
+ 
+ 
+-  // Initialize instance variables  
++  // Initialize instance variables
+   objArrayKlass* oak = objArrayKlass::cast(k());
+   oak->set_dimension(n);
+   oak->set_element_klass(element_klass());
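The name-building code in the hunk above constructs the JVM array descriptor for the element class. A standalone sketch of the same construction (illustrative, with simplified bounds handling):

    #include <cstring>
    #include <cstdio>

    // Standalone sketch: an array of an instance class K gets the descriptor
    // "[LK;", while an array of an array type just gets a leading '['.
    void array_name(const char* elem, bool is_instance, char* out) {
      int idx = 0;
      out[idx++] = '[';
      if (is_instance) out[idx++] = 'L';
      std::strcpy(&out[idx], elem);
      idx += (int)std::strlen(elem);
      if (is_instance) out[idx++] = ';';
      out[idx] = '\0';
    }

    int main() {
      char buf[64];
      array_name("java/lang/String", true, buf);
      std::printf("%s\n", buf);   // prints [Ljava/lang/String;
      return 0;
    }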
+@@ -178,7 +175,7 @@
+ 
+ #ifndef SERIALGC
+ void objArrayKlassKlass::oop_follow_contents(ParCompactionManager* cm,
+-					     oop obj) {
++                                             oop obj) {
+   assert(obj->is_klass(), "must be klass");
+   assert(klassOop(obj)->klass_part()->oop_is_objArray_slow(), "must be obj array");
+ 
+@@ -252,8 +249,8 @@
+ }
+ 
+ int objArrayKlassKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
+-					    HeapWord* beg_addr,
+-					    HeapWord* end_addr) {
++                                            HeapWord* beg_addr,
++                                            HeapWord* end_addr) {
+   assert(obj->is_klass(), "must be klass");
+   assert(klassOop(obj)->klass_part()->oop_is_objArray_slow(), "must be obj array");
+ 
+@@ -309,4 +306,3 @@
+   Klass* bk = Klass::cast(oak->bottom_klass());
+   guarantee(bk->oop_is_instance() || bk->oop_is_typeArray(),  "invalid bottom klass");
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/objArrayKlassKlass.hpp openjdk/hotspot/src/share/vm/oops/objArrayKlassKlass.hpp
+--- openjdk6/hotspot/src/share/vm/oops/objArrayKlassKlass.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/objArrayKlassKlass.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)objArrayKlassKlass.hpp	1.48 07/05/29 09:44:24 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The objArrayKlassKlass is klass for all objArrayKlass'
+@@ -45,7 +42,7 @@
+   // Casting from klassOop
+   static objArrayKlassKlass* cast(klassOop k) {
+     assert(k->klass_part()->oop_is_klass(), "cast to objArrayKlassKlass");
+-    return (objArrayKlassKlass*) k->klass_part(); 
++    return (objArrayKlassKlass*) k->klass_part();
+   }
+ 
+   // Sizing
+@@ -69,7 +66,7 @@
+ 
+ #ifndef PRODUCT
+  public:
+-  // Printing 
++  // Printing
+   void oop_print_on(oop obj, outputStream* st);
+   void oop_print_value_on(oop obj, outputStream* st);
+ #endif
+@@ -80,4 +77,3 @@
+   void oop_verify_on(oop obj, outputStream* st);
+ 
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/objArrayOop.cpp openjdk/hotspot/src/share/vm/oops/objArrayOop.cpp
+--- openjdk6/hotspot/src/share/vm/oops/objArrayOop.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/objArrayOop.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)objArrayOop.cpp	1.14 07/05/05 17:06:07 JVM"
+-#endif
+ /*
+  * Copyright 1997 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,11 +19,10 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_objArrayOop.cpp.incl"
+ 
+ // <<this page is intentionally left blank>>
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/objArrayOop.hpp openjdk/hotspot/src/share/vm/oops/objArrayOop.hpp
+--- openjdk6/hotspot/src/share/vm/oops/objArrayOop.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/objArrayOop.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)objArrayOop.hpp	1.29 07/05/05 17:06:07 JVM"
+-#endif
+ /*
+  * Copyright 1997-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // An objArrayOop is an array containing oops.
+@@ -37,7 +34,7 @@
+ 
+   // Sizing
+   static int header_size()              { return arrayOopDesc::header_size(T_OBJECT); }
+-  static int object_size(int length)    { return align_object_size(header_size() + length); }  
++  static int object_size(int length)    { return align_object_size(header_size() + length); }
+   int object_size()                     { return object_size(length()); }
+ 
+   // Returns the address of the index'th element
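A worked example of the object_size() formula above, with illustrative constants for the header size and alignment (the real values come from arrayOopDesc and the VM's alignment settings):

    #include <cstdio>

    // Standalone sketch, in HeapWords: an object array needs header_size
    // words plus one word per element, rounded up to the allocation
    // alignment. Constants are illustrative, not HotSpot's.
    const int kHeaderWords = 2;   // assumed header size for this example
    const int kAlignWords  = 2;   // assumed object alignment in words

    int object_size(int length) {
      int words = kHeaderWords + length;                 // header + one oop each
      return (words + kAlignWords - 1) / kAlignWords * kAlignWords;  // align up
    }

    int main() {
      std::printf("%d %d\n", object_size(3), object_size(4));  // prints 6 6
      return 0;
    }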
+diff -ruN openjdk6/hotspot/src/share/vm/oops/oop.cpp openjdk/hotspot/src/share/vm/oops/oop.cpp
+--- openjdk6/hotspot/src/share/vm/oops/oop.cpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/oop.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)oop.cpp	1.99 07/05/29 09:44:21 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -82,13 +79,13 @@
+ 
+ void oopDesc::print_address() { print_address_on(tty); }
+ 
+-char* oopDesc::print_string() { 
++char* oopDesc::print_string() {
+   stringStream* st = new stringStream();
+   print_on(st);
+   return st->as_string();
+ }
+ 
+-char* oopDesc::print_value_string() { 
++char* oopDesc::print_value_string() {
+   stringStream* st = new stringStream();
+   print_value_on(st);
+   return st->as_string();
+@@ -103,8 +100,8 @@
+ }
+ 
+ 
+-void oopDesc::verify() { 
+-  verify_on(tty); 
++void oopDesc::verify() {
++  verify_on(tty);
+ }
+ 
+ 
+@@ -124,7 +121,7 @@
+ 
+ 
+ intptr_t oopDesc::slow_identity_hash() {
+-  // slow case; we have to acquire the micro lock in order to locate the header  
++  // slow case; we have to acquire the micro lock in order to locate the header
+   ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
+   HandleMark hm;
+   Handle object((oop)this);
+diff -ruN openjdk6/hotspot/src/share/vm/oops/oop.hpp openjdk/hotspot/src/share/vm/oops/oop.hpp
+--- openjdk6/hotspot/src/share/vm/oops/oop.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/oop.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)oop.hpp	1.116 07/05/29 09:44:21 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // oopDesc is the top baseclass for objects classes.  The {name}Desc classes describe
+@@ -193,7 +190,7 @@
+   void release_double_field_put(int offset, jdouble contents);
+ 
+   // printing functions for VM debugging
+-  void print_on(outputStream* st) const;         // First level print 
++  void print_on(outputStream* st) const;         // First level print
+   void print_value_on(outputStream* st) const;   // Second level print.
+   void print_address_on(outputStream* st) const; // Address printing
+ 
+@@ -239,22 +236,22 @@
+   void copy_contents(PSPromotionManager* pm);
+   void push_contents(PSPromotionManager* pm);
+ 
+-  // Parallel Old 
++  // Parallel Old
+   void update_contents(ParCompactionManager* cm);
+   void update_contents(ParCompactionManager* cm,
+-		       HeapWord* begin_limit,
+-		       HeapWord* end_limit);
++                       HeapWord* begin_limit,
++                       HeapWord* end_limit);
+   void update_contents(ParCompactionManager* cm,
+-		       klassOop old_klass,
+-		       HeapWord* begin_limit,
+-	               HeapWord* end_limit);
++                       klassOop old_klass,
++                       HeapWord* begin_limit,
++                       HeapWord* end_limit);
+ 
+   void follow_contents(ParCompactionManager* cm);
+   void follow_header(ParCompactionManager* cm);
+ #endif // SERIALGC
+ 
+   bool is_perm() const;
+-  bool is_perm_and_alloced() const;
++  bool is_perm_or_null() const;
+   bool is_shared() const;
+   bool is_shared_readonly() const;
+   bool is_shared_readwrite() const;
+@@ -302,8 +299,8 @@
+   int oop_iterate(OopClosureType* blk);                                  \
+   int oop_iterate(OopClosureType* blk, MemRegion mr);  // Only in mr.
+ 
+-  ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DECL) 
+-  ALL_OOP_OOP_ITERATE_CLOSURES_3(OOP_ITERATE_DECL) 
++  ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DECL)
++  ALL_OOP_OOP_ITERATE_CLOSURES_3(OOP_ITERATE_DECL)
+ 
+   void oop_iterate_header(OopClosure* blk);
+   void oop_iterate_header(OopClosure* blk, MemRegion mr);
+diff -ruN openjdk6/hotspot/src/share/vm/oops/oop.inline2.hpp openjdk/hotspot/src/share/vm/oops/oop.inline2.hpp
+--- openjdk6/hotspot/src/share/vm/oops/oop.inline2.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/oop.inline2.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)oop.inline2.hpp	1.12 07/05/05 17:06:07 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Implementation of all inlined member functions defined in oop.hpp
+@@ -33,13 +30,7 @@
+   return Universe::heap()->is_in_permanent(this);
+ }
+ 
+-// is_perm only verifies that oop is in the reserved space for
+-// the perm gen. Things like forte stackwalking need something that
+-// assures us that the pointer is in the commited area so we don't
+-// segv checking suspicious frame contents.
+-
+-inline bool oopDesc::is_perm_and_alloced() const {
+-  return Universe::heap()->is_permanent(this);
++// Check for NULL also.
++inline bool oopDesc::is_perm_or_null() const {
++  return this == NULL || is_perm();
+ }
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/oop.inline.hpp openjdk/hotspot/src/share/vm/oops/oop.inline.hpp
+--- openjdk6/hotspot/src/share/vm/oops/oop.inline.hpp	2008-02-28 05:02:38.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/oop.inline.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)oop.inline.hpp	1.141 07/06/12 15:40:38 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Implementation of all inlined member functions defined in oop.hpp
+@@ -65,7 +62,7 @@
+ inline bool oopDesc::is_klass()              const { return blueprint()->oop_is_klass(); }
+ inline bool oopDesc::is_thread()             const { return blueprint()->oop_is_thread(); }
+ inline bool oopDesc::is_method()             const { return blueprint()->oop_is_method(); }
+-inline bool oopDesc::is_constMethod()	     const { return blueprint()->oop_is_constMethod(); }
++inline bool oopDesc::is_constMethod()        const { return blueprint()->oop_is_constMethod(); }
+ inline bool oopDesc::is_methodData()         const { return blueprint()->oop_is_methodData(); }
+ inline bool oopDesc::is_constantPool()       const { return blueprint()->oop_is_constantPool(); }
+ inline bool oopDesc::is_constantPoolCache()  const { return blueprint()->oop_is_constantPoolCache(); }
+@@ -157,7 +154,7 @@
+     // The most common case is instances; fall through if so.
+     if (lh < Klass::_lh_neutral_value) {
+       // Second most common case is arrays.  We have to fetch the
+-      // length of the array, shift (multiply) it appropriately, 
++      // length of the array, shift (multiply) it appropriately,
+       // up to wordSize, add the header, and align to object size.
+       size_t size_in_bytes;
+ #ifdef _M_IA64
+@@ -167,11 +164,11 @@
+       // array oop.  Making the reference volatile prohibits this.
+       // (%%% please explain by what magic the length is actually fetched!)
+       volatile int *array_length;
+-      array_length = (volatile int *)( (intptr_t)this + 
++      array_length = (volatile int *)( (intptr_t)this +
+                           arrayOopDesc::length_offset_in_bytes() );
+       assert(array_length > 0, "Integer arithmetic problem somewhere");
+       // Put into size_t to avoid overflow.
+-      size_in_bytes = (size_t) array_length;  
++      size_in_bytes = (size_t) array_length;
+       size_in_bytes = size_in_bytes << Klass::layout_helper_log2_element_size(lh);
+ #else
+       size_t array_length = (size_t) ((arrayOop)this)->length();
+@@ -183,8 +180,8 @@
+       // in units of bytes and doing it this way we can round up just once,
+       // skipping the intermediate round to HeapWordSize.  Cast the result
+       // of round_to to size_t to guarantee unsigned division == right shift.
+-      s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) / 
+-	HeapWordSize);
++      s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
++        HeapWordSize);
+ 
+       // UseParNewGC can change the length field of an "old copy" of an object
+       // array in the young gen so it indicates the stealable portion of
+@@ -202,7 +199,8 @@
+       // technique, or when G1 is integrated (and currently uses this array chunking
+       // technique) we will need to suitably modify the assertion.
+       assert((s == klass->oop_size(this)) ||
+-             ((UseParNewGC && Universe::heap()->is_gc_active()) &&
++             (((UseParNewGC || UseParallelGC) &&
++                                           Universe::heap()->is_gc_active()) &&
+               (is_typeArray() ||
+                (is_objArray() && is_forwarded()))),
+              "wrong array object size");
+@@ -346,7 +344,7 @@
+ 
+ #endif // PRODUCT
+ 
+-inline void oopDesc::follow_header() { 
++inline void oopDesc::follow_header() {
+   MarkSweep::mark_and_push((oop*)&_klass);
+ }
+ 
+@@ -358,7 +356,7 @@
+ 
+ // Used by scavengers
+ 
+-inline bool oopDesc::is_forwarded() const { 
++inline bool oopDesc::is_forwarded() const {
+   // The extra heap check is needed since the obj might be locked, in which case the
+   // mark would point to a stack location and have the sentinel bit cleared
+   return mark()->is_marked();
+@@ -377,7 +375,7 @@
+ // Used by parallel scavengers
+ inline bool oopDesc::cas_forward_to(oop p, markOop compare) {
+   assert(Universe::heap()->is_in_reserved(p),
+-	 "forwarding to something not in heap");
++         "forwarding to something not in heap");
+   markOop m = markOopDesc::encode_pointer_as_mark(p);
+   assert(m->decode_pointer() == p, "encoding must be reversable");
+   return cas_set_mark(m, compare) == compare;
+@@ -468,8 +466,8 @@
+   return blueprint()->oop_oop_iterate##nv_suffix##_m(this, blk, mr);       \
+ }
+ 
+-ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DEFN) 
+-ALL_OOP_OOP_ITERATE_CLOSURES_3(OOP_ITERATE_DEFN) 
++ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DEFN)
++ALL_OOP_OOP_ITERATE_CLOSURES_3(OOP_ITERATE_DEFN)
+ 
+ 
+ inline bool oopDesc::is_shared() const {
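
[Editor's note] The size() hunk above reformats the array-size arithmetic: fetch the length, shift it by the log2 element size, add the header, and align up before converting to words. A worked sketch with assumed constants (64-bit heap words, 16-byte header), not HotSpot's real layout-helper encoding:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t HeapWordSize           = 8;   // assumption: 64-bit heap words
  const size_t MinObjAlignmentInBytes = 8;   // assumption
  const size_t header_bytes           = 16;  // assumption: mark + klass words
  size_t length            = 10;             // e.g. a jchar[10]
  size_t log2_element_size = 1;              // jchar is 2 bytes

  size_t size_in_bytes = (length << log2_element_size) + header_bytes;
  // round_to(x, a): align up; the unsigned division below is a right shift
  size_t aligned = (size_in_bytes + MinObjAlignmentInBytes - 1)
                   & ~(MinObjAlignmentInBytes - 1);
  size_t size_in_words = aligned / HeapWordSize;
  printf("object size: %zu words\n", size_in_words);  // 5 for these inputs
  return 0;
}
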
+diff -ruN openjdk6/hotspot/src/share/vm/oops/oop.pcgc.inline.hpp openjdk/hotspot/src/share/vm/oops/oop.pcgc.inline.hpp
+--- openjdk6/hotspot/src/share/vm/oops/oop.pcgc.inline.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/oop.pcgc.inline.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)oop.pcgc.inline.hpp	1.16 07/05/29 09:44:24 JVM"
+-#endif
+ /*
+  * Copyright 2005-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ inline void oopDesc::update_contents(ParCompactionManager* cm) {
+@@ -32,7 +29,7 @@
+ 
+   // Can the option to update and/or copy be moved up in the
+   // call chain to avoid calling into here?
+- 
++
+   if (PSParallelCompact::should_update_klass(klass())) {
+     update_header();
+     assert(klass()->is_klass(), "Not updated correctly");
+@@ -49,8 +46,8 @@
+ }
+ 
+ inline void oopDesc::update_contents(ParCompactionManager* cm,
+-				     HeapWord* begin_limit,
+-				     HeapWord* end_limit) {
++                                     HeapWord* begin_limit,
++                                     HeapWord* end_limit) {
+   // The klass field must be updated before anything else
+   // can be done.
+   debug_only(klassOopDesc* original_klass = klass());
+@@ -59,11 +56,11 @@
+ }
+ 
+ inline void oopDesc::update_contents(ParCompactionManager* cm,
+-				     klassOop old_klass,
+-				     HeapWord* begin_limit,
+-				     HeapWord* end_limit) {
++                                     klassOop old_klass,
++                                     HeapWord* begin_limit,
++                                     HeapWord* end_limit) {
+ 
+-  klassOop updated_klass = 
++  klassOop updated_klass =
+     PSParallelCompact::summary_data().calc_new_klass(old_klass);
+ 
+   // Needs to be boundary aware for the 64 bit case
+@@ -84,7 +81,7 @@
+ }
+ 
+ inline void oopDesc::follow_contents(ParCompactionManager* cm) {
+-  assert (PSParallelCompact::mark_bitmap()->is_marked(this), 
++  assert (PSParallelCompact::mark_bitmap()->is_marked(this),
+     "should be marked");
+   blueprint()->oop_follow_contents(cm, this);
+ }
+@@ -97,7 +94,7 @@
+ 
+ inline oop oopDesc::forward_to_atomic(oop p) {
+   assert(ParNewGeneration::is_legal_forward_ptr(p),
+-	 "illegal forwarding pointer value.");
++         "illegal forwarding pointer value.");
+   markOop oldMark = mark();
+   markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
+   markOop curMark;
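
[Editor's note] forward_to_atomic, whose assertion the hunk above re-indents, installs a forwarding pointer with a compare-and-swap so racing GC threads agree on one surviving copy. A hedged sketch of that idea using std::atomic; HotSpot encodes the pointer in the mark word via markOopDesc::encode_pointer_as_mark and uses its own atomics, so every name below is illustrative:

#include <atomic>

struct Obj;
// Stand-in for the object's mark word; nullptr means "not yet forwarded".
std::atomic<Obj*> mark{nullptr};

// Returns the winning copy: ours if the CAS succeeded, otherwise the
// forwardee some other GC thread installed first.
Obj* forward_to_atomic(Obj* my_copy) {
  Obj* expected = nullptr;
  if (mark.compare_exchange_strong(expected, my_copy))
    return my_copy;    // we won the race
  return expected;     // CAS failed: 'expected' holds the prior forwardee
}
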
+diff -ruN openjdk6/hotspot/src/share/vm/oops/oop.psgc.inline.hpp openjdk/hotspot/src/share/vm/oops/oop.psgc.inline.hpp
+--- openjdk6/hotspot/src/share/vm/oops/oop.psgc.inline.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/oop.psgc.inline.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)oop.psgc.inline.hpp	1.17 07/05/05 17:06:07 JVM"
+-#endif
+ /*
+  * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ParallelScavengeHeap methods
+@@ -44,4 +41,3 @@
+   }
+   // Else skip it.  The typeArrayKlass in the header never needs scavenging.
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/oopsHierarchy.cpp openjdk/hotspot/src/share/vm/oops/oopsHierarchy.cpp
+--- openjdk6/hotspot/src/share/vm/oops/oopsHierarchy.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/oopsHierarchy.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)oopsHierarchy.cpp	1.7 07/05/05 17:06:08 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/oops/oopsHierarchy.hpp openjdk/hotspot/src/share/vm/oops/oopsHierarchy.hpp
+--- openjdk6/hotspot/src/share/vm/oops/oopsHierarchy.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/oopsHierarchy.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)oopsHierarchy.hpp	1.31 07/05/17 15:57:10 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // OBJECT hierarchy
+@@ -31,19 +28,19 @@
+ 
+ #ifndef CHECK_UNHANDLED_OOPS
+ 
+-typedef class oopDesc*			    oop;
+-typedef class   instanceOopDesc*	    instanceOop;
+-typedef class   methodOopDesc*		    methodOop;
+-typedef class   constMethodOopDesc*	    constMethodOop;
+-typedef class   methodDataOopDesc*	    methodDataOop;
+-typedef class   arrayOopDesc*		    arrayOop;
+-typedef class     constantPoolOopDesc*	    constantPoolOop;
++typedef class oopDesc*                      oop;
++typedef class   instanceOopDesc*            instanceOop;
++typedef class   methodOopDesc*              methodOop;
++typedef class   constMethodOopDesc*         constMethodOop;
++typedef class   methodDataOopDesc*          methodDataOop;
++typedef class   arrayOopDesc*               arrayOop;
++typedef class     constantPoolOopDesc*      constantPoolOop;
+ typedef class     constantPoolCacheOopDesc* constantPoolCacheOop;
+-typedef class     objArrayOopDesc*	    objArrayOop;
+-typedef class     typeArrayOopDesc*	    typeArrayOop;
+-typedef class   symbolOopDesc*	            symbolOop;
+-typedef class   klassOopDesc*		    klassOop;
+-typedef class   markOopDesc*		    markOop;
++typedef class     objArrayOopDesc*          objArrayOop;
++typedef class     typeArrayOopDesc*         typeArrayOop;
++typedef class   symbolOopDesc*              symbolOop;
++typedef class   klassOopDesc*               klassOop;
++typedef class   markOopDesc*                markOop;
+ typedef class   compiledICHolderOopDesc*    compiledICHolderOop;
+ 
+ #else
+@@ -64,7 +61,7 @@
+ // instead, which generates less code anyway.
+ 
+ class Thread;
+-typedef class   markOopDesc*		    markOop;
++typedef class   markOopDesc*                markOop;
+ class PromotedObject;
+ 
+ 
+@@ -77,7 +74,7 @@
+   // friend class markOop;
+ public:
+   void set_obj(const void* p)         {
+-    raw_set_obj(p); 
++    raw_set_obj(p);
+     if (CheckUnhandledOops) register_oop();
+   }
+   void raw_set_obj(const void* p)     { _o = (oopDesc*)p; }
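
[Editor's note] Under CHECK_UNHANDLED_OOPS the plain pointer typedefs above are replaced by a checking wrapper whose setter registers each stored oop for later verification, as the set_obj hunk shows. A trimmed, compilable sketch with the bookkeeping hook stubbed out:

struct oopDesc;
bool CheckUnhandledOops = true;   // assumed VM flag

class CheckedOop {
  oopDesc* _o;
  void register_oop() {}          // stub; assumed to record the oop so the
                                  // VM can verify it is properly handled
 public:
  void set_obj(const void* p) {
    raw_set_obj(p);                          // store the raw pointer first,
    if (CheckUnhandledOops) register_oop();  // then register when checking
  }
  void raw_set_obj(const void* p) { _o = (oopDesc*)p; }
};
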
+diff -ruN openjdk6/hotspot/src/share/vm/oops/symbolKlass.cpp openjdk/hotspot/src/share/vm/oops/symbolKlass.cpp
+--- openjdk6/hotspot/src/share/vm/oops/symbolKlass.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/symbolKlass.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)symbolKlass.cpp	1.66 07/05/29 09:44:24 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -31,7 +28,7 @@
+ symbolOop symbolKlass::allocate_symbol(u1* name, int len, TRAPS) {
+   // Don't allow symbol oops to be created which cannot fit in a symbolOop.
+   if (len > symbolOopDesc::max_length()) {
+-    THROW_MSG_0(vmSymbols::java_lang_InternalError(), 
++    THROW_MSG_0(vmSymbols::java_lang_InternalError(),
+                 "name is too long to represent");
+   }
+   int size = symbolOopDesc::object_size(len);
+@@ -121,11 +118,11 @@
+   return k();
+ }
+ 
+-int symbolKlass::oop_size(oop obj) const { 
++int symbolKlass::oop_size(oop obj) const {
+   assert(obj->is_symbol(),"must be a symbol");
+   symbolOop s = symbolOop(obj);
+   int size = s->object_size();
+-  return size; 
++  return size;
+ }
+ 
+ bool symbolKlass::oop_is_parsable(oop obj) const {
+@@ -136,7 +133,7 @@
+ 
+ void symbolKlass::oop_follow_contents(oop obj) {
+   assert (obj->is_symbol(), "object must be symbol");
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::symbolKlassObj never moves.
+   // Note: do not follow next link here (see SymbolTable::follow_contents)
+ }
+@@ -144,7 +141,7 @@
+ #ifndef SERIALGC
+ void symbolKlass::oop_follow_contents(ParCompactionManager* cm, oop obj) {
+   assert (obj->is_symbol(), "object must be symbol");
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::symbolKlassObj never moves.
+   // Note: do not follow next link here (see SymbolTable::follow_contents)
+ }
+@@ -156,7 +153,7 @@
+   // Get size before changing pointers.
+   // Don't call size() or oop_size() since that is a virtual call.
+   int size = s->object_size();
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::symbolKlassObj never moves.
+   return size;
+ }
+@@ -168,7 +165,7 @@
+   // Get size before changing pointers.
+   // Don't call size() or oop_size() since that is a virtual call.
+   int size = s->object_size();
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::symbolKlassObj never moves.
+   return size;
+ }
+@@ -180,7 +177,7 @@
+   // Get size before changing pointers.
+   // Don't call size() or oop_size() since that is a virtual call.
+   int size = s->object_size();
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::symbolKlassObj never moves.
+   return size;
+ }
+@@ -201,7 +198,7 @@
+ }
+ 
+ int symbolKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
+-				     HeapWord* beg_addr, HeapWord* end_addr) {
++                                     HeapWord* beg_addr, HeapWord* end_addr) {
+   assert(obj->is_symbol(), "should be symbol");
+   return symbolOop(obj)->object_size();
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/oops/symbolKlass.hpp openjdk/hotspot/src/share/vm/oops/symbolKlass.hpp
+--- openjdk6/hotspot/src/share/vm/oops/symbolKlass.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/symbolKlass.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)symbolKlass.hpp	1.42 07/05/29 09:44:24 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // a symbolKlass is the klass for a symbolOop
+@@ -44,7 +41,7 @@
+   // Casting from klassOop
+   static symbolKlass* cast(klassOop k) {
+     assert(k->klass_part()->oop_is_symbol(), "cast to symbolKlass");
+-    return (symbolKlass*) k->klass_part(); 
++    return (symbolKlass*) k->klass_part();
+   }
+ 
+   static int header_size()       { return oopDesc::header_size() + sizeof(symbolKlass)/HeapWordSize; }
+@@ -75,4 +72,3 @@
+ #endif
+   const char* internal_name() const;
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/symbolOop.cpp openjdk/hotspot/src/share/vm/oops/symbolOop.cpp
+--- openjdk6/hotspot/src/share/vm/oops/symbolOop.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/symbolOop.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)symbolOop.cpp	1.28 07/05/05 17:06:08 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -32,7 +29,7 @@
+   int l = utf8_length();
+   if (l != len) return false;
+   while (l-- > 0) {
+-    if (str[l] != (char) byte_at(l)) 
++    if (str[l] != (char) byte_at(l))
+       return false;
+   }
+   assert(l == -1, "we should be at the beginning");
+diff -ruN openjdk6/hotspot/src/share/vm/oops/symbolOop.hpp openjdk/hotspot/src/share/vm/oops/symbolOop.hpp
+--- openjdk6/hotspot/src/share/vm/oops/symbolOop.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/symbolOop.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)symbolOop.hpp	1.40 07/05/05 17:06:07 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A symbolOop is a canonicalized string.
+@@ -33,10 +30,10 @@
+   friend class VMStructs;
+  private:
+   unsigned short _length; // number of UTF8 characters in the symbol
+-  jbyte _body[1];  
++  jbyte _body[1];
+ 
+   enum {
+-    // max_symbol_length is constrained by type of _length 
++    // max_symbol_length is constrained by type of _length
+     max_symbol_length = (1 << 16) -1
+   };
+  public:
+@@ -45,7 +42,7 @@
+   jbyte* base() { return &_body[0]; }
+ 
+ 
+-  // Returns the largest size symbol we can safely hold. 
++  // Returns the largest size symbol we can safely hold.
+   static int max_length() {
+     return max_symbol_length;
+   }
+@@ -94,7 +91,7 @@
+     return as_C_string_flexible_buffer(t, buf, size);
+   }
+ 
+-  jchar* as_unicode(int& length) const;  
++  jchar* as_unicode(int& length) const;
+ 
+   // Treating this symbol as a class name, returns the Java name for the class.
+   // String is allocated in resource area if buffer is not provided.
+@@ -119,4 +116,3 @@
+  return (((uintptr_t)this < (uintptr_t)other) ? -1
+    : ((uintptr_t)this == (uintptr_t) other) ? 0 : 1);
+ }
+-
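
[Editor's note] symbolOopDesc stores its UTF8 bytes inline after the 16-bit _length field, using the trailing one-element-array idiom, which is also why max_symbol_length is (1 << 16) - 1. A self-contained sketch of the idiom, with plain malloc standing in for permanent-heap allocation:

#include <cstdlib>
#include <cstring>

// Variable-length object: a 16-bit length then the bytes themselves,
// declared as a one-element array whose storage really extends past
// the end of the struct.
struct Sym {
  unsigned short _length;   // caps the length at (1 << 16) - 1
  char _body[1];
};

Sym* make_sym(const char* s, unsigned short len) {
  // header plus len bytes; _body[0] already accounts for one of them
  Sym* sym = (Sym*)std::malloc(sizeof(Sym) + len - 1);
  sym->_length = len;
  std::memcpy(sym->_body, s, len);
  return sym;
}
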
+diff -ruN openjdk6/hotspot/src/share/vm/oops/typeArrayKlass.cpp openjdk/hotspot/src/share/vm/oops/typeArrayKlass.cpp
+--- openjdk6/hotspot/src/share/vm/oops/typeArrayKlass.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/typeArrayKlass.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)typeArrayKlass.cpp	1.125 07/05/29 09:44:24 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -46,7 +43,7 @@
+   // bootstrapping: don't create sym if symbolKlass not created yet
+   if (Universe::symbolKlassObj() != NULL) {
+     sym = oopFactory::new_symbol_handle(external_name(type), CHECK_NULL);
+-  }   
++  }
+   KlassHandle klassklass (THREAD, Universe::typeArrayKlassKlassObj());
+ 
+   arrayKlassHandle k = base_create_array_klass(o.vtbl_value(), header_size(), klassklass, CHECK_NULL);
+@@ -66,11 +63,11 @@
+   return k();
+ }
+ 
+-typeArrayOop typeArrayKlass::allocate(int length, TRAPS) {  
++typeArrayOop typeArrayKlass::allocate(int length, TRAPS) {
+   assert(log2_element_size() >= 0, "bad scale");
+   if (length >= 0) {
+     if (length <= max_length()) {
+-      size_t size = typeArrayOopDesc::object_size(layout_helper(), length);    
++      size_t size = typeArrayOopDesc::object_size(layout_helper(), length);
+       KlassHandle h_k(THREAD, as_klassOop());
+       typeArrayOop t;
+       CollectedHeap* ch = Universe::heap();
+@@ -89,17 +86,17 @@
+   }
+ }
+ 
+-typeArrayOop typeArrayKlass::allocate_permanent(int length, TRAPS) {  
++typeArrayOop typeArrayKlass::allocate_permanent(int length, TRAPS) {
+   if (length < 0) THROW_0(vmSymbols::java_lang_NegativeArraySizeException());
+   int size = typeArrayOopDesc::object_size(layout_helper(), length);
+-  KlassHandle h_k(THREAD, as_klassOop());  
++  KlassHandle h_k(THREAD, as_klassOop());
+   typeArrayOop t = (typeArrayOop)
+     CollectedHeap::permanent_array_allocate(h_k, size, length, CHECK_NULL);
+   assert(t->is_parsable(), "Can't publish until parsable");
+   return t;
+ }
+ 
+-oop typeArrayKlass::multi_allocate(int rank, jint* last_size, TRAPS) {  
++oop typeArrayKlass::multi_allocate(int rank, jint* last_size, TRAPS) {
+   // For typeArrays this is only called for the last dimension
+   assert(rank == 1, "just checking");
+   int length = *last_size;
+@@ -143,14 +140,14 @@
+   return array_klass_impl(h_this, or_null, n, THREAD);
+ }
+ 
+-klassOop typeArrayKlass::array_klass_impl(typeArrayKlassHandle h_this, bool or_null, int n, TRAPS) {  
++klassOop typeArrayKlass::array_klass_impl(typeArrayKlassHandle h_this, bool or_null, int n, TRAPS) {
+   int dimension = h_this->dimension();
+   assert(dimension <= n, "check order of chain");
+-    if (dimension == n) 
++    if (dimension == n)
+       return h_this();
+ 
+   objArrayKlassHandle  h_ak(THREAD, h_this->higher_dimension());
+-  if (h_ak.is_null()) {    
++  if (h_ak.is_null()) {
+     if (or_null)  return NULL;
+ 
+     ResourceMark rm;
+@@ -159,7 +156,7 @@
+       MutexLocker mc(Compile_lock, THREAD);   // for vtables
+       // Atomic create higher dimension and link into list
+       MutexLocker mu(MultiArray_lock, THREAD);
+-    
++
+       h_ak = objArrayKlassHandle(THREAD, h_this->higher_dimension());
+       if (h_ak.is_null()) {
+         klassOop oak = objArrayKlassKlass::cast(
+@@ -168,14 +165,14 @@
+         h_ak = objArrayKlassHandle(THREAD, oak);
+         h_ak->set_lower_dimension(h_this());
+         h_this->set_higher_dimension(h_ak());
+-        assert(h_ak->oop_is_objArray(), "incorrect initialization of objArrayKlass");    
+-      }       
++        assert(h_ak->oop_is_objArray(), "incorrect initialization of objArrayKlass");
++      }
+     }
+   } else {
+     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
+   }
+   if (or_null) {
+-    return h_ak->array_klass_or_null(n); 
++    return h_ak->array_klass_or_null(n);
+   }
+   return h_ak->array_klass(n, CHECK_NULL);
+ }
+@@ -184,7 +181,7 @@
+   return array_klass_impl(or_null, dimension() +  1, THREAD);
+ }
+ 
+-int typeArrayKlass::oop_size(oop obj) const { 
++int typeArrayKlass::oop_size(oop obj) const {
+   assert(obj->is_typeArray(),"must be a type array");
+   typeArrayOop t = typeArrayOop(obj);
+   return t->object_size();
+@@ -192,14 +189,14 @@
+ 
+ void typeArrayKlass::oop_follow_contents(oop obj) {
+   assert(obj->is_typeArray(),"must be a type array");
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::typeArrayKlass never moves.
+ }
+ 
+ #ifndef SERIALGC
+ void typeArrayKlass::oop_follow_contents(ParCompactionManager* cm, oop obj) {
+   assert(obj->is_typeArray(),"must be a type array");
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::typeArrayKlass never moves.
+ }
+ #endif // SERIALGC
+@@ -207,7 +204,7 @@
+ int typeArrayKlass::oop_adjust_pointers(oop obj) {
+   assert(obj->is_typeArray(),"must be a type array");
+   typeArrayOop t = typeArrayOop(obj);
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::typeArrayKlass never moves.
+   return t->object_size();
+ }
+@@ -215,7 +212,7 @@
+ int typeArrayKlass::oop_oop_iterate(oop obj, OopClosure* blk) {
+   assert(obj->is_typeArray(),"must be a type array");
+   typeArrayOop t = typeArrayOop(obj);
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::typeArrayKlass never moves.
+   return t->object_size();
+ }
+@@ -223,7 +220,7 @@
+ int typeArrayKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) {
+   assert(obj->is_typeArray(),"must be a type array");
+   typeArrayOop t = typeArrayOop(obj);
+-  // Performance tweak: We skip iterating over the klass pointer since we 
++  // Performance tweak: We skip iterating over the klass pointer since we
+   // know that Universe::typeArrayKlass never moves.
+   return t->object_size();
+ }
+@@ -245,7 +242,7 @@
+ 
+ int
+ typeArrayKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
+-				    HeapWord* beg_addr, HeapWord* end_addr) {
++                                    HeapWord* beg_addr, HeapWord* end_addr) {
+   assert(obj->is_typeArray(),"must be a type array");
+   return typeArrayOop(obj)->object_size();
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/oops/typeArrayKlass.hpp openjdk/hotspot/src/share/vm/oops/typeArrayKlass.hpp
+--- openjdk6/hotspot/src/share/vm/oops/typeArrayKlass.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/typeArrayKlass.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)typeArrayKlass.hpp	1.69 07/05/29 09:44:25 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A typeArrayKlass is the klass of a typeArray
+@@ -79,7 +76,7 @@
+   // Casting from klassOop
+   static typeArrayKlass* cast(klassOop k) {
+     assert(k->klass_part()->oop_is_typeArray_slow(), "cast to typeArrayKlass");
+-    return (typeArrayKlass*) k->klass_part(); 
++    return (typeArrayKlass*) k->klass_part();
+   }
+ 
+   // Naming
+diff -ruN openjdk6/hotspot/src/share/vm/oops/typeArrayKlassKlass.cpp openjdk/hotspot/src/share/vm/oops/typeArrayKlassKlass.cpp
+--- openjdk6/hotspot/src/share/vm/oops/typeArrayKlassKlass.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/typeArrayKlassKlass.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)typeArrayKlassKlass.cpp	1.30 07/05/05 17:06:08 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -30,7 +27,7 @@
+ 
+ klassOop typeArrayKlassKlass::create_klass(TRAPS) {
+   typeArrayKlassKlass o;
+-  KlassHandle h_this_klass(THREAD, Universe::klassKlassObj());  
++  KlassHandle h_this_klass(THREAD, Universe::klassKlassObj());
+   KlassHandle k = base_create_klass(h_this_klass, header_size(), o.vtbl_value(), CHECK_NULL);
+   assert(k()->size() == align_object_size(header_size()), "wrong size for object");
+   java_lang_Class::create_mirror(k, CHECK_NULL); // Allocate mirror
+@@ -45,7 +42,7 @@
+ void typeArrayKlassKlass::oop_print_on(oop obj, outputStream* st) {
+   assert(obj->is_klass(), "must be klass");
+   oop_print_value_on(obj, st);
+-  Klass:: oop_print_on(obj, st); 
++  Klass:: oop_print_on(obj, st);
+ }
+ 
+ 
+@@ -71,4 +68,3 @@
+ const char* typeArrayKlassKlass::internal_name() const {
+   return "{type array class}";
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/typeArrayKlassKlass.hpp openjdk/hotspot/src/share/vm/oops/typeArrayKlassKlass.hpp
+--- openjdk6/hotspot/src/share/vm/oops/typeArrayKlassKlass.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/typeArrayKlassKlass.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)typeArrayKlassKlass.hpp	1.24 07/05/05 17:06:08 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A typeArrayKlassKlass is the klass of a typeArrayKlass
+@@ -39,11 +36,11 @@
+   // Allocation
+   DEFINE_ALLOCATE_PERMANENT(typeArrayKlassKlass);
+   static klassOop create_klass(TRAPS);
+- 
++
+   // Casting from klassOop
+   static typeArrayKlassKlass* cast(klassOop k) {
+     assert(k->klass_part()->oop_is_klass(), "cast to typeArrayKlassKlass");
+-    return (typeArrayKlassKlass*) k->klass_part(); 
++    return (typeArrayKlassKlass*) k->klass_part();
+   }
+ 
+   // Sizing
+@@ -59,4 +56,3 @@
+  public:
+   const char* internal_name() const;
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/typeArrayOop.cpp openjdk/hotspot/src/share/vm/oops/typeArrayOop.cpp
+--- openjdk6/hotspot/src/share/vm/oops/typeArrayOop.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/typeArrayOop.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)typeArrayOop.cpp	1.14 07/05/05 17:06:08 JVM"
+-#endif
+ /*
+  * Copyright 1997 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,11 +19,10 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_typeArrayOop.cpp.incl"
+ 
+ // <<this page is intentionally left blank>>
+-
+diff -ruN openjdk6/hotspot/src/share/vm/oops/typeArrayOop.hpp openjdk/hotspot/src/share/vm/oops/typeArrayOop.hpp
+--- openjdk6/hotspot/src/share/vm/oops/typeArrayOop.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/oops/typeArrayOop.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)typeArrayOop.hpp	1.46 07/05/05 17:06:08 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A typeArrayOop is an array containing basic types (non oop elements).
+@@ -86,7 +83,7 @@
+   jdouble* double_at_addr(int which) const {
+     assert(is_within_bounds(which), "index out of bounds");
+     return &double_base()[which];
+-  }  
++  }
+ 
+   jbyte byte_at(int which) const                  { return *byte_at_addr(which); }
+   void byte_at_put(int which, jbyte contents)     { *byte_at_addr(which) = contents; }
+@@ -136,7 +133,7 @@
+     assert(size_in_words <= (julong)max_jint, "no overflow");
+ 
+     return align_object_size((intptr_t)size_in_words);
+-  }     
++  }
+ 
+  public:
+   int object_size() {
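
[Editor's note] Each typed accessor in typeArrayOopDesc pairs a bounds assert with a typed base pointer, as in the double_at_addr hunk above. A minimal sketch with invented field names:

#include <cassert>

struct DoubleArray {
  int     _length;
  double* _base;
  bool is_within_bounds(int i) const { return 0 <= i && i < _length; }
  double* double_at_addr(int i) const {
    assert(is_within_bounds(i) && "index out of bounds");
    return &_base[i];
  }
  double double_at(int i) const { return *double_at_addr(i); }
};
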
+diff -ruN openjdk6/hotspot/src/share/vm/opto/addnode.cpp openjdk/hotspot/src/share/vm/opto/addnode.cpp
+--- openjdk6/hotspot/src/share/vm/opto/addnode.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/addnode.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)addnode.cpp	1.141 07/05/05 17:06:10 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Portions of code courtesy of Clifford Click
+@@ -40,7 +37,7 @@
+ 
+ //=============================================================================
+ //------------------------------hash-------------------------------------------
+-// Hash function over AddNodes.  Needs to be commutative; i.e., I swap 
++// Hash function over AddNodes.  Needs to be commutative; i.e., I swap
+ // (commute) inputs to AddNodes willy-nilly so the hash function must return
+ // the same value in the presence of edge swapping.
+ uint AddNode::hash() const {
+@@ -48,7 +45,7 @@
+ }
+ 
+ //------------------------------Identity---------------------------------------
+-// If either input is a constant 0, return the other input.  
++// If either input is a constant 0, return the other input.
+ Node *AddNode::Identity( PhaseTransform *phase ) {
+   const Type *zero = add_id();  // The additive identity
+   if( phase->type( in(1) )->higher_equal( zero ) ) return in(2);
+@@ -121,7 +118,7 @@
+       add1_op == this_op ) { // Left input is an Add?
+ 
+     // Type of left _in right input
+-    const Type *t12 = phase->type( add1->in(2) ); 
++    const Type *t12 = phase->type( add1->in(2) );
+     if( t12->singleton() && t12 != Type::TOP ) { // Left input is an add of a constant?
+       // Check for rare case of closed data cycle which can happen inside
+       // unreachable loops. In these cases the computation is undefined.
+@@ -197,7 +194,7 @@
+   // Either input is BOTTOM ==> the result is the local BOTTOM
+   const Type *bot = bottom_type();
+   if( (t1 == bot) || (t2 == bot) ||
+-      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) ) 
++      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) )
+     return bot;
+ 
+   // Check for an addition involving the additive identity
+@@ -233,7 +230,7 @@
+     // Convert "(a-b)+(c-d)" into "(a+c)-(b+d)"
+     if( op2 == Op_SubI ) {
+       // Check for dead cycle: d = (a-b)+(c-d)
+-      assert( in(1)->in(2) != this && in(2)->in(2) != this, 
++      assert( in(1)->in(2) != this && in(2)->in(2) != this,
+               "dead loop in AddINode::Ideal" );
+       Node *sub  = new (phase->C, 3) SubINode(NULL, NULL);
+       sub->init_req(1, phase->transform(new (phase->C, 3) AddINode(in(1)->in(1), in(2)->in(1) ) ));
+@@ -243,11 +240,11 @@
+   }
+ 
+   // Convert "x+(0-y)" into "(x-y)"
+-  if( op2 == Op_SubI && phase->type(in(2)->in(1)) == TypeInt::ZERO ) 
++  if( op2 == Op_SubI && phase->type(in(2)->in(1)) == TypeInt::ZERO )
+     return new (phase->C, 3) SubINode(in(1), in(2)->in(2) );
+ 
+   // Convert "(0-y)+x" into "(x-y)"
+-  if( op1 == Op_SubI && phase->type(in(1)->in(1)) == TypeInt::ZERO ) 
++  if( op1 == Op_SubI && phase->type(in(1)->in(1)) == TypeInt::ZERO )
+     return new (phase->C, 3) SubINode( in(2), in(1)->in(2) );
+ 
+   // Convert (x>>>z)+y into (x+(y<<z))>>>z for small constant z and y.
+@@ -259,7 +256,7 @@
+   // Transform works for small z and small negative y when the addition
+   // (x + (y << z)) does not cross zero.
+   // Implement support for negative y and (x >= -(y << z))
+-  // Have not observed cases where type information exists to support 
++  // Have not observed cases where type information exists to support
+   // positive y and (x <= -(y << z))
+   if( op1 == Op_URShiftI && op2 == Op_ConI &&
+       in(1)->in(2)->Opcode() == Op_ConI ) {
+@@ -274,7 +271,7 @@
+       }
+     }
+   }
+-  
++
+   return AddNode::Ideal(phase, can_reshape);
+ }
+ 
+@@ -298,7 +295,7 @@
+ // pre-check.
+ const Type *AddINode::add_ring( const Type *t0, const Type *t1 ) const {
+   const TypeInt *r0 = t0->is_int(); // Handy access
+-  const TypeInt *r1 = t1->is_int();  
++  const TypeInt *r1 = t1->is_int();
+   int lo = r0->_lo + r1->_lo;
+   int hi = r0->_hi + r1->_hi;
+   if( !(r0->is_con() && r1->is_con()) ) {
+@@ -336,7 +333,7 @@
+     // Convert "(a-b)+(c-d)" into "(a+c)-(b+d)"
+     if( op2 == Op_SubL ) {
+       // Check for dead cycle: d = (a-b)+(c-d)
+-      assert( in(1)->in(2) != this && in(2)->in(2) != this, 
++      assert( in(1)->in(2) != this && in(2)->in(2) != this,
+               "dead loop in AddLNode::Ideal" );
+       Node *sub  = new (phase->C, 3) SubLNode(NULL, NULL);
+       sub->init_req(1, phase->transform(new (phase->C, 3) AddLNode(in(1)->in(1), in(2)->in(1) ) ));
+@@ -346,14 +343,14 @@
+   }
+ 
+   // Convert "x+(0-y)" into "(x-y)"
+-  if( op2 == Op_SubL && phase->type(in(2)->in(1)) == TypeLong::ZERO ) 
++  if( op2 == Op_SubL && phase->type(in(2)->in(1)) == TypeLong::ZERO )
+     return new (phase->C, 3) SubLNode(in(1), in(2)->in(2) );
+ 
+   // Convert "X+X+X+X+X...+X+Y" into "k*X+Y" or really convert "X+(X+Y)"
+   // into "(X<<1)+Y" and let shift-folding happen.
+   if( op2 == Op_AddL &&
+       in(2)->in(1) == in(1) &&
+-      op1 != Op_ConL && 
++      op1 != Op_ConL &&
+       0 ) {
+     Node *shift = phase->transform(new (phase->C, 3) LShiftLNode(in(1),phase->intcon(1)));
+     return new (phase->C, 3) AddLNode(shift,in(2)->in(2));
+@@ -410,11 +407,11 @@
+ // Check for addition of the identity
+ const Type *AddFNode::add_of_identity( const Type *t1, const Type *t2 ) const {
+   // x ADD 0  should return x unless 'x' is a -zero
+-  // 
++  //
+   // const Type *zero = add_id();     // The additive identity
+   // jfloat f1 = t1->getf();
+   // jfloat f2 = t2->getf();
+-  // 
++  //
+   // if( t1->higher_equal( zero ) ) return t2;
+   // if( t2->higher_equal( zero ) ) return t1;
+ 
+@@ -448,11 +445,11 @@
+ // Check for addition of the identity
+ const Type *AddDNode::add_of_identity( const Type *t1, const Type *t2 ) const {
+   // x ADD 0  should return x unless 'x' is a -zero
+-  // 
++  //
+   // const Type *zero = add_id();     // The additive identity
+   // jfloat f1 = t1->getf();
+   // jfloat f2 = t2->getf();
+-  // 
++  //
+   // if( t1->higher_equal( zero ) ) return t2;
+   // if( t2->higher_equal( zero ) ) return t1;
+ 
+@@ -496,7 +493,7 @@
+   const Node *n = in(Address);
+   if (n->is_AddP() && n->in(Base) == in(Base)) {
+     const AddPNode *addp = n->as_AddP(); // Left input is an AddP
+-    assert( !addp->in(Address)->is_AddP() || 
++    assert( !addp->in(Address)->is_AddP() ||
+              addp->in(Address)->as_AddP() != addp,
+             "dead loop in AddPNode::Ideal" );
+     // Type of left input's right input
+@@ -549,7 +546,7 @@
+ 
+ //------------------------------bottom_type------------------------------------
+ // Bottom-type is the pointer-type with unknown offset.
+-const Type *AddPNode::bottom_type() const { 
++const Type *AddPNode::bottom_type() const {
+   if (in(Address) == NULL)  return TypePtr::BOTTOM;
+   const TypePtr *tp = in(Address)->bottom_type()->isa_ptr();
+   if( !tp ) return Type::TOP;   // TOP input means TOP output
+@@ -723,7 +720,7 @@
+     return in(1);
+   }
+ 
+-  return AddNode::Identity(phase); 
++  return AddNode::Identity(phase);
+ }
+ 
+ //------------------------------add_ring---------------------------------------
+@@ -750,8 +747,8 @@
+   const TypeInt *r1 = t1->is_int();
+ 
+   // Complementing a boolean?
+-  if( r0 == TypeInt::BOOL && ( r1 == TypeInt::ONE 
+-			       || r1 == TypeInt::BOOL))
++  if( r0 == TypeInt::BOOL && ( r1 == TypeInt::ONE
++                               || r1 == TypeInt::BOOL))
+     return TypeInt::BOOL;
+ 
+   if( !r0->is_con() || !r1->is_con() ) // Not constants
+@@ -846,12 +843,12 @@
+       y = y->in(1);
+     }
+ 
+-    if( x->_idx > y->_idx ) 
++    if( x->_idx > y->_idx )
+       return new (phase->C, 3) MinINode(r->in(1),phase->transform(new (phase->C, 3) MinINode(l,r->in(2))));
+-    
++
+     // See if covers: MIN2(x+c0,MIN2(y+c1,z))
+     if( !phase->eqv(x,y) ) return NULL;
+-    // If (y == x) transform MIN2(x+c0, MIN2(x+c1,z)) into 
++    // If (y == x) transform MIN2(x+c0, MIN2(x+c1,z)) into
+     // MIN2(x+c0 or x+c1 which less, z).
+     return new (phase->C, 3) MinINode(phase->transform(new (phase->C, 3) AddINode(x,phase->intcon(MIN2(x_off,y_off)))),r->in(2));
+   } else {
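
[Editor's note] AddINode::add_ring, touched above, types an integer add as interval arithmetic over the inputs' [lo, hi] bounds. A simplified worked sketch: widen to 64 bits, add the bounds, and fall back to the full int range on overflow (HotSpot's handling of non-constant overflowing inputs is subtler than this):

#include <climits>
#include <cstdint>
#include <cstdio>

struct TypeInt { int lo, hi; };

TypeInt add_ring(TypeInt a, TypeInt b) {
  int64_t lo = (int64_t)a.lo + b.lo;
  int64_t hi = (int64_t)a.hi + b.hi;
  if (lo < INT_MIN || hi > INT_MAX)      // a bound overflowed: widen fully
    return TypeInt{INT_MIN, INT_MAX};
  return TypeInt{(int)lo, (int)hi};
}

int main() {
  TypeInt r = add_ring(TypeInt{0, 10}, TypeInt{5, 5});  // x in [0,10], plus 5
  printf("[%d, %d]\n", r.lo, r.hi);                     // prints [5, 15]
  return 0;
}
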
+diff -ruN openjdk6/hotspot/src/share/vm/opto/addnode.hpp openjdk/hotspot/src/share/vm/opto/addnode.hpp
+--- openjdk6/hotspot/src/share/vm/opto/addnode.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/addnode.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)addnode.hpp	1.58 07/05/05 17:06:10 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Portions of code courtesy of Clifford Click
+@@ -42,10 +39,10 @@
+   }
+ 
+   // Handle algebraic identities here.  If we have an identity, return the Node
+-  // we are equivalent to.  We look for "add of zero" as an identity.  
++  // we are equivalent to.  We look for "add of zero" as an identity.
+   virtual Node *Identity( PhaseTransform *phase );
+ 
+-  // We also canonicalize the Node, moving constants to the right input, 
++  // We also canonicalize the Node, moving constants to the right input,
+   // and flatten expressions (so that 1+x+2 becomes x+3).
+   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+ 
+@@ -55,7 +52,7 @@
+ 
+   // Check if this addition involves the additive identity
+   virtual const Type *add_of_identity( const Type *t1, const Type *t2 ) const;
+- 
++
+   // Supplied function returns the sum of the inputs.
+   // This also type-checks the inputs for sanity.  Guaranteed never to
+   // be passed a TOP or BOTTOM type, these are filtered out by a pre-check.
+@@ -209,7 +206,7 @@
+ // all the behavior of addition on a ring.  Only new thing is that we allow
+ // 2 equal inputs to be equal.
+ class MaxNode : public AddNode {
+-public: 
++public:
+   MaxNode( Node *in1, Node *in2 ) : AddNode(in1,in2) {}
+   virtual int Opcode() const = 0;
+ };
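
[Editor's note] The Ideal comment above describes the canonicalization C2 applies to adds: constants migrate to the right input and nested constant adds flatten, so 1+x+2 becomes x+3. A toy tree version of those two rewrites; C2 actually works on its Node graph with GVN, not on a tree like this:

#include <utility>

enum Kind { Con, Var, Add };
struct Expr {
  Kind kind;
  int  con = 0;                     // value when kind == Con
  Expr *l = nullptr, *r = nullptr;  // children when kind == Add
};

// Assumes children were canonicalized first (bottom-up rewrite).
Expr* canon_add(Expr* a) {
  if (a->kind != Add) return a;
  if (a->l->kind == Con && a->r->kind != Con)
    std::swap(a->l, a->r);                     // constant moves right
  // flatten (x + c1) + c2 -> x + (c1 + c2), e.g. (1+x)+2 becomes x+3
  if (a->r->kind == Con && a->l->kind == Add && a->l->r->kind == Con) {
    a->r->con += a->l->r->con;
    a->l = a->l->l;
  }
  return a;
}
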
+diff -ruN openjdk6/hotspot/src/share/vm/opto/adlcVMDeps.hpp openjdk/hotspot/src/share/vm/opto/adlcVMDeps.hpp
+--- openjdk6/hotspot/src/share/vm/opto/adlcVMDeps.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/adlcVMDeps.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)adlcVMDeps.hpp	1.19 07/05/05 17:06:10 JVM"
+-#endif
+ /*
+  * Copyright 1998-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,14 +19,14 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Declare commonly known constant and data structures between the
+ // ADLC and the VM
+ //
+ 
+-class AdlcVMDeps : public AllStatic {  
++class AdlcVMDeps : public AllStatic {
+  public:
+   // Mirror of TypeFunc types
+   enum { Control, I_O, Memory, FramePtr, ReturnAdr, Parms };
+@@ -37,11 +34,11 @@
+   enum Cisc_Status { Not_cisc_spillable = -1 };
+ 
+   // Mirror of OptoReg::Name names
+-  enum Name {   
+-    Physical = 0                // Start of physical regs    
++  enum Name {
++    Physical = 0                // Start of physical regs
+   };
+ 
+   // relocInfo
+   static const char* oop_reloc_type()  { return "relocInfo::oop_type"; }
+-  static const char* none_reloc_type() { return "relocInfo::none"; }  
++  static const char* none_reloc_type() { return "relocInfo::none"; }
+ };
+diff -ruN openjdk6/hotspot/src/share/vm/opto/block.cpp openjdk/hotspot/src/share/vm/opto/block.cpp
+--- openjdk6/hotspot/src/share/vm/opto/block.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/block.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)block.cpp	1.169 07/05/17 15:57:15 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Optimization - Graph Style
+@@ -36,7 +33,7 @@
+   assert(i >= Max(), "must be an overflow");
+   debug_only(_limit = i+1);
+   if( i < _size )  return;
+-  if( !_size ) { 
++  if( !_size ) {
+     _size = 1;
+     _blocks = (Block**)_arena->Amalloc( _size * sizeof(Block*) );
+     _blocks[0] = NULL;
+@@ -76,7 +73,7 @@
+     // but only because my "divide by 4" heuristic surely gets nearly
+     // all possible gain (a "do not align at all" heuristic has a
+     // chance of getting a really tiny gain).
+-    if( h->is_CountedLoop() && (h->as_CountedLoop()->is_pre_loop() || 
++    if( h->is_CountedLoop() && (h->as_CountedLoop()->is_pre_loop() ||
+                                 h->as_CountedLoop()->is_post_loop()) )
+       return (OptoLoopAlignment > 4) ? (OptoLoopAlignment>>2) : 1;
+     // Loops with low backedge frequency should not be aligned.
+@@ -91,9 +88,9 @@
+ 
+ //-----------------------------------------------------------------------------
+ // Compute the size of first 'inst_cnt' instructions in this block.
+-// Return the number of instructions left to compute if the block has 
++// Return the number of instructions left to compute if the block has
+ // less then 'inst_cnt' instructions.
+-uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt, 
++uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt,
+                                     PhaseRegAlloc* ra) {
+   uint last_inst = _nodes.size();
+   for( uint j = 0; j < last_inst && inst_cnt > 0; j++ ) {
+@@ -132,17 +129,17 @@
+ // Return empty status of a block.  Empty blocks contain only the head, other
+ // ideal nodes, and an optional trailing goto.
+ int Block::is_Empty() const {
+-  
++
+   // Root or start block is not considered empty
+-  if (_nodes[0]->is_Root() || _nodes[0]->is_Start()) {
+-    return not_empty; 
++  if (head()->is_Root() || head()->is_Start()) {
++    return not_empty;
+   }
+ 
+   int success_result = completely_empty;
+   int end_idx = _nodes.size()-1;
+ 
+   // Check for ending goto
+-  if ((end_idx > 0) && (_nodes[end_idx]->is_Goto())) { 
++  if ((end_idx > 0) && (_nodes[end_idx]->is_Goto())) {
+     success_result = empty_with_goto;
+     end_idx--;
+   }
+@@ -150,12 +147,12 @@
+   // Unreachable blocks are considered empty
+   if (num_preds() <= 1) {
+     return success_result;
+-  } 
++  }
+ 
+-  // Ideal nodes are allowable in empty blocks: skip them  Only MachNodes 
++  // Ideal nodes are allowable in empty blocks: skip them  Only MachNodes
+   // turn directly into code, because only MachNodes have non-trivial
+   // emit() functions.
+-  while ((end_idx > 0) && !_nodes[end_idx]->is_Mach()) { 
++  while ((end_idx > 0) && !_nodes[end_idx]->is_Mach()) {
+     end_idx--;
+   }
+ 
+@@ -167,16 +164,45 @@
+   return not_empty;
+ }
+ 
++//------------------------------has_uncommon_code------------------------------
++// Return true if the block's code implies that it is not likely to be
++// executed infrequently.  Check to see if the block ends in a Halt or
++// a low probability call.
++bool Block::has_uncommon_code() const {
++  Node* en = end();
++
++  if (en->is_Goto())
++    en = en->in(0);
++  if (en->is_Catch())
++    en = en->in(0);
++  if (en->is_Proj() && en->in(0)->is_MachCall()) {
++    MachCallNode* call = en->in(0)->as_MachCall();
++    if (call->cnt() != COUNT_UNKNOWN && call->cnt() <= PROB_UNLIKELY_MAG(4)) {
++      // This is true for slow-path stubs like new_{instance,array},
++      // slow_arraycopy, complete_monitor_locking, uncommon_trap.
++      // The magic number corresponds to the probability of an uncommon_trap,
++      // even though it is a count not a probability.
++      return true;
++    }
++  }
++
++  int op = en->is_Mach() ? en->as_Mach()->ideal_Opcode() : en->Opcode();
++  return op == Op_Halt;
++}
++
+ //------------------------------is_uncommon------------------------------------
+-// True if block is low enough frequency or guarded by a test which 
++// True if block is low enough frequency or guarded by a test which
+ // mostly does not go here.
+ bool Block::is_uncommon( Block_Array &bbs ) const {
+   // Initial blocks must never be moved, so are never uncommon.
+-  if (_nodes[0]->is_Root() || _nodes[0]->is_Start())  return false;
++  if (head()->is_Root() || head()->is_Start())  return false;
+ 
+   // Check for way-low freq
+   if( _freq < BLOCK_FREQUENCY(0.00001f) ) return true;
+ 
++  // Look for code shape indicating uncommon_trap or slow path
++  if (has_uncommon_code()) return true;
++
+   const float epsilon = 0.05f;
+   const float guard_factor = PROB_UNLIKELY_MAG(4) / (1.f - epsilon);
+   uint uncommon_preds = 0;
+@@ -186,13 +212,13 @@
+   for( uint i=1; i<num_preds(); i++ ) {
+     Block* guard = bbs[pred(i)->_idx];
+     // Check to see if this block follows its guard 1 time out of 10000
+-    // or less. 
++    // or less.
+     //
+     // See list of magnitude-4 unlikely probabilities in cfgnode.hpp which
+-    // we intend to be "uncommon", such as slow-path TLE allocation, 
++    // we intend to be "uncommon", such as slow-path TLE allocation,
+     // predicted call failure, and uncommon trap triggers.
+     //
+-    // Use an epsilon value of 5% to allow for variability in frequency 
++    // Use an epsilon value of 5% to allow for variability in frequency
+     // predictions and floating point calculations. The net effect is
+     // that guard_factor is set to 9500.
+     //
+@@ -212,7 +238,7 @@
+       (uncommon_preds == (num_preds()-1) ||
+       // it is uncommon for all frequent preds.
+        uncommon_for_freq_preds == freq_preds) ) {
+-    return true; 
++    return true;
+   }
+   return false;
+ }
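
[Editor's note] The comments in is_uncommon above give the heuristic in prose: a block is uncommon if its own frequency is tiny, if it ends in uncommon code, or if every frequent predecessor reaches it at most about 1 time in 9500 (PROB_UNLIKELY_MAG(4) divided by 1 - 0.05). A toy restatement with an invented data model and deliberately simplified conditions:

#include <cstddef>

struct Pred { float freq; float prob_to_block; };

bool is_uncommon(float block_freq, const Pred* preds, size_t n) {
  if (block_freq < 0.00001f) return true;        // way-low frequency
  const float epsilon      = 0.05f;
  const float guard_factor = 1.0e-4f / (1.0f - epsilon);  // ~ 1/9500
  size_t freq_preds = 0, uncommon_for_freq = 0;
  for (size_t i = 0; i < n; i++) {
    if (preds[i].freq < 0.00001f) continue;      // ignore infrequent preds
    freq_preds++;
    if (preds[i].prob_to_block <= guard_factor) uncommon_for_freq++;
  }
  return freq_preds > 0 && uncommon_for_freq == freq_preds;
}
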
+@@ -224,7 +250,7 @@
+   else tty->print("N%d", head()->_idx);
+ 
+   if (Verbose && orig != this) {
+-    // Dump the original block's idx 
++    // Dump the original block's idx
+     tty->print(" (");
+     orig->dump_bidx(orig);
+     tty->print(")");
+@@ -243,7 +269,7 @@
+   }
+ }
+ 
+-void Block::dump_head( const Block_Array *bbs ) const { 
++void Block::dump_head( const Block_Array *bbs ) const {
+   // Print the basic block
+   dump_bidx(this);
+   tty->print(": #\t");
+@@ -261,12 +287,12 @@
+         Block *p = (*bbs)[s->_idx];
+         p->dump_pred(bbs, p);
+       } else {
+-        while (!s->is_block_start()) 
++        while (!s->is_block_start())
+           s = s->in(0);
+         tty->print("N%d ", s->_idx );
+       }
+     }
+-  } else 
++  } else
+     tty->print("BLOCK HEAD IS JUNK  ");
+ 
+   // Print loop, if any
+@@ -280,11 +306,10 @@
+     }
+     tty->print("\tLoop: B%d-B%d ", bhead->_pre_order, bx->_pre_order);
+     // Dump any loop-specific bits, especially for CountedLoops.
+-    loop->dump_spec();
++    loop->dump_spec(tty);
+   }
+   tty->print(" Freq: %g",_freq);
+   if( Verbose || WizardMode ) {
+-    tty->print(" Count: %g",_cnt);
+     tty->print(" IDom: %d/#%d", _idom ? _idom->_pre_order : 0, _dom_depth);
+     tty->print(" RegPressure: %d",_reg_pressure);
+     tty->print(" IHRP Index: %d",_ihrp_index);
+@@ -301,16 +326,16 @@
+   uint cnt = _nodes.size();
+   for( uint i=0; i<cnt; i++ )
+     _nodes[i]->dump();
+-  tty->print("\n");  
++  tty->print("\n");
+ }
+ #endif
+ 
+ //=============================================================================
+ //------------------------------PhaseCFG---------------------------------------
+-PhaseCFG::PhaseCFG( Arena *a, RootNode *r, Matcher &m ) : 
+-  Phase(CFG), 
+-  _bbs(a), 
+-  _root(r) 
++PhaseCFG::PhaseCFG( Arena *a, RootNode *r, Matcher &m ) :
++  Phase(CFG),
++  _bbs(a),
++  _root(r)
+ #ifndef PRODUCT
+   , _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
+ #endif
+@@ -349,7 +374,7 @@
+     // 'np' is _root (see above) or RegionNode, StartNode: we push on stack
+     // only nodes which point to the start of basic block (see below).
+     Node *np = nstack.node();
+-    // idx > 0, except for the first node (_root) pushed on stack 
++    // idx > 0, except for the first node (_root) pushed on stack
+     // at the beginning when idx == 0.
+     // We will use the condition (idx == 0) later to end the build.
+     uint idx = nstack.index();
+@@ -393,7 +418,7 @@
+       for (int i = (cnt - 1); i > 0; i-- ) { // For all predecessors
+         Node *prevproj = p->in(i);  // Get prior input
+         assert( !prevproj->is_Con(), "dead input not removed" );
+-        // Check to see if p->in(i) is a "control-dependent" CFG edge - 
++        // Check to see if p->in(i) is a "control-dependent" CFG edge -
+         // i.e., it splits at the source (via an IF or SWITCH) and merges
+         // at the destination (via a many-input Region).
+         // This breaks critical edges.  The RegionNode to start the block
+@@ -511,17 +536,17 @@
+   b->_num_succs = 1;
+   // remap successor's predecessors if necessary
+   uint j;
+-  for( j = 1; j < succ->num_preds(); j++) 
+-    if( succ->pred(j)->in(0) == bp ) 
++  for( j = 1; j < succ->num_preds(); j++)
++    if( succ->pred(j)->in(0) == bp )
+       succ->head()->set_req(j, gto);
+   // Kill alternate exit path
+   Block *dead = b->_succs[1-idx];
+-  for( j = 1; j < dead->num_preds(); j++) 
+-    if( dead->pred(j)->in(0) == bp ) 
++  for( j = 1; j < dead->num_preds(); j++)
++    if( dead->pred(j)->in(0) == bp )
+       break;
+   // Scan through block, yanking dead path from
+   // all regions and phis.
+-  dead->_nodes[0]->del_req(j);
++  dead->head()->del_req(j);
+   for( int k = 1; dead->_nodes[k]->is_Phi(); k++ )
+     dead->_nodes[k]->del_req(j);
+ }
+@@ -537,9 +562,9 @@
+   if ((bx_index <= b_index) && (_blocks[bx_index] == bx)) {
+     return false;
+   }
+- 
++
+   // Find the current index of block bx on the block list
+-  bx_index = b_index + 1; 
++  bx_index = b_index + 1;
+   while( bx_index < _num_blocks && _blocks[bx_index] != bx ) bx_index++;
+   assert(_blocks[bx_index] == bx, "block not found");
+ 
+@@ -560,7 +585,7 @@
+   return true;
+ }
+ 
+-//------------------------------MoveEmptyToEnd---------------------------------
++//------------------------------MoveToEnd--------------------------------------
+ // Move empty and uncommon blocks to the end.
+ void PhaseCFG::MoveToEnd(Block *b, uint i) {
+   int e = b->is_Empty();
+@@ -579,7 +604,7 @@
+ }
+ 
+ //------------------------------RemoveEmpty------------------------------------
+-// Remove empty basic blocks and useless branches.  
++// Remove empty basic blocks and useless branches.
+ void PhaseCFG::RemoveEmpty() {
+   // Move uncommon blocks to the end
+   uint last = _num_blocks;
+@@ -594,7 +619,7 @@
+     // to give a fake exit path to infinite loops.  At this late stage they
+     // need to turn into Goto's so that when you enter the infinite loop you
+     // indeed hang.
+-    if( b->_nodes[b->end_idx()]->Opcode() == Op_NeverBranch ) 
++    if( b->_nodes[b->end_idx()]->Opcode() == Op_NeverBranch )
+       convert_NeverBranch_to_Goto(b);
+ 
+     // Look for uncommon blocks and move to end.
+@@ -634,11 +659,11 @@
+ 
+     // Connector blocks need no further processing.
+     if (b->is_connector()) {
+-      assert((i+1) == _num_blocks || _blocks[i+1]->is_connector(), 
++      assert((i+1) == _num_blocks || _blocks[i+1]->is_connector(),
+              "All connector blocks should sink to the end");
+       continue;
+     }
+-    assert(b->is_Empty() != Block::completely_empty, 
++    assert(b->is_Empty() != Block::completely_empty,
+            "Empty blocks should be connectors");
+ 
+     Block *bnext = (i < _num_blocks-1) ? _blocks[i+1] : NULL;
+@@ -669,7 +694,7 @@
+         }
+       }
+       // Remove all CatchProjs
+-      for (j1 = 0; j1 < b->_num_succs; j1++) b->_nodes.pop();        
++      for (j1 = 0; j1 < b->_num_succs; j1++) b->_nodes.pop();
+ 
+     } else if (b->_num_succs == 1) {
+       // Block ends in a Goto?
+@@ -689,15 +714,15 @@
+       ProjNode *proj1 = b->_nodes[b->_nodes.size()-1]->as_Proj();
+ 
+       // Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1].
+-      assert(proj0->raw_out(0) == b->_succs[0]->_nodes[0], "Mismatch successor 0");
+-      assert(proj1->raw_out(0) == b->_succs[1]->_nodes[0], "Mismatch successor 1");
++      assert(proj0->raw_out(0) == b->_succs[0]->head(), "Mismatch successor 0");
++      assert(proj1->raw_out(0) == b->_succs[1]->head(), "Mismatch successor 1");
+ 
+       Block *bs1 = b->non_connector_successor(1);
+ 
+-      // Check for neither successor block following the current 
+-      // block ending in a conditional. If so, move one of the 
++      // Check for neither successor block following the current
++      // block ending in a conditional. If so, move one of the
+       // successors after the current one, provided that the
+-      // successor was previously unscheduled, but moveable 
++      // successor was previously unscheduled, but moveable
+       // (i.e., all paths to it involve a branch).
+       if( bnext != bs0 && bnext != bs1 ) {
+ 
+@@ -712,7 +737,7 @@
+         if( proj0->Opcode() == Op_IfTrue ) {
+           p = 1.0 - p;
+         }
+-        
++
+         // Prefer successor #1 if p > 0.5
+         if (p > PROB_FAIR) {
+           bx = bs1;
+@@ -730,11 +755,11 @@
+       // Check for conditional branching the wrong way.  Negate
+       // conditional, if needed, so it falls into the following block
+       // and branches to the not-following block.
+-      
++
+       // Check for the next block being in succs[0].  We are going to branch
+-      // to succs[0], so we want the fall-thru case as the next block in 
++      // to succs[0], so we want the fall-thru case as the next block in
+       // succs[1].
+-      if (bnext == bs0) {      
++      if (bnext == bs0) {
+         // Fall-thru case in succs[0], so flip targets in succs map
+         Block *tbs0 = b->_succs[0];
+         Block *tbs1 = b->_succs[1];
+@@ -757,10 +782,10 @@
+       // Make sure we TRUE branch to the target
+       if( proj0->Opcode() == Op_IfFalse )
+         iff->negate();
+-      
++
+       b->_nodes.pop();          // Remove IfFalse & IfTrue projections
+       b->_nodes.pop();
+-      
++
+     } else {
+       // Multi-exit block, e.g. a switch statement
+       // But we don't need to do anything here
+@@ -830,7 +855,7 @@
+       for( uint k = 0; k < n->req(); k++ ) {
+         Node *use = n->in(k);
+         if( use && use != n ) {
+-          assert( _bbs[use->_idx] || use->is_Con(), 
++          assert( _bbs[use->_idx] || use->is_Con(),
+                   "must have block; constants for debug info ok" );
+         }
+       }
+@@ -860,7 +885,7 @@
+ void UnionFind::extend( uint from_idx, uint to_idx ) {
+   _nesting.check();
+   if( from_idx >= _max ) {
+-    uint size = 16; 
++    uint size = 16;
+     while( size <= from_idx ) size <<=1;
+     _indices = REALLOC_RESOURCE_ARRAY( uint, _indices, _max, size );
+     _max = size;
+@@ -873,7 +898,7 @@
+   assert( max <= max_uint, "Must fit within uint" );
+   // Force the Union-Find mapping to be at least this large
+   extend(max,0);
+-  // Initialize to be the ID mapping.  
++  // Initialize to be the ID mapping.
+   for( uint i=0; i<_max; i++ ) map(i,i);
+ }
+ 
+@@ -881,7 +906,7 @@
+ // Straight out of Tarjan's union-find algorithm
+ uint UnionFind::Find_compress( uint idx ) {
+   uint cur  = idx;
+-  uint next = lookup(cur); 
++  uint next = lookup(cur);
+   while( next != cur ) {        // Scan chain of equivalences
+     assert( next < cur, "always union smaller" );
+     cur = next;                 // until find a fixed-point
+@@ -901,10 +926,10 @@
+ // Like Find above, but no path compress, so bad asymptotic behavior
+ uint UnionFind::Find_const( uint idx ) const {
+   if( idx == 0 ) return idx;    // Ignore the zero idx
+-  // Off the end?  This can happen during debugging dumps 
++  // Off the end?  This can happen during debugging dumps
+   // when data structures have not finished being updated.
+   if( idx >= _max ) return idx;
+-  uint next = lookup(idx); 
++  uint next = lookup(idx);
+   while( next != idx ) {        // Scan chain of equivalences
+     assert( next < idx, "always union smaller" );
+     idx = next;                 // until find a fixed-point
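
For context, the UnionFind hunks above only strip trailing whitespace; the code they touch is Tarjan's union-find with path compression, where the representative is always the smaller index (hence the assert( next < cur, "always union smaller" ) invariant). A minimal sketch under those assumptions, with illustrative names rather than HotSpot's:

    #include <vector>

    struct UF {
      std::vector<unsigned> up;              // up[i] == i means i is a root
      explicit UF(unsigned n) : up(n) {
        for (unsigned i = 0; i < n; i++) up[i] = i;   // identity mapping
      }
      unsigned find(unsigned i) {            // Find with path compression
        unsigned root = i;
        while (up[root] != root) root = up[root];     // scan equivalence chain
        while (up[i] != root) {                       // then compress it
          unsigned next = up[i]; up[i] = root; i = next;
        }
        return root;
      }
      void unite(unsigned a, unsigned b) {   // always union toward the
        a = find(a); b = find(b);            // smaller index
        if (a < b) up[b] = a; else up[a] = b;
      }
    };
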
+diff -ruN openjdk6/hotspot/src/share/vm/opto/block.hpp openjdk/hotspot/src/share/vm/opto/block.hpp
+--- openjdk6/hotspot/src/share/vm/opto/block.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/block.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)block.hpp	1.98 07/05/17 15:57:17 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,12 +19,13 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Optimization - Graph Style
+ 
+ class Block;
++class CFGLoop;
+ class MachCallNode;
+ class Matcher;
+ class RootNode;
+@@ -37,7 +35,7 @@
+ //------------------------------Block_Array------------------------------------
+ // Map dense integer indices to Blocks.  Uses classic doubling-array trick.
+ // Abstractly provides an infinite array of Block*'s, initialized to NULL.
+-// Note that the constructor just zeros things, and since I use Arena 
++// Note that the constructor just zeros things, and since I use Arena
+ // allocation I do not need a destructor to reclaim storage.
+ class Block_Array : public ResourceObj {
+   uint _size;                   // allocated size, as opposed to formal limit
+@@ -80,11 +78,22 @@
+ };
+ 
+ 
++class CFGElement : public ResourceObj {
++ public:
++  float _freq; // Execution frequency (estimate)
++
++  CFGElement() : _freq(0.0f) {}
++  virtual bool is_block() { return false; }
++  virtual bool is_loop()  { return false; }
++  Block*   as_Block() { assert(is_block(), "must be block"); return (Block*)this; }
++  CFGLoop* as_CFGLoop()  { assert(is_loop(),  "must be loop");  return (CFGLoop*)this;  }
++};
++
+ //------------------------------Block------------------------------------------
+ // This class defines a Basic Block.
+ // Basic blocks are used during the output routines, and are not used during
+ // any optimization pass.  They are created late in the game.
+-class Block : public ResourceObj {
++class Block : public CFGElement {
+  public:
+   // Nodes in this block, in order
+   Node_List _nodes;
+@@ -116,6 +125,12 @@
+   uint _dom_depth;              // Depth in dominator tree for fast LCA
+   Block* _idom;                 // Immediate dominator block
+ 
++  CFGLoop *_loop;               // Loop to which this block belongs
++  uint _rpo;                    // Number in reverse post order walk
++
++  virtual bool is_block() { return true; }
++  float succ_prob(uint i); // return probability of i'th successor
++
+   Block* dom_lca(Block* that);  // Compute LCA in dominator tree.
+ #ifdef ASSERT
+   bool dominates(Block* that) {
+@@ -131,23 +146,10 @@
+   uint code_alignment();
+ 
+   // BLOCK_FREQUENCY is a sentinel to mark uses of constant block frequencies.
+-  // It is currently also used to scale such frequencies relative to 
++  // It is currently also used to scale such frequencies relative to
+   // FreqCountInvocations relative to the old value of 1500.
+ #define BLOCK_FREQUENCY(f) ((f * (float) 1500) / FreqCountInvocations)
+ 
+-  // Execution frequency (estimate)
+-  float _freq;
+-  float _cnt;
+-
+-#ifdef ASSERT
+-  // Validate _cnt and _freq
+-  bool has_valid_counts() const { 
+-    if (_freq <= 0.0f) return false;
+-    if ((_cnt <= 0.0f) && (_cnt != COUNT_UNKNOWN)) return false;
+-    return true;
+-  }
+-#endif
+-
+   // Register Pressure (estimate) for Splitting heuristic
+   uint _reg_pressure;
+   uint _ihrp_index;
+@@ -163,7 +165,7 @@
+   void    set_raise_LCA_visited(node_idx_t x) { _raise_LCA_visited = x; }
+   node_idx_t  raise_LCA_visited() const       { return _raise_LCA_visited; }
+ 
+-  // Estimated size in bytes of first instructions in a loop. 
++  // Estimated size in bytes of first instructions in a loop.
+   uint _first_inst_size;
+   uint first_inst_size() const     { return _first_inst_size; }
+   void set_first_inst_size(uint s) { _first_inst_size = s; }
+@@ -192,7 +194,7 @@
+     return 0;
+   }
+ 
+-  // Connector blocks. Connector blocks are basic blocks devoid of 
++  // Connector blocks. Connector blocks are basic blocks devoid of
+   // instructions, but may have relevant non-instruction Nodes, such as
+   // Phis or MergeMems. Such blocks are discovered and marked during the
+   // RemoveEmpty phase, and elided during Output.
+@@ -202,28 +204,28 @@
+ 
+   // Create a new Block with given head Node.
+   // Creates the (empty) predecessor arrays.
+-  Block( Arena *a, Node *headnode ) 
+-    : _nodes(a), 
+-      _succs(a), 
+-      _num_succs(0), 
+-      _pre_order(0), 
+-      _idom(0), 
+-      _freq(0.0f), 
+-      _cnt(COUNT_UNKNOWN), 
+-      _reg_pressure(0), 
+-      _ihrp_index(1), 
+-      _freg_pressure(0), 
+-      _fhrp_index(1), 
++  Block( Arena *a, Node *headnode )
++    : CFGElement(),
++      _nodes(a),
++      _succs(a),
++      _num_succs(0),
++      _pre_order(0),
++      _idom(0),
++      _loop(NULL),
++      _reg_pressure(0),
++      _ihrp_index(1),
++      _freg_pressure(0),
++      _fhrp_index(1),
+       _raise_LCA_mark(0),
+       _raise_LCA_visited(0),
+-      _first_inst_size(999999), 
+-      _connector(false) { 
+-    _nodes.push(headnode); 
++      _first_inst_size(999999),
++      _connector(false) {
++    _nodes.push(headnode);
+   }
+ 
+   // Index of 'end' Node
+   uint end_idx() const {
+-    // %%%%% add a proj after every goto 
++    // %%%%% add a proj after every goto
+     // so (last->is_block_proj() != last) always, then simplify this code
+     // This will not give correct end_idx for block 0 when it only contains root.
+     int last_idx = _nodes.size() - 1;
+@@ -232,7 +234,7 @@
+     return (last->is_block_proj() == last) ? last_idx : (last_idx - _num_succs);
+   }
+ 
+-  // Basic blocks have a Node which ends them.  This Node determines which 
++  // Basic blocks have a Node which ends them.  This Node determines which
+   // basic block follows this one in the program flow.  This Node is either an
+   // IfNode, a GotoNode, a JmpNode, or a ReturnNode.
+   Node *end() const { return _nodes[end_idx()]; }
+@@ -243,7 +245,7 @@
+   // Find node in block
+   uint find_node( const Node *n ) const;
+   // Find and remove n from block list
+-  void find_remove( const Node *n ); 
++  void find_remove( const Node *n );
+ 
+   // Schedule a call next in the block
+   uint sched_call(Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, int *ready_cnt, MachCallNode *mcall, VectorSet &next_call);
+@@ -255,7 +257,7 @@
+   bool schedule_local(PhaseCFG *cfg, Matcher &m, int *ready_cnt, VectorSet &next_call);
+   // Cleanup if any code lands between a Call and his Catch
+   void call_catch_cleanup(Block_Array &bbs);
+-  // Detect implicit-null-check opportunities.  Basically, find NULL checks 
++  // Detect implicit-null-check opportunities.  Basically, find NULL checks
+   // with suitable memory ops nearby.  Use the memory op to do the NULL check.
+   // I can generate a memory op if there is not one nearby.
+   void implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons);
+@@ -273,12 +275,16 @@
+     return s;
+   }
+ 
+-  // Successor block, after forwarding through connectors 
++  // Successor block, after forwarding through connectors
+   Block* non_connector_successor(int i) const {
+     return _succs[i]->non_connector();
+   }
+ 
+-  // True if block is way uncommon
++  // Examine block's code shape to predict if it is not commonly executed.
++  bool has_uncommon_code() const;
++
++  // Use frequency calculations and code shape to predict if the block
++  // is uncommon.
+   bool is_uncommon( Block_Array &bbs ) const;
+ 
+ #ifndef PRODUCT
+@@ -299,10 +305,10 @@
+   // Build a proper looking cfg.  Return count of basic blocks
+   uint build_cfg();
+ 
+-  // Perform DFS search.  
+-  // Setup 'vertex' as DFS to vertex mapping.  
+-  // Setup 'semi' as vertex to DFS mapping.  
+-  // Set 'parent' to DFS parent.             
++  // Perform DFS search.
++  // Setup 'vertex' as DFS to vertex mapping.
++  // Setup 'semi' as vertex to DFS mapping.
++  // Set 'parent' to DFS parent.
+   uint DFS( Tarjan *tarjan );
+ 
+   // Helper function to insert a node into a block
+@@ -325,12 +331,13 @@
+   PhaseCFG( Arena *a, RootNode *r, Matcher &m );
+ 
+   uint _num_blocks;             // Count of basic blocks
+-  Block_List _blocks;           // List of basic blocks  
++  Block_List _blocks;           // List of basic blocks
+   RootNode *_root;              // Root of whole program
+   Block_Array _bbs;             // Map Nodes to owning Basic Block
+   Block *_broot;                // Basic block of root
+   uint _rpo_ctr;
+-  
++  CFGLoop* _root_loop;
++
+   // Per node latency estimation, valid only during GCM
+   GrowableArray<uint> _node_latency;
+ 
+@@ -347,7 +354,7 @@
+   // Global Code Motion.  See Click's PLDI95 paper.  Place Nodes in specific
+   // basic blocks; i.e. _bbs now maps _idx for all Nodes to some Block.
+   void GlobalCodeMotion( Matcher &m, uint unique, Node_List &proj_list );
+- 
++
+   // Compute the (backwards) latency of a node from the uses
+   void latency_from_uses(Node *n);
+ 
+@@ -384,11 +391,13 @@
+   // into Goto's so that when you enter the infinite loop you indeed hang.
+   void convert_NeverBranch_to_Goto(Block *b);
+ 
++  CFGLoop* create_loop_tree();
++
+   // Insert a node into a block, and update the _bbs
+-  void insert( Block *b, uint idx, Node *n ) { 
+-    b->_nodes.insert( idx, n ); 
+-    _bbs.map( n->_idx, b ); 
+-  } 
++  void insert( Block *b, uint idx, Node *n ) {
++    b->_nodes.insert( idx, n );
++    _bbs.map( n->_idx, b );
++  }
+ 
+ #ifndef PRODUCT
+   bool trace_opto_pipelining() const { return _trace_opto_pipelining; }
+@@ -439,3 +448,63 @@
+ 
+ };
+ 
++//----------------------------BlockProbPair---------------------------
++// Ordered pair of a target block and the probability of the edge to it.
++class BlockProbPair VALUE_OBJ_CLASS_SPEC {
++protected:
++  Block* _target;      // block target
++  float  _prob;        // probability of edge to block
++public:
++  BlockProbPair() : _target(NULL), _prob(0.0) {}
++  BlockProbPair(Block* b, float p) : _target(b), _prob(p) {}
++
++  Block* get_target() const { return _target; }
++  float get_prob() const { return _prob; }
++};
++
++//------------------------------CFGLoop-------------------------------------------
++class CFGLoop : public CFGElement {
++  int _id;
++  int _depth;
++  CFGLoop *_parent;      // root of loop tree is the method level "pseudo" loop; its parent is null
++  CFGLoop *_sibling;     // null terminated list
++  CFGLoop *_child;       // first child, use child's sibling to visit all immediately nested loops
++  GrowableArray<CFGElement*> _members; // list of members of loop
++  GrowableArray<BlockProbPair> _exits; // list of successor blocks and their probabilities
++  float _exit_prob;       // probability any loop exit is taken on a single loop iteration
++  void update_succ_freq(Block* b, float freq);
++
++ public:
++  CFGLoop(int id) :
++    CFGElement(),
++    _id(id),
++    _depth(0),
++    _parent(NULL),
++    _sibling(NULL),
++    _child(NULL),
++    _exit_prob(1.0f) {}
++  CFGLoop* parent() { return _parent; }
++  void push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk);
++  void add_member(CFGElement *s) { _members.push(s); }
++  void add_nested_loop(CFGLoop* cl);
++  Block* head() {
++    assert(_members.at(0)->is_block(), "head must be a block");
++    Block* hd = _members.at(0)->as_Block();
++    assert(hd->_loop == this, "just checking");
++    assert(hd->head()->is_Loop(), "must begin with loop head node");
++    return hd;
++  }
++  Block* backedge_block(); // Return the block on the backedge of the loop (else NULL)
++  void compute_loop_depth(int depth);
++  void compute_freq(); // compute frequency with loop assuming head freq 1.0f
++  void scale_freq();   // scale frequency by loop trip count (including outer loops)
++  bool in_loop_nest(Block* b);
++  float trip_count() const { return 1.0f / _exit_prob; }
++  virtual bool is_loop()  { return true; }
++  int id() { return _id; }
++
++#ifndef PRODUCT
++  void dump( ) const;
++  void dump_tree() const;
++#endif
++};
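
The CFGLoop class added above encodes the loop nest as a first-child/next-sibling tree and derives a trip count from the per-iteration exit probability: if some exit is taken with probability p on each iteration, the iteration count is geometrically distributed with mean 1/p, which is exactly trip_count(). A standalone sketch of both ideas, with simplified stand-in types:

    struct LoopNode {
      LoopNode* sibling;               // next loop at the same nesting level
      LoopNode* child;                 // first immediately nested loop
      float     exit_prob;             // P(any exit taken per iteration)
      float trip_count() const { return 1.0f / exit_prob; }
    };

    // Depth-first walk of the nest via the child/sibling links, the same
    // traversal the _child and _sibling comments above describe.
    int max_depth(const LoopNode* lp, int depth) {
      int d = depth;
      for (const LoopNode* c = lp->child; c != 0; c = c->sibling) {
        int cd = max_depth(c, depth + 1);
        if (cd > d) d = cd;
      }
      return d;
    }
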
+diff -ruN openjdk6/hotspot/src/share/vm/opto/buildOopMap.cpp openjdk/hotspot/src/share/vm/opto/buildOopMap.cpp
+--- openjdk6/hotspot/src/share/vm/opto/buildOopMap.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/buildOopMap.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)buildOopMap.cpp	1.37 07/05/05 17:06:11 JVM"
+-#endif
+ /*
+  * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -68,9 +65,9 @@
+ 
+ 
+ //------------------------------OopFlow----------------------------------------
+-// Structure to pass around 
++// Structure to pass around
+ struct OopFlow : public ResourceObj {
+-  short *_callees;              // Array mapping register to callee-saved 
++  short *_callees;              // Array mapping register to callee-saved
+   Node **_defs;                 // array mapping register to reaching def
+                                 // or NULL if dead/conflict
+   // OopFlow structs, when not being actively modified, describe the _end_ of
+@@ -206,7 +203,7 @@
+ OopMap *OopFlow::build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, int* live ) {
+   int framesize = regalloc->_framesize;
+   int max_inarg_slot = OptoReg::reg2stack(regalloc->_matcher._new_SP);
+-  debug_only( char *dup_check = NEW_RESOURCE_ARRAY(char,OptoReg::stack0()); 
++  debug_only( char *dup_check = NEW_RESOURCE_ARRAY(char,OptoReg::stack0());
+               memset(dup_check,0,OptoReg::stack0()) );
+ 
+   OopMap *omap = new OopMap( framesize,  max_inarg_slot );
+@@ -220,7 +217,7 @@
+ 
+     // %%% C2 can use 2 OptoRegs when the physical register is only one 64bit
+     // register in that case we'll get an non-concrete register for the second
+-    // half. We only need to tell the map the register once! 
++    // half. We only need to tell the map the register once!
+     //
+     // However for the moment we disable this change and leave things as they
+     // were.
+@@ -251,13 +248,13 @@
+ 
+       // Check for a legal reg name in the oopMap and bailout if it is not.
+       if (!omap->legal_vm_reg_name(r)) {
+-	regalloc->C->record_method_not_compilable("illegal oopMap register name");
+-	continue;
++        regalloc->C->record_method_not_compilable("illegal oopMap register name");
++        continue;
+       }
+       if( t->is_ptr()->_offset == 0 ) { // Not derived?
+         if( mcall ) {
+-          // Outgoing argument GC mask responsibility belongs to the callee, 
+-          // not the caller.  Inspect the inputs to the call, to see if 
++          // Outgoing argument GC mask responsibility belongs to the callee,
++          // not the caller.  Inspect the inputs to the call, to see if
+           // this live-range is one of them.
+           uint cnt = mcall->tf()->domain()->cnt();
+           uint j;
+@@ -268,11 +265,11 @@
+             continue;           // Continue on to the next register
+         }
+         omap->set_oop(r);
+-      } else {                  // Else it's derived.  
++      } else {                  // Else it's derived.
+         // Find the base of the derived value.
+         uint i;
+         // Fast, common case, scan
+-        for( i = jvms->oopoff(); i < n->req(); i+=2 ) 
++        for( i = jvms->oopoff(); i < n->req(); i+=2 )
+           if( n->in(i) == def ) break; // Common case
+         if( i == n->req() ) {   // Missed, try a more generous scan
+           // Scan again, but this time peek through copies
+@@ -345,7 +342,7 @@
+   }
+   */
+ #endif
+-  
++
+   return omap;
+ }
+ 
+@@ -374,13 +371,13 @@
+ 
+     while( worklist->size() ) { // Standard worklist algorithm
+       Block *b = worklist->rpop();
+-      
++
+       // Copy first successor into my tmp_live space
+       int s0num = b->_succs[0]->_pre_order;
+       int *t = &live[s0num*max_reg_ints];
+       for( int i=0; i<max_reg_ints; i++ )
+         tmp_live[i] = t[i];
+-      
++
+       // OR in the remaining live registers
+       for( uint j=1; j<b->_num_succs; j++ ) {
+         uint sjnum = b->_succs[j]->_pre_order;
+@@ -388,7 +385,7 @@
+         for( int i=0; i<max_reg_ints; i++ )
+           tmp_live[i] |= t[i];
+       }
+-      
++
+       // Now walk tmp_live up the block backwards, computing live
+       for( int k=b->_nodes.size()-1; k>=0; k-- ) {
+         Node *n = b->_nodes[k];
+@@ -401,12 +398,12 @@
+         MachNode *m = n->is_Mach() ? n->as_Mach() : NULL;
+ 
+         // Check if m is potentially a CISC alternate instruction (i.e, possibly
+-        // synthesized by RegAlloc from a conventional instruction and a 
++        // synthesized by RegAlloc from a conventional instruction and a
+         // spilled input)
+         bool is_cisc_alternate = false;
+         if (UseCISCSpill && m) {
+           is_cisc_alternate = m->is_cisc_alternate();
+-        }             
++        }
+ 
+         // GEN use'd bits
+         for( uint l=1; l<n->req(); l++ ) {
+@@ -417,8 +414,8 @@
+           if( OptoReg::is_valid(first) ) set_live_bit(tmp_live,first);
+           if( OptoReg::is_valid(second) ) set_live_bit(tmp_live,second);
+           // If we use the stack pointer in a cisc-alternative instruction,
+-          // check for use as a memory operand.  Then reconstruct the RegName 
+-          // for this stack location, and set the appropriate bit in the 
++          // check for use as a memory operand.  Then reconstruct the RegName
++          // for this stack location, and set the appropriate bit in the
+           // live vector 4987749.
+           if (is_cisc_alternate && def == fp) {
+             const TypePtr *adr_type = NULL;
+@@ -431,7 +428,7 @@
+               // look at a specific input instead of all inputs.
+               assert(!def->bottom_type()->isa_oop_ptr(), "expecting non-oop mem input");
+             } else if (base != fp || offset == Type::OffsetBot) {
+-              // Do nothing: the fp operand is either not from a memory use 
++              // Do nothing: the fp operand is either not from a memory use
+               // (base == NULL) OR the fp is used in a non-memory context
+               // (base is some other register) OR the offset is not constant,
+               // so it is not a stack slot.
+@@ -461,7 +458,7 @@
+         }
+ 
+       }
+-      
++
+       // Now at block top, see if we have any changes.  If so, propagate
+       // to prior blocks.
+       int *old_live = &live[b->_pre_order*max_reg_ints];
+@@ -478,7 +475,7 @@
+           worklist->push(cfg->_bbs[b->pred(l)->_idx]);
+       }
+     }
+-    
++
+     // Scan for any missing safepoints.  Happens to infinite loops
+     // ala ZKM.jar
+     uint i;
+@@ -515,7 +512,7 @@
+ 
+   Arena *A = Thread::current()->resource_area();
+   Block_List worklist;          // Worklist of pending blocks
+-  
++
+   int max_reg_ints = round_to(max_reg, BitsPerInt)>>LogBitsPerInt;
+   Dict *safehash = NULL;        // Used for assert only
+   // Compute a backwards liveness per register.  Needs a bitarray of
+@@ -551,10 +548,10 @@
+     // structures rapidly and cut down on the memory footprint.
+     // Note: not all predecessors might be visited yet (must happen for
+     // irreducible loops).  This is OK, since every live value must have the
+-    // SAME reaching def for the block, so any reaching def is OK.  
++    // SAME reaching def for the block, so any reaching def is OK.
+     uint i;
+ 
+-    Block *b = worklist.pop(); 
++    Block *b = worklist.pop();
+     // Ignore root block
+     if( b == _cfg->_broot ) continue;
+     // Block is already done?  Happens if block has several predecessors,
+@@ -615,7 +612,7 @@
+ 
+     // Now push flow forward
+     flows[b->_pre_order] = flow;// Mark flow for this block
+-    flow->_b = b;               
++    flow->_b = b;
+     flow->compute_reach( _regalloc, max_reg, safehash );
+ 
+     // Now push children onto worklist
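
The buildOopMap.cpp hunks above are whitespace reflows of a classic backwards-liveness worklist: a block's live-out set is the union of its successors' live-in sets, each instruction is then walked bottom-up killing its defs and generating its uses, and predecessors are requeued whenever a block's live-in changes. A compact sketch of that dataflow over simplified stand-in types (HotSpot uses per-register bit vectors rather than sets):

    #include <set>
    #include <vector>

    struct Insn { std::vector<int> defs, uses; };
    struct Blk {
      std::vector<Blk*> preds, succs;
      std::vector<Insn> insns;
      std::set<int> live_in;                       // live registers at entry
    };

    void liveness(std::vector<Blk*> work) {
      while (!work.empty()) {
        Blk* b = work.back(); work.pop_back();
        std::set<int> live;                        // live-out = OR of succ live-in
        for (Blk* s : b->succs)
          live.insert(s->live_in.begin(), s->live_in.end());
        for (auto it = b->insns.rbegin(); it != b->insns.rend(); ++it) {
          for (int d : it->defs) live.erase(d);    // KILL defined registers
          for (int u : it->uses) live.insert(u);   // GEN used registers
        }
        if (live != b->live_in) {                  // changed: requeue preds
          b->live_in = live;
          for (Blk* p : b->preds) work.push_back(p);
        }
      }
    }
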
+diff -ruN openjdk6/hotspot/src/share/vm/opto/bytecodeInfo.cpp openjdk/hotspot/src/share/vm/opto/bytecodeInfo.cpp
+--- openjdk6/hotspot/src/share/vm/opto/bytecodeInfo.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/bytecodeInfo.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)bytecodeInfo.cpp	1.122 07/05/05 17:06:12 JVM"
+-#endif
+ /*
+  * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -46,7 +43,7 @@
+ InlineTree::InlineTree( Compile* c, const InlineTree *caller_tree, ciMethod* callee, JVMState* caller_jvms, int caller_bci, float site_invoke_ratio )
+ : C(c), _caller_jvms(caller_jvms),
+   _caller_tree((InlineTree*)caller_tree),
+-  _method(callee), _site_invoke_ratio(site_invoke_ratio), 
++  _method(callee), _site_invoke_ratio(site_invoke_ratio),
+   _count_inline_bcs(method()->code_size()) {
+   NOT_PRODUCT(_count_inlines = 0;)
+   if (_caller_jvms != NULL) {
+@@ -82,7 +79,7 @@
+   for (int i = depth; i != 0; --i) tty->print("  ");
+ }
+ 
+-// positive filter: should send be inlined?  returns NULL, if yes, or rejection msg 
++// positive filter: should send be inlined?  returns NULL, if yes, or rejection msg
+ const char* InlineTree::shouldInline(ciMethod* callee_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const {
+   // Allows targeted inlining
+   if(callee_method->should_inline()) {
+@@ -129,7 +126,7 @@
+     }
+   } else {
+     // Not hot.  Check for medium-sized pre-existing nmethod at cold sites.
+-    if (callee_method->has_compiled_code() && callee_method->instructions_size() > InlineSmallCode/4) 
++    if (callee_method->has_compiled_code() && callee_method->instructions_size() > InlineSmallCode/4)
+       return "already compiled into a medium method";
+   }
+   if (size > max_size) {
+@@ -141,9 +138,9 @@
+ }
+ 
+ 
+-// negative filter: should send NOT be inlined?  returns NULL, ok to inline, or rejection msg 
++// negative filter: should send NOT be inlined?  returns NULL, ok to inline, or rejection msg
+ const char* InlineTree::shouldNotInline(ciMethod *callee_method, WarmCallInfo* wci_result) const {
+-  // negative filter: should send NOT be inlined?  returns NULL (--> inline) or rejection msg 
++  // negative filter: should send NOT be inlined?  returns NULL (--> inline) or rejection msg
+   if (!UseOldInlining) {
+     const char* fail = NULL;
+     if (callee_method->is_abstract())               fail = "abstract method";
+@@ -171,7 +168,7 @@
+ 
+     if (callee_method->has_compiled_code() && callee_method->instructions_size() > InlineSmallCode) {
+       wci_result->set_profit(wci_result->profit() * 0.1);
+-      // %%% adjust wci_result->size()? 
++      // %%% adjust wci_result->size()?
+     }
+ 
+     return NULL;
+@@ -181,17 +178,17 @@
+   if (callee_method->is_abstract())               return "abstract method";
+   // note: we allow ik->is_abstract()
+   if (!callee_method->holder()->is_initialized()) return "method holder not initialized";
+-  if (callee_method->is_native())                 return "native method"; 
++  if (callee_method->is_native())                 return "native method";
+   if (callee_method->has_unloaded_classes_in_signature()) return "unloaded signature classes";
+ 
+   if (callee_method->should_inline()) {
+     // ignore heuristic controls on inlining
+     return NULL;
+-  }  
++  }
+ 
+   // Now perform checks which are heuristic
+ 
+-  if( callee_method->has_compiled_code() && callee_method->instructions_size() > InlineSmallCode ) 
++  if( callee_method->has_compiled_code() && callee_method->instructions_size() > InlineSmallCode )
+     return "already compiled into a big method";
+ 
+   // don't inline exception code unless the top method belongs to an
+@@ -206,7 +203,7 @@
+   }
+ 
+   // use frequency-based objections only for non-trivial methods
+-  if (callee_method->code_size() <= MaxTrivialSize) return NULL;    
++  if (callee_method->code_size() <= MaxTrivialSize) return NULL;
+   if (UseInterpreter && !CompileTheWorld) { // don't use counts with -Xcomp or CTW
+     if (!callee_method->has_compiled_code() && !callee_method->was_executed_more_than(0)) return "never executed";
+     if (!callee_method->was_executed_more_than(MIN2(MinInliningThreshold, CompileThreshold >> 1))) return "executed < MinInliningThreshold times";
+@@ -257,8 +254,8 @@
+   if (UseOldInlining && ClipInlining
+       && (int)count_inline_bcs() + size >= DesiredMethodLimit) {
+     return "size > DesiredMethodLimit";
+-  } 
+-  
++  }
++
+   // ok, inline this method
+   return NULL;
+ }
+@@ -488,6 +485,6 @@
+   methods_seen_old = methods_seen;
+   explicit_null_checks_inserted_old = explicit_null_checks_inserted;
+   explicit_null_checks_elided_old = explicit_null_checks_elided;
+-}  
++}
+ 
+ #endif
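
The shouldInline / shouldNotInline filters above follow one convention: return NULL when there is no objection and a human-readable rejection string otherwise, so a single call both decides and supplies the log message. A minimal sketch of the pattern; the thresholds are invented for illustration and are not C2's:

    #include <cstddef>

    // NULL => ok to inline; otherwise the string is the refusal reason.
    const char* should_inline(int callee_bytecodes, bool is_native, int depth) {
      if (is_native)              return "native method";
      if (depth > 9)              return "inlining too deep";     // invented limit
      if (callee_bytecodes > 35)  return "callee too large";      // invented limit
      return NULL;
    }
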
+diff -ruN openjdk6/hotspot/src/share/vm/opto/c2compiler.cpp openjdk/hotspot/src/share/vm/opto/c2compiler.cpp
+--- openjdk6/hotspot/src/share/vm/opto/c2compiler.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/c2compiler.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)c2compiler.cpp	1.29 07/05/05 17:06:11 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -59,7 +56,7 @@
+   bool callee_saved_floats = false;
+   for( OptoReg::Name i=OptoReg::Name(0); i<OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i,1) ) {
+     // Is there a callee-saved float or double?
+-    if( register_save_policy[i] == 'E' /* callee-saved */ && 
++    if( register_save_policy[i] == 'E' /* callee-saved */ &&
+        (register_save_type[i] == Op_RegF || register_save_type[i] == Op_RegD) ) {
+       callee_saved_floats = true;
+     }
+@@ -88,7 +85,7 @@
+ 
+   // Note that this is being called from a compiler thread not the
+   // main startup thread.
+-  
++
+   if (_runtimes != initialized) {
+     initialize_runtimes( initialize_runtime, &_runtimes);
+   }
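
The first c2compiler.cpp hunk above scans the platform's register-save tables for a callee-saved ('E') float or double register, which determines whether float save/restore support is needed. A sketch of that scan, with the arrays as illustrative stand-ins for HotSpot's register_save_policy and register_save_type:

    enum { Op_RegF = 1, Op_RegD = 2 };            // stand-in type tags

    bool has_callee_saved_floats(const char* save_policy,
                                 const int*  save_type, int nregs) {
      for (int i = 0; i < nregs; i++)
        if (save_policy[i] == 'E' &&              // 'E' == callee-saved
            (save_type[i] == Op_RegF || save_type[i] == Op_RegD))
          return true;
      return false;
    }
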
+diff -ruN openjdk6/hotspot/src/share/vm/opto/c2compiler.hpp openjdk/hotspot/src/share/vm/opto/c2compiler.hpp
+--- openjdk6/hotspot/src/share/vm/opto/c2compiler.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/c2compiler.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c2compiler.hpp	1.28 07/05/05 17:06:11 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class C2Compiler : public AbstractCompiler {
+@@ -43,24 +40,17 @@
+   // Customization
+   bool needs_adapters         () { return true; }
+   bool needs_stubs            () { return true; }
+-  
++
+   void initialize();
+ 
+   // Compilation entry point for methods
+   void compile_method(ciEnv* env,
+                       ciMethod* target,
+                       int entry_bci);
+-  
++
+   // sentinel value used to trigger backtracking in compile_method().
+   static const char* retry_no_subsuming_loads();
+ 
+   // Print compilation timers and statistics
+   void print_timers();
+ };
+-
+-
+-
+-
+-
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/opto/c2_globals.cpp openjdk/hotspot/src/share/vm/opto/c2_globals.cpp
+--- openjdk6/hotspot/src/share/vm/opto/c2_globals.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/c2_globals.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)c2_globals.cpp	1.14 07/05/17 15:57:19 JVM"
+-#endif
+ /*
+  * Copyright 2000-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/opto/c2_globals.hpp openjdk/hotspot/src/share/vm/opto/c2_globals.hpp
+--- openjdk6/hotspot/src/share/vm/opto/c2_globals.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/c2_globals.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c2_globals.hpp	1.90 07/09/25 22:01:58 JVM"
+-#endif
+ /*
+  * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+@@ -131,6 +128,9 @@
+   notproduct(bool, PrintOptoPeephole, false,                                \
+           "Print New compiler peephole replacements")                       \
+                                                                             \
++  develop(bool, PrintCFGBlockFreq, false,                                   \
++          "Print CFG block freqencies")                                     \
++                                                                            \
+   develop(bool, TraceOptoParse, false,                                      \
+           "Trace bytecode parse and control-flow merge")                    \
+                                                                             \
+@@ -142,11 +142,15 @@
+           "of rounds of unroll,optimize,..")                                \
+                                                                             \
+   develop(intx, UnrollLimitForProfileCheck, 1,                              \
+-	  "Don't use profile_trip_cnt() to restrict unrolling until "       \
+-	  "unrolling would push the number of unrolled iterations above "   \
++          "Don't use profile_trip_cnt() to restrict unrolling until "       \
++          "unrolling would push the number of unrolled iterations above "   \
+           "UnrollLimitForProfileCheck. A higher value allows more "         \
+           "unrolling. Zero acts as a very large value." )                   \
+                                                                             \
++  product(intx, MultiArrayExpandLimit, 6,                                   \
++          "Maximum number of individual allocations in an inline-expanded " \
++          "multianewarray instruction")                                     \
++                                                                            \
+   notproduct(bool, TraceProfileTripCount, false,                            \
+           "Trace profile loop trip count information")                      \
+                                                                             \
+@@ -159,6 +163,18 @@
+   develop(bool, UseExactTypes, true,                                        \
+           "Use exact types to eliminate array store checks and v-calls")    \
+                                                                             \
++  product(intx, TrackedInitializationLimit, 50,                             \
++          "When initializing fields, track up to this many words")          \
++                                                                            \
++  product(bool, ReduceFieldZeroing, true,                                   \
++          "When initializing fields, try to avoid needless zeroing")        \
++                                                                            \
++  product(bool, ReduceInitialCardMarks, true,                               \
++          "When initializing fields, try to avoid needless card marks")     \
++                                                                            \
++  product(bool, ReduceBulkZeroing, true,                                    \
++          "When bulk-initializing, try to avoid needless zeroing")          \
++                                                                            \
+   develop_pd(intx, RegisterCostAreaRatio,                                   \
+           "Spill selection in reg allocator: scale area by (X/64K) before " \
+           "adding cost")                                                    \
+@@ -169,7 +185,7 @@
+   notproduct(bool, VerifyGraphEdges , false,                                \
+           "Verify Bi-directional Edges")                                    \
+                                                                             \
+-  notproduct(bool, VerifyDUIterators, false,                                \
++  notproduct(bool, VerifyDUIterators, true,                                 \
+           "Verify the safety of all iterations of Bi-directional Edges")    \
+                                                                             \
+   notproduct(bool, VerifyHashTableKeys, true,                               \
+@@ -211,7 +227,7 @@
+   notproduct(bool, TraceLoopUnswitching, false,                             \
+           "Trace loop unswitching")                                         \
+                                                                             \
+-  product(bool, UseSuperWord, false,                                        \
++  product(bool, UseSuperWord, true,                                         \
+           "Transform scalar operations into superword operations")          \
+                                                                             \
+   develop(bool, SuperWordRTDepCheck, false,                                 \
+@@ -233,6 +249,20 @@
+   develop(bool, SparcV9RegsHiBitsZero, true,                                \
+           "Assume Sparc V9 I&L registers on V8+ systems are zero-extended") \
+                                                                             \
++  develop(intx, PrintIdealGraphLevel, 0,                                    \
++          "Print ideal graph to XML file / network interface. "             \
++          "By default attempts to connect to the visualizer on a socket.")  \
++                                                                            \
++  develop(intx, PrintIdealGraphPort, 4444,                                  \
++          "Ideal graph printer to network port")                            \
++                                                                            \
++  develop(ccstr, PrintIdealGraphAddress, "127.0.0.1",                       \
++          "IP address to connect to visualizer")                            \
++                                                                            \
++  develop(ccstr, PrintIdealGraphFile, NULL,                                 \
++          "File to dump ideal graph to.  If set overrides the "             \
++          "use of the network")                                             \
++                                                                            \
+   product(bool, UseOldInlining, true,                                       \
+           "Enable the 1.3 inlining strategy")                               \
+                                                                             \
+@@ -343,8 +373,10 @@
+   notproduct(bool, PrintEscapeAnalysis, false,                              \
+           "Print the results of escape analysis")                           \
+                                                                             \
+-  product(intx, MaxLabelRootDepth, 1100, 				    \
++  product(bool, EliminateAllocations, true,                                 \
++          "Use escape analysis to eliminate allocations")                   \
++                                                                            \
++  product(intx, MaxLabelRootDepth, 1100,                                    \
+           "Maximum times call Label_Root to prevent stack overflow")        \
+ 
+ C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)
+-
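
The flag table above is an X-macro: each entry is written once as develop(...)/product(...)/notproduct(...), and the closing C2_FLAGS(DECLARE_...) line re-expands the entire table with whatever per-kind macros the build supplies. A reduced sketch of the technique with invented flag names:

    typedef long intx;                       // stand-in for HotSpot's intx

    #define MY_FLAGS(develop, product)                                \
      develop(bool, TraceWidget, false, "trace the widget pass")      \
      product(intx, WidgetLimit,  6,    "max widgets per expansion")

    #define DECLARE_FLAG(type, name, value, doc) extern type name;
    #define DEFINE_FLAG(type, name, value, doc)  type name = value;

    MY_FLAGS(DECLARE_FLAG, DECLARE_FLAG)     // in a header
    MY_FLAGS(DEFINE_FLAG,  DEFINE_FLAG)      // in exactly one .cpp
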
+diff -ruN openjdk6/hotspot/src/share/vm/opto/callGenerator.cpp openjdk/hotspot/src/share/vm/opto/callGenerator.cpp
+--- openjdk6/hotspot/src/share/vm/opto/callGenerator.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/callGenerator.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)callGenerator.cpp	1.47 07/05/05 17:06:12 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -79,7 +76,7 @@
+ #ifdef ASSERT
+   if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
+     MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
+-    assert(C->env()->system_dictionary_modification_counter_changed(), 
++    assert(C->env()->system_dictionary_modification_counter_changed(),
+            "Must invalidate if TypeFuncs differ");
+   }
+ #endif
+@@ -403,6 +400,11 @@
+                          CallGenerator* if_hit, float hit_prob)
+     : CallGenerator(if_missed->method())
+   {
++    // The call profile data may predict the hit_prob as extreme as 0 or 1.
++    // Remove the extreme values from the range.
++    if (hit_prob > PROB_MAX)   hit_prob = PROB_MAX;
++    if (hit_prob < PROB_MIN)   hit_prob = PROB_MIN;
++
+     _predicted_receiver = predicted_receiver;
+     _if_missed          = if_missed;
+     _if_hit             = if_hit;
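
The hunk above clamps the profiled hit probability into [PROB_MIN, PROB_MAX]: type-profile counters can report a receiver hit rate of exactly 0 or 1, and downstream branch-frequency math wants probabilities strictly away from the extremes. A sketch of the clamp; PROB_MIN and PROB_MAX stand in for HotSpot's bounds, and the values here are illustrative:

    static const float PROB_MIN = 1e-6f;          // illustrative bound
    static const float PROB_MAX = 1.0f - 1e-6f;   // illustrative bound

    static float clamp_prob(float p) {
      if (p > PROB_MAX) return PROB_MAX;   // never treat a hit as certain
      if (p < PROB_MIN) return PROB_MIN;   // never treat a hit as impossible
      return p;
    }
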
+@@ -556,7 +558,17 @@
+   int nargs = method()->arg_size();
+   kit.inc_sp(nargs);
+   assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
+-  kit.uncommon_trap(_reason, _action);
++  if (_reason == Deoptimization::Reason_class_check &&
++      _action == Deoptimization::Action_maybe_recompile) {
++    // Temp fix for 6529811
++    // Don't allow uncommon_trap to override our decision to recompile in the event
++    // of a class cast failure for a monomorphic call as it will never let us convert
++    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
++    bool keep_exact_action = true;
++    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
++  } else {
++    kit.uncommon_trap(_reason, _action);
++  }
+   return kit.transfer_exceptions_into_jvms();
+ }
+ 
+@@ -611,7 +623,7 @@
+   return false;
+ }
+ 
+-// compute_heat:  
++// compute_heat:
+ float WarmCallInfo::compute_heat() const {
+   assert(!is_cold(), "compute heat only on warm nodes");
+   assert(!is_hot(),  "compute heat only on warm nodes");
+diff -ruN openjdk6/hotspot/src/share/vm/opto/callGenerator.hpp openjdk/hotspot/src/share/vm/opto/callGenerator.hpp
+--- openjdk6/hotspot/src/share/vm/opto/callGenerator.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/callGenerator.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)callGenerator.hpp	1.19 07/05/05 17:06:12 JVM"
+-#endif
+ /*
+  * Copyright 2000-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //---------------------------CallGenerator-------------------------------------
+@@ -175,7 +172,7 @@
+ 
+   // We approximate this mythical quantity by playing with averages,
+   // rough estimates, and assumptions that history repeats itself.
+-  // The basic formula count * profit is heuristically adjusted 
++  // The basic formula count * profit is heuristically adjusted
+   // by looking at the expected compilation and execution times of
+   // of the inlined call.
+ 
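
The comment above describes a warm call site's heat as roughly count * profit, adjusted by the expected compilation and execution times. A hedged sketch of that shape; the adjustment factor is a placeholder, not C2's actual formula:

    float call_site_heat(float count, float profit,
                         float compile_cost, float exec_saving) {
      float heat = count * profit;             // the basic formula
      if (compile_cost > 0.0f)                 // discount sites whose compile
        heat *= exec_saving / compile_cost;    // time outweighs the savings
      return heat;
    }
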
+diff -ruN openjdk6/hotspot/src/share/vm/opto/callnode.cpp openjdk/hotspot/src/share/vm/opto/callnode.cpp
+--- openjdk6/hotspot/src/share/vm/opto/callnode.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/callnode.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)callnode.cpp	1.235 07/05/05 17:06:13 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Portions of code courtesy of Clifford Click
+@@ -39,7 +36,7 @@
+ const Type *StartNode::bottom_type() const { return _domain; }
+ const Type *StartNode::Value(PhaseTransform *phase) const { return _domain; }
+ #ifndef PRODUCT
+-void StartNode::dump_spec() const { tty->print(" #"); _domain->dump();}
++void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
+ #endif
+ 
+ //------------------------------Ideal------------------------------------------
+@@ -53,7 +50,7 @@
+ }
+ 
+ //------------------------------Registers--------------------------------------
+-const RegMask &StartNode::in_RegMask(uint) const { 
++const RegMask &StartNode::in_RegMask(uint) const {
+   return RegMask::Empty;
+ }
+ 
+@@ -61,7 +58,7 @@
+ // Construct projections for incoming parameters, and their RegMask info
+ Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
+   switch (proj->_con) {
+-  case TypeFunc::Control: 
++  case TypeFunc::Control:
+   case TypeFunc::I_O:
+   case TypeFunc::Memory:
+     return new (match->C, 1) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
+@@ -79,7 +76,7 @@
+       RegMask &rm = match->_calling_convention_mask[parm_num];
+       return new (match->C, 1) MachProjNode(this,proj->_con,rm,ideal_reg);
+     }
+-  } 
++  }
+   return NULL;
+ }
+ 
+@@ -100,13 +97,13 @@
+ };
+ 
+ #ifndef PRODUCT
+-void ParmNode::dump_spec() const {
++void ParmNode::dump_spec(outputStream *st) const {
+   if( _con < TypeFunc::Parms ) {
+-    tty->print(names[_con]);
++    st->print(names[_con]);
+   } else {
+-    tty->print("Parm%d: ",_con-TypeFunc::Parms);
++    st->print("Parm%d: ",_con-TypeFunc::Parms);
+     // Verbose and WizardMode dump bottom_type for all nodes
+-    if( !Verbose && !WizardMode )   bottom_type()->dump();
++    if( !Verbose && !WizardMode )   bottom_type()->dump_on(st);
+   }
+ }
+ #endif
+@@ -117,8 +114,8 @@
+   case TypeFunc::I_O      : // fall through
+   case TypeFunc::Memory   : return 0;
+   case TypeFunc::FramePtr : // fall through
+-  case TypeFunc::ReturnAdr: return Op_RegP;      
+-  default                 : assert( _con > TypeFunc::Parms, "" ); 
++  case TypeFunc::ReturnAdr: return Op_RegP;
++  default                 : assert( _con > TypeFunc::Parms, "" );
+     // fall through
+   case TypeFunc::Parms    : {
+     // Type of argument being passed
+@@ -131,19 +128,19 @@
+ }
+ 
+ //=============================================================================
+-ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) { 
+-  init_req(TypeFunc::Control,cntrl); 
+-  init_req(TypeFunc::I_O,i_o); 
+-  init_req(TypeFunc::Memory,memory); 
++ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) {
++  init_req(TypeFunc::Control,cntrl);
++  init_req(TypeFunc::I_O,i_o);
++  init_req(TypeFunc::Memory,memory);
+   init_req(TypeFunc::FramePtr,frameptr);
+-  init_req(TypeFunc::ReturnAdr,retadr); 
++  init_req(TypeFunc::ReturnAdr,retadr);
+ }
+ 
+ Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){
+-  return remove_dead_region(phase, can_reshape) ? this : NULL; 
++  return remove_dead_region(phase, can_reshape) ? this : NULL;
+ }
+ 
+-const Type *ReturnNode::Value( PhaseTransform *phase ) const { 
++const Type *ReturnNode::Value( PhaseTransform *phase ) const {
+   return ( phase->type(in(TypeFunc::Control)) == Type::TOP)
+     ? Type::TOP
+     : Type::BOTTOM;
+@@ -175,20 +172,20 @@
+   Node* frameptr,
+   Node* ret_adr,
+   Node* exception
+-) : Node(TypeFunc::Parms + 1) { 
+-  init_req(TypeFunc::Control  , cntrl    ); 
+-  init_req(TypeFunc::I_O      , i_o      ); 
+-  init_req(TypeFunc::Memory   , memory   ); 
++) : Node(TypeFunc::Parms + 1) {
++  init_req(TypeFunc::Control  , cntrl    );
++  init_req(TypeFunc::I_O      , i_o      );
++  init_req(TypeFunc::Memory   , memory   );
+   init_req(TypeFunc::FramePtr , frameptr );
+   init_req(TypeFunc::ReturnAdr, ret_adr);
+   init_req(TypeFunc::Parms    , exception);
+ }
+ 
+ Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){
+-  return remove_dead_region(phase, can_reshape) ? this : NULL; 
++  return remove_dead_region(phase, can_reshape) ? this : NULL;
+ }
+ 
+-const Type *RethrowNode::Value( PhaseTransform *phase ) const { 
++const Type *RethrowNode::Value( PhaseTransform *phase ) const {
+   return (phase->type(in(TypeFunc::Control)) == Type::TOP)
+     ? Type::TOP
+     : Type::BOTTOM;
+@@ -304,42 +301,42 @@
+ // Given an allocation (a Chaitin object) and a Node decide if the Node carries
+ // any defined value or not.  If it does, print out the register or constant.
+ #ifndef PRODUCT
+-static void format_helper( PhaseRegAlloc *regalloc, Node *n, const char *msg, uint i ) {
+-  if (n == NULL) { tty->print(" NULL"); return; }
++static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i ) {
++  if (n == NULL) { st->print(" NULL"); return; }
+   if( OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined
+     char buf[50];
+     regalloc->dump_register(n,buf);
+-    tty->print(" %s%d]=%s",msg,i,buf);
+-  } else {                      // No register, but might be constant  
++    st->print(" %s%d]=%s",msg,i,buf);
++  } else {                      // No register, but might be constant
+     const Type *t = n->bottom_type();
+     switch (t->base()) {
+-    case Type::Int:  
+-      tty->print(" %s%d]=#"INT32_FORMAT,msg,i,t->is_int()->get_con()); 
++    case Type::Int:
++      st->print(" %s%d]=#"INT32_FORMAT,msg,i,t->is_int()->get_con());
+       break;
+-    case Type::AnyPtr: 
++    case Type::AnyPtr:
+       assert( t == TypePtr::NULL_PTR, "" );
+-      tty->print(" %s%d]=#NULL",msg,i);
++      st->print(" %s%d]=#NULL",msg,i);
+       break;
+-    case Type::AryPtr: 
++    case Type::AryPtr:
+     case Type::KlassPtr:
+-    case Type::InstPtr: 
+-      tty->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->isa_oopptr()->const_oop());
++    case Type::InstPtr:
++      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->isa_oopptr()->const_oop());
+       break;
+-    case Type::RawPtr: 
+-      tty->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,t->is_rawptr());
++    case Type::RawPtr:
++      st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,t->is_rawptr());
+       break;
+     case Type::DoubleCon:
+-      tty->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d);
++      st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d);
+       break;
+     case Type::FloatCon:
+-      tty->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f);
++      st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f);
+       break;
+     case Type::Long:
+-      tty->print(" %s%d]=#"INT64_FORMAT,msg,i,t->is_long()->get_con());
++      st->print(" %s%d]=#"INT64_FORMAT,msg,i,t->is_long()->get_con());
+       break;
+     case Type::Half:
+-    case Type::Top:  
+-      tty->print(" %s%d]=_",msg,i);
++    case Type::Top:
++      st->print(" %s%d]=_",msg,i);
+       break;
+     default: ShouldNotReachHere();
+     }
+@@ -349,51 +346,51 @@
+ 
+ //------------------------------format-----------------------------------------
+ #ifndef PRODUCT
+-void JVMState::format(PhaseRegAlloc *regalloc, const Node *n) const {
+-  tty->print("        #");
++void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const {
++  st->print("        #");
+   if( _method ) {
+-    _method->print_short_name();
+-    tty->print(" @ bci:%d ",_bci);
++    _method->print_short_name(st);
++    st->print(" @ bci:%d ",_bci);
+   } else {
+-    tty->print_cr(" runtime stub ");
++    st->print_cr(" runtime stub ");
+     return;
+   }
+   if (n->is_MachSafePoint()) {
+     MachSafePointNode *mcall = n->as_MachSafePoint();
+     uint i;
+     // Print locals
+-    for( i = 0; i < (uint)loc_size(); i++ ) 
+-      format_helper( regalloc, mcall->local(this, i), "L[", i );
++    for( i = 0; i < (uint)loc_size(); i++ )
++      format_helper( regalloc, st, mcall->local(this, i), "L[", i );
+     // Print stack
+     for (i = 0; i < (uint)stk_size(); i++) {
+-      if ((uint)(_stkoff + i) >= mcall->len()) 
+-        tty->print(" oob ");
++      if ((uint)(_stkoff + i) >= mcall->len())
++        st->print(" oob ");
+       else
+-       format_helper( regalloc, mcall->stack(this, i), "STK[", i );
++       format_helper( regalloc, st, mcall->stack(this, i), "STK[", i );
+     }
+     for (i = 0; (int)i < nof_monitors(); i++) {
+       Node *box = mcall->monitor_box(this, i);
+       Node *obj = mcall->monitor_obj(this, i);
+       if ( OptoReg::is_valid(regalloc->get_reg_first(box)) ) {
+         while( !box->is_BoxLock() )  box = box->in(1);
+-        format_helper( regalloc, box, "MON-BOX[", i );
++        format_helper( regalloc, st, box, "MON-BOX[", i );
+       } else {
+         OptoReg::Name box_reg = BoxLockNode::stack_slot(box);
+-        tty->print(" MON-BOX%d=%s+%d",
++        st->print(" MON-BOX%d=%s+%d",
+                    i,
+                    OptoReg::regname(OptoReg::c_frame_pointer),
+                    regalloc->reg2offset(box_reg));
+       }
+-      format_helper( regalloc, obj, "MON-OBJ[", i );      
++      format_helper( regalloc, st, obj, "MON-OBJ[", i );
+     }
+   }
+-  tty->print_cr("");
+-  if (caller() != NULL)  caller()->format(regalloc, n);
++  st->print_cr("");
++  if (caller() != NULL)  caller()->format(regalloc, n, st);
+ }
+ #endif
+ 
+ #ifndef PRODUCT
+-void JVMState::dump_spec() const { 
++void JVMState::dump_spec(outputStream *st) const {
+   if (_method != NULL) {
+     bool printed = false;
+     if (!Verbose) {
+@@ -410,22 +407,22 @@
+         if (endcn == NULL)  endcn = name + strlen(name);
+         while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/')
+           --endcn;
+-        tty->print(" %s", endcn);
++        st->print(" %s", endcn);
+         printed = true;
+       }
+     }
+     if (!printed)
+-      _method->print_short_name(tty);
+-    tty->print(" @ bci:%d",_bci);
++      _method->print_short_name(st);
++    st->print(" @ bci:%d",_bci);
+   } else {
+-    tty->print(" runtime stub");
++    st->print(" runtime stub");
+   }
+-  if (caller() != NULL)  caller()->dump_spec();
++  if (caller() != NULL)  caller()->dump_spec(st);
+ }
+ #endif
+ 
+ #ifndef PRODUCT
+-void JVMState::dump() const {
++void JVMState::dump_on(outputStream* st) const {
+   if (_map && !((uintptr_t)_map & 1)) {
+     if (_map->len() > _map->req()) {  // _map->has_exceptions()
+       Node* ex = _map->in(_map->req());  // _map->next_exception()
+@@ -437,20 +434,20 @@
+     }
+     _map->dump(2);
+   }
+-  tty->print("JVMS depth=%d loc=%d stk=%d mon=%d end=%d mondepth=%d sp=%d bci=%d method=",
++  st->print("JVMS depth=%d loc=%d stk=%d mon=%d end=%d mondepth=%d sp=%d bci=%d method=",
+              depth(), locoff(), stkoff(), monoff(), endoff(), monitor_depth(), sp(), bci());
+   if (_method == NULL) {
+-    tty->print_cr("(none)");
++    st->print_cr("(none)");
+   } else {
+-    _method->print_name();
+-    tty->cr();
++    _method->print_name(st);
++    st->cr();
+     if (bci() >= 0 && bci() < _method->code_size()) {
+-      tty->print("    bc: ");
+-      _method->print_codes(bci(), bci()+1);
++      st->print("    bc: ");
++      _method->print_codes_on(bci(), bci()+1, st);
+     }
+   }
+   if (caller() != NULL) {
+-    caller()->dump();
++    caller()->dump_on(st);
+   }
+ }
+ 
+@@ -489,7 +486,7 @@
+ uint CallNode::cmp( const Node &n ) const
+ { return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
+ #ifndef PRODUCT
+-void CallNode::dump_req() const { 
++void CallNode::dump_req() const {
+   // Dump the required inputs, enclosed in '(' and ')'
+   uint i;                       // Exit value of loop
+   for( i=0; i<req(); i++ ) {    // For all required inputs
+@@ -500,23 +497,23 @@
+   tty->print(")");
+ }
+ 
+-void CallNode::dump_spec() const { 
+-  tty->print(" "); 
+-  tf()->dump();
+-  if (_cnt != COUNT_UNKNOWN)  tty->print(" C=%f",_cnt);
+-  if (jvms() != NULL)  jvms()->dump_spec();
++void CallNode::dump_spec(outputStream *st) const {
++  st->print(" ");
++  tf()->dump_on(st);
++  if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
++  if (jvms() != NULL)  jvms()->dump_spec(st);
+ }
+ #endif
+ 
+ const Type *CallNode::bottom_type() const { return tf()->range(); }
+-const Type *CallNode::Value(PhaseTransform *phase) const { 
++const Type *CallNode::Value(PhaseTransform *phase) const {
+   if (phase->type(in(0)) == Type::TOP)  return Type::TOP;
+-  return tf()->range(); 
++  return tf()->range();
+ }
+ 
+ //------------------------------calling_convention-----------------------------
+ void CallNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
+-  // Use the standard compiler calling convention 
++  // Use the standard compiler calling convention
+   Matcher::calling_convention( sig_bt, parm_regs, argcnt, true );
+ }
+ 
+@@ -526,7 +523,7 @@
+ // return result(s) along with their RegMask info
+ Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
+   switch (proj->_con) {
+-  case TypeFunc::Control: 
++  case TypeFunc::Control:
+   case TypeFunc::I_O:
+   case TypeFunc::Memory:
+     return new (match->C, 1) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
+@@ -538,7 +535,7 @@
+ 
+   case TypeFunc::Parms: {       // Normal returns
+     uint ideal_reg = Matcher::base2reg[tf()->range()->field_at(TypeFunc::Parms)->base()];
+-    OptoRegPair regs = is_CallRuntime() 
++    OptoRegPair regs = is_CallRuntime()
+       ? match->c_return_value(ideal_reg,true)  // Calls into C runtime
+       : match->  return_value(ideal_reg,true); // Calls into compiled Java code
+     RegMask rm = RegMask(regs.first());
+@@ -562,22 +559,22 @@
+ 
+ //=============================================================================
+ uint CallJavaNode::size_of() const { return sizeof(*this); }
+-uint CallJavaNode::cmp( const Node &n ) const { 
++uint CallJavaNode::cmp( const Node &n ) const {
+   CallJavaNode &call = (CallJavaNode&)n;
+-  return CallNode::cmp(call) && _method == call._method; 
++  return CallNode::cmp(call) && _method == call._method;
+ }
+ #ifndef PRODUCT
+-void CallJavaNode::dump_spec() const { 
+-  if( _method ) _method->print_short_name();
+-  CallNode::dump_spec();
++void CallJavaNode::dump_spec(outputStream *st) const {
++  if( _method ) _method->print_short_name(st);
++  CallNode::dump_spec(st);
+ }
+ #endif
+ 
+ //=============================================================================
+ uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
+-uint CallStaticJavaNode::cmp( const Node &n ) const { 
++uint CallStaticJavaNode::cmp( const Node &n ) const {
+   CallStaticJavaNode &call = (CallStaticJavaNode&)n;
+-  return CallJavaNode::cmp(call); 
++  return CallJavaNode::cmp(call);
+ }
+ 
+ //----------------------------uncommon_trap_request----------------------------
+@@ -602,47 +599,47 @@
+ }
+ 
+ #ifndef PRODUCT
+-void CallStaticJavaNode::dump_spec() const { 
+-  tty->print("# Static ");
++void CallStaticJavaNode::dump_spec(outputStream *st) const {
++  st->print("# Static ");
+   if (_name != NULL) {
+-    tty->print("%s", _name);
++    st->print("%s", _name);
+     int trap_req = uncommon_trap_request();
+     if (trap_req != 0) {
+       char buf[100];
+-      tty->print("(%s)",
++      st->print("(%s)",
+                  Deoptimization::format_trap_request(buf, sizeof(buf),
+                                                      trap_req));
+     }
+-    tty->print(" ");
++    st->print(" ");
+   }
+-  CallJavaNode::dump_spec();
++  CallJavaNode::dump_spec(st);
+ }
+ #endif
+ 
+ //=============================================================================
+ uint CallDynamicJavaNode::size_of() const { return sizeof(*this); }
+-uint CallDynamicJavaNode::cmp( const Node &n ) const { 
++uint CallDynamicJavaNode::cmp( const Node &n ) const {
+   CallDynamicJavaNode &call = (CallDynamicJavaNode&)n;
+-  return CallJavaNode::cmp(call); 
++  return CallJavaNode::cmp(call);
+ }
+ #ifndef PRODUCT
+-void CallDynamicJavaNode::dump_spec() const { 
+-  tty->print("# Dynamic ");
+-  CallJavaNode::dump_spec();
++void CallDynamicJavaNode::dump_spec(outputStream *st) const {
++  st->print("# Dynamic ");
++  CallJavaNode::dump_spec(st);
+ }
+ #endif
+ 
+ //=============================================================================
+ uint CallRuntimeNode::size_of() const { return sizeof(*this); }
+-uint CallRuntimeNode::cmp( const Node &n ) const { 
++uint CallRuntimeNode::cmp( const Node &n ) const {
+   CallRuntimeNode &call = (CallRuntimeNode&)n;
+   return CallNode::cmp(call) && !strcmp(_name,call._name);
+ }
+ #ifndef PRODUCT
+-void CallRuntimeNode::dump_spec() const { 
+-  tty->print("# "); 
+-  tty->print(_name);
+-  CallNode::dump_spec();
++void CallRuntimeNode::dump_spec(outputStream *st) const {
++  st->print("# ");
++  st->print(_name);
++  CallNode::dump_spec(st);
+ }
+ #endif
+ 
+@@ -657,10 +654,10 @@
+ 
+ //=============================================================================
+ #ifndef PRODUCT
+-void CallLeafNode::dump_spec() const { 
+-  tty->print("# "); 
+-  tty->print(_name);
+-  CallNode::dump_spec();
++void CallLeafNode::dump_spec(outputStream *st) const {
++  st->print("# ");
++  st->print(_name);
++  CallNode::dump_spec(st);
+ }
+ #endif
+ 
+@@ -683,7 +680,7 @@
+ }
+ 
+ uint SafePointNode::size_of() const { return sizeof(*this); }
+-uint SafePointNode::cmp( const Node &n ) const { 
++uint SafePointNode::cmp( const Node &n ) const {
+   return (&n == this);          // Always fail except on self
+ }
+ 
+@@ -715,7 +712,7 @@
+ Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+   if (remove_dead_region(phase, can_reshape))  return this;
+ 
+-  return NULL; 
++  return NULL;
+ }
+ 
+ //------------------------------Identity---------------------------------------
+@@ -750,12 +747,12 @@
+ }
+ 
+ #ifndef PRODUCT
+-void SafePointNode::dump_spec() const { 
+-  tty->print(" SafePoint "); 
++void SafePointNode::dump_spec(outputStream *st) const {
++  st->print(" SafePoint ");
+ }
+ #endif
+ 
+-const RegMask &SafePointNode::in_RegMask(uint idx) const { 
++const RegMask &SafePointNode::in_RegMask(uint idx) const {
+   if( idx < TypeFunc::Parms ) return RegMask::Empty;
+   // Values outside the domain represent debug info
+   return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
+@@ -830,8 +827,7 @@
+ 
+ AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
+                            Node *ctrl, Node *mem, Node *abio,
+-                           Node *size, Node *klass_node, Node *initial_test,
+-                           Node *eden_top, Node *eden_end)
++                           Node *size, Node *klass_node, Node *initial_test)
+   : CallNode(atype, NULL, TypeRawPtr::BOTTOM)
+ {
+   init_class_id(Class_Allocate);
+@@ -846,8 +842,6 @@
+   init_req( AllocSize          , size);
+   init_req( KlassNode          , klass_node);
+   init_req( InitialTest        , initial_test);
+-  init_req( EdenTop            , eden_top);
+-  init_req( EdenEnd            , eden_end);
+   init_req( ALength            , topnode);
+   C->add_macro_node(this);
+ }
+@@ -874,7 +868,7 @@
+ //
+ // Assuming p is a simple predicate which can't trap in any way and s
+ // is a synchronized method consider this code:
+-// 
++//
+ //   s();
+ //   if (p)
+ //     s();
+@@ -899,7 +893,7 @@
+ //
+ // 3. In this case we eliminate the unlock of the first s, the lock
+ // and unlock in the then case and the lock in the final s.
+-// 
++//
+ // Note also that in all these cases the then/else pieces don't have
+ // to be trivial as long as they begin and end with synchronization
+ // operations.
+@@ -1010,7 +1004,7 @@
+   return ctrl;
+ }
+ //
+-// Given a control, see if it's the control projection of an Unlock which 
++// Given a control, see if it's the control projection of an Unlock which
+ // operating on the same object as lock.
+ //
+ bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
+@@ -1078,7 +1072,7 @@
+                                                        GrowableArray<AbstractLockNode*> &lock_ops) {
+   Node* if_node = node->in(0);
+   bool  if_true = node->is_IfTrue();
+-     
++
+   if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
+     Node *lock_ctrl = next_control(if_node->in(0));
+     if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
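[Editorial sketch — not part of the patch.] The callnode.cpp hunks above all apply one mechanical refactoring: the PRODUCT-only printers stop writing to the global tty and instead take an explicit outputStream*, with the old zero-argument entry points kept as thin forwarding wrappers (JVMState::dump() now calls dump_on(tty)). A minimal standalone analogy of that pattern in plain C++; NodeLike, CallLike and tty_stream are illustrative stand-ins, not HotSpot names:

#include <iostream>

// Stand-in for HotSpot's global "tty" stream.
static std::ostream& tty_stream = std::cout;

struct NodeLike {
    virtual ~NodeLike() {}
    // After the patch, the printer takes its destination explicitly...
    virtual void dump_spec(std::ostream& st) const { st << " Node"; }
    // ...and the old zero-argument entry point survives only as a thin
    // wrapper, mirroring JVMState::dump() forwarding to dump_on(tty).
    void dump() const { dump_spec(tty_stream); }
};

struct CallLike : NodeLike {
    // Subclasses chain to the parent with the same stream, so an entire
    // printing chain can be redirected to any destination at once.
    virtual void dump_spec(std::ostream& st) const {
        st << " Call";
        NodeLike::dump_spec(st);
    }
};

int main() {
    CallLike c;
    c.dump();                 // goes to the global stream
    c.dump_spec(std::cerr);   // or to any caller-chosen stream
    std::cout << std::endl;
    return 0;
}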
+diff -ruN openjdk6/hotspot/src/share/vm/opto/callnode.hpp openjdk/hotspot/src/share/vm/opto/callnode.hpp
+--- openjdk6/hotspot/src/share/vm/opto/callnode.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/callnode.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)callnode.hpp	1.192 07/05/17 15:57:24 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Portions of code courtesy of Clifford Click
+@@ -75,7 +72,7 @@
+   virtual Node *match( const ProjNode *proj, const Matcher *m );
+   virtual uint ideal_reg() const { return 0; }
+ #ifndef PRODUCT
+-  virtual void  dump_spec() const;
++  virtual void  dump_spec(outputStream *st) const;
+ #endif
+ };
+ 
+@@ -99,7 +96,7 @@
+   virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
+   virtual uint ideal_reg() const;
+ #ifndef PRODUCT
+-  virtual void dump_spec() const;
++  virtual void dump_spec(outputStream *st) const;
+ #endif
+ };
+ 
+@@ -266,9 +263,12 @@
+   JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
+ 
+ #ifndef PRODUCT
+-  void      format(PhaseRegAlloc *regalloc, const Node *n) const;
+-  void      dump_spec() const;
+-  void      dump() const;
++  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
++  void      dump_spec(outputStream *st) const;
++  void      dump_on(outputStream* st) const;
++  void      dump() const {
++    dump_on(tty);
++  }
+ #endif
+ };
+ 
+@@ -291,7 +291,7 @@
+   {
+     init_class_id(Class_SafePoint);
+   }
+-  
++
+   OopMap*         _oop_map;   // Array of OopMap info (8-bit char) for GC
+   JVMState* const _jvms;      // Pointer to list of JVM State objects
+   const TypePtr*  _adr_type;  // What type of memory does this node produce?
+@@ -393,7 +393,7 @@
+   static  bool           needs_polling_address_input();
+ 
+ #ifndef PRODUCT
+-  virtual void              dump_spec() const;
++  virtual void              dump_spec(outputStream *st) const;
+ #endif
+ };
+ 
+@@ -445,7 +445,7 @@
+ 
+ #ifndef PRODUCT
+   virtual void        dump_req()  const;
+-  virtual void        dump_spec() const;
++  virtual void        dump_spec(outputStream *st) const;
+ #endif
+ };
+ 
+@@ -476,7 +476,7 @@
+   bool  is_optimized_virtual() const      { return _optimized_virtual; }
+ 
+ #ifndef PRODUCT
+-  virtual void  dump_spec() const;
++  virtual void  dump_spec(outputStream *st) const;
+ #endif
+ };
+ 
+@@ -498,7 +498,7 @@
+     init_class_id(Class_CallStaticJava);
+     // This node calls a runtime stub, which often has narrow memory effects.
+     _adr_type = adr_type;
+-  }  
++  }
+   const char *_name;            // Runtime wrapper name
+ 
+   // If this is an uncommon trap, return the request code, else zero.
+@@ -507,7 +507,7 @@
+ 
+   virtual int         Opcode() const;
+ #ifndef PRODUCT
+-  virtual void        dump_spec() const;
++  virtual void        dump_spec(outputStream *st) const;
+ #endif
+ };
+ 
+@@ -524,7 +524,7 @@
+   int _vtable_index;
+   virtual int   Opcode() const;
+ #ifndef PRODUCT
+-  virtual void  dump_spec() const;
++  virtual void  dump_spec(outputStream *st) const;
+ #endif
+ };
+ 
+@@ -547,7 +547,7 @@
+   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
+ 
+ #ifndef PRODUCT
+-  virtual void  dump_spec() const;
++  virtual void  dump_spec(outputStream *st) const;
+ #endif
+ };
+ 
+@@ -565,7 +565,7 @@
+   virtual int   Opcode() const;
+   virtual bool        guaranteed_safepoint()  { return false; }
+ #ifndef PRODUCT
+-  virtual void  dump_spec() const;
++  virtual void  dump_spec(outputStream *st) const;
+ #endif
+ };
+ 
+@@ -592,7 +592,7 @@
+ //  the _is_io_use flag in the projection.)  This is needed when expanding the node in
+ //  order to differentiate the uses of the projection on the normal control path from
+ //  those on the exception return path.
+-//  
++//
+ class AllocateNode : public CallNode {
+ public:
+   enum {
+@@ -602,8 +602,6 @@
+     AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
+     KlassNode,                        // type (maybe dynamic) of the obj.
+     InitialTest,                      // slow-path test (may be constant)
+-    EdenTop,                          // eden pointers
+-    EdenEnd,
+     ALength,                          // array length (or TOP if none)
+     ParmLimit
+   };
+@@ -613,9 +611,7 @@
+     fields[AllocSize]   = TypeInt::POS;
+     fields[KlassNode]   = TypeInstPtr::NOTNULL;
+     fields[InitialTest] = TypeInt::BOOL;
+-    fields[EdenTop]     = TypeRawPtr::NOTNULL;
+-    fields[EdenEnd]     = TypeRawPtr::NOTNULL;
+-    fields[ALength]     = TypeInt::INT;  // length >= 0
++    fields[ALength]     = TypeInt::INT;  // length (can be a bad length)
+ 
+     const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
+ 
+@@ -630,8 +626,7 @@
+ 
+   virtual uint size_of() const; // Size is bigger
+   AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
+-               Node *size, Node *klass_node, Node *initial_test,
+-               Node *eden_top, Node *eden_end);
++               Node *size, Node *klass_node, Node *initial_test);
+   // Expansion modifies the JVMState, so we need to clone it
+   virtual void  clone_jvms() {
+     set_jvms(jvms()->clone_deep(Compile::current()));
+@@ -651,6 +646,7 @@
+ 
+   // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
+   // an offset, which is reported back to the caller.
++  // (Note:  AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
+   static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
+                                         intptr_t& offset);
+ 
+@@ -664,6 +660,15 @@
+   int minimum_header_size() {
+     return is_AllocateArray() ? sizeof(arrayOopDesc) : sizeof(oopDesc);
+   }
++
++  // Return the corresponding initialization barrier (or null if none).
++  // Walks out edges to find it...
++  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
++  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
++  InitializeNode* initialization();
++
++  // Convenience for initialization->maybe_set_complete(phase)
++  bool maybe_set_complete(PhaseGVN* phase);
+ };
+ 
+ //------------------------------AllocateArray---------------------------------
+@@ -674,13 +679,13 @@
+ public:
+   AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
+                     Node* size, Node* klass_node, Node* initial_test,
+-                    Node* eden_top, Node* eden_end, Node* count_val
++                    Node* count_val
+                     )
+     : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
+-                   initial_test, eden_top, eden_end)
++                   initial_test)
+   {
+     init_class_id(Class_AllocateArray);
+-    set_req(AllocateNode::ALength, count_val);
++    set_req(AllocateNode::ALength,        count_val);
+   }
+   virtual int Opcode() const;
+   virtual uint size_of() const; // Size is bigger
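[Editorial sketch — not part of the patch.] The AllocateNode hunks above remove the EdenTop and EdenEnd inputs in three coordinated places: the constructor arguments, the input-slot enum, and the TypeTuple domain. A standalone sketch of why all three must move together — call-node inputs are positional, so deleting two slots renumbers everything after them. The names here are illustrative stand-ins, not HotSpot's:

#include <cassert>

// Slot enum after the patch: ALength sat two slots later (past
// EdenTop and EdenEnd) before the change.
enum AllocSlots {
    Parms = 5,              // stand-in for TypeFunc::Parms
    AllocSize = Parms,
    KlassNode,
    InitialTest,
    ALength,
    ParmLimit
};

int main() {
    // The domain tuple is built with exactly ParmLimit entries, so the
    // enum and the tuple must shrink (or grow) in lockstep.
    assert(ParmLimit - Parms == 4);
    return 0;
}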
+diff -ruN openjdk6/hotspot/src/share/vm/opto/cfgnode.cpp openjdk/hotspot/src/share/vm/opto/cfgnode.cpp
+--- openjdk6/hotspot/src/share/vm/opto/cfgnode.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/cfgnode.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)cfgnode.cpp	1.260 07/05/17 15:57:27 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Portions of code courtesy of Clifford Click
+@@ -158,7 +155,7 @@
+       Node *n = phi->in(j);
+       int opcode = n->Opcode();
+       switch( opcode ) {
+-      case Op_ConI: 
++      case Op_ConI:
+         {
+           if( min == NULL ) {
+             min     = n->Opcode() == Op_ConI ? (ConNode*)n : NULL;
+@@ -187,19 +184,19 @@
+   }
+   return ( min && max && val && (min->get_int() <= 0) && (max->get_int() >=0) );
+ }
+-  
++
+ 
+ //------------------------------check_if_clipping------------------------------
+ // Helper function for RegionNode's identification of FP clipping
+-// Check that inputs to Region come from two IfNodes, 
+-// 
++// Check that inputs to Region come from two IfNodes,
++//
+ //            If
+ //      False    True
+ //       If        |
+ //  False  True    |
+ //    |      |     |
+ //  RegionNode_inputs
+-// 
++//
+ static bool check_if_clipping( const RegionNode *region, IfNode * &bot_if, IfNode * &top_if ) {
+   top_if = NULL;
+   bot_if = NULL;
+@@ -214,14 +211,14 @@
+     Node *in20 = in2->in(0);
+     Node *in30 = in3->in(0);
+     // Check that #1 and #2 are ifTrue and ifFalse from same If
+-    if( in10 != NULL && in10->is_If() && 
+-        in20 != NULL && in20->is_If() && 
+-        in30 != NULL && in30->is_If() && in10 == in20 && 
++    if( in10 != NULL && in10->is_If() &&
++        in20 != NULL && in20->is_If() &&
++        in30 != NULL && in30->is_If() && in10 == in20 &&
+         (in1->Opcode() != in2->Opcode()) ) {
+       Node  *in100 = in10->in(0);
+       Node *in1000 = (in100 != NULL && in100->is_Proj()) ? in100->in(0) : NULL;
+       // Check that control for in10 comes from other branch of IF from in3
+-      if( in1000 != NULL && in1000->is_If() && 
++      if( in1000 != NULL && in1000->is_If() &&
+           in30 == in1000 && (in3->Opcode() != in100->Opcode()) ) {
+         // Control pattern checks
+         top_if = (IfNode*)in1000;
+@@ -260,7 +257,7 @@
+   jint left_shift     = lshift->in(2)->get_int();
+   jint right_shift    = rshift->in(2)->get_int();
+   jint max_post_shift = nth_bit(BitsPerJavaInteger - left_shift - 1);
+-  if( left_shift != right_shift || 
++  if( left_shift != right_shift ||
+       0 > left_shift || left_shift >= BitsPerJavaInteger ||
+       max_post_shift < max_cutoff ||
+       max_post_shift < -min_cutoff ) {
+@@ -331,11 +328,11 @@
+   Arena *a = Thread::current()->resource_area();
+   Node_List nstack(a);
+   VectorSet visited(a);
+-  
++
+   // Mark all control nodes reachable from root outputs
+   Node *n = (Node*)phase->C->root();
+   nstack.push(n);
+-  visited.set(n->_idx);  
++  visited.set(n->_idx);
+   while (nstack.size() != 0) {
+     n = nstack.pop();
+     uint max = n->outcnt();
+@@ -387,7 +384,7 @@
+     }
+   }
+ 
+-  // Remove TOP or NULL input paths. If only 1 input path remains, this Region 
++  // Remove TOP or NULL input paths. If only 1 input path remains, this Region
+   // degrades to a copy.
+   bool add_to_worklist = false;
+   int cnt = 0;                  // Count of values merging
+@@ -446,7 +443,7 @@
+       i--;
+     }
+   }
+-  
++
+   if (can_reshape && cnt == 1) {
+     // Is it dead loop?
+     // If it is LoopNopde it had 2 (+1 itself) inputs and
+@@ -466,14 +463,14 @@
+       Node *top = phase->C->top();
+       PhaseIterGVN *igvn = phase->is_IterGVN();
+       DUIterator j;
+-      while(progress) { 
++      while(progress) {
+         progress = false;
+         for (j = outs(); has_out(j); j++) {
+           Node *n = out(j);
+           if( n->is_Phi() ) {
+             assert( igvn->eqv(n->in(0), this), "" );
+             assert( n->req() == 2 &&  n->in(1) != NULL, "Only one data input expected" );
+-            // Break dead loop data path. 
++            // Break dead loop data path.
+             // Eagerly replace phis with top to avoid phis copies generation.
+             igvn->add_users_to_worklist(n);
+             igvn->hash_delete(n); // Yank from hash before hacking edges
+@@ -504,7 +501,7 @@
+       if( cnt == 0 ) {
+         assert( req() == 1, "no inputs expected" );
+         // During IGVN phase such region will be subsumed by TOP node
+-        // so region's phis will have TOP as control node. 
++        // so region's phis will have TOP as control node.
+         // Kill phis here to avoid it. PhiNode::is_copy() will be always false.
+         // Also set other user's input to top.
+         parent_ctrl = phase->C->top();
+@@ -533,12 +530,19 @@
+               in1 = phase->C->top();            // replaced by top
+             igvn->subsume_node(n, in1);
+           }
+-        } 
++        }
+         else if( n->is_Region() ) { // Update all incoming edges
+           assert( !igvn->eqv(n, this), "Must be removed from DefUse edges");
+-          for( uint k=1; k < n->req(); k++ ) 
+-            if( n->in(k) == this ) 
++          uint uses_found = 0;
++          for( uint k=1; k < n->req(); k++ ) {
++            if( n->in(k) == this ) {
+               n->set_req(k, parent_ctrl);
++              uses_found++;
++            }
++          }
++          if( uses_found > 1 ) { // (--i) done at the end of the loop.
++            i -= (uses_found - 1);
++          }
+         }
+         else {
+           assert( igvn->eqv(n->in(0), this), "Expect RegionNode to be control parent");
+@@ -583,7 +587,7 @@
+           // Control pattern checks, now verify compares
+           Node   *top_in = NULL;   // value being compared against
+           Node   *bot_in = NULL;
+-          if( check_compare_clipping( true,  bot_if, min, bot_in ) && 
++          if( check_compare_clipping( true,  bot_if, min, bot_in ) &&
+               check_compare_clipping( false, top_if, max, top_in ) ) {
+             if( bot_in == top_in ) {
+               PhaseIterGVN *gvn = phase->is_IterGVN();
+@@ -592,7 +596,7 @@
+ 
+               // Check for the ConvF2INode
+               ConvF2INode *convf2i;
+-              if( check_convf2i_clipping( phi, val_idx, convf2i, min, max ) && 
++              if( check_convf2i_clipping( phi, val_idx, convf2i, min, max ) &&
+                 convf2i->in(1) == bot_in ) {
+                 // Matched pattern, including LShiftI; RShiftI, replace with integer compares
+                 // max test
+@@ -630,7 +634,7 @@
+ 
+ 
+ 
+-const RegMask &RegionNode::out_RegMask() const { 
++const RegMask &RegionNode::out_RegMask() const {
+   return RegMask::Empty;
+ }
+ 
+@@ -867,7 +871,7 @@
+     // because the type system doesn't interact well with interfaces.
+     const TypeInstPtr *jtip = jt->isa_instptr();
+     if( jtip && ttip ) {
+-      if( jtip->is_loaded() &&  jtip->klass()->is_interface() && 
++      if( jtip->is_loaded() &&  jtip->klass()->is_interface() &&
+           ttip->is_loaded() && !ttip->klass()->is_interface() )
+         // Happens in a CTW of rt.jar, 320-341, no extra flags
+         { assert(ft == ttip->cast_to_ptr_type(jtip->ptr()), ""); jt = ft; }
+@@ -941,7 +945,7 @@
+ // in very special circumstances, we do it here on generic Phi's.
+ Node* PhiNode::is_cmove_id(PhaseTransform* phase, int true_path) {
+   assert(true_path !=0, "only diamond shape graph expected");
+-  
++
+   // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
+   // phi->region->if_proj->ifnode->bool->cmp
+   Node*     region = in(0);
+@@ -953,7 +957,7 @@
+   Node*     id     = CMoveNode::is_cmove_id(phase, cmp, tval, fval, b);
+   if (id == NULL)
+     return NULL;
+- 
++
+   // Either value might be a cast that depends on a branch of 'iff'.
+   // Since the 'id' value will float free of the diamond, either
+   // decast or return failure.
+@@ -964,9 +968,9 @@
+     } else {
+       // Don't know how to disentangle this value.
+       return NULL;
+-    } 
+-  } 
+-  
++    }
++  }
++
+   return id;
+ }
+ 
+@@ -1001,15 +1005,15 @@
+   //     the type of input is the same or sharper (more specific)
+   //     than the phi's type.
+   //  3) an input is a self loop
+-  //  
+-  //  1) input   or   2) input     or   3) input __ 
+-  //     /   \           /   \               \  /  \ 
+-  //     \   /          |    cast             phi  cast 
+-  //      phi            \   /               /  \  /   
+-  //                      phi               /    --   
++  //
++  //  1) input   or   2) input     or   3) input __
++  //     /   \           /   \               \  /  \
++  //     \   /          |    cast             phi  cast
++  //      phi            \   /               /  \  /
++  //                      phi               /    --
+ 
+   Node* r = in(0);                      // RegionNode
+-  if (r == NULL)  return in(1);         // Already degraded to a Copy 
++  if (r == NULL)  return in(1);         // Already degraded to a Copy
+   Node* uncasted_input = NULL; // The unique uncasted input (ConstraintCasts removed)
+   Node* direct_input   = NULL; // The unique direct input
+ 
+@@ -1043,7 +1047,7 @@
+   if (direct_input != NodeSentinel) {
+     return direct_input;           // one unique direct input
+   }
+-  if (uncasted_input != NodeSentinel && 
++  if (uncasted_input != NodeSentinel &&
+       phase->type(uncasted_input)->higher_equal(type())) {
+     return uncasted_input;         // one unique uncasted input
+   }
+@@ -1101,7 +1105,7 @@
+ 
+   // Build int->bool conversion
+   Node *n = new (phase->C, 2) Conv2BNode( cmp->in(1) );
+-  if( flipped ) 
++  if( flipped )
+     n = new (phase->C, 3) XorINode( phase->transform(n), phase->intcon(1) );
+ 
+   return n;
+@@ -1110,9 +1114,9 @@
+ //------------------------------is_cond_add------------------------------------
+ // Check for simple conditional add pattern:  "(P < Q) ? X+Y : X;"
+ // To be profitable the control flow has to disappear; there can be no other
+-// values merging here.  We replace the test-and-branch with: 
++// values merging here.  We replace the test-and-branch with:
+ // "(sgn(P-Q))&Y) + X".  Basically, convert "(P < Q)" into 0 or -1 by
+-// moving the carry bit from (P-Q) into a register with 'sbb EAX,EAX'.  
++// moving the carry bit from (P-Q) into a register with 'sbb EAX,EAX'.
+ // Then convert Y to 0-or-Y and finally add.
+ // This is a key transform for SpecJava _201_compress.
+ static Node* is_cond_add(PhaseGVN *phase, PhiNode *phi, int true_path) {
+@@ -1158,7 +1162,7 @@
+   } else return NULL;
+ 
+   // Not so profitable if compare and add are constants
+-  if( q->is_Con() && phase->type(q) != TypeInt::ZERO && y->is_Con() ) 
++  if( q->is_Con() && phase->type(q) != TypeInt::ZERO && y->is_Con() )
+     return NULL;
+ 
+   Node *cmplt = phase->transform( new (phase->C, 3) CmpLTMaskNode(p,q) );
+@@ -1221,17 +1225,17 @@
+   Node *sub = phi_root->in(3 - phi_x_idx);
+ 
+   // Allow only Sub(0,X) and fail out for all others; Neg is not OK
+-  if( tzero == TypeF::ZERO ) { 
+-    if( sub->Opcode() != Op_SubF || 
+-        sub->in(2) != x || 
++  if( tzero == TypeF::ZERO ) {
++    if( sub->Opcode() != Op_SubF ||
++        sub->in(2) != x ||
+         phase->type(sub->in(1)) != tzero ) return NULL;
+     x = new (phase->C, 2) AbsFNode(x);
+     if (flip) {
+       x = new (phase->C, 3) SubFNode(sub->in(1), phase->transform(x));
+     }
+   } else {
+-    if( sub->Opcode() != Op_SubD || 
+-        sub->in(2) != x || 
++    if( sub->Opcode() != Op_SubD ||
++        sub->in(2) != x ||
+         phase->type(sub->in(1)) != tzero ) return NULL;
+     x = new (phase->C, 2) AbsDNode(x);
+     if (flip) {
+@@ -1285,7 +1289,7 @@
+   }
+   if( i >= phi->req() )         // Only split for constants
+     return NULL;
+-  
++
+   Node *val = phi->in(i);       // Constant to split for
+   uint hit = 0;                 // Number of times it occurs
+ 
+@@ -1293,7 +1297,7 @@
+     Node *n = phi->in(i);
+     if( !n ) return NULL;
+     if( phase->type(n) == Type::TOP ) return NULL;
+-    if( phi->in(i) == val ) 
++    if( phi->in(i) == val )
+       hit++;
+   }
+ 
+@@ -1315,7 +1319,7 @@
+       PhiNode *newphi = PhiNode::make_blank(newr, phi2);
+       split_once(igvn, phi, val, phi2, newphi);
+     }
+-  }      
++  }
+ 
+   // Clean up this guy
+   igvn->hash_delete(phi);
+@@ -1332,14 +1336,14 @@
+ //=============================================================================
+ //------------------------------simple_data_loop_check-------------------------
+ //  Try to determing if the phi node in a simple safe/unsafe data loop.
+-//  Returns:    
++//  Returns:
+ // enum LoopSafety { Safe = 0, Unsafe, UnsafeLoop };
+-// Safe       - safe case when the phi and it's inputs reference only safe data 
++// Safe       - safe case when the phi and it's inputs reference only safe data
+ //              nodes;
+-// Unsafe     - the phi and it's inputs reference unsafe data nodes but there 
++// Unsafe     - the phi and it's inputs reference unsafe data nodes but there
+ //              is no reference back to the phi - need a graph walk
+ //              to determine if it is in a loop;
+-// UnsafeLoop - unsafe case when the phi references itself directly or through 
++// UnsafeLoop - unsafe case when the phi references itself directly or through
+ //              unsafe data node.
+ //  Note: a safe data node is a node which could/never reference itself during
+ //  GVN transformations. For now it is Con, Proj, Phi, CastPP, CheckCastPP.
+@@ -1347,14 +1351,14 @@
+ //  but also to prevent mistaking the fallthrough case inside an outer loop
+ //  as dead loop when the phi references itselfs through an other phi.
+ PhiNode::LoopSafety PhiNode::simple_data_loop_check(Node *in) const {
+-  // It is unsafe loop if the phi node references itself directly. 
++  // It is unsafe loop if the phi node references itself directly.
+   if (in == (Node*)this)
+     return UnsafeLoop; // Unsafe loop
+-  // Unsafe loop if the phi node references itself through an unsafe data node. 
++  // Unsafe loop if the phi node references itself through an unsafe data node.
+   // Exclude cases with null inputs or data nodes which could reference
+   // itself (safe for dead loops).
+   if (in != NULL && !in->is_dead_loop_safe()) {
+-    // Check inputs of phi's inputs also. 
++    // Check inputs of phi's inputs also.
+     // It is much less expensive then full graph walk.
+     uint cnt = in->req();
+     for (uint i = 1; i < cnt; ++i) {
+@@ -1367,7 +1371,7 @@
+         Node *m1 = (m->is_AddP() && m->req() > 3) ? m->in(1) : NULL;
+         if (m1 == (Node*)this)
+           return UnsafeLoop; // Unsafe loop
+-        if (m1 != NULL && m1 == m->in(2) && 
++        if (m1 != NULL && m1 == m->in(2) &&
+             m1->is_dead_loop_safe() && m->in(3)->is_Con()) {
+           continue; // Safe case
+         }
+@@ -1383,7 +1387,7 @@
+ // If phi can be reached through the data input - it is data loop.
+ bool PhiNode::is_unsafe_data_reference(Node *in) const {
+   assert(req() > 1, "");
+-  // First, check simple cases when phi references itself directly or 
++  // First, check simple cases when phi references itself directly or
+   // through an other node.
+   LoopSafety safety = simple_data_loop_check(in);
+   if (safety == UnsafeLoop)
+@@ -1399,9 +1403,9 @@
+   Arena *a = Thread::current()->resource_area();
+   Node_List nstack(a);
+   VectorSet visited(a);
+-  
++
+   nstack.push(in); // Start with unique input.
+-  visited.set(in->_idx); 
++  visited.set(in->_idx);
+   while (nstack.size() != 0) {
+     Node* n = nstack.pop();
+     uint cnt = n->req();
+@@ -1438,7 +1442,7 @@
+ 
+   Node *top = phase->C->top();
+ 
+-  // The are 2 situations when only one valid phi's input is left 
++  // The are 2 situations when only one valid phi's input is left
+   // (in addition to Region input).
+   // One: region is not loop - replace phi with this input.
+   // Two: region is loop - replace phi with top since this data path is dead
+@@ -1449,7 +1453,7 @@
+     Node* rc = r->in(j);
+     Node* n = in(j);            // Get the input
+     if (rc == NULL || phase->type(rc) == Type::TOP) {
+-      if (n != top) {           // Not already top?  
++      if (n != top) {           // Not already top?
+         set_req(j, top);        // Nuke it down
+         progress = this;        // Record progress
+       }
+@@ -1502,11 +1506,11 @@
+     return NULL;
+   }
+ 
+-  
++
+   Node* opt = NULL;
+   int true_path = is_diamond_phi();
+   if( true_path != 0 ) {
+-    // Check for CMove'ing identity. If it would be unsafe, 
++    // Check for CMove'ing identity. If it would be unsafe,
+     // handle it here. In the safe case, let Identity handle it.
+     Node* unsafe_id = is_cmove_id(phase, true_path);
+     if( unsafe_id != NULL && is_unsafe_data_reference(unsafe_id) )
+@@ -1579,12 +1583,31 @@
+         // Accumulate type for resulting Phi
+         type = type->meet(in(i)->in(AddPNode::Base)->bottom_type());
+       }
++      Node* base = NULL;
++      if (doit) {
++        // Check for neighboring AddP nodes in a tree.
++        // If they have a base, use that it.
++        for (DUIterator_Fast kmax, k = this->fast_outs(kmax); k < kmax; k++) {
++          Node* u = this->fast_out(k);
++          if (u->is_AddP()) {
++            Node* base2 = u->in(AddPNode::Base);
++            if (base2 != NULL && !base2->is_top()) {
++              if (base == NULL)
++                base = base2;
++              else if (base != base2)
++                { doit = false; break; }
++            }
++          }
++        }
++      }
+       if (doit) {
+-        Node* base = new (phase->C, in(0)->req()) PhiNode(in(0), type, NULL);
+-        for (uint i = 1; i < req(); i++) {
+-          base->init_req(i, in(i)->in(AddPNode::Base));
++        if (base == NULL) {
++          base = new (phase->C, in(0)->req()) PhiNode(in(0), type, NULL);
++          for (uint i = 1; i < req(); i++) {
++            base->init_req(i, in(i)->in(AddPNode::Base));
++          }
++          phase->is_IterGVN()->register_new_node_with_optimizer(base);
+         }
+-        phase->is_IterGVN()->register_new_node_with_optimizer(base);
+         return new (phase->C, 4) AddPNode(base, base, y);
+       }
+     }
+@@ -1605,7 +1628,7 @@
+       if (ii->is_MergeMem()) {
+         MergeMemNode* n = ii->as_MergeMem();
+         merge_width = MAX2(merge_width, n->req());
+-        saw_self = saw_self || phase->eqv(n->base_memory(), this); 
++        saw_self = saw_self || phase->eqv(n->base_memory(), this);
+       }
+     }
+ 
+@@ -1630,7 +1653,7 @@
+             Node         *m  = phase->transform(n);
+             // If tranformed to a MergeMem, get the desired slice
+             // Otherwise the returned node represents memory for every slice
+-            Node *new_mem = (m->is_MergeMem()) ? 
++            Node *new_mem = (m->is_MergeMem()) ?
+                              m->as_MergeMem()->memory_at(alias_idx) : m;
+             // Update input if it is progress over what we have now
+             if (new_mem != ii) {
+@@ -1641,7 +1664,7 @@
+         }
+       } else {
+         // We know that at least one MergeMem->base_memory() == this
+-        // (saw_self == true). If all other inputs also references this phi 
++        // (saw_self == true). If all other inputs also references this phi
+         // (directly or through data nodes) - it is dead loop.
+         bool saw_safe_input = false;
+         for (uint j = 1; j < req(); ++j) {
+@@ -1655,7 +1678,7 @@
+         }
+         if (!saw_safe_input)
+           return top; // all inputs reference back to this phi - dead loop
+-          
++
+         // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into
+         //     MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...))
+         PhaseIterGVN *igvn = phase->is_IterGVN();
+@@ -1719,11 +1742,11 @@
+ }
+ 
+ //------------------------------out_RegMask------------------------------------
+-const RegMask &PhiNode::in_RegMask(uint i) const { 
++const RegMask &PhiNode::in_RegMask(uint i) const {
+   return i ? out_RegMask() : RegMask::Empty;
+ }
+ 
+-const RegMask &PhiNode::out_RegMask() const { 
++const RegMask &PhiNode::out_RegMask() const {
+   uint ideal_reg = Matcher::base2reg[_type->base()];
+   assert( ideal_reg != Node::NotAMachineReg, "invalid type at Phi" );
+   if( ideal_reg == 0 ) return RegMask::Empty;
+@@ -1731,12 +1754,12 @@
+ }
+ 
+ #ifndef PRODUCT
+-void PhiNode::dump_spec() const { 
+-  TypeNode::dump_spec();
++void PhiNode::dump_spec(outputStream *st) const {
++  TypeNode::dump_spec(st);
+   if (in(0) != NULL &&
+       in(0)->is_CountedLoop() &&
+       in(0)->as_CountedLoop()->phi() == this) {
+-    tty->print(" #tripcount");
++    st->print(" #tripcount");
+   }
+ }
+ #endif
+@@ -1746,29 +1769,29 @@
+ const Type *GotoNode::Value( PhaseTransform *phase ) const {
+   // If the input is reachable, then we are executed.
+   // If the input is not reachable, then we are not executed.
+-  return phase->type(in(0)); 
++  return phase->type(in(0));
+ }
+ 
+ Node *GotoNode::Identity( PhaseTransform *phase ) {
+   return in(0);                // Simple copy of incoming control
+ }
+ 
+-const RegMask &GotoNode::out_RegMask() const { 
++const RegMask &GotoNode::out_RegMask() const {
+   return RegMask::Empty;
+ }
+ 
+ //=============================================================================
+-const RegMask &JumpNode::out_RegMask() const { 
++const RegMask &JumpNode::out_RegMask() const {
+   return RegMask::Empty;
+ }
+- 
++
+ //=============================================================================
+-const RegMask &JProjNode::out_RegMask() const { 
++const RegMask &JProjNode::out_RegMask() const {
+   return RegMask::Empty;
+ }
+ 
+ //=============================================================================
+-const RegMask &CProjNode::out_RegMask() const { 
++const RegMask &CProjNode::out_RegMask() const {
+   return RegMask::Empty;
+ }
+ 
+@@ -1787,7 +1810,7 @@
+ }
+ 
+ //------------------------------Value------------------------------------------
+-// Compute the type of the PCTableNode.  If reachable it is a tuple of 
++// Compute the type of the PCTableNode.  If reachable it is a tuple of
+ // Control, otherwise the table targets are not reachable
+ const Type *PCTableNode::Value( PhaseTransform *phase ) const {
+   if( phase->type(in(0)) == Type::CONTROL )
+@@ -1796,7 +1819,7 @@
+ }
+ 
+ //------------------------------Ideal------------------------------------------
+-// Return a node which is more "ideal" than the current node.  Strip out 
++// Return a node which is more "ideal" than the current node.  Strip out
+ // control copies
+ Node *PCTableNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+   return remove_dead_region(phase, can_reshape) ? this : NULL;
+@@ -1813,9 +1836,9 @@
+ }
+ 
+ #ifndef PRODUCT
+-void JumpProjNode::dump_spec() const { 
+-  ProjNode::dump_spec();
+-   tty->print("@bci %d ",_dest_bci);
++void JumpProjNode::dump_spec(outputStream *st) const {
++  ProjNode::dump_spec(st);
++   st->print("@bci %d ",_dest_bci);
+ }
+ #endif
+ 
+@@ -1843,8 +1866,8 @@
+       } else if( call->req() > TypeFunc::Parms ) {
+         const Type *arg0 = phase->type( call->in(TypeFunc::Parms) );
+         // Check for null reciever to virtual or interface calls
+-        if( call->is_CallDynamicJava() && 
+-            arg0->higher_equal(TypePtr::NULL_PTR) ) { 
++        if( call->is_CallDynamicJava() &&
++            arg0->higher_equal(TypePtr::NULL_PTR) ) {
+           f[CatchProjNode::fall_through_index] = Type::TOP;
+         }
+       } // End of if not a runtime stub
+@@ -1901,9 +1924,9 @@
+ 
+ 
+ #ifndef PRODUCT
+-void CatchProjNode::dump_spec() const { 
+-  ProjNode::dump_spec();
+-  tty->print("@bci %d ",_handler_bci);
++void CatchProjNode::dump_spec(outputStream *st) const {
++  ProjNode::dump_spec(st);
++  st->print("@bci %d ",_handler_bci);
+ }
+ #endif
+ 
+@@ -1914,19 +1937,18 @@
+   if( phase->type(in(1)) == Type::TOP ) return in(1);
+   if( phase->type(in(0)) == Type::TOP ) return in(0);
+   // We only come from CatchProj, unless the CatchProj goes away.
+-  // If the CatchProj is optimized away, then we just carry the 
++  // If the CatchProj is optimized away, then we just carry the
+   // exception oop through.
+   CallNode *call = in(1)->in(0)->as_Call();
+ 
+-  return ( in(0)->is_CatchProj() && in(0)->in(0)->in(1) == in(1) ) 
++  return ( in(0)->is_CatchProj() && in(0)->in(0)->in(1) == in(1) )
+     ? this
+     : call->in(TypeFunc::Parms);
+ }
+ 
+ //=============================================================================
+ #ifndef PRODUCT
+-void NeverBranchNode::format( PhaseRegAlloc *ra_ ) const {
+-  tty->print("%s", Name());
++void NeverBranchNode::format( PhaseRegAlloc *ra_, outputStream *st) const {
++  st->print("%s", Name());
+ }
+ #endif
+-
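[Editorial sketch — not part of the patch.] Among the cfgnode.cpp changes above, the one behavioral fix is the uses_found loop in RegionNode::Ideal: a user Region may reference the dying region through more than one input edge, and each replaced edge shrinks the def-use list being iterated, so the cursor must be stepped back once per extra edge. A standalone sketch of that bookkeeping, assuming a simple vector-based edge list rather than HotSpot's DUIterator:

#include <cstddef>
#include <vector>

struct N { std::vector<N*> in; };

// Rewire every input edge of 'user' that points at 'self' to point at
// 'parent_ctrl'.  'i' is the caller's cursor into self's out-list.
void rewire_user(N* self, N* parent_ctrl, N* user, std::size_t& i) {
    std::size_t uses_found = 0;
    // Slot 0 is the control input in HotSpot, hence k starts at 1.
    for (std::size_t k = 1; k < user->in.size(); ++k) {
        if (user->in[k] == self) {
            user->in[k] = parent_ctrl;
            ++uses_found;
        }
    }
    // Each rewritten edge removes one entry from self's out-list; the
    // owning loop already compensates for one removal (its --i), so
    // step back once for each additional edge, as in the hunk above.
    if (uses_found > 1)
        i -= (uses_found - 1);
}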
+diff -ruN openjdk6/hotspot/src/share/vm/opto/cfgnode.hpp openjdk/hotspot/src/share/vm/opto/cfgnode.hpp
+--- openjdk6/hotspot/src/share/vm/opto/cfgnode.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/cfgnode.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)cfgnode.hpp	1.115 07/05/05 17:06:11 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Portions of code courtesy of Clifford Click
+@@ -103,13 +100,13 @@
+   virtual const Node* is_block_proj() const { return in(0); }
+   virtual const RegMask& out_RegMask() const;
+   virtual uint  ideal_reg() const { return 0; }
+-};  
++};
+ 
+ //------------------------------PhiNode----------------------------------------
+ // PhiNodes merge values from different Control paths.  Slot 0 points to the
+ // controlling RegionNode.  Other slots map 1-for-1 with incoming control flow
+-// paths to the RegionNode.  For speed reasons (to avoid another pass) we 
+-// can turn PhiNodes into copys in-place by NULL'ing out their RegionNode 
++// paths to the RegionNode.  For speed reasons (to avoid another pass) we
++// can turn PhiNodes into copys in-place by NULL'ing out their RegionNode
+ // input in slot 0.
+ class PhiNode : public TypeNode {
+   const TypePtr* const _adr_type; // non-null only for Type::MEMORY nodes.
+@@ -170,7 +167,7 @@
+   virtual const RegMask &out_RegMask() const;
+   virtual const RegMask &in_RegMask(uint) const;
+ #ifndef PRODUCT
+-  virtual void dump_spec() const;
++  virtual void dump_spec(outputStream *st) const;
+ #endif
+ #ifdef ASSERT
+   void verify_adr_type(VectorSet& visited, const TypePtr* at) const;
+@@ -210,11 +207,11 @@
+   virtual const Node *is_block_proj() const { return in(0); }
+   virtual const RegMask &out_RegMask() const;
+   virtual uint ideal_reg() const { return 0; }
+-}; 
++};
+ 
+ //---------------------------MultiBranchNode-----------------------------------
+ // This class defines a MultiBranchNode, a MultiNode which yields multiple
+-// control values. These are distinguished from other types of MultiNodes 
++// control values. These are distinguished from other types of MultiNodes
+ // which yield multiple values, but control is always and only projection #0.
+ class MultiBranchNode : public MultiNode {
+ public:
+@@ -237,18 +234,18 @@
+ #define PROB_UNLIKELY_MAG(N)    (1e- ## N ## f)
+ #define PROB_LIKELY_MAG(N)      (1.0f-PROB_UNLIKELY_MAG(N))
+ 
+-  // Maximum and minimum branch prediction probabilties 
++  // Maximum and minimum branch prediction probabilties
+   // 1 in 1,000,000 (magnitude 6)
+   //
+   // Although PROB_NEVER == PROB_MIN and PROB_ALWAYS == PROB_MAX
+   // they are used to distinguish different situations:
+-  // 
++  //
+   // The name PROB_MAX (PROB_MIN) is for probabilities which correspond to
+   // very likely (unlikely) but with a concrete possibility of a rare
+   // contrary case.  These constants would be used for pinning
+   // measurements, and as measures for assertions that have high
+   // confidence, but some evidence of occasional failure.
+-  // 
++  //
+   // The name PROB_ALWAYS (PROB_NEVER) is to stand for situations for which
+   // there is no evidence at all that the contrary case has ever occurred.
+ 
+@@ -258,7 +255,7 @@
+ #define PROB_MIN                PROB_UNLIKELY_MAG(6)
+ #define PROB_MAX                PROB_LIKELY_MAG(6)
+ 
+-  // Static branch prediction probabilities 
++  // Static branch prediction probabilities
+   // 1 in 10 (magnitude 1)
+ #define PROB_STATIC_INFREQUENT  PROB_UNLIKELY_MAG(1)
+ #define PROB_STATIC_FREQUENT    PROB_LIKELY_MAG(1)
+@@ -281,20 +278,20 @@
+   //     threshold for converting to conditional move
+   //     likelihood of null check failure if a null HAS been seen before
+   //     likelihood of slow path taken in library calls
+-  // 
++  //
+   // 1 in 10,000 probabilities (magnitude 4):
+   //     threshold for making an uncommon trap probability more extreme
+   //     threshold for for making a null check implicit
+   //     likelihood of needing a gc if eden top moves during an allocation
+   //     likelihood of a predicted call failure
+-  // 
++  //
+   // 1 in 100,000 probabilities (magnitude 5):
+   //     threshold for ignoring counts when estimating path frequency
+   //     likelihood of FP clipping failure
+   //     likelihood of catching an exception from a try block
+   //     likelihood of null check failure if a null has NOT been seen before
+   //
+-  // Magic manifest probabilities such as 0.83, 0.7, ... can be found in 
++  // Magic manifest probabilities such as 0.83, 0.7, ... can be found in
+   // gen_subtype_check() and catch_inline_exceptions().
+ 
+   float _prob;                  // Probability of true path being taken.
+@@ -316,7 +313,7 @@
+   static Node* up_one_dom(Node* curr, bool linear_only = false);
+ 
+ #ifndef PRODUCT
+-  virtual void dump_spec() const;
++  virtual void dump_spec(outputStream *st) const;
+ #endif
+ };
+ 
+@@ -326,7 +323,7 @@
+     init_class_id(Class_IfTrue);
+   }
+   virtual int Opcode() const;
+-  virtual Node *Identity( PhaseTransform *phase );  
++  virtual Node *Identity( PhaseTransform *phase );
+ };
+ 
+ class IfFalseNode : public CProjNode {
+@@ -335,7 +332,7 @@
+     init_class_id(Class_IfFalse);
+   }
+   virtual int Opcode() const;
+-  virtual Node *Identity( PhaseTransform *phase );  
++  virtual Node *Identity( PhaseTransform *phase );
+ };
+ 
+ 
+@@ -366,7 +363,7 @@
+ 
+ //------------------------------JumpNode---------------------------------------
+ // Indirect branch.  Uses PCTable above to implement a switch statement.
+-// It emits as a table load and local branch.  
++// It emits as a table load and local branch.
+ class JumpNode : public PCTableNode {
+ public:
+   JumpNode( Node* control, Node* switch_val, uint size) : PCTableNode(control, switch_val, size) {
+@@ -381,7 +378,7 @@
+   virtual uint hash() const;
+   virtual uint cmp( const Node &n ) const;
+   virtual uint size_of() const { return sizeof(*this); }
+-  
++
+  private:
+   const int  _dest_bci;
+   const uint _proj_no;
+@@ -398,7 +395,7 @@
+   int  switch_val()  const { return _switch_val; }
+   uint proj_no()     const { return _proj_no; }
+ #ifndef PRODUCT
+-  virtual void dump_spec() const;
++  virtual void dump_spec(outputStream *st) const;
+ #endif
+ };
+ 
+@@ -445,7 +442,7 @@
+   int  handler_bci() const        { return _handler_bci; }
+   bool is_handler_proj() const    { return _handler_bci >= 0; }
+ #ifndef PRODUCT
+-  virtual void dump_spec() const;
++  virtual void dump_spec(outputStream *st) const;
+ #endif
+ };
+ 
+@@ -479,7 +476,6 @@
+   virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { }
+   virtual uint size(PhaseRegAlloc *ra_) const { return 0; }
+ #ifndef PRODUCT
+-  virtual void format( PhaseRegAlloc * ) const;
++  virtual void format( PhaseRegAlloc *, outputStream *st ) const;
+ #endif
+ };
+-
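[Editorial sketch — not part of the patch.] The branch-probability constants defined in the cfgnode.hpp hunk above rely on preprocessor token pasting to build float literals: PROB_UNLIKELY_MAG(6) pastes "1e-", "6" and "f" into the single literal 1e-6f. The same two macros compile on their own; a small demonstration:

#include <cstdio>

#define PROB_UNLIKELY_MAG(N)    (1e- ## N ## f)
#define PROB_LIKELY_MAG(N)      (1.0f-PROB_UNLIKELY_MAG(N))

int main() {
    std::printf("PROB_MIN = %g\n", (double)PROB_UNLIKELY_MAG(6)); // 1e-06
    std::printf("PROB_MAX = %g\n", (double)PROB_LIKELY_MAG(6));   // 0.999999
    return 0;
}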
+diff -ruN openjdk6/hotspot/src/share/vm/opto/chaitin.cpp openjdk/hotspot/src/share/vm/opto/chaitin.cpp
+--- openjdk6/hotspot/src/share/vm/opto/chaitin.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/chaitin.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)chaitin.cpp	1.115 07/05/05 17:06:11 JVM"
+-#endif
+ /*
+  * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -95,7 +92,7 @@
+   // to turn a divide by a constant into a multiply by the reciprical).
+   double score = raw_score( _cost, _area);
+ 
+-  // Account for area.  Basically, LRGs covering large areas are better 
++  // Account for area.  Basically, LRGs covering large areas are better
+   // to spill because more other LRGs get freed up.
+   if( _area == 0.0 )            // No area?  Then no progress to spill
+     return 1e35;
+@@ -120,12 +117,12 @@
+ void LRG_List::extend( uint nidx, uint lidx ) {
+   _nesting.check();
+   if( nidx >= _max ) {
+-    uint size = 16; 
++    uint size = 16;
+     while( size <= nidx ) size <<=1;
+     _lidxs = REALLOC_RESOURCE_ARRAY( uint, _lidxs, _max, size );
+     _max = size;
+   }
+-  while( _cnt <= nidx ) 
++  while( _cnt <= nidx )
+     _lidxs[_cnt++] = 0;
+   _lidxs[nidx] = lidx;
+ }
+@@ -265,8 +262,8 @@
+   // Aggressive (but pessimistic) copy coalescing.
+   // This pass works on virtual copies.  Any virtual copies which are not
+   // coalesced get manifested as actual copies
+-  { 
+-    // The IFG is/was triangular.  I am 'squaring it up' so Union can run 
++  {
++    // The IFG is/was triangular.  I am 'squaring it up' so Union can run
+     // faster.  Union requires a 'for all' operation which is slow on the
+     // triangular adjacency matrix (quick reminder: the IFG is 'sparse' -
+     // meaning I can visit all the Nodes neighbors less than a Node in time
+@@ -274,7 +271,7 @@
+     // given Node and search them for an instance, i.e., time O(#MaxLRG)).
+     _ifg->SquareUp();
+ 
+-    PhaseAggressiveCoalesce coalesce( *this ); 
++    PhaseAggressiveCoalesce coalesce( *this );
+     coalesce.coalesce_driver( );
+     // Insert un-coalesced copies.  Visit all Phis.  Where inputs to a Phi do
+     // not match the Phi itself, insert a copy.
+@@ -298,7 +295,7 @@
+   uint must_spill = 0;
+   must_spill = build_ifg_physical( &live_arena );
+   // If we have a guaranteed spill, might as well spill now
+-  if( must_spill ) {  
++  if( must_spill ) {
+     if( !_maxlrg ) return;
+     // Bail out if unique gets too large (ie - unique > MaxNodeLimit)
+     C->check_node_count(10*must_spill, "out of nodes before split");
+@@ -358,11 +355,11 @@
+ 
+   // Simplify the InterFerence Graph by removing LRGs of low degree.
+   // LRGs of low degree are trivially colorable.
+-  Simplify(); 
++  Simplify();
+ 
+   // Select colors by re-inserting LRGs back into the IFG in reverse order.
+   // Return whether or not something spills.
+-  uint spills = Select( ); 
++  uint spills = Select( );
+ 
+   // If we spill, split and recycle the entire thing
+   while( spills ) {
+@@ -373,7 +370,7 @@
+         return;
+       }
+     }
+-  
++
+     if( !_maxlrg ) return;
+     _maxlrg = Split( _maxlrg );        // Split spilling LRG everywhere
+     // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
+@@ -396,7 +393,7 @@
+       IndexSet::reset_memory(C, &live_arena);
+       ifg.init(_maxlrg);
+ 
+-      // Create LiveRanGe array. 
++      // Create LiveRanGe array.
+       // Intersect register masks for all USEs and DEFs
+       gather_lrg_masks( true );
+       live.compute( _maxlrg );
+@@ -421,7 +418,7 @@
+ 
+     // Simplify the InterFerence Graph by removing LRGs of low degree.
+     // LRGs of low degree are trivially colorable.
+-    Simplify(); 
++    Simplify();
+ 
+     // Select colors by re-inserting LRGs back into the IFG in reverse order.
+     // Return whether or not something spills.
+@@ -437,13 +434,13 @@
+ 
+   // max_reg is past the largest *register* used.
+   // Convert that to a frame_slot number.
+-  if( _max_reg <= _matcher._new_SP ) 
++  if( _max_reg <= _matcher._new_SP )
+     _framesize = C->out_preserve_stack_slots();
+   else _framesize = _max_reg -_matcher._new_SP;
+   assert((int)(_matcher._new_SP+_framesize) >= (int)_matcher._out_arg_limit, "framesize must be large enough");
+ 
+   // This frame must preserve the required fp alignment
+-  const int stack_alignment_in_words = Matcher::stack_alignment_in_slots(); 
++  const int stack_alignment_in_words = Matcher::stack_alignment_in_slots();
+   if (stack_alignment_in_words > 0)
+     _framesize = round_to(_framesize, Matcher::stack_alignment_in_bytes());
+   assert( _framesize >= 0 && _framesize <= 1000000, "sanity check" );
+@@ -491,7 +488,7 @@
+     }
+   }
+ 
+-  // Done!  
++  // Done!
+   _live = NULL;
+   _ifg = NULL;
+   C->set_indexSet_arena(NULL);  // ResourceArea is at end of scope
+@@ -565,10 +562,10 @@
+         // further spilling is unlikely to make progress.
+         if( _spilled_once.test(n->_idx) ) {
+           lrg._was_spilled1 = 1;
+-          if( _spilled_twice.test(n->_idx) ) 
++          if( _spilled_twice.test(n->_idx) )
+             lrg._was_spilled2 = 1;
+         }
+-        
++
+ #ifndef PRODUCT
+         if (trace_spilling() && lrg._def != NULL) {
+           // collect defs for MultiDef printing
+@@ -611,35 +608,35 @@
+           lrg._is_bound = 1;
+           break;
+         case Op_RegP:
+-#ifdef _LP64          
++#ifdef _LP64
+           lrg.set_num_regs(2);  // Size is 2 stack words
+ #else
+-          lrg.set_num_regs(1);  // Size is 1 stack word 
++          lrg.set_num_regs(1);  // Size is 1 stack word
+ #endif
+-          // Register pressure is tracked relative to the maximum values 
+-          // suggested for that platform, INTPRESSURE and FLOATPRESSURE, 
++          // Register pressure is tracked relative to the maximum values
++          // suggested for that platform, INTPRESSURE and FLOATPRESSURE,
+           // and relative to other types which compete for the same regs.
+-          // 
+-          // The following table contains suggested values based on the 
++          //
++          // The following table contains suggested values based on the
+           // architectures as defined in each .ad file.
+-          // INTPRESSURE and FLOATPRESSURE may be tuned differently for 
++          // INTPRESSURE and FLOATPRESSURE may be tuned differently for
+           // compile-speed or performance.
+-          // Note1: 
+-          // SPARC and SPARCV9 reg_pressures are at 2 instead of 1 
+-          // since .ad registers are defined as high and low halves. 
+-          // These reg_pressure values remain compatible with the code 
+-          // in is_high_pressure() which relates get_invalid_mask_size(), 
+-          // Block::_reg_pressure and INTPRESSURE, FLOATPRESSURE.  
+-          // Note2: 
+-          // SPARC -d32 has 24 registers available for integral values, 
+-          // but only 10 of these are safe for 64-bit longs.  
+-          // Using set_reg_pressure(2) for both int and long means 
+-          // the allocator will believe it can fit 26 longs into 
+-          // registers.  Using 2 for longs and 1 for ints means the 
++          // Note1:
++          // SPARC and SPARCV9 reg_pressures are at 2 instead of 1
++          // since .ad registers are defined as high and low halves.
++          // These reg_pressure values remain compatible with the code
++          // in is_high_pressure() which relates get_invalid_mask_size(),
++          // Block::_reg_pressure and INTPRESSURE, FLOATPRESSURE.
++          // Note2:
++          // SPARC -d32 has 24 registers available for integral values,
++          // but only 10 of these are safe for 64-bit longs.
++          // Using set_reg_pressure(2) for both int and long means
++          // the allocator will believe it can fit 26 longs into
++          // registers.  Using 2 for longs and 1 for ints means the
+           // allocator will attempt to put 52 integers into registers.
+-          // The settings below limit this problem to methods with 
++          // The settings below limit this problem to methods with
+           // many long values which are being run on 32-bit SPARC.
+-          //        
++          //
+           // ------------------- reg_pressure --------------------
+           // Each entry is reg_pressure_per_value,number_of_regs
+           //         RegL  RegI  RegFlags   RegF RegD    INTPRESSURE  FLOATPRESSURE
+@@ -670,10 +667,10 @@
+           } else {
+             lrg.set_reg_pressure(1);
+           }
+-#else 
++#else
+           lrg.set_reg_pressure(1);  // normally one value per register
+ #endif
+-          // If this def of a double forces a mis-aligned double, 
++          // If this def of a double forces a mis-aligned double,
+           // flag as '_fat_proj' - really flag as allowing misalignment
+           // AND changes how we count interferences.  A mis-aligned
+           // double can interfere with TWO aligned pairs, or effectively
+@@ -738,11 +735,11 @@
+         // }
+ 
+         // Limit result register mask to acceptable registers.
+-        // Do not limit registers from uncommon uses before 
+-        // AggressiveCoalesce.  This effectively pre-virtual-splits 
++        // Do not limit registers from uncommon uses before
++        // AggressiveCoalesce.  This effectively pre-virtual-splits
+         // around uncommon uses of common defs.
+         const RegMask &rm = n->in_RegMask(k);
+-        if( !after_aggressive && 
++        if( !after_aggressive &&
+           _cfg._bbs[n->in(k)->_idx]->_freq > 1000*b->_freq ) {
+           // Since we are BEFORE aggressive coalesce, leave the register
+           // mask untrimmed by the call.  This encourages more coalescing.
+@@ -755,7 +752,7 @@
+         const RegMask &lrgmask = lrg.mask();
+         if( lrgmask.is_bound1() || lrgmask.is_bound2() )
+           lrg._is_bound = 1;
+-        // If this use of a double forces a mis-aligned double, 
++        // If this use of a double forces a mis-aligned double,
+         // flag as '_fat_proj' - really flag as allowing misalignment
+         // AND changes how we count interferences.  A mis-aligned
+         // double can interfere with TWO aligned pairs, or effectively
+@@ -766,7 +763,7 @@
+         }
+         // if the LRG is an unaligned pair, we will have to spill
+         // so clear the LRG's register mask if it is not already spilled
+-        if ( !n->is_SpillCopy() && 
++        if ( !n->is_SpillCopy() &&
+                (lrg._def == NULL || lrg._def == NodeSentinel || !lrg._def->is_SpillCopy()) &&
+                lrgmask.is_misaligned_Pair()) {
+           lrg.Clear();
+@@ -775,7 +772,7 @@
+         // Check for maximum frequency value
+         if( lrg._maxfreq < b->_freq )
+           lrg._maxfreq = b->_freq;
+-            
++
+       } // End for all allocated inputs
+     } // end for all instructions
+   } // end for all blocks
+@@ -791,7 +788,7 @@
+       lrg._direct_conflict = 1;
+     }
+     lrg.set_degree(0);          // no neighbors in IFG yet
+-  }  
++  }
+ }
+ 
+ //------------------------------set_was_low------------------------------------
+@@ -808,7 +805,7 @@
+     if( lrgs(i).lo_degree() ) {
+       lrgs(i)._was_lo = 1;      // Trivially of low degree
+     } else {                    // Else check the Brigg's assertion
+-      // Brigg's observation is that the lo-degree neighbors of a 
++      // Brigg's observation is that the lo-degree neighbors of a
+       // hi-degree live range will not interfere with the color choices
+       // of said hi-degree live range.  The Simplify reverse-stack-coloring
+       // order takes care of the details.  Hence you do not have to count
+@@ -818,10 +815,10 @@
+       IndexSetIterator elements(s);
+       uint lidx;
+       while((lidx = elements.next()) != 0) {
+-        if( !lrgs(lidx).lo_degree() ) 
++        if( !lrgs(lidx).lo_degree() )
+           briggs_degree += MAX2(size,lrgs(lidx).num_regs());
+       }
+-      if( briggs_degree < lrgs(i).degrees_of_freedom() ) 
++      if( briggs_degree < lrgs(i).degrees_of_freedom() )
+         lrgs(i)._was_lo = 1;    // Low degree via the briggs assertion
+     }
+     assert(old_was_lo <= lrgs(i)._was_lo, "_was_lo may not decrease");
+@@ -882,16 +879,16 @@
+ 
+   while( lo_no_copy ) {
+     uint lo = lo_no_copy;
+-    lo_no_copy = lrgs(lo)._next; 
++    lo_no_copy = lrgs(lo)._next;
+     int size = lrgs(lo).num_regs();
+ 
+     // Put the simplified guy on the simplified list.
+     lrgs(lo)._next = _simplified;
+     _simplified = lo;
+-    
+-    // Yank this guy from the IFG.  
++
++    // Yank this guy from the IFG.
+     IndexSet *adj = _ifg->remove_node( lo );
+-      
++
+     // If any neighbors' degrees fall below their number of
+     // allowed registers, then put that neighbor on the low degree
+     // list.  Note that 'degree' can only fall and 'numregs' is
+@@ -907,9 +904,9 @@
+       if( n->just_lo_degree() && !n->_has_copy ) {
+         assert(!(*_ifg->_yanked)[neighbor],"Cannot move to lo degree twice");
+         // Put on lo-degree list
+-        n->_next = lo_no_copy; 
++        n->_next = lo_no_copy;
+         lo_no_copy = neighbor;
+-      } 
++      }
+     }
+   } // End of while lo-degree no_copy worklist not empty
+ 
+@@ -946,10 +943,10 @@
+           lrgs(datum)._risk_bias = lo;
+         }
+       }
+-      
+-      // Yank this guy from the IFG.  
++
++      // Yank this guy from the IFG.
+       IndexSet *adj = _ifg->remove_node( lo );
+-      
++
+       // If any neighbors' degrees fall below their number of
+       // allowed registers, then put that neighbor on the low degree
+       // list.  Note that 'degree' can only fall and 'numregs' is
+@@ -975,7 +972,7 @@
+           if( prev ) lrgs(prev)._next = next;
+           else _hi_degree = next;
+           lrgs(next)._prev = prev;
+-          n->_next = _lo_degree; 
++          n->_next = _lo_degree;
+           _lo_degree = neighbor;
+         }
+       }
+@@ -988,7 +985,7 @@
+     uint lo_score = _hi_degree;
+     double score = lrgs(lo_score).score();
+     double area = lrgs(lo_score)._area;
+-    
++
+     // Find cheapest guy
+     debug_only( int lo_no_simplify=0; );
+     for( uint i = _hi_degree; i; i = lrgs(i)._next ) {
+@@ -1036,7 +1033,7 @@
+     lrgs(lo_score)._at_risk = true;
+     _lo_degree = lo_score;
+     lo_lrg->_next = 0;
+-    
++
+   } // End of while not simplified everything
+ 
+ }
+@@ -1096,15 +1093,15 @@
+   if( lrg.num_regs() == 2 ) {
+     // Find an aligned pair
+     return OptoReg::add(lrg.mask().find_first_pair(),chunk);
+-  } 
+- 
++  }
++
+   // CNC - Fun hack.  Alternate 1st and 2nd selection.  Enables post-allocate
+   // copy removal to remove many more copies, by preventing a just-assigned
+   // register from being repeatedly assigned.
+   OptoReg::Name reg = lrg.mask().find_first_elem();
+   if( (++_alternate & 1) && OptoReg::is_valid(reg) ) {
+     // This 'Remove; find; Insert' idiom is an expensive way to find the
+-    // SECOND element in the mask.  
++    // SECOND element in the mask.
+     lrg.Remove(reg);
+     OptoReg::Name reg2 = lrg.mask().find_first_elem();
+     lrg.Insert(reg);
+@@ -1116,7 +1113,7 @@
+ 
+ //------------------------------choose_color-----------------------------------
+ // Choose a color in the current chunk
+-OptoReg::Name PhaseChaitin::choose_color( LRG &lrg, int chunk ) {  
++OptoReg::Name PhaseChaitin::choose_color( LRG &lrg, int chunk ) {
+   assert( C->in_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP-1)), "must not allocate stack0 (inside preserve area)");
+   assert(C->out_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP+0)), "must not allocate stack0 (inside preserve area)");
+ 
+@@ -1127,7 +1124,7 @@
+ 
+   assert( lrg.num_regs() >= 2, "dead live ranges do not color" );
+ 
+-  // Fat-proj case or misaligned double argument. 
++  // Fat-proj case or misaligned double argument.
+   assert(lrg.compute_mask_size() == lrg.num_regs() ||
+          lrg.num_regs() == 2,"fat projs exactly color" );
+   assert( !chunk, "always color in 1st chunk" );
+@@ -1231,7 +1228,7 @@
+       // Bump register mask up to next stack chunk
+       chunk += RegMask::CHUNK_SIZE;
+       lrg->Set_All();
+-      
++
+       goto retry_next_chunk;
+     }
+ 
+@@ -1282,7 +1279,7 @@
+ 
+     //---------------
+     // Live range is live and no colors available
+-    else {   
++    else {
+       assert( lrg->alive(), "" );
+       assert( !lrg->_fat_proj || lrg->_def == NodeSentinel ||
+               lrg->_def->outcnt() > 0, "fat_proj cannot spill");
+@@ -1388,7 +1385,7 @@
+           }
+           b->_nodes.map(j,cisc);          // Insert into basic block
+           n->replace_by(cisc); // Correct graph
+-          // 
++          //
+           ++_used_cisc_instructions;
+ #ifndef PRODUCT
+           if( TraceCISCSpill ) {
+@@ -1406,7 +1403,7 @@
+           ++_unused_cisc_instructions;    // input can be on stack
+         }
+       }
+-      
++
+     } // End of for all instructions
+ 
+   } // End of for all blocks
+@@ -1421,7 +1418,7 @@
+   if( derived_base_map[derived->_idx] )
+     return derived_base_map[derived->_idx];
+ 
+-  // See if this happens to be a base.  
++  // See if this happens to be a base.
+   // NOTE: we use TypePtr instead of TypeOopPtr because we can have
+   // pointers derived from NULL!  These are always along paths that
+   // can't happen at run-time but the optimizer cannot deduce it so
+@@ -1510,7 +1507,7 @@
+   // For all blocks in RPO do...
+   for( uint i=0; i<_cfg._num_blocks; i++ ) {
+     Block *b = _cfg._blocks[i];
+-    // Note use of deep-copy constructor.  I cannot hammer the original 
++    // Note use of deep-copy constructor.  I cannot hammer the original
+     // liveout bits, because they are needed by the following coalesce pass.
+     IndexSet liveout(_live->live(b));
+ 
+@@ -1571,7 +1568,7 @@
+             // pair of inputs
+             n->add_req( derived );
+             n->add_req( base );
+-          
++
+             // See if the base pointer is already live to this point.
+             // Since I'm working on the SSA form, live-ness amounts to
+             // reaching def's.  So if I find the base's live range then
+@@ -1603,7 +1600,7 @@
+     liveout.clear();  // Free the memory used by liveout.
+ 
+   } // End of forall blocks
+-  _maxlrg = maxlrg;  
++  _maxlrg = maxlrg;
+ 
+   // If I created a new live range I need to recompute live
+   if( maxlrg != _ifg->_maxlrg )
+@@ -1685,11 +1682,11 @@
+     tty->print("L%d",r);
+     tty->print("/N%d ",m->_idx);
+   }
+-  if( n->is_Mach() ) n->as_Mach()->dump_spec();
+-  else n->dump_spec();
++  if( n->is_Mach() ) n->as_Mach()->dump_spec(tty);
++  else n->dump_spec(tty);
+   if( _spilled_once.test(n->_idx ) ) {
+     tty->print(" Spill_1");
+-    if( _spilled_twice.test(n->_idx ) ) 
++    if( _spilled_twice.test(n->_idx ) )
+       tty->print(" Spill_2");
+   }
+   tty->print("\n");
+@@ -1697,9 +1694,9 @@
+ 
+ void PhaseChaitin::dump( const Block * b ) const {
+   b->dump_head( &_cfg._bbs );
+-  
++
+   // For all instructions
+-  for( uint j = 0; j < b->_nodes.size(); j++ ) 
++  for( uint j = 0; j < b->_nodes.size(); j++ )
+     dump(b->_nodes[j]);
+   // Print live-out info at end of block
+   if( _live ) {
+@@ -1721,7 +1718,7 @@
+               _matcher._new_SP, _framesize );
+ 
+   // For all blocks
+-  for( uint i = 0; i < _cfg._num_blocks; i++ ) 
++  for( uint i = 0; i < _cfg._num_blocks; i++ )
+     dump(_cfg._blocks[i]);
+   // End of per-block dump
+   tty->print("\n");
+@@ -1737,7 +1734,7 @@
+     tty->print("L%d: ",i2);
+     if( i2 < _ifg->_maxlrg ) lrgs(i2).dump( );
+     else tty->print("new LRG");
+-  }  
++  }
+   tty->print_cr("");
+ 
+   // Dump lo-degree list
+@@ -1777,7 +1774,7 @@
+   tty->print("Hi degree: ");
+   for(uint i3 = _hi_degree; i3; i3 = lrgs(i3)._next )
+     tty->print("L%d ",i3);
+-  tty->print_cr("");  
++  tty->print_cr("");
+ }
+ 
+ //------------------------------dump_simplified--------------------------------
+@@ -1814,7 +1811,7 @@
+       sprintf(buf,"L%d",lidx);  // No register binding yet
+     } else if( !lidx ) {        // Special, not allocated value
+       strcpy(buf,"Special");
+-    } else if( (lrgs(lidx).num_regs() == 1) 
++    } else if( (lrgs(lidx).num_regs() == 1)
+                 ? !lrgs(lidx).mask().is_bound1()
+                 : !lrgs(lidx).mask().is_bound2() ) {
+       sprintf(buf,"L%d",lidx); // No register binding yet
+@@ -1885,7 +1882,7 @@
+         break;
+       }
+     }
+-    if( j >= argcnt ) 
++    if( j >= argcnt )
+       tty->print_cr("HOLE, owned by SELF");
+   }
+ 
+@@ -1905,7 +1902,7 @@
+     tty->print("#r%3.3d %s+%2d: ",reg,fp,reg2offset_unchecked(reg));
+     if( _matcher.return_addr() == reg )
+       tty->print_cr("return address");
+-    else if( _matcher.return_addr() == OptoReg::add(reg,1) && 
++    else if( _matcher.return_addr() == OptoReg::add(reg,1) &&
+              VerifyStackAtCalls )
+       tty->print_cr("0xBADB100D   +VerifyStackAtCalls");
+     else if ((int)OptoReg::reg2stack(reg) < C->fixed_slots())
+@@ -1997,14 +1994,14 @@
+ #endif // not PRODUCT
+ 
+ //------------------------------print_chaitin_statistics-------------------------------
+-int PhaseChaitin::_final_loads  = 0; 
+-int PhaseChaitin::_final_stores = 0; 
+-int PhaseChaitin::_final_memoves= 0; 
+-int PhaseChaitin::_final_copies = 0; 
+-double PhaseChaitin::_final_load_cost  = 0; 
+-double PhaseChaitin::_final_store_cost = 0; 
+-double PhaseChaitin::_final_memove_cost= 0; 
+-double PhaseChaitin::_final_copy_cost  = 0; 
++int PhaseChaitin::_final_loads  = 0;
++int PhaseChaitin::_final_stores = 0;
++int PhaseChaitin::_final_memoves= 0;
++int PhaseChaitin::_final_copies = 0;
++double PhaseChaitin::_final_load_cost  = 0;
++double PhaseChaitin::_final_store_cost = 0;
++double PhaseChaitin::_final_memove_cost= 0;
++double PhaseChaitin::_final_copy_cost  = 0;
+ int PhaseChaitin::_conserv_coalesce = 0;
+ int PhaseChaitin::_conserv_coalesce_pair = 0;
+ int PhaseChaitin::_conserv_coalesce_trie = 0;
+@@ -2024,10 +2021,10 @@
+ void PhaseChaitin::print_chaitin_statistics() {
+   tty->print_cr("Inserted %d spill loads, %d spill stores, %d mem-mem moves and %d copies.", _final_loads, _final_stores, _final_memoves, _final_copies);
+   tty->print_cr("Total load cost= %6.0f, store cost = %6.0f, mem-mem cost = %5.2f, copy cost = %5.0f.", _final_load_cost, _final_store_cost, _final_memove_cost, _final_copy_cost);
+-  tty->print_cr("Adjusted spill cost = %7.0f.", 
+-                _final_load_cost*4.0 + _final_store_cost  * 2.0 + 
++  tty->print_cr("Adjusted spill cost = %7.0f.",
++                _final_load_cost*4.0 + _final_store_cost  * 2.0 +
+                 _final_copy_cost*1.0 + _final_memove_cost*12.0);
+-  tty->print("Conservatively coalesced %d copies, %d pairs", 
++  tty->print("Conservatively coalesced %d copies, %d pairs",
+                 _conserv_coalesce, _conserv_coalesce_pair);
+   if( _conserv_coalesce_trie || _conserv_coalesce_quad )
+     tty->print(", %d tries, %d quads", _conserv_coalesce_trie, _conserv_coalesce_quad);
+diff -ruN openjdk6/hotspot/src/share/vm/opto/chaitin.hpp openjdk/hotspot/src/share/vm/opto/chaitin.hpp
+--- openjdk6/hotspot/src/share/vm/opto/chaitin.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/chaitin.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)chaitin.hpp	1.159 07/05/05 17:06:09 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class LoopTree;
+@@ -61,7 +58,7 @@
+ private:
+   uint _reg;                    // Chosen register; undefined if mask is plural
+ public:
+-  // Return chosen register for this LRG.  Error if the LRG is not bound to 
++  // Return chosen register for this LRG.  Error if the LRG is not bound to
+   // a single register.
+   OptoReg::Name reg() const { return OptoReg::Name(_reg); }
+   void set_reg( OptoReg::Name r ) { _reg = r; }
+@@ -87,7 +84,7 @@
+   int compute_mask_size() const { return _mask.is_AllStack() ? 65535 : _mask.Size(); }
+   void set_mask_size( int size ) {
+     assert((size == 65535) || (size == (int)_mask.Size()), "");
+-    _mask_size = size; 
++    _mask_size = size;
+     debug_only(_msize_valid=1;)
+     debug_only( if( _num_regs == 2 && !_fat_proj ) _mask.VerifyPairs(); )
+   }
+@@ -123,7 +120,7 @@
+   void set_reg_pressure(int i)  { _reg_pressure = i; }
+   int      reg_pressure() const { return _reg_pressure; }
+ 
+-  // How much 'wiggle room' does this live range have?  
++  // How much 'wiggle room' does this live range have?
+   // How many color choices can it make (scaled by _num_regs)?
+   int degrees_of_freedom() const { return mask_size() - _num_regs; }
+   // Bound LRGs have ZERO degrees of freedom.  We also count
+@@ -149,7 +146,7 @@
+     // registers and has NO interferences.
+     // If _fat_proj is clear, live range requires num_regs() to be a power of
+     // 2, and it requires registers to form an aligned, adjacent set.
+-         _fat_proj:1,           // 
++         _fat_proj:1,           //
+          _was_lo:1,             // Was lo-degree prior to coalesce
+          _msize_valid:1,        // _mask_size cache valid
+          _degree_valid:1,       // _degree cache valid
+@@ -215,9 +212,9 @@
+   uint _maxlrg;
+ 
+   Arena *_arena;
+-  
++
+   // Keep track of inserted and deleted Nodes
+-  VectorSet *_yanked;           
++  VectorSet *_yanked;
+ 
+   PhaseIFG( Arena *arena );
+   void init( uint maxlrg );
+@@ -225,7 +222,7 @@
+   // Add edge between a and b.  Returns true if actually added.
+   int add_edge( uint a, uint b );
+ 
+-  // Add edge between a and everything in the vector 
++  // Add edge between a and everything in the vector
+   void add_vector( uint a, IndexSet *vec );
+ 
+   // Test for edge existence
+@@ -240,7 +237,7 @@
+   void Union( uint a, uint b );
+   // Test for edge in Squared-up matrix
+   int test_edge_sq( uint a, uint b ) const;
+-  // Yank a Node and all connected edges from the IFG.  Be prepared to 
++  // Yank a Node and all connected edges from the IFG.  Be prepared to
+   // re-insert the yanked Node in reverse order of yanking.  Return a
+   // list of neighbors (edges) yanked.
+   IndexSet *remove_node( uint a );
+@@ -285,7 +282,7 @@
+ 
+   int _trip_cnt;
+   int _alternate;
+-  
++
+   uint _maxlrg;                 // Max live range number
+   LRG &lrgs(uint idx) const { return _ifg->lrgs(idx); }
+   PhaseLive *_live;             // Liveness, used in the interference graph
+@@ -295,7 +292,6 @@
+   VectorSet _spilled_twice;     // Nodes that have been spilled twice
+ 
+   LRG_List _names;              // Map from Nodes to Live RanGes
+-  uint n2lidx( const Node *n ) const { return _names[n->_idx]; }
+ 
+   // Union-find map.  Declared as a short for speed.
+   // Indexed by live-range number, it returns the compacted live-range number
+@@ -305,7 +301,7 @@
+   // Remove the need for the Union-Find mapping
+   void compress_uf_map_for_nodes( );
+ 
+-  // Combine the Live Range Indices for these 2 Nodes into a single live 
++  // Combine the Live Range Indices for these 2 Nodes into a single live
+   // range.  Future requests for any Node in either live range will
+   // return the live range index for the combined live range.
+   void Union( const Node *src, const Node *dst );
+@@ -359,6 +355,8 @@
+   // Do all the real work of allocate
+   void Register_Allocate();
+ 
++  uint n2lidx( const Node *n ) const { return _names[n->_idx]; }
++
+ #ifndef PRODUCT
+   bool trace_spilling() const { return _trace_spilling; }
+ #endif
+@@ -382,7 +380,7 @@
+   }
+ 
+   // Add edge between reg and everything in the vector.
+-  // Same as _ifg->add_vector(reg,live) EXCEPT use the RegMask 
++  // Same as _ifg->add_vector(reg,live) EXCEPT use the RegMask
+   // information to trim the set of interferences.  Return the
+   // count of edges added.
+   void interfere_with_live( uint reg, IndexSet *live );
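Aside, for illustration only — not part of this patch. The chaitin.hpp hunks keep PhaseIFG's contract: remove_node() yanks a live range and returns its yanked neighbors, and callers must re-insert in reverse order of yanking. A self-contained sketch assuming a simple set-based adjacency (the real code uses IndexSet); names are invented:

#include <set>
#include <utility>
#include <vector>

struct Ifg {
    std::vector<std::set<unsigned>> adj;   // adjacency per live range

    explicit Ifg(unsigned maxlrg) : adj(maxlrg) {}

    // Add edge between a and b.  Returns true if actually added.
    bool add_edge(unsigned a, unsigned b) {
        adj[b].insert(a);
        return adj[a].insert(b).second;
    }
    // Yank node a and all connected edges; return the neighbors yanked.
    std::set<unsigned> remove_node(unsigned a) {
        std::set<unsigned> yanked = std::move(adj[a]);
        adj[a].clear();
        for (unsigned n : yanked) adj[n].erase(a);
        return yanked;
    }
    // Re-insert a yanked node with exactly the neighbors it had.
    void re_insert(unsigned a, const std::set<unsigned> &yanked) {
        for (unsigned n : yanked) add_edge(a, n);
    }
};

Simplify() pushes each removed live range on a stack and Select() pops it, so the required reverse order falls out of the stack discipline (see the Simplify/Select hunks in chaitin.cpp above).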
+diff -ruN openjdk6/hotspot/src/share/vm/opto/classes.cpp openjdk/hotspot/src/share/vm/opto/classes.cpp
+--- openjdk6/hotspot/src/share/vm/opto/classes.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/classes.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)classes.cpp	1.31 07/05/05 17:06:12 JVM"
+-#endif
+ /*
+  * Copyright 1997-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -35,4 +32,3 @@
+ #define macro(x) int x##Node::Opcode() const { return Op_##x; }
+ #include "classes.hpp"
+ #undef macro
+-
+diff -ruN openjdk6/hotspot/src/share/vm/opto/classes.hpp openjdk/hotspot/src/share/vm/opto/classes.hpp
+--- openjdk6/hotspot/src/share/vm/opto/classes.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/classes.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)classes.hpp	1.177 07/05/05 17:06:12 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The giant table of Node classes.
+@@ -118,6 +115,7 @@
+ macro(If)
+ macro(IfFalse)
+ macro(IfTrue)
++macro(Initialize)
+ macro(JProj)
+ macro(Jump)
+ macro(JumpProj)
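Aside, for illustration only — not part of this patch. classes.cpp above stamps out one Opcode() method per node class by defining macro(x) and then #including classes.hpp, so the macro(Initialize) line added here extends every such table at once. A toy version of the idiom, using a list macro rather than a separate header; all names are invented:

#include <cstdio>

// The table of classes; in HotSpot this list lives in classes.hpp.
#define NODE_CLASSES(m) \
    m(If)               \
    m(Initialize)       \
    m(Jump)

// First expansion: an opcode constant per class.
enum Opcodes {
#define macro(x) Op_##x,
    NODE_CLASSES(macro)
#undef macro
};

// Second expansion: a parallel table of names.
static const char *const opcode_names[] = {
#define macro(x) #x,
    NODE_CLASSES(macro)
#undef macro
};

int main() {
    // Adding one m(...) line extends both tables in lock step.
    std::printf("%s = %d\n", opcode_names[Op_Initialize], Op_Initialize);
    return 0;
}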
+diff -ruN openjdk6/hotspot/src/share/vm/opto/coalesce.cpp openjdk/hotspot/src/share/vm/opto/coalesce.cpp
+--- openjdk6/hotspot/src/share/vm/opto/coalesce.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/coalesce.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)coalesce.cpp	1.195 07/05/17 17:43:24 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -34,8 +31,8 @@
+   _maxlrg = maxlrg;
+   // Force the Union-Find mapping to be at least this large
+   _uf_map.extend(_maxlrg,0);
+-  // Initialize it to be the ID mapping.  
+-  for( uint i=0; i<_maxlrg; i++ ) 
++  // Initialize it to be the ID mapping.
++  for( uint i=0; i<_maxlrg; i++ )
+     _uf_map.map(i,i);
+ }
+ 
+@@ -57,7 +54,7 @@
+ // Straight out of Tarjan's union-find algorithm
+ uint PhaseChaitin::Find_compress( uint lrg ) {
+   uint cur = lrg;
+-  uint next = _uf_map[cur]; 
++  uint next = _uf_map[cur];
+   while( next != cur ) {        // Scan chain of equivalences
+     assert( next < cur, "always union smaller" );
+     cur = next;                 // until find a fixed-point
+@@ -85,10 +82,10 @@
+ // Like Find above, but no path compress, so bad asymptotic behavior
+ uint PhaseChaitin::Find_const( uint lrg ) const {
+   if( !lrg ) return lrg;        // Ignore the zero LRG
+-  // Off the end?  This happens during debugging dumps when you got 
++  // Off the end?  This happens during debugging dumps when you got
+   // brand new live ranges but have not told the allocator yet.
+-  if( lrg >= _maxlrg ) return lrg;    
+-  uint next = _uf_map[lrg]; 
++  if( lrg >= _maxlrg ) return lrg;
++  uint next = _uf_map[lrg];
+   while( next != lrg ) {        // Scan chain of equivalences
+     assert( next < lrg, "always union smaller" );
+     lrg = next;                 // until find a fixed-point
+@@ -168,7 +165,7 @@
+     printf("Compacted %d LRs from %d\n",i-j,i);
+   // Now change the Node->LR mapping to reflect the compacted names
+   uint unique = _names.Size();
+-  for( i=0; i<unique; i++ ) 
++  for( i=0; i<unique; i++ )
+     _names.map(i,_uf_map[_names[i]]);
+ 
+   // Reset the Union-Find mapping
+@@ -196,7 +193,7 @@
+     for( j=1; j<b->num_preds(); j++ )
+       tty->print("B%d ", _phc._cfg._bbs[b->pred(j)->_idx]->_pre_order);
+     tty->print("-> ");
+-    for( j=0; j<b->_num_succs; j++ ) 
++    for( j=0; j<b->_num_succs; j++ )
+       tty->print("B%d ",b->_succs[j]->_pre_order);
+     tty->print(" IDom: B%d/#%d\n", b->_idom ? b->_idom->_pre_order : 0, b->_dom_depth);
+     uint cnt = b->_nodes.size();
+@@ -216,13 +213,13 @@
+           if( !any_prec++ ) tty->print(" |");
+           dump( n->in(k) );
+         }
+-      
++
+       // Dump node-specific info
+-      n->dump_spec();
++      n->dump_spec(tty);
+       tty->print("\n");
+-      
++
+     }
+-    tty->print("\n");  
++    tty->print("\n");
+   }
+ }
+ #endif
+@@ -241,18 +238,18 @@
+     // Now, why is int->oop OK?  We end up declaring a raw-pointer as an oop
+     // and in general that's a bad thing.  However, int->oop conversions only
+     // happen at GC points, so the lifetime of the misclassified raw-pointer
+-    // is from the CheckCastPP (that converts it to an oop) backwards up 
+-    // through a merge point and into the slow-path call, and around the 
++    // is from the CheckCastPP (that converts it to an oop) backwards up
++    // through a merge point and into the slow-path call, and around the
+     // diamond up to the heap-top check and back down into the slow-path call.
+     // The misclassified raw pointer is NOT live across the slow-path call,
+-    // and so does not appear in any GC info, so the fact that it is 
++    // and so does not appear in any GC info, so the fact that it is
+     // misclassified is OK.
+ 
+     if( (lrg1->_is_oop || !lrg2->_is_oop) && // not an oop->int cast AND
+         // Compatible final mask
+-        lrg1->mask().overlap( lrg2->mask() ) ) { 
++        lrg1->mask().overlap( lrg2->mask() ) ) {
+       // Merge larger into smaller.
+-      if( lr1 > lr2 ) {                                          
++      if( lr1 > lr2 ) {
+         uint  tmp =  lr1;  lr1 =  lr2;  lr2 =  tmp;
+         Node   *n =   n1;   n1 =   n2;   n2 =    n;
+         LRG *ltmp = lrg1; lrg1 = lrg2; lrg2 = ltmp;
+@@ -283,12 +280,12 @@
+ //------------------------------insert_copy_with_overlap-----------------------
+ // I am inserting copies to come out of SSA form.  In the general case, I am
+ // doing a parallel renaming.  I'm in the Named world now, so I can't do a
+-// general parallel renaming.  All the copies now use  "names" (live-ranges) 
+-// to carry values instead of the explicit use-def chains.  Suppose I need to 
+-// insert 2 copies into the same block.  They copy L161->L128 and L128->L132.  
+-// If I insert them in the wrong order then L128 will get clobbered before it 
+-// can get used by the second copy.  This cannot happen in the SSA model; 
+-// direct use-def chains get me the right value.  It DOES happen in the named 
++// general parallel renaming.  All the copies now use  "names" (live-ranges)
++// to carry values instead of the explicit use-def chains.  Suppose I need to
++// insert 2 copies into the same block.  They copy L161->L128 and L128->L132.
++// If I insert them in the wrong order then L128 will get clobbered before it
++// can get used by the second copy.  This cannot happen in the SSA model;
++// direct use-def chains get me the right value.  It DOES happen in the named
+ // model so I have to handle the reordering of copies.
+ //
+ // In general, I need to topo-sort the placed copies to avoid conflicts.
+@@ -297,9 +294,9 @@
+ void PhaseAggressiveCoalesce::insert_copy_with_overlap( Block *b, Node *copy, uint dst_name, uint src_name ) {
+ 
+   // Scan backwards for the locations of the last use of the dst_name.
+-  // I am about to clobber the dst_name, so the copy must be inserted 
++  // I am about to clobber the dst_name, so the copy must be inserted
+   // after the last use.  Last use is really first-use on a backwards scan.
+-  uint i = b->end_idx()-1; 
++  uint i = b->end_idx()-1;
+   while( 1 ) {
+     Node *n = b->_nodes[i];
+     // Check for end of virtual copies; this is also the end of the
+@@ -312,12 +309,12 @@
+   }
+   uint last_use_idx = i;
+ 
+-  // Also search for any kill of src_name that exits the block.  
++  // Also search for any kill of src_name that exits the block.
+   // Since the copy uses src_name, I have to come before any kill.
+   uint kill_src_idx = b->end_idx();
+   // There can be only 1 kill that exits any block and that is
+   // the last kill.  Thus it is the first kill on a backwards scan.
+-  i = b->end_idx()-1; 
++  i = b->end_idx()-1;
+   while( 1 ) {
+     Node *n = b->_nodes[i];
+     // Check for end of virtual copies; this is also the end of the
+@@ -344,7 +341,7 @@
+     _phc._cfg._bbs.map( tmp->_idx, b );
+     last_use_idx++;
+   }
+-  
++
+   // Insert just after last use
+   b->_nodes.insert(last_use_idx+1,copy);
+ }
+@@ -365,9 +362,9 @@
+           liveout->insert(compressed_lrg);
+         }
+       }
+-    }  
++    }
+   }
+-  
++
+   // All new nodes added are actual copies to replace virtual copies.
+   // Nodes with index less than '_unique' are original, non-virtual Nodes.
+   _unique = C->unique();
+@@ -418,7 +415,7 @@
+             Node *copy;
+             assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
+             // Rematerialize constants instead of copying them
+-            if( m->is_Mach() && m->as_Mach()->is_Con() && 
++            if( m->is_Mach() && m->as_Mach()->is_Con() &&
+                 m->as_Mach()->rematerialize() ) {
+               copy = m->clone();
+               // Insert the copy in the predecessor basic block
+@@ -453,7 +450,7 @@
+             assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
+             // At this point it is unsafe to extend live ranges (6550579).
+             // Rematerialize only constants as we do for Phi above.
+-            if( m->is_Mach() && m->as_Mach()->is_Con() && 
++            if( m->is_Mach() && m->as_Mach()->is_Con() &&
+                 m->as_Mach()->rematerialize() ) {
+               copy = m->clone();
+               // Insert the copy in the basic block, just before us
+@@ -472,7 +469,7 @@
+             _phc._names.extend( copy->_idx, name );
+             _phc._cfg._bbs.map( copy->_idx, b );
+           }
+-          
++
+         } // End of is two-adr
+ 
+         // Insert a copy at a debug use for a lrg which has high frequency
+@@ -491,8 +488,8 @@
+ 
+             // If this lrg has a high frequency use/def
+             if( lrg._maxfreq >= OPTO_LRG_HIGH_FREQ ) {
+-              // If the live range is also live out of this block (like it 
+-              // would be for a fast/slow idiom), the normal spill mechanism 
++              // If the live range is also live out of this block (like it
++              // would be for a fast/slow idiom), the normal spill mechanism
+               // does an excellent job.  If it is not live out of this block
+               // (like it would be for debug info to uncommon trap) splitting
+               // the live range now allows a better allocation in the high
+@@ -500,7 +497,7 @@
+               //   Build_IFG_virtual has converted the live sets to
+               // live-IN info, not live-OUT info.
+               uint k;
+-              for( k=0; k < b->_num_succs; k++ ) 
++              for( k=0; k < b->_num_succs; k++ )
+                 if( _phc._live->live(b->_succs[k])->member( nidx ) )
+                   break;      // Live in to some successor block?
+               if( k < b->_num_succs )
+@@ -559,7 +556,7 @@
+   for( i=0; i<b->_num_succs; i++ ) {
+     Block *bs = b->_succs[i];
+     // Find index of 'b' in 'bs' predecessors
+-    uint j=1; 
++    uint j=1;
+     while( _phc._cfg._bbs[bs->pred(j)->_idx] != b ) j++;
+     // Visit all the Phis in successor block
+     for( uint k = 1; k<bs->_nodes.size(); k++ ) {
+@@ -568,7 +565,7 @@
+       combine_these_two( n, n->in(j) );
+     }
+   } // End of for all successor blocks
+-  
++
+ 
+   // Check _this_ block for 2-address instructions and copies.
+   uint cnt = b->end_idx();
+@@ -608,7 +605,7 @@
+   // and def_copy powers the other.  After merging, src_def powers
+   // the combined live range.
+   lrgs(lr1)._def = (lrgs(lr1)._def == NodeSentinel ||
+-                        lrgs(lr2)._def == NodeSentinel ) 
++                        lrgs(lr2)._def == NodeSentinel )
+     ? NodeSentinel : src_def;
+   lrgs(lr2)._def = NULL;    // No def for lrg 2
+   lrgs(lr2).Clear();        // Force empty mask for LRG 2
+@@ -619,7 +616,7 @@
+   if (lrgs(lr1)._maxfreq < lrgs(lr2)._maxfreq)
+     lrgs(lr1)._maxfreq = lrgs(lr2)._maxfreq;
+ 
+-  // Copy original value instead.  Intermediate copies go dead, and 
++  // Copy original value instead.  Intermediate copies go dead, and
+   // the dst_copy becomes useless.
+   int didx = dst_copy->is_Copy();
+   dst_copy->set_req( didx, src_def );
+@@ -629,8 +626,8 @@
+   dst_copy->replace_by( dst_copy->in(didx) );
+   dst_copy->set_req( didx, NULL);
+   b->_nodes.remove(bindex);
+-  if( bindex < b->_ihrp_index ) b->_ihrp_index--; 
+-  if( bindex < b->_fhrp_index ) b->_fhrp_index--; 
++  if( bindex < b->_ihrp_index ) b->_ihrp_index--;
++  if( bindex < b->_fhrp_index ) b->_fhrp_index--;
+ 
+   // Stretched lr1; add it to liveness of intermediate blocks
+   Block *b2 = _phc._cfg._bbs[src_copy->_idx];
+@@ -680,7 +677,7 @@
+         rm_size = rm.Size();
+         //if( rm._flags ) rm_size += 1000000;
+         if( reg_degree >= rm_size ) return max_juint;
+-      } 
++      }
+       if( rm.overlap(lrgs(lidx).mask()) ) {
+         // Insert lidx into union LRG; returns TRUE if actually inserted
+         if( _ulr.insert(lidx) ) {
+@@ -696,7 +693,7 @@
+               return max_juint;
+           } // End of if not infinite-stack neighbor
+         } // End of if actually inserted
+-      } // End of if live range overlaps 
++      } // End of if live range overlaps
+     } // End of else collect interferences for 1 node
+   } // End of while forever, scan back for interferences
+   return reg_degree;
+@@ -711,11 +708,11 @@
+   uint neighbor;
+   LRG &lrg1 = lrgs(lr1);
+   while ((neighbor = one.next()) != 0)
+-    if( !_ulr.member(neighbor) ) 
++    if( !_ulr.member(neighbor) )
+       if( _phc._ifg->neighbors(neighbor)->remove(lr1) )
+         lrgs(neighbor).inc_degree( -lrg1.compute_degree(lrgs(neighbor)) );
+ 
+-  
++
+   // lr2 is now called (coalesced into) lr1.
+   // Remove lr2 from the IFG.
+   IndexSetIterator two(n_lr2);
+@@ -723,7 +720,7 @@
+   while ((neighbor = two.next()) != 0)
+     if( _phc._ifg->neighbors(neighbor)->remove(lr2) )
+       lrgs(neighbor).inc_degree( -lrg2.compute_degree(lrgs(neighbor)) );
+-  
++
+   // Some neighbors of intermediate copies now interfere with the
+   // combined live range.
+   IndexSetIterator three(&_ulr);
+@@ -746,13 +743,13 @@
+ // final dest copy and the original src copy.  They can be the same Node.
+ // Compute the compatible register masks.
+ bool PhaseConservativeCoalesce::copy_copy( Node *dst_copy, Node *src_copy, Block *b, uint bindex ) {
+-  
++
+   if( !dst_copy->is_SpillCopy() ) return false;
+   if( !src_copy->is_SpillCopy() ) return false;
+   Node *src_def = src_copy->in(src_copy->is_Copy());
+   uint lr1 = _phc.Find(dst_copy);
+   uint lr2 = _phc.Find(src_def );
+-  
++
+   // Same live ranges already?
+   if( lr1 == lr2 ) return false;
+ 
+@@ -760,7 +757,7 @@
+   if( _phc._ifg->test_edge_sq( lr1, lr2 ) ) return false;
+ 
+   // Not an oop->int cast; oop->oop, int->int, AND int->oop are OK.
+-  if( !lrgs(lr1)._is_oop && lrgs(lr2)._is_oop ) // not an oop->int cast 
++  if( !lrgs(lr1)._is_oop && lrgs(lr2)._is_oop ) // not an oop->int cast
+     return false;
+ 
+   // Coalescing between an aligned live range and a mis-aligned live range?
+@@ -771,12 +768,12 @@
+   // Sort; use smaller live-range number
+   Node *lr1_node = dst_copy;
+   Node *lr2_node = src_def;
+-  if( lr1 > lr2 ) { 
+-    uint tmp = lr1; lr1 = lr2; lr2 = tmp; 
++  if( lr1 > lr2 ) {
++    uint tmp = lr1; lr1 = lr2; lr2 = tmp;
+     lr1_node = src_def;  lr2_node = dst_copy;
+   }
+ 
+-  // Check for compatibility of the 2 live ranges by 
++  // Check for compatibility of the 2 live ranges by
+   // intersecting their allowed register sets.
+   RegMask rm = lrgs(lr1).mask();
+   rm.AND(lrgs(lr2).mask());
+@@ -788,14 +785,14 @@
+   // Incompatible masks, no way to coalesce
+   if( rm_size == 0 ) return false;
+ 
+-  // Another early bail-out test is when we are double-coalescing and the 
++  // Another early bail-out test is when we are double-coalescing and the
+   // 2 copies are separated by some control flow.
+   if( dst_copy != src_copy ) {
+     Block *src_b = _phc._cfg._bbs[src_copy->_idx];
+     Block *b2 = b;
+     while( b2 != src_b ) {
+       if( b2->num_preds() > 2 ){// Found merge-point
+-        _phc._lost_opp_cflow_coalesce++; 
++        _phc._lost_opp_cflow_coalesce++;
+         // extra record_bias commented out because Chris believes it is not
+         // productive.  Since we can record only 1 bias, we want to choose one
+         // that stands a chance of working and this one probably does not.
+@@ -814,7 +811,7 @@
+     return false;
+   }
+ 
+-  // Now I need to compute all the interferences between dst_copy and 
++  // Now I need to compute all the interferences between dst_copy and
+   // src_copy.  I'm not willing to visit the entire interference graph, so
+   // I limit my search to things in dst_copy's block or in a straight
+   // line of previous blocks.  I give up at merge points or when I get
+@@ -825,7 +822,7 @@
+       record_bias( _phc._ifg, lr1, lr2 );
+       return false;
+     }
+-  } // End of if dst_copy & src_copy are different  
++  } // End of if dst_copy & src_copy are different
+ 
+ 
+   // ---- THE COMBINED LRG IS COLORABLE ----
+@@ -842,7 +839,7 @@
+   _ulr.remove(lr1);
+ 
+   // Uncomment the following code to trace Coalescing in great detail.
+-  // 
++  //
+   //if (false) {
+   //  tty->cr();
+   //  tty->print_cr("#######################################");
+@@ -912,8 +909,7 @@
+       i--;                      // Retry, same location in block
+       PhaseChaitin::_conserv_coalesce_pair++; // Collect stats on success
+       continue;
+-    } 
++    }
+     */
+   }
+ }
+-
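Aside, for illustration only — not part of this patch. Find_compress() above is Tarjan union-find specialized to live ranges: equivalence chains always point at smaller numbers ("always union smaller"), and the compressing walk flattens the path onto its fixed point. A standalone sketch with invented names:

#include <algorithm>
#include <cassert>
#include <vector>

struct UfMap {
    std::vector<unsigned> map;            // map[i] == i marks a root

    explicit UfMap(unsigned maxlrg) : map(maxlrg) {
        for (unsigned i = 0; i < maxlrg; i++)
            map[i] = i;                   // the identity mapping
    }
    // Find the root, then point every link on the chain straight at it.
    unsigned find_compress(unsigned lrg) {
        unsigned cur = lrg;
        unsigned next = map[cur];
        while (next != cur) {             // scan chain of equivalences
            assert(next < cur && "always union smaller");
            cur = next;                   // until we find a fixed point
            next = map[cur];
        }
        while (map[lrg] != cur) {         // compress the path walked
            unsigned tmp = map[lrg];
            map[lrg] = cur;
            lrg = tmp;
        }
        return cur;
    }
    // Merge two live ranges; the larger root is mapped onto the smaller.
    void do_union(unsigned a, unsigned b) {
        unsigned ra = find_compress(a), rb = find_compress(b);
        if (ra != rb)
            map[std::max(ra, rb)] = std::min(ra, rb);
    }
};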
+diff -ruN openjdk6/hotspot/src/share/vm/opto/coalesce.hpp openjdk/hotspot/src/share/vm/opto/coalesce.hpp
+--- openjdk6/hotspot/src/share/vm/opto/coalesce.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/coalesce.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)coalesce.hpp	1.44 07/05/05 17:06:14 JVM"
+-#endif
+ /*
+  * Copyright 1997-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class LoopTree;
+@@ -110,4 +107,3 @@
+ 
+   void update_ifg(uint lr1, uint lr2, IndexSet *n_lr1, IndexSet *n_lr2);
+ };
+-
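Aside, for illustration only — not part of this patch. insert_copy_with_overlap() in the coalesce.cpp hunks explains the hazard when leaving SSA form: emitting L161->L128 before L128->L132 clobbers L128 before its old value is read. Below is an illustrative sequentializer for such parallel copies, assuming no cyclic copies (a swap would need a temporary), as in the comment's example; all names are invented:

#include <cassert>
#include <utility>
#include <vector>

typedef std::pair<int, int> Copy;         // {dst_name, src_name}

// Emit a copy only once no pending copy still reads its destination.
std::vector<Copy> sequentialize(std::vector<Copy> pending) {
    std::vector<Copy> ordered;
    while (!pending.empty()) {
        bool progressed = false;
        for (size_t i = 0; i < pending.size(); i++) {
            int dst = pending[i].first;
            bool read_later = false;
            for (size_t j = 0; j < pending.size(); j++)
                if (j != i && pending[j].second == dst)
                    read_later = true;    // dst's old value still needed
            if (!read_later) {
                ordered.push_back(pending[i]);
                pending.erase(pending.begin() + i);
                progressed = true;
                break;
            }
        }
        // A cycle would need a temporary register to break it.
        assert(progressed && "cyclic parallel copies need a temporary");
        if (!progressed) break;
    }
    return ordered;
}

// sequentialize({{128, 161}, {132, 128}}) yields L128->L132 first and
// L161->L128 second, so L128's old value is consumed before the clobber.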
+diff -ruN openjdk6/hotspot/src/share/vm/opto/compile.cpp openjdk/hotspot/src/share/vm/opto/compile.cpp
+--- openjdk6/hotspot/src/share/vm/opto/compile.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/compile.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)compile.cpp	1.631 07/05/17 15:57:33 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -256,7 +253,7 @@
+   uint next = 0;
+   while( next < useful.size() ) {
+     Node *n = useful.at(next++);
+-    // Use raw traversal of out edges since this code removes out edges 
++    // Use raw traversal of out edges since this code removes out edges
+     int max = n->outcnt();
+     for (int j = 0; j < max; ++j ) {
+       Node* child = n->raw_out(j);
+@@ -278,7 +275,7 @@
+ 
+ //------------------------------frame_size_in_words-----------------------------
+ // frame_slots in units of words
+-int Compile::frame_size_in_words() const { 
++int Compile::frame_size_in_words() const {
+   // shift is 0 in LP32 and 1 in LP64
+   const int shift = (LogBytesPerWord - LogBytesPerInt);
+   int words = _frame_slots >> shift;
+@@ -295,7 +292,7 @@
+ 
+   ~CompileWrapper();
+ };
+-  
++
+ CompileWrapper::CompileWrapper(Compile* compile) : _compile(compile) {
+   // the Compile* pointer is stored in the current ciEnv:
+   ciEnv* env = compile->env();
+@@ -313,8 +310,13 @@
+   compile->init_type_arena();
+   Type::Initialize(compile);
+   _compile->set_scratch_buffer_blob(NULL);
++  _compile->begin_method();
+ }
+ CompileWrapper::~CompileWrapper() {
++  if (_compile->failing()) {
++    _compile->print_method("Failed");
++  }
++  _compile->end_method();
+   if (_compile->scratch_buffer_blob() != NULL)
+     BufferBlob::free(_compile->scratch_buffer_blob());
+   _compile->env()->set_compiler_data(NULL);
+@@ -436,6 +438,7 @@
+                   _node_bundling_base(NULL),
+ #ifndef PRODUCT
+                   _trace_opto_output(TraceOptoOutput || method()->has_option("TraceOptoOutput")),
++                  _printer(IdealGraphPrinter::printer()),
+ #endif
+                   _congraph(NULL) {
+   C = this;
+@@ -477,7 +480,7 @@
+   // Node list that Iterative GVN will start with
+   Unique_Node_List for_igvn(comp_arena());
+   set_for_igvn(&for_igvn);
+-  
++
+   // GVN that will be run immediately on new nodes
+   uint estimated_size = method()->code_size()*4+64;
+   estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
+@@ -561,6 +564,12 @@
+ 
+   // Drain the list.
+   Finish_Warm();
++#ifndef PRODUCT
++  if (_printer) {
++    _printer->print_inlining(this);
++  }
++#endif
++
+   if (failing())  return;
+   NOT_PRODUCT( verify_graph_edges(); )
+ 
+@@ -597,7 +606,7 @@
+   }
+ #endif
+ 
+-  // Now that we know the size of all the monitors we can add a fixed slot 
++  // Now that we know the size of all the monitors we can add a fixed slot
+   // for the original deopt pc.
+ 
+   _orig_pc_slot =  fixed_slots();
+@@ -608,7 +617,7 @@
+   Code_Gen();
+   if (failing())  return;
+ 
+-  // Check if we want to skip execution of all compiled code.  
++  // Check if we want to skip execution of all compiled code.
+   {
+ #ifndef PRODUCT
+     if (OptoNoExecute) {
+@@ -629,7 +638,7 @@
+     env()->register_method(_method, _entry_bci,
+                            &_code_offsets,
+                            _orig_pc_slot_offset_in_bytes,
+-                           code_buffer(), 
++                           code_buffer(),
+                            frame_size_in_words(), _oop_map_set,
+                            &_handler_table, &_inc_table,
+                            compiler,
+@@ -672,6 +681,7 @@
+     _node_bundling_base(NULL),
+ #ifndef PRODUCT
+     _trace_opto_output(TraceOptoOutput),
++    _printer(NULL),
+ #endif
+     _congraph(NULL) {
+   C = this;
+@@ -736,7 +746,7 @@
+ }
+ #endif
+ 
+-void Compile::print_codes() { 
++void Compile::print_codes() {
+ }
+ 
+ //------------------------------Init-------------------------------------------
+@@ -763,6 +773,7 @@
+   set_root(new (this, 3) RootNode());
+   // Now that you have a Root to point to, create the real TOP
+   set_cached_top_node( new (this, 1) ConNode(Type::TOP) );
++  set_recent_alloc(NULL, NULL);
+ 
+   // Create Debug Information Recorder to record scopes, oopmaps, etc.
+   env()->set_oop_recorder(new OopRecorder(comp_arena()));
+@@ -794,7 +805,7 @@
+     set_do_inlining(Inline);
+     set_max_inline_size(MaxInlineSize);
+     set_freq_inline_size(FreqInlineSize);
+-    set_do_scheduling(OptoScheduling);  
++    set_do_scheduling(OptoScheduling);
+     set_do_count_invocations(false);
+     set_do_method_data_update(false);
+   }
+@@ -807,7 +818,7 @@
+ 
+   // // -- Initialize types before each compile --
+   // // Update cached type information
+-  // if( _method && _method->constants() ) 
++  // if( _method && _method->constants() )
+   //   Type::update_loaded_types(_method, _method->constants());
+ 
+   // Init alias_type map.
+@@ -849,7 +860,7 @@
+   assert(!failing(), "");
+   for (DUIterator_Fast imax, i = root()->fast_outs(imax); i < imax; i++) {
+     Node* start = root()->fast_out(i);
+-    if( start->is_Start() ) 
++    if( start->is_Start() )
+       return start->as_Start();
+   }
+   ShouldNotReachHere();
+@@ -988,7 +999,7 @@
+         tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,Type::OffsetBot, ta->instance_id());
+       } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
+         // range is OK as-is.
+-        tj = ta = TypeAryPtr::RANGE; 
++        tj = ta = TypeAryPtr::RANGE;
+       } else if( offset == oopDesc::klass_offset_in_bytes() ) {
+         tj = TypeInstPtr::KLASS; // all klass loads look alike
+         ta = TypeAryPtr::RANGE; // generic ignored junk
+@@ -997,7 +1008,7 @@
+         tj = TypeInstPtr::MARK;
+         ta = TypeAryPtr::RANGE; // generic ignored junk
+         ptr = TypePtr::BotPTR;
+-      } else {                  // Random constant offset into array body 
++      } else {                  // Random constant offset into array body
+         offset = Type::OffsetBot;   // Flatten constant access into array body
+         tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,Type::OffsetBot, ta->instance_id());
+       }
+@@ -1068,7 +1079,7 @@
+     // If we are referencing a field within a Klass, we need
+     // to assume the worst case of an Object.  Both exact and
+     // inexact types must flatten to the same alias class.
+-    // Since the flattened result for a klass is defined to be 
++    // Since the flattened result for a klass is defined to be
+     // precisely java.lang.Object, use a constant ptr.
+     if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
+ 
+@@ -1109,8 +1120,8 @@
+ 
+   // Flatten all to bottom for now
+   switch( _AliasLevel ) {
+-  case 0: 
+-    tj = TypePtr::BOTTOM; 
++  case 0:
++    tj = TypePtr::BOTTOM;
+     break;
+   case 1:                       // Flatten to: oop, static, field or array
+     switch (tj->base()) {
+@@ -1128,21 +1139,21 @@
+     break;
+   default:
+     Unimplemented();
+-  } 
++  }
+ 
+   offset = tj->offset();
+   assert( offset != Type::OffsetTop, "Offset has fallen from constant" );
+-  
++
+   assert( (offset != Type::OffsetBot && tj->base() != Type::AryPtr) ||
+           (offset == Type::OffsetBot && tj->base() == Type::AryPtr) ||
+           (offset == Type::OffsetBot && tj == TypeOopPtr::BOTTOM) ||
+           (offset == Type::OffsetBot && tj == TypePtr::BOTTOM) ||
+           (offset == oopDesc::mark_offset_in_bytes() && tj->base() == Type::AryPtr) ||
+           (offset == oopDesc::klass_offset_in_bytes() && tj->base() == Type::AryPtr) ||
+-          (offset == arrayOopDesc::length_offset_in_bytes() && tj->base() == Type::AryPtr)  , 
++          (offset == arrayOopDesc::length_offset_in_bytes() && tj->base() == Type::AryPtr)  ,
+           "For oops, klasses, raw offset must be constant; for arrays the offset is never known" );
+   assert( tj->ptr() != TypePtr::TopPTR &&
+-          tj->ptr() != TypePtr::AnyNull &&          
++          tj->ptr() != TypePtr::AnyNull &&
+           tj->ptr() != TypePtr::Null, "No imprecise addresses" );
+ //    assert( tj->ptr() != TypePtr::Constant ||
+ //            tj->base() == Type::RawPtr ||
+@@ -1177,7 +1188,7 @@
+         st->print(" +any");
+   else  st->print(" +%-3d", offset);
+   st->print(" in ");
+-  adr_type()->dump();
++  adr_type()->dump_on(st);
+   const TypeOopPtr* tjp = adr_type()->isa_oopptr();
+   if (field() != NULL && tjp) {
+     if (tjp->klass()  != field()->holder() ||
+@@ -1447,6 +1458,8 @@
+ 
+   NOT_PRODUCT( verify_graph_edges(); )
+ 
++  print_method("Start");
++
+  {
+   // Iterative Global Value Numbering, including ideal transforms
+   // Initialize IterGVN with types and values from parse-time GVN
+@@ -1455,6 +1468,9 @@
+     NOT_PRODUCT( TracePhase t2("iterGVN", &_t_iterGVN, TimeCompiler); )
+     igvn.optimize();
+   }
++
++  print_method("Iter GVN 1", 2);
++
+   if (failing())  return;
+ 
+   // get rid of the connection graph since it's information is not
+@@ -1472,6 +1488,7 @@
+       TracePhase t2("idealLoop", &_t_idealLoop, true);
+       PhaseIdealLoop ideal_loop( igvn, NULL, true );
+       loop_opts_cnt--;
++      if (major_progress()) print_method("PhaseIdealLoop 1", 2);
+       if (failing())  return;
+     }
+     // Loop opts pass if partial peeling occurred in previous pass
+@@ -1479,6 +1496,7 @@
+       TracePhase t3("idealLoop", &_t_idealLoop, true);
+       PhaseIdealLoop ideal_loop( igvn, NULL, false );
+       loop_opts_cnt--;
++      if (major_progress()) print_method("PhaseIdealLoop 2", 2);
+       if (failing())  return;
+     }
+     // Loop opts pass for loop-unrolling before CCP
+@@ -1486,36 +1504,43 @@
+       TracePhase t4("idealLoop", &_t_idealLoop, true);
+       PhaseIdealLoop ideal_loop( igvn, NULL, false );
+       loop_opts_cnt--;
++      if (major_progress()) print_method("PhaseIdealLoop 3", 2);
+     }
+   }
+   if (failing())  return;
+ 
+   // Conditional Constant Propagation;
+-  PhaseCCP ccp( &igvn ); 
++  PhaseCCP ccp( &igvn );
+   assert( true, "Break here to ccp.dump_nodes_and_types(_root,999,1)");
+   {
+     TracePhase t2("ccp", &_t_ccp, true);
+     ccp.do_transform();
+   }
++  print_method("PhaseCPP 1", 2);
++
+   assert( true, "Break here to ccp.dump_old2new_map()");
+-    
++
+   // Iterative Global Value Numbering, including ideal transforms
+   {
+     NOT_PRODUCT( TracePhase t2("iterGVN2", &_t_iterGVN2, TimeCompiler); )
+     igvn = ccp;
+     igvn.optimize();
+   }
++
++  print_method("Iter GVN 2", 2);
++
+   if (failing())  return;
+ 
+   // Loop transforms on the ideal graph.  Range Check Elimination,
+   // peeling, unrolling, etc.
+   if(loop_opts_cnt > 0) {
+     debug_only( int cnt = 0; );
+-    while(major_progress() && (loop_opts_cnt > 0)) {      
++    while(major_progress() && (loop_opts_cnt > 0)) {
+       TracePhase t2("idealLoop", &_t_idealLoop, true);
+       assert( cnt++ < 40, "infinite cycle in loop optimization" );
+       PhaseIdealLoop ideal_loop( igvn, NULL, true );
+       loop_opts_cnt--;
++      if (major_progress()) print_method("PhaseIdealLoop iterations", 2);
+       if (failing())  return;
+     }
+   }
+@@ -1538,6 +1563,8 @@
+       return;
+     }
+   }
++
++  print_method("Optimize finished", 2);
+ }
+ 
+ 
+@@ -1583,6 +1610,9 @@
+ 
+     cfg.Estimate_Block_Frequency();
+     cfg.GlobalCodeMotion(m,unique(),proj_list);
++
++    print_method("Global code motion", 2);
++
+     if (failing())  return;
+     NOT_PRODUCT( verify_graph_edges(); )
+ 
+@@ -1609,8 +1639,8 @@
+   }
+ 
+   // Prior to register allocation we kept empty basic blocks in case the
+-  // the allocator needed a place to spill.  After register allocation we 
+-  // are not adding any new instructions.  If any basic block is empty, we 
++  // the allocator needed a place to spill.  After register allocation we
++  // are not adding any new instructions.  If any basic block is empty, we
+   // can now safely remove it.
+   {
+     NOT_PRODUCT( TracePhase t2("removeEmpty", &_t_removeEmptyBlocks, TimeCompiler); )
+@@ -1635,6 +1665,8 @@
+     Output();
+   }
+ 
++  print_method("End");
++
+   // He's dead, Jim.
+   _cfg     = (PhaseCFG*)0xdeadbeef;
+   _regalloc = (PhaseChaitin*)0xdeadbeef;
+@@ -1670,8 +1702,8 @@
+       tty->print_cr("        # Empty connector block");
+     } else if (b->num_preds() == 2 && b->pred(1)->is_CatchProj() && b->pred(1)->as_CatchProj()->_con == CatchProjNode::fall_through_index) {
+       tty->print_cr("        # Block is sole successor of call");
+-    } 
+-    
++    }
++
+     // For all instructions
+     Node *delay = NULL;
+     for( uint j = 0; j<b->_nodes.size(); j++ ) {
+@@ -1703,7 +1735,7 @@
+         tty->print(" %c ", starts_bundle);
+         starts_bundle = ' ';
+         tty->print("\t");
+-        n->format(_regalloc);
++        n->format(_regalloc, tty);
+         tty->cr();
+       }
+ 
+@@ -1720,7 +1752,7 @@
+         tty->print(" %c ", starts_bundle);
+         starts_bundle = ' ';
+         tty->print("\t");
+-        delay->format(_regalloc);
++        delay->format(_regalloc, tty);
+         tty->print_cr("");
+         delay = NULL;
+       }
+@@ -1747,17 +1779,17 @@
+ #endif
+ 
+ //------------------------------Final_Reshape_Counts---------------------------
+-// This class defines counters to help identify when a method 
++// This class defines counters to help identify when a method
+ // may/must be executed using hardware with only 24-bit precision.
+ struct Final_Reshape_Counts : public StackObj {
+-  int  _call_count;             // count non-inlined 'common' calls 
++  int  _call_count;             // count non-inlined 'common' calls
+   int  _float_count;            // count float ops requiring 24-bit precision
+   int  _double_count;           // count double ops requiring more precision
+-  int  _java_call_count;        // count non-inlined 'java' calls 
++  int  _java_call_count;        // count non-inlined 'java' calls
+   VectorSet _visited;           // Visitation flags
+   Node_List _tests;             // Set of IfNodes & PCTableNodes
+ 
+-  Final_Reshape_Counts() : 
++  Final_Reshape_Counts() :
+     _call_count(0), _float_count(0), _double_count(0), _java_call_count(0),
+     _visited( Thread::current()->resource_area() ) { }
+ 
+@@ -1797,7 +1829,7 @@
+     case Op_AddI:  case Op_AddF:  case Op_AddD:  case Op_AddL:
+     case Op_MaxI:  case Op_MinI:
+     case Op_MulI:  case Op_MulF:  case Op_MulD:  case Op_MulL:
+-    case Op_AndL:  case Op_XorL:  case Op_OrL: 
++    case Op_AndL:  case Op_XorL:  case Op_OrL:
+     case Op_AndI:  case Op_XorI:  case Op_OrI: {
+       // Move "last use" input to left by swapping inputs
+       n->swap_edges(1, 2);
+@@ -1862,7 +1894,7 @@
+     CallNode *call = n->as_Call();
+     // Count call sites where the FP mode bit would have to be flipped.
+     // Do not count uncommon runtime calls:
+-    // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking, 
++    // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
+     // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
+     if( !call->is_CallStaticJava() || !call->as_CallStaticJava()->_name ) {
+       fpu.inc_call_count();   // Count the call site
+@@ -1870,10 +1902,10 @@
+       Node *n = call->in(TypeFunc::Parms);
+       int nop = n->Opcode();
+       // Clone shared simple arguments to uncommon calls, item (1).
+-      if( n->outcnt() > 1 && 
+-          !n->is_Proj() && 
+-          nop != Op_CreateEx && 
+-          nop != Op_CheckCastPP && 
++      if( n->outcnt() > 1 &&
++          !n->is_Proj() &&
++          nop != Op_CreateEx &&
++          nop != Op_CheckCastPP &&
+           !n->is_Mem() ) {
+         Node *x = n->clone();
+         call->set_req( TypeFunc::Parms, x );
+@@ -1933,9 +1965,9 @@
+ 
+   case Op_AddP: {               // Assert sane base pointers
+     const Node *addp = n->in(AddPNode::Address);
+-    assert( !addp->is_AddP() || 
++    assert( !addp->is_AddP() ||
+             addp->in(AddPNode::Base)->is_top() || // Top OK for allocation
+-            addp->in(AddPNode::Base) == n->in(AddPNode::Base), 
++            addp->in(AddPNode::Base) == n->in(AddPNode::Base),
+             "Base pointers must match" );
+     break;
+   }
+@@ -2025,10 +2057,10 @@
+       n->replace_by(btp);
+     }
+     break;
+-  default: 
++  default:
+     assert( !n->is_Call(), "" );
+     assert( !n->is_Mem(), "" );
+-    if( n->is_If() || n->is_PCTable() ) 
++    if( n->is_If() || n->is_PCTable() )
+       fpu._tests.push(n);       // Collect CFG split points
+     break;
+   }
+@@ -2049,7 +2081,7 @@
+       ++i;
+       if (m != NULL && !fpu._visited.test_set(m->_idx)) {
+         cnt = m->req();
+-        nstack.push(n, i); // put on stack parent and next input's index 
++        nstack.push(n, i); // put on stack parent and next input's index
+         n = m;
+         i = 0;
+       }
+@@ -2058,7 +2090,7 @@
+       final_graph_reshaping_impl( n, fpu );
+       if (nstack.is_empty())
+         break;             // finished
+-      n = nstack.node();   // Get node from stack 
++      n = nstack.node();   // Get node from stack
+       cnt = n->req();
+       i = nstack.index();
+       nstack.pop();        // Shift to the next node on stack
+@@ -2067,10 +2099,10 @@
+ }
+ 
+ //------------------------------final_graph_reshaping--------------------------
+-// Final Graph Reshaping.  
++// Final Graph Reshaping.
+ //
+ // (1) Clone simple inputs to uncommon calls, so they can be scheduled late
+-//     and not commoned up and forced early.  Must come after regular 
++//     and not commoned up and forced early.  Must come after regular
+ //     optimizations to avoid GVN undoing the cloning.  Clone constant
+ //     inputs to Loop Phis; these will be split by the allocator anyways.
+ //     Remove Opaque nodes.
+@@ -2085,7 +2117,7 @@
+ //     clearing the mode bit around call sites).  The mode bit is only used
+ //     if the relative frequency of single FP ops to calls is low enough.
+ //     This is a key transform for SPEC mpeg_audio.
+-// (4) Detect infinite loops; blobs of code reachable from above but not 
++// (4) Detect infinite loops; blobs of code reachable from above but not
+ //     below.  Several of the Code_Gen algorithms fail on such code shapes,
+ //     so we simply bail out.  Happens a lot in ZKM.jar, but also happens
+ //     from time to time in other codes (such as -Xcomp finalizer loops, etc).
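Item (4) above bails out on code that is reachable from the entry but can never reach the exit, since several of the Code_Gen algorithms fail on that shape. A minimal standalone sketch of the reachability test on plain adjacency lists (illustrative only, not C2 IR): a forward DFS from the entry plus a backward DFS from the exit.

    #include <vector>

    static void dfs(int start, const std::vector<std::vector<int>>& adj,
                    std::vector<bool>& seen) {
      std::vector<int> stack(1, start);
      while (!stack.empty()) {
        int v = stack.back(); stack.pop_back();
        if (seen[v]) continue;
        seen[v] = true;
        for (int s : adj[v]) stack.push_back(s);
      }
    }

    // True when some node is reachable from 'entry' but cannot reach 'exit':
    // the "blob of code reachable from above but not below" shape.
    bool has_infinite_loop(const std::vector<std::vector<int>>& succ,
                           const std::vector<std::vector<int>>& pred,
                           int entry, int exit) {
      std::vector<bool> from_entry(succ.size()), to_exit(succ.size());
      dfs(entry, succ, from_entry);  // forward reachability
      dfs(exit, pred, to_exit);      // reaches exit, via reversed edges
      for (size_t i = 0; i < succ.size(); ++i)
        if (from_entry[i] && !to_exit[i]) return true;
      return false;
    }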
+@@ -2123,15 +2155,26 @@
+           CallNode *call = n->in(0)->in(0)->as_Call();
+           if (call->entry_point() == OptoRuntime::rethrow_stub()) {
+             expected_kids--;      // Rethrow always has 1 less kid
+-          } else if (call->req() > TypeFunc::Parms) {
+-            // Check for null receiver. In such case, the optimizer has 
+-            // detected that the virtual call will always result in a null 
+-            // pointer exception. The fall-through projection of this CatchNode 
++          } else if (call->req() > TypeFunc::Parms &&
++                     call->is_CallDynamicJava()) {
++            // Check for null receiver. In such case, the optimizer has
++            // detected that the virtual call will always result in a null
++            // pointer exception. The fall-through projection of this CatchNode
+             // will not be populated.
+             Node *arg0 = call->in(TypeFunc::Parms);
+-            if (call->is_CallDynamicJava() &&
+-                arg0->is_Type() &&
+-                arg0->as_Type()->type()->higher_equal(TypePtr::NULL_PTR)) { 
++            if (arg0->is_Type() &&
++                arg0->as_Type()->type()->higher_equal(TypePtr::NULL_PTR)) {
++              expected_kids--;
++            }
++          } else if (call->entry_point() == OptoRuntime::new_array_Java() &&
++                     call->req() > TypeFunc::Parms+1 &&
++                     call->is_CallStaticJava()) {
++            // Check for negative array length. In such case, the optimizer has
++            // detected that the allocation attempt will always result in an
++            // exception. There is no fall-through projection of this CatchNode.
++            Node *arg1 = call->in(TypeFunc::Parms+1);
++            if (arg1->is_Type() &&
++                arg1->as_Type()->type()->join(TypeInt::POS)->empty()) {
+               expected_kids--;
+             }
+           }
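The `join(TypeInt::POS)->empty()` test above narrows the length argument's type by the non-negative int range; an empty result means the length is provably negative and the allocation always throws. A standalone sketch of the same idea with plain integer ranges (illustrative types, not C2's type lattice, where "join" narrows):

    #include <algorithm>
    #include <climits>

    struct Range { int lo, hi; bool empty() const { return lo > hi; } };

    // On integer ranges, C2's narrowing "join" is just intersection.
    Range join(Range a, Range b) {
      return { std::max(a.lo, b.lo), std::min(a.hi, b.hi) };
    }

    const Range POS = { 0, INT_MAX };  // stand-in for TypeInt::POS

    // join({-8,-1}, POS) is empty: the allocation always throws, so the
    // CatchNode's fall-through projection is never populated.
    bool always_throws(Range len) { return join(len, POS).empty(); }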
+@@ -2154,8 +2197,8 @@
+ 
+   // If original bytecodes contained a mixture of floats and doubles
+   // check if the optimizer has made it homogenous, item (3).
+-  if( Use24BitFPMode && Use24BitFP && 
+-      fpu.get_float_count() > 32 && 
++  if( Use24BitFPMode && Use24BitFP &&
++      fpu.get_float_count() > 32 &&
+       fpu.get_double_count() == 0 &&
+       (10 * fpu.get_call_count() < fpu.get_float_count()) ) {
+     set_24_bit_selection_and_mode( false,  true );
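Restated as a standalone predicate, the item-(3) heuristic above (additionally gated by the Use24BitFPMode and Use24BitFP flags) flips to 24-bit mode only when floats clearly dominate: more than 32 float ops, no double ops needing the full precision, and at least ten float ops per call site, since every call would otherwise pay to toggle the FP mode bit. A sketch:

    // Mirrors the condition guarding set_24_bit_selection_and_mode(false, true).
    bool flip_to_24bit(int float_count, int double_count, int call_count) {
      return float_count > 32 &&
             double_count == 0 &&
             10 * call_count < float_count;
    }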
+@@ -2170,8 +2213,8 @@
+ //-----------------------------too_many_traps----------------------------------
+ // Report if there are too many traps at the current method and bci.
+ // Return true if there was a trap, and/or PerMethodTrapLimit is exceeded.
+-bool Compile::too_many_traps(ciMethod* method, 
+-                             int bci, 
++bool Compile::too_many_traps(ciMethod* method,
++                             int bci,
+                              Deoptimization::DeoptReason reason) {
+   ciMethodData* md = method->method_data();
+   if (md->is_empty()) {
+@@ -2218,8 +2261,8 @@
+ // Consults PerBytecodeRecompilationCutoff and PerMethodRecompilationCutoff.
+ // Is not eager to return true, since this will cause the compiler to use
+ // Action_none for a trap point, to avoid too many recompilations.
+-bool Compile::too_many_recompiles(ciMethod* method, 
+-                                  int bci, 
++bool Compile::too_many_recompiles(ciMethod* method,
++                                  int bci,
+                                   Deoptimization::DeoptReason reason) {
+   ciMethodData* md = method->method_data();
+   if (md->is_empty()) {
+diff -ruN openjdk6/hotspot/src/share/vm/opto/compile.hpp openjdk/hotspot/src/share/vm/opto/compile.hpp
+--- openjdk6/hotspot/src/share/vm/opto/compile.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/compile.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)compile.hpp	1.230 07/05/17 15:57:38 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class Block;
+@@ -60,7 +57,7 @@
+ #endif
+ 
+ //------------------------------Compile----------------------------------------
+-// This class defines a top-level Compiler invocation. 
++// This class defines a top-level Compiler invocation.
+ 
+ class Compile : public Phase {
+  public:
+@@ -174,6 +171,9 @@
+   GrowableArray<CallGenerator*>* _intrinsics;   // List of intrinsics.
+   GrowableArray<Node*>* _macro_nodes;           // List of nodes which need to be expanded before matching.
+   ConnectionGraph*      _congraph;
++#ifndef PRODUCT
++  IdealGraphPrinter*    _printer;
++#endif
+ 
+   // Node management
+   uint                  _unique;                // Counter for unique Node indices
+@@ -185,6 +185,9 @@
+ 
+   Node*                 _immutable_memory;      // Initial memory state
+ 
++  Node*                 _recent_alloc_obj;
++  Node*                 _recent_alloc_ctl;
++
+   // Blocked array of debugging and profiling information,
+   // tracked per node.
+   enum { _log2_node_notes_block_size = 8,
+@@ -219,7 +222,7 @@
+   PhaseCFG*             _cfg;                   // Results of CFG finding
+   bool                  _select_24_bit_instr;   // We selected an instruction with a 24-bit result
+   bool                  _in_24_bit_fp_mode;     // We are emitting instructions with 24-bit results
+-  bool                  _has_java_calls;        // True if the method has java calls 
++  bool                  _has_java_calls;        // True if the method has java calls
+   Matcher*              _matcher;               // Engine to map ideal to machine instructions
+   PhaseRegAlloc*        _regalloc;              // Results of register allocation.
+   int                   _frame_slots;           // Size of total frame in stack slots
+@@ -316,6 +319,23 @@
+ #ifndef PRODUCT
+   bool          trace_opto_output() const       { return _trace_opto_output; }
+ #endif
++
++  void begin_method() {
++#ifndef PRODUCT
++    if (_printer) _printer->begin_method(this);
++#endif
++  }
++  void print_method(const char * name, int level = 1) {
++#ifndef PRODUCT
++    if (_printer) _printer->print_method(this, name, level);
++#endif
++  }
++  void end_method() {
++#ifndef PRODUCT
++    if (_printer) _printer->end_method();
++#endif
++  }
++
+   int           macro_count()                   { return _macro_nodes->length(); }
+   Node*         macro_node(int idx)             { return _macro_nodes->at(idx); }
+   ConnectionGraph* congraph()                   { return _congraph;}
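The begin_method/print_method/end_method hooks added above follow a common HotSpot pattern: a pointer to a debug printer that stays NULL unless dumping is enabled, with every call site compiled away entirely in PRODUCT builds. A minimal standalone sketch of that pattern (names hypothetical):

    class IRPrinter {                         // stand-in for IdealGraphPrinter
     public:
      void phase(const char* name, int level) { /* dump the graph here */ }
    };

    class Pipeline {
    #ifndef PRODUCT
      IRPrinter* _printer = nullptr;          // set only when dumping is enabled
    #endif
     public:
      void print_method(const char* name, int level = 1) {
    #ifndef PRODUCT
        if (_printer) _printer->phase(name, level);  // cheap no-op otherwise
    #endif
      }
    };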
+@@ -340,14 +360,14 @@
+   bool              failure_reason_is(const char* r) { return (r==_failure_reason) || (r!=NULL && _failure_reason!=NULL && strcmp(r, _failure_reason)==0); }
+ 
+   void record_failure(const char* reason);
+-  void record_method_not_compilable(const char* reason, bool all_tiers = false) { 
++  void record_method_not_compilable(const char* reason, bool all_tiers = false) {
+     // All bailouts cover "all_tiers" when TieredCompilation is off.
+     if (!TieredCompilation) all_tiers = true;
+     env()->record_method_not_compilable(reason, all_tiers);
+     // Record failure reason.
+     record_failure(reason);
+   }
+-  void record_method_not_compilable_all_tiers(const char* reason) { 
++  void record_method_not_compilable_all_tiers(const char* reason) {
+     record_method_not_compilable(reason, true);
+   }
+   bool check_node_count(uint margin, const char* reason) {
+@@ -373,6 +393,13 @@
+   void         init_start(StartNode* s);
+   Node*             immutable_memory();
+ 
++  Node*             recent_alloc_ctl() const    { return _recent_alloc_ctl; }
++  Node*             recent_alloc_obj() const    { return _recent_alloc_obj; }
++  void          set_recent_alloc(Node* ctl, Node* obj) {
++                                                  _recent_alloc_ctl = ctl;
++                                                  _recent_alloc_obj = obj;
++                                                }
++
+   // Handy undefined Node
+   Node*             top() const                 { return _top; }
+ 
+@@ -537,7 +564,7 @@
+ 
+   // Second major entry point.  From the TypeFunc signature, generate code
+   // to pass arguments from the Java calling convention to the C calling
+-  // convention.  
++  // convention.
+   Compile(ciEnv* ci_env, const TypeFunc *(*gen)(),
+           address stub_function, const char *stub_name,
+           int is_fancy_jump, bool pass_tls,
+@@ -565,7 +592,7 @@
+ 
+   // returns true if adr overlaps with the given alias category
+   bool can_alias(const TypePtr* adr, int alias_idx);
+-  
++
+   // Driver for converting compiler's IR into machine code bits
+   void Output();
+ 
+@@ -590,7 +617,7 @@
+   // Determine which variable sized branches can be shortened
+   void Shorten_branches(Label *labels, int& code_size, int& reloc_size, int& stub_size, int& const_size);
+ 
+-  // Compute the size of first NumberOfLoopInstrToAlign instructions 
++  // Compute the size of first NumberOfLoopInstrToAlign instructions
+   // at the head of a loop.
+   void compute_loop_first_inst_sizes();
+ 
+@@ -603,7 +630,7 @@
+   uint in_preserve_stack_slots();
+ 
+   // "Top of Stack" slots that may be unused by the calling convention but must
+-  // otherwise be preserved.  
++  // otherwise be preserved.
+   // On Intel these are not necessary and the value can be zero.
+   // On Sparc this describes the words reserved for storing a register window
+   // when an interrupt occurs.
+diff -ruN openjdk6/hotspot/src/share/vm/opto/connode.cpp openjdk/hotspot/src/share/vm/opto/connode.cpp
+--- openjdk6/hotspot/src/share/vm/opto/connode.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/connode.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)connode.cpp	1.217 07/05/17 15:57:42 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Optimization - Graph Style
+@@ -81,7 +78,7 @@
+ Alas, the CastPP's interfere with GVN (some values are the regular oop, some
+ are the CastPP of the oop, all merge at Phi's which cannot collapse, etc).
+ This cost us 10% on SpecJVM, even when I removed some of the more trivial
+-cases in the optimizer.  Removing more useless Phi's started allowing Loads to 
++cases in the optimizer.  Removing more useless Phi's started allowing Loads to
+ illegally float above null checks.  I gave up on this approach.
+ 
+ (4) Add BOTH control edges to both tests.  Alas, too much code knows that
+@@ -99,12 +96,12 @@
+ 
+ 
+ //------------------------------Ideal------------------------------------------
+-// Return a node which is more "ideal" than the current node.  
++// Return a node which is more "ideal" than the current node.
+ // Move constants to the right.
+ Node *CMoveNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+   if( in(0) && remove_dead_region(phase, can_reshape) ) return this;
+-  assert( !phase->eqv(in(Condition), this) &&  
+-          !phase->eqv(in(IfFalse), this) && 
++  assert( !phase->eqv(in(Condition), this) &&
++          !phase->eqv(in(IfFalse), this) &&
+           !phase->eqv(in(IfTrue), this), "dead loop in CMoveNode::Ideal" );
+   if( phase->type(in(Condition)) == Type::TOP )
+     return NULL; // return NULL when Condition is dead
+@@ -129,7 +126,7 @@
+       (phase->eqv(cmp->in(2),f) &&
+        phase->eqv(cmp->in(1),t)) ) {
+     // Check for "(t==f)?t:f;" and replace with "f"
+-    if( b->_test._test == BoolTest::eq ) 
++    if( b->_test._test == BoolTest::eq )
+       return f;
+     // Allow the inverted case as well
+     // Check for "(t!=f)?t:f;" and replace with "t"
+@@ -145,9 +142,9 @@
+ Node *CMoveNode::Identity( PhaseTransform *phase ) {
+   if( phase->eqv(in(IfFalse),in(IfTrue)) ) // C-moving identical inputs?
+     return in(IfFalse);         // Then it doesn't matter
+-  if( phase->type(in(Condition)) == TypeInt::ZERO ) 
++  if( phase->type(in(Condition)) == TypeInt::ZERO )
+     return in(IfFalse);         // Always pick left(false) input
+-  if( phase->type(in(Condition)) == TypeInt::ONE ) 
++  if( phase->type(in(Condition)) == TypeInt::ONE )
+     return in(IfTrue);          // Always pick right(true) input
+ 
+   // Check for CMove'ing a constant after comparing against the constant.
+@@ -195,7 +192,7 @@
+ 
+ //=============================================================================
+ //------------------------------Ideal------------------------------------------
+-// Return a node which is more "ideal" than the current node.  
++// Return a node which is more "ideal" than the current node.
+ // Check for conversions to boolean
+ Node *CMoveINode::Ideal(PhaseGVN *phase, bool can_reshape) {
+   // Try generic ideal's first
+@@ -247,14 +244,14 @@
+       return NULL;
+     flip = 1 - flip;
+   } else return NULL;
+-  
++
+   // Convert to a bool (flipped)
+   // Build int->bool conversion
+ #ifndef PRODUCT
+   if( PrintOpto ) tty->print_cr("CMOV to I2B");
+ #endif
+   Node *n = new (phase->C, 2) Conv2BNode( cmp->in(1) );
+-  if( flip ) 
++  if( flip )
+     n = new (phase->C, 3) XorINode( phase->transform(n), phase->intcon(1) );
+ 
+   return n;
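The transform above rewrites a conditional move that materializes 0/1 into an explicit int-to-bool conversion, XOR-ing with 1 when the sense is inverted. In source-level terms (a sketch, not node-building code):

    // CMove(x != 0 ? 1 : 0)  ==>  Conv2B(x)
    // CMove(x == 0 ? 1 : 0)  ==>  Conv2B(x) ^ 1
    int conv2b(int x)         { return x != 0; }         // the Conv2BNode
    int conv2b_flipped(int x) { return conv2b(x) ^ 1; }  // XorINode with intcon(1)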
+@@ -262,7 +259,7 @@
+ 
+ //=============================================================================
+ //------------------------------Ideal------------------------------------------
+-// Return a node which is more "ideal" than the current node.  
++// Return a node which is more "ideal" than the current node.
+ // Check for absolute value
+ Node *CMoveFNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+   // Try generic ideal's first
+@@ -305,8 +302,8 @@
+   Node *sub = in(phi_sub_idx);
+ 
+   // Allow only SubF(0,X) and fail out for all others; NegF is not OK
+-  if( sub->Opcode() != Op_SubF || 
+-      sub->in(2) != X || 
++  if( sub->Opcode() != Op_SubF ||
++      sub->in(2) != X ||
+       phase->type(sub->in(1)) != TypeF::ZERO ) return NULL;
+ 
+   Node *abs = new (phase->C, 2) AbsFNode( X );
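The source-level idiom being recognized here is essentially `x < 0.0f ? 0.0f - x : x`. Note that `0 - x` and `-x` are not interchangeable in IEEE arithmetic in general, since they differ at x == +0.0, which is presumably why the match is restricted to the exact SubF(0, X) shape and NegF is rejected. A standalone illustration (sketch):

    #include <cassert>
    #include <cmath>

    float abs_idiom(float x) { return x < 0.0f ? 0.0f - x : x; }

    int main() {
      assert(abs_idiom(-3.5f) == std::fabs(-3.5f));
      assert(!std::signbit(0.0f - 0.0f));  // SubF(0, +0.0) -> +0.0
      assert(std::signbit(-(0.0f)));       // NegF(+0.0)    -> -0.0
      return 0;
    }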
+@@ -318,7 +315,7 @@
+ 
+ //=============================================================================
+ //------------------------------Ideal------------------------------------------
+-// Return a node which is more "ideal" than the current node.  
++// Return a node which is more "ideal" than the current node.
+ // Check for absolute value
+ Node *CMoveDNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+   // Try generic ideal's first
+@@ -361,8 +358,8 @@
+   Node *sub = in(phi_sub_idx);
+ 
+   // Allow only SubD(0,X) and fail out for all others; NegD is not OK
+-  if( sub->Opcode() != Op_SubD || 
+-      sub->in(2) != X || 
++  if( sub->Opcode() != Op_SubD ||
++      sub->in(2) != X ||
+       phase->type(sub->in(1)) != TypeD::ZERO ) return NULL;
+ 
+   Node *abs = new (phase->C, 2) AbsDNode( X );
+@@ -409,10 +406,10 @@
+ }
+ 
+ //------------------------------Ideal------------------------------------------
+-// Return a node which is more "ideal" than the current node.  Strip out 
++// Return a node which is more "ideal" than the current node.  Strip out
+ // control copies
+ Node *ConstraintCastNode::Ideal(PhaseGVN *phase, bool can_reshape){
+-  return (in(0) && remove_dead_region(phase, can_reshape)) ? this : NULL; 
++  return (in(0) && remove_dead_region(phase, can_reshape)) ? this : NULL;
+ }
+ 
+ //------------------------------Ideal_DU_postCCP-------------------------------
+@@ -502,10 +499,10 @@
+   // JOIN NOT DONE HERE BECAUSE OF INTERFACE ISSUES.
+   // FIX THIS (DO THE JOIN) WHEN UNION TYPES APPEAR!
+ 
+-  // 
++  //
+   // Remove this code after overnight run indicates no performance
+   // loss from not performing JOIN at CheckCastPPNode
+-  // 
++  //
+   // const TypeInstPtr *in_oop = in->isa_instptr();
+   // const TypeInstPtr *my_oop = _type->isa_instptr();
+   // // If either input is an 'interface', return destination type
+@@ -520,9 +517,9 @@
+   //   }
+   //   return _type;
+   // }
+-  // 
+-  // // Neither the input nor the destination type is an interface, 
+-  // 
++  //
++  // // Neither the input nor the destination type is an interface,
++  //
+   // // history: JOIN used to cause weird corner case bugs
+   // //          return (in == TypeOopPtr::NULL_PTR) ? in : _type;
+   // // JOIN picks up NotNull in common instance-of/check-cast idioms, both oops.
+@@ -538,7 +535,7 @@
+   //       join_ptr == TypePtr::NotNull || join_ptr == TypePtr::Constant ) {
+   //     return join;
+   //   }
+-  //   // ELSE return same old type as before 
++  //   // ELSE return same old type as before
+   //   return _type;
+   // }
+   // // Not joining two pointers
+@@ -546,10 +543,10 @@
+ }
+ 
+ //------------------------------Ideal------------------------------------------
+-// Return a node which is more "ideal" than the current node.  Strip out 
++// Return a node which is more "ideal" than the current node.  Strip out
+ // control copies
+ Node *CheckCastPPNode::Ideal(PhaseGVN *phase, bool can_reshape){
+-  return (in(0) && remove_dead_region(phase, can_reshape)) ? this : NULL; 
++  return (in(0) && remove_dead_region(phase, can_reshape)) ? this : NULL;
+ }
+ 
+ //=============================================================================
+@@ -780,7 +777,7 @@
+   // remove this node's type assertion until no more loop ops can happen.
+   // The progress bit is set in the major loop optimizations THEN comes the
+   // call to IterGVN and any chance of hitting this code.  Cf. Opaque1Node.
+-  if (!phase->C->major_progress()) {
++  if (can_reshape && !phase->C->major_progress()) {
+     const TypeInt* in_type = phase->type(in(1))->isa_int();
+     if (in_type != NULL && this_type != NULL &&
+         (in_type->_lo != this_type->_lo ||
+@@ -790,10 +787,32 @@
+       // of slightly differing type assertions.  Such slight differences
+       // arise routinely as a result of loop unrolling, so this is a
+       // post-unrolling graph cleanup.  Choose a type which depends only
+-      // on my input.
+-      set_type(TypeLong::make(in_type->_lo, in_type->_hi, in_type->_widen));
+-      // Note: this_type still has old type value, for the logic below.
+-      this_changed = this;
++      // on my input.  (Exception:  Keep a range assertion of >=0 or <0.)
++      jlong lo1 = this_type->_lo;
++      jlong hi1 = this_type->_hi;
++      int   w1  = this_type->_widen;
++      if (lo1 != (jint)lo1 ||
++          hi1 != (jint)hi1 ||
++          lo1 > hi1) {
++        // Overflow leads to wraparound, wraparound leads to range saturation.
++        lo1 = min_jint; hi1 = max_jint;
++      } else if (lo1 >= 0) {
++        // Keep a range assertion of >=0.
++        lo1 = 0;        hi1 = max_jint;
++      } else if (hi1 < 0) {
++        // Keep a range assertion of <0.
++        lo1 = min_jint; hi1 = -1;
++      } else {
++        lo1 = min_jint; hi1 = max_jint;
++      }
++      const TypeLong* wtype = TypeLong::make(MAX2((jlong)in_type->_lo, lo1),
++                                             MIN2((jlong)in_type->_hi, hi1),
++                                             MAX2((int)in_type->_widen, w1));
++      if (wtype != type()) {
++        set_type(wtype);
++        // Note: this_type still has old type value, for the logic below.
++        this_changed = this;
++      }
+     }
+   }
+ 
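The widening above can be restated as a small clamp: saturate to the full int range on 32-bit wraparound, otherwise keep only the sign half of the old assertion so that ">= 0" and "< 0" facts survive loop unrolling. The node's new type is then this relaxed range intersected with the input's range, as the MAX2/MIN2 lines show. A standalone restatement (sketch, plain C++ types in place of TypeLong):

    #include <climits>
    #include <cstdint>

    // Relax an assertion range [lo1, hi1] so it depends only on sign info.
    void relax_assertion(int64_t& lo1, int64_t& hi1) {
      if (lo1 != (int32_t)lo1 || hi1 != (int32_t)hi1 || lo1 > hi1) {
        lo1 = INT_MIN; hi1 = INT_MAX;   // wraparound: saturate to full range
      } else if (lo1 >= 0) {
        lo1 = 0;       hi1 = INT_MAX;   // keep a range assertion of >= 0
      } else if (hi1 < 0) {
        lo1 = INT_MIN; hi1 = -1;        // keep a range assertion of < 0
      } else {
        lo1 = INT_MIN; hi1 = INT_MAX;   // mixed sign: nothing useful to keep
      }
    }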
+@@ -933,7 +952,7 @@
+ }
+ 
+ //------------------------------Ideal------------------------------------------
+-// Return a node which is more "ideal" than the current node.  
++// Return a node which is more "ideal" than the current node.
+ // Blow off prior masking to int
+ Node *ConvL2INode::Ideal(PhaseGVN *phase, bool can_reshape) {
+   Node *andl = in(1);
+@@ -965,7 +984,7 @@
+ 
+   // Fold up with a prior LoadL: LoadL->ConvL2I ==> LoadI
+   // Requires we understand the 'endianess' of Longs.
+-  if( andl_op == Op_LoadL ) { 
++  if( andl_op == Op_LoadL ) {
+     Node *adr = andl->in(MemNode::Address);
+     // VM_LITTLE_ENDIAN is #defined appropriately in the Makefiles
+ #ifndef VM_LITTLE_ENDIAN
+@@ -975,7 +994,7 @@
+     // subsequent StoreL because different memory offsets provoke
+     // flatten_alias_type() into indicating two different types.  See bug
+     // 4755222.
+-    
++
+     // Node *base = adr->is_AddP() ? adr->in(AddPNode::Base) : adr;
+     // adr = phase->transform( new (phase->C, 4) AddPNode(base,adr,phase->MakeConX(sizeof(jint))));
+     return NULL;
+@@ -1073,7 +1092,7 @@
+ }
+ 
+ Node *CastP2XNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+-  return (in(0) && remove_dead_region(phase, can_reshape)) ? this : NULL; 
++  return (in(0) && remove_dead_region(phase, can_reshape)) ? this : NULL;
+ }
+ 
+ //------------------------------Identity---------------------------------------
+@@ -1131,7 +1150,7 @@
+ //=============================================================================
+ // Do not allow value-numbering
+ uint Opaque1Node::hash() const { return NO_HASH; }
+-uint Opaque1Node::cmp( const Node &n ) const { 
++uint Opaque1Node::cmp( const Node &n ) const {
+   return (&n == this);          // Always fail except on self
+ }
+ 
+@@ -1158,7 +1177,7 @@
+ 
+ // Do not allow value-numbering
+ uint Opaque2Node::hash() const { return NO_HASH; }
+-uint Opaque2Node::cmp( const Node &n ) const { 
++uint Opaque2Node::cmp( const Node &n ) const {
+   return (&n == this);          // Always fail except on self
+ }
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/opto/connode.hpp openjdk/hotspot/src/share/vm/opto/connode.hpp
+--- openjdk6/hotspot/src/share/vm/opto/connode.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/connode.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)connode.hpp	1.160 07/05/05 17:06:13 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class PhaseTransform;
+@@ -32,7 +29,7 @@
+ // Simple constants
+ class ConNode : public TypeNode {
+ public:
+-  ConNode( const Type *t ) : TypeNode(t,1) { 
++  ConNode( const Type *t ) : TypeNode(t,1) {
+     init_req(0, (Node*)Compile::current()->root());
+     init_flags(Flag_is_Con);
+   }
+@@ -127,7 +124,7 @@
+ // Place holder for the 2 conditional inputs to a CMove.  CMove needs 4
+ // inputs: the Bool (for the lt/gt/eq/ne bits), the flags (result of some
+ // compare), and the 2 values to select between.  The Matcher requires a
+-// binary tree so we break it down like this: 
++// binary tree so we break it down like this:
+ //     (CMove (Binary bol cmp) (Binary src1 src2))
+ class BinaryNode : public Node {
+ public:
+@@ -147,7 +144,7 @@
+   CMoveNode( Node *bol, Node *left, Node *right, const Type *t ) : TypeNode(t,4)
+   {
+     init_class_id(Class_CMove);
+-    // all inputs are nullified in Node::Node(int) 
++    // all inputs are nullified in Node::Node(int)
+     // init_req(Control,NULL);
+     init_req(Condition,bol);
+     init_req(IfFalse,left);
+@@ -534,7 +531,7 @@
+ // The 2nd slow-half of a subtype check.  Scan the subklass's 2ndary superklass
+ // array for an instance of the superklass.  Set a hidden internal cache on a
+ // hit (cache is checked with exposed code in gen_subtype_check()).  Return
+-// not zero for a miss or zero for a hit.  
++// not zero for a miss or zero for a hit.
+ class PartialSubtypeCheckNode : public Node {
+ public:
+   PartialSubtypeCheckNode(Node* c, Node* sub, Node* super) : Node(c,sub,super) {}
+@@ -543,7 +540,7 @@
+   virtual uint ideal_reg() const { return Op_RegP; }
+ };
+ 
+-// 
++//
+ class MoveI2FNode : public Node {
+  public:
+   MoveI2FNode( Node *value ) : Node(0,value) {}
+diff -ruN openjdk6/hotspot/src/share/vm/opto/divnode.cpp openjdk/hotspot/src/share/vm/opto/divnode.cpp
+--- openjdk6/hotspot/src/share/vm/opto/divnode.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/divnode.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)divnode.cpp	1.88 07/05/05 17:06:13 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Portions of code courtesy of Clifford Click
+@@ -89,7 +86,7 @@
+     bool needs_rounding = true;
+     const Type *dt = phase->type(dividend);
+     const TypeInt *dti = dt->isa_int();
+-  
++
+     // we don't need to round a positive dividend
+     if (dti && dti->_lo >= 0)
+       needs_rounding = false;
+@@ -196,7 +193,7 @@
+   // Either input is BOTTOM ==> the result is the local BOTTOM
+   const Type *bot = bottom_type();
+   if( (t1 == bot) || (t2 == bot) ||
+-      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) ) 
++      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) )
+     return bot;
+ 
+   // Divide the two numbers.  We approximate.
+@@ -213,7 +210,7 @@
+       hi = i1->_hi/d;
+     } else {
+       if( d == -1 && i1->_lo == min_jint ) {
+-        // 'min_jint/-1' throws arithmetic exception during compilation 
++        // 'min_jint/-1' throws arithmetic exception during compilation
+         lo = min_jint;
+         // do not support holes, 'hi' must go to either min_jint or max_jint:
+         // [min_jint, -10]/[-1,-1] ==> [min_jint] UNION [10,max_jint]
+@@ -224,7 +221,7 @@
+       }
+     }
+     return TypeInt::make(lo, hi, widen);
+-  }  
++  }
+ 
+   // If the dividend is a constant
+   if( i1->is_con() ) {
+@@ -356,7 +353,7 @@
+   // Either input is BOTTOM ==> the result is the local BOTTOM
+   const Type *bot = bottom_type();
+   if( (t1 == bot) || (t2 == bot) ||
+-      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) ) 
++      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) )
+     return bot;
+ 
+   // Divide the two numbers.  We approximate.
+@@ -373,7 +370,7 @@
+       hi = i1->_hi/d;
+     } else {
+       if( d == CONST64(-1) && i1->_lo == min_jlong ) {
+-        // 'min_jlong/-1' throws arithmetic exception during compilation 
++        // 'min_jlong/-1' throws arithmetic exception during compilation
+         lo = min_jlong;
+         // do not support holes, 'hi' must go to either min_jlong or max_jlong:
+         // [min_jlong, -10]/[-1,-1] ==> [min_jlong] UNION [10,max_jlong]
+@@ -384,7 +381,7 @@
+       }
+     }
+     return TypeLong::make(lo, hi, widen);
+-  }  
++  }
+ 
+   // If the dividend is a constant
+   if( i1->is_con() ) {
+@@ -419,7 +416,7 @@
+   // Either input is BOTTOM ==> the result is the local BOTTOM
+   const Type *bot = bottom_type();
+   if( (t1 == bot) || (t2 == bot) ||
+-      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) ) 
++      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) )
+     return bot;
+ 
+   // x/x == 1, we ignore 0/0.
+@@ -433,7 +430,7 @@
+     return t1;
+ 
+   // If divisor is a constant and not zero, divide them numbers
+-  if( t1->base() == Type::FloatCon && 
++  if( t1->base() == Type::FloatCon &&
+       t2->base() == Type::FloatCon &&
+       t2->getf() != 0.0 ) // could be negative zero
+     return TypeF::make( t1->getf()/t2->getf() );
+@@ -505,7 +502,7 @@
+   // Either input is BOTTOM ==> the result is the local BOTTOM
+   const Type *bot = bottom_type();
+   if( (t1 == bot) || (t2 == bot) ||
+-      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) ) 
++      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) )
+     return bot;
+ 
+   // x/x == 1, we ignore 0/0.
+@@ -519,7 +516,7 @@
+     return t1;
+ 
+   // If divisor is a constant and not zero, divide them numbers
+-  if( t1->base() == Type::DoubleCon && 
++  if( t1->base() == Type::DoubleCon &&
+       t2->base() == Type::DoubleCon &&
+       t2->getd() != 0.0 ) // could be negative zero
+     return TypeD::make( t1->getd()/t2->getd() );
+@@ -614,7 +611,7 @@
+     if( trip_count <= 5 && ConditionalMoveLimit != 0 ) {
+       Node *x = in(1);            // Value being mod'd
+       Node *divisor = in(2);      // Also is mask
+-      
++
+       hook->init_req(0, x);       // Add a use to x to prevent him from dying
+       // Generate code to reduce X rapidly to nearly 2^k-1.
+       for( int i = 0; i < trip_count; i++ ) {
+@@ -637,7 +634,7 @@
+       // since Ideal is expected to return a modified 'this' or a new node.
+       Node *cmov2= new (phase->C, 4) CMoveINode(bol2, x, sub, TypeInt::INT);
+       // cmov2 is now the mod
+-      
++
+       // Now remove the bogus extra edges used to keep things alive
+       if (can_reshape) {
+         phase->is_IterGVN()->remove_dead_node(hook);
+@@ -712,14 +709,14 @@
+ 
+   // We always generate the dynamic check for 0.
+   // 0 MOD X is 0
+-  if( t1 == TypeInt::ZERO ) return TypeInt::ZERO; 
++  if( t1 == TypeInt::ZERO ) return TypeInt::ZERO;
+   // X MOD X is 0
+   if( phase->eqv( in(1), in(2) ) ) return TypeInt::ZERO;
+ 
+   // Either input is BOTTOM ==> the result is the local BOTTOM
+   const Type *bot = bottom_type();
+   if( (t1 == bot) || (t2 == bot) ||
+-      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) ) 
++      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) )
+     return bot;
+ 
+   const TypeInt *i1 = t1->is_int();
+@@ -732,10 +729,10 @@
+   }
+   // Mod by zero?  Throw exception at runtime!
+   if( !i2->get_con() ) return TypeInt::POS;
+-  
++
+   // We must be modulo'ing 2 float constants.
+   // Check for min_jint % '-1', result is defined to be '0'.
+-  if( i1->get_con() == min_jint && i2->get_con() == -1 ) 
++  if( i1->get_con() == min_jint && i2->get_con() == -1 )
+     return TypeInt::ZERO;
+ 
+   return TypeInt::make( i1->get_con() % i2->get_con() );
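The special case above exists because the JVM defines `min_jint % -1` as 0 (JVMS irem semantics), whereas evaluating it with the host compiler's `%` overflows and is undefined behavior in C++. A sketch of the folding rule:

    #include <cstdint>

    // Constant-fold Java irem; the divisor-zero case is handled earlier
    // (the runtime check throws ArithmeticException instead).
    int32_t fold_irem(int32_t x, int32_t y) {
      if (x == INT32_MIN && y == -1) return 0;  // JVM-defined; UB for host '%'
      return x % y;
    }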
+@@ -783,10 +780,10 @@
+     if( k < ARRAY_SIZE(unroll_factor)) trip_count = unroll_factor[k];
+     if( trip_count > 4 ) return NULL; // Too much unrolling
+     if (ConditionalMoveLimit == 0) return NULL;  // cmov is required
+-    
++
+     Node *x = in(1);            // Value being mod'd
+     Node *divisor = in(2);      // Also is mask
+-    
++
+     Node *hook = new (phase->C, 1) Node(x);
+     // Generate code to reduce X rapidly to nearly 2^k-1.
+     for( int i = 0; i < trip_count; i++ ) {
+@@ -808,7 +805,7 @@
+     // since Ideal is expected to return a modified 'this' or a new node.
+     Node *cmov2= new (phase->C, 4) CMoveLNode(bol2, x, sub, TypeLong::LONG);
+     // cmov2 is now the mod
+-    
++
+     // Now remove the bogus extra edges used to keep things alive
+     if (can_reshape) {
+       phase->is_IterGVN()->remove_dead_node(hook);
+@@ -830,14 +827,14 @@
+ 
+   // We always generate the dynamic check for 0.
+   // 0 MOD X is 0
+-  if( t1 == TypeLong::ZERO ) return TypeLong::ZERO; 
++  if( t1 == TypeLong::ZERO ) return TypeLong::ZERO;
+   // X MOD X is 0
+   if( phase->eqv( in(1), in(2) ) ) return TypeLong::ZERO;
+ 
+   // Either input is BOTTOM ==> the result is the local BOTTOM
+   const Type *bot = bottom_type();
+   if( (t1 == bot) || (t2 == bot) ||
+-      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) ) 
++      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) )
+     return bot;
+ 
+   const TypeLong *i1 = t1->is_long();
+@@ -850,10 +847,10 @@
+   }
+   // Mod by zero?  Throw exception at runtime!
+   if( !i2->get_con() ) return TypeLong::POS;
+-  
++
+   // We must be modulo'ing 2 float constants.
+   // Check for min_jint % '-1', result is defined to be '0'.
+-  if( i1->get_con() == min_jlong && i2->get_con() == -1 ) 
++  if( i1->get_con() == min_jlong && i2->get_con() == -1 )
+     return TypeLong::ZERO;
+ 
+   return TypeLong::make( i1->get_con() % i2->get_con() );
+@@ -872,7 +869,7 @@
+   // Either input is BOTTOM ==> the result is the local BOTTOM
+   const Type *bot = bottom_type();
+   if( (t1 == bot) || (t2 == bot) ||
+-      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) ) 
++      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) )
+     return bot;
+ 
+   // If either is a NaN, return an input NaN
+@@ -898,7 +895,7 @@
+   }
+   // X MOD X is 0
+   // Does not work for variables because of NaN's
+-  if( phase->eqv( in(1), in(2) ) && t1->base() == Type::FloatCon) 
++  if( phase->eqv( in(1), in(2) ) && t1->base() == Type::FloatCon)
+     if (!g_isnan(t1->getf()) && (t1->getf() != 0.0) && ((int)t1->getf() != 0x80000000)) {
+       if(t1->getf() < 0.0) {
+         float result = jfloat_cast(0x80000000);
+@@ -911,7 +908,7 @@
+   // If both numbers are not constants, we know nothing.
+   if( (t1->base() != Type::FloatCon) || (t2->base() != Type::FloatCon) )
+     return Type::FLOAT;
+-  
++
+   // We must be modulo'ing 2 float constants.
+   // Make sure that the sign of the fmod is equal to the sign of the dividend
+   float result = (float)fmod( t1->getf(), t2->getf() );
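Java's `%` on floating point keeps the sign of the dividend and ignores the divisor's, which matches C's `fmod`; that is the property the comment above is preserving. A quick standalone check (sketch):

    #include <cassert>
    #include <cmath>

    int main() {
      assert(std::fmod( 5.5f,  2.0f) ==  1.5f);
      assert(std::fmod(-5.5f,  2.0f) == -1.5f);  // sign of the dividend...
      assert(std::fmod( 5.5f, -2.0f) ==  1.5f);  // ...divisor's sign ignored
      return 0;
    }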
+@@ -940,7 +937,7 @@
+   // Either input is BOTTOM ==> the result is the local BOTTOM
+   const Type *bot = bottom_type();
+   if( (t1 == bot) || (t2 == bot) ||
+-      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) ) 
++      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) )
+     return bot;
+ 
+   // If either is a NaN, return an input NaN
+@@ -959,15 +956,15 @@
+ 
+   // X MOD X is 0
+   // does not work for variables because of NaN's
+-  if( phase->eqv( in(1), in(2) ) && t1->base() == Type::DoubleCon ) 
++  if( phase->eqv( in(1), in(2) ) && t1->base() == Type::DoubleCon )
+     if (!g_isnan(t1->getd()) && t1->getd() != 0.0)
+       return TypeD::ZERO;
+- 
++
+ 
+   // If both numbers are not constants, we know nothing.
+   if( (t1->base() != Type::DoubleCon) || (t2->base() != Type::DoubleCon) )
+     return Type::DOUBLE;
+-  
++
+   // We must be modulo'ing 2 double constants.
+   return TypeD::make( fmod( t1->getd(), t2->getd() ) );
+ }
+@@ -985,7 +982,7 @@
+   Node* n = div_or_mod;
+   assert(n->Opcode() == Op_DivI || n->Opcode() == Op_ModI,
+          "only div or mod input pattern accepted");
+-  
++
+   DivModINode* divmod = new (C, 3) DivModINode(n->in(0), n->in(1), n->in(2));
+   Node*        dproj  = new (C, 1) ProjNode(divmod, DivModNode::div_proj_num);
+   Node*        mproj  = new (C, 1) ProjNode(divmod, DivModNode::mod_proj_num);
+@@ -997,7 +994,7 @@
+   Node* n = div_or_mod;
+   assert(n->Opcode() == Op_DivL || n->Opcode() == Op_ModL,
+          "only div or mod input pattern accepted");
+-  
++
+   DivModLNode* divmod = new (C, 3) DivModLNode(n->in(0), n->in(1), n->in(2));
+   Node*        dproj  = new (C, 1) ProjNode(divmod, DivModNode::div_proj_num);
+   Node*        mproj  = new (C, 1) ProjNode(divmod, DivModNode::mod_proj_num);
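These make() helpers fuse a Div/Mod pair into one DivMod node with two projections because a single hardware divide produces both results (on x86, idiv leaves the quotient in EAX and the remainder in EDX). C exposes the same fusion as div(); a sketch:

    #include <cstdio>
    #include <cstdlib>

    int main() {
      std::div_t qr = std::div(17, 5);               // one divide, two results
      std::printf("quot=%d rem=%d\n", qr.quot, qr.rem);  // quot=3 rem=2
      return 0;
    }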
+diff -ruN openjdk6/hotspot/src/share/vm/opto/divnode.hpp openjdk/hotspot/src/share/vm/opto/divnode.hpp
+--- openjdk6/hotspot/src/share/vm/opto/divnode.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/divnode.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)divnode.hpp	1.31 07/05/05 17:06:16 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Portions of code courtesy of Clifford Click
+@@ -33,7 +30,7 @@
+ //------------------------------DivINode---------------------------------------
+ // Integer division
+ // Note: this is division as defined by JVMS, i.e., MinInt/-1 == MinInt.
+-// On processors which don't naturally support this special case (e.g., x86), 
++// On processors which don't naturally support this special case (e.g., x86),
+ // the matcher or runtime system must take care of this.
+ class DivINode : public Node {
+ public:
+@@ -178,4 +175,3 @@
+   // Make a divmod and associated projections from a div or mod.
+   static DivModLNode* make(Compile* C, Node* div_or_mod);
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/opto/doCall.cpp openjdk/hotspot/src/share/vm/opto/doCall.cpp
+--- openjdk6/hotspot/src/share/vm/opto/doCall.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/doCall.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)doCall.cpp	1.206 07/05/17 15:57:45 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -147,29 +144,29 @@
+       // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
+       bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
+       ciMethod* receiver_method = NULL;
+-      if (have_major_receiver || profile.morphism() == 1 || 
++      if (have_major_receiver || profile.morphism() == 1 ||
+           (profile.morphism() == 2 && UseBimorphicInlining)) {
+         // receiver_method = profile.method();
+         // Profiles do not suggest methods now.  Look it up in the major receiver.
+-        ciInstanceKlass* ik = profile.receiver(0)->as_instance_klass();
+-        receiver_method = call_method->resolve_invoke(jvms->method()->holder(), ik);
++        receiver_method = call_method->resolve_invoke(jvms->method()->holder(),
++                                                      profile.receiver(0));
+       }
+       if (receiver_method != NULL) {
+         // The single majority receiver sufficiently outweighs the minority.
+-        CallGenerator* hit_cg = this->call_generator(receiver_method, 
++        CallGenerator* hit_cg = this->call_generator(receiver_method,
+               vtable_index, !call_is_virtual, jvms, allow_inline, prof_factor);
+         if (hit_cg != NULL) {
+           // Look up second receiver.
+           CallGenerator* next_hit_cg = NULL;
+           ciMethod* next_receiver_method = NULL;
+-          if (profile.morphism() == 2 && UseBimorphicInlining) { 
+-            ciInstanceKlass* next_ik = profile.receiver(1)->as_instance_klass();
+-            next_receiver_method = call_method->resolve_invoke(jvms->method()->holder(), next_ik);
++          if (profile.morphism() == 2 && UseBimorphicInlining) {
++            next_receiver_method = call_method->resolve_invoke(jvms->method()->holder(),
++                                                               profile.receiver(1));
+             if (next_receiver_method != NULL) {
+-              next_hit_cg = this->call_generator(next_receiver_method, 
+-                                  vtable_index, !call_is_virtual, jvms, 
++              next_hit_cg = this->call_generator(next_receiver_method,
++                                  vtable_index, !call_is_virtual, jvms,
+                                   allow_inline, prof_factor);
+-              if (next_hit_cg != NULL && !next_hit_cg->is_inline() && 
++              if (next_hit_cg != NULL && !next_hit_cg->is_inline() &&
+                   have_major_receiver && UseOnlyInlinedBimorphic) {
+                   // Skip if we can't inline second receiver's method
+                   next_hit_cg = NULL;
+@@ -177,19 +174,19 @@
+             }
+           }
+           CallGenerator* miss_cg;
+-          if (( profile.morphism() == 1 || 
+-               (profile.morphism() == 2 && next_hit_cg != NULL) ) && 
++          if (( profile.morphism() == 1 ||
++               (profile.morphism() == 2 && next_hit_cg != NULL) ) &&
+ 
+               !too_many_traps(Deoptimization::Reason_class_check)
+ 
+               // Check only total number of traps per method to allow
+-              // the transition from monomorphic to bimorphic case between 
++              // the transition from monomorphic to bimorphic case between
+               // compilations without falling into virtual call.
+-              // A monomorphic case may have the class_check trap flag is set 
+-              // due to the time gap between the uncommon trap processing 
++              // A monomorphic case may have the class_check trap flag set
++              // due to the time gap between the uncommon trap processing
+               // when flags are set in MDO and the call site bytecode execution
+               // in Interpreter when MDO counters are updated.
+-              // There was also class_check trap in monomorphic case due to 
++              // There was also class_check trap in monomorphic case due to
+               // the bug 6225440.
+ 
+              ) {
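When the profile shows one or two dominant receiver classes, the generated code is shaped like a chain of class-equality guards around the inlined bodies, falling back to an uncommon trap (the Reason_class_check path above) or a true virtual call for anything else. A conceptual sketch of bimorphic dispatch, not C2 IR; every name here is a hypothetical stand-in:

    struct Klass {};
    struct Obj { Klass* klass; };
    static Klass k1_storage, k2_storage;
    static Klass* const K1 = &k1_storage;   // profiled major receiver
    static Klass* const K2 = &k2_storage;   // profiled second receiver

    void m_inlined_for_K1(Obj*) { /* body of K1::m, inlined */ }
    void m_inlined_for_K2(Obj*) { /* body of K2::m, inlined */ }
    void uncommon_trap()        { /* deoptimize, reprofile, recompile */ }

    void dispatch(Obj* receiver) {
      Klass* k = receiver->klass;
      if      (k == K1) m_inlined_for_K1(receiver);  // hit_cg
      else if (k == K2) m_inlined_for_K2(receiver);  // next_hit_cg
      else              uncommon_trap();             // class check failed
    }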
+@@ -312,8 +309,8 @@
+ 
+   // Try to get the most accurate receiver type
+   if (is_virtual_or_interface) {
+-    Node*              receiver_node = stack(sp() - nargs);
+-    const TypeInstPtr* receiver_type = _gvn.type(receiver_node)->isa_instptr();
++    Node*             receiver_node = stack(sp() - nargs);
++    const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
+     ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, dest_method, receiver_type);
+ 
+     // Have the call been sufficiently improved such that it is no longer a virtual?
+@@ -359,7 +356,7 @@
+   // save across call, for a subsequent cast_not_null.
+   Node* receiver = has_receiver ? argument(0) : NULL;
+ 
+-  // Bump method data counters (We profile *before* the call is made 
++  // Bump method data counters (We profile *before* the call is made
+   // because exceptions don't return to the call site.)
+   profile_call(receiver);
+ 
+@@ -464,7 +461,7 @@
+     // Do not introduce unloaded exception types into the graph:
+     if (!h_klass->is_loaded()) {
+       if (saw_unloaded->contains(h_bci)) {
+-        /* We've already seen an unloaded exception with h_bci, 
++        /* We've already seen an unloaded exception with h_bci,
+            so don't duplicate. Duplication will cause the CatchNode to be
+            unnecessarily large. See 4713716. */
+         continue;
+@@ -597,7 +594,7 @@
+         ex_klass_node->init_req( i, k );
+       }
+       _gvn.set_type(ex_klass_node, TypeKlassPtr::OBJECT);
+-      
++
+     }
+   }
+ 
+@@ -671,7 +668,7 @@
+       C->dependencies()->assert_leaf_type(klass);
+     }
+ 
+-    // Implement precise test 
++    // Implement precise test
+     const TypeKlassPtr *tk = TypeKlassPtr::make(klass);
+     Node* con = _gvn.makecon(tk);
+     Node* cmp = _gvn.transform( new (C, 3) CmpPNode(ex_klass_node, con) );
+@@ -748,7 +745,7 @@
+       switch (bc()) {
+       case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_inlined_calls_addr()); break;
+       case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_inlined_interface_calls_addr()); break;
+-      case Bytecodes::_invokestatic:  
++      case Bytecodes::_invokestatic:
+       case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_inlined_static_calls_addr()); break;
+       default: fatal("unexpected call bytecode");
+       }
+@@ -756,7 +753,7 @@
+       switch (bc()) {
+       case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_normal_calls_addr()); break;
+       case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_interface_calls_addr()); break;
+-      case Bytecodes::_invokestatic:  
++      case Bytecodes::_invokestatic:
+       case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_static_calls_addr()); break;
+       default: fatal("unexpected call bytecode");
+       }
+@@ -767,8 +764,8 @@
+ 
+ 
+ // Identify possible target method and inlining style
+-ciMethod* Parse::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass, 
+-                                   ciMethod *dest_method, const TypeInstPtr* receiver_type) {
++ciMethod* Parse::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
++                                   ciMethod *dest_method, const TypeOopPtr* receiver_type) {
+   // only use for virtual or interface calls
+ 
+   // If it is obviously final, do not bother to call find_monomorphic_target,
+@@ -783,11 +780,22 @@
+   bool actual_receiver_is_exact = false;
+   ciInstanceKlass* actual_receiver = klass;
+   if (receiver_type != NULL) {
++    // Array methods are all inherited from Object, and are monomorphic.
++    if (receiver_type->isa_aryptr() &&
++        dest_method->holder() == env()->Object_klass()) {
++      return dest_method;
++    }
++
++    // All other interesting cases are instance klasses.
++    if (!receiver_type->isa_instptr()) {
++      return NULL;
++    }
++
+     ciInstanceKlass *ikl = receiver_type->klass()->as_instance_klass();
+     if (ikl->is_loaded() && ikl->is_initialized() && !ikl->is_interface() &&
+         (ikl == actual_receiver || ikl->is_subclass_of(actual_receiver))) {
+-      // ikl is a same or better type than the original actual_receiver, 
+-      // e.g. static receiver from bytecodes. 
++      // ikl is the same or a better type than the original actual_receiver,
++      // e.g. static receiver from bytecodes.
+       actual_receiver = ikl;
+       // Is the actual_receiver exact?
+       actual_receiver_is_exact = receiver_type->klass_is_exact();
+diff -ruN openjdk6/hotspot/src/share/vm/opto/domgraph.cpp openjdk/hotspot/src/share/vm/opto/domgraph.cpp
+--- openjdk6/hotspot/src/share/vm/opto/domgraph.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/domgraph.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)domgraph.cpp	1.75 07/05/05 17:06:16 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Portions of code courtesy of Clifford Click
+@@ -38,13 +35,13 @@
+   Block *_block;                // Basic block for this info
+ 
+   uint _semi;                   // Semi-dominators
+-  uint _size;                   // Used for faster LINK and EVAL 
++  uint _size;                   // Used for faster LINK and EVAL
+   Tarjan *_parent;              // Parent in DFS
+   Tarjan *_label;               // Used for LINK and EVAL
+-  Tarjan *_ancestor;            // Used for LINK and EVAL 
+-  Tarjan *_child;               // Used for faster LINK and EVAL 
++  Tarjan *_ancestor;            // Used for LINK and EVAL
++  Tarjan *_child;               // Used for faster LINK and EVAL
+   Tarjan *_dom;                 // Parent in dominator tree (immediate dom)
+-  Tarjan *_bucket;              // Set of vertices with given semidominator 
++  Tarjan *_bucket;              // Set of vertices with given semidominator
+ 
+   Tarjan *_dom_child;           // Child in dominator tree
+   Tarjan *_dom_next;            // Next in dominator tree
+@@ -57,21 +54,21 @@
+   void setdepth( uint size );
+ 
+ };
+-         
++
+ //------------------------------Dominator--------------------------------------
+-// Compute the dominator tree of the CFG.  The CFG must already have been 
++// Compute the dominator tree of the CFG.  The CFG must already have been
+ // constructed.  This is the Lengauer & Tarjan O(E-alpha(E,V)) algorithm.
+ void PhaseCFG::Dominators( ) {
+   // Pre-grow the blocks array, prior to the ResourceMark kicking in
+   _blocks.map(_num_blocks,0);
+ 
+   ResourceMark rm;
+-  // Setup mappings from my Graph to Tarjan's stuff and back 
++  // Setup mappings from my Graph to Tarjan's stuff and back
+   // Note: Tarjan uses 1-based arrays
+   Tarjan *tarjan = NEW_RESOURCE_ARRAY(Tarjan,_num_blocks+1);
+ 
+-  // Tarjan's algorithm, almost verbatim: 
+-  // Step 1: 
++  // Tarjan's algorithm, almost verbatim:
++  // Step 1:
+   _rpo_ctr = _num_blocks;
+   uint dfsnum = DFS( tarjan );
+   if( dfsnum-1 != _num_blocks ) {// Check for unreachable loops!
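The code above is the full Lengauer-Tarjan algorithm with the fast LINK/EVAL machinery. A much shorter way to see what "immediate dominator" computation produces is the iterative Cooper-Harvey-Kennedy algorithm, which intersects predecessors' dominators in reverse post-order until a fixed point; a standalone sketch of that deliberately simpler alternative (worse worst-case bound, not Tarjan's method):

    #include <vector>

    // Nodes numbered 0..n-1 in reverse post-order; node 0 is the entry.
    // preds[b] lists the predecessors of block b.
    std::vector<int> immediate_dominators(
        const std::vector<std::vector<int>>& preds) {
      int n = (int)preds.size();
      std::vector<int> idom(n, -1);
      idom[0] = 0;                                   // entry dominates itself
      auto intersect = [&](int a, int b) {
        while (a != b) {                             // walk up the partial tree;
          while (a > b) a = idom[a];                 // larger RPO = deeper
          while (b > a) b = idom[b];
        }
        return a;
      };
      for (bool changed = true; changed; ) {
        changed = false;
        for (int b = 1; b < n; b++) {
          int nd = -1;
          for (int p : preds[b])
            if (idom[p] != -1)                       // only processed preds
              nd = (nd == -1) ? p : intersect(nd, p);
          if (nd != idom[b]) { idom[b] = nd; changed = true; }
        }
      }
      return idom;
    }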
+@@ -96,12 +93,12 @@
+   // Tarjan is using 1-based arrays, so these are some initialize flags
+   tarjan[0]._size = tarjan[0]._semi = 0;
+   tarjan[0]._label = &tarjan[0];
+-  
++
+   uint i;
+-  for( i=_num_blocks; i>=2; i-- ) { // For all vertices in DFS order 
+-    Tarjan *w = &tarjan[i];     // Get vertex from DFS 
++  for( i=_num_blocks; i>=2; i-- ) { // For all vertices in DFS order
++    Tarjan *w = &tarjan[i];     // Get vertex from DFS
+ 
+-    // Step 2: 
++    // Step 2:
+     Node *whead = w->_block->head();
+     for( uint j=1; j < whead->req(); j++ ) {
+       Block *b = _bbs[whead->in(j)->_idx];
+@@ -109,7 +106,7 @@
+       Tarjan *u = vx->EVAL();
+       if( u->_semi < w->_semi )
+         w->_semi = u->_semi;
+-    } 
++    }
+ 
+     // w is added to a bucket here, and only here.
+     // Thus w is in at most one bucket and the sum of all bucket sizes is O(n).
+@@ -120,14 +117,14 @@
+ 
+     w->_parent->LINK( w, &tarjan[0] );
+ 
+-    // Step 3: 
++    // Step 3:
+     for( Tarjan *vx = w->_parent->_bucket; vx; vx = vx->_bucket ) {
+       Tarjan *u = vx->EVAL();
+       vx->_dom = (u->_semi < vx->_semi) ? u : w->_parent;
+     }
+   }
+ 
+-  // Step 4: 
++  // Step 4:
+   for( i=2; i <= _num_blocks; i++ ) {
+     Tarjan *w = &tarjan[i];
+     if( w->_dom != &tarjan[w->_semi] )
+@@ -136,13 +133,13 @@
+   }
+   // No immediate dominator for the root
+   Tarjan *w = &tarjan[_broot->_pre_order];
+-  w->_dom = NULL;       
++  w->_dom = NULL;
+   w->_dom_next = w->_dom_child = NULL;  // Initialize for building tree later
+ 
+-  // Convert the dominator tree array into my kind of graph 
++  // Convert the dominator tree array into my kind of graph
+   for( i=1; i<=_num_blocks;i++){// For all Tarjan vertices
+     Tarjan *t = &tarjan[i];     // Handy access
+-    Tarjan *tdom = t->_dom;     // Handy access to immediate dominator 
++    Tarjan *tdom = t->_dom;     // Handy access to immediate dominator
+     if( tdom )  {               // Root has no immediate dominator
+       t->_block->_idom = tdom->_block; // Set immediate dominator
+       t->_dom_next = tdom->_dom_child; // Make me a sibling of parent's child
+@@ -177,9 +174,9 @@
+       Tarjan *t = &_tarjan[pre_order]; // Fast local access
+       b->_pre_order = pre_order;    // Flag as visited
+       t->_block = b;                // Save actual block
+-      t->_semi = pre_order;         // Block to DFS map 
+-      t->_label = t;                // DFS to vertex map 
+-      t->_ancestor = NULL;          // Fast LINK & EVAL setup 
++      t->_semi = pre_order;         // Block to DFS map
++      t->_label = t;                // DFS to vertex map
++      t->_ancestor = NULL;          // Fast LINK & EVAL setup
+       t->_child = &_tarjan[0];      // Sentenial
+       t->_size = 1;
+       t->_bucket = NULL;
+@@ -200,11 +197,11 @@
+     Block* pop() { Block* b = _stack_top->block; _stack_top--; return b; }
+     bool is_nonempty() { return (_stack_top >= _stack); }
+     bool last_successor() { return (_stack_top->index == _stack_top->freq_idx); }
+-    Block* next_successor()  { 
+-      int i = _stack_top->index; 
++    Block* next_successor()  {
++      int i = _stack_top->index;
+       i++;
+       if (i == _stack_top->freq_idx) i++;
+-      if (i >= (int)(_stack_top->block->_num_succs)) { 
++      if (i >= (int)(_stack_top->block->_num_succs)) {
+         i = _stack_top->freq_idx;   // process most frequent successor last
+       }
+       _stack_top->index = i;
+@@ -230,7 +227,7 @@
+     break;
+   }
+   case Op_Catch:                // Split frequency amongst children
+-    for( freq_idx = 0; freq_idx < b->_num_succs; freq_idx++ ) 
++    for( freq_idx = 0; freq_idx < b->_num_succs; freq_idx++ )
+       if( b->_nodes[eidx+1+freq_idx]->as_CatchProj()->_con == CatchProjNode::fall_through_index )
+         break;
+     // Handle case of no fall-thru (e.g., check-cast MUST throw an exception)
+@@ -250,15 +247,15 @@
+   case Op_Halt:
+   case Op_Rethrow:
+     break;
+-  default: 
++  default:
+     ShouldNotReachHere();
+   }
+   return freq_idx;
+ }
+ 
+ //------------------------------DFS--------------------------------------------
+-// Perform DFS search.  Setup 'vertex' as DFS to vertex mapping.  Setup      
+-// 'semi' as vertex to DFS mapping.  Set 'parent' to DFS parent.             
++// Perform DFS search.  Set up 'vertex' as DFS to vertex mapping.  Set up
++// 'semi' as vertex to DFS mapping.  Set 'parent' to DFS parent.
+ uint PhaseCFG::DFS( Tarjan *tarjan ) {
+   Block *b = _broot;
+   uint pre_order = 1;
+@@ -266,22 +263,24 @@
+   Block_Stack bstack(tarjan, _num_blocks+1);
+ 
+   // Push on stack the state for the first block
+-  bstack.push(pre_order, b); 
++  bstack.push(pre_order, b);
+   ++pre_order;
+ 
+   while (bstack.is_nonempty()) {
+-    if (!bstack.last_successor()) { 
++    if (!bstack.last_successor()) {
+       // Walk over all successors in pre-order (DFS).
+       Block *s = bstack.next_successor();
+       if (s->_pre_order == 0) { // Check for no-pre-order, not-visited
+         // Push on stack the state of successor
+-        bstack.push(pre_order, s); 
++        bstack.push(pre_order, s);
+         ++pre_order;
+       }
+     }
+     else {
+       // Build a reverse post-order in the CFG _blocks array
+-      _blocks.map(--_rpo_ctr, bstack.pop());
++      Block *stack_top = bstack.pop();
++      stack_top->_rpo = --_rpo_ctr;
++      _blocks.map(stack_top->_rpo, stack_top);
+     }
+   }
+   return pre_order;
+@@ -350,7 +349,7 @@
+         Tarjan *dom_child = t->_dom_child;
+         t = t->_dom_next;    // next tarjan
+         if (dom_child != NULL) {
+-          *top = dom_child;  // save child on stack 
++          *top = dom_child;  // save child on stack
+           ++top;
+         }
+       } while (t != NULL);
+@@ -365,21 +364,21 @@
+   Node *_control;               // Control node associated with this info
+ 
+   uint _semi;                   // Semi-dominators
+-  uint _size;                   // Used for faster LINK and EVAL 
++  uint _size;                   // Used for faster LINK and EVAL
+   NTarjan *_parent;             // Parent in DFS
+   NTarjan *_label;              // Used for LINK and EVAL
+-  NTarjan *_ancestor;           // Used for LINK and EVAL 
+-  NTarjan *_child;              // Used for faster LINK and EVAL 
++  NTarjan *_ancestor;           // Used for LINK and EVAL
++  NTarjan *_child;              // Used for faster LINK and EVAL
+   NTarjan *_dom;                // Parent in dominator tree (immediate dom)
+-  NTarjan *_bucket;             // Set of vertices with given semidominator 
++  NTarjan *_bucket;             // Set of vertices with given semidominator
+ 
+   NTarjan *_dom_child;          // Child in dominator tree
+   NTarjan *_dom_next;           // Next in dominator tree
+ 
+-  // Perform DFS search.  
+-  // Setup 'vertex' as DFS to vertex mapping.  
+-  // Setup 'semi' as vertex to DFS mapping.  
+-  // Set 'parent' to DFS parent.             
++  // Perform DFS search.
++  // Set up 'vertex' as DFS to vertex mapping.
++  // Set up 'semi' as vertex to DFS mapping.
++  // Set 'parent' to DFS parent.
+   static int DFS( NTarjan *ntarjan, VectorSet &visited, PhaseIdealLoop *pil, uint *dfsorder );
+   void setdepth( uint size, uint *dom_depth );
+ 
+@@ -399,19 +398,19 @@
+ // Lengauer & Tarjan O(E-alpha(E,V)) algorithm.
+ void PhaseIdealLoop::Dominators( ) {
+   ResourceMark rm;
+-  // Setup mappings from my Graph to Tarjan's stuff and back 
++  // Setup mappings from my Graph to Tarjan's stuff and back
+   // Note: Tarjan uses 1-based arrays
+   NTarjan *ntarjan = NEW_RESOURCE_ARRAY(NTarjan,C->unique()+1);
+   // Initialize _control field for fast reference
+   int i;
+-  for( i= C->unique()-1; i>=0; i-- ) 
++  for( i= C->unique()-1; i>=0; i-- )
+     ntarjan[i]._control = NULL;
+- 
++
+   // Store the DFS order for the main loop
+   uint *dfsorder = NEW_RESOURCE_ARRAY(uint,C->unique()+1);
+   memset(dfsorder, max_uint, (C->unique()+1) * sizeof(uint));
+ 
+-  // Tarjan's algorithm, almost verbatim: 
++  // Tarjan's algorithm, almost verbatim:
+   // Step 1:
+   VectorSet visited(Thread::current()->resource_area());
+   int dfsnum = NTarjan::DFS( ntarjan, visited, this, dfsorder);
+@@ -419,12 +418,12 @@
+   // Tarjan is using 1-based arrays, so these are some initialize flags
+   ntarjan[0]._size = ntarjan[0]._semi = 0;
+   ntarjan[0]._label = &ntarjan[0];
+-  
++
+   for( i = dfsnum-1; i>1; i-- ) {        // For all nodes in reverse DFS order
+-    NTarjan *w = &ntarjan[i];            // Get Node from DFS 
++    NTarjan *w = &ntarjan[i];            // Get Node from DFS
+     assert(w->_control != NULL,"bad DFS walk");
+ 
+-    // Step 2: 
++    // Step 2:
+     Node *whead = w->_control;
+     for( uint j=0; j < whead->req(); j++ ) { // For each predecessor
+       if( whead->in(j) == NULL || !whead->in(j)->is_CFG() )
+@@ -435,7 +434,7 @@
+       NTarjan *u = vx->EVAL();
+       if( u->_semi < w->_semi )
+         w->_semi = u->_semi;
+-    } 
++    }
+ 
+     // w is added to a bucket here, and only here.
+     // Thus w is in at most one bucket and the sum of all bucket sizes is O(n).
+@@ -445,7 +444,7 @@
+ 
+     w->_parent->LINK( w, &ntarjan[0] );
+ 
+-    // Step 3: 
++    // Step 3:
+     for( NTarjan *vx = w->_parent->_bucket; vx; vx = vx->_bucket ) {
+       NTarjan *u = vx->EVAL();
+       vx->_dom = (u->_semi < vx->_semi) ? u : w->_parent;
+@@ -459,7 +458,7 @@
+       for( uint i = 1; i < whead->req(); i++ ) {
+         if (!has_node(whead->in(i))) {
+           // Kill dead input path
+-          assert( !visited.test(whead->in(i)->_idx), 
++          assert( !visited.test(whead->in(i)->_idx),
+                   "input with no loop must be dead" );
+           _igvn.hash_delete(whead);
+           whead->del_req(i);
+@@ -478,7 +477,7 @@
+     } // End if whead is a Region
+   } // End of for all Nodes in reverse DFS order
+ 
+-  // Step 4: 
++  // Step 4:
+   for( i=2; i < dfsnum; i++ ) { // DFS order
+     NTarjan *w = &ntarjan[i];
+     assert(w->_control != NULL,"Bad DFS walk");
+@@ -492,11 +491,11 @@
+   w->_parent = NULL;
+   w->_dom_next = w->_dom_child = NULL;  // Initialize for building tree later
+ 
+-  // Convert the dominator tree array into my kind of graph 
++  // Convert the dominator tree array into my kind of graph
+   for( i=1; i<dfsnum; i++ ) {          // For all Tarjan vertices
+     NTarjan *t = &ntarjan[i];          // Handy access
+     assert(t->_control != NULL,"Bad DFS walk");
+-    NTarjan *tdom = t->_dom;           // Handy access to immediate dominator 
++    NTarjan *tdom = t->_dom;           // Handy access to immediate dominator
+     if( tdom )  {                      // Root has no immediate dominator
+       _idom[t->_control->_idx] = tdom->_control; // Set immediate dominator
+       t->_dom_next = tdom->_dom_child; // Make me a sibling of parent's child
+@@ -518,8 +517,8 @@
+ }
+ 
+ //------------------------------DFS--------------------------------------------
+-// Perform DFS search.  Setup 'vertex' as DFS to vertex mapping.  Setup      
+-// 'semi' as vertex to DFS mapping.  Set 'parent' to DFS parent.             
++// Perform DFS search.  Set up 'vertex' as DFS to vertex mapping.  Set up
++// 'semi' as vertex to DFS mapping.  Set 'parent' to DFS parent.
+ int NTarjan::DFS( NTarjan *ntarjan, VectorSet &visited, PhaseIdealLoop *pil, uint *dfsorder) {
+   // Allocate stack of size C->unique()/8 to avoid frequent realloc
+   GrowableArray <Node *> dfstack(pil->C->unique() >> 3);
+@@ -537,9 +536,9 @@
+       // Use parent's cached dfsnum to identify "Parent in DFS"
+       w->_parent = &ntarjan[dfsorder[b->_idx]];
+       dfsorder[b->_idx] = dfsnum;      // Save DFS order info
+-      w->_semi = dfsnum;               // Node to DFS map 
+-      w->_label = w;                   // DFS to vertex map 
+-      w->_ancestor = NULL;             // Fast LINK & EVAL setup 
++      w->_semi = dfsnum;               // Node to DFS map
++      w->_label = w;                   // DFS to vertex map
++      w->_ancestor = NULL;             // Fast LINK & EVAL setup
+       w->_child = &ntarjan[0];         // Sentinel
+       w->_size = 1;
+       w->_bucket = NULL;
+@@ -547,7 +546,7 @@
+       // Need DEF-USE info for this pass
+       for ( int i = b->outcnt(); i-- > 0; ) { // Put on stack backwards
+         Node* s = b->raw_out(i);       // Get a use
+-        // CFG nodes only and not dead stuff 
++        // CFG nodes only and not dead stuff
+         if( s->is_CFG() && pil->has_node(s) && !visited.test(s->_idx) ) {
+           dfsorder[s->_idx] = dfsnum;  // Cache parent's dfsnum for a later use
+           dfstack.push(s);
+@@ -663,4 +662,3 @@
+ 
+ }
+ #endif
+-
+diff -ruN openjdk6/hotspot/src/share/vm/opto/escape.cpp openjdk/hotspot/src/share/vm/opto/escape.cpp
+--- openjdk6/hotspot/src/share/vm/opto/escape.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/escape.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)escape.cpp	1.10 07/05/17 15:58:23 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -317,7 +314,7 @@
+ // Search memory chain of "mem" to find a MemNode whose address
+ // is the specified alias index.  Returns the MemNode found or the
+ // first non-MemNode encountered.
+-// 
++//
+ Node *ConnectionGraph::find_mem(Node *mem, int alias_idx, PhaseGVN  *igvn) {
+   if (mem == NULL)
+     return mem;
+@@ -472,7 +469,7 @@
+ //
+ //  Convert the types of unescaped object to instance types where possible,
+ //  propagate the new type information through the graph, and update memory
+-//  edges and MergeMem inputs to reflect the new type.  
++//  edges and MergeMem inputs to reflect the new type.
+ //
+ //  We start with allocations (and calls which may be allocations)  on alloc_worklist.
+ //  The processing is done in 4 phases:
+@@ -952,17 +949,17 @@
+         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
+           const Type* at = d->field_at(i);
+           int k = i - TypeFunc::Parms;
+-  
++
+           if (at->isa_oopptr() != NULL) {
+             Node *arg = skip_casts(call->in(i));
+-  
++
+             if (!call_analyzer.is_arg_stack(k)) {
+               // The argument global escapes, mark everything it could point to
+               ptset.Clear();
+               PointsTo(ptset, arg, phase);
+               for( VectorSetI j(&ptset); j.test(); ++j ) {
+                 uint pt = j.elem;
+-  
++
+                 set_escape_state(pt, PointsToNode::GlobalEscape);
+               }
+             } else if (!call_analyzer.is_arg_local(k)) {
+@@ -1069,7 +1066,7 @@
+ 
+       // Note:  we use isa_ptr() instead of isa_oopptr()  here because the
+       //        _multianewarray functions return a TypeRawPtr.
+-      if (ret_type == NULL || ret_type->isa_ptr() == NULL) 
++      if (ret_type == NULL || ret_type->isa_ptr() == NULL)
+         break;  // doesn't return a pointer type
+ 
+       ciMethod *meth = call->as_CallJava()->method();
+@@ -1088,10 +1085,10 @@
+           set_escape_state(call->_idx, PointsToNode::NoEscape);
+           for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
+             const Type* at = d->field_at(i);
+-    
++
+             if (at->isa_oopptr() != NULL) {
+               Node *arg = skip_casts(call->in(i));
+-    
++
+               if (call_analyzer.is_arg_returned(i - TypeFunc::Parms)) {
+                 PointsToNode *arg_esp = _nodes->adr_at(arg->_idx);
+                 if (arg_esp->node_type() == PointsToNode::JavaObject)
+diff -ruN openjdk6/hotspot/src/share/vm/opto/escape.hpp openjdk/hotspot/src/share/vm/opto/escape.hpp
+--- openjdk6/hotspot/src/share/vm/opto/escape.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/escape.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)escape.hpp	1.9 07/05/17 15:58:25 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+@@ -84,7 +81,7 @@
+ //     CreateEx
+ //     ConP
+ //     LoadKlass
+-//     
++//
+ // AddP nodes are fields.
+ //
+ // After building the graph, a pass is made over the nodes, deleting deferred
+@@ -248,7 +245,7 @@
+   // matches "offset"
+   void add_deferred_edge_to_fields(uint from_i, uint adr, int offs);
+ 
+-  
++
+   // Remove outgoing deferred edges from the node referenced by "ni".
+   // Any outgoing edges from the target of the deferred edge are copied
+   // to "ni".
+@@ -320,4 +317,3 @@
+   void dump();
+ #endif
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/opto/gcm.cpp openjdk/hotspot/src/share/vm/opto/gcm.cpp
+--- openjdk6/hotspot/src/share/vm/opto/gcm.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/gcm.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)gcm.cpp	1.251 07/05/17 15:58:45 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Portions of code courtesy of Clifford Click
+@@ -36,7 +33,7 @@
+ // Insert node n into block b. Look for projections of n and make sure they
+ // are in b also.
+ void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
+-  // Set basic block of n, Add n to b, 
++  // Set basic block of n, Add n to b,
+   _bbs.map(n->_idx, b);
+   b->add_inst(n);
+ 
+@@ -45,7 +42,7 @@
+   // float to another block below this one.  Move them up.
+   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+     Node*  use  = n->fast_out(i);
+-    if (use->is_Proj()) {         
++    if (use->is_Proj()) {
+       Block* buse = _bbs[use->_idx];
+       if (buse != b) {              // In wrong block?
+         if (buse != NULL)
+@@ -56,7 +53,7 @@
+     }
+   }
+ }
+-    
++
+ 
+ //------------------------------schedule_pinned_nodes--------------------------
+ // Set the basic block for Nodes pinned into blocks
+@@ -92,7 +89,7 @@
+   assert(b1->_dom_depth < b2->_dom_depth, "sanity");
+   Block* tmp = b2;
+   while (tmp != b1 && tmp != NULL) {
+-    tmp = tmp->_idom;      
++    tmp = tmp->_idom;
+   }
+   if (tmp != b1) {
+     // Detected an unschedulable graph.  Print some nice stuff and die.
+@@ -101,7 +98,7 @@
+       Node* inn = n->in(j); // Get input
+       if (inn == NULL)  continue;  // Ignore NULL, missing inputs
+       Block* inb = bbs[inn->_idx];
+-      tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order, 
++      tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
+                  inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
+       inn->dump();
+     }
+@@ -137,7 +134,7 @@
+ 
+ //------------------------------schedule_early---------------------------------
+ // Find the earliest Block any instruction can be placed in.  Some instructions
+-// are pinned into Blocks.  Unpinned instructions can appear in last block in 
++// are pinned into Blocks.  Unpinned instructions can appear in the last block in
+ // which all their inputs occur.
+ bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
+   // Allocate stack with enough space to avoid frequent realloc
+@@ -162,7 +159,7 @@
+         // While I am here, go ahead and look for Nodes which are taking control
+         // from a is_block_proj Node.  After I inserted RegionNodes to make proper
+         // blocks, the control at a is_block_proj more properly comes from the
+-        // Region being controlled by the block_proj Node.  
++        // Region being controlled by the block_proj Node.
+         const Node *in0 = n->in(0);
+         if (in0 != NULL) {              // Control-dependent?
+           const Node *p = in0->is_block_proj();
+@@ -200,7 +197,7 @@
+       bool done = true;              // Assume all n's inputs will be processed
+       while (i < n->len()) {         // For all inputs
+         Node *in = n->in(i);         // Get input
+-        ++i; 
++        ++i;
+         if (in == NULL) continue;    // Ignore NULL, missing inputs
+         int is_visited = visited.test_set(in->_idx);
+         if (!_bbs.lookup(in->_idx)) { // Missing block selection?
+@@ -211,7 +208,7 @@
+           nstack.push(n, i);         // Save parent node and next input's index.
+           nstack_top_n = in;         // Process current input now.
+           nstack_top_i = 0;
+-          done = false;              // Not all n's inputs processed. 
++          done = false;              // Not all n's inputs processed.
+           break; // continue while_nstack_nonempty;
+         } else if (!is_visited) {    // Input not yet visited?
+           roots.push(in);            // Visit this guy later, using worklist
+@@ -229,11 +226,11 @@
+         }
+ 
+         if (nstack.is_empty()) {
+-          // Finished all nodes on stack. 
++          // Finished all nodes on stack.
+           // Process next node on the worklist 'roots'.
+           break;
+         }
+-        // Get saved parent node and next input's index. 
++        // Get saved parent node and next input's index.
+         nstack_top_n = nstack.node();
+         nstack_top_i = nstack.index();
+         nstack.pop();
+@@ -280,7 +277,7 @@
+   // the Phi?  Well...it's like this.  I do not have true def-use/use-def
+   // chains.  Means I cannot distinguish, from the def-use direction, which
+   // of many use-defs lead from the same use to the same def.  That is, this
+-  // Phi might have several uses of the same def.  Each use appears in a 
++  // Phi might have several uses of the same def.  Each use appears in a
+   // different predecessor block.  But when I enter here, I cannot distinguish
+   // which use-def edge I should find the predecessor block for.  So I find
+   // them all.  Means I do a little extra work if a Phi uses the same value
+@@ -336,8 +333,8 @@
+ // Find the "early" block for a load, if we considered only memory and
+ // address inputs, that is, if other data inputs were ignored.
+ //
+-// Because a subset of edges are considered, the resulting block will 
+-// be earlier (at a shallower dom_depth) than the true schedule_early 
++// Because a subset of edges are considered, the resulting block will
++// be earlier (at a shallower dom_depth) than the true schedule_early
+ // point of the node. We compute this earlier block as a more permissive
+ // site for anti-dependency insertion, but only if subsume_loads is enabled.
+ static Block* memory_early_block(Node* load, Block* early, Block_Array &bbs) {
+@@ -345,10 +342,10 @@
+   Node* index;
+   Node* store = load->in(MemNode::Memory);
+   load->as_Mach()->memory_inputs(base, index);
+-  
++
+   assert(base != NodeSentinel && index != NodeSentinel,
+          "unexpected base/index inputs");
+-  
++
+   Node* mem_inputs[4];
+   int mem_inputs_length = 0;
+   if (base != NULL)  mem_inputs[mem_inputs_length++] = base;
+@@ -359,9 +356,9 @@
+   // which may be null, but always takes up a spot in the in array.
+   if (mem_inputs_length + 1 < (int) load->req()) {
+     // This "load" has more inputs than just the memory, base and index inputs.
+-    // For purposes of checking anti-dependences, we need to start 
+-    // from the early block of only the address portion of the instruction, 
+-    // and ignore other blocks that may have factored into the wider 
++    // For purposes of checking anti-dependences, we need to start
++    // from the early block of only the address portion of the instruction,
++    // and ignore other blocks that may have factored into the wider
+     // schedule_early calculation.
+     if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0);
+ 
+@@ -421,7 +418,7 @@
+     if (VerifyAliases)  assert(load_alias_idx != Compile::AliasIdxBot, "");
+   }
+ #endif
+-  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrComp), 
++  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrComp),
+          "String compare is only known 'load' that does not conflict with any stores");
+ 
+   if (!C->alias_type(load_alias_idx)->is_rewritable()) {
+@@ -441,8 +438,8 @@
+   Block* early = _bbs[load_index];
+ 
+   // If we are subsuming loads, compute an "early" block that only considers
+-  // memory or address inputs. This block may be different than the 
+-  // schedule_early block in that it could be at an even shallower depth in the 
++  // memory or address inputs. This block may be different than the
++  // schedule_early block in that it could be at an even shallower depth in the
+   // dominator tree, and allow for a broader discovery of anti-dependences.
+   if (C->subsume_loads()) {
+     early = memory_early_block(load, early, _bbs);
+@@ -457,7 +454,7 @@
+ 
+ #ifdef TRACK_PHI_INPUTS
+   // %%% This extra checking fails because MergeMem nodes are not GVNed.
+-  // Provide "phi_inputs" to check if every input to a PhiNode is from the 
++  // Provide "phi_inputs" to check if every input to a PhiNode is from the
+   // original memory state.  This indicates a PhiNode for which should not
+   // prevent the load from sinking.  For such a block, set_raise_LCA_mark
+   // may be overly conservative.
+@@ -548,14 +545,14 @@
+         // Same for SafePoints: they read/write Raw but only read otherwise.
+         // This is basically a workaround for SafePoints only defining control
+         // instead of control + memory.
+-        if (mstore->ideal_Opcode() == Op_SafePoint) 
++        if (mstore->ideal_Opcode() == Op_SafePoint)
+           continue;
+       } else {
+-        // Some raw memory, such as the load of "top" at an allocation, 
+-        // can be control dependent on the previous safepoint. See 
+-        // comments in GraphKit::allocate_heap() about control input.  
+-        // Inserting an anti-dep between such a safepoint and a use 
+-        // creates a cycle, and will cause a subsequent failure in 
++        // Some raw memory, such as the load of "top" at an allocation,
++        // can be control dependent on the previous safepoint. See
++        // comments in GraphKit::allocate_heap() about control input.
++        // Inserting an anti-dep between such a safepoint and a use
++        // creates a cycle, and will cause a subsequent failure in
+         // local scheduling.  (BugId 4919904)
+         // (%%% How can a control input be a safepoint and not a projection??)
+         if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore)
+@@ -600,7 +597,7 @@
+       assert(found_match, "no worklist bug");
+ #ifdef TRACK_PHI_INPUTS
+ #ifdef ASSERT
+-      // This assert asks about correct handling of PhiNodes, which may not 
++      // This assert asks about correct handling of PhiNodes, which may not
+       // have all input edges directly from 'mem'. See BugId 4621264
+       int num_mem_inputs = phi_inputs.at_grow(store->_idx,0) + 1;
+       // Increment by exactly one even if there are multiple copies of 'mem'
+@@ -740,11 +737,11 @@
+   while( 1 ) {
+ 
+     _visited.set(self->_idx);
+-      
++
+     // Now schedule all uses as late as possible.
+-    uint src           = self->is_Proj() ? self->in(0)->_idx : self->_idx;
+-    uint src_pre_order = _bbs[src]->_pre_order;
+-      
++    uint src     = self->is_Proj() ? self->in(0)->_idx : self->_idx;
++    uint src_rpo = _bbs[src]->_rpo;
++
+     // Schedule all nodes in a post-order visit
+     Node *unvisited = NULL;  // Unvisited anti-dependent Node, if any
+ 
+@@ -759,27 +756,27 @@
+ 
+       // do not traverse backward control edges
+       Node *use = n->is_Proj() ? n->in(0) : n;
+-      uint use_pre_order = _bbs[use->_idx]->_pre_order;
++      uint use_rpo = _bbs[use->_idx]->_rpo;
+ 
+-      if ( use_pre_order < src_pre_order )
++      if ( use_rpo < src_rpo )
+         continue;
+ 
+       // Phi nodes always precede uses in a basic block
+-      if ( use_pre_order == src_pre_order && use->is_Phi() )
++      if ( use_rpo == src_rpo && use->is_Phi() )
+         continue;
+ 
+       unvisited = n;      // Found unvisited
+ 
+-      // Check for possible-anti-dependent 
+-      if( !n->needs_anti_dependence_check() ) 
++      // Check for possible-anti-dependent
++      if( !n->needs_anti_dependence_check() )
+         break;            // Not visited, not anti-dep; schedule it NOW
+     }
+-      
++
+     // Did I find an unvisited not-anti-dependent Node?
+-    if ( !unvisited ) 
++    if ( !unvisited )
+       break;                  // All done with children; post-visit 'self'
+ 
+-    // Visit the unvisited Node.  Contains the obvious push to 
++    // Visit the unvisited Node.  Contains the obvious push to
+     // indicate I'm entering a deeper level of recursion.  I push the
+     // old state onto the _stack and set a new state and loop (recurse).
+     _stack.push(self);
+@@ -835,7 +832,7 @@
+ 
+     if (!def || def == n)
+       continue;
+-      
++
+     // Walk backwards thru projections
+     if (def->is_Proj())
+       def = def->in(0);
+@@ -843,7 +840,7 @@
+ #ifndef PRODUCT
+     if (trace_opto_pipelining()) {
+       tty->print("#    in(%2d): ", j);
+-      def->dump(); 
++      def->dump();
+     }
+ #endif
+ 
+@@ -865,7 +862,7 @@
+ #ifndef PRODUCT
+     if (trace_opto_pipelining()) {
+       tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d",
+-                    use_latency, j, delta_latency, current_latency, def->_idx, 
++                    use_latency, j, delta_latency, current_latency, def->_idx,
+                     _node_latency.at_grow(def->_idx));
+     }
+ #endif
+@@ -878,7 +875,7 @@
+   // If self-reference, return no latency
+   if (use == n || use->is_Root())
+     return 0;
+-    
++
+   uint def_pre_order = _bbs[def->_idx]->_pre_order;
+   uint latency = 0;
+ 
+@@ -935,7 +932,7 @@
+   // Set the latency for this instruction
+ #ifndef PRODUCT
+   if (trace_opto_pipelining()) {
+-    tty->print("# latency_from_outputs: node_latency[%d] = %d for node", 
++    tty->print("# latency_from_outputs: node_latency[%d] = %d for node",
+                n->_idx, _node_latency.at_grow(n->_idx));
+     dump();
+   }
+@@ -953,7 +950,7 @@
+ }
+ 
+ //------------------------------hoist_to_cheaper_block-------------------------
+-// Pick a block for node self, between early and LCA, that is a cheaper 
++// Pick a block for node self, between early and LCA, that is a cheaper
+ // alternative to LCA.
+ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
+   const double delta = 1+PROB_UNLIKELY_MAG(4);
+@@ -1056,7 +1053,7 @@
+ 
+ 
+ //------------------------------schedule_late-----------------------------------
+-// Now schedule all codes as LATE as possible.  This is the LCA in the 
++// Now schedule all codes as LATE as possible.  This is the LCA in the
+ // dominator tree of all USES of a value.  Pick the block with the least
+ // loop nesting depth that is lowest in the dominator tree.
+ extern const char must_clone[];
+@@ -1109,7 +1106,7 @@
+         break;
+       }
+     }
+-    
++
+     // Gather LCA of all uses
+     Block *LCA = NULL;
+     {
+@@ -1175,7 +1172,7 @@
+ 
+     // Put the node into target block
+     schedule_node_into_block(self, late);
+-    
++
+ #ifdef ASSERT
+     if (self->needs_anti_dependence_check()) {
+       // since precedence edges are only inserted when we're sure they
+@@ -1230,11 +1227,11 @@
+   _node_latency = node_latency;
+ 
+   if( C->do_scheduling() )
+-    ComputeLatenciesBackwards(visited, stack); 
++    ComputeLatenciesBackwards(visited, stack);
+ 
+-  // Now schedule all codes as LATE as possible.  This is the LCA in the 
++  // Now schedule all codes as LATE as possible.  This is the LCA in the
+   // dominator tree of all USES of a value.  Pick the block with the least
+-  // loop nesting depth that is lowest in the dominator tree.  
++  // loop nesting depth that is lowest in the dominator tree.
+   // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() )
+   schedule_late(visited, stack);
+   if( C->failing() ) {
+@@ -1251,7 +1248,7 @@
+   }
+ #endif
+ 
+-  // Detect implicit-null-check opportunities.  Basically, find NULL checks 
++  // Detect implicit-null-check opportunities.  Basically, find NULL checks
+   // with suitable memory ops nearby.  Use the memory op to do the NULL check.
+   // I can generate a memory op if there is not one nearby.
+   if (C->is_method_compilation()) {
+@@ -1314,323 +1311,457 @@
+ #endif
+ }
+ 
+-#define MAXFREQ BLOCK_FREQUENCY(1e35f)
+-#define MINFREQ BLOCK_FREQUENCY(1e-35f)
+ 
+ //------------------------------Estimate_Block_Frequency-----------------------
+ // Estimate block frequencies based on IfNode probabilities.
+-// Two pass algorithm does a forward propagation in the first pass with some
+-// correction factors where static predictions are needed.  Then, the second
+-// pass pushes through changes caused by back edges.  This will give "exact"
+-// results for all dynamic frequencies, and for all staticly predicted code
+-// with loop nesting depth of one or less.  Static predictions with greater
+-// than nesting depth of one are already subject to so many static fudge
+-// factors that it is not worth iterating to a fixed point.
+ void PhaseCFG::Estimate_Block_Frequency() {
+-  assert( _blocks[0] == _broot, "" );
+   int cnts = C->method() ? C->method()->interpreter_invocation_count() : 1;
+   // Most of our algorithms will die horribly if frequency can become
+   // negative so make sure cnts is a sane value.
+   if( cnts <= 0 ) cnts = 1;
+   float f = (float)cnts/(float)FreqCountInvocations;
+-  _broot->_freq = f;
+-  _broot->_cnt  = f;
+-  // Do a two pass propagation of frequency information
+-  // PASS 1: Walk the blocks in RPO, propagating frequency info
+-  uint i;
+-  for( i = 0; i < _num_blocks; i++ ) {
++
++  // Create the loop tree and calculate loop depth.
++  _root_loop = create_loop_tree();
++  _root_loop->compute_loop_depth(0);
++
++  // Compute block frequency of each block, relative to a single loop entry.
++  _root_loop->compute_freq();
++
++  // Adjust all frequencies to be relative to a single method entry
++  _root_loop->_freq = f * 1.0;
++  _root_loop->scale_freq();
++
++  // force paths ending at uncommon traps to be infrequent
++  Block_List worklist;
++  Block* root_blk = _blocks[0];
++  for (uint i = 0; i < root_blk->num_preds(); i++) {
++    Block *pb = _bbs[root_blk->pred(i)->_idx];
++    if (pb->has_uncommon_code()) {
++      worklist.push(pb);
++    }
++  }
++  while (worklist.size() > 0) {
++    Block* uct = worklist.pop();
++    uct->_freq = PROB_MIN;
++    for (uint i = 0; i < uct->num_preds(); i++) {
++      Block *pb = _bbs[uct->pred(i)->_idx];
++      if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
++        worklist.push(pb);
++      }
++    }
++  }
++
++#ifndef PRODUCT
++  if (PrintCFGBlockFreq) {
++    tty->print_cr("CFG Block Frequencies");
++    _root_loop->dump_tree();
++    if (Verbose) {
++      tty->print_cr("PhaseCFG dump");
++      dump();
++      tty->print_cr("Node dump");
++      _root->dump(99999);
++    }
++  }
++#endif
++}
++
++//----------------------------create_loop_tree--------------------------------
++// Create a loop tree from the CFG
++CFGLoop* PhaseCFG::create_loop_tree() {
++
++#ifdef ASSERT
++  assert( _blocks[0] == _broot, "" );
++  for (uint i = 0; i < _num_blocks; i++ ) {
+     Block *b = _blocks[i];
++    // Check that the _loop fields are clear...we could clear them if not.
++    assert(b->_loop == NULL, "clear _loop expected");
++    // Sanity check that the RPO numbering is reflected in the _blocks array.
++    // It doesn't have to be for the loop tree to be built, but if it is not,
++    // then the blocks have been reordered since dom graph building...which
++    // may question the RPO numbering
++    assert(b->_rpo == i, "unexpected reverse post order number");
++  }
++#endif
++
++  int idct = 0;
++  CFGLoop* root_loop = new CFGLoop(idct++);
++
++  Block_List worklist;
+ 
+-    // Make any necessary modifications to b's frequency
+-    int hop = b->head()->Opcode();
+-    // On first trip, scale loop heads by 10 if no counts are available
+-    if( (hop == Op_Loop || hop == Op_CountedLoop) &&
+-        (b->_cnt == COUNT_UNKNOWN) && (b->_freq < MAXFREQ) ) {
+-      // Try to figure out how much to scale the loop by; look for a
+-      // gating loop-exit test with "reasonable" back-branch
+-      // frequency.
+-
+-      // Try and find a real loop-back controlling edge and use that
+-      // frequency. If we can't find it, use the old default of 10
+-      // otherwise use the new value. This helps loops with low
+-      // frequency (like allocation contention loops with -UseTLE).
+-      // Note special treatment below of LoopNode::EntryControl edges.      
+-      Block *loopprior = b;          
+-      Block *loopback = _bbs[b->pred(LoopNode::LoopBackControl)->_idx];
+-      // See if this block ends in a test (probably not) or just a
+-      // goto the loop head.
+-      if( loopback->_num_succs == 1 &&
+-          loopback->num_preds() == 2 ) {
+-        loopprior = loopback;
+-        // NOTE: constant 1 here isn't magic, it's just that there's exactly 1
+-        // predecessor (checked just above) and predecessors are 1-based, so
+-        // the "1" refers to the first (and only) predecessor.
+-        loopback = _bbs[loopprior->pred(1)->_idx];
+-      }
+-      // Call the edge frequency leading from loopback to loopprior f.
+-      // Then scale the loop by 1/(1-f).  Thus a loop-back edge
+-      // frequency of 0.9 leads to a scale factor of 10.
+-      float f = 0.9f;           // Default scale factor
+-
+-      if( loopback->_num_succs == 2 ) {
+-        int eidx = loopback->end_idx();
+-        Node *mn = loopback->_nodes[eidx]; // Get ending Node
+-        if( mn->is_MachIf() ) {
+-          // MachIfNode has branch probability info
+-          f = mn->as_MachIf()->_prob;
+-          int taken = (loopback->_succs[1] == loopprior);
+-          assert( loopback->_succs[taken] == loopprior, "" );
+-          if( loopback->_nodes[eidx+1+taken]->Opcode() == Op_IfFalse ) 
+-            f = 1-f;              // Inverted branch sense
+-          if( f > 0.99f )         // Limit scale to 100
+-            f = 0.99f;
++  // Assign blocks to loops
++  for(uint i = _num_blocks - 1; i > 0; i-- ) { // skip Root block
++    Block *b = _blocks[i];
++
++    if (b->head()->is_Loop()) {
++      Block* loop_head = b;
++      assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
++      Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
++      Block* tail = _bbs[tail_n->_idx];
++
++      // Defensively filter out Loop nodes for non-single-entry loops.
++      // For all reasonable loops, the head occurs before the tail in RPO.
++      if (i <= tail->_rpo) {
++
++        // The tail and (recursive) predecessors of the tail
++        // are made members of a new loop.
++
++        assert(worklist.size() == 0, "nonempty worklist");
++        CFGLoop* nloop = new CFGLoop(idct++);
++        assert(loop_head->_loop == NULL, "just checking");
++        loop_head->_loop = nloop;
++        // Add to nloop so push_pred() will skip over inner loops
++        nloop->add_member(loop_head);
++        nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, _bbs);
++
++        while (worklist.size() > 0) {
++          Block* member = worklist.pop();
++          if (member != loop_head) {
++            for (uint j = 1; j < member->num_preds(); j++) {
++              nloop->push_pred(member, j, worklist, _bbs);
++            }
++          }
+         }
+       }
+-      
+-      // Scale loop head by this much
+-      b->_freq *= 1/(1-f);
+-      assert(b->_freq > 0.0f,"Bad frequency assignment");
+-    }
+-
+-    // Push b's frequency to successors
+-    int eidx = b->end_idx();    
+-    Node *n = b->_nodes[eidx];  // Get ending Node
+-    int op = n->is_Mach() ? n->as_Mach()->ideal_Opcode() : n->Opcode();
+-    // Switch on branch type
+-    switch( op ) {
+-    // Conditionals pass on only part of their frequency and count
+-    case Op_CountedLoopEnd:
+-    case Op_If: {
+-      int taken  = 0;  // this is the index of the TAKEN path
+-      int ntaken = 1;  // this is the index of the NOT TAKEN path
+-      // If succ[0] is the FALSE branch, invert path info
+-      if( b->_nodes[eidx+1]->Opcode() == Op_IfFalse ) {
+-        taken  = 1;
+-        ntaken = 0;
+-      }
+-      float prob  = n->as_MachIf()->_prob;
+-      float nprob = 1.0f - prob;
+-      float cnt   = n->as_MachIf()->_fcnt;
+-      // If branch frequency info is available, use it
+-      if(cnt != COUNT_UNKNOWN) {
+-        float tcnt = b->_succs[taken]->_cnt;
+-        float ncnt = b->_succs[ntaken]->_cnt;
+-        // Taken Branch
+-        b->_succs[taken]->_freq += prob * cnt;
+-        b->_succs[taken]->_cnt = (tcnt == COUNT_UNKNOWN) ? (prob * cnt) : tcnt + (prob * cnt);
+-        // Not Taken Branch
+-        b->_succs[ntaken]->_freq += nprob * cnt;
+-        b->_succs[ntaken]->_cnt = (ncnt == COUNT_UNKNOWN) ? (nprob * cnt) : ncnt + (nprob * cnt);
+-      }
+-      // Otherwise, split frequency amongst children
+-      else {
+-        b->_succs[taken]->_freq  +=  prob * b->_freq;
+-        b->_succs[ntaken]->_freq += nprob * b->_freq;
+-      }
+-      // Special case for underflow caused by infrequent branches
+-      if(b->_succs[taken]->_freq < MINFREQ) b->_succs[taken]->_freq = MINFREQ;
+-      if(b->_succs[ntaken]->_freq < MINFREQ) b->_succs[ntaken]->_freq = MINFREQ;
+-      assert(b->_succs[0]->has_valid_counts(),"Bad frequency/count");
+-      assert(b->_succs[1]->has_valid_counts(),"Bad frequency/count");
+-      break;
+     }
+-    case Op_NeverBranch:  {
+-      b->_succs[0]->_freq += b->_freq;
+-      // Special case for underflow caused by infrequent branches
+-      if(b->_succs[0]->_freq < MINFREQ) b->_succs[0]->_freq = MINFREQ;
+-      if(b->_succs[1]->_freq < MINFREQ) b->_succs[1]->_freq = MINFREQ;
+-      break;
++  }
++
++  // Create a member list for each loop consisting
++  // of both blocks and (immediate child) loops.
++  for (uint i = 0; i < _num_blocks; i++) {
++    Block *b = _blocks[i];
++    CFGLoop* lp = b->_loop;
++    if (lp == NULL) {
++      // Not assigned to a loop. Add it to the method's pseudo loop.
++      b->_loop = root_loop;
++      lp = root_loop;
+     }
+-      // Split frequency amongst children
+-    case Op_Jump: {
+-      // Divide the frequency between all successors evenly
+-      float predfreq = b->_freq/b->_num_succs;
+-      float predcnt = COUNT_UNKNOWN;
+-      for (uint j = 0; j < b->_num_succs; j++) {
+-        b->_succs[j]->_freq += predfreq;
+-        if (b->_succs[j]->_freq < MINFREQ) {
+-          b->_succs[j]->_freq = MINFREQ;
+-        }
+-        assert(b->_succs[j]->has_valid_counts(), "Bad frequency/count");
++    if (lp == root_loop || b != lp->head()) { // loop heads are already members
++      lp->add_member(b);
++    }
++    if (lp != root_loop) {
++      if (lp->parent() == NULL) {
++        // Not a nested loop. Make it a child of the method's pseudo loop.
++        root_loop->add_nested_loop(lp);
+       }
+-      break;
+-    }      
+-      // Split frequency amongst children
+-    case Op_Catch: {
+-      // Fall-thru path gets the lion's share.
+-      float fall = (1.0f - PROB_UNLIKELY_MAG(5)*b->_num_succs)*b->_freq;
+-      // Exception exits are uncommon.
+-      float expt = PROB_UNLIKELY_MAG(5) * b->_freq;
+-      // Iterate over children pushing out frequency
+-      for( uint j = 0; j < b->_num_succs; j++ ) {
+-        const CatchProjNode *x = b->_nodes[eidx+1+j]->as_CatchProj();
+-        b->_succs[j]->_freq += 
+-          ((x->_con == CatchProjNode::fall_through_index) ? fall : expt);
+-        // Special case for underflow caused by nested catches
+-        if(b->_succs[j]->_freq < MINFREQ) b->_succs[j]->_freq = MINFREQ;
+-        assert(b->_succs[j]->has_valid_counts(), "Bad Catch frequency/count assignment");
++      if (b == lp->head()) {
++        // Add nested loop to member list of parent loop.
++        lp->parent()->add_member(lp);
+       }
+-      break;
+     }
+-    // Pass frequency straight thru to target
+-    case Op_Root:
+-    case Op_Goto: {
+-      Block *bs = b->_succs[0];
+-      int hop = bs->head()->Opcode();
+-      bool notloop = (hop != Op_Loop && hop != Op_CountedLoop);
+-      // Pass count straight thru to target (except for loops)
+-      if( notloop && b->_cnt != COUNT_UNKNOWN ) {
+-        if( bs->_cnt == COUNT_UNKNOWN )
+-          bs->_cnt = 0;
+-        bs->_cnt += b->_cnt;
+-      }
+-      // Loops and counted loops have already had their heads scaled
+-      // by an amount which accounts for the backedge (but not their
+-      // entry).  Add frequency for normal blocks and loop entries.
+-      // Note special treatment above of LoopNode::LoopBackControl edges.
+-      if( notloop || bs->_freq <= 0 /*this is needed for irreducible loops*/||
+-          _bbs[bs->pred(LoopNode::EntryControl)->_idx] == b )
+-        bs->_freq += b->_freq;
++  }
+ 
+-      assert(bs->has_valid_counts(), "Bad goto frequency/count assignment");
+-      break;
++  return root_loop;
++}
++
++//------------------------------push_pred--------------------------------------
++void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk) {
++  Node* pred_n = blk->pred(i);
++  Block* pred = node_to_blk[pred_n->_idx];
++  CFGLoop *pred_loop = pred->_loop;
++  if (pred_loop == NULL) {
++    // Filter out blocks for non-single-entry loops.
++    // For all reasonable loops, the head occurs before the tail in RPO.
++    if (pred->_rpo > head()->_rpo) {
++      pred->_loop = this;
++      worklist.push(pred);
++    }
++  } else if (pred_loop != this) {
++    // Nested loop.
++    while (pred_loop->_parent != NULL && pred_loop->_parent != this) {
++      pred_loop = pred_loop->_parent;
++    }
++    // Make pred's loop be a child
++    if (pred_loop->_parent == NULL) {
++      add_nested_loop(pred_loop);
++      // Continue with loop entry predecessor.
++      Block* pred_head = pred_loop->head();
++      assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
++      assert(pred_head != head(), "loop head in only one loop");
++      push_pred(pred_head, LoopNode::EntryControl, worklist, node_to_blk);
++    } else {
++      assert(pred_loop->_parent == this && _parent == NULL, "just checking");
+     }
+-    // Do not push out freq to root block
+-    case Op_TailCall:
+-    case Op_TailJump:
+-    case Op_Return:
+-    case Op_Halt:
+-    case Op_Rethrow:
+-      break;
+-    default: 
+-      ShouldNotReachHere();
+-    } // End switch(op)
+-    assert(b->has_valid_counts(), "Bad first pass frequency/count");
+-  } // End for all blocks
++  }
++}
+ 
++//------------------------------add_nested_loop--------------------------------
++// Make cl a child of the current loop in the loop tree.
++void CFGLoop::add_nested_loop(CFGLoop* cl) {
++  assert(_parent == NULL, "no parent yet");
++  assert(cl != this, "not my own parent");
++  cl->_parent = this;
++  CFGLoop* ch = _child;
++  if (ch == NULL) {
++    _child = cl;
++  } else {
++    while (ch->_sibling != NULL) { ch = ch->_sibling; }
++    ch->_sibling = cl;
++  }
++}
+ 
+-  // PASS 2: Fix up loop bodies
+-  for( i = 1; i < _num_blocks; i++ ) {
+-    Block *b = _blocks[i];
+-    float freq = 0.0f;
+-    float cnt  = COUNT_UNKNOWN;
+-    // If it ends in a Halt or call marked uncommon, assume the block is uncommon.
+-    Node* be = b->end();
+-    if (be->is_Goto())
+-      be = be->in(0);
+-    if (be->is_Catch())
+-      be = be->in(0);
+-    if (be->is_Proj() && be->in(0)->is_MachCall()) {
+-      MachCallNode* call = be->in(0)->as_MachCall();
+-      if (call->cnt() != COUNT_UNKNOWN && call->cnt() <= PROB_UNLIKELY_MAG(4)) {
+-        // This is true for slow-path stubs like new_{instance,array},
+-        // slow_arraycopy, complete_monitor_locking, uncommon_trap.
+-        // The magic number corresponds to the probability of an uncommon_trap,
+-        // even though it is a count not a probability.
+-        if (b->_freq > BLOCK_FREQUENCY(1e-6))
+-          b->_freq = BLOCK_FREQUENCY(1e-6f);
+-        continue;
++//------------------------------compute_loop_depth-----------------------------
++// Store the loop depth in each CFGLoop object.
++// Recursively walk the children to do the same for them.
++void CFGLoop::compute_loop_depth(int depth) {
++  _depth = depth;
++  CFGLoop* ch = _child;
++  while (ch != NULL) {
++    ch->compute_loop_depth(depth + 1);
++    ch = ch->_sibling;
++  }
++}
++
++//------------------------------compute_freq-----------------------------------
++// Compute the frequency of each block and loop, relative to a single entry
++// into the dominating loop head.
++void CFGLoop::compute_freq() {
++  // Bottom up traversal of loop tree (visit inner loops first.)
++  // Set loop head frequency to 1.0, then transitively
++  // compute frequency for all successors in the loop,
++  // as well as for each exit edge.  Inner loops are
++  // treated as single blocks with loop exit targets
++  // as the successor blocks.
++
++  // Nested loops first
++  CFGLoop* ch = _child;
++  while (ch != NULL) {
++    ch->compute_freq();
++    ch = ch->_sibling;
++  }
++  assert (_members.length() > 0, "no empty loops");
++  Block* hd = head();
++  hd->_freq = 1.0f;
++  for (int i = 0; i < _members.length(); i++) {
++    CFGElement* s = _members.at(i);
++    float freq = s->_freq;
++    if (s->is_block()) {
++      Block* b = s->as_Block();
++      for (uint j = 0; j < b->_num_succs; j++) {
++        Block* sb = b->_succs[j];
++        update_succ_freq(sb, freq * b->succ_prob(j));
++      }
++    } else {
++      CFGLoop* lp = s->as_CFGLoop();
++      assert(lp->_parent == this, "immediate child");
++      for (int k = 0; k < lp->_exits.length(); k++) {
++        Block* eb = lp->_exits.at(k).get_target();
++        float prob = lp->_exits.at(k).get_prob();
++        update_succ_freq(eb, freq * prob);
+       }
+     }
+-    if (be->is_Mach() && be->as_Mach()->ideal_Opcode() == Op_Halt) {
+-      if( b->_freq > BLOCK_FREQUENCY(1e-6) )
+-        b->_freq = BLOCK_FREQUENCY(1e-6f);
+-      continue;
++  }
++
++#if 0
++  // Raise frequency of the loop backedge block, in an effort
++  // to keep it empty.  Skip the method level "loop".
++  if (_parent != NULL) {
++    CFGElement* s = _members.at(_members.length() - 1);
++    if (s->is_block()) {
++      Block* bk = s->as_Block();
++      if (bk->_num_succs == 1 && bk->_succs[0] == hd) {
++        // almost any value >= 1.0f works
++        // FIXME: raw constant
++        bk->_freq = 1.05f;
++      }
+     }
++  }
++#endif
+ 
+-    // Recompute frequency based upon predecessors' frequencies
+-    for(uint j = 1; j < b->num_preds(); j++) {
+-      // Compute the frequency passed along this path
+-      Node *pred = b->head()->in(j);
+-      // Peek through projections
+-      if(pred->is_Proj()) pred = pred->in(0);
+-      // Grab the predecessor block's frequency
+-      Block *pblock = _bbs[pred->_idx];
+-      float predfreq = pblock->_freq;
+-      float predcnt = pblock->_cnt;
+-      // Properly modify the frequency for this exit path
+-      int op = pred->is_Mach() ? pred->as_Mach()->ideal_Opcode() : pred->Opcode();
+-      // Switch on branch type
+-      switch(op) {
+-      // Conditionals pass on only part of their frequency and count
+-      case Op_CountedLoopEnd:
+-      case Op_If: {
+-        float prob = pred->as_MachIf()->_prob;
+-        float cnt  = pred->as_MachIf()->_fcnt;
+-        bool path  = true;
+-        // Is this the TRUE branch or the FALSE branch?
+-        if( b->head()->in(j)->Opcode() == Op_IfFalse )
+-          path = false;
+-        // If branch frequency info is available, use it
+-        if(cnt != COUNT_UNKNOWN) {
+-          predfreq = (path) ? (prob * cnt) : ((1.0f-prob) * cnt);
+-          predcnt  = (path) ? (prob * cnt) : ((1.0f-prob) * cnt);
+-        }
+-        // Otherwise, split frequency amongst children
+-        else {
+-          predfreq = (path) ? (prob * predfreq) : ((1.0f-prob) * predfreq);
+-          predcnt  = COUNT_UNKNOWN;
+-        }
+-        if( predfreq < MINFREQ ) predfreq = MINFREQ;
++  // For all loops other than the outer, "method" loop,
++  // sum and normalize the exit probability. The "method" loop
++  // should keep the initial exit probability of 1, so that
++  // inner blocks do not get erroneously scaled.
++  if (_depth != 0) {
++    // Total the exit probabilities for this loop.
++    float exits_sum = 0.0f;
++    for (int i = 0; i < _exits.length(); i++) {
++      exits_sum += _exits.at(i).get_prob();
++    }
++
++    // Normalize the exit probabilities. Until now, the
++    // probabilities estimate the possibility of exit per
++    // single loop iteration; afterward, they estimate
++    // the probability of exit per loop entry.
++    for (int i = 0; i < _exits.length(); i++) {
++      Block* et = _exits.at(i).get_target();
++      float new_prob = _exits.at(i).get_prob() / exits_sum;
++      BlockProbPair bpp(et, new_prob);
++      _exits.at_put(i, bpp);
++    }
++
++    // Save the total, but guard against unreasonable probability,
++    // as the value is used to estimate the loop trip count.
++    // An infinite trip count would blur relative block
++    // frequencies.
++    if (exits_sum > 1.0f) exits_sum = 1.0f;
++    if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
++    _exit_prob = exits_sum;
++  }
++}
+ 
+-        // Raise frequency of the loop backedge block, in an effort
+-        // to keep it empty.  Must raise it by 10%+ because counted
+-        // loops normally keep a 90/10 exit ratio.
+-        if( op == Op_CountedLoopEnd && b->num_preds() == 2 && path == true )
+-          predfreq *= 1.15f;
+-        break;
+-      }
+-        // Catch splits frequency amongst multiple children
+-      case Op_Jump: {
+-        // Divide the frequency between all successors evenly
+-        predfreq = predfreq / pblock->_num_succs;
+-        predcnt = COUNT_UNKNOWN;
+-        if (predfreq < MINFREQ) predfreq = MINFREQ;
+-        break;
+-      }
+-      // Catch splits frequency amongst multiple children, favoring
+-      // fall through
+-      case Op_Catch: {
+-        // Fall-thru path gets the lion's share.
+-        float fall  = (1.0f - PROB_UNLIKELY_MAG(5)*pblock->_num_succs)*predfreq;
+-        // Exception exits are uncommon.
+-        float expt  = PROB_UNLIKELY_MAG(5) * predfreq;
+-        // Determine if this is fall-thru path
+-        const CatchProjNode *x = b->head()->in(j)->as_CatchProj();
+-        predfreq = (x->_con == CatchProjNode::fall_through_index) ? fall :expt;
+-        predcnt  = COUNT_UNKNOWN;
+-        if(predfreq < MINFREQ) predfreq = MINFREQ;
+-        break;
+-      }
+-      // Pass frequency straight thru to target
+-      case Op_Root:
+-      case Op_Goto:
+-      case Op_Start:
+-      case Op_NeverBranch:
+-        break;
+-      // These do not push out a frequency or count
+-      case Op_TailCall:
+-      case Op_TailJump:
+-      case Op_Return:
+-      case Op_Halt:
+-      case Op_Rethrow:
+-        predfreq = 0.0f;
+-        predcnt = COUNT_UNKNOWN;
+-        break;
+-      default: 
+-        ShouldNotReachHere();
+-      } // End switch(op)
+-      assert(predfreq > 0.0f,"Bad intermediate frequency");
+-      assert((predcnt > 0.0f) || (predcnt == COUNT_UNKNOWN),"Bad intermediate count");
+-      // Accumulate frequency from predecessor block
+-      freq += predfreq;
+-      if (predcnt != COUNT_UNKNOWN) {
+-        cnt = (cnt == COUNT_UNKNOWN) ? predcnt : cnt + predcnt;
+-      }
+-    }
+-    // Assign new frequency
+-    b->_freq = freq;
+-    b->_cnt = cnt;
+-    assert(b->has_valid_counts(), "Bad final frequency/count assignment");
+-  } // End for all blocks
++//------------------------------succ_prob-------------------------------------
++// Determine the probability of reaching successor 'i' from the receiver block.
++float Block::succ_prob(uint i) {
++  int eidx = end_idx();
++  Node *n = _nodes[eidx];  // Get ending Node
++  int op = n->is_Mach() ? n->as_Mach()->ideal_Opcode() : n->Opcode();
++
++  // Switch on branch type
++  switch( op ) {
++  case Op_CountedLoopEnd:
++  case Op_If: {
++    assert (i < 2, "just checking");
++    // Conditionals pass on only part of their frequency
++    float prob  = n->as_MachIf()->_prob;
++    assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
++    // If succ[i] is the FALSE branch, invert path info
++    if( _nodes[i + eidx + 1]->Opcode() == Op_IfFalse ) {
++      return 1.0f - prob; // not taken
++    } else {
++      return prob; // taken
++    }
++  }
++
++  case Op_Jump:
++    // Divide the frequency between all successors evenly
++    return 1.0f/_num_succs;
++
++  case Op_Catch: {
++    const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
++    if (ci->_con == CatchProjNode::fall_through_index) {
++      // Fall-thru path gets the lion's share.
++      return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
++    } else {
++      // Presume exceptional paths are equally unlikely
++      return PROB_UNLIKELY_MAG(5);
++    }
++  }
++
++  case Op_Root:
++  case Op_Goto:
++    // Pass frequency straight thru to target
++    return 1.0f;
++
++  case Op_NeverBranch:
++    return 0.0f;
++
++  case Op_TailCall:
++  case Op_TailJump:
++  case Op_Return:
++  case Op_Halt:
++  case Op_Rethrow:
++    // Do not push out freq to root block
++    return 0.0f;
++
++  default:
++    ShouldNotReachHere();
++  }
++
++  return 0.0f;
++}
++
++//------------------------------update_succ_freq-------------------------------
++// Update the appropriate frequency associated with block 'b', a successor of
++// a block in this loop.
++void CFGLoop::update_succ_freq(Block* b, float freq) {
++  if (b->_loop == this) {
++    if (b == head()) {
++      // back branch within the loop
++      // Do nothing now; the loop-carried frequency will be
++      // adjusted later in scale_freq().
++    } else {
++      // simple branch within the loop
++      b->_freq += freq;
++    }
++  } else if (!in_loop_nest(b)) {
++    // branch is exit from this loop
++    BlockProbPair bpp(b, freq);
++    _exits.append(bpp);
++  } else {
++    // branch into nested loop
++    CFGLoop* ch = b->_loop;
++    ch->_freq += freq;
++  }
+ }
++
++//------------------------------in_loop_nest-----------------------------------
++// Determine if block b is in the receiver's loop nest.
++bool CFGLoop::in_loop_nest(Block* b) {
++  int depth = _depth;
++  CFGLoop* b_loop = b->_loop;
++  int b_depth = b_loop->_depth;
++  if (depth == b_depth) {
++    return true;
++  }
++  while (b_depth > depth) {
++    b_loop = b_loop->_parent;
++    b_depth = b_loop->_depth;
++  }
++  return b_loop == this;
++}
++
++//------------------------------scale_freq-------------------------------------
++// Scale frequency of loops and blocks by trip counts from outer loops
++// Do a top down traversal of loop tree (visit outer loops first.)
++void CFGLoop::scale_freq() {
++  float loop_freq = _freq * trip_count();
++  for (int i = 0; i < _members.length(); i++) {
++    CFGElement* s = _members.at(i);
++    s->_freq *= loop_freq;
++  }
++  CFGLoop* ch = _child;
++  while (ch != NULL) {
++    ch->scale_freq();
++    ch = ch->_sibling;
++  }
++}
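
To make the scaling concrete, here is a self-contained sketch under simplified assumptions (ToyLoop is invented; freq is entries per parent-loop iteration, trip_count is iterations per entry). Each loop's members, blocks and child loops alike, are multiplied by freq * trip_count before the children are visited, so a block in a 10-trip loop inside a 5-trip loop ends up with frequency 50.

#include <cstdio>
#include <vector>

struct ToyLoop {
  float freq;
  float trip_count;
  std::vector<float> block_freqs;   // per-iteration frequencies of member blocks
  std::vector<ToyLoop*> children;
};

// Top-down pass: scale this loop's members, then recurse into children,
// whose own freq has just been scaled as a member of this loop.
void scale_freq(ToyLoop* l) {
  float loop_freq = l->freq * l->trip_count;
  for (float& f : l->block_freqs) f *= loop_freq;
  for (ToyLoop* ch : l->children) {
    ch->freq *= loop_freq;          // the child loop is itself a member
    scale_freq(ch);
  }
}

int main() {
  ToyLoop inner{1.0f, 10.0f, {1.0f}, {}};
  ToyLoop outer{1.0f, 5.0f, {1.0f}, {&inner}};
  scale_freq(&outer);
  // Outer block runs 5 times; inner block runs 5 * 10 = 50 times.
  std::printf("outer: %.0f  inner: %.0f\n",
              outer.block_freqs[0], inner.block_freqs[0]);
}
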
++
++#ifndef PRODUCT
++//------------------------------dump_tree--------------------------------------
++void CFGLoop::dump_tree() const {
++  dump();
++  if (_child != NULL)   _child->dump_tree();
++  if (_sibling != NULL) _sibling->dump_tree();
++}
++
++//------------------------------dump-------------------------------------------
++void CFGLoop::dump() const {
++  for (int i = 0; i < _depth; i++) tty->print("   ");
++  tty->print("%s: %d  trip_count: %6.0f freq: %6.0f\n",
++             _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq);
++  for (int i = 0; i < _depth; i++) tty->print("   ");
++  tty->print("         members:", _id);
++  int k = 0;
++  for (int i = 0; i < _members.length(); i++) {
++    if (k++ >= 6) {
++      tty->print("\n              ");
++      for (int j = 0; j < _depth+1; j++) tty->print("   ");
++      k = 0;
++    }
++    CFGElement *s = _members.at(i);
++    if (s->is_block()) {
++      Block *b = s->as_Block();
++      tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq);
++    } else {
++      CFGLoop* lp = s->as_CFGLoop();
++      tty->print(" L%d(%6.3f)", lp->_id, lp->_freq);
++    }
++  }
++  tty->print("\n");
++  for (int i = 0; i < _depth; i++) tty->print("   ");
++  tty->print("         exits:  ");
++  k = 0;
++  for (int i = 0; i < _exits.length(); i++) {
++    if (k++ >= 7) {
++      tty->print("\n              ");
++      for (int j = 0; j < _depth+1; j++) tty->print("   ");
++      k = 0;
++    }
++    Block *blk = _exits.at(i).get_target();
++    float prob = _exits.at(i).get_prob();
++    tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
++  }
++  tty->print("\n");
++}
++#endif
+diff -ruN openjdk6/hotspot/src/share/vm/opto/generateOptoStub.cpp openjdk/hotspot/src/share/vm/opto/generateOptoStub.cpp
+--- openjdk6/hotspot/src/share/vm/opto/generateOptoStub.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/generateOptoStub.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)generateOptoStub.cpp	1.101 07/05/05 17:06:17 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -50,7 +47,7 @@
+   assert(SynchronizationEntryBCI == InvocationEntryBci, "");
+   JVMState* jvms = new (C) JVMState(0);
+   jvms->set_bci(InvocationEntryBci);
+-  jvms->set_monoff(max_map); 
++  jvms->set_monoff(max_map);
+   jvms->set_endoff(max_map);
+   {
+     SafePointNode *map = new (C, max_map) SafePointNode( max_map, jvms );
+@@ -61,7 +58,7 @@
+ 
+   // Make up the parameters
+   uint i;
+-  for( i = 0; i < parm_cnt; i++ ) 
++  for( i = 0; i < parm_cnt; i++ )
+     map()->init_req(i, _gvn.transform(new (C, 1) ParmNode(start, i)));
+   for( ; i<map()->req(); i++ )
+     map()->init_req(i, top());      // For nicer debugging
+@@ -74,15 +71,15 @@
+ 
+   const int NoAlias = Compile::AliasIdxBot;
+ 
+-  Node* adr_last_Java_pc = basic_plus_adr(top(), 
+-					    thread, 
+-					    in_bytes(JavaThread::frame_anchor_offset()) +
+-					    in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
++  Node* adr_last_Java_pc = basic_plus_adr(top(),
++                                            thread,
++                                            in_bytes(JavaThread::frame_anchor_offset()) +
++                                            in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
+ #if defined(SPARC) || defined(IA64)
+-  Node* adr_flags = basic_plus_adr(top(), 
+-				   thread, 
+-				   in_bytes(JavaThread::frame_anchor_offset()) +
+-				   in_bytes(JavaFrameAnchor::flags_offset()));
++  Node* adr_flags = basic_plus_adr(top(),
++                                   thread,
++                                   in_bytes(JavaThread::frame_anchor_offset()) +
++                                   in_bytes(JavaFrameAnchor::flags_offset()));
+ #endif /* defined(SPARC) || defined(IA64) */
+ 
+ 
+@@ -115,7 +112,7 @@
+   // Also pass in the caller's PC, if asked for.
+   if( return_pc )
+     fields[cnt++] = TypeRawPtr::BOTTOM; // Return PC
+-  
++
+   const TypeTuple* domain = TypeTuple::make(cnt,fields);
+   // The C routine we are about to call cannot return an oop; it can block on
+   // exit and a GC will trash the oop while it sits in C-land.  Instead, we
+@@ -140,7 +137,7 @@
+ 
+   } else if( jrange->cnt() >= TypeFunc::Parms+1 ) { // Else copy other types
+     rfields[TypeFunc::Parms] = jrange->field_at(TypeFunc::Parms);
+-    if( jrange->cnt() == TypeFunc::Parms+2 ) 
++    if( jrange->cnt() == TypeFunc::Parms+2 )
+       rfields[TypeFunc::Parms+1] = jrange->field_at(TypeFunc::Parms+1);
+   }
+   const TypeTuple* range = TypeTuple::make(jrange->cnt(),rfields);
+@@ -158,7 +155,7 @@
+   call->set_jvms( new (C) JVMState(0) );
+   call->jvms()->set_bci(0);
+   call->jvms()->set_offsets(cnt);
+-  
++
+   // Set fixed predefined input arguments
+   cnt = 0;
+   for( i=0; i<TypeFunc::Parms; i++ )
+@@ -292,4 +289,3 @@
+   }
+   root()->add_req(_gvn.transform(ret));
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/opto/graphKit.cpp openjdk/hotspot/src/share/vm/opto/graphKit.cpp
+--- openjdk6/hotspot/src/share/vm/opto/graphKit.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/graphKit.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)graphKit.cpp	1.128 07/05/17 17:43:32 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -538,15 +535,14 @@
+       Node*              ex_node = _gvn.transform(new (C, 1) ConPNode(ex_con));
+ 
+       // Clear the detail message of the preallocated exception object.
+-      // Weblogic sometimes mutates the detail message of exceptions 
++      // Weblogic sometimes mutates the detail message of exceptions
+       // using reflection.
+       int offset = java_lang_Throwable::get_detailMessage_offset();
+-      uint alias_idx = C->get_alias_index(ex_con->add_offset(offset));
+-      
++      const TypePtr* adr_typ = ex_con->add_offset(offset);
++
+       Node *adr = basic_plus_adr(ex_node, ex_node, offset);
+-      Node *store = store_to_memory(control(), adr, null(), T_OBJECT, alias_idx, false);
+-      store_barrier(store, T_OBJECT, ex_node, adr, null());
+-        
++      Node *store = store_oop_to_object(control(), ex_node, adr, adr_typ, null(), ex_con, T_OBJECT);
++
+       add_exception_state(make_exception_state(ex_node));
+       return;
+     }
+@@ -634,7 +630,7 @@
+ 
+ SafePointNode* GraphKit::clone_map() {
+   if (map() == NULL)  return NULL;
+-  
++
+   // Clone the memory edge first
+   Node* mem = MergeMemNode::make(C, map()->memory());
+   gvn().set_type_bottom(mem);
+@@ -691,7 +687,7 @@
+   // should help register allocation time and cut down on the size
+   // of the deoptimization information.
+   MethodLivenessResult live_locals = method()->liveness_at_bci(bci());
+-  
++
+   int len = (int)live_locals.size();
+   assert(len <= jvms()->loc_size(), "too many live locals");
+   for (int local = 0; local < len; local++) {
+@@ -1052,10 +1048,10 @@
+ }
+ 
+ //------------------------------do_null_check----------------------------------
+-// Helper function to do a NULL pointer check.  Returned value is 
++// Helper function to do a NULL pointer check.  Returned value is
+ // the incoming address with NULL casted away.  You are allowed to use the
+ // not-null value only if you are control dependent on the test.
+-extern int explicit_null_checks_inserted, 
++extern int explicit_null_checks_inserted,
+            explicit_null_checks_elided;
+ Node* GraphKit::null_check_common(Node* value, BasicType type,
+                                   // optional arguments for variations:
+@@ -1108,7 +1104,7 @@
+         // See if the type is contained in NULL_PTR.
+         // If so, then the value is already null.
+         if (t->higher_equal(TypePtr::NULL_PTR)) {
+-          explicit_null_checks_elided++;  
++          explicit_null_checks_elided++;
+           return value;           // Elided null assert quickly!
+         }
+       } else {
+@@ -1117,17 +1113,17 @@
+         // type.  In other words, "value" was not-null.
+         if (t->meet(TypePtr::NULL_PTR) != t) {
+           // same as: if (!TypePtr::NULL_PTR->higher_equal(t)) ...
+-          explicit_null_checks_elided++;  
++          explicit_null_checks_elided++;
+           return value;           // Elided null check quickly!
+         }
+       }
+       chk = new (C, 3) CmpPNode( value, null() );
+-      break;    
++      break;
+     }
+ 
+     default      : ShouldNotReachHere();
+   }
+-  assert(chk != NULL, "sanity check"); 
++  assert(chk != NULL, "sanity check");
+   chk = _gvn.transform(chk);
+ 
+   BoolTest::mask btest = assert_null ? BoolTest::eq : BoolTest::ne;
+@@ -1156,7 +1152,7 @@
+         set_control(cfg);
+         Node *res = cast_not_null(value);
+         set_control(oldcontrol);
+-        explicit_null_checks_elided++;  
++        explicit_null_checks_elided++;
+         return res;
+       }
+       cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true);
+@@ -1231,7 +1227,7 @@
+       replace_in_map(value, cast);
+     value = cast;
+   }
+-      
++
+   return value;
+ }
+ 
+@@ -1288,7 +1284,7 @@
+   Node* mem = map()->memory();
+   // do not use this node for any more parsing!
+   debug_only( map()->set_memory((Node*)NULL) );
+-  return _gvn.transform( mem ); 
++  return _gvn.transform( mem );
+ }
+ 
+ //------------------------------set_all_memory---------------------------------
+@@ -1346,12 +1342,108 @@
+   set_memory(st, adr_idx);
+   // Back-to-back stores can only remove intermediate store with DU info
+   // so push on worklist for optimizer.
+-  if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))  
++  if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
+     record_for_igvn(st);
+ 
+   return st;
+ }
+ 
++void GraphKit::pre_barrier(Node* ctl,
++                           Node* obj,
++                           Node* adr,
++                           uint adr_idx,
++                           Node *val,
++                           const Type* val_type,
++                           BasicType bt) {
++  BarrierSet* bs = Universe::heap()->barrier_set();
++  set_control(ctl);
++  switch (bs->kind()) {
++
++    case BarrierSet::CardTableModRef:
++    case BarrierSet::CardTableExtension:
++    case BarrierSet::ModRef:
++      break;
++
++    case BarrierSet::Other:
++    default      :
++      ShouldNotReachHere();
++
++  }
++}
++
++void GraphKit::post_barrier(Node* ctl,
++                            Node* store,
++                            Node* obj,
++                            Node* adr,
++                            uint adr_idx,
++                            Node *val,
++                            BasicType bt,
++                            bool use_precise) {
++  BarrierSet* bs = Universe::heap()->barrier_set();
++  set_control(ctl);
++  switch (bs->kind()) {
++
++    case BarrierSet::CardTableModRef:
++    case BarrierSet::CardTableExtension:
++      write_barrier_post(store, obj, adr, val, use_precise);
++      break;
++
++    case BarrierSet::ModRef:
++      break;
++
++    case BarrierSet::Other:
++    default      :
++      ShouldNotReachHere();
++
++  }
++}
++
++Node* GraphKit::store_oop_to_object(Node* ctl,
++                                    Node* obj,
++                                    Node* adr,
++                                    const TypePtr* adr_type,
++                                    Node *val,
++                                    const Type* val_type,
++                                    BasicType bt) {
++  uint adr_idx = C->get_alias_index(adr_type);
++  Node* store;
++  pre_barrier(ctl, obj, adr, adr_idx, val, val_type, bt);
++  store = store_to_memory(control(), adr, val, bt, adr_idx);
++  post_barrier(control(), store, obj, adr, adr_idx, val, bt, false);
++  return store;
++}
++
++Node* GraphKit::store_oop_to_array(Node* ctl,
++                                   Node* obj,
++                                   Node* adr,
++                                   const TypePtr* adr_type,
++                                   Node *val,
++                                   const Type* val_type,
++                                   BasicType bt) {
++  uint adr_idx = C->get_alias_index(adr_type);
++  Node* store;
++  pre_barrier(ctl, obj, adr, adr_idx, val, val_type, bt);
++  store = store_to_memory(control(), adr, val, bt, adr_idx);
++  post_barrier(control(), store, obj, adr, adr_idx, val, bt, true);
++  return store;
++}
++
++Node* GraphKit::store_oop_to_unknown(Node* ctl,
++                                     Node* obj,
++                                     Node* adr,
++                                     const TypePtr* adr_type,
++                                     Node *val,
++                                     const Type* val_type,
++                                     BasicType bt) {
++  uint adr_idx = C->get_alias_index(adr_type);
++  Node* store;
++  pre_barrier(ctl, obj, adr, adr_idx, val, val_type, bt);
++  store = store_to_memory(control(), adr, val, bt, adr_idx);
++  post_barrier(control(), store, obj, adr, adr_idx, val, bt, true);
++  return store;
++}
++
++
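The three store_oop_to_* helpers above all reduce to the same bracket: pre-barrier, plain store, post-barrier, differing only in whether the card mark is precise. A standalone sketch of that bracket follows; kCardShift = 9 (512-byte cards, the granularity HotSpot's card table conventionally uses), while ToyHeap and the flat card array are invented for illustration.

#include <cstddef>
#include <cstdint>

constexpr int kCardShift = 9;

struct ToyHeap {
  uint8_t cards[1 << 10];   // toy card table; nonzero == clean

  // Imprecise marks (plain instances) dirty the card of the object head,
  // so the scanner revisits the whole object; precise marks (arrays and
  // unknown targets) dirty the card of the stored-to field itself.
  void post_barrier(const void* obj, const void* field, bool use_precise) {
    const void* mark = use_precise ? field : obj;
    size_t idx = (reinterpret_cast<uintptr_t>(mark) >> kCardShift)
                 % (sizeof cards);
    cards[idx] = 0;          // 0 == dirty
  }

  // Pre-barrier would run first; for the card-table collectors handled
  // above it is a no-op, so only the store and the post-barrier remain.
  template <typename T>
  void store_oop(void* obj, T* field, T val, bool use_precise) {
    *field = val;
    post_barrier(obj, field, use_precise);   // flag a possible old-to-new ref
  }
};
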
+ //-------------------------array_element_address-------------------------
+ Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
+                                       const TypeInt* sizetype) {
+@@ -1366,7 +1458,7 @@
+   }
+ 
+   // must be correct type for alignment purposes
+-  Node* base  = basic_plus_adr(ary, header); 
++  Node* base  = basic_plus_adr(ary, header);
+ #ifdef _LP64
+   // The scaled index operand to AddP must be a clean 64-bit value.
+   // Java allows a 32-bit int to be incremented to a negative
+@@ -1513,7 +1605,7 @@
+     // This is not a "slow path" call; all memory comes from the call.
+     set_all_memory_call(call);
+   }
+-} 
++}
+ 
+ //------------------------------increment_counter------------------------------
+ // for statistics: increment a VM counter by 1
+@@ -1534,10 +1626,11 @@
+ //------------------------------uncommon_trap----------------------------------
+ // Bail out to the interpreter in mid-method.  Implemented by calling the
+ // uncommon_trap blob.  This helper function inserts a runtime call with the
+-// right debug info.  
++// right debug info.
+ void GraphKit::uncommon_trap(int trap_request,
+                              ciKlass* klass, const char* comment,
+-                             bool must_throw) {
++                             bool must_throw,
++                             bool keep_exact_action) {
+   if (failing())  stop();
+   if (stopped())  return; // trap reachable?
+ 
+@@ -1564,8 +1657,11 @@
+   switch (action) {
+   case Deoptimization::Action_maybe_recompile:
+   case Deoptimization::Action_reinterpret:
+-    if (Deoptimization::trap_request_index(trap_request) < 0
+-        && too_many_recompiles(reason)) {
++    // Temporary fix for 6529811: make sure virtual calls get the
++    // chance to go from mono->bi->mega
++    if (!keep_exact_action &&
++        Deoptimization::trap_request_index(trap_request) < 0 &&
++        too_many_recompiles(reason)) {
+       // This BCI is causing too many recompilations.
+       action = Deoptimization::Action_none;
+       trap_request = Deoptimization::make_trap_request(reason, action);
+@@ -1646,30 +1742,55 @@
+ }
+ 
+ 
++//--------------------------just_allocated_object------------------------------
++// Report the object that was just allocated.
++// It must be the case that there are no intervening safepoints.
++// We use this to determine if an object is so "fresh" that
++// it does not require card marks.
++Node* GraphKit::just_allocated_object(Node* current_control) {
++  if (C->recent_alloc_ctl() == current_control)
++    return C->recent_alloc_obj();
++  return NULL;
++}
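
A small sketch of the elision test this enables (RecentAlloc is an invented stand-in for Compile's recent_alloc_ctl/recent_alloc_obj pair): a store whose base is the object allocated under the current control point cannot create an old-to-new pointer, so its card mark can be skipped.

struct RecentAlloc {
  const void* ctl = nullptr;   // control active when the object was made
  const void* obj = nullptr;   // the freshly allocated object
};

inline const void* just_allocated_object(const RecentAlloc& ra,
                                         const void* current_ctl) {
  return (ra.ctl == current_ctl) ? ra.obj : nullptr;
}
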
++
++
+ //------------------------------store_barrier----------------------------------
+-// Insert a write-barrier store.  This is to let generational GC work; we have 
++// Insert a write-barrier store.  This is to let generational GC work; we have
+ // to flag all oop-stores before the next GC point.
+-void GraphKit::store_barrier(Node* oop_store, BasicType obj_type,
+-                             Node* obj, Node* adr, Node* val) {
++void GraphKit::write_barrier_post(Node* oop_store, Node* obj, Node* adr,
++                                  Node* val, bool use_precise) {
+   // No store check needed if we're storing a NULL or an old object
+   // (latter case is probably a string constant). The concurrent
+   // mark sweep garbage collector, however, needs to have all nonNull
+   // oop updates flagged via card-marks.
+-  if (val != NULL && val->is_Con() &&
+-      (!UseConcMarkSweepGC || val->bottom_type() == TypePtr::NULL_PTR)) {
++  if (val != NULL && val->is_Con()) {
+     // must be either an oop or NULL
+     const Type* t = val->bottom_type();
+-    assert( t == Type::TOP || t == TypePtr::NULL_PTR || t->is_oopptr()->const_oop() != NULL,
+-            "must be either a constant oop or NULL");
+-    // no store barrier needed, because no old-to-new ref created
++    if (t == TypePtr::NULL_PTR || t == Type::TOP)
++      // stores of null never (?) need barriers
++      return;
++    ciObject* con = t->is_oopptr()->const_oop();
++    if (con != NULL
++        && con->is_perm()
++        && Universe::heap()->can_elide_permanent_oop_store_barriers())
++      // no store barrier needed, because no old-to-new ref created
++      return;
++  }
++
++  if (use_ReduceInitialCardMarks()
++      && obj == just_allocated_object(control())) {
++    // We can skip marks on a freshly-allocated object.
++    // Keep this code in sync with do_eager_card_mark in runtime.cpp.
++    // That routine eagerly marks the occasional object which is produced
++    // by the slow path, so that we don't have to do it here.
+     return;
+   }
+ 
+-  if (obj_type == T_OBJECT) {
++  if (!use_precise) {
+     // All card marks for a (non-array) instance are in one place:
+     adr = obj;
+   }
+-  // (Else it's an array, and we want more precise card marks.)
++  // (Else it's an array (or unknown), and we want more precise card marks.)
+   assert(adr != NULL, "");
+ 
+   // Get the alias_index for raw card-mark memory
+@@ -1681,9 +1802,8 @@
+          "Only one we handle so far.");
+   CardTableModRefBS* ct =
+     (CardTableModRefBS*)(Universe::heap()->barrier_set());
+-  Node *a = _gvn.transform(new (C, 3) URShiftXNode( cast, _gvn.intcon(CardTableModRefBS::card_shift) ));
++  Node *b = _gvn.transform(new (C, 3) URShiftXNode( cast, _gvn.intcon(CardTableModRefBS::card_shift) ));
+   // We store into a byte array, so do not bother to left-shift by zero
+-  Node *b = a;
+   // Get base of card map
+   assert(sizeof(*ct->byte_map_base) == sizeof(jbyte),
+          "adjust this code");
+@@ -1717,9 +1837,9 @@
+   Node *store = _gvn.transform( new (C, 5) StoreCMNode(ctl, mem, adr, type, val, oop_store) );
+   set_memory(store, adr_idx);
+ 
+-  // For CMS, back-to-back card-marks can only remove the first one 
++  // For CMS, back-to-back card-marks can only remove the first one
+   // and this requires DU info.  Push on worklist for optimizer.
+-  if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))  
++  if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
+     record_for_igvn(store);
+ }
+ 
+@@ -1741,7 +1861,7 @@
+ }
+ 
+ void GraphKit::round_double_result(ciMethod* dest_method) {
+-  // A non-strict method may return a double value which has an extended 
++  // A non-strict method may return a double value which has an extended
+   // exponent, but this must not be visible in a caller which is 'strict'
+   // If a strict caller invokes a non-strict callee, round a double result
+ 
+@@ -1758,7 +1878,7 @@
+ 
+ // rounding for strict float precision conformance
+ Node* GraphKit::precision_rounding(Node* n) {
+-  return UseStrictFP && _method->flags().is_strict() 
++  return UseStrictFP && _method->flags().is_strict()
+     && UseSSE == 0 && Matcher::strict_fp_requires_explicit_rounding
+     ? _gvn.transform( new (C, 2) RoundFloatNode(0, n) )
+     : n;
+@@ -1766,7 +1886,7 @@
+ 
+ // rounding for strict double precision conformance
+ Node* GraphKit::dprecision_rounding(Node *n) {
+-  return UseStrictFP && _method->flags().is_strict() 
++  return UseStrictFP && _method->flags().is_strict()
+     && UseSSE <= 1 && Matcher::strict_fp_requires_explicit_rounding
+     ? _gvn.transform( new (C, 2) RoundDoubleNode(0, n) )
+     : n;
+@@ -1791,7 +1911,7 @@
+ //                   If
+ //                  /  \
+ //              True    False-<2>
+-//              / |        
++//              / |
+ //             /  cast_not_null
+ //           Load  |    |   ^
+ //        [fast_test]   |   |
+@@ -1801,7 +1921,7 @@
+ //        |         \\  |
+ //   [slow_call]     \[fast_result]
+ //    Ctl   Val       \      \
+-//     |               \      \ 
++//     |               \      \
+ //    Catch       <1>   \      \
+ //   /    \        ^     \      \
+ //  Ex    No_Ex    |      \      \
+@@ -1812,7 +1932,7 @@
+ //              --------Region     Phi
+ //
+ //=============================================================================
+-// Code is structured as a series of driver functions all called 'do_XXX' that 
++// Code is structured as a series of driver functions all called 'do_XXX' that
+ // call a set of helper functions.  Helper functions first, then drivers.
+ 
+ //------------------------------null_check_oop---------------------------------
+@@ -1849,7 +1969,7 @@
+ 
+   // Fast path taken; set region slot 2
+   Node *fast_taken = _gvn.transform( new (C, 1) IfFalseNode(opt_iff) );
+-  region->init_req(2,fast_taken); // Capture fast-control 
++  region->init_req(2,fast_taken); // Capture fast-control
+ 
+   // Fast path not-taken, i.e. slow path
+   Node *slow_taken = _gvn.transform( new (C, 1) IfTrueNode(opt_iff) );
+@@ -2002,9 +2122,9 @@
+     }
+   }
+ 
+-  // Get the no-exception control from the CatchNode. 
++  // Get the no-exception control from the CatchNode.
+   set_control(norm);
+-}  
++}
+ 
+ 
+ //-------------------------------gen_subtype_check-----------------------------
+@@ -2103,7 +2223,7 @@
+   // return those results immediately.
+   if (!might_be_cache) {
+     Node* not_subtype_ctrl = control();
+-    set_control(iftrue1); // We need exactly the 1 test above 
++    set_control(iftrue1); // We need exactly the 1 test above
+     return not_subtype_ctrl;
+   }
+ 
+@@ -2237,7 +2357,7 @@
+ Node* GraphKit::gen_instanceof( Node *subobj, Node* superklass ) {
+   C->set_has_split_ifs(true); // Has chance for split-if optimization
+   assert( !stopped(), "dead parse path should be checked in callers" );
+-  assert(!TypePtr::NULL_PTR->higher_equal(_gvn.type(superklass)->is_klassptr()), 
++  assert(!TypePtr::NULL_PTR->higher_equal(_gvn.type(superklass)->is_klassptr()),
+          "must check for not-null not-dead klass in callers");
+ 
+   // Make the merge point
+@@ -2425,7 +2545,7 @@
+   Node* res = _gvn.transform(phi);
+ 
+   // Note I do NOT always 'replace_in_map(obj,result)' here.
+-  //  if( tk->klass()->can_be_primary_super()  ) 
++  //  if( tk->klass()->can_be_primary_super()  )
+     // This means that if I successfully store an Object into an array-of-String
+     // I 'forget' that the Object is really now known to be a String.  I have to
+     // do this because we don't have true union types for interfaces - if I store
+@@ -2462,7 +2582,7 @@
+   set_all_memory_call(membar);
+   return membar;
+ }
+-  
++
+ //-------------------------insert_mem_bar_volatile----------------------------
+ // Memory barrier to avoid floating things around
+ // The membar serves as a pinch point between both control and memory(alias_idx).
+@@ -2483,6 +2603,7 @@
+   if (alias_idx == Compile::AliasIdxBot) {
+     mb->set_req(TypeFunc::Memory, merged_memory()->base_memory());
+   } else {
++    assert(!(opcode == Op_Initialize && alias_idx != Compile::AliasIdxRaw), "fix caller");
+     mb->set_req(TypeFunc::Memory, memory(alias_idx));
+   }
+   Node* membar = _gvn.transform(mb);
+@@ -2494,7 +2615,7 @@
+   }
+   return membar;
+ }
+-  
++
+ //------------------------------shared_lock------------------------------------
+ // Emit locking code.
+ FastLockNode* GraphKit::shared_lock(Node* obj) {
+@@ -2602,21 +2723,6 @@
+   map()->pop_monitor( );
+ }
+ 
+-//---------------------------set_eden_pointers-------------------------
+-void GraphKit::set_eden_pointers( Node * &eden_top_adr, Node * &eden_end_adr) {
+-  CollectedHeap* ch = Universe::heap();
+-  if( UseTLAB ) {               // Private allocation: load from TLS
+-    Node *thread = _gvn.transform(new (C, 1) ThreadLocalNode());
+-    eden_top_adr = _gvn.transform( new (C, 4) AddPNode(top()/*not oop*/, thread, _gvn.MakeConX( in_bytes(JavaThread::tlab_top_offset()))));
+-    eden_end_adr = _gvn.transform( new (C, 4) AddPNode(top()/*not oop*/, thread, _gvn.MakeConX( in_bytes(JavaThread::tlab_end_offset()))));
+-  } else {                      // Shared allocation: load from globals
+-    address top_adr = (address)ch->top_addr();
+-    address end_adr = (address)ch->end_addr();
+-    eden_top_adr = makecon(TypeRawPtr::make(top_adr));
+-    eden_end_adr = basic_plus_adr( eden_top_adr, eden_top_adr, end_adr - top_adr );
+-  }
+-}
+-
+ //-------------------------------get_layout_helper-----------------------------
+ // If the given klass is a constant or known to be an array,
+ // fetch the constant layout helper value into constant_value
+@@ -2642,6 +2748,19 @@
+   return make_load(NULL, lhp, TypeInt::INT, T_INT);
+ }
+ 
++// We just put in an allocate/initialize with a big raw-memory effect.
++// Hook selected additional alias categories on the initialization.
++static void hook_memory_on_init(GraphKit& kit, int alias_idx,
++                                MergeMemNode* init_in_merge,
++                                Node* init_out_raw) {
++  DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
++  assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");
++
++  Node* prevmem = kit.memory(alias_idx);
++  init_in_merge->set_memory_at(alias_idx, prevmem);
++  kit.set_memory(init_out_raw, alias_idx);
++}
++
+ //---------------------------set_output_for_allocation-------------------------
+ Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
+                                           const TypeOopPtr* oop_type,
+@@ -2663,11 +2782,47 @@
+   // we create a separate i_o projection for the normal control path
+   set_i_o(_gvn.transform( new (C, 1) ProjNode(allocx, TypeFunc::I_O, false) ) );
+   Node* rawoop = _gvn.transform( new (C, 1) ProjNode(allocx, TypeFunc::Parms) );
+- 
++
++  // put in an initialization barrier
++  InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx,
++                                                 rawoop)->as_Initialize();
++  assert(alloc->initialization() == init,  "2-way macro link must work");
++  assert(init ->allocation()     == alloc, "2-way macro link must work");
++  if (ReduceFieldZeroing && !raw_mem_only) {
++    // Extract memory strands which may participate in the new object's
++    // initialization, and source them from the new InitializeNode.
++    // This will allow us to observe initializations when they occur,
++    // and link them properly (as a group) to the InitializeNode.
++    Node* klass_node = alloc->in(AllocateNode::KlassNode);
++    assert(init->in(InitializeNode::Memory) == malloc, "");
++    MergeMemNode* minit_in = MergeMemNode::make(C, malloc);
++    init->set_req(InitializeNode::Memory, minit_in);
++    record_for_igvn(minit_in); // fold it up later, if possible
++    Node* minit_out = memory(rawidx);
++    assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
++    if (oop_type->isa_aryptr()) {
++      const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
++      int            elemidx  = C->get_alias_index(telemref);
++      hook_memory_on_init(*this, elemidx, minit_in, minit_out);
++    } else if (oop_type->isa_instptr()) {
++      ciInstanceKlass* ik = oop_type->klass()->as_instance_klass();
++      for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
++        ciField* field = ik->nonstatic_field_at(i);
++        if (field->offset() >= TrackedInitializationLimit)
++          continue;  // do not bother to track really large numbers of fields
++        // Find (or create) the alias category for this field:
++        int fieldidx = C->alias_type(field)->index();
++        hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
++      }
++    }
++  }
++
+   // Cast raw oop to the real thing...
+   Node* javaoop = new (C, 2) CheckCastPPNode(control(), rawoop, oop_type);
+   javaoop = _gvn.transform(javaoop);
+- 
++  C->set_recent_alloc(control(), javaoop);
++  assert(just_allocated_object(control()) == javaoop, "just allocated");
++
+ #ifdef ASSERT
+   { // Verify that the AllocateNode::Ideal_foo recognizers work:
+     Node* kn = alloc->in(AllocateNode::KlassNode);
+@@ -2686,7 +2841,7 @@
+     }
+   }
+ #endif //ASSERT
+- 
++
+   return javaoop;
+ }
+ 
+@@ -2703,7 +2858,6 @@
+                              Node* extra_slow_test,
+                              bool raw_mem_only, // affect only raw memory
+                              Node* *return_size_val) {
+-
+   // Compute size in doublewords
+   // The size is always an integral number of doublewords, represented
+   // as a positive bytewise size stored in the klass's layout_helper.
+@@ -2713,7 +2867,6 @@
+   int   layout_is_con = (layout_val == NULL);
+ 
+   if (extra_slow_test == NULL)  extra_slow_test = intcon(0);
+-
+   // Generate the initial go-slow test.  It's either ALWAYS (return a
+   // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective
+   // case) a computed value derived from the layout_helper.
+@@ -2761,16 +2914,12 @@
+   const TypeOopPtr* oop_type = tklass->as_instance_type();
+ 
+   // Now generate allocation code
+-  Node *eden_top_adr;
+-  Node *eden_end_adr;
+-  set_eden_pointers(eden_top_adr, eden_end_adr);
+-
+   AllocateNode* alloc
+     = new (C, AllocateNode::ParmLimit)
+         AllocateNode(C, AllocateNode::alloc_type(),
+                      control(), memory(Compile::AliasIdxRaw), i_o(),
+                      size, klass_node,
+-                     initial_slow_test, eden_top_adr, eden_end_adr);
++                     initial_slow_test);
+ 
+   return set_output_for_allocation(alloc, oop_type, raw_mem_only);
+ }
+@@ -2831,11 +2980,12 @@
+   // The rounding mask is strength-reduced, if possible.
+   int round_mask = MinObjAlignmentInBytes - 1;
+   Node* header_size = NULL;
+-  int   header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
+-  // (T_BYTE has the smallest alignment restriction...)
++  int   header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
++  // (T_BYTE has the weakest alignment and size restrictions...)
+   if (layout_is_con) {
+-    int hsize  = Klass::layout_helper_header_size(layout_con);
+-    int eshift = Klass::layout_helper_log2_element_size(layout_con);
++    int       hsize  = Klass::layout_helper_header_size(layout_con);
++    int       eshift = Klass::layout_helper_log2_element_size(layout_con);
++    BasicType etype  = Klass::layout_helper_element_type(layout_con);
+     if ((round_mask & ~right_n_bits(eshift)) == 0)
+       round_mask = 0;  // strength-reduce it if it goes away completely
+     assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
+@@ -2866,6 +3016,17 @@
+   // Transition to native address size for all offset calculations:
+   Node* lengthx = ConvI2X(length);
+   Node* headerx = ConvI2X(header_size);
++#ifdef _LP64
++  { const TypeLong* tllen = _gvn.find_long_type(lengthx);
++    if (tllen != NULL && tllen->_lo < 0) {
++      // Add a manual constraint to a positive range.  Cf. array_element_address.
++      jlong size_max = arrayOopDesc::max_array_length(T_BYTE);
++      if (size_max > tllen->_hi)  size_max = tllen->_hi;
++      const TypeLong* tlcon = TypeLong::make(CONST64(0), size_max, Type::WidenMin);
++      lengthx = _gvn.transform( new (C, 2) ConvI2LNode(length, tlcon));
++    }
++  }
++#endif
+ 
+   // Combine header size (plus rounding) and body size.  Then round down.
+   // This computation cannot overflow, because it is used only in two
+@@ -2886,56 +3047,43 @@
+     (*return_size_val) = size;
+   }
+ 
+-  const TypeInt* length_type = _gvn.type(length)->isa_int();
+-  const TypeInt* pos_length_type = NULL;
+-  if (length_type != NULL) {
+-    pos_length_type = length_type->join(TypeInt::POS)->isa_int();
+-    if (pos_length_type == NULL || pos_length_type->empty()) {
+-      // Optimize only if length is provably non-negative.
+-      length_type     = NULL;
+-      pos_length_type = NULL;
+-    }
+-  }
+-
+-  // Cast to correct type.  Note that the klass_node may be constant or not,
+-  // and in the latter case the actual array type will be inexact also.
+-  // (This happens via a non-constant argument to inline_native_newArray.)
+-  // In any case, the value of klass_node provides the desired array type.
+-  const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
+-  if (ary_type->isa_aryptr() && pos_length_type != NULL) {
+-    // Try to get a better type than POS for the size
+-    ary_type = ary_type->is_aryptr()->cast_to_size(pos_length_type);
+-  }
+-
+   // Now generate allocation code
+-  Node *eden_top_adr;
+-  Node *eden_end_adr;
+-  set_eden_pointers(eden_top_adr, eden_end_adr);
+-
+   // Create the AllocateArrayNode and its result projections
+   AllocateArrayNode* alloc
+     = new (C, AllocateArrayNode::ParmLimit)
+         AllocateArrayNode(C, AllocateArrayNode::alloc_type(),
+                           control(), memory(Compile::AliasIdxRaw), i_o(),
+                           size, klass_node,
+-                          initial_slow_test,  eden_top_adr, eden_end_adr,
++                          initial_slow_test,
+                           length);
+ 
++  // Cast to correct type.  Note that the klass_node may be constant or not,
++  // and in the latter case the actual array type will be inexact also.
++  // (This happens via a non-constant argument to inline_native_newArray.)
++  // In any case, the value of klass_node provides the desired array type.
++  const TypeInt* length_type = _gvn.find_int_type(length);
++  const TypeInt* narrow_length_type = NULL;
++  const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
++  if (ary_type->isa_aryptr() && length_type != NULL) {
++    // Try to get a better type than POS for the size
++    ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
++    narrow_length_type = ary_type->is_aryptr()->size();
++    if (narrow_length_type == length_type)
++      narrow_length_type = NULL;
++  }
++
+   Node* javaoop = set_output_for_allocation(alloc, ary_type, raw_mem_only);
+ 
+-  /*
+-    Not yet.  We don't know what to do when the cast tops out.
+   // Cast length on remaining path to be positive:
+-  if (pos_length_type != NULL &&
+-      pos_length_type != length_type &&
+-      map()->find_edge(length) >= 0) {
+-    Node* ccast = new (C, 2) CastIINode(length, pos_length_type);
++  if (narrow_length_type != NULL) {
++    Node* ccast = new (C, 2) CastIINode(length, narrow_length_type);
+     ccast->set_req(0, control());
+     _gvn.set_type_bottom(ccast);
+     record_for_igvn(ccast);
+-    replace_in_map(length, ccast);
++    if (map()->find_edge(length) >= 0) {
++      replace_in_map(length, ccast);
++    }
+   }
+-  */
+ 
+   return javaoop;
+ }
+@@ -2970,3 +3118,29 @@
+   if (base == NULL)  return NULL;
+   return Ideal_allocation(base, phase);
+ }
++
++// Trace Initialize <- Proj[Parm] <- Allocate
++AllocateNode* InitializeNode::allocation() {
++  Node* rawoop = in(InitializeNode::RawAddress);
++  if (rawoop->is_Proj()) {
++    Node* alloc = rawoop->in(0);
++    if (alloc->is_Allocate()) {
++      return alloc->as_Allocate();
++    }
++  }
++  return NULL;
++}
++
++// Trace Allocate -> Proj[Parm] -> Initialize
++InitializeNode* AllocateNode::initialization() {
++  ProjNode* rawoop = proj_out(AllocateNode::RawAddress);
++  if (rawoop == NULL)  return NULL;
++  for (DUIterator_Fast imax, i = rawoop->fast_outs(imax); i < imax; i++) {
++    Node* init = rawoop->fast_out(i);
++    if (init->is_Initialize()) {
++      assert(init->as_Initialize()->allocation() == this, "2-way link");
++      return init->as_Initialize();
++    }
++  }
++  return NULL;
++}
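
Both traversals pivot on the same projection node, which is what makes the link 2-way. A toy model (ToyAlloc, ToyProj, and ToyInit are invented) of that shape:

#include <vector>

struct ToyAlloc; struct ToyInit;

struct ToyProj {
  ToyAlloc* producer = nullptr;     // node this is a projection of
  std::vector<ToyInit*> uses;       // consumers of the projection
};

struct ToyAlloc { ToyProj* raw_proj = nullptr; };
struct ToyInit  { ToyProj* raw_address = nullptr; };

// Initialize -> Proj -> Allocate (cf. InitializeNode::allocation above)
ToyAlloc* allocation(const ToyInit* init) {
  return init->raw_address ? init->raw_address->producer : nullptr;
}

// Allocate -> Proj -> first Initialize use (cf. AllocateNode::initialization)
ToyInit* initialization(const ToyAlloc* alloc) {
  if (alloc->raw_proj == nullptr) return nullptr;
  for (ToyInit* use : alloc->raw_proj->uses) return use;
  return nullptr;
}
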
+diff -ruN openjdk6/hotspot/src/share/vm/opto/graphKit.hpp openjdk/hotspot/src/share/vm/opto/graphKit.hpp
+--- openjdk6/hotspot/src/share/vm/opto/graphKit.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/graphKit.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)graphKit.hpp	1.56 07/05/17 15:58:52 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class FastLockNode;
+@@ -197,7 +194,7 @@
+     ex_map->set_next_exception(_exceptions);
+     _exceptions = ex_map;
+   }
+-  
++
+   // Turn the current JVM state into an exception state, appending the ex_oop.
+   SafePointNode* make_exception_state(Node* ex_oop);
+ 
+@@ -216,7 +213,7 @@
+     }
+     return phi_map;
+   }
+-  
++
+   // Combine the two exception states, building phis as necessary.
+   // The second argument is updated to include contributions from the first.
+   void combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map);
+@@ -426,6 +423,51 @@
+                         int adr_idx,
+                         bool require_atomic_access = false);
+ 
++
++  // All-in-one pre-barrier, store, post-barrier
++  // Insert a write-barrier'd store.  This is to let generational GC
++  // work; we have to flag all oop-stores before the next GC point.
++  //
++  // It comes in 3 flavors of store to an object, array, or unknown.
++  // We use precise card marks for arrays to avoid scanning the entire
++  // array. We use imprecise for objects. We use precise for unknown
++  // since we don't know if we have an array or an object, or even
++  // where the object starts.
++  //
++  // If val==NULL, it is taken to be a completely unknown value. QQQ
++
++  Node* store_oop_to_object(Node* ctl,
++                            Node* obj,   // containing obj
++                            Node* adr,  // actual address to store val at
++                            const TypePtr* adr_type,
++                            Node* val,
++                            const Type* val_type,
++                            BasicType bt);
++
++  Node* store_oop_to_array(Node* ctl,
++                           Node* obj,   // containing obj
++                           Node* adr,  // actual address to store val at
++                           const TypePtr* adr_type,
++                           Node* val,
++                           const Type* val_type,
++                           BasicType bt);
++
++  // Could be an array or object we don't know at compile time (unsafe ref.)
++  Node* store_oop_to_unknown(Node* ctl,
++                             Node* obj,   // containing obj
++                             Node* adr,  // actual address to store val at
++                             const TypePtr* adr_type,
++                             Node* val,
++                             const Type* val_type,
++                             BasicType bt);
++
++  // For the few cases where the barriers need special help
++  void pre_barrier(Node* ctl, Node* obj, Node* adr, uint adr_idx,
++                   Node* val, const Type* val_type, BasicType bt);
++
++  void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
++                    Node* val, BasicType bt, bool use_precise);
++
+   // Return addressing for an array element.
+   Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
+                               // Optional constraint on the array size:
+@@ -449,7 +491,7 @@
+   //--------------- stub generation -------------------
+  public:
+   void gen_stub(address C_function,
+-                const char *name, 
++                const char *name,
+                 int is_fancy_jump,
+                 bool pass_tls,
+                 bool return_pc);
+@@ -505,15 +547,15 @@
+   // Optional must_throw is the same as with add_safepoint_edges.
+   void uncommon_trap(int trap_request,
+                      ciKlass* klass = NULL, const char* reason_string = NULL,
+-                     bool must_throw = false);
++                     bool must_throw = false, bool keep_exact_action = false);
+ 
+   // Shorthand, to avoid saying "Deoptimization::" so many times.
+   void uncommon_trap(Deoptimization::DeoptReason reason,
+                      Deoptimization::DeoptAction action,
+                      ciKlass* klass = NULL, const char* reason_string = NULL,
+-                     bool must_throw = false) {
++                     bool must_throw = false, bool keep_exact_action = false) {
+     uncommon_trap(Deoptimization::make_trap_request(reason, action),
+-                  klass, reason_string, must_throw);
++                  klass, reason_string, must_throw, keep_exact_action);
+   }
+ 
+   // Report if there were too many traps at the current method and bci.
+@@ -528,14 +570,16 @@
+     return C->too_many_recompiles(method(), bci(), reason);
+   }
+ 
+-  // Insert a write-barrier'd store.  This is to let generational GC
+-  // work; we have to flag all oop-stores before the next GC point.
+-  // The obj_type value is one of T_OBJECT, T_ARRAY, or T_CONFLICT,
+-  // meaning obj is an instance, an array, or either (statically unknown).
+-  // If val==NULL, it is taken to be a completely unknown value.
+-  void store_barrier(Node *store, BasicType obj_type,
+-                     // dest object, addr within obj, and stored value:
+-                     Node* obj, Node* adr, Node* val);
++  // vanilla/CMS post barrier
++  void write_barrier_post(Node *store, Node* obj, Node* adr, Node* val, bool use_precise);
++
++  // Returns the object (if any) which was created the moment before.
++  Node* just_allocated_object(Node* current_control);
++
++  static bool use_ReduceInitialCardMarks() {
++    return (ReduceInitialCardMarks
++            && Universe::heap()->can_elide_tlab_store_barriers());
++  }
+ 
+   // Helper function to round double arguments before a call
+   void round_double_arguments(ciMethod* dest_method);
+@@ -613,7 +657,6 @@
+                             Node* *casted_receiver);
+ 
+   // implementation of object creation
+-  void set_eden_pointers(Node * &eden_top_adr, Node * &eden_end_adr);
+   Node* set_output_for_allocation(AllocateNode* alloc,
+                                   const TypeOopPtr* oop_type,
+                                   bool raw_mem_only);
+@@ -675,4 +718,3 @@
+   BuildCutout(GraphKit* kit, Node* p, float prob, float cnt = COUNT_UNKNOWN);
+   ~BuildCutout();
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/opto/idealGraphPrinter.cpp openjdk/hotspot/src/share/vm/opto/idealGraphPrinter.cpp
+--- openjdk6/hotspot/src/share/vm/opto/idealGraphPrinter.cpp	1969-12-31 19:00:00.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/idealGraphPrinter.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -0,0 +1,1919 @@
++/*
++ * Copyright 2007 Sun Microsystems, Inc.  All Rights Reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
++ * CA 95054 USA or visit www.sun.com if you need additional information or
++ * have any questions.
++ *
++ */
++
++#include "incls/_precompiled.incl"
++#include "incls/_idealGraphPrinter.cpp.incl"
++
++#ifndef PRODUCT
++
++// Constants
++// Keep consistent with Java constants
++const char *IdealGraphPrinter::INDENT = "  ";
++const char *IdealGraphPrinter::TOP_ELEMENT = "graphDocument";
++const char *IdealGraphPrinter::GROUP_ELEMENT = "group";
++const char *IdealGraphPrinter::GRAPH_ELEMENT = "graph";
++const char *IdealGraphPrinter::PROPERTIES_ELEMENT = "properties";
++const char *IdealGraphPrinter::EDGES_ELEMENT = "edges";
++const char *IdealGraphPrinter::PROPERTY_ELEMENT = "p";
++const char *IdealGraphPrinter::EDGE_ELEMENT = "edge";
++const char *IdealGraphPrinter::NODE_ELEMENT = "node";
++const char *IdealGraphPrinter::NODES_ELEMENT = "nodes";
++const char *IdealGraphPrinter::REMOVE_EDGE_ELEMENT = "removeEdge";
++const char *IdealGraphPrinter::REMOVE_NODE_ELEMENT = "removeNode";
++const char *IdealGraphPrinter::METHOD_NAME_PROPERTY = "name";
++const char *IdealGraphPrinter::METHOD_IS_PUBLIC_PROPERTY = "public";
++const char *IdealGraphPrinter::METHOD_IS_STATIC_PROPERTY = "static";
++const char *IdealGraphPrinter::TRUE_VALUE = "true";
++const char *IdealGraphPrinter::NODE_NAME_PROPERTY = "name";
++const char *IdealGraphPrinter::EDGE_NAME_PROPERTY = "name";
++const char *IdealGraphPrinter::NODE_ID_PROPERTY = "id";
++const char *IdealGraphPrinter::FROM_PROPERTY = "from";
++const char *IdealGraphPrinter::TO_PROPERTY = "to";
++const char *IdealGraphPrinter::PROPERTY_NAME_PROPERTY = "name";
++const char *IdealGraphPrinter::GRAPH_NAME_PROPERTY = "name";
++const char *IdealGraphPrinter::INDEX_PROPERTY = "index";
++const char *IdealGraphPrinter::METHOD_ELEMENT = "method";
++const char *IdealGraphPrinter::INLINE_ELEMENT = "inline";
++const char *IdealGraphPrinter::BYTECODES_ELEMENT = "bytecodes";
++const char *IdealGraphPrinter::METHOD_BCI_PROPERTY = "bci";
++const char *IdealGraphPrinter::METHOD_SHORT_NAME_PROPERTY = "shortName";
++const char *IdealGraphPrinter::CONTROL_FLOW_ELEMENT = "controlFlow";
++const char *IdealGraphPrinter::BLOCK_NAME_PROPERTY = "name";
++const char *IdealGraphPrinter::BLOCK_DOMINATOR_PROPERTY = "dom";
++const char *IdealGraphPrinter::BLOCK_ELEMENT = "block";
++const char *IdealGraphPrinter::SUCCESSORS_ELEMENT = "successors";
++const char *IdealGraphPrinter::SUCCESSOR_ELEMENT = "successor";
++const char *IdealGraphPrinter::ASSEMBLY_ELEMENT = "assembly";
++
++int IdealGraphPrinter::_file_count = 0;
++
++IdealGraphPrinter *IdealGraphPrinter::printer() {
++  if (PrintIdealGraphLevel == 0) return NULL;
++
++  JavaThread *thread = JavaThread::current();
++  if (!thread->is_Compiler_thread()) return NULL;
++
++  CompilerThread *compiler_thread = (CompilerThread *)thread;
++  if (compiler_thread->ideal_graph_printer() == NULL) {
++    IdealGraphPrinter *printer = new IdealGraphPrinter();
++    compiler_thread->set_ideal_graph_printer(printer);
++  }
++
++  return compiler_thread->ideal_graph_printer();
++}
++
++void IdealGraphPrinter::clean_up() {
++  JavaThread *p;
++  for (p = Threads::first(); p; p = p->next()) {
++    if (p->is_Compiler_thread()) {
++      CompilerThread *c = (CompilerThread *)p;
++      IdealGraphPrinter *printer = c->ideal_graph_printer();
++      if (printer) {
++        delete printer;
++      }
++      c->set_ideal_graph_printer(NULL);
++    }
++  }
++}
++
++// Constructor, either file or network output
++IdealGraphPrinter::IdealGraphPrinter() {
++
++  _traverse_outs = false;
++  _should_send_method = true;
++  _output = NULL;
++  buffer[0] = 0;
++  _depth = 0;
++  _current_method = NULL;
++  assert(!_current_method, "current method must be initialized to NULL");
++  _arena = new Arena();
++
++  _stream = new (ResourceObj::C_HEAP) networkStream();
++
++  if (PrintIdealGraphFile != NULL) {
++    ThreadCritical tc;
++    // User wants all output to go to files
++    if (_file_count != 0) {
++      ResourceMark rm;
++      stringStream st;
++      const char* dot = strrchr(PrintIdealGraphFile, '.');
++      if (dot) {
++        st.write(PrintIdealGraphFile, dot - PrintIdealGraphFile);
++        st.print("%d%s", _file_count, dot);
++      } else {
++        st.print("%s%d", PrintIdealGraphFile, _file_count);
++      }
++      _output = new (ResourceObj::C_HEAP) fileStream(st.as_string());
++    } else {
++      _output = new (ResourceObj::C_HEAP) fileStream(PrintIdealGraphFile);
++    }
++    _file_count++;
++  } else {
++    // Try to connect to visualizer
++    if (_stream->connect(PrintIdealGraphAddress, PrintIdealGraphPort)) {
++      char c = 0;
++      _stream->read(&c, 1);
++      if (c != 'y') {
++        tty->print_cr("Client available, but does not want to receive data!");
++        _stream->close();
++        delete _stream;
++        _stream = NULL;
++        return;
++      }
++      _output = _stream;
++    } else {
++      // It would be nice if we could shut down cleanly, but it should
++      // be an error if we can't connect to the visualizer.
++      fatal2("Couldn't connect to visualizer at %s:%d", PrintIdealGraphAddress, PrintIdealGraphPort);
++    }
++  }
++
++  start_element(TOP_ELEMENT);
++}
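
The constructor's file-naming scheme splices the counter in before the extension so successive compilations don't overwrite each other. A minimal sketch of just that logic (numbered_name is an invented helper, not part of the printer):

#include <cstdio>
#include <cstring>
#include <string>

// "graph.xml" + count 2 -> "graph2.xml"; names without an extension
// simply get the counter appended. The first file keeps the bare name.
std::string numbered_name(const char* base, int count) {
  if (count == 0) return base;
  const char* dot = std::strrchr(base, '.');
  if (dot) {
    std::string stem(base, dot - base);          // up to the last dot
    return stem + std::to_string(count) + dot;   // stem + count + ".ext"
  }
  return std::string(base) + std::to_string(count);
}

int main() {
  std::printf("%s\n", numbered_name("graph.xml", 0).c_str()); // graph.xml
  std::printf("%s\n", numbered_name("graph.xml", 2).c_str()); // graph2.xml
}
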
++
++// Destructor, close file or network stream
++IdealGraphPrinter::~IdealGraphPrinter() {
++
++  end_element(TOP_ELEMENT);
++
++  if (_stream) {
++    delete _stream;
++    if (_stream == _output) {
++      _output = NULL;
++    }
++    _stream = NULL;
++  }
++
++  if (_output) {
++    delete _output;
++    _output = NULL;
++  }
++}
++
++void IdealGraphPrinter::print_ifg(PhaseIFG* ifg) {
++
++  // Code to print an interference graph to tty, currently not used
++
++  /*
++  if (!_current_method) return;
++   // Remove neighbor colors
++
++  for (uint i = 0; i < ifg._maxlrg; i++) {
++
++    IndexSet *s = ifg.neighbors(i);
++    IndexSetIterator elements(s);
++    uint neighbor;
++    while ((neighbor = elements.next()) != 0) {
++        tty->print_cr("Edge between %d and %d\n", i, neighbor);
++    }
++  }
++
++
++  for (uint i = 0; i < ifg._maxlrg; i++) {
++    LRG &l = ifg.lrgs(i);
++    if (l._def) {
++      OptoReg::Name name = l.reg();
++      tty->print("OptoReg::dump: ");
++      OptoReg::dump(name);
++      tty->print_cr("");
++      tty->print_cr("name=%d\n", name);
++      if (name) {
++        if (OptoReg::is_stack(name)) {
++          tty->print_cr("Stack number %d\n", OptoReg::reg2stack(name));
++
++        } else if (!OptoReg::is_valid(name)) {
++          tty->print_cr("BAD!!!");
++        } else {
++
++          if (OptoReg::is_reg(name)) {
++          tty->print_cr(OptoReg::regname(name));
++          } else {
++            int x = 0;
++          }
++        }
++        int x = 0;
++      }
++
++      if (l._def == NodeSentinel) {
++        tty->print("multiple mapping from %d: ", i);
++        for (int j=0; j<l._defs->length(); j++) {
++          tty->print("%d ", l._defs->at(j)->_idx);
++        }
++        tty->print_cr("");
++      } else {
++        tty->print_cr("mapping between %d and %d\n", i, l._def->_idx);
++      }
++    }
++  }*/
++}
++
++void IdealGraphPrinter::print_method(ciMethod *method, int bci, InlineTree *tree) {
++
++  Properties properties;
++  stringStream str;
++  method->print_name(&str);
++
++  stringStream shortStr;
++  method->print_short_name(&shortStr);
++
++
++  properties.add(new Property(METHOD_NAME_PROPERTY, str.as_string()));
++  properties.add(new Property(METHOD_SHORT_NAME_PROPERTY, shortStr.as_string()));
++  properties.add(new Property(METHOD_BCI_PROPERTY, bci));
++  start_element(METHOD_ELEMENT, &properties);
++
++  start_element(BYTECODES_ELEMENT);
++  output()->print_cr("<![CDATA[");
++  method->print_codes_on(output());
++  output()->print_cr("]]>");
++  end_element(BYTECODES_ELEMENT);
++
++  start_element(INLINE_ELEMENT);
++  if (tree != NULL) {
++    GrowableArray<InlineTree *> subtrees = tree->subtrees();
++    for (int i = 0; i < subtrees.length(); i++) {
++      print_inline_tree(subtrees.at(i));
++    }
++  }
++  end_element(INLINE_ELEMENT);
++
++  end_element(METHOD_ELEMENT);
++  output()->flush();
++}
++
++void IdealGraphPrinter::print_inline_tree(InlineTree *tree) {
++
++  if (tree == NULL) return;
++
++  ciMethod *method = tree->method();
++  print_method(tree->method(), tree->caller_bci(), tree);
++
++}
++
++void IdealGraphPrinter::clear_nodes() {
++ // for (int i = 0; i < _nodes.length(); i++) {
++ //   _nodes.at(i)->clear_node();
++ // }
++}
++
++void IdealGraphPrinter::print_inlining(Compile* compile) {
++
++  // Print inline tree
++  if (_should_send_method) {
++    InlineTree *inlineTree = compile->ilt();
++    if (inlineTree != NULL) {
++      print_inline_tree(inlineTree);
++    } else {
++      // print this method only
++    }
++  }
++}
++
++// Has to be called whenever a method is compiled
++void IdealGraphPrinter::begin_method(Compile* compile) {
++
++  ciMethod *method = compile->method();
++  assert(_output, "output stream must exist!");
++  assert(method, "null methods are not allowed!");
++  assert(!_current_method, "current method must be null!");
++
++  _arena->destruct_contents();
++
++  start_element(GROUP_ELEMENT);
++
++  // Print properties
++  Properties properties;
++
++  // Add method name
++  stringStream strStream;
++  method->print_name(&strStream);
++  properties.add(new Property(METHOD_NAME_PROPERTY, strStream.as_string()));
++
++  if (method->flags().is_public()) {
++    properties.add(new Property(METHOD_IS_PUBLIC_PROPERTY, TRUE_VALUE));
++  }
++
++  if (method->flags().is_static()) {
++    properties.add(new Property(METHOD_IS_STATIC_PROPERTY, TRUE_VALUE));
++  }
++
++  properties.print(this);
++
++  if (_stream) {
++    char answer = 0;
++    _stream->flush();
++    int result = _stream->read(&answer, 1);
++    _should_send_method = (answer == 'y');
++  }
++
++  this->_nodes = GrowableArray<NodeDescription *>(_arena, 2, 0, NULL);
++  this->_edges = GrowableArray< EdgeDescription * >(_arena, 2, 0, NULL);
++
++
++  this->_current_method = method;
++
++
++
++  _output->flush();
++}
++
++// Has to be called whenever a method has finished compilation
++void IdealGraphPrinter::end_method() {
++
++//  if (finish && !in_method) return;
++
++  nmethod* method = (nmethod*)this->_current_method->code();
++
++  start_element(ASSEMBLY_ELEMENT);
++ // Disassembler::decode(method, _output);
++  end_element(ASSEMBLY_ELEMENT);
++
++
++  end_element(GROUP_ELEMENT);
++  _current_method = NULL;
++  _output->flush();
++  for (int i = 0; i < _nodes.length(); i++) {
++    NodeDescription *desc = _nodes.at(i);
++    if (desc) {
++      delete desc;
++      _nodes.at_put(i, NULL);
++    }
++  }
++  this->_nodes.clear();
++
++
++  for (int i = 0; i < _edges.length(); i++) {
++   // for (int j=0; j<_edges.at(i)->length(); j++) {
++      EdgeDescription *conn = _edges.at(i);
++      conn->print(this);
++      if (conn) {
++        delete conn;
++        _edges.at_put(i, NULL);
++      }
++    //}
++    //_edges.at(i)->clear();
++    //delete _edges.at(i);
++    //_edges.at_put(i, NULL);
++  }
++  this->_edges.clear();
++
++//  in_method = false;
++}
++
++// Outputs an XML start element
++void IdealGraphPrinter::start_element(const char *s, Properties *properties /* = NULL */, bool print_indent /* = false */, bool print_return /* = true */) {
++
++  start_element_helper(s, properties, false, print_indent, print_return);
++  _depth++;
++
++}
++
++// Outputs an XML start element without body
++void IdealGraphPrinter::simple_element(const char *s, Properties *properties /* = NULL */, bool print_indent /* = false */) {
++  start_element_helper(s, properties, true, print_indent, true);
++}
++
++// Outputs an XML start element. If outputEnd is true, the element has no body.
++void IdealGraphPrinter::start_element_helper(const char *s, Properties *properties, bool outputEnd, bool print_indent /* = false */, bool print_return /* = true */) {
++
++  assert(_output, "output stream must exist!");
++
++  if (print_indent) this->print_indent();
++  _output->print("<");
++  _output->print(s);
++  if (properties) properties->print_as_attributes(this);
++
++  if (outputEnd) {
++    _output->print("/");
++  }
++
++  _output->print(">");
++  if (print_return) _output->print_cr("");
++
++}
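++
++// For example, start_element(GRAPH_ELEMENT, &props) emits a line like
++//   <graph name="after matching">
++// while simple_element(NODE_ELEMENT, &props) emits the self-closing form
++//   <node id="1234"/>
++// (element and property names here are illustrative stand-ins for the
++// string constants declared in idealGraphPrinter.hpp).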
++
++// Print indent
++void IdealGraphPrinter::print_indent() {
++  for (int i = 0; i < _depth; i++) {
++    _output->print(INDENT);
++  }
++}
++
++// Outputs an XML end element
++void IdealGraphPrinter::end_element(const char *s, bool print_indent /* = true */, bool print_return /* = true */) {
++
++  assert(_output, "output stream must exist!");
++
++  _depth--;
++
++  if (print_indent) this->print_indent();
++  _output->print("</");
++  _output->print(s);
++  _output->print(">");
++  if (print_return) _output->print_cr("");
++
++}
++
++bool IdealGraphPrinter::traverse_outs() {
++  return _traverse_outs;
++}
++
++void IdealGraphPrinter::set_traverse_outs(bool b) {
++  _traverse_outs = b;
++}
++
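++// walk() does an iterative depth-first traversal starting at 'start':
++// def-use successors are followed only when _traverse_outs is set, inputs
++// are always followed, and pre_node() records a description for each node
++// the first time it is reached.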
++void IdealGraphPrinter::walk(Node *start) {
++
++  VectorSet visited(Thread::current()->resource_area());
++  GrowableArray<Node *> nodeStack(Thread::current()->resource_area(), 0, 0, NULL);
++  nodeStack.push(start);
++  visited.test_set(start->_idx);
++  while (nodeStack.length() > 0) {
++
++    Node *n = nodeStack.pop();
++    IdealGraphPrinter::pre_node(n, this);
++
++    if (_traverse_outs) {
++      for (DUIterator i = n->outs(); n->has_out(i); i++) {
++        Node* p = n->out(i);
++        if (!visited.test_set(p->_idx)) {
++          nodeStack.push(p);
++        }
++      }
++    }
++
++    for (uint i = 0; i < n->len(); i++) {
++      if (n->in(i)) {
++        if (!visited.test_set(n->in(i)->_idx)) {
++          nodeStack.push(n->in(i));
++        }
++      }
++    }
++  }
++}
++
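++// compress(), eval() and link() are the forest helpers of the simple
++// Lengauer-Tarjan dominator algorithm: compress() performs path compression
++// along the ancestor links, eval() returns the label with the minimal
++// semidominator number on the path, and link() makes one block an ancestor
++// of another.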
++void IdealGraphPrinter::compress(int index, GrowableArray<Block>* blocks) {
++  Block *block = blocks->adr_at(index);
++
++  int ancestor = block->ancestor();
++  assert(ancestor != -1, "");
++
++  Block *ancestor_block = blocks->adr_at(ancestor);
++  if (ancestor_block->ancestor() != -1) {
++    compress(ancestor, blocks);
++
++    int label = block->label();
++    Block *label_block = blocks->adr_at(label);
++
++    int ancestor_label = ancestor_block->label();
++    Block *ancestor_label_block = blocks->adr_at(ancestor_label);
++    if (ancestor_label_block->semi() < label_block->semi()) {
++      block->set_label(ancestor_label);
++    }
++
++    block->set_ancestor(ancestor_block->ancestor());
++  }
++}
++
++int IdealGraphPrinter::eval(int index, GrowableArray<Block>* blocks) {
++  Block *block = blocks->adr_at(index);
++  if (block->ancestor() == -1) {
++    return index;
++  } else {
++    compress(index, blocks);
++    return block->label();
++  }
++}
++
++void IdealGraphPrinter::link(int index1, int index2, GrowableArray<Block>* blocks) {
++  Block *block2 = blocks->adr_at(index2);
++  block2->set_ancestor(index1);
++}
++
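++// build_dominators() computes immediate dominators in three passes: a DFS
++// that assigns semidominator numbers, a reverse pass that computes
++// semidominators and fills the buckets, and a forward pass that turns them
++// into immediate dominators; blocks left unreached default to dominator 0.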
++void IdealGraphPrinter::build_dominators(GrowableArray<Block>* blocks) {
++
++  if (blocks->length() == 0) return;
++
++  GrowableArray<int> stack;
++  stack.append(0);
++
++  GrowableArray<Block *> array;
++
++  assert(blocks->length() > 0, "");
++  blocks->adr_at(0)->set_dominator(0);
++
++  int n = 0;
++  while(!stack.is_empty()) {
++    int index = stack.pop();
++    Block *block = blocks->adr_at(index);
++    block->set_semi(n);
++    array.append(block);
++    n = n + 1;
++    for (int i = 0; i < block->succs()->length(); i++) {
++      int succ_index = block->succs()->at(i);
++      Block *succ = blocks->adr_at(succ_index);
++      if (succ->semi() == -1) {
++        succ->set_parent(index);
++        stack.push(succ_index);
++      }
++      succ->add_pred(index);
++    }
++  }
++
++  for (int i=n-1; i>0; i--) {
++    Block *block = array.at(i);
++    int block_index = block->index();
++    for (int j=0; j<block->pred()->length(); j++) {
++      int pred_index = block->pred()->at(j);
++      int cur_index = eval(pred_index, blocks);
++
++      Block *cur_block = blocks->adr_at(cur_index);
++      if (cur_block->semi() < block->semi()) {
++        block->set_semi(cur_block->semi());
++      }
++    }
++
++    int semi_index = block->semi();
++    Block *semi_block = array.at(semi_index);
++    semi_block->add_to_bucket(block_index);
++
++    link(block->parent(), block_index, blocks);
++    Block *parent_block = blocks->adr_at(block->parent());
++
++    for (int j=0; j<parent_block->bucket()->length(); j++) {
++      int cur_index = parent_block->bucket()->at(j);
++      int new_index = eval(cur_index, blocks);
++      Block *cur_block = blocks->adr_at(cur_index);
++      Block *new_block = blocks->adr_at(new_index);
++      int dom = block->parent();
++
++      if (new_block->semi() < cur_block->semi()) {
++        dom = new_index;
++      }
++
++      cur_block->set_dominator(dom);
++    }
++
++    parent_block->clear_bucket();
++  }
++
++  for (int i=1; i < n; i++) {
++
++    Block *block = array.at(i);
++    int block_index = block->index();
++
++    int semi_index = block->semi();
++    Block *semi_block = array.at(semi_index);
++
++    if (block->dominator() != semi_block->index()) {
++      int new_dom = blocks->adr_at(block->dominator())->dominator();
++      block->set_dominator(new_dom);
++    }
++  }
++
++  for (int i = 0; i < blocks->length(); i++) {
++    if (blocks->adr_at(i)->dominator() == -1) {
++      blocks->adr_at(i)->set_dominator(0);
++    }
++  }
++
++  // Build dominates array
++  for (int i=1; i < blocks->length(); i++) {
++    Block *block = blocks->adr_at(i);
++    int dominator = block->dominator();
++    Block *dom_block = blocks->adr_at(dominator);
++    dom_block->add_dominates(i);
++    dom_block->add_child(i);
++
++    while(dominator != 0) {
++      dominator = dom_block->dominator();
++      dom_block = blocks->adr_at(dominator);
++      dom_block->add_child(i);
++    }
++  }
++}
++
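++// Fills common_dominator so that common_dominator[a][b] is the closest block
++// dominating both a and b, by walking the dominator tree recursively and
++// using the children lists accumulated in build_dominators().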
++void IdealGraphPrinter::build_common_dominator(int **common_dominator, int index, GrowableArray<Block>* blocks) {
++
++  common_dominator[index][index] = index;
++  Block *block = blocks->adr_at(index);
++  for (int i = 0; i < block->dominates()->length(); i++) {
++    Block *dominated = blocks->adr_at(block->dominates()->at(i));
++
++    for (int j=0; j<dominated->children()->length(); j++) {
++      Block *child = blocks->adr_at(dominated->children()->at(j));
++      common_dominator[index][child->index()] = common_dominator[child->index()][index] = index;
++
++      for (int k=0; k<i; k++) {
++        Block *other_dominated = blocks->adr_at(block->dominates()->at(k));
++        common_dominator[child->index()][other_dominated->index()] = common_dominator[other_dominated->index()][child->index()] = index;
++
++        for (int l=0 ; l<other_dominated->children()->length(); l++) {
++          Block *other_child = blocks->adr_at(other_dominated->children()->at(l));
++          common_dominator[child->index()][other_child->index()] = common_dominator[other_child->index()][child->index()] = index;
++        }
++      }
++    }
++
++    build_common_dominator(common_dominator, dominated->index(), blocks);
++  }
++}
++
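++// schedule_latest() assigns each still-unplaced node to the common dominator
++// of its users' blocks (with special handling for Phi inputs, which must be
++// available in the corresponding Region predecessor), i.e. the latest block
++// in which the value is still available to every use.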
++void IdealGraphPrinter::schedule_latest(int **common_dominator, GrowableArray<Block>* blocks) {
++
++  int queue_size = _nodes.length() + 1;
++  NodeDescription **queue = NEW_RESOURCE_ARRAY(NodeDescription *, queue_size);
++  int queue_start = 0;
++  int queue_end = 0;
++  Arena *a = new Arena();
++  VectorSet on_queue(a);
++
++  for (int i = 0; i < _nodes.length(); i++) {
++    NodeDescription *desc = _nodes.at(i);
++    if (desc) {
++      desc->init_succs();
++    }
++  }
++
++  for (int i = 0; i < _nodes.length(); i++) {
++    NodeDescription *desc = _nodes.at(i);
++    if (desc) {
++      for (uint j=0; j<desc->node()->len(); j++) {
++        Node *n = desc->node()->in(j);
++        if (n) {
++          NodeDescription *other_desc = _nodes.at(n->_idx);
++          other_desc->add_succ(desc);
++        }
++      }
++    }
++  }
++
++  for (int i = 0; i < _nodes.length(); i++) {
++    NodeDescription *desc = _nodes.at(i);
++    if (desc && desc->block_index() == -1) {
++
++      // Put Phi into same block as region
++      if (desc->node()->is_Phi() && desc->node()->in(0) && _nodes.at(desc->node()->in(0)->_idx)->block_index() != -1) {
++        int index = _nodes.at(desc->node()->in(0)->_idx)->block_index();
++        desc->set_block_index(index);
++        blocks->adr_at(index)->add_node(desc);
++
++      // Put Projections to same block as parent
++      } else if (desc->node()->is_block_proj() && _nodes.at(desc->node()->is_block_proj()->_idx)->block_index() != -1) {
++        int index = _nodes.at(desc->node()->is_block_proj()->_idx)->block_index();
++        desc->set_block_index(index);
++        blocks->adr_at(index)->add_node(desc);
++      } else {
++        queue[queue_end] = desc;
++        queue_end++;
++        on_queue.set(desc->node()->_idx);
++      }
++    }
++  }
++
++
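++  // z counts consecutive re-queue operations; it is reset whenever a node is
++  // actually placed, so the loop gives up once nothing has been schedulable
++  // for 10000 iterations in a row.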
++  int z = 0;
++  while(queue_start != queue_end && z < 10000) {
++
++    NodeDescription *desc = queue[queue_start];
++    queue_start = (queue_start + 1) % queue_size;
++    on_queue >>= desc->node()->_idx;
++
++    Node* node = desc->node();
++
++    int block_index = -1;
++    if (desc->succs()->length() != 0) {
++      for (int i = 0; i < desc->succs()->length(); i++) {
++          NodeDescription *cur_desc = desc->succs()->at(i);
++          if (cur_desc != desc) {
++            if (cur_desc->succs()->length() == 0) {
++
++              // Ignore nodes with 0 successors
++
++            } else if (cur_desc->block_index() == -1) {
++
++              // Let this node schedule first
++              block_index = -1;
++              break;
++
++            } else if (cur_desc->node()->is_Phi()){
++
++              // Special treatment for Phi functions
++              PhiNode *phi = cur_desc->node()->as_Phi();
++              assert(phi->in(0) && phi->in(0)->is_Region(), "Must have region node in first input");
++              RegionNode *region = phi->in(0)->as_Region();
++
++              for (uint j=1; j<phi->len(); j++) {
++                Node *cur_phi_input = phi->in(j);
++                if (cur_phi_input == desc->node() && region->in(j)) {
++                  NodeDescription *cur_region_input = _nodes.at(region->in(j)->_idx);
++                  if (cur_region_input->block_index() == -1) {
++
++                    // Let this node schedule first
++                    block_index = -1;
++                    break;
++                  } else {
++                    if (block_index == -1) {
++                      block_index = cur_region_input->block_index();
++                    } else {
++                      block_index = common_dominator[block_index][cur_region_input->block_index()];
++                    }
++                  }
++                }
++              }
++
++            } else {
++              if (block_index == -1) {
++                block_index = cur_desc->block_index();
++              } else {
++                block_index = common_dominator[block_index][cur_desc->block_index()];
++              }
++            }
++          }
++      }
++    }
++
++    if (block_index == -1) {
++      queue[queue_end] = desc;
++      queue_end = (queue_end + 1) % queue_size;
++      on_queue.set(desc->node()->_idx);
++      z++;
++    } else {
++      assert(desc->block_index() == -1, "");
++      desc->set_block_index(block_index);
++      blocks->adr_at(block_index)->add_node(desc);
++      z = 0;
++    }
++  }
++
++  for (int i = 0; i < _nodes.length(); i++) {
++    NodeDescription *desc = _nodes.at(i);
++    if (desc && desc->block_index() == -1) {
++
++      // Fall back to the block of the first non-NULL input.
++      Node *parent = desc->node()->in(0);
++      uint cur = 1;
++      while (!parent && cur < desc->node()->len()) {
++        parent = desc->node()->in(cur);
++        cur++;
++      }
++
++      if (parent && _nodes.at(parent->_idx)->block_index() != -1) {
++        int index = _nodes.at(parent->_idx)->block_index();
++        desc->set_block_index(index);
++        blocks->adr_at(index)->add_node(desc);
++      } else {
++        // No placed parent found; fall back to the start block.
++        desc->set_block_index(0);
++        blocks->adr_at(0)->add_node(desc);
++      }
++    }
++  }
++
++  for (int i = 0; i < _nodes.length(); i++) {
++    NodeDescription *desc = _nodes.at(i);
++    if (desc) {
++      desc->clear_succs();
++    }
++  }
++
++  for (int i = 0; i < _nodes.length(); i++) {
++    NodeDescription *desc = _nodes.at(i);
++    if (desc) {
++      int block_index = desc->block_index();
++
++      assert(block_index >= 0 && block_index < blocks->length(), "Block index must be in range");
++      assert(blocks->adr_at(block_index)->nodes()->contains(desc), "Node must be child of block");
++    }
++  }
++  a->destruct_contents();
++}
++
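++// build_blocks() reconstructs a CFG for the visualizer from the graph alone:
++// every block-ending projection opens a block, control inputs are walked
++// backwards to collect the block's pinned nodes, successor edges are derived
++// from def-use edges, and the remaining nodes are placed by schedule_latest().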
++void IdealGraphPrinter::build_blocks(Node *root) {
++
++  Arena *a = new Arena();
++  Node_Stack stack(a, 100);
++
++  VectorSet visited(a);
++  stack.push(root, 0);
++  GrowableArray<Block> blocks(a, 2, 0, Block(0));
++
++  for (int i = 0; i < _nodes.length(); i++) {
++    if (_nodes.at(i)) _nodes.at(i)->set_block_index(-1);
++  }
++
++
++  // Order nodes such that node index is equal to idx
++  for (int i = 0; i < _nodes.length(); i++) {
++
++    if (_nodes.at(i)) {
++      NodeDescription *node = _nodes.at(i);
++      int index = node->node()->_idx;
++      if (index != i) {
++        _nodes.at_grow(index);
++        NodeDescription *tmp = _nodes.at(index);
++        *(_nodes.adr_at(index)) = node;
++        *(_nodes.adr_at(i)) = tmp;
++        i--;
++      }
++    }
++  }
++
++  for (int i = 0; i < _nodes.length(); i++) {
++    NodeDescription *node = _nodes.at(i);
++    if (node) {
++      assert(node->node()->_idx == (uint)i, "");
++    }
++  }
++
++  while (stack.is_nonempty()) {
++
++    Node *proj = stack.node();
++    const Node *parent = proj->is_block_proj();
++    if (parent == NULL) {
++      parent = proj;
++    }
++
++    if (!visited.test_set(parent->_idx)) {
++
++      NodeDescription *end_desc = _nodes.at(parent->_idx);
++      int block_index = blocks.length();
++      Block block(block_index);
++      blocks.append(block);
++      Block *b = blocks.adr_at(block_index);
++      b->set_start(end_desc);
++     // assert(end_desc->block_index() == -1, "");
++      end_desc->set_block_index(block_index);
++      b->add_node(end_desc);
++
++      // Walk the control chain backwards, collecting any control-pinned
++      // nodes in the middle of the block
++      Node *p = proj;
++      NodeDescription *start_desc = NULL;
++      do {
++        proj = p;                   // Update pointer to last Control
++        if (p->in(0) == NULL) {
++          start_desc = end_desc;
++          break;
++        }
++        p = p->in(0);               // Move control forward
++        start_desc = _nodes.at(p->_idx);
++        assert(start_desc, "");
++
++        if (start_desc != end_desc && start_desc->block_index() == -1) {
++          assert(start_desc->block_index() == -1, "");
++          assert(block_index < blocks.length(), "");
++          start_desc->set_block_index(block_index);
++          b->add_node(start_desc);
++        }
++      } while (!p->is_block_proj() &&
++               !p->is_block_start());
++
++      for (uint i = 0; i < start_desc->node()->len(); i++) {
++
++        Node *pred_node = start_desc->node()->in(i);
++
++        if (pred_node && pred_node != start_desc->node()) {
++          const Node *cur_parent = pred_node->is_block_proj();
++          if (cur_parent != NULL) {
++            pred_node = (Node *)cur_parent;
++          }
++
++          NodeDescription *pred_node_desc = _nodes.at(pred_node->_idx);
++          if (pred_node_desc->block_index() != -1) {
++            blocks.adr_at(pred_node_desc->block_index())->add_succ(block_index);
++          }
++        }
++      }
++
++      for (DUIterator_Fast dmax, i = end_desc->node()->fast_outs(dmax); i < dmax; i++) {
++        Node* cur_succ = end_desc->node()->fast_out(i);
++        NodeDescription *cur_succ_desc = _nodes.at(cur_succ->_idx);
++
++        DUIterator_Fast dmax2, i2 = cur_succ->fast_outs(dmax2);
++        if (cur_succ->is_block_proj() && i2 < dmax2 && !cur_succ->is_Root()) {
++
++          for (; i2<dmax2; i2++) {
++            Node *cur_succ2 = cur_succ->fast_out(i2);
++            if (cur_succ2) {
++              cur_succ_desc = _nodes.at(cur_succ2->_idx);
++              if (cur_succ_desc == NULL) {
++                // dead node so skip it
++                continue;
++              }
++              if (cur_succ2 != end_desc->node() && cur_succ_desc->block_index() != -1) {
++                b->add_succ(cur_succ_desc->block_index());
++              }
++            }
++          }
++
++        } else {
++
++          if (cur_succ != end_desc->node() && cur_succ_desc && cur_succ_desc->block_index() != -1) {
++            b->add_succ(cur_succ_desc->block_index());
++          }
++        }
++      }
++
++
++      int num_preds = p->len();
++      int bottom = -1;
++      if (p->is_Region() || p->is_Phi()) {
++        bottom = 0;
++      }
++
++      int pushed = 0;
++      for (int i=num_preds - 1; i > bottom; i--) {
++        if (p->in(i) != NULL && p->in(i) != p) {
++          stack.push(p->in(i), 0);
++          pushed++;
++        }
++      }
++
++      if (pushed == 0 && p->is_Root() && !_matcher) {
++        // Special case when backedges to root are not yet built
++        for (int i = 0; i < _nodes.length(); i++) {
++          if (_nodes.at(i) && _nodes.at(i)->node()->is_SafePoint() && _nodes.at(i)->node()->outcnt() == 0) {
++            stack.push(_nodes.at(i)->node(), 0);
++          }
++        }
++      }
++
++    } else {
++      stack.pop();
++    }
++  }
++
++  build_dominators(&blocks);
++
++  int **common_dominator = NEW_RESOURCE_ARRAY(int *, blocks.length());
++  for (int i = 0; i < blocks.length(); i++) {
++    int *cur = NEW_RESOURCE_ARRAY(int, blocks.length());
++    common_dominator[i] = cur;
++
++    for (int j=0; j<blocks.length(); j++) {
++      cur[j] = 0;
++    }
++  }
++
++  for (int i = 0; i < blocks.length(); i++) {
++    blocks.adr_at(i)->add_child(blocks.adr_at(i)->index());
++  }
++  build_common_dominator(common_dominator, 0, &blocks);
++
++  schedule_latest(common_dominator, &blocks);
++
++  start_element(CONTROL_FLOW_ELEMENT);
++
++  for (int i = 0; i < blocks.length(); i++) {
++    Block *block = blocks.adr_at(i);
++
++    Properties props;
++    props.add(new Property(BLOCK_NAME_PROPERTY, i));
++    props.add(new Property(BLOCK_DOMINATOR_PROPERTY, block->dominator()));
++    start_element(BLOCK_ELEMENT, &props);
++
++    if (block->succs()->length() > 0) {
++      start_element(SUCCESSORS_ELEMENT);
++      for (int j=0; j<block->succs()->length(); j++) {
++        int cur_index = block->succs()->at(j);
++        if (cur_index != 0 /* the start block must not have inputs */) {
++          Properties properties;
++          properties.add(new Property(BLOCK_NAME_PROPERTY, cur_index));
++          simple_element(SUCCESSOR_ELEMENT, &properties);
++        }
++      }
++      end_element(SUCCESSORS_ELEMENT);
++    }
++
++    start_element(NODES_ELEMENT);
++
++    for (int j=0; j<block->nodes()->length(); j++) {
++      NodeDescription *n = block->nodes()->at(j);
++      Properties properties;
++      properties.add(new Property(NODE_ID_PROPERTY, n->id()));
++      simple_element(NODE_ELEMENT, &properties);
++    }
++
++    end_element(NODES_ELEMENT);
++
++    end_element(BLOCK_ELEMENT);
++  }
++
++
++  end_element(CONTROL_FLOW_ELEMENT);
++
++  a->destruct_contents();
++}
++
++void IdealGraphPrinter::print_method(Compile* compile, const char *name, int level, bool clear_nodes) {
++  print(compile, name, (Node *)compile->root(), level, clear_nodes);
++}
++
++// Print current ideal graph
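++// Only the delta against the previously printed graph is sent: descriptions
++// still Invalid after the walk are emitted as removals, New ones are emitted
++// in full, and if nothing changed the graph element is skipped entirely.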
++void IdealGraphPrinter::print(Compile* compile, const char *name, Node *node, int level, bool clear_nodes) {
++
++//  if (finish && !in_method) return;
++  if (!_current_method || !_should_send_method || level > PrintIdealGraphLevel) return;
++
++  assert(_current_method, "newMethod has to be called first!");
++
++  _clear_nodes = clear_nodes;
++
++  // Warning, unsafe cast?
++  _chaitin = (PhaseChaitin *)compile->regalloc();
++  _matcher = compile->matcher();
++
++
++  // Update nodes
++  for (int i = 0; i < _nodes.length(); i++) {
++    NodeDescription *desc = _nodes.at(i);
++    if (desc) {
++      desc->set_state(Invalid);
++    }
++  }
++  Node *n = node;
++  walk(n);
++
++  // Update edges
++  for (int i = 0; i < _edges.length(); i++) {
++    _edges.at(i)->set_state(Invalid);
++  }
++
++  for (int i = 0; i < _nodes.length(); i++) {
++    NodeDescription *desc = _nodes.at(i);
++    if (desc && desc->state() != Invalid) {
++
++      int to = desc->id();
++      uint len = desc->node()->len();
++      for (uint j=0; j<len; j++) {
++        Node *n = desc->node()->in(j);
++
++        if (n) {
++
++
++          intptr_t from = (intptr_t)n;
++
++          // Assert from node is valid
++          /*
++          bool ok = false;
++          for (int k=0; k<_nodes.length(); k++) {
++            NodeDescription *desc = _nodes.at(k);
++            if (desc && desc->id() == from) {
++              assert(desc->state() != Invalid, "");
++              ok = true;
++            }
++          }
++          assert(ok, "");*/
++
++          uint index = j;
++          if (index >= desc->node()->req()) {
++            index = desc->node()->req();
++          }
++
++          print_edge(from, to, index);
++        }
++      }
++    }
++  }
++
++  bool is_different = false;
++
++  for (int i = 0; i < _nodes.length(); i++) {
++    NodeDescription *desc = _nodes.at(i);
++    if (desc && desc->state() != Valid) {
++      is_different = true;
++      break;
++    }
++  }
++
++  if (!is_different) {
++    for (int i = 0; i < _edges.length(); i++) {
++      EdgeDescription *conn = _edges.at(i);
++      if (conn && conn->state() != Valid) {
++        is_different = true;
++        break;
++      }
++    }
++  }
++
++  // No changes -> do not print graph
++  if (!is_different) return;
++
++  Properties properties;
++  properties.add(new Property(GRAPH_NAME_PROPERTY, (const char *)name));
++  start_element(GRAPH_ELEMENT, &properties);
++
++  start_element(NODES_ELEMENT);
++  for (int i = 0; i < _nodes.length(); i++) {
++    NodeDescription *desc = _nodes.at(i);
++    if (desc) {
++      desc->print(this);
++      if (desc->state() == Invalid) {
++        delete desc;
++        _nodes.at_put(i, NULL);
++      } else {
++        desc->set_state(Valid);
++      }
++    }
++  }
++  end_element(NODES_ELEMENT);
++
++  build_blocks(node);
++
++  start_element(EDGES_ELEMENT);
++  for (int i = 0; i < _edges.length(); i++) {
++    EdgeDescription *conn = _edges.at(i);
++
++    // Assert from and to nodes are valid
++    /*
++    if (conn->state() != Invalid) {
++      bool ok1 = false;
++      bool ok2 = false;
++      for (int j=0; j<_nodes.length(); j++) {
++        NodeDescription *desc = _nodes.at(j);
++        if (desc && desc->id() == conn->from()) {
++          ok1 = true;
++        }
++
++        if (desc && desc->id() == conn->to()) {
++          ok2 = true;
++        }
++      }
++
++      assert(ok1, "from node not found!");
++      assert(ok2, "to node not found!");
++    }*/
++
++    conn->print(this);
++    if (conn->state() == Invalid) {
++      _edges.remove_at(i);
++      delete conn;
++      i--;
++    }
++  }
++
++  end_element(EDGES_ELEMENT);
++
++  end_element(GRAPH_ELEMENT);
++
++  _output->flush();
++}
++
++// Print edge
++void IdealGraphPrinter::print_edge(int from, int to, int index) {
++
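++  // Note: ids originate as node pointers (see NodeDescription's constructor)
++  // but are narrowed to int here, so on 64-bit platforms edges are keyed on
++  // truncated pointer values.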
++  EdgeDescription *conn = new EdgeDescription(from, to, index);
++  for (int i = 0; i < _edges.length(); i++) {
++    if (_edges.at(i)->equals(conn)) {
++      conn->set_state(Valid);
++      delete _edges.at(i);
++      _edges.at_put(i, conn);
++      return;
++    }
++  }
++
++  _edges.append(conn);
++}
++
++extern const char *NodeClassNames[];
++
++// Create node description
++IdealGraphPrinter::NodeDescription *IdealGraphPrinter::create_node_description(Node* node) {
++
++#ifndef PRODUCT
++  node->_in_dump_cnt++;
++  NodeDescription *desc = new NodeDescription(node);
++  desc->properties()->add(new Property(NODE_NAME_PROPERTY, (const char *)node->Name()));
++
++  const Type *t = node->bottom_type();
++  desc->properties()->add(new Property("type", (const char *)Type::msg[t->base()]));
++
++  desc->properties()->add(new Property("idx", node->_idx));
++#ifdef ASSERT
++  desc->properties()->add(new Property("debug_idx", node->_debug_idx));
++#endif
++
++
++  const jushort flags = node->flags();
++  if (flags & Node::Flag_is_Copy) {
++    desc->properties()->add(new Property("is_copy", "true"));
++  }
++  if (flags & Node::Flag_is_Call) {
++    desc->properties()->add(new Property("is_call", "true"));
++  }
++  if (flags & Node::Flag_rematerialize) {
++    desc->properties()->add(new Property("rematerialize", "true"));
++  }
++  if (flags & Node::Flag_needs_anti_dependence_check) {
++    desc->properties()->add(new Property("needs_anti_dependence_check", "true"));
++  }
++  if (flags & Node::Flag_is_macro) {
++    desc->properties()->add(new Property("is_macro", "true"));
++  }
++  if (flags & Node::Flag_is_Con) {
++    desc->properties()->add(new Property("is_con", "true"));
++  }
++  if (flags & Node::Flag_is_cisc_alternate) {
++    desc->properties()->add(new Property("is_cisc_alternate", "true"));
++  }
++  if (flags & Node::Flag_is_Branch) {
++    desc->properties()->add(new Property("is_branch", "true"));
++  }
++  if (flags & Node::Flag_is_block_start) {
++    desc->properties()->add(new Property("is_block_start", "true"));
++  }
++  if (flags & Node::Flag_is_Goto) {
++    desc->properties()->add(new Property("is_goto", "true"));
++  }
++  if (flags & Node::Flag_is_dead_loop_safe) {
++    desc->properties()->add(new Property("is_dead_loop_safe", "true"));
++  }
++  if (flags & Node::Flag_may_be_short_branch) {
++    desc->properties()->add(new Property("may_be_short_branch", "true"));
++  }
++  if (flags & Node::Flag_is_safepoint_node) {
++    desc->properties()->add(new Property("is_safepoint_node", "true"));
++  }
++  if (flags & Node::Flag_is_pc_relative) {
++    desc->properties()->add(new Property("is_pc_relative", "true"));
++  }
++
++  if (_matcher) {
++    if (_matcher->is_shared(desc->node())) {
++      desc->properties()->add(new Property("is_shared", "true"));
++    } else {
++      desc->properties()->add(new Property("is_shared", "false"));
++    }
++
++    if (_matcher->is_dontcare(desc->node())) {
++      desc->properties()->add(new Property("is_dontcare", "true"));
++    } else {
++      desc->properties()->add(new Property("is_dontcare", "false"));
++    }
++  }
++
++  if (node->is_Proj()) {
++    desc->properties()->add(new Property("con", (int)node->as_Proj()->_con));
++  }
++
++  if (node->is_Mach()) {
++    desc->properties()->add(new Property("idealOpcode", (const char *)NodeClassNames[node->as_Mach()->ideal_Opcode()]));
++  }
++
++  buffer[0] = 0;
++  stringStream s2(buffer, sizeof(buffer) - 1);
++
++  node->dump_spec(&s2);
++  assert(s2.size() < sizeof(buffer), "size in range");
++  desc->properties()->add(new Property("dump_spec", buffer));
++
++  if (node->is_block_proj()) {
++    desc->properties()->add(new Property("is_block_proj", "true"));
++  }
++
++  if (node->is_block_start()) {
++    desc->properties()->add(new Property("is_block_start", "true"));
++  }
++
++  const char *short_name = "short_name";
++  if (strcmp(node->Name(), "Parm") == 0 && node->as_Proj()->_con >= TypeFunc::Parms) {
++      int index = node->as_Proj()->_con - TypeFunc::Parms;
++      if (index >= 10) {
++        desc->properties()->add(new Property(short_name, "PA"));
++      } else {
++        sprintf(buffer, "P%d", index);
++        desc->properties()->add(new Property(short_name, buffer));
++      }
++  } else if (strcmp(node->Name(), "IfTrue") == 0) {
++     desc->properties()->add(new Property(short_name, "T"));
++  } else if (strcmp(node->Name(), "IfFalse") == 0) {
++     desc->properties()->add(new Property(short_name, "F"));
++  } else if ((node->is_Con() && node->is_Type()) || node->is_Proj()) {
++
++    if (t->base() == Type::Int && t->is_int()->is_con()) {
++      const TypeInt *typeInt = t->is_int();
++      assert(typeInt->is_con(), "must be constant");
++      jint value = typeInt->get_con();
++
++      // max. 2 chars allowed
++      if (value >= -9 && value <= 99) {
++        sprintf(buffer, "%d", value);
++        desc->properties()->add(new Property(short_name, buffer));
++      } else {
++        desc->properties()->add(new Property(short_name, "I"));
++      }
++    } else if (t == Type::TOP) {
++      desc->properties()->add(new Property(short_name, "^"));
++    } else if (t->base() == Type::Long && t->is_long()->is_con()) {
++      const TypeLong *typeLong = t->is_long();
++      assert(typeLong->is_con(), "must be constant");
++      jlong value = typeLong->get_con();
++
++      // max. 2 chars allowed
++      if (value >= -9 && value <= 99) {
++        // The value fits in two characters here, so narrowing jlong to int
++        // before formatting with %d is safe.
++        sprintf(buffer, "%d", (int)value);
++        desc->properties()->add(new Property(short_name, buffer));
++      } else {
++        desc->properties()->add(new Property(short_name, "L"));
++      }
++    } else if (t->base() == Type::KlassPtr) {
++      desc->properties()->add(new Property(short_name, "CP"));
++    } else if (t->base() == Type::Control) {
++      desc->properties()->add(new Property(short_name, "C"));
++    } else if (t->base() == Type::Memory) {
++      desc->properties()->add(new Property(short_name, "M"));
++    } else if (t->base() == Type::Abio) {
++      desc->properties()->add(new Property(short_name, "IO"));
++    } else if (t->base() == Type::Return_Address) {
++      desc->properties()->add(new Property(short_name, "RA"));
++    } else if (t->base() == Type::AnyPtr) {
++      desc->properties()->add(new Property(short_name, "P"));
++    } else if (t->base() == Type::RawPtr) {
++      desc->properties()->add(new Property(short_name, "RP"));
++    } else if (t->base() == Type::AryPtr) {
++      desc->properties()->add(new Property(short_name, "AP"));
++    }
++  }
++
++  if (node->is_SafePoint()) {
++    SafePointNode *safePointNode = node->as_SafePoint();
++    if (safePointNode->jvms()) {
++      stringStream bciStream;
++      bciStream.print("%d ", safePointNode->jvms()->bci());
++      JVMState *caller = safePointNode->jvms()->caller();
++      while(caller) {
++        bciStream.print("%d ", caller->bci());
++
++        caller = caller->caller();
++      }
++      desc->properties()->add(new Property("bci", bciStream.as_string()));
++    }
++  }
++
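++  // 0xdeadbeef is used elsewhere as a poison value for register-allocation
++  // data that is no longer valid, so treat that sentinel like NULL here.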
++  if (_chaitin && _chaitin != (PhaseChaitin *)0xdeadbeef) {
++    buffer[0] = 0;
++    _chaitin->dump_register(node, buffer);
++    desc->properties()->add(new Property("reg", buffer));
++    desc->properties()->add(new Property("lrg", _chaitin->n2lidx(node)));
++  }
++
++
++  node->_in_dump_cnt--;
++  return desc;
++#else
++  return NULL;
++#endif
++}
++
++void IdealGraphPrinter::pre_node(Node* node, void *env) {
++
++  IdealGraphPrinter *printer = (IdealGraphPrinter *)env;
++
++  NodeDescription *newDesc = printer->create_node_description(node);
++
++  if (printer->_clear_nodes) {
++    printer->_nodes.append(newDesc);
++  } else {
++
++    NodeDescription *desc = printer->_nodes.at_grow(node->_idx, NULL);
++
++    if (desc && desc->equals(newDesc)) {
++      // Unchanged since the last printed graph: keep only the new description
++      // and mark it Valid so it is not re-emitted.
++      delete desc;
++      printer->_nodes.at_put(node->_idx, NULL);
++      newDesc->set_state(Valid);
++    } else if (desc && desc->id() == newDesc->id()) {
++      // Same node but its properties changed: re-emit it as New.
++      delete desc;
++      printer->_nodes.at_put(node->_idx, NULL);
++      newDesc->set_state(New);
++    }
++
++    printer->_nodes.append(newDesc);
++  }
++}
++
++void IdealGraphPrinter::post_node(Node* node, void *env) {
++}
++
++outputStream *IdealGraphPrinter::output() {
++  return _output;
++}
++
++IdealGraphPrinter::Description::Description() {
++  _state = New;
++}
++
++void IdealGraphPrinter::Description::print(IdealGraphPrinter *printer) {
++  if (_state == Invalid) {
++    print_removed(printer);
++  } else if (_state == New) {
++    print_changed(printer);
++  }
++}
++
++void IdealGraphPrinter::Description::set_state(State s) {
++  _state = s;
++}
++
++IdealGraphPrinter::State IdealGraphPrinter::Description::state() {
++  return _state;
++}
++
++void IdealGraphPrinter::Block::set_proj(NodeDescription *n) {
++  _proj = n;
++}
++
++void IdealGraphPrinter::Block::set_start(NodeDescription *n) {
++  _start = n;
++}
++
++int IdealGraphPrinter::Block::semi() {
++  return _semi;
++}
++
++int IdealGraphPrinter::Block::parent() {
++  return _parent;
++}
++
++GrowableArray<int>* IdealGraphPrinter::Block::bucket() {
++  return &_bucket;
++}
++
++GrowableArray<int>* IdealGraphPrinter::Block::children() {
++  return &_children;
++}
++
++void IdealGraphPrinter::Block::add_child(int i) {
++  _children.append(i);
++}
++
++GrowableArray<int>* IdealGraphPrinter::Block::dominates() {
++  return &_dominates;
++}
++
++void IdealGraphPrinter::Block::add_dominates(int i) {
++  _dominates.append(i);
++}
++
++void IdealGraphPrinter::Block::add_to_bucket(int i) {
++  _bucket.append(i);
++}
++
++void IdealGraphPrinter::Block::clear_bucket() {
++  _bucket.clear();
++}
++
++void IdealGraphPrinter::Block::set_dominator(int i) {
++  _dominator = i;
++}
++
++void IdealGraphPrinter::Block::set_label(int i) {
++  _label = i;
++}
++
++int IdealGraphPrinter::Block::label() {
++  return _label;
++}
++
++int IdealGraphPrinter::Block::ancestor() {
++  return _ancestor;
++}
++
++void IdealGraphPrinter::Block::set_ancestor(int i) {
++  _ancestor = i;
++}
++
++int IdealGraphPrinter::Block::dominator() {
++  return _dominator;
++}
++
++int IdealGraphPrinter::Block::index() {
++  return _index;
++}
++
++void IdealGraphPrinter::Block::set_parent(int i) {
++  _parent = i;
++}
++
++GrowableArray<int>* IdealGraphPrinter::Block::pred() {
++  return &_pred;
++}
++
++void IdealGraphPrinter::Block::set_semi(int i) {
++  _semi = i;
++}
++
++IdealGraphPrinter::Block::Block() {
++}
++
++IdealGraphPrinter::Block::Block(int index) {
++  _index = index;
++  _label = index;
++  _semi = -1;
++  _ancestor = -1;
++  _dominator = -1;
++}
++
++void IdealGraphPrinter::Block::add_pred(int i) {
++  _pred.append(i);
++}
++
++IdealGraphPrinter::NodeDescription *IdealGraphPrinter::Block::proj() {
++  return _proj;
++}
++
++IdealGraphPrinter::NodeDescription *IdealGraphPrinter::Block::start() {
++  return _start;
++}
++
++GrowableArray<int>* IdealGraphPrinter::Block::succs() {
++  return &_succs;
++}
++
++void IdealGraphPrinter::Block::add_succ(int index) {
++
++  if (!_succs.contains(index)) {
++    _succs.append(index);
++  }
++}
++
++
++void IdealGraphPrinter::Block::add_node(NodeDescription *n) {
++  if (!_nodes.contains(n)) {
++    _nodes.append(n);
++  }
++}
++
++GrowableArray<IdealGraphPrinter::NodeDescription *>* IdealGraphPrinter::Block::nodes() {
++  return &_nodes;
++}
++
++int IdealGraphPrinter::NodeDescription::count = 0;
++
++IdealGraphPrinter::NodeDescription::NodeDescription(Node* node) : _node(node) {
++  _id = (uintptr_t)(node);
++  _block_index = -1;
++}
++
++IdealGraphPrinter::NodeDescription::~NodeDescription() {
++  _properties.clean();
++}
++
++// void IdealGraphPrinter::NodeDescription::set_node(Node* node) {
++//   //this->_node = node;
++// }
++
++int IdealGraphPrinter::NodeDescription::block_index() {
++  return _block_index;
++}
++
++
++GrowableArray<IdealGraphPrinter::NodeDescription *>* IdealGraphPrinter::NodeDescription::succs() {
++  return &_succs;
++}
++
++void IdealGraphPrinter::NodeDescription::clear_succs() {
++  _succs.clear();
++}
++
++void IdealGraphPrinter::NodeDescription::init_succs() {
++  _succs = GrowableArray<NodeDescription *>();
++}
++
++void IdealGraphPrinter::NodeDescription::add_succ(NodeDescription *desc) {
++  _succs.append(desc);
++}
++
++void IdealGraphPrinter::NodeDescription::set_block_index(int i) {
++  _block_index = i;
++}
++
++bool IdealGraphPrinter::NodeDescription::equals(NodeDescription *desc) {
++  if (desc == NULL) return false;
++  if (desc->id() != id()) return false;
++  return properties()->equals(desc->properties());
++}
++
++Node* IdealGraphPrinter::NodeDescription::node() {
++  return _node;
++}
++
++IdealGraphPrinter::Properties* IdealGraphPrinter::NodeDescription::properties() {
++  return &_properties;
++}
++
++uint IdealGraphPrinter::NodeDescription::id() {
++  return _id;
++}
++
++void IdealGraphPrinter::NodeDescription::print_changed(IdealGraphPrinter *printer) {
++
++  Properties properties;
++  properties.add(new Property(NODE_ID_PROPERTY, id()));
++  printer->start_element(NODE_ELEMENT, &properties);
++
++  this->properties()->print(printer);
++
++  printer->end_element(NODE_ELEMENT);
++}
++
++void IdealGraphPrinter::NodeDescription::print_removed(IdealGraphPrinter *printer) {
++
++  Properties properties;
++  properties.add(new Property(NODE_ID_PROPERTY, id()));
++  printer->simple_element(REMOVE_NODE_ELEMENT, &properties);
++}
++
++IdealGraphPrinter::EdgeDescription::EdgeDescription(int from, int to, int index) {
++  this->_from = from;
++  this->_to = to;
++  this->_index = index;
++}
++
++IdealGraphPrinter::EdgeDescription::~EdgeDescription() {
++}
++
++int IdealGraphPrinter::EdgeDescription::from() {
++  return _from;
++}
++
++int IdealGraphPrinter::EdgeDescription::to() {
++  return _to;
++}
++
++void IdealGraphPrinter::EdgeDescription::print_changed(IdealGraphPrinter *printer) {
++
++  Properties properties;
++  properties.add(new Property(INDEX_PROPERTY, _index));
++  properties.add(new Property(FROM_PROPERTY, _from));
++  properties.add(new Property(TO_PROPERTY, _to));
++  printer->simple_element(EDGE_ELEMENT, &properties);
++}
++
++void IdealGraphPrinter::EdgeDescription::print_removed(IdealGraphPrinter *printer) {
++
++  Properties properties;
++  properties.add(new Property(INDEX_PROPERTY, _index));
++  properties.add(new Property(FROM_PROPERTY, _from));
++  properties.add(new Property(TO_PROPERTY, _to));
++  printer->simple_element(REMOVE_EDGE_ELEMENT, &properties);
++}
++
++bool IdealGraphPrinter::EdgeDescription::equals(IdealGraphPrinter::EdgeDescription *desc) {
++  if (desc == NULL) return false;
++  return (_from == desc->_from && _to == desc->_to && _index == desc->_index);
++}
++
++IdealGraphPrinter::Properties::Properties() : list(new (ResourceObj::C_HEAP) GrowableArray<Property *>(2, 0, NULL, true)) {
++}
++
++IdealGraphPrinter::Properties::~Properties() {
++  clean();
++  delete list;
++}
++
++void IdealGraphPrinter::Properties::add(Property *p) {
++  assert(p != NULL, "Property not NULL");
++  list->append(p);
++}
++
++void IdealGraphPrinter::Properties::print(IdealGraphPrinter *printer) {
++  printer->start_element(PROPERTIES_ELEMENT);
++
++  for (int i = 0; i < list->length(); i++) {
++    list->at(i)->print(printer);
++  }
++
++  printer->end_element(PROPERTIES_ELEMENT);
++}
++
++void IdealGraphPrinter::Properties::clean() {
++  for (int i = 0; i < list->length(); i++) {
++    delete list->at(i);
++    list->at_put(i, NULL);
++  }
++  list->clear();
++  assert(list->length() == 0, "List cleared");
++}
++
++void IdealGraphPrinter::Properties::remove(const char *name) {
++  for (int i = 0; i < list->length(); i++) {
++    if (strcmp(list->at(i)->name(), name) == 0) {
++      delete list->at(i);
++      list->remove_at(i);
++      i--;
++    }
++  }
++}
++
++void IdealGraphPrinter::Properties::print_as_attributes(IdealGraphPrinter *printer) {
++
++  for (int i = 0; i < list->length(); i++) {
++    assert(list->at(i) != NULL, "Property not null!");
++    printer->output()->print(" ");
++    list->at(i)->print_as_attribute(printer);
++  }
++}
++
++bool IdealGraphPrinter::Properties::equals(Properties* p) {
++  if (p->list->length() != this->list->length()) return false;
++
++  for (int i = 0; i < list->length(); i++) {
++    assert(list->at(i) != NULL, "Property not null!");
++    if (!list->at(i)->equals(p->list->at(i))) return false;
++  }
++
++  return true;
++}
++
++IdealGraphPrinter::Property::Property() {
++  _name = NULL;
++  _value = NULL;
++}
++
++const char *IdealGraphPrinter::Property::name() {
++  return _name;
++}
++
++IdealGraphPrinter::Property::Property(const Property* p) {
++
++  this->_name = NULL;
++  this->_value = NULL;
++
++  if (p->_name != NULL) {
++    _name = dup(p->_name);
++  }
++
++  if (p->_value) {
++    _value = dup(p->_value);
++  }
++}
++
++IdealGraphPrinter::Property::~Property() {
++
++  clean();
++}
++
++IdealGraphPrinter::Property::Property(const char *name, const char *value) {
++
++  assert(name, "Name must not be null!");
++  assert(value, "Value must not be null!");
++
++  _name = dup(name);
++  _value = dup(value);
++}
++
++IdealGraphPrinter::Property::Property(const char *name, int intValue) {
++  _name = dup(name);
++
++  stringStream stream;
++  stream.print("%d", intValue);
++  _value = dup(stream.as_string());
++}
++
++void IdealGraphPrinter::Property::clean() {
++  // _name and _value are allocated with new[] in dup(), so use delete[].
++  if (_name) {
++    delete[] _name;
++    _name = NULL;
++  }
++
++  if (_value) {
++    delete[] _value;
++    _value = NULL;
++  }
++}
++
++
++bool IdealGraphPrinter::Property::is_null() {
++  return _name == NULL;
++}
++
++void IdealGraphPrinter::Property::print(IdealGraphPrinter *printer) {
++
++  assert(!is_null(), "null properties cannot be printed!");
++  Properties properties;
++  properties.add(new Property(PROPERTY_NAME_PROPERTY, _name));
++  printer->start_element(PROPERTY_ELEMENT, &properties, false, false);
++  printer->print_xml(_value);
++  printer->end_element(PROPERTY_ELEMENT, false, true);
++}
++
++void IdealGraphPrinter::Property::print_as_attribute(IdealGraphPrinter *printer) {
++
++  printer->output()->print(_name);
++  printer->output()->print("=\"");
++  printer->print_xml(_value);
++  printer->output()->print("\"");
++}
++
++
++bool IdealGraphPrinter::Property::equals(Property* p) {
++
++  if (is_null() && p->is_null()) return true;
++  if (is_null()) return false;
++  if (p->is_null()) return false;
++
++  int cmp1 = strcmp(p->_name, _name);
++  if (cmp1 != 0) return false;
++
++  int cmp2 = strcmp(p->_value, _value);
++  if (cmp2 != 0) return false;
++
++  return true;
++}
++
++void IdealGraphPrinter::print_xml(const char *value) {
++  size_t len = strlen(value);
++
++  char buf[2];
++  buf[1] = 0;
++  for (size_t i = 0; i < len; i++) {
++    char c = value[i];
++
++    switch (c) {
++      case '<':
++        output()->print("&lt;");
++        break;
++
++      case '>':
++        output()->print("&gt;");
++        break;
++
++      case '&':
++        output()->print("&amp;");
++        break;
++
++      case '"':
++        output()->print("&quot;");
++        break;
++
++      default:
++        buf[0] = c;
++        output()->print(buf);
++        break;
++    }
++  }
++}
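++
++// For example, print_xml("List<String>") writes "List&lt;String&gt;", keeping
++// property values well-formed when embedded in XML attributes and elements.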
++
++#endif
+diff -ruN openjdk6/hotspot/src/share/vm/opto/idealGraphPrinter.hpp openjdk/hotspot/src/share/vm/opto/idealGraphPrinter.hpp
+--- openjdk6/hotspot/src/share/vm/opto/idealGraphPrinter.hpp	1969-12-31 19:00:00.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/idealGraphPrinter.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -0,0 +1,323 @@
++/*
++ * Copyright 2007 Sun Microsystems, Inc.  All Rights Reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
++ * CA 95054 USA or visit www.sun.com if you need additional information or
++ * have any questions.
++ *
++ */
++
++#ifndef PRODUCT
++
++class Compile;
++class PhaseIFG;
++class PhaseChaitin;
++class Matcher;
++class Node;
++class InlineTree;
++class ciMethod;
++
++class IdealGraphPrinter
++{
++private:
++
++  enum State
++  {
++    Invalid,
++    Valid,
++    New
++  };
++
++private:
++
++  static const char *INDENT;
++  static const char *TOP_ELEMENT;
++  static const char *GROUP_ELEMENT;
++  static const char *GRAPH_ELEMENT;
++  static const char *PROPERTIES_ELEMENT;
++  static const char *EDGES_ELEMENT;
++  static const char *PROPERTY_ELEMENT;
++  static const char *EDGE_ELEMENT;
++  static const char *NODE_ELEMENT;
++  static const char *NODES_ELEMENT;
++  static const char *CONTROL_FLOW_ELEMENT;
++  static const char *REMOVE_EDGE_ELEMENT;
++  static const char *REMOVE_NODE_ELEMENT;
++  static const char *METHOD_NAME_PROPERTY;
++  static const char *BLOCK_NAME_PROPERTY;
++  static const char *BLOCK_DOMINATOR_PROPERTY;
++  static const char *BLOCK_ELEMENT;
++  static const char *SUCCESSORS_ELEMENT;
++  static const char *SUCCESSOR_ELEMENT;
++  static const char *METHOD_IS_PUBLIC_PROPERTY;
++  static const char *METHOD_IS_STATIC_PROPERTY;
++  static const char *TRUE_VALUE;
++  static const char *NODE_NAME_PROPERTY;
++  static const char *EDGE_NAME_PROPERTY;
++  static const char *NODE_ID_PROPERTY;
++  static const char *FROM_PROPERTY;
++  static const char *TO_PROPERTY;
++  static const char *PROPERTY_NAME_PROPERTY;
++  static const char *GRAPH_NAME_PROPERTY;
++  static const char *INDEX_PROPERTY;
++  static const char *METHOD_ELEMENT;
++  static const char *INLINE_ELEMENT;
++  static const char *BYTECODES_ELEMENT;
++  static const char *METHOD_BCI_PROPERTY;
++  static const char *METHOD_SHORT_NAME_PROPERTY;
++  static const char *ASSEMBLY_ELEMENT;
++
++  class Property {
++
++  private:
++
++    const char *_name;
++    const char *_value;
++
++  public:
++
++    Property();
++    Property(const Property* p);
++    ~Property();
++    Property(const char *name, const char *value);
++    Property(const char *name, int value);
++    bool equals(Property* p);
++    void print(IdealGraphPrinter *printer);
++    void print_as_attribute(IdealGraphPrinter *printer);
++    bool is_null();
++    void clean();
++    const char *name();
++
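++    // Copies str with new[]; Property::clean() releases copies with delete[].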
++    static const char* dup(const char *str) {
++      char * copy = new char[strlen(str)+1];
++      strcpy(copy, str);
++      return copy;
++    }
++
++  };
++
++  class Properties {
++
++  private:
++
++    GrowableArray<Property *> *list;
++
++  public:
++
++    Properties();
++    ~Properties();
++    void add(Property *p);
++    void remove(const char *name);
++    bool equals(Properties* p);
++    void print(IdealGraphPrinter *printer);
++    void print_as_attributes(IdealGraphPrinter *printer);
++    void clean();
++
++  };
++
++
++  class Description {
++
++  private:
++
++    State _state;
++
++  public:
++
++    Description();
++
++    State state();
++    void set_state(State s);
++    void print(IdealGraphPrinter *printer);
++    virtual void print_changed(IdealGraphPrinter *printer) = 0;
++    virtual void print_removed(IdealGraphPrinter *printer) = 0;
++
++  };
++
++  class NodeDescription : public Description {
++
++  public:
++
++    static int count;
++
++  private:
++
++    GrowableArray<NodeDescription *> _succs;
++    int _block_index;
++    uintptr_t _id;
++    Properties _properties;
++    Node* _node;
++
++  public:
++
++    NodeDescription(Node* node);
++    ~NodeDescription();
++    Node* node();
++
++    // void set_node(Node* node);
++    GrowableArray<NodeDescription *>* succs();
++    void init_succs();
++    void clear_succs();
++    void add_succ(NodeDescription *desc);
++    int block_index();
++    void set_block_index(int i);
++    Properties* properties();
++    virtual void print_changed(IdealGraphPrinter *printer);
++    virtual void print_removed(IdealGraphPrinter *printer);
++    bool equals(NodeDescription *desc);
++    uint id();
++
++  };
++
++  class Block {
++
++  private:
++
++    NodeDescription *_start;
++    NodeDescription *_proj;
++    GrowableArray<int> _succs;
++    GrowableArray<NodeDescription *> _nodes;
++    GrowableArray<int> _dominates;
++    GrowableArray<int> _children;
++    int _semi;
++    int _parent;
++    GrowableArray<int> _pred;
++    GrowableArray<int> _bucket;
++    int _index;
++    int _dominator;
++    int _ancestor;
++    int _label;
++
++  public:
++
++    Block();
++    Block(int index);
++
++    void add_node(NodeDescription *n);
++    GrowableArray<NodeDescription *>* nodes();
++    GrowableArray<int>* children();
++    void add_child(int i);
++    void add_succ(int index);
++    GrowableArray<int>* succs();
++    GrowableArray<int>* dominates();
++    void add_dominates(int i);
++    NodeDescription *start();
++    NodeDescription *proj();
++    void set_start(NodeDescription *n);
++    void set_proj(NodeDescription *n);
++
++    int label();
++    void set_label(int i);
++    int ancestor();
++    void set_ancestor(int i);
++    int index();
++    int dominator();
++    void set_dominator(int i);
++    int parent();
++    void set_parent(int i);
++    int semi();
++    GrowableArray<int>* bucket();
++    void add_to_bucket(int i);
++    void clear_bucket();
++    GrowableArray<int>* pred();
++    void set_semi(int i);
++    void add_pred(int i);
++
++  };
++
++  class EdgeDescription : public Description {
++
++  private:
++
++    int _from;
++    int _to;
++    int _index;
++  public:
++
++    EdgeDescription(int from, int to, int index);
++    ~EdgeDescription();
++
++    virtual void print_changed(IdealGraphPrinter *printer);
++    virtual void print_removed(IdealGraphPrinter *printer);
++    bool equals(EdgeDescription *desc);
++    int from();
++    int to();
++  };
++
++
++  static int _file_count;
++  networkStream *_stream;
++  outputStream *_output;
++  ciMethod *_current_method;
++  GrowableArray<NodeDescription *> _nodes;
++  GrowableArray<EdgeDescription *> _edges;
++  int _depth;
++  Arena *_arena;
++  char buffer[128];
++  bool _should_send_method;
++  PhaseChaitin* _chaitin;
++  bool _clear_nodes;
++  Matcher* _matcher;
++  bool _traverse_outs;
++
++  void start_element_helper(const char *name, Properties *properties, bool endElement, bool print_indent = false, bool print_return = true);
++  NodeDescription *create_node_description(Node* node);
++
++  static void pre_node(Node* node, void *env);
++  static void post_node(Node* node, void *env);
++
++  void schedule_latest(int **common_dominator, GrowableArray<Block>* blocks);
++  void build_common_dominator(int **common_dominator, int index, GrowableArray<Block>* blocks);
++  void compress(int index, GrowableArray<Block>* blocks);
++  int eval(int index, GrowableArray<Block>* blocks);
++  void link(int index1, int index2, GrowableArray<Block>* blocks);
++  void build_dominators(GrowableArray<Block>* blocks);
++  void build_blocks(Node *node);
++  void walk(Node *n);
++  void start_element(const char *name, Properties *properties = NULL, bool print_indent = false, bool print_return = true);
++  void simple_element(const char *name, Properties *properties = NULL, bool print_indent = false);
++  void end_element(const char *name, bool print_indent = false, bool print_return = true);
++  void print_edge(int from, int to, int index);
++  void print_indent();
++  void print_method(ciMethod *method, int bci, InlineTree *tree);
++  void print_inline_tree(InlineTree *tree);
++  void clear_nodes();
++
++  IdealGraphPrinter();
++  ~IdealGraphPrinter();
++
++public:
++
++  static void clean_up();
++  static IdealGraphPrinter *printer();
++
++  bool traverse_outs();
++  void set_traverse_outs(bool b);
++  void print_ifg(PhaseIFG* ifg);
++  outputStream *output();
++  void print_inlining(Compile* compile);
++  void begin_method(Compile* compile);
++  void end_method();
++  void print_method(Compile* compile, const char *name, int level=1, bool clear_nodes = false);
++  void print(Compile* compile, const char *name, Node *root, int level=1, bool clear_nodes = false);
++  void print_xml(const char *value);
++
++
++};
++
++#endif
+diff -ruN openjdk6/hotspot/src/share/vm/opto/idealKit.cpp openjdk/hotspot/src/share/vm/opto/idealKit.cpp
+--- openjdk6/hotspot/src/share/vm/opto/idealKit.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/idealKit.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)idealKit.cpp	1.7 07/05/05 17:06:29 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,22 +19,30 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+ #include "incls/_idealKit.cpp.incl"
+ 
+ // Static initialization
+-const uint IdealKit::first_var = 1;
++
++// This declares the position where vars are kept in the cvstate
++// For some degree of consistency we use the TypeFunc enum to
++// soak up spots in the inputs even though we only use early Control
++// and Memory slots. (So far.)
++const uint IdealKit::first_var = TypeFunc::Parms + 1;
+ 
+ //----------------------------IdealKit-----------------------------------------
+-IdealKit::IdealKit(PhaseGVN &gvn, Node* control, bool delay_all_transforms) :
++IdealKit::IdealKit(PhaseGVN &gvn, Node* control, Node* mem, bool delay_all_transforms) :
+   _gvn(gvn), C(gvn.C) {
+   _initial_ctrl = control;
++  _initial_memory = mem;
+   _delay_all_transforms = delay_all_transforms;
+   _var_ct = 0;
+   _cvstate = NULL;
++  // We can go memory state free or else we need the entire memory state
++  assert(mem == NULL || mem->Opcode() == Op_MergeMem, "memory must be pre-split");
+   int init_size = 5;
+   _pending_cvstates = new (C->node_arena()) GrowableArray<Node*>(C->node_arena(), init_size, 0, 0);
+   _delay_transform  = new (C->node_arena()) GrowableArray<Node*>(C->node_arena(), init_size, 0, 0);
+@@ -52,14 +57,25 @@
+ void IdealKit::if_then(Node* left, BoolTest::mask relop,
+                        Node* right, float prob, float cnt, bool push_new_state) {
+   assert((state() & (BlockS|LoopS|IfThenS|ElseS)), "bad state for new If");
+-  Node* bol = Bool(CmpI(left, right), relop);
++  Node* bol;
++  if (left->bottom_type()->isa_ptr() == NULL) {
++    if (left->bottom_type()->isa_int() != NULL) {
++      bol = Bool(CmpI(left, right), relop);
++    } else {
++      assert(left->bottom_type()->isa_long() != NULL, "what else?");
++      bol = Bool(CmpL(left, right), relop);
++    }
++
++  } else {
++    bol = Bool(CmpP(left, right), relop);
++  }
+   // Delay gvn.tranform on if-nodes until construction is finished
+   // to prevent a constant bool input from discarding a control output.
+   IfNode* iff = delay_transform(new (C, 2) IfNode(ctrl(), bol, prob, cnt))->as_If();
+   Node* then  = IfTrue(iff);
+   Node* elsen = IfFalse(iff);
+   Node* else_cvstate = copy_cvstate();
+-  else_cvstate->set_req(0, elsen);
++  else_cvstate->set_req(TypeFunc::Control, elsen);
+   _pending_cvstates->push(else_cvstate);
+   DEBUG_ONLY(if (push_new_state) _state->push(IfThenS));
+   set_ctrl(then);
+@@ -79,16 +95,42 @@
+ }
+ 
+ //-------------------------------end_if-------------------------------------
+-// Merge the "then" and "else" cvstates from an "if" via:
+-// create label, generate a goto from the current cvstate to the
+-// new label, pop the other cvstate from the if ("else" cvstate if
+-// no else_() and "then" cvstate if there was), and bind the label
+-// to the popped cvstate.
++// Merge the "then" and "else" cvstates.
++//
++// The if_then() pushed the current state for later use
++// as the initial state for a future "else" clause.  The
++// current state then became the initial state for the
++// then clause.  If an "else" clause was encountered, it will
++// pop the top state and use it for its initial state.
++// It will also push the current state (the state at the end of
++// the "then" clause) for later use at the end_if.
++//
++// At the endif, the states are:
++// 1) else exists a) current state is end of "else" clause
++//                b) top stack state is end of "then" clause
++//
++// 2) no else:    a) current state is end of "then" clause
++//                b) top stack state is from the "if_then" which
++//                   would have been the initial state of the else.
++//
++// Merging the states is accomplished by:
++//   1) make a label for the merge
++//   2) terminate the current state with a goto to the label
++//   3) pop the top state from the stack and make it the
++//        current state
++//   4) bind the label at the current state.  Binding a label
++//        terminates the current state with a goto to the
++//        label and makes the label's state the current state.
++//
+ void IdealKit::end_if() {
+   assert(state() & (IfThenS|ElseS), "bad state for new Endif");
+   Node* lab = make_label(1);
++
++  // Node* join_state = _pending_cvstates->pop();
++                  /* merging, join */
+   goto_(lab);
+   _cvstate = _pending_cvstates->pop();
++
+   bind(lab);
+   DEBUG_ONLY(_state->pop());
+ }
+@@ -102,7 +144,7 @@
+ //           i = i + 1
+ //           goto top
+ //  *     } else // exits loop
+-// 
++//
+ // Pushes the loop top cvstate first, then the else (loop exit) cvstate
+ // onto the stack.
+ void IdealKit::loop(IdealVariable& iv, Node* init, BoolTest::mask relop, Node* limit, float prob, float cnt) {
+@@ -115,7 +157,7 @@
+   if_then(value(iv), relop, limit, prob, cnt, false /* no new state */);
+   DEBUG_ONLY(_state->push(LoopS));
+   assert(ctrl()->is_IfTrue(), "true branch stays in loop");
+-  assert(_pending_cvstates->top()->in(0)->is_IfFalse(), "false branch exits loop");
++  assert(_pending_cvstates->top()->in(TypeFunc::Control)->is_IfFalse(), "false branch exits loop");
+ }
+ 
+ //-------------------------------end_loop-------------------------------------
+@@ -141,7 +183,7 @@
+   Node* lab = new_cvstate();
+   int sz = 1 + goto_ct + 1 /* fall thru */;
+   Node* reg = delay_transform(new (C, sz) RegionNode(sz));
+-  lab->init_req(0, reg);
++  lab->init_req(TypeFunc::Control, reg);
+   return lab;
+ }
+ 
+@@ -160,7 +202,7 @@
+ // all live values have phis created. Used to create phis
+ // at loop-top regions.
+ void IdealKit::goto_(Node* lab, bool bind) {
+-  Node* reg = lab->in(0);
++  Node* reg = lab->in(TypeFunc::Control);
+   // find next empty slot in region
+   uint slot = 1;
+   while (slot < reg->req() && reg->in(slot) != NULL) slot++;
+@@ -170,23 +212,40 @@
+   reg->init_req(slot, ctrl());
+   assert(first_var + _var_ct == _cvstate->req(), "bad _cvstate size");
+   for (uint i = first_var; i < _cvstate->req(); i++) {
++
++    // l is the value of var reaching the label. Could be a single value
++    // reaching the label, or a phi that merges multiples values reaching
++    // the label.  The latter is true if the label's input: in(..) is
++    // a phi whose control input is the region node for the label.
++
+     Node* l = lab->in(i);
++    // Get the current value of the var
+     Node* m = _cvstate->in(i);
++    // If the var went unused no need for a phi
+     if (m == NULL) {
+       continue;
+     } else if (l == NULL || m == l) {
++      // Only one unique value "m" is known to reach this label so a phi
++      // is not yet necessary unless:
++      //    the label is being bound and not all predecessors have been seen,
++      //    in which case "bind" will be true.
+       if (bind) {
+         m = promote_to_phi(m, reg);
+       }
++      // Record the phi/value used for this var in the label's cvstate
+       lab->set_req(i, m);
+     } else {
++      // More than one value for the variable reaches this label so
++      // create a phi if one does not already exist.
+       if (!was_promoted_to_phi(l, reg)) {
+         l = promote_to_phi(l, reg);
+         lab->set_req(i, l);
+       }
++      // Record in the phi, the var's value from the current state
+       l->set_req(slot, m);
+     }
+   }
++  do_memory_merge(_cvstate, lab);
+   stop();
+ }
+ 
+@@ -203,6 +262,7 @@
+ void IdealKit::declares_done() {
+   _cvstate = new_cvstate();   // initialize current cvstate
+   set_ctrl(_initial_ctrl);    // initialize control in current cvstate
++  set_all_memory(_initial_memory);// initialize memory in current cvstate
+   DEBUG_ONLY(_state->push(BlockS));
+ }
+ 
+@@ -232,6 +292,8 @@
+ Node* IdealKit::copy_cvstate() {
+   Node* ns = new_cvstate();
+   for (uint i = 0; i < ns->req(); i++) ns->init_req(i, _cvstate->in(i));
++  // We must clone memory since it will be updated as we do stores.
++  ns->set_req(TypeFunc::Memory, MergeMemNode::make(C, ns->in(TypeFunc::Memory)));
+   return ns;
+ }
+ 
+@@ -256,3 +318,186 @@
+   k.declare(this);
+ }
+ 
++Node* IdealKit::memory(uint alias_idx) {
++  MergeMemNode* mem = merged_memory();
++  Node* p = mem->memory_at(alias_idx);
++  _gvn.set_type(p, Type::MEMORY);  // must be mapped
++  return p;
++}
++
++void IdealKit::set_memory(Node* mem, uint alias_idx) {
++  merged_memory()->set_memory_at(alias_idx, mem);
++}
++
++//----------------------------- make_load ----------------------------
++Node* IdealKit::load(Node* ctl,
++                     Node* adr,
++                     const Type* t,
++                     BasicType bt,
++                     int adr_idx,
++                     bool require_atomic_access) {
++
++  assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
++  const TypePtr* adr_type = NULL; // debug-mode-only argument
++  debug_only(adr_type = C->get_adr_type(adr_idx));
++  Node* mem = memory(adr_idx);
++  Node* ld;
++  if (require_atomic_access && bt == T_LONG) {
++    ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t);
++  } else {
++    ld = LoadNode::make(C, ctl, mem, adr, adr_type, t, bt);
++  }
++  return transform(ld);
++}
++
++Node* IdealKit::store(Node* ctl, Node* adr, Node *val, BasicType bt,
++                                int adr_idx,
++                                bool require_atomic_access) {
++  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
++  const TypePtr* adr_type = NULL;
++  debug_only(adr_type = C->get_adr_type(adr_idx));
++  Node *mem = memory(adr_idx);
++  Node* st;
++  if (require_atomic_access && bt == T_LONG) {
++    st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val);
++  } else {
++    st = StoreNode::make(C, ctl, mem, adr, adr_type, val, bt);
++  }
++  st = transform(st);
++  set_memory(st, adr_idx);
++
++  return st;
++}
++
++// Card mark store. Must be ordered so that it will come after the store of
++// the oop.
++Node* IdealKit::storeCM(Node* ctl, Node* adr, Node *val, Node* oop_store,
++                        BasicType bt,
++                        int adr_idx) {
++  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
++  const TypePtr* adr_type = NULL;
++  debug_only(adr_type = C->get_adr_type(adr_idx));
++  Node *mem = memory(adr_idx);
++
++  // Add required edge to oop_store, optimizer does not support precedence edges.
++  // Convert required edge to precedence edge before allocation.
++  Node* st = new (C, 5) StoreCMNode(ctl, mem, adr, adr_type, val, oop_store);
++
++  st = transform(st);
++  set_memory(st, adr_idx);
++
++  return st;
++}
++
++//---------------------------- do_memory_merge --------------------------------
++// The memory from one merging cvstate needs to be merged with the memory for another
++// join cvstate. If the join cvstate doesn't have a merged memory yet then we
++// can just copy the state from the merging cvstate
++
++// Merge one slow path into the rest of memory.
++void IdealKit::do_memory_merge(Node* merging, Node* join) {
++
++  // Get the region for the join state
++  Node* join_region = join->in(TypeFunc::Control);
++  assert(join_region != NULL, "join region must exist");
++  if (join->in(TypeFunc::Memory) == NULL ) {
++    join->set_req(TypeFunc::Memory,  merging->in(TypeFunc::Memory));
++    return;
++  }
++
++  // The control flow for merging must have already been attached to the join region
++  // we need its index for the phis.
++  uint slot;
++  for (slot = 1; slot < join_region->req() ; slot ++ ) {
++    if (join_region->in(slot) == merging->in(TypeFunc::Control)) break;
++  }
++  assert(slot !=  join_region->req(), "edge must already exist");
++
++  MergeMemNode* join_m    = join->in(TypeFunc::Memory)->as_MergeMem();
++  MergeMemNode* merging_m = merging->in(TypeFunc::Memory)->as_MergeMem();
++
++  // join_m should be an ancestor mergemem of merging
++  // Slow path memory comes from the current map (which is from a slow call)
++  // Fast path/null path memory comes from the call's input
++
++  // Merge the other fast-memory inputs with the new slow-default memory.
++  // for (MergeMemStream mms(merged_memory(), fast_mem->as_MergeMem()); mms.next_non_empty2(); ) {
++  for (MergeMemStream mms(join_m, merging_m); mms.next_non_empty2(); ) {
++    Node* join_slice = mms.force_memory();
++    Node* merging_slice = mms.memory2();
++    if (join_slice != merging_slice) {
++      PhiNode* phi;
++      // bool new_phi = false;
++      // Is the phi for this slice one that we created for this join region or simply
++      // one we copied? If it is ours, reuse it; otherwise create a new phi.
++      if (join_slice->is_Phi() && join_slice->as_Phi()->region() == join_region) {
++        phi = join_slice->as_Phi();
++      } else {
++        // create the phi with join_slice supplying memory for all of the
++        // control edges to the join region
++        phi = PhiNode::make(join_region, join_slice, Type::MEMORY, mms.adr_type(C));
++        phi = (PhiNode*) delay_transform(phi);
++        // gvn().set_type(phi, Type::MEMORY);
++        // new_phi = true;
++      }
++      // Now update the phi with the slice for the merging slice
++      phi->set_req(slot, merging_slice/* slow_path, slow_slice */);
++      // this updates join_m with the phi
++      mms.set_memory(phi);
++    }
++  }
++}
++
++
++//----------------------------- make_call  ----------------------------
++// Trivial runtime call
++void IdealKit::make_leaf_call(const TypeFunc *slow_call_type,
++                              address slow_call,
++                              const char *leaf_name,
++                              Node* parm0,
++                              Node* parm1,
++                              Node* parm2) {
++
++  // We only handle taking in RawMem and modifying RawMem
++  const TypePtr* adr_type = TypeRawPtr::BOTTOM;
++  uint adr_idx = C->get_alias_index(adr_type);
++
++  // Clone initial memory
++  MergeMemNode* cloned_mem =  MergeMemNode::make(C, merged_memory());
++
++  // Slow-path leaf call
++  int size = slow_call_type->domain()->cnt();
++  CallNode *call =  (CallNode*)new (C, size) CallLeafNode( slow_call_type, slow_call, leaf_name, adr_type);
++
++  // Set fixed predefined input arguments
++  call->init_req( TypeFunc::Control, ctrl() );
++  call->init_req( TypeFunc::I_O    , top() )     ;   // does no i/o
++  // Narrow memory as only memory input
++  call->init_req( TypeFunc::Memory , memory(adr_idx));
++  call->init_req( TypeFunc::FramePtr, top() /* frameptr() */ );
++  call->init_req( TypeFunc::ReturnAdr, top() );
++
++  if (parm0 != NULL)  call->init_req(TypeFunc::Parms+0, parm0);
++  if (parm1 != NULL)  call->init_req(TypeFunc::Parms+1, parm1);
++  if (parm2 != NULL)  call->init_req(TypeFunc::Parms+2, parm2);
++
++  // Node *c = _gvn.transform(call);
++  call = (CallNode *) _gvn.transform(call);
++  Node *c = call; // dbx gets confused with call call->dump()
++
++  // Slow leaf call has no side-effects, sets few values
++
++  set_ctrl(transform( new (C, 1) ProjNode(call,TypeFunc::Control) ));
++
++  // Set the incoming clone of memory as current memory
++  set_all_memory(cloned_mem);
++
++  // Make memory for the call
++  Node* mem = _gvn.transform( new (C, 1) ProjNode(call, TypeFunc::Memory) );
++
++  // Set the RawPtr memory state only.
++  set_memory(mem, adr_idx);
++
++  assert(C->alias_type(call->adr_type()) == C->alias_type(adr_type),
++         "call node must be constructed correctly");
++}
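The heart of this file's change is do_memory_merge() above: at a join point, the incoming MergeMem is walked slice by slice against the join's MergeMem, and wherever the two disagree, a per-slice phi keyed by the incoming control edge's slot records the merging value. A toy standalone model of that per-slice promotion (illustrative only — ToyJoin and ToyPhi are invented stand-ins; a real phi is a graph node, not a table of strings):

    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    // A "phi" here is just a table of one value per predecessor slot.
    struct ToyPhi { std::vector<std::string> in; };

    struct ToyJoin {
      int num_preds;
      std::map<std::string, std::string> flat; // slice -> single value seen so far
      std::map<std::string, ToyPhi>      phis; // slice -> per-predecessor values

      // Merge the memory state arriving over control edge 'slot', slice by
      // slice, promoting a slice to a phi only when two values disagree --
      // the same shape as the MergeMemStream loop in do_memory_merge().
      void merge(int slot, const std::map<std::string, std::string>& incoming) {
        for (std::map<std::string, std::string>::const_iterator it = incoming.begin();
             it != incoming.end(); ++it) {
          const std::string& slice = it->first;
          const std::string& val   = it->second;
          if (phis.count(slice)) { phis[slice].in[slot] = val; continue; } // our phi: fill slot
          if (!flat.count(slice)) { flat[slice] = val; continue; }         // first arrival: copy
          if (flat[slice] == val) continue;                                // agreement: no phi yet
          ToyPhi phi;                                                      // disagreement: promote
          phi.in.assign(num_preds, flat[slice]);
          phi.in[slot] = val;
          phis[slice] = phi;
          flat.erase(slice);
        }
      }
    };

    int main() {
      ToyJoin join;
      join.num_preds = 2;
      std::map<std::string, std::string> pred0, pred1;
      pred0["raw"] = "mem0"; pred0["field_x"] = "mem0";
      pred1["raw"] = "mem1"; pred1["field_x"] = "mem0";
      join.merge(0, pred0);
      join.merge(1, pred1);
      std::printf("raw promoted to phi: %s\n", join.phis.count("raw") ? "yes" : "no");
      std::printf("field_x still flat:  %s\n", join.flat.count("field_x") ? "yes" : "no");
      return 0;
    }

Only the slice that actually saw two different values ("raw") pays for a phi; slices where every path agrees stay flat, which is the point of the MergeMem representation.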
+diff -ruN openjdk6/hotspot/src/share/vm/opto/idealKit.hpp openjdk/hotspot/src/share/vm/opto/idealKit.hpp
+--- openjdk6/hotspot/src/share/vm/opto/idealKit.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/idealKit.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)idealKit.hpp	1.7 07/05/05 17:06:17 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //-----------------------------------------------------------------------------
+@@ -63,7 +60,7 @@
+ //       increment(i, ConI(1));
+ //    } end_loop(); dead(i);
+ //    bind(exit);
+-// 
++//
+ // See string_indexOf for a more complete example.
+ 
+ class IdealKit;
+@@ -93,42 +90,57 @@
+   PhaseGVN &_gvn;
+   GrowableArray<Node*>* _pending_cvstates; // stack of cvstates
+   GrowableArray<Node*>* _delay_transform;  // delay invoking gvn.transform until drain
+-  Node* _cvstate;                          // current cvstate (control and variables)
++  Node* _cvstate;                          // current cvstate (control, memory and variables)
+   uint _var_ct;                            // number of variables
+   bool _delay_all_transforms;              // flag forcing all transforms to be delayed
+   Node* _initial_ctrl;                     // saves initial control until variables declared
++  Node* _initial_memory;                   // saves initial memory  until variables declared
+ 
+   PhaseGVN& gvn() const { return _gvn; }
+   // Create a new cvstate filled with nulls
+   Node* new_cvstate();                     // Create a new cvstate
+   Node* cvstate() { return _cvstate; }     // current cvstate
+   Node* copy_cvstate();                    // copy current cvstate
+-  void set_ctrl(Node* ctrl) { _cvstate->set_req(0, ctrl); }
++  void set_ctrl(Node* ctrl) { _cvstate->set_req(TypeFunc::Control, ctrl); }
++
++  // Should this assert this is a MergeMem???
++  void set_all_memory(Node* mem){ _cvstate->set_req(TypeFunc::Memory, mem); }
++  void set_memory(Node* mem, uint alias_idx );
++  void do_memory_merge(Node* merging, Node* join);
+   void clear(Node* m);                     // clear a cvstate
+   void stop() { clear(_cvstate); }         // clear current cvstate
+-  Node* delay_transform(Node* n);          
++  Node* delay_transform(Node* n);
+   Node* transform(Node* n);                // gvn.transform or push node on delay list
+   Node* promote_to_phi(Node* n, Node* reg);// Promote "n" to a phi on region "reg"
+   bool was_promoted_to_phi(Node* n, Node* reg) {
+     return (n->is_Phi() && n->in(0) == reg);
+   }
+   void declare(IdealVariable* v) { v->set_id(_var_ct++); }
+-  static const uint first_var;
++  // This declares the position where vars are kept in the cvstate
++  // For some degree of consistency we use the TypeFunc enum to
++  // soak up spots in the inputs even though we only use early Control
++  // and Memory slots. (So far.)
++  static const uint first_var; // = TypeFunc::Parms + 1;
+ 
+ #ifdef ASSERT
+   enum State { NullS=0, BlockS=1, LoopS=2, IfThenS=4, ElseS=8, EndifS= 16 };
+   GrowableArray<int>* _state;
+   State state() { return (State)(_state->top()); }
+ #endif
+- 
++
++  // Users should not care about slices, only MergeMem, so no access for them.
++  Node* memory(uint alias_idx);
++
+  public:
+-  IdealKit(PhaseGVN &gvn, Node* control, bool delay_all_transforms = false);
++  IdealKit(PhaseGVN &gvn, Node* control, Node* memory, bool delay_all_transforms = false);
+   ~IdealKit() {
+     stop();
+     drain_delay_transform();
+   }
+   // Control
+-  Node* ctrl()                          { return _cvstate->in(0); }
++  Node* ctrl()                          { return _cvstate->in(TypeFunc::Control); }
++  Node* top()                           { return C->top(); }
++  MergeMemNode* merged_memory()         { return _cvstate->in(TypeFunc::Memory)->as_MergeMem(); }
+   void set(IdealVariable& v, Node* rhs) { _cvstate->set_req(first_var + v.id(), rhs); }
+   Node* value(IdealVariable& v)         { return _cvstate->in(first_var + v.id()); }
+   void dead(IdealVariable& v)           { set(v, (Node*)NULL); }
+@@ -145,10 +157,14 @@
+   void goto_(Node* lab, bool bind = false);
+   void declares_done();
+   void drain_delay_transform();
++
+   Node* IfTrue(IfNode* iff)  { return transform(new (C,1) IfTrueNode(iff)); }
+   Node* IfFalse(IfNode* iff) { return transform(new (C,1) IfFalseNode(iff)); }
++
+   // Data
+   Node* ConI(jint k) { return (Node*)gvn().intcon(k); }
++  Node* makecon(const Type *t)  const { return _gvn.makecon(t); }
++
+   Node* AddI(Node* l, Node* r) { return transform(new (C,3) AddINode(l, r)); }
+   Node* SubI(Node* l, Node* r) { return transform(new (C,3) SubINode(l, r)); }
+   Node* AndI(Node* l, Node* r) { return transform(new (C,3) AndINode(l, r)); }
+@@ -158,4 +174,57 @@
+   Node* Bool(Node* cmp, BoolTest::mask relop) { return transform(new (C,2) BoolNode(cmp, relop)); }
+   void  increment(IdealVariable& v, Node* j)  { set(v, AddI(value(v), j)); }
+   void  decrement(IdealVariable& v, Node* j)  { set(v, SubI(value(v), j)); }
++
++  Node* CmpL(Node* l, Node* r) { return transform(new (C,3) CmpLNode(l, r)); }
++
++  // TLS
++  Node* thread()  {  return gvn().transform(new (C, 1) ThreadLocalNode()); }
++
++  // Pointers
++  Node* AddP(Node *base, Node *ptr, Node *off) { return transform(new (C,4) AddPNode(base, ptr, off)); }
++  Node* CmpP(Node* l, Node* r) { return transform(new (C,3) CmpPNode(l, r)); }
++#ifdef _LP64
++  Node* XorX(Node* l, Node* r) { return transform(new (C,3) XorLNode(l, r)); }
++#else // _LP64
++  Node* XorX(Node* l, Node* r) { return transform(new (C,3) XorINode(l, r)); }
++#endif // _LP64
++  Node* URShiftX(Node* l, Node* r) { return transform(new (C,3) URShiftXNode(l, r)); }
++  Node* ConX(jint k) { return (Node*)gvn().MakeConX(k); }
++  Node* CastPX(Node* ctl, Node* p) { return transform(new (C,2) CastP2XNode(ctl, p)); }
++  // Add a fixed offset to a pointer
++  Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset);
++
++  // Memory operations
++
++  // This is the base version which is given an alias index.
++  Node* load(Node* ctl,
++             Node* adr,
++             const Type* t,
++             BasicType bt,
++             int adr_idx,
++             bool require_atomic_access = false);
++
++  // Return the new StoreXNode
++  Node* store(Node* ctl,
++              Node* adr,
++              Node* val,
++              BasicType bt,
++              int adr_idx,
++              bool require_atomic_access = false);
++
++  // Store a card mark ordered after store_oop
++  Node* storeCM(Node* ctl,
++                Node* adr,
++                Node* val,
++                Node* oop_store,
++                BasicType bt,
++                int adr_idx);
++
++  // Trivial call
++  void make_leaf_call(const TypeFunc *slow_call_type,
++                      address slow_call,
++                      const char *leaf_name,
++                      Node* parm0,
++                      Node* parm1 = NULL,
++                      Node* parm2 = NULL);
+ };
+diff -ruN openjdk6/hotspot/src/share/vm/opto/ifg.cpp openjdk/hotspot/src/share/vm/opto/ifg.cpp
+--- openjdk6/hotspot/src/share/vm/opto/ifg.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/ifg.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ifg.cpp	1.62 07/05/05 17:06:13 JVM"
+-#endif
+ /*
+  * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -263,7 +260,7 @@
+     h_cnt[neighbor_cnt(i)]++;
+   }
+   tty->print_cr("--Histogram of counts--");
+-  for( i = 0; i < _maxlrg*2; i++ ) 
++  for( i = 0; i < _maxlrg*2; i++ )
+     if( h_cnt[i] )
+       tty->print("%d/%d ",i,h_cnt[i]);
+   tty->print_cr("");
+@@ -287,7 +284,7 @@
+       last = idx;
+     }
+     assert( !lrgs(i)._degree_valid ||
+-            effective_degree(i) == lrgs(i).degree(), "degree is valid but wrong" ); 
++            effective_degree(i) == lrgs(i).degree(), "degree is valid but wrong" );
+   }
+ }
+ #endif
+@@ -298,14 +295,14 @@
+ // inteferences as an estimate of register pressure.
+ void PhaseChaitin::interfere_with_live( uint r, IndexSet *liveout ) {
+   uint retval = 0;
+-  // Interfere with everything live.  
++  // Interfere with everything live.
+   const RegMask &rm = lrgs(r).mask();
+   // Check for interference by checking overlap of regmasks.
+   // Only interfere if acceptable register masks overlap.
+   IndexSetIterator elements(liveout);
+   uint l;
+-  while( (l = elements.next()) != 0 ) 
+-    if( rm.overlap( lrgs(l).mask() ) ) 
++  while( (l = elements.next()) != 0 )
++    if( rm.overlap( lrgs(l).mask() ) )
+       _ifg->add_edge( r, l );
+ }
+ 
+@@ -371,10 +368,10 @@
+         // We generally want the USE-DEF register to refer to the
+         // loop-varying quantity, to avoid a copy.
+         uint op = mach->ideal_Opcode();
+-        // Check that mach->num_opnds() == 3 to ensure instruction is 
++        // Check that mach->num_opnds() == 3 to ensure instruction is
+         // not subsuming constants, effectively excludes addI_cin_imm
+-        // Can NOT swap for instructions like addI_cin_imm since it 
+-        // is adding zero to yhi + carry and the second ideal-input 
++        // Can NOT swap for instructions like addI_cin_imm since it
++        // is adding zero to yhi + carry and the second ideal-input
+         // points to the result of adding low-halves.
+         // Checking req() and num_opnds() does NOT distinguish addI_cout from addI_cout_imm
+         if( (op == Op_AddI && mach->req() == 3 && mach->num_opnds() == 3) &&
+@@ -476,7 +473,7 @@
+     uint last_inst = b->end_idx();
+     // Compute last phi index
+     uint last_phi;
+-    for( last_phi = 1; last_phi < last_inst; last_phi++ ) 
++    for( last_phi = 1; last_phi < last_inst; last_phi++ )
+       if( !b->_nodes[last_phi]->is_Phi() )
+         break;
+ 
+@@ -533,7 +530,7 @@
+ 
+       // Some special values do not allocate
+       if( r ) {
+-        // A DEF normally costs block frequency; rematerialized values are 
++        // A DEF normally costs block frequency; rematerialized values are
+         // removed from the DEF sight, so LOWER costs here.
+         lrgs(r)._cost += n->rematerialize() ? 0 : b->_freq;
+ 
+@@ -589,24 +586,24 @@
+               hrp_index[1] = j-1;
+             }
+           }
+-       
++
+         } else {                // Else it is live
+           // A DEF also ends 'area' partway through the block.
+           lrgs(r)._area -= cost;
+           assert( lrgs(r)._area >= 0, "negative spill area" );
+ 
+           // Insure high score for immediate-use spill copies so they get a color
+-          if( n->is_SpillCopy()             
++          if( n->is_SpillCopy()
+               && lrgs(r)._def != NodeSentinel     // MultiDef live range can still split
+               && n->outcnt() == 1              // and use must be in this block
+               && _cfg._bbs[n->unique_out()->_idx] == b ) {
+-            // All single-use MachSpillCopy(s) that immediately precede their 
++            // All single-use MachSpillCopy(s) that immediately precede their
+             // use must color early.  If a longer live range steals their
+-            // color, the spill copy will split and may push another spill copy 
++            // color, the spill copy will split and may push another spill copy
+             // further away resulting in an infinite spill-split-retry cycle.
+-            // Assigning a zero area results in a high score() and a good 
++            // Assigning a zero area results in a high score() and a good
+             // location in the simplify list.
+-            // 
++            //
+ 
+             Node *single_use = n->unique_out();
+             assert( b->find_node(single_use) >= j, "Use must be later in block");
+@@ -616,14 +613,14 @@
+             // (j - 1) is index for current instruction 'n'
+             Node *m = n;
+             for( uint i = j; i <= last_inst && m->is_SpillCopy(); ++i ) { m = b->_nodes[i]; }
+-            if( m == single_use ) { 
++            if( m == single_use ) {
+               lrgs(r)._area = 0.0;
+             }
+           }
+ 
+           // Remove from live-out set
+           if( liveout.remove(r) ) {
+-            // Adjust register pressure.  
++            // Adjust register pressure.
+             // Capture last hi-to-lo pressure transition
+             lower_pressure( &lrgs(r), j-1, b, pressure, hrp_index );
+             assert( pressure[0] == count_int_pressure  (&liveout), "" );
+@@ -637,7 +634,7 @@
+             uint x = n2lidx(n->in(idx));
+             if( liveout.remove( x ) ) {
+               lrgs(x)._area -= cost;
+-              // Adjust register pressure.  
++              // Adjust register pressure.
+               lower_pressure( &lrgs(x), j-1, b, pressure, hrp_index );
+               assert( pressure[0] == count_int_pressure  (&liveout), "" );
+               assert( pressure[1] == count_float_pressure(&liveout), "" );
+@@ -649,9 +646,9 @@
+         // go in a particular register, just remove that register from
+         // all conflicting parties and avoid the interference.
+ 
+-        // Make exclusions for rematerializable defs.  Since rematerializable 
+-        // DEFs are not bound but the live range is, some uses must be bound.  
+-        // If we spill live range 'r', it can rematerialize at each use site 
++        // Make exclusions for rematerializable defs.  Since rematerializable
++        // DEFs are not bound but the live range is, some uses must be bound.
++        // If we spill live range 'r', it can rematerialize at each use site
+         // according to its bindings.
+         const RegMask &rmask = lrgs(r).mask();
+         if( lrgs(r).is_bound() && !(n->rematerialize()) && rmask.is_NotEmpty() ) {
+@@ -814,4 +811,3 @@
+ 
+   return must_spill;
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/opto/ifnode.cpp openjdk/hotspot/src/share/vm/opto/ifnode.cpp
+--- openjdk6/hotspot/src/share/vm/opto/ifnode.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/ifnode.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ifnode.cpp	1.60 07/05/17 17:43:44 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Portions of code courtesy of Clifford Click
+@@ -54,7 +51,7 @@
+   return TypeTuple::IFBOTH;     // No progress
+ }
+ 
+-const RegMask &IfNode::out_RegMask() const { 
++const RegMask &IfNode::out_RegMask() const {
+   return RegMask::Empty;
+ }
+ 
+@@ -138,12 +135,12 @@
+     }
+     // Make sure we can account for all Phi uses
+     for (DUIterator_Fast kmax, k = u->fast_outs(kmax); k < kmax; k++) {
+-      Node* v = u->fast_out(k); // User of the phi  
++      Node* v = u->fast_out(k); // User of the phi
+       // CNC - Allow only really simple patterns.
+       // In particular I disallow AddP of the Phi, a fairly common pattern
+       if( v == cmp ) continue;  // The compare is OK
+       if( (v->is_ConstraintCast()) &&
+-          v->in(0)->in(0) == iff ) 
++          v->in(0)->in(0) == iff )
+         continue;               // CastPP/II of the IfNode is OK
+       // Disabled following code because I cannot tell if exactly one
+       // path dominates without a real dominator check. CNC 9/9/1999
+@@ -207,11 +204,11 @@
+   //      T  F                              T  F                  T  F
+   // ..s..    ..t ..                   ..s..    ..t..        ..s..    ..t..
+   //
+-  // Split the paths coming into the merge point into 2 seperate groups of 
++  // Split the paths coming into the merge point into 2 separate groups of
+   // merges.  On the left will be all the paths feeding constants into the
+   // Cmp's Phi.  On the right will be the remaining paths.  The Cmp's Phi
+   // will fold up into a constant; this will let the Cmp fold up as well as
+-  // all the control flow.  Below the original IF we have 2 control 
++  // all the control flow.  Below the original IF we have 2 control
+   // dependent regions, 's' and 't'.  Now we will merge the two paths
+   // just prior to 's' and 't' from the two IFs.  At least 1 path (and quite
+   // likely 2 or more) will promptly constant fold away.
+@@ -238,8 +235,8 @@
+     }
+   }
+ 
+-  // Register the new RegionNodes but do not transform them.  Cannot 
+-  // transform until the entire Region/Phi conglerate has been hacked 
++  // Register the new RegionNodes but do not transform them.  Cannot
++  // transform until the entire Region/Phi conglomerate has been hacked
+   // as a single huge transform.
+   igvn->register_new_node_with_optimizer( region_c );
+   igvn->register_new_node_with_optimizer( region_x );
+@@ -265,12 +262,12 @@
+   igvn->set_type_bottom(iff_c);
+   igvn->_worklist.push(iff_c);
+   hook->init_req(2, iff_c);
+-  
++
+   IfNode *iff_x = new (igvn->C, 2) IfNode(region_x,b_x,iff->_prob, iff->_fcnt);
+   igvn->set_type_bottom(iff_x);
+   igvn->_worklist.push(iff_x);
+   hook->init_req(3, iff_x);
+-  
++
+   // Make the true/false arms
+   Node *iff_c_t = phase->transform(new (igvn->C, 1) IfTrueNode (iff_c));
+   Node *iff_c_f = phase->transform(new (igvn->C, 1) IfFalseNode(iff_c));
+@@ -299,7 +296,7 @@
+   Node *phi_s = NULL;     // do not construct unless needed
+   Node *phi_f = NULL;     // do not construct unless needed
+   for (DUIterator_Last i2min, i2 = phi->last_outs(i2min); i2 >= i2min; --i2) {
+-    Node* v = phi->last_out(i2);// User of the phi  
++    Node* v = phi->last_out(i2);// User of the phi
+     igvn->hash_delete(v);       // Have to fixup other Phi users
+     igvn->_worklist.push(v);
+     uint vop = v->Opcode();
+@@ -352,7 +349,7 @@
+     } else if( v->is_ConstraintCast() ) {
+       v->set_req(0, proj_path_ctrl );
+       v->set_req(1, proj_path_data );
+-    } else 
++    } else
+       ShouldNotReachHere();
+   }
+ 
+@@ -364,13 +361,19 @@
+     Node *u = (p->Opcode() == Op_IfTrue) ? region_s : region_f;
+     // Replace p with u
+     igvn->add_users_to_worklist(p);
+-    for (DUIterator_Last jmin, j = p->last_outs(jmin); j >= jmin; --j) {
+-      Node* x = p->last_out(j);
+-      igvn->hash_delete(x);  
+-      for( uint j = 0; j < x->req(); j++ )
+-        if( x->in(j) == p )
+-          x->set_req_X(j,u,igvn);
++    for (DUIterator_Last lmin, l = p->last_outs(lmin); l >= lmin;) {
++      Node* x = p->last_out(l);
++      igvn->hash_delete(x);
++      uint uses_found = 0;
++      for( uint j = 0; j < x->req(); j++ ) {
++        if( x->in(j) == p ) {
++          x->set_req(j, u);
++          uses_found++;
++        }
++      }
++      l -= uses_found;    // we deleted 1 or more copies of this edge
+     }
++    igvn->remove_dead_node(p);
+   }
+ 
+   // Force the original merge dead
+@@ -446,7 +449,7 @@
+   Node* ind = l;
+   jint  off = 0;
+   if (l->is_top()) {
+-    return 0;                
++    return 0;
+   } else if (l->is_Add()) {
+     if ((off = l->in(1)->find_int_con(0)) != 0) {
+       ind = l->in(2);
+@@ -487,8 +490,8 @@
+   if( index ) {
+     new_add = off_lo ? gvn->transform(new (gvn->C, 3) AddINode( index, new_add )) : index;
+   }
+-  Node *new_cmp = (flip == 1) 
+-    ? new (gvn->C, 3) CmpUNode( new_add, range ) 
++  Node *new_cmp = (flip == 1)
++    ? new (gvn->C, 3) CmpUNode( new_add, range )
+     : new (gvn->C, 3) CmpUNode( range, new_add );
+   new_cmp = gvn->transform(new_cmp);
+   // See if no need to adjust the existing check
+@@ -516,7 +519,7 @@
+     return NULL;
+ 
+   // Else hit a Region.  Check for a loop header
+-  if( dom->is_Loop() ) 
++  if( dom->is_Loop() )
+     return dom->in(1);          // Skip up thru loops
+ 
+   // Check for small diamonds
+@@ -571,7 +574,7 @@
+ 
+   // phi->region->if_proj->ifnode->bool->cmp
+   BoolNode *bol2 = phi->in(0)->in(1)->in(0)->in(1)->as_Bool();
+-  
++
+   // Now get the 'sense' of the test correct so we can plug in
+   // either iff2->in(1) or its complement.
+   int flip = 0;
+@@ -604,7 +607,7 @@
+ static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff);
+ 
+ //------------------------------Ideal------------------------------------------
+-// Return a node which is more "ideal" than the current node.  Strip out 
++// Return a node which is more "ideal" than the current node.  Strip out
+ // control copies
+ Node *IfNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+   if (remove_dead_region(phase, can_reshape))  return this;
+@@ -650,8 +653,8 @@
+     // so all checks we inspect post-dominate the top-most check we find.
+     // If we are going to fail the current check and we reach the top check
+     // then we are guarenteed to fail, so just start interpreting there.
+-    // We 'expand' the top 2 range checks to include all post-dominating 
+-    // checks.  
++    // We 'expand' the top 2 range checks to include all post-dominating
++    // checks.
+ 
+     // The top 2 range checks seen
+     Node *prev_chk1 = NULL;
+@@ -671,7 +674,7 @@
+         int flip2 = dom->as_If()->is_range_check(range2, index2, offset2);
+         // See if this is a _matching_ range check, checking against
+         // the same array bounds.
+-        if( flip2 == flip1 && range2 == range1 && index2 == index1 && 
++        if( flip2 == flip1 && range2 == range1 && index2 == index1 &&
+             dom->outcnt() == 2 ) {
+           // Gather expanded bounds
+           off_lo = MIN2(off_lo,offset2);
+@@ -704,7 +707,7 @@
+       if( !prev_chk2 ) return NULL;
+       // 'Widen' the offsets of the 1st and 2nd covering check
+       adjust_check( prev_chk1, range1, index1, flip1, off_lo, igvn );
+-      // Do not call adjust_check twice on the same projection 
++      // Do not call adjust_check twice on the same projection
+       // as the first call may have transformed the BoolNode to a ConI
+       if( prev_chk1 != prev_chk2 ) {
+         adjust_check( prev_chk2, range1, index1, flip1, off_hi, igvn );
+@@ -726,7 +729,7 @@
+     Node *cmp;
+     int dist = 0;               // Cutoff limit for search
+     int op = Opcode();
+-    if( op == Op_If && 
++    if( op == Op_If &&
+         (cmp=in(1)->in(1))->Opcode() == Op_CmpP ) {
+       if( cmp->in(2) != NULL && // make sure cmp is not already dead
+           cmp->in(2)->bottom_type() == TypePtr::NULL_PTR ) {
+@@ -755,7 +758,7 @@
+     }
+ 
+     // Check that we did not follow a loop back to ourselves
+-    if( this == dom ) 
++    if( this == dom )
+       return NULL;
+ 
+     if( dist > 2 )              // Add to count of NULL checks elided
+@@ -769,7 +772,7 @@
+     tty->print("   Removing IfNode: "); this->dump();
+   }
+   if( VerifyOpto && !phase->allow_progress() ) {
+-    // Found an equivalent dominating test, 
++    // Found an equivalent dominating test,
+     // we can not guarantee reaching a fix-point for these during iterativeGVN
+     // since intervening nodes may not change.
+     return NULL;
+@@ -807,7 +810,7 @@
+     Node *ctrl_target = (ifp->Opcode() == prev_op ) ?     idom : top;
+ 
+     // For each child of an IfTrue/IfFalse projection, reroute.
+-    // Loop ends when projection has no more uses. 
++    // Loop ends when projection has no more uses.
+     for (DUIterator_Last jmin, j = ifp->last_outs(jmin); j >= jmin; --j) {
+       Node* s = ifp->last_out(j);   // Get child of IfTrue/IfFalse
+       igvn->hash_delete(s);         // Yank from hash table before edge hacking
+@@ -815,9 +818,9 @@
+         // Find the control input matching this def-use edge.
+         // For Regions it may not be in slot 0.
+         uint l;
+-        for( l = 0; s->in(l) != ifp; l++ ) { } 
++        for( l = 0; s->in(l) != ifp; l++ ) { }
+         s->set_req(l, ctrl_target);
+-      } else {                      // Else, for control producers, 
++      } else {                      // Else, for control producers,
+         s->set_req(0, data_target); // Move child to data-target
+       }
+       igvn->_worklist.push(s);  // Revisit collapsed Phis
+@@ -842,8 +845,8 @@
+ 
+ //------------------------------dump_spec--------------------------------------
+ #ifndef PRODUCT
+-void IfNode::dump_spec() const { 
+-  tty->print("P=%f, C=%f",_prob,_fcnt);
++void IfNode::dump_spec(outputStream *st) const {
++  st->print("P=%f, C=%f",_prob,_fcnt);
+ }
+ #endif
+ 
+@@ -867,7 +870,7 @@
+   BoolNode *b = iff->in(1)->as_Bool();
+   BoolTest bt = b->_test;
+   // Test already in good order?
+-  if( bt.is_canonical() ) 
++  if( bt.is_canonical() )
+     return NULL;
+ 
+   // Flip test to be canonical.  Requires flipping the IfFalse/IfTrue and
+@@ -878,7 +881,7 @@
+ 
+   PhaseIterGVN *igvn = phase->is_IterGVN();
+   assert( igvn, "Test is not canonical in parser?" );
+-  
++
+   // The IF node never really changes, but it needs to be cloned
+   iff = new (phase->C, 2) IfNode( iff->in(0), b, 1.0-iff->_prob, iff->_fcnt);
+ 
+@@ -900,7 +903,7 @@
+   igvn->register_new_node_with_optimizer(new_if_t);
+   igvn->hash_delete(old_if_f);
+   igvn->hash_delete(old_if_t);
+-  // Flip test, so flip trailing control 
++  // Flip test, so flip trailing control
+   igvn->subsume_node(old_if_f, new_if_t);
+   igvn->subsume_node(old_if_t, new_if_f);
+ 
+@@ -917,4 +920,3 @@
+     ? in(0)->in(0)              // IfNode control
+     : this;                     // no progress
+ }
+-
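Besides whitespace cleanup, the substantive change in this file is the hunk at @@ -364: the old loop rewired def-use edges while decrementing its cursor once per iteration, which goes wrong when a user references the projection through more than one input slot; the new loop counts the edges it actually rewired and steps the reverse cursor by uses_found, then frees the dead projection. A standalone sketch of that pattern (illustrative only — ToyNode and set_req are simplified stand-ins for HotSpot's Node and DUIterator machinery):

    #include <cassert>
    #include <cstdio>
    #include <vector>

    struct ToyNode {
      std::vector<ToyNode*> in;    // input edges
      std::vector<ToyNode*> outs;  // def-use list: one entry per input edge using us
    };

    // Rewire user->in[j] from old_def to new_def, keeping the def-use
    // lists consistent (one reverse entry disappears from old_def->outs).
    static void set_req(ToyNode* user, size_t j, ToyNode* old_def, ToyNode* new_def) {
      user->in[j] = new_def;
      for (size_t k = 0; k < old_def->outs.size(); k++) {
        if (old_def->outs[k] == user) { old_def->outs.erase(old_def->outs.begin() + k); break; }
      }
      new_def->outs.push_back(user);
    }

    int main() {
      ToyNode p, x, u, r;
      x.in.push_back(&p); x.in.push_back(&p);   // x uses p twice
      u.in.push_back(&p);                       // u uses p once
      p.outs.push_back(&x); p.outs.push_back(&x); p.outs.push_back(&u);

      // Walk p's def-use list backwards, rewiring every use of p to r.
      // Each rewire shrinks p->outs, so the cursor steps by uses_found,
      // not by one -- the fix made in the hunk above. uses_found is never
      // zero here because user is, by construction, on p's out-list.
      for (size_t l = p.outs.size(); l > 0; ) {
        ToyNode* user = p.outs[l - 1];
        size_t uses_found = 0;
        for (size_t j = 0; j < user->in.size(); j++) {
          if (user->in[j] == &p) { set_req(user, j, &p, &r); uses_found++; }
        }
        l -= uses_found;  // we deleted one or more copies of this edge
      }
      assert(p.outs.empty() && r.outs.size() == 3);
      std::printf("all uses of p rewired to r\n");
      return 0;
    }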
+diff -ruN openjdk6/hotspot/src/share/vm/opto/indexSet.cpp openjdk/hotspot/src/share/vm/opto/indexSet.cpp
+--- openjdk6/hotspot/src/share/vm/opto/indexSet.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/indexSet.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)indexSet.cpp	1.24 07/05/05 17:06:16 JVM"
+-#endif
+ /*
+  * Copyright 1998-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This file defines the IndexSet class, a set of sparse integer indices.
+@@ -78,40 +75,40 @@
+ // in case we want to switch back.
+ 
+ /*const byte IndexSetIterator::_first_bit[256] = {
+-  8, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 
+-  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 
+-  5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 
+-  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 
+-  6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 
+-  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 
+-  5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 
+-  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 
+-  7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 
+-  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 
+-  5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 
+-  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 
+-  6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 
+-  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 
+-  5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 
++  8, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
++  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
++  5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
++  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
++  6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
++  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
++  5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
++  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
++  7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
++  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
++  5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
++  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
++  6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
++  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
++  5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+   4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
+ };
+ 
+ const byte IndexSetIterator::_second_bit[256] = {
+-  8, 8, 8, 1, 8, 2, 2, 1, 8, 3, 3, 1, 3, 2, 2, 1, 
+-  8, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 
+-  8, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 
+-  5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 
+-  8, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1, 
+-  6, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 
+-  6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 
+-  5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 
+-  8, 7, 7, 1, 7, 2, 2, 1, 7, 3, 3, 1, 3, 2, 2, 1, 
+-  7, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 
+-  7, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 
+-  5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 
+-  7, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1, 
+-  6, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 
+-  6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 
++  8, 8, 8, 1, 8, 2, 2, 1, 8, 3, 3, 1, 3, 2, 2, 1,
++  8, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
++  8, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1,
++  5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
++  8, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1,
++  6, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
++  6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1,
++  5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
++  8, 7, 7, 1, 7, 2, 2, 1, 7, 3, 3, 1, 3, 2, 2, 1,
++  7, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
++  7, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1,
++  5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
++  7, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1,
++  6, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
++  6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1,
+   5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1
+ };*/
+ 
+@@ -200,7 +197,7 @@
+ // the union is performed.
+ 
+ uint IndexSet::lrg_union(uint lr1, uint lr2,
+-                         const uint fail_degree, 
++                         const uint fail_degree,
+                          const PhaseIFG *ifg,
+                          const RegMask &mask ) {
+   IndexSet *one = ifg->neighbors(lr1);
+@@ -225,13 +222,13 @@
+     one = two;
+     two = temp;
+   }
+-  
++
+   clear();
+ 
+   // Used to compute degree of register-only interferences.  Infinite-stack
+   // neighbors do not alter colorability, as they can always color to some
+   // other color.  (A variant of the Briggs assertion)
+-  uint reg_degree = 0; 
++  uint reg_degree = 0;
+ 
+   uint element;
+   // Load up the combined interference set with the neighbors of one
+@@ -244,7 +241,7 @@
+         reg_degree += lrg1.compute_degree(lrg);
+         if( reg_degree >= fail_degree ) return reg_degree;
+       } else {
+-        // !!!!! Danger!  No update to reg_degree despite having a neighbor.  
++        // !!!!! Danger!  No update to reg_degree despite having a neighbor.
+         // A variant of the Briggs assertion.
+         // Not needed if I simplify during coalesce, ala George/Appel.
+         assert( lrg.lo_degree(), "" );
+@@ -261,7 +258,7 @@
+           reg_degree += lrg2.compute_degree(lrg);
+           if( reg_degree >= fail_degree ) return reg_degree;
+         } else {
+-          // !!!!! Danger!  No update to reg_degree despite having a neighbor.  
++          // !!!!! Danger!  No update to reg_degree despite having a neighbor.
+           // A variant of the Briggs assertion.
+           // Not needed if I simplify during coalesce, ala George/Appel.
+           assert( lrg.lo_degree(), "" );
+@@ -269,7 +266,7 @@
+       }
+     }
+   }
+-    
++
+   return reg_degree;
+ }
+ 
+@@ -375,7 +372,7 @@
+ #ifndef PRODUCT
+ void IndexSet::dump() const {
+   IndexSetIterator elements(this);
+-  
++
+   tty->print("{");
+   uint i;
+   while ((i = elements.next()) != 0) {
+diff -ruN openjdk6/hotspot/src/share/vm/opto/indexSet.hpp openjdk/hotspot/src/share/vm/opto/indexSet.hpp
+--- openjdk6/hotspot/src/share/vm/opto/indexSet.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/indexSet.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)indexSet.hpp	1.29 07/05/05 17:06:18 JVM"
+-#endif
+ /*
+  * Copyright 1998-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This file defines the IndexSet class, a set of sparse integer indices.
+@@ -84,7 +81,7 @@
+   static uint get_bit_index(uint element) {
+     return mask_bits(element,bit_index_mask);
+   }
+-  
++
+   //------------------------------ class BitBlock ----------------------------
+   // The BitBlock class is a segment of a bitvector set.
+ 
+@@ -122,7 +119,7 @@
+       uint word_index = IndexSet::get_word_index(element);
+       uint bit_index = IndexSet::get_bit_index(element);
+ 
+-      return ((words()[word_index] & (uint32)(0x1 << bit_index)) != 0);  
++      return ((words()[word_index] & (uint32)(0x1 << bit_index)) != 0);
+     }
+ 
+     bool insert(uint element) {
+@@ -245,7 +242,7 @@
+ 
+   // Get a BitBlock from the free list and place it in the top level array
+   BitBlock *alloc_block_containing(uint element);
+-  
++
+   // Free a block from the top level array, placing it on the free BitBlock list
+   void free_block(uint i);
+ 
+@@ -276,7 +273,7 @@
+ 
+   bool insert(uint element) {
+ #ifdef ASSERT
+-    if( VerifyOpto ) 
++    if( VerifyOpto )
+       check_watch("insert", element);
+ #endif
+     if (element == 0) {
+@@ -295,7 +292,7 @@
+ 
+   bool remove(uint element) {
+ #ifdef ASSERT
+-    if( VerifyOpto ) 
++    if( VerifyOpto )
+       check_watch("remove", element);
+ #endif
+ 
+@@ -313,7 +310,7 @@
+   // exceeds fail_degree, the union bails out.  The underlying set is
+   // cleared before the union is performed.
+   uint lrg_union(uint lr1, uint lr2,
+-                 const uint fail_degree, 
++                 const uint fail_degree,
+                  const class PhaseIFG *ifg,
+                  const RegMask &mask);
+ 
+@@ -386,7 +383,7 @@
+ 
+ 
+ //-------------------------------- class IndexSetIterator --------------------
+-// An iterator for IndexSets.  
++// An iterator for IndexSets.
+ 
+ class IndexSetIterator VALUE_OBJ_CLASS_SPEC {
+  friend class IndexSet;
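For reference, the membership test visible in the BitBlock hunk above splits an element into a word index and a bit index and masks against the word. A standalone sketch of that arithmetic (illustrative only — the 5-bit split for 32-bit words is an assumption made for this note; the real class derives its widths from the block geometry):

    #include <cassert>
    #include <cstdio>

    // 32-bit words, so an element's low 5 bits select the bit within a word
    // and the remaining bits select the word.
    typedef unsigned int uint32;
    static const uint32 bit_index_mask = (1 << 5) - 1;

    static uint32 get_word_index(uint32 element) { return element >> 5; }
    static uint32 get_bit_index(uint32 element)  { return element & bit_index_mask; }

    int main() {
      uint32 words[8] = {0};   // 8 words * 32 bits = 256 elements
      uint32 element  = 77;    // 77 = 2*32 + 13 -> word 2, bit 13

      words[get_word_index(element)] |= (uint32)(0x1 << get_bit_index(element));  // insert
      bool member =
        (words[get_word_index(element)] & (uint32)(0x1 << get_bit_index(element))) != 0;

      assert(member);
      std::printf("element %u -> word %u, bit %u, member=%d\n",
                  element, get_word_index(element), get_bit_index(element), (int)member);
      return 0;
    }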
+diff -ruN openjdk6/hotspot/src/share/vm/opto/lcm.cpp openjdk/hotspot/src/share/vm/opto/lcm.cpp
+--- openjdk6/hotspot/src/share/vm/opto/lcm.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/lcm.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)lcm.cpp	1.102 07/05/17 15:58:55 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Optimization - Graph Style
+@@ -31,7 +28,7 @@
+ #include "incls/_lcm.cpp.incl"
+ 
+ //------------------------------implicit_null_check----------------------------
+-// Detect implicit-null-check opportunities.  Basically, find NULL checks 
++// Detect implicit-null-check opportunities.  Basically, find NULL checks
+ // with suitable memory ops nearby.  Use the memory op to do the NULL check.
+ // I can generate a memory op if there is not one nearby.
+ // The proj is the control projection for the not-null case.
+@@ -95,7 +92,7 @@
+       return;
+     }
+   }
+-  
++
+   // Search the successor block for a load or store who's base value is also
+   // the tested value.  There may be several.
+   Node_List *out = new Node_List(Thread::current()->resource_area());
+@@ -133,8 +130,8 @@
+       // are storing the checked value, which does NOT check the value!
+       if( mach->in(2) != val ) continue;
+       break;                    // Found a memory op?
+-    case Op_StrComp:            
+-      // Not a legit memory op for implicit null check regardless of 
++    case Op_StrComp:
++      // Not a legit memory op for implicit null check regardless of
+       // embedded loads
+       continue;
+     default:                    // Also check for embedded loads
+@@ -155,7 +152,7 @@
+         if( offset == Type::OffsetBot || tptr->_offset == Type::OffsetBot )
+           continue;
+         offset += tptr->_offset; // correct if base is offseted
+-        if( MacroAssembler::needs_explicit_null_check(offset) ) 
++        if( MacroAssembler::needs_explicit_null_check(offset) )
+           continue;             // Give up is reference is beyond 4K page size
+       }
+     }
+@@ -190,9 +187,9 @@
+       if( b != inb )
+         break;
+     }
+-    if( j > 0 ) 
++    if( j > 0 )
+       continue;
+-    Block *mb = cfg->_bbs[mach->_idx]; 
++    Block *mb = cfg->_bbs[mach->_idx];
+     // Hoisting stores requires more checks for the anti-dependence case.
+     // Give up hoisting if we have to move the store past any load.
+     if( was_store ) {
+@@ -203,7 +200,7 @@
+         uint k;
+         for( k = 1; k < b->_nodes.size(); k++ ) {
+           Node *n = b->_nodes[k];
+-          if( n->needs_anti_dependence_check() && 
++          if( n->needs_anti_dependence_check() &&
+               n->in(LoadNode::Memory) == mach->in(StoreNode::Memory) )
+             break;              // Found anti-dependent load
+         }
+@@ -221,9 +218,9 @@
+     if( e->is_MachNullCheck() && e->in(1) == mach )
+       continue;                 // Already being used as a NULL check
+ 
+-    // Found a candidate!  Pick one with least dom depth - the highest 
++    // Found a candidate!  Pick one with least dom depth - the highest
+     // in the dom tree should be closest to the null check.
+-    if( !best || 
++    if( !best ||
+         cfg->_bbs[mach->_idx]->_dom_depth < cfg->_bbs[best->_idx]->_dom_depth ) {
+       best = mach;
+       bidx = vidx;
+@@ -271,7 +268,7 @@
+     Node *tmp1 = _nodes[end_idx()+1];
+     Node *tmp2 = _nodes[end_idx()+2];
+     _nodes.map(end_idx()+1, tmp2);
+-    _nodes.map(end_idx()+2, tmp1);    
++    _nodes.map(end_idx()+2, tmp1);
+     Node *tmp = new (C, 1) Node(C->top()); // Use not NULL input
+     tmp1->replace_by(tmp);
+     tmp2->replace_by(tmp1);
+@@ -335,7 +332,7 @@
+         n->Opcode()== Op_Con || // So does constant 'Top'
+         iop == Op_CreateEx ||   // Create-exception must start block
+         iop == Op_CheckCastPP
+-        ) {  
++        ) {
+       worklist.map(i,worklist.pop());
+       return n;
+     }
+@@ -372,7 +369,7 @@
+         if (ready_cnt[use->_idx] > 1)
+           n_choice = 1;
+       }
+-  
++
+       // loop terminated, prefer not to use this instruction
+       if (found_machif)
+         continue;
+@@ -424,16 +421,16 @@
+   for( uint i=0; i<n->len(); i++ ) {
+     Node *m = n->in(i);
+     if( !m ) continue;  // must see all nodes in block that precede call
+-    if( bbs[m->_idx] == this ) 
++    if( bbs[m->_idx] == this )
+       set_next_call( m, next_call, bbs );
+   }
+ }
+ 
+ //------------------------------needed_for_next_call---------------------------
+ // Set the flag 'next_call' for each Node that is needed for the next call to
+-// be scheduled.  This flag lets me bias scheduling so Nodes needed for the 
++// be scheduled.  This flag lets me bias scheduling so Nodes needed for the
+ // next subroutine call get priority - basically it moves things NOT needed
+-// for the next call till after the call.  This prevents me from trying to 
++// for the next call till after the call.  This prevents me from trying to
+ // carry lots of stuff live across a call.
+ void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs) {
+   // Find the next control-defining Node in this block
+@@ -468,7 +465,7 @@
+     // Collect defined registers
+     regs.OR(n->out_RegMask());
+     // Check for scheduling the next control-definer
+-    if( n->bottom_type() == Type::CONTROL ) 
++    if( n->bottom_type() == Type::CONTROL )
+       // Warm up next pile of heuristic bits
+       needed_for_next_call(n, next_call, bbs);
+ 
+@@ -477,10 +474,10 @@
+       Node* m = n->fast_out(j); // Get user
+       if( bbs[m->_idx] != this ) continue;
+       if( m->is_Phi() ) continue;
+-      if( !--ready_cnt[m->_idx] ) 
++      if( !--ready_cnt[m->_idx] )
+         worklist.push(m);
+     }
+-  
++
+   }
+ 
+   // Act as if the call defines the Frame Pointer.
+@@ -513,7 +510,7 @@
+     default:
+       ShouldNotReachHere();
+   }
+-                                                      
++
+   // When using CallRuntime mark SOE registers as killed by the call
+   // so values that could show up in the RegisterMap aren't live in a
+   // callee saved register since the register wouldn't know where to
+@@ -526,7 +523,7 @@
+ 
+   // Fill in the kill mask for the call
+   for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) {
+-    if( !regs.Member(r) ) {     // Not already defined by the call  
++    if( !regs.Member(r) ) {     // Not already defined by the call
+       // Save-on-call register?
+       if ((save_policy[r] == 'C') ||
+           (save_policy[r] == 'A') ||
+@@ -623,7 +620,7 @@
+   // Make a worklist
+   Node_List worklist;
+   for(uint i4=i3; i4<node_cnt; i4++ ) {    // Put ready guys on worklist
+-    Node *m = _nodes[i4];    
++    Node *m = _nodes[i4];
+     if( !ready_cnt[m->_idx] ) {   // Zero ready count?
+       if (m->is_iteratively_computed()) {
+         // Push induction variable increments last to allow other uses
+@@ -699,7 +696,7 @@
+       Node* m = n->fast_out(i5); // Get user
+       if( cfg->_bbs[m->_idx] != this ) continue;
+       if( m->is_Phi() ) continue;
+-      if( !--ready_cnt[m->_idx] ) 
++      if( !--ready_cnt[m->_idx] )
+         worklist.push(m);
+     }
+   }
+@@ -751,35 +748,35 @@
+ //------------------------------catch_cleanup_find_cloned_def------------------
+ static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, Block_Array &bbs, int n_clone_idx) {
+   assert( use_blk != def_blk, "Inter-block cleanup only");
+-  
++
+   // The use is some block below the Catch.  Find and return the clone of the def
+   // that dominates the use. If there is no clone in a dominating block, then
+   // create a phi for the def in a dominating block.
+-  
++
+   // Find which successor block dominates this use.  The successor
+   // blocks must all be single-entry (from the Catch only; I will have
+   // split blocks to make this so), hence they all dominate.
+   while( use_blk->_dom_depth > def_blk->_dom_depth+1 )
+     use_blk = use_blk->_idom;
+-  
++
+   // Find the successor
+   Node *fixup = NULL;
+ 
+   uint j;
+   for( j = 0; j < def_blk->_num_succs; j++ )
+-    if( use_blk == def_blk->_succs[j] ) 
++    if( use_blk == def_blk->_succs[j] )
+       break;
+ 
+   if( j == def_blk->_num_succs ) {
+-    // Block at same level in dom-tree is not a successor.  It needs a 
++    // Block at same level in dom-tree is not a successor.  It needs a
+     // PhiNode, the PhiNode uses from the def and IT's uses need fixup.
+     Node_Array inputs = new Node_List(Thread::current()->resource_area());
+     for(uint k = 1; k < use_blk->num_preds(); k++) {
+       inputs.map(k, catch_cleanup_find_cloned_def(bbs[use_blk->pred(k)->_idx], def, def_blk, bbs, n_clone_idx));
+     }
+ 
+-    // Check to see if the use_blk already has an identical phi inserted.  
+-    // If it exists, it will be at the first position since all uses of a 
++    // Check to see if the use_blk already has an identical phi inserted.
++    // If it exists, it will be at the first position since all uses of a
+     // def are processed together.
+     Node *phi = use_blk->_nodes[1];
+     if( phi->is_Phi() ) {
+@@ -803,7 +800,7 @@
+       }
+       fixup = new_phi;
+     }
+-    
++
+   } else {
+     // Found the use just below the Catch.  Make it use the clone.
+     fixup = use_blk->_nodes[n_clone_idx];
+@@ -854,7 +851,7 @@
+   if( !_nodes[end]->is_Catch() ) return;
+   // Start of region to clone
+   uint beg = end;
+-  while( _nodes[beg-1]->Opcode() != Op_MachProj || 
++  while( _nodes[beg-1]->Opcode() != Op_MachProj ||
+         !_nodes[beg-1]->in(0)->is_Call() ) {
+     beg--;
+     assert(beg > 0,"Catch cleanup walking beyond block boundary");
+@@ -935,7 +932,3 @@
+     }
+   }
+ }
+-
+-
+-
+-
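
Before moving on to library_call.cpp: the catch_cleanup_find_cloned_def hunk above walks the use's block up the dominator tree until it sits exactly one level below the def's block, i.e. at the level of def_blk's successors. That walk in isolation, with a hypothetical Block struct in place of C2's:

    struct Block {
      Block* idom;       // immediate dominator
      int    dom_depth;  // depth in the dominator tree
    };

    // Hoist 'use_blk' up the dominator tree until it is one level below
    // 'def_blk', as catch_cleanup_find_cloned_def does before checking
    // whether it landed on one of def_blk's successors.
    Block* hoist_to_successor_level(Block* use_blk, const Block* def_blk) {
      while (use_blk->dom_depth > def_blk->dom_depth + 1)
        use_blk = use_blk->idom;
      return use_blk;
    }
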
+diff -ruN openjdk6/hotspot/src/share/vm/opto/library_call.cpp openjdk/hotspot/src/share/vm/opto/library_call.cpp
+--- openjdk6/hotspot/src/share/vm/opto/library_call.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/library_call.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)library_call.cpp	1.164 07/05/17 15:59:02 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -76,8 +73,12 @@
+   Node* generate_guard(Node* test, RegionNode* region, float true_prob);
+   Node* generate_slow_guard(Node* test, RegionNode* region);
+   Node* generate_fair_guard(Node* test, RegionNode* region);
+-  Node* generate_negative_guard(Node* index, RegionNode* region);
+-  Node* generate_nonpositive_guard(Node* index, bool never_negative);
++  Node* generate_negative_guard(Node* index, RegionNode* region,
++                                // resulting CastII of index:
++                                Node* *pos_index = NULL);
++  Node* generate_nonpositive_guard(Node* index, bool never_negative,
++                                   // resulting CastII of index:
++                                   Node* *pos_index = NULL);
+   Node* generate_limit_guard(Node* offset, Node* subseq_length,
+                              Node* array_length,
+                              RegionNode* region);
+@@ -180,8 +181,22 @@
+                           Node* copy_length,
+                           int nargs,  // arguments on stack for debug info
+                           bool disjoint_bases = false,
+-			  bool length_never_negative = false,
+-                          Node* slow_region = NULL);
++                          bool length_never_negative = false,
++                          RegionNode* slow_region = NULL);
++  AllocateArrayNode* tightly_coupled_allocation(Node* ptr,
++                                                RegionNode* slow_region);
++  void generate_clear_array(const TypePtr* adr_type,
++                            Node* dest,
++                            BasicType basic_elem_type,
++                            Node* slice_off,
++                            Node* slice_len,
++                            Node* slice_end);
++  bool generate_block_arraycopy(const TypePtr* adr_type,
++                                BasicType basic_elem_type,
++                                AllocateNode* alloc,
++                                Node* src,  Node* src_offset,
++                                Node* dest, Node* dest_offset,
++                                Node* dest_size);
+   void generate_slow_arraycopy(const TypePtr* adr_type,
+                                Node* src,  Node* src_offset,
+                                Node* dest, Node* dest_offset,
+@@ -196,8 +211,8 @@
+                                    Node* src,  Node* src_offset,
+                                    Node* dest, Node* dest_offset,
+                                    Node* copy_length, int nargs);
+-  void generate_unchecked_arraycopy(BasicType basic_elem_type,
+-                                    const TypePtr* adr_type,
++  void generate_unchecked_arraycopy(const TypePtr* adr_type,
++                                    BasicType basic_elem_type,
+                                     bool disjoint_bases,
+                                     Node* src,  Node* src_offset,
+                                     Node* dest, Node* dest_offset,
+@@ -631,6 +646,14 @@
+ 
+ //------------------------------generate_guard---------------------------
+ // Helper function for generating guarded fast-slow graph structures.
++// The given 'test', if true, guards a slow path.  If the test fails
++// then a fast path can be taken.  (We generally hope it fails.)
++// In all cases, GraphKit::control() is updated to the fast path.
++// The returned value represents the control for the slow path.
++// The return value is never 'top'; it is either a valid control
++// or NULL if it is obvious that the slow path can never be taken.
++// Also, if region and the slow control are not NULL, the slow edge
++// is appended to the region.
+ Node* LibraryCallKit::generate_guard(Node* test, RegionNode* region, float true_prob) {
+   if (stopped()) {
+     // Already short circuited.
+@@ -654,7 +677,7 @@
+ 
+   if (region != NULL)
+     region->add_req(if_slow);
+-  
++
+   Node* if_fast = _gvn.transform( new (C, 1) IfFalseNode(iff) );
+   set_control(if_fast);
+ 
+@@ -668,7 +691,8 @@
+   return generate_guard(test, region, PROB_FAIR);
+ }
+ 
+-inline Node* LibraryCallKit::generate_negative_guard(Node* index, RegionNode* region) {
++inline Node* LibraryCallKit::generate_negative_guard(Node* index, RegionNode* region,
++                                                     Node* *pos_index) {
+   if (stopped())
+     return NULL;                // already stopped
+   if (_gvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint]
+@@ -676,10 +700,17 @@
+   Node* cmp_lt = _gvn.transform( new (C, 3) CmpINode(index, intcon(0)) );
+   Node* bol_lt = _gvn.transform( new (C, 2) BoolNode(cmp_lt, BoolTest::lt) );
+   Node* is_neg = generate_guard(bol_lt, region, PROB_MIN);
++  if (is_neg != NULL && pos_index != NULL) {
++    // Emulate effect of Parse::adjust_map_after_if.
++    Node* ccast = new (C, 2) CastIINode(index, TypeInt::POS);
++    ccast->set_req(0, control());
++    (*pos_index) = _gvn.transform(ccast);
++  }
+   return is_neg;
+ }
+ 
+-inline Node* LibraryCallKit::generate_nonpositive_guard(Node* index, bool never_negative) {
++inline Node* LibraryCallKit::generate_nonpositive_guard(Node* index, bool never_negative,
++                                                        Node* *pos_index) {
+   if (stopped())
+     return NULL;                // already stopped
+   if (_gvn.type(index)->higher_equal(TypeInt::POS1)) // [1,maxint]
+@@ -688,6 +719,12 @@
+   BoolTest::mask le_or_eq = (never_negative ? BoolTest::eq : BoolTest::le);
+   Node* bol_le = _gvn.transform( new (C, 2) BoolNode(cmp_le, le_or_eq) );
+   Node* is_notp = generate_guard(bol_le, NULL, PROB_MIN);
++  if (is_notp != NULL && pos_index != NULL) {
++    // Emulate effect of Parse::adjust_map_after_if.
++    Node* ccast = new (C, 2) CastIINode(index, TypeInt::POS1);
++    ccast->set_req(0, control());
++    (*pos_index) = _gvn.transform(ccast);
++  }
+   return is_notp;
+ }
+ 
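
Both guard helpers above gain an optional pos_index out-parameter: once the negative branch has been split off, the surviving path receives the index re-typed by a CastII pinned to the new control, so downstream nodes see the [0,maxint] range a real if would have proven. A scalar sketch of that contract, with an unsigned result standing in for TypeInt::POS:

    #include <cstdint>

    // After the guard splits off index < 0, the fast path may treat the
    // index as provably non-negative; the CastII is the IR-level analogue
    // of handing out this re-typed value.
    bool guard_negative(int32_t index, uint32_t* pos_index) {
      if (index < 0)
        return true;                // slow edge: appended to the bailout region
      *pos_index = (uint32_t)index; // fast path sees the narrowed type
      return false;
    }
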
+@@ -711,10 +748,12 @@
+                                                   RegionNode* region) {
+   if (stopped())
+     return NULL;                // already stopped
+-  if (_gvn.type(offset) == TypeInt::ZERO &&
+-      _gvn.eqv_uncast(subseq_length, array_length))
++  bool zero_offset = _gvn.type(offset) == TypeInt::ZERO;
++  if (zero_offset && _gvn.eqv_uncast(subseq_length, array_length))
+     return NULL;                // common case of whole-array copy
+-  Node* last = _gvn.transform( new (C, 3) AddINode(offset, subseq_length));
++  Node* last = subseq_length;
++  if (!zero_offset)             // last += offset
++    last = _gvn.transform( new (C, 3) AddINode(last, offset));
+   Node* cmp_lt = _gvn.transform( new (C, 3) CmpUNode(array_length, last) );
+   Node* bol_lt = _gvn.transform( new (C, 2) BoolNode(cmp_lt, BoolTest::lt) );
+   Node* is_over = generate_guard(bol_lt, region, PROB_MIN);
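
The generate_limit_guard hunk now skips the AddINode when the offset is a known zero; the guard itself remains the classic unsigned range check. In scalar form (a sketch of the emitted test, not HotSpot code):

    #include <cstdint>

    // Reject when [offset, offset+len) reaches past 'length'.  The unsigned
    // compare lets one branch also catch 32-bit wraparound of offset+len;
    // negative inputs are handled by the separate negative guards.
    bool subrange_overruns(int32_t offset, int32_t len, int32_t length) {
      uint32_t last = (offset == 0) ? (uint32_t)len              // new shortcut
                                    : (uint32_t)offset + (uint32_t)len;
      return (uint32_t)length < last;   // CmpU(array_length, last), BoolTest::lt
    }
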
+@@ -744,7 +783,7 @@
+   _sp += 2;
+   Node *argument = pop();  // pop non-receiver first:  it was pushed second
+   Node *receiver = pop();
+-  
++
+   // Null check on self without removing any arguments.  The argument
+   // null check technically happens in the wrong place, which can lead to
+   // invalid stack traces when string compare is inlined into a method
+@@ -756,19 +795,19 @@
+   if (stopped()) {
+     return true;
+   }
+-  
++
+   ciInstanceKlass* klass = env()->String_klass();
+   const TypeInstPtr* string_type =
+     TypeInstPtr::make(TypePtr::BotPTR, klass, false, NULL, 0);
+-  
++
+   Node* compare =
+     _gvn.transform(new (C, 7) StrCompNode(
+-                        control(), 
++                        control(),
+                         memory(TypeAryPtr::CHARS),
+                         memory(string_type->add_offset(value_offset)),
+                         memory(string_type->add_offset(count_offset)),
+                         memory(string_type->add_offset(offset_offset)),
+-                        receiver, 
++                        receiver,
+                         argument));
+   push(compare);
+   return true;
+@@ -862,7 +901,7 @@
+   const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin));
+   const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot);
+ 
+-  IdealKit kit(gvn(), control());
++  IdealKit kit(gvn(), control(), merged_memory());
+ #define __ kit.
+   Node* zero             = __ ConI(0);
+   Node* one              = __ ConI(1);
+@@ -1042,8 +1081,8 @@
+     // static const unsigned char neg_pi_2_bits_x[] = {0x35,0xc2,0x68,0x21,0xa2,0xda,0x0f,0xc9,0xff,0xbf,0x00,0x00,0x00,0x00,0x00,0x00};
+     // Cutoff value for using this argument reduction technique
+     //static const double    pi_2_minus_epsilon =  1.564660403643354;
+-    //static const double neg_pi_2_plus_epsilon = -1.564660403643354;  
+-    
++    //static const double neg_pi_2_plus_epsilon = -1.564660403643354;
++
+     // Pseudocode for sin:
+     // if (x <= Math.PI / 4.0) {
+     //   if (x >= -Math.PI / 4.0) return  fsin(x);
+@@ -1052,7 +1091,7 @@
+     //   if (x <=  Math.PI / 2.0) return  fcos(x - Math.PI / 2.0);
+     // }
+     // return StrictMath.sin(x);
+-    
++
+     // Pseudocode for cos:
+     // if (x <= Math.PI / 4.0) {
+     //   if (x >= -Math.PI / 4.0) return  fcos(x);
+@@ -1061,12 +1100,12 @@
+     //   if (x <=  Math.PI / 2.0) return -fsin(x - Math.PI / 2.0);
+     // }
+     // return StrictMath.cos(x);
+-    
++
+     // Actually, sticking in an 80-bit Intel value into C2 will be tough; it
+     // requires a special machine instruction to load it.  Instead we'll try
+     // the 'easy' case.  If we really need the extra range +/- PI/2 we'll
+     // probably do the math inside the SIN encoding.
+-    
++
+     // Make the merge point
+     RegionNode *r = new (C, 3) RegionNode(3);
+     Node *phi = new (C, 3) PhiNode(r,Type::DOUBLE);
+@@ -1082,7 +1121,7 @@
+     // Branch either way
+     IfNode *iff = create_and_xform_if(control(),bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
+     set_control(opt_iff(r,iff));
+-    
++
+     // Set fast path result
+     phi->init_req(2,trig);
+ 
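
The pseudocode comments above spell out the trig fast path, and the source's own remark notes that only the easy |x| <= pi/4 case is actually generated. Written out as compilable C++, with hw_fsin/hw_fcos and strict_sin as stand-ins for the matched SinD/CosD instructions and the StrictMath runtime fallback:

    #include <cmath>

    static const double kPi = 3.14159265358979323846;

    static double hw_fsin(double x)    { return std::sin(x); }  // stand-in
    static double hw_fcos(double x)    { return std::cos(x); }  // stand-in
    static double strict_sin(double x) { return std::sin(x); }  // stand-in

    double intrinsic_sin(double x) {
      if (x <= kPi / 4.0) {
        if (x >= -kPi / 4.0) return  hw_fsin(x);
        if (x >= -kPi / 2.0) return -hw_fcos(x + kPi / 2.0);  // sin identity
      } else if (x <= kPi / 2.0) {
        return hw_fcos(x - kPi / 2.0);                        // sin identity
      }
      return strict_sin(x);  // out of range: punt to the runtime
    }
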
+@@ -1105,7 +1144,7 @@
+                                "Tan", NULL, arg, top());
+       break;
+     }
+-    assert(control()->in(0) == call, ""); 
++    assert(control()->in(0) == call, "");
+     Node* slow_result = _gvn.transform(new (C, 1) ProjNode(call,TypeFunc::Parms));
+     r->init_req(1,control());
+     phi->init_req(1,slow_result);
+@@ -1163,7 +1202,7 @@
+   Node* cmpisnan = _gvn.transform(new (C, 3) CmpDNode(result,result));
+   // Build the boolean node
+   Node* bolisnum = _gvn.transform( new (C, 2) BoolNode(cmpisnan, BoolTest::eq) );
+-  
++
+   { BuildCutout unless(this, bolisnum, PROB_STATIC_FREQUENT);
+     // End the current control-flow path
+     push_pair(x);
+@@ -1172,16 +1211,16 @@
+     uncommon_trap(Deoptimization::Reason_intrinsic,
+                   Deoptimization::Action_make_not_entrant);
+   }
+-  
++
+   C->set_has_split_ifs(true); // Has chance for split-if optimization
+-  
++
+   push_pair(result);
+-  
++
+   return true;
+ }
+ 
+ //------------------------------inline_pow-------------------------------------
+-// Inline power instructions, if possible.  
++// Inline power instructions, if possible.
+ bool LibraryCallKit::inline_pow(vmIntrinsics::ID id) {
+   assert(id == vmIntrinsics::_dpow, "Not pow");
+ 
+@@ -1191,7 +1230,7 @@
+ 
+   // Do not intrinsify on older platforms which lack cmove.
+   if (ConditionalMoveLimit == 0)  return false;
+-  
++
+   // Pseudocode for pow
+   // if (x <= 0.0) {
+   //   if ((double)((int)y)==y) { // if y is int
+@@ -1208,12 +1247,12 @@
+   // return result;
+ 
+   _sp += arg_size();        // restore stack pointer
+-  Node* y = pop_math_arg();  
+-  Node* x = pop_math_arg();  
++  Node* y = pop_math_arg();
++  Node* x = pop_math_arg();
+ 
+   Node *fast_result = _gvn.transform( new (C, 3) PowDNode(0, x, y) );
+ 
+-  // Short form: if not top-level (i.e., Math.pow but inlining Math.pow 
++  // Short form: if not top-level (i.e., Math.pow but inlining Math.pow
+   // inside of something) then skip the fancy tests and just check for
+   // NaN result.
+   Node *result = NULL;
+@@ -1225,7 +1264,7 @@
+     // There are four possible paths to region node and phi node
+     RegionNode *r = new (C, 4) RegionNode(4);
+     Node *phi = new (C, 4) PhiNode(r, Type::DOUBLE);
+-    
++
+     // Build the first if node: if (x <= 0.0)
+     // Node for 0 constant
+     Node *zeronode = makecon(TypeD::ZERO);
+@@ -1233,22 +1272,22 @@
+     Node *cmp = _gvn.transform(new (C, 3) CmpDNode(x, zeronode));
+     // Check: If (x<=0) then go complex path
+     Node *bol1 = _gvn.transform( new (C, 2) BoolNode( cmp, BoolTest::le ) );
+-    // Branch either way 
++    // Branch either way
+     IfNode *if1 = create_and_xform_if(control(),bol1, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
+     Node *opt_test = _gvn.transform(if1);
+     //assert( opt_test->is_If(), "Expect an IfNode");
+     IfNode *opt_if1 = (IfNode*)opt_test;
+     // Fast path taken; set region slot 3
+     Node *fast_taken = _gvn.transform( new (C, 1) IfFalseNode(opt_if1) );
+-    r->init_req(3,fast_taken); // Capture fast-control 
+-    
++    r->init_req(3,fast_taken); // Capture fast-control
++
+     // Fast path not-taken, i.e. slow path
+     Node *complex_path = _gvn.transform( new (C, 1) IfTrueNode(opt_if1) );
+-    
++
+     // Set fast path result
+     Node *fast_result = _gvn.transform( new (C, 3) PowDNode(0, y, x) );
+     phi->init_req(3, fast_result);
+-    
++
+     // Complex path
+     // Build the second if node (if y is int)
+     // Node for (int)y
+@@ -1258,12 +1297,12 @@
+     // Check (double)((int) y) : y
+     Node *cmpinty= _gvn.transform(new (C, 3) CmpDNode(doubleinty, y));
+     // Check if (y isn't int) then go to slow path
+-    
++
+     Node *bol2 = _gvn.transform( new (C, 2) BoolNode( cmpinty, BoolTest::ne ) );
+     // Branch eith way
+     IfNode *if2 = create_and_xform_if(complex_path,bol2, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
+     Node *slow_path = opt_iff(r,if2); // Set region path 2
+-    
++
+     // Calculate DPow(abs(x), y)*(1 & (int)y)
+     // Node for constant 1
+     Node *conone = intcon(1);
+@@ -1283,15 +1322,15 @@
+     Node *negabsxpowy = _gvn.transform(new (C, 2) NegDNode (absxpowy));
+     // (1&(int)y)==1?-DPow(abs(x), y):DPow(abs(x), y)
+     Node *signresult = _gvn.transform( CMoveNode::make(C, NULL, bol3, absxpowy, negabsxpowy, Type::DOUBLE));
+-    // Set complex path fast result 
++    // Set complex path fast result
+     phi->init_req(2, signresult);
+-    
++
+     static const jlong nan_bits = CONST64(0x7ff8000000000000);
+     Node *slow_result = makecon(TypeD::make(*(double*)&nan_bits)); // return NaN
+     r->init_req(1,slow_path);
+     phi->init_req(1,slow_result);
+-    
+-    // Post merge      
++
++    // Post merge
+     set_control(_gvn.transform(r));
+     record_for_igvn(r);
+     result=_gvn.transform(phi);
+@@ -1308,7 +1347,7 @@
+     // End the current control-flow path
+     push_pair(x);
+     push_pair(y);
+-    // Math.pow intrinsic returned a NaN, which requires StrictMath.pow 
++    // Math.pow intrinsic returned a NaN, which requires StrictMath.pow
+     // to handle.  Recompile without intrinsifying Math.pow.
+     uncommon_trap(Deoptimization::Reason_intrinsic,
+                   Deoptimization::Action_make_not_entrant);
+@@ -1317,12 +1356,12 @@
+   C->set_has_split_ifs(true); // Has chance for split-if optimization
+ 
+   push_pair(result);
+-  
++
+   return true;
+ }
+ 
+ //------------------------------inline_trans-------------------------------------
+-// Inline transcendental instructions, if possible.  The Intel hardware gets 
++// Inline transcendental instructions, if possible.  The Intel hardware gets
+ // these right, no funny corner cases missed.
+ bool LibraryCallKit::inline_trans(vmIntrinsics::ID id) {
+   _sp += arg_size();        // restore stack pointer
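
The inline_pow hunks above build this logic in graph form: a positive base uses PowDNode directly, a negative base with an integral exponent takes its sign from the exponent's parity, and anything else yields the NaN constant that the later check turns into an uncommon trap to StrictMath.pow. Flattened to scalar code (a sketch; note Java's D2I conversion saturates where the C-style cast below is undefined for huge y):

    #include <cmath>
    #include <limits>

    double intrinsic_pow(double x, double y) {
      if (x > 0.0)
        return std::pow(x, y);                  // fast path, region slot 3
      if ((double)(long)y != y)                 // y not an exact integer
        return std::numeric_limits<double>::quiet_NaN();   // slow_result
      double mag = std::pow(std::fabs(x), y);   // DPow(abs(x), y)
      return (((long)y & 1) == 1) ? -mag : mag; // CMove on (1 & (int)y)
    }
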
+@@ -1351,8 +1390,8 @@
+   Node* a = NULL;
+   Node* b = NULL;
+ 
+-  assert(call_type == OptoRuntime::Math_DD_D_Type() || call_type == OptoRuntime::Math_D_D_Type(), 
+-	 "must be (DD)D or (D)D type");
++  assert(call_type == OptoRuntime::Math_DD_D_Type() || call_type == OptoRuntime::Math_D_D_Type(),
++         "must be (DD)D or (D)D type");
+ 
+   // Inputs
+   _sp += arg_size();        // restore stack pointer
+@@ -1374,19 +1413,19 @@
+   push_pair(value);
+   return true;
+ }
+-  
++
+ //------------------------------inline_math_native-----------------------------
+ bool LibraryCallKit::inline_math_native(vmIntrinsics::ID id) {
+   switch (id) {
+     // These intrinsics are not properly supported on all hardware
+   case vmIntrinsics::_dcos: return Matcher::has_match_rule(Op_CosD) ? inline_trig(id) :
+     runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dcos), "COS");
+-  case vmIntrinsics::_dsin: return Matcher::has_match_rule(Op_SinD) ? inline_trig(id) : 
++  case vmIntrinsics::_dsin: return Matcher::has_match_rule(Op_SinD) ? inline_trig(id) :
+     runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dsin), "SIN");
+   case vmIntrinsics::_dtan: return Matcher::has_match_rule(Op_TanD) ? inline_trig(id) :
+     runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dtan), "TAN");
+ 
+-  case vmIntrinsics::_dlog:   return Matcher::has_match_rule(Op_LogD) ? inline_trans(id) : 
++  case vmIntrinsics::_dlog:   return Matcher::has_match_rule(Op_LogD) ? inline_trans(id) :
+     runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dlog), "LOG");
+   case vmIntrinsics::_dlog10: return Matcher::has_match_rule(Op_Log10D) ? inline_trans(id) :
+     runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), "LOG10");
+@@ -1400,7 +1439,7 @@
+     // implementation returns a NaN on overflow. See bug: 6304089
+     // Once the ad implementations are fixed, change the code below
+     // to match the intrinsics above
+-    
++
+   case vmIntrinsics::_dexp:  return
+     runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP");
+   case vmIntrinsics::_dpow:  return
+@@ -1582,6 +1621,8 @@
+ 
+   // Use a flow-free graph structure, to avoid creating excess control edges
+   // which could hinder other optimizations.
++  // Since Math.min/max is often used with arraycopy, we want
++  // tightly_coupled_allocation to be able to see beyond min/max expressions.
+   Node* cmov = CMoveNode::make(C, NULL, best_bol,
+                                answer_if_false, answer_if_true,
+                                TypeInt::make(lo, hi, widen));
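
The comment added above explains why min/max stay flow-free: a CMove keeps the expression as pure data flow, which later passes such as tightly_coupled_allocation can look through. The scalar shape:

    // Min as a single data-flow expression: no control edges, so graph
    // walks see one CMove rather than a branch diamond.
    static inline int min_cmove(int a, int b) {
      return (a < b) ? a : b;   // typically selected as cmov/csel
    }
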
+@@ -1665,7 +1706,7 @@
+     break;
+   default:
+     ;
+-  } 
++  }
+   return true;
+ }
+ 
+@@ -1732,8 +1773,9 @@
+   Node* val;
+   debug_only(val = (Node*)(uintptr_t)-1);
+ 
++
+   if (is_store) {
+-    // Get the value being stored.  (Pop it first; it was pushed last.) 
++    // Get the value being stored.  (Pop it first; it was pushed last.)
+     switch (type) {
+     case T_DOUBLE:
+     case T_LONG:
+@@ -1901,46 +1943,34 @@
+       val = _gvn.transform( new (C, 2) CastX2PNode(val) );
+       break;
+     }
+-    Node* store = store_to_memory(control(), adr, val, type, adr_type, is_volatile);
+ 
+-    if (type == T_OBJECT
+-        && !_gvn.type(heap_base_oop)->higher_equal(TypePtr::NULL_PTR)) {
++    if (type != T_OBJECT ) {
++      (void) store_to_memory(control(), adr, val, type, adr_type, is_volatile);
++    } else {
++      // Possibly an oop being stored to Java heap or native memory
+       if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) {
+-        store_barrier(store, T_CONFLICT, heap_base_oop, adr, val);
++        // oop to Java heap.
++        (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, val->bottom_type(), type);
+       } else {
+-        // Base pointer may or may not be null, so put out a conditional
+-        // store barrier.  (Yech.)
+-        Node* oldctl = control();
+-        Node* cmp = _gvn.transform( new (C, 3) CmpPNode(heap_base_oop, null()) );
+-        Node* bol = _gvn.transform( new (C, 2) BoolNode(cmp, BoolTest::ne) );
+-        IfNode* iff = create_and_map_if(oldctl, bol, PROB_MAX, COUNT_UNKNOWN);
+-        enum {
+-          heap_store_path = 1,
+-          null_base_path,
+-          num_paths
+-        };
+-        RegionNode* rgn = new (C, num_paths) RegionNode(num_paths);
+-        // fall-through path (base is null, offset is memory address)
+-        rgn->init_req(null_base_path, _gvn.transform( new (C, 1) IfFalseNode(iff) ));
+-        Node* newrawmem = PhiNode::make(rgn, memory(Compile::AliasIdxRaw));
+-        set_control(_gvn.transform( new (C, 1) IfTrueNode(iff) ));
+-        store_barrier(store, T_CONFLICT, heap_base_oop, adr, val);
+-        if (memory(Compile::AliasIdxRaw) == newrawmem->in(null_base_path)) {
+-          // The store barrier did nothing, after all.
+-          set_control(oldctl);
+-        } else {
+-          // Finish heap_store_path:
+-          rgn->init_req(heap_store_path, control());
+-          set_control(_gvn.transform(rgn));
+-          newrawmem->init_req(heap_store_path, memory(Compile::AliasIdxRaw));
+-          set_memory(_gvn.transform(newrawmem), Compile::AliasIdxRaw);
+-        }
++
++        // We can't tell at compile time if we are storing in the Java heap or outside
++        // of it. So we need to emit code to conditionally do the proper type of
++        // store.
++
++        IdealKit kit(gvn(), control(),  merged_memory());
++        kit.declares_done();
++        // QQQ who knows what probability is here??
++        kit.if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
++          (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, val->bottom_type(), type);
++        } kit.else_(); {
++          (void) store_to_memory(control(), adr, val, type, adr_type, is_volatile);
++        } kit.end_if();
+       }
+     }
+   }
+ 
+   if (is_volatile) {
+-    if (!is_store) 
++    if (!is_store)
+       insert_mem_bar(Op_MemBarAcquire);
+     else
+       insert_mem_bar(Op_MemBarVolatile);
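
The rewritten store path replaces the hand-built Region/Phi diamond with IdealKit's structured if_then/else_: when C2 cannot prove whether heap_base_oop is null, it emits a runtime test and selects either the barriered oop store or the raw store. The decision, sketched with hypothetical helper functions:

    #include <cstddef>
    #include <cstdio>

    // Hypothetical stand-ins for the two store idioms C2 can emit.
    static void store_oop_with_barriers(void* base, long off, void* v) {
      std::printf("heap store %p+%ld (with GC write barrier)\n", base, off); (void)v;
    }
    static void store_raw(void* base, long off, void* v) {
      std::printf("raw store %p+%ld\n", base, off); (void)v;
    }

    // Stored type is an oop, but the base may be null (i.e. the address
    // might point into native memory): test at runtime, mirroring
    // kit.if_then(heap_base_oop, BoolTest::ne, null(), ...).
    void unsafe_put_object(void* heap_base_oop, long offset, void* val) {
      if (heap_base_oop != NULL)
        store_oop_with_barriers(heap_base_oop, offset, val);
      else
        store_raw(heap_base_oop, offset, val);   // off-heap: plain store
    }
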
+@@ -2004,11 +2034,12 @@
+     adr = make_unsafe_address(NULL, ptr);
+   }
+ 
+-  assert(saved_sp == _sp, "must have correct argument count");
+-
+-  if (!is_static) {
++  if (is_static) {
++    assert(saved_sp == _sp, "must have correct argument count");
++  } else {
+     // Pop receiver last:  it was pushed first.
+     Node *receiver = pop();
++    assert(saved_sp == _sp, "must have correct argument count");
+ 
+     // Null check on self without removing any arguments.  The argument
+     // null check technically happens in the wrong place, which can lead to
+@@ -2044,7 +2075,7 @@
+   // them, but even I was confused by it!) As much code/comments as
+   // possible are retained from inline_unsafe_access though to make
+   // the correspondances clearer. - dl
+-  
++
+   if (callee()->is_static())  return false;  // caller must have the capability!
+ 
+ #ifndef PRODUCT
+@@ -2070,7 +2101,7 @@
+     return false;
+ 
+   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
+-    
++
+   // Argument words:  "this" plus oop plus offset plus oldvalue plus newvalue;
+   int nargs = 1 + 1 + 2  + type_words + type_words;
+ 
+@@ -2081,7 +2112,7 @@
+   Node* oldval   = (type_words == 1) ? pop() : pop_pair();
+   Node *offset   = pop_pair();
+   Node *base     = pop();
+-  Node *receiver = pop(); 
++  Node *receiver = pop();
+   assert(saved_sp == _sp, "must have correct argument count");
+ 
+   //  Null check receiver.
+@@ -2117,7 +2148,7 @@
+   insert_mem_bar(Op_MemBarRelease);
+   insert_mem_bar(Op_MemBarCPUOrder);
+ 
+-  // 4984716: MemBars must be inserted before this 
++  // 4984716: MemBars must be inserted before this
+   //          memory node in order to avoid a false
+   //          dependency which will confuse the scheduler.
+   Node *mem = memory(alias_idx);
+@@ -2133,10 +2164,11 @@
+     cas = _gvn.transform(new (C, 5) CompareAndSwapLNode(control(), mem, adr, newval, oldval));
+     break;
+   case T_OBJECT:
+-    cas = _gvn.transform(new (C, 5) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
+     // reference stores need a store barrier.
+     // (They don't if CAS fails, but it isn't worth checking.)
+-    store_barrier(cas, T_CONFLICT, base, adr, newval);
++    pre_barrier(control(), base, adr, alias_idx, newval, value_type, T_OBJECT);
++    cas = _gvn.transform(new (C, 5) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
++    post_barrier(control(), cas, base, adr, alias_idx, newval, T_OBJECT, true);
+     break;
+   default:
+     ShouldNotReachHere();
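
The CAS hunk replaces the old single store_barrier with an explicit pre-barrier before the swap and a post-barrier after it, the same pairing used for plain oop stores. Schematically, with hypothetical no-op hooks in place of the real GC interface:

    #include <atomic>

    // Hypothetical hooks standing in for GraphKit::pre_barrier/post_barrier.
    static void pre_barrier(void* newval)                { (void)newval; }
    static void post_barrier(void** field, void* newval) { (void)field; (void)newval; }

    // Order the new hunk encodes for a T_OBJECT CAS: barrier, swap, barrier,
    // with the post barrier issued unconditionally, per the retained comment:
    // "(They don't if CAS fails, but it isn't worth checking.)"
    bool oop_compare_and_swap(std::atomic<void*>& field,
                              void* expected, void* newval) {
      pre_barrier(newval);
      bool ok = field.compare_exchange_strong(expected, newval);
      post_barrier(reinterpret_cast<void**>(&field), newval);
      return ok;
    }
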
+@@ -2161,7 +2193,7 @@
+   // This is another variant of inline_unsafe_access, differing in
+   // that it always issues store-store ("release") barrier and ensures
+   // store-atomicity (which only matters for "long").
+-  
++
+   if (callee()->is_static())  return false;  // caller must have the capability!
+ 
+ #ifndef PRODUCT
+@@ -2183,7 +2215,7 @@
+   int type_words = type2size[type];
+ 
+   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
+-    
++
+   // Argument words:  "this" plus oop plus offset plus value;
+   int nargs = 1 + 1 + 2 + type_words;
+ 
+@@ -2193,7 +2225,7 @@
+   Node* val      = (type_words == 1) ? pop() : pop_pair();
+   Node *offset   = pop_pair();
+   Node *base     = pop();
+-  Node *receiver = pop(); 
++  Node *receiver = pop();
+   assert(saved_sp == _sp, "must have correct argument count");
+ 
+   //  Null check receiver.
+@@ -2217,11 +2249,13 @@
+   insert_mem_bar(Op_MemBarCPUOrder);
+   // Ensure that the store is atomic for longs:
+   bool require_atomic_access = true;
+-  Node* store = store_to_memory(control(), adr, val, type, adr_type, require_atomic_access);
++  Node* store;
+   if (type == T_OBJECT) // reference stores need a store barrier.
+-    store_barrier(store, T_CONFLICT, base, adr, val);
++    store = store_oop_to_unknown(control(), base, adr, adr_type, val, value_type, type);
++  else {
++    store = store_to_memory(control(), adr, val, type, adr_type, require_atomic_access);
++  }
+   insert_mem_bar(Op_MemBarCPUOrder);
+-
+   return true;
+ }
+ 
+@@ -2279,7 +2313,7 @@
+ bool LibraryCallKit::inline_native_currentThread() {
+   Node* junk = NULL;
+   push(generate_current_thread(junk));
+-  return true; 
++  return true;
+ }
+ 
+ //------------------------inline_native_isInterrupted------------------
+@@ -2931,17 +2965,18 @@
+   }
+ 
+   // Bail out if either start or end is negative.
+-  generate_negative_guard(start, bailout);
+-  generate_negative_guard(end,   bailout);
+- 
++  generate_negative_guard(start, bailout, &start);
++  generate_negative_guard(end,   bailout, &end);
++
+   Node* length = end;
+   if (_gvn.type(start) != TypeInt::ZERO) {
+     length = _gvn.transform( new (C, 3) SubINode(end, start) );
+   }
+ 
+   // Bail out if length is negative.
+-  generate_negative_guard(length, bailout);
+- 
++  // ...Not needed, since the new_array will throw the right exception.
++  //generate_negative_guard(length, bailout, &length);
++
+   if (bailout->req() > 1) {
+     PreserveJVMState pjvms(this);
+     set_control( _gvn.transform(bailout) );
+@@ -3031,7 +3066,7 @@
+     null_check_receiver(method);
+     int vtable_index = methodOopDesc::invalid_vtable_index;
+     if (UseInlineCaches) {
+-      // Suppress the vtable call 
++      // Suppress the vtable call
+     } else {
+       // hashCode and clone are not a miranda methods,
+       // so the vtable index is fixed.
+@@ -3219,7 +3254,7 @@
+   Node* caller_depth_node = pop();
+ 
+   assert(saved_sp == _sp, "must have correct argument count");
+-  
++
+   // The depth value must be a constant in order for the runtime call
+   // to be eliminated.
+   const TypeInt* caller_depth_type = _gvn.type(caller_depth_node)->isa_int();
+@@ -3452,7 +3487,7 @@
+     Node *cmpisnan = _gvn.transform( new (C, 3) CmpDNode(value, value));
+     // Build the boolean node
+     Node *bolisnan = _gvn.transform( new (C, 2) BoolNode( cmpisnan, BoolTest::ne ) );
+-    
++
+     // Branch either way.
+     // NaN case is less traveled, which makes all the difference.
+     IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
+@@ -3460,7 +3495,7 @@
+     assert( opt_isnan->is_If(), "Expect an IfNode");
+     IfNode *opt_ifisnan = (IfNode*)opt_isnan;
+     Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(opt_ifisnan) );
+-    
++
+     set_control(iftrue);
+ 
+     static const jlong nan_bits = CONST64(0x7ff8000000000000);
+@@ -3471,11 +3506,11 @@
+     // Else fall through
+     Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(opt_ifisnan) );
+     set_control(iffalse);
+-    
++
+     phi->init_req(2, _gvn.transform( new (C, 2) MoveD2LNode(value)));
+     r->init_req(2, iffalse);
+-    
+-    // Post merge      
++
++    // Post merge
+     set_control(_gvn.transform(r));
+     record_for_igvn(r);
+ 
+@@ -3484,7 +3519,7 @@
+     push_pair(result);
+ 
+     C->set_has_split_ifs(true); // Has chance for split-if optimization
+-    
++
+     break;
+   }
+ 
+@@ -3498,7 +3533,7 @@
+     Node *cmpisnan = _gvn.transform( new (C, 3) CmpFNode(value, value));
+     // Build the boolean node
+     Node *bolisnan = _gvn.transform( new (C, 2) BoolNode( cmpisnan, BoolTest::ne ) );
+-    
++
+     // Branch either way.
+     // NaN case is less traveled, which makes all the difference.
+     IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
+@@ -3506,7 +3541,7 @@
+     assert( opt_isnan->is_If(), "Expect an IfNode");
+     IfNode *opt_ifisnan = (IfNode*)opt_isnan;
+     Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(opt_ifisnan) );
+-    
++
+     set_control(iftrue);
+ 
+     static const jint nan_bits = 0x7fc00000;
+@@ -3517,11 +3552,11 @@
+     // Else fall through
+     Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(opt_ifisnan) );
+     set_control(iffalse);
+-    
++
+     phi->init_req(2, _gvn.transform( new (C, 2) MoveF2INode(value)));
+     r->init_req(2, iffalse);
+-    
+-    // Post merge      
++
++    // Post merge
+     set_control(_gvn.transform(r));
+     record_for_igvn(r);
+ 
+@@ -3530,7 +3565,7 @@
+     push(result);
+ 
+     C->set_has_split_ifs(true); // Has chance for split-if optimization
+-    
++
+     break;
+   }
+ 
+@@ -3666,8 +3701,15 @@
+     assert(obj_size != NULL, "");
+     Node* raw_obj = alloc_obj->in(1);
+     assert(raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
++    if (ReduceBulkZeroing) {
++      AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
++      if (alloc != NULL) {
++        // We will be completely responsible for initializing this object.
++        alloc->maybe_set_complete(&_gvn);
++      }
++    }
+ 
+-    if (true) { // TO DO: check ReduceInitialCardMarks
++    if (!use_ReduceInitialCardMarks()) {
+       // If it is an oop array, it requires very special treatment,
+       // because card marking is required on each card of the array.
+       Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
+@@ -3676,17 +3718,22 @@
+         set_control(is_obja);
+         // Generate a direct call to the right arraycopy function(s).
+         bool disjoint_bases = true;
+-	bool length_never_negative = true;
++        bool length_never_negative = true;
+         generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
+                            obj, intcon(0), alloc_obj, intcon(0),
+                            obj_length, nargs,
+-			   disjoint_bases, length_never_negative);
++                           disjoint_bases, length_never_negative);
+         result_reg->init_req(_objArray_path, control());
+         result_val->init_req(_objArray_path, alloc_obj);
+         result_i_o ->set_req(_objArray_path, i_o());
+         result_mem ->set_req(_objArray_path, reset_memory());
+       }
+     }
++    // We can dispense with card marks if we know the allocation
++    // comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
++    // causes the non-eden paths to simulate a fresh allocation,
++    // insofar that no further card marks are required to initialize
++    // the object.
+ 
+     // Otherwise, there are no card marks to worry about.
+     alloc_val->init_req(_typeArray_alloc, raw_obj);
+@@ -3731,7 +3778,12 @@
+     assert(obj_size != NULL, "");
+     Node* raw_obj = alloc_obj->in(1);
+     assert(raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
+-    if (true) { // TO DO: check ReduceInitialCardMarks
++    if (ReduceBulkZeroing) {
++      AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
++      if (alloc != NULL && !alloc->maybe_set_complete(&_gvn))
++        alloc = NULL;
++    }
++    if (!use_ReduceInitialCardMarks()) {
+       // Put in store barrier for any and all oops we are sticking
+       // into this object.  (We could avoid this if we could prove
+       // that the object type contains no oop fields at all.)
+@@ -3786,10 +3838,14 @@
+     // The CopyArray instruction (if supported) can be optimized
+     // into a discrete set of scalar loads and stores.
+     bool disjoint_bases = true;
+-    generate_unchecked_arraycopy(T_LONG, raw_adr_type, disjoint_bases,
++    generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases,
+                                  src, NULL, dest, NULL, countx);
+-    
++
+     // Now that the object is properly initialized, type it as an oop.
++    // Use a secondary InitializeNode memory barrier.
++    InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, raw_adr_idx,
++                                                   raw_obj)->as_Initialize();
++    init->set_complete(&_gvn);  // (there is no corresponding AllocateNode)
+     Node* new_obj = new(C, 2) CheckCastPPNode(control(), raw_obj,
+                                               TypeInstPtr::NOTNULL);
+     new_obj = _gvn.transform(new_obj);
+@@ -3798,8 +3854,14 @@
+     if (card_mark) {
+       Node* no_particular_value = NULL;
+       Node* no_particular_field = NULL;
+-      store_barrier(memory(raw_adr_type), T_OBJECT, new_obj,
+-                    no_particular_field, no_particular_value);
++      post_barrier(control(),
++                   memory(raw_adr_type),
++                   new_obj,
++                   no_particular_field,
++                   raw_adr_idx,
++                   no_particular_value,
++                   T_OBJECT,
++                   false);
+     }
+     // Present the results of the slow call.
+     result_reg->init_req(_fast_path, control());
+@@ -3988,7 +4050,7 @@
+   // We will make a fast path for this call to arraycopy.
+ 
+   // We have the following tests left to perform:
+-  // 
++  //
+   // (3) src and dest must not be null.
+   // (4) src_offset must not be negative.
+   // (5) dest_offset must not be negative.
+@@ -4083,15 +4145,61 @@
+                                    Node* dest, Node* dest_offset,
+                                    Node* copy_length,
+                                    int nargs,
+-				   bool disjoint_bases,
+-				   bool length_never_negative,
+-                                   Node* slow_control) {
++                                   bool disjoint_bases,
++                                   bool length_never_negative,
++                                   RegionNode* slow_region) {
++
++  if (slow_region == NULL) {
++    slow_region = new(C,1) RegionNode(1);
++    record_for_igvn(slow_region);
++  }
++
++  Node* original_dest      = dest;
++  AllocateArrayNode* alloc = NULL;  // used for zeroing, if needed
++  Node* raw_dest           = NULL;  // used before zeroing, if needed
++  bool  must_clear_dest    = false;
++
++  // See if this is the initialization of a newly-allocated array.
++  // If so, we will take responsibility here for initializing it to zero.
++  // (Note:  Because tightly_coupled_allocation performs checks on the
++  // out-edges of the dest, we need to avoid making derived pointers
++  // from it until we have checked its uses.)
++  if (ReduceBulkZeroing
++      && !ZeroTLAB              // pointless if already zeroed
++      && basic_elem_type != T_CONFLICT // avoid corner case
++      && !_gvn.eqv_uncast(src, dest)
++      && ((alloc = tightly_coupled_allocation(dest, slow_region))
++          != NULL)
++      && alloc->maybe_set_complete(&_gvn)) {
++    // "You break it, you buy it."
++    InitializeNode* init = alloc->initialization();
++    assert(init->is_complete(), "we just did this");
++    assert(dest->Opcode() == Op_CheckCastPP, "sanity");
++    assert(dest->in(0)->in(0) == init, "dest pinned");
++    raw_dest = dest->in(1);  // grab the raw pointer!
++    original_dest = dest;
++    dest = raw_dest;
++    adr_type = TypeRawPtr::BOTTOM;  // all initializations are into raw memory
++    // Decouple the original InitializeNode, turning it into a simple membar.
++    // We will build a new one at the end of this routine.
++    init->set_req(InitializeNode::RawAddress, top());
++    // From this point on, every exit path is responsible for
++    // initializing any non-copied parts of the object to zero.
++    must_clear_dest = true;
++  } else {
++    // No zeroing elimination here.
++    alloc             = NULL;
++    //original_dest   = dest;
++    //must_clear_dest = false;
++  }
++
+   // Results are placed here:
+-  enum { fast_path        = 1,
+-         checked_path     = 2,
+-         slow_call_path   = 3,
+-         zero_path        = 4,
+-         PATH_LIMIT       = 5
++  enum { fast_path        = 1,  // normal void-returning assembly stub
++         checked_path     = 2,  // special assembly stub with cleanup
++         slow_call_path   = 3,  // something went wrong; call the VM
++         zero_path        = 4,  // bypass when length of copy is zero
++         bcopy_path       = 5,  // copy primitive array by 64-bit blocks
++         PATH_LIMIT       = 6
+   };
+   RegionNode* result_region = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT);
+   PhiNode*    result_i_o    = new(C, PATH_LIMIT) PhiNode(result_region, Type::ABIO);
+@@ -4099,10 +4207,13 @@
+   record_for_igvn(result_region);
+   _gvn.set_type_bottom(result_i_o);
+   _gvn.set_type_bottom(result_memory);
++  assert(adr_type != TypePtr::BOTTOM, "must be RawMem or a T[] slice");
+ 
+-  // Other parts of the slow_control edge:
++  // The slow_control path:
++  Node* slow_control;
+   Node* slow_i_o = i_o();
+   Node* slow_mem = memory(adr_type);
++  debug_only(slow_control = (Node*) badAddress);
+ 
+   // Checked control path:
+   Node* checked_control = top();
+@@ -4111,43 +4222,123 @@
+   Node* checked_value   = NULL;
+ 
+   if (basic_elem_type == T_CONFLICT) {
+-    { PreserveJVMState pjvms(this);
+-      Node* cv = generate_generic_arraycopy(adr_type,
+-                                            src, src_offset, dest, dest_offset,
+-                                            copy_length, nargs);
+-      if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
+-      checked_control = control();
+-      checked_i_o     = i_o();
+-      checked_mem     = reset_memory();
+-      checked_value   = cv;
+-    }
++    assert(!must_clear_dest, "");
++    Node* cv = generate_generic_arraycopy(adr_type,
++                                          src, src_offset, dest, dest_offset,
++                                          copy_length, nargs);
++    if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
++    checked_control = control();
++    checked_i_o     = i_o();
++    checked_mem     = memory(adr_type);
++    checked_value   = cv;
+     set_control(top());         // no fast path
+   }
+ 
+   Node* not_pos = generate_nonpositive_guard(copy_length, length_never_negative);
+   if (not_pos != NULL) {
+-    Node* fast_ctrl = control();
+-
++    PreserveJVMState pjvms(this);
+     set_control(not_pos);
++
+     // (6) length must not be negative.
+     if (!length_never_negative) {
+-      if (slow_control == NULL) {
+-        slow_control = new(C,1) RegionNode(1);
+-        record_for_igvn(slow_control);
++      generate_negative_guard(copy_length, slow_region);
++    }
++
++    if (!stopped() && must_clear_dest) {
++      Node* dest_length = alloc->in(AllocateNode::ALength);
++      if (_gvn.eqv_uncast(copy_length, dest_length)
++          || _gvn.find_int_con(dest_length, 1) <= 0) {
++        // There is no zeroing to do.
++      } else {
++        // Clear the whole thing since there are no source elements to copy.
++        generate_clear_array(adr_type, dest, basic_elem_type,
++                             intcon(0), NULL,
++                             alloc->in(AllocateNode::AllocSize));
+       }
+-      generate_negative_guard(copy_length, slow_control->as_Region());
+     }
+ 
+     // Present the results of the fast call.
+     result_region->init_req(zero_path, control());
+     result_i_o   ->init_req(zero_path, i_o());
+     result_memory->init_req(zero_path, memory(adr_type));
++  }
+ 
+-    set_control(fast_ctrl);
++  if (!stopped() && must_clear_dest) {
++    // We have to initialize the *uncopied* part of the array to zero.
++    // The copy destination is the slice dest[off..off+len].  The other slices
++    // are dest_head = dest[0..off] and dest_tail = dest[off+len..dest.length].
++    Node* dest_size   = alloc->in(AllocateNode::AllocSize);
++    Node* dest_length = alloc->in(AllocateNode::ALength);
++    Node* dest_tail   = _gvn.transform( new(C,3) AddINode(dest_offset,
++                                                          copy_length) );
++
++    // If there is a head section that needs zeroing, do it now.
++    if (find_int_con(dest_offset, -1) != 0) {
++      generate_clear_array(adr_type, dest, basic_elem_type,
++                           intcon(0), dest_offset,
++                           NULL);
++    }
++
++    // Next, perform a dynamic check on the tail length.
++    // It is often zero, and we can win big if we prove this.
++    // There are two wins:  Avoid generating the ClearArray
++    // with its attendant messy index arithmetic, and upgrade
++    // the copy to a more hardware-friendly word size of 64 bits.
++    Node* tail_ctl = NULL;
++    if (!stopped() && !_gvn.eqv_uncast(dest_tail, dest_length)) {
++      Node* cmp_lt   = _gvn.transform( new(C,3) CmpINode(dest_tail, dest_length) );
++      Node* bol_lt   = _gvn.transform( new(C,2) BoolNode(cmp_lt, BoolTest::lt) );
++      tail_ctl = generate_slow_guard(bol_lt, NULL);
++      assert(tail_ctl != NULL || !stopped(), "must be an outcome");
++    }
++
++    // At this point, let's assume there is no tail.
++    if (!stopped() && alloc != NULL && basic_elem_type != T_OBJECT) {
++      // There is no tail.  Try an upgrade to a 64-bit copy.
++      bool didit = false;
++      { PreserveJVMState pjvms(this);
++        didit = generate_block_arraycopy(adr_type, basic_elem_type, alloc,
++                                         src, src_offset, dest, dest_offset,
++                                         dest_size);
++        if (didit) {
++          // Present the results of the block-copying fast call.
++          result_region->init_req(bcopy_path, control());
++          result_i_o   ->init_req(bcopy_path, i_o());
++          result_memory->init_req(bcopy_path, memory(adr_type));
++        }
++      }
++      if (didit)
++        set_control(top());     // no regular fast path
++    }
++
++    // Clear the tail, if any.
++    if (tail_ctl != NULL) {
++      Node* notail_ctl = stopped() ? NULL : control();
++      set_control(tail_ctl);
++      if (notail_ctl == NULL) {
++        generate_clear_array(adr_type, dest, basic_elem_type,
++                             dest_tail, NULL,
++                             dest_size);
++      } else {
++        // Make a local merge.
++        Node* done_ctl = new(C,3) RegionNode(3);
++        Node* done_mem = new(C,3) PhiNode(done_ctl, Type::MEMORY, adr_type);
++        done_ctl->init_req(1, notail_ctl);
++        done_mem->init_req(1, memory(adr_type));
++        generate_clear_array(adr_type, dest, basic_elem_type,
++                             dest_tail, NULL,
++                             dest_size);
++        done_ctl->init_req(2, control());
++        done_mem->init_req(2, memory(adr_type));
++        set_control( _gvn.transform(done_ctl) );
++        set_memory(  _gvn.transform(done_mem), adr_type );
++      }
++    }
+   }
+ 
++  BasicType copy_type = basic_elem_type;
+   assert(basic_elem_type != T_ARRAY, "caller must fix this");
+-  if (basic_elem_type == T_OBJECT) {
++  if (!stopped() && copy_type == T_OBJECT) {
+     // If src and dest have compatible element types, we can copy bits.
+     // Types S[] and D[] are compatible if D is a supertype of S.
+     //
+@@ -4189,17 +4380,24 @@
+       if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
+       checked_control = control();
+       checked_i_o     = i_o();
+-      checked_mem     = reset_memory();
++      checked_mem     = memory(adr_type);
+       checked_value   = cv;
+     }
+     // At this point we know we do not need type checks on oop stores.
++
++    // Let's see if we need card marks:
++    if (alloc != NULL && use_ReduceInitialCardMarks()) {
++      // If we do not need card marks, copy using the jint or jlong stub.
++      copy_type = LP64_ONLY(T_LONG) NOT_LP64(T_INT);
++      assert(type2aelembytes[basic_elem_type] == type2aelembytes[copy_type],
++             "sizes agree");
++    }
+   }
+ 
+   if (!stopped()) {
+     // Generate the fast path, if possible.
+     PreserveJVMState pjvms(this);
+-
+-    generate_unchecked_arraycopy(basic_elem_type, adr_type, disjoint_bases,
++    generate_unchecked_arraycopy(adr_type, copy_type, disjoint_bases,
+                                  src, src_offset, dest, dest_offset,
+                                  ConvI2X(copy_length));
+ 
+@@ -4210,8 +4408,10 @@
+   }
+ 
+   // Here are all the slow paths up to this point, in one bundle:
+-  if (slow_control == NULL)  slow_control = top();
+-  slow_control = _gvn.transform(slow_control);
++  slow_control = top();
++  if (slow_region != NULL)
++    slow_control = _gvn.transform(slow_region);
++  debug_only(slow_region = (RegionNode*)badAddress);
+ 
+   set_control(checked_control);
+   if (!stopped()) {
+@@ -4241,33 +4441,60 @@
+     slow_i_o2  ->init_req(2, i_o());
+     slow_mem2  ->init_req(2, memory(adr_type));
+ 
+-    // We must continue the copy exactly where it failed, or else
+-    // another thread might see the wrong number of writes to dest.
+-    Node* checked_offset = _gvn.transform( new(C, 3) XorINode(checked_value, intcon(-1)) );
+-    Node* slow_offset    = new(C, 3) PhiNode(slow_reg2, TypeInt::INT);
+-    slow_offset->init_req(1, intcon(0));
+-    slow_offset->init_req(2, checked_offset);
+-
+     slow_control = _gvn.transform(slow_reg2);
+     slow_i_o     = _gvn.transform(slow_i_o2);
+     slow_mem     = _gvn.transform(slow_mem2);
+-    slow_offset  = _gvn.transform(slow_offset);
+ 
+-    // Adjust the arguments by the conditionally incoming offset.
+-    Node* src_off_plus  = _gvn.transform( new(C, 3) AddINode(src_offset,  slow_offset) );
+-    Node* dest_off_plus = _gvn.transform( new(C, 3) AddINode(dest_offset, slow_offset) );
+-    Node* length_minus  = _gvn.transform( new(C, 3) SubINode(copy_length, slow_offset) );
+-
+-    // Tweak the node variables to adjust the code produced below:
+-    src_offset  = src_off_plus;
+-    dest_offset = dest_off_plus;
+-    copy_length = length_minus;
++    if (alloc != NULL) {
++      // We'll restart from the very beginning, after zeroing the whole thing.
++      // This can cause double writes, but that's OK since dest is brand new.
++      // So we ignore the low 31 bits of the value returned from the stub.
++    } else {
++      // We must continue the copy exactly where it failed, or else
++      // another thread might see the wrong number of writes to dest.
++      Node* checked_offset = _gvn.transform( new(C, 3) XorINode(checked_value, intcon(-1)) );
++      Node* slow_offset    = new(C, 3) PhiNode(slow_reg2, TypeInt::INT);
++      slow_offset->init_req(1, intcon(0));
++      slow_offset->init_req(2, checked_offset);
++      slow_offset  = _gvn.transform(slow_offset);
++
++      // Adjust the arguments by the conditionally incoming offset.
++      Node* src_off_plus  = _gvn.transform( new(C, 3) AddINode(src_offset,  slow_offset) );
++      Node* dest_off_plus = _gvn.transform( new(C, 3) AddINode(dest_offset, slow_offset) );
++      Node* length_minus  = _gvn.transform( new(C, 3) SubINode(copy_length, slow_offset) );
++
++      // Tweak the node variables to adjust the code produced below:
++      src_offset  = src_off_plus;
++      dest_offset = dest_off_plus;
++      copy_length = length_minus;
++    }
+   }
+ 
+   set_control(slow_control);
+   if (!stopped()) {
+     // Generate the slow path, if needed.
+-    PreserveJVMState pjvms(this);   // (better safe than sorry)
++    PreserveJVMState pjvms(this);   // replace_in_map may trash the map
++
++    set_memory(slow_mem, adr_type);
++    set_i_o(slow_i_o);
++
++    if (must_clear_dest) {
++      generate_clear_array(adr_type, dest, basic_elem_type,
++                           intcon(0), NULL,
++                           alloc->in(AllocateNode::AllocSize));
++    }
++
++    if (dest != original_dest) {
++      // Promote from rawptr to oop, so it looks right in the call's GC map.
++      dest = _gvn.transform( new(C,2) CheckCastPPNode(control(), dest,
++                                                      TypeInstPtr::NOTNULL) );
++
++      // Edit the call's debug-info to avoid referring to original_dest.
++      // (The problem with original_dest is that it isn't ready until
++      // after the InitializeNode completes, but this stuff is before.)
++      // Substitute in the locally valid dest_oop.
++      replace_in_map(original_dest, dest);
++    }
+ 
+     generate_slow_arraycopy(adr_type,
+                             src, src_offset, dest, dest_offset,
+@@ -4289,21 +4516,292 @@
+   set_i_o(     _gvn.transform(result_i_o)    );
+   set_memory(  _gvn.transform(result_memory), adr_type );
+ 
+-  // The memory edges above are precise in order to model effects around 
++  if (dest != original_dest) {
++    // Pin the "finished" array node after the arraycopy/zeroing operations.
++    // Use a secondary InitializeNode memory barrier.
++    InitializeNode* init = insert_mem_bar_volatile(Op_Initialize,
++                                                   Compile::AliasIdxRaw,
++                                                   raw_dest)->as_Initialize();
++    init->set_complete(&_gvn);  // (there is no corresponding AllocateNode)
++    _gvn.hash_delete(original_dest);
++    original_dest->set_req(0, control());
++    _gvn.hash_find_insert(original_dest);  // put back into GVN table
++  }
++
++  // The memory edges above are precise in order to model effects around
+   // array copyies accurately to allow value numbering of field loads around
+-  // arraycopy.  Such field loads, both before and after, are common in Java 
++  // arraycopy.  Such field loads, both before and after, are common in Java
+   // collections and similar classes involving header/array data structures.
+   //
+-  // But with low number of register or when some registers are used or killed 
++  // But with low number of register or when some registers are used or killed
+   // by arraycopy calls it causes registers spilling on stack. See 6544710.
+-  // The next memory barrier is added to avoid it. If the arraycopy can be 
+-  // optimized away (which it can, sometimes) then we can manually remove 
++  // The next memory barrier is added to avoid it. If the arraycopy can be
++  // optimized away (which it can, sometimes) then we can manually remove
+   // the membar also.
+   if (InsertMemBarAfterArraycopy)
+     insert_mem_bar(Op_MemBarCPUOrder);
+ }
+ 
+ 
++// Helper function which determines if an arraycopy immediately follows
++// an allocation, with no intervening tests or other escapes for the object.
++AllocateArrayNode*
++LibraryCallKit::tightly_coupled_allocation(Node* ptr,
++                                           RegionNode* slow_region) {
++  if (stopped())             return NULL;  // no fast path
++  if (C->AliasLevel() == 0)  return NULL;  // no MergeMems around
++
++  AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, &_gvn);
++  if (alloc == NULL)  return NULL;
++
++  Node* rawmem = memory(Compile::AliasIdxRaw);
++  // Is the allocation's memory state untouched?
++  if (!(rawmem->is_Proj() && rawmem->in(0)->is_Initialize())) {
++    // Bail out if there have been raw-memory effects since the allocation.
++    // (Example:  There might have been a call or safepoint.)
++    return NULL;
++  }
++  rawmem = rawmem->in(0)->as_Initialize()->memory(Compile::AliasIdxRaw);
++  if (!(rawmem->is_Proj() && rawmem->in(0) == alloc)) {
++    return NULL;
++  }
++
++  // There must be no unexpected observers of this allocation.
++  for (DUIterator_Fast imax, i = ptr->fast_outs(imax); i < imax; i++) {
++    Node* obs = ptr->fast_out(i);
++    if (obs != this->map()) {
++      return NULL;
++    }
++  }
++
++  // This arraycopy must unconditionally follow the allocation of the ptr.
++  Node* alloc_ctl = ptr->in(0);
++  assert(just_allocated_object(alloc_ctl) == ptr, "most recent allo");
++
++  Node* ctl = control();
++  while (ctl != alloc_ctl) {
++    // There may be guards which feed into the slow_region.
++    // Any other control flow means that we might not get a chance
++    // to finish initializing the allocated object.
++    if ((ctl->is_IfFalse() || ctl->is_IfTrue()) && ctl->in(0)->is_If()) {
++      IfNode* iff = ctl->in(0)->as_If();
++      Node* not_ctl = iff->proj_out(1 - ctl->as_Proj()->_con);
++      assert(not_ctl != NULL && not_ctl != ctl, "found alternate");
++      if (slow_region != NULL && slow_region->find_edge(not_ctl) >= 1) {
++        ctl = iff->in(0);       // This test feeds the known slow_region.
++        continue;
++      }
++      // One more try:  Various low-level checks bottom out in
++      // uncommon traps.  If the debug-info of the trap omits
++      // any reference to the allocation, as we've already
++      // observed, then there can be no objection to the trap.
++      bool found_trap = false;
++      for (DUIterator_Fast jmax, j = not_ctl->fast_outs(jmax); j < jmax; j++) {
++        Node* obs = not_ctl->fast_out(j);
++        if (obs->in(0) == not_ctl && obs->is_Call() &&
++            (obs->as_Call()->entry_point() ==
++             SharedRuntime::uncommon_trap_blob()->instructions_begin())) {
++          found_trap = true; break;
++        }
++      }
++      if (found_trap) {
++        ctl = iff->in(0);       // This test feeds a harmless uncommon trap.
++        continue;
++      }
++    }
++    return NULL;
++  }
++
++  // If we get this far, we have an allocation which immediately
++  // precedes the arraycopy, and we can take over zeroing the new object.
++  // The arraycopy will finish the initialization, and provide
++  // a new control state to which we will anchor the destination pointer.
++
++  return alloc;
++}
++
++// Helper for initialization of arrays, creating a ClearArray.
++// It writes zero bits in [start..end), within the body of an array object.
++// The memory effects are all chained onto the 'adr_type' alias category.
++//
++// Since the object is otherwise uninitialized, we are free
++// to put a little "slop" around the edges of the cleared area,
++// as long as it does not go back into the array's header,
++// or beyond the array end within the heap.
++//
++// The lower edge can be rounded down to the nearest jint and the
++// upper edge can be rounded up to the nearest MinObjAlignmentInBytes.
++//
++// Arguments:
++//   adr_type           memory slice where writes are generated
++//   dest               oop of the destination array
++//   basic_elem_type    element type of the destination
++//   slice_idx          array index of first element to store
++//   slice_len          number of elements to store (or NULL)
++//   dest_size          total size in bytes of the array object
++//
++// Exactly one of slice_len or dest_size must be non-NULL.
++// If dest_size is non-NULL, zeroing extends to the end of the object.
++// If slice_len is non-NULL, the slice_idx value must be a constant.
++void
++LibraryCallKit::generate_clear_array(const TypePtr* adr_type,
++                                     Node* dest,
++                                     BasicType basic_elem_type,
++                                     Node* slice_idx,
++                                     Node* slice_len,
++                                     Node* dest_size) {
++  // one or the other but not both of slice_len and dest_size:
++  assert((slice_len != NULL? 1: 0) + (dest_size != NULL? 1: 0) == 1, "");
++  if (slice_len == NULL)  slice_len = top();
++  if (dest_size == NULL)  dest_size = top();
++
++  // operate on this memory slice:
++  Node* mem = memory(adr_type); // memory slice to operate on
++
++  // scaling and rounding of indexes:
++  int scale = exact_log2(type2aelembytes[basic_elem_type]);
++  int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
++  int clear_low = (-1 << scale) & (BytesPerInt  - 1);
++  int bump_bit  = (-1 << scale) & BytesPerInt;
++
++  // determine constant starts and ends
++  const intptr_t BIG_NEG = -128;
++  assert(BIG_NEG + 2*abase < 0, "neg enough");
++  intptr_t slice_idx_con = (intptr_t) find_int_con(slice_idx, BIG_NEG);
++  intptr_t slice_len_con = (intptr_t) find_int_con(slice_len, BIG_NEG);
++  if (slice_len_con == 0) {
++    return;                     // nothing to do here
++  }
++  intptr_t start_con = (abase + (slice_idx_con << scale)) & ~clear_low;
++  intptr_t end_con   = find_intptr_t_con(dest_size, -1);
++  if (slice_idx_con >= 0 && slice_len_con >= 0) {
++    assert(end_con < 0, "not two cons");
++    end_con = round_to(abase + ((slice_idx_con + slice_len_con) << scale),
++                       BytesPerLong);
++  }
++
++  if (start_con >= 0 && end_con >= 0) {
++    // Constant start and end.  Simple.
++    mem = ClearArrayNode::clear_memory(control(), mem, dest,
++                                       start_con, end_con, &_gvn);
++  } else if (start_con >= 0 && dest_size != top()) {
++    // Constant start, pre-rounded end after the tail of the array.
++    Node* end = dest_size;
++    mem = ClearArrayNode::clear_memory(control(), mem, dest,
++                                       start_con, end, &_gvn);
++  } else if (start_con >= 0 && slice_len != top()) {
++    // Constant start, non-constant end.  End needs rounding up.
++    // End offset = round_up(abase + ((slice_idx_con + slice_len) << scale), 8)
++    intptr_t end_base  = abase + (slice_idx_con << scale);
++    int      end_round = (-1 << scale) & (BytesPerLong  - 1);
++    Node*    end       = ConvI2X(slice_len);
++    if (scale != 0)
++      end = _gvn.transform( new(C,3) LShiftXNode(end, intcon(scale) ));
++    end_base += end_round;
++    end = _gvn.transform( new(C,3) AddXNode(end, MakeConX(end_base)) );
++    end = _gvn.transform( new(C,3) AndXNode(end, MakeConX(~end_round)) );
++    mem = ClearArrayNode::clear_memory(control(), mem, dest,
++                                       start_con, end, &_gvn);
++  } else if (start_con < 0 && dest_size != top()) {
++    // Non-constant start, pre-rounded end after the tail of the array.
++    // This is almost certainly a "round-to-end" operation.
++    Node* start = slice_idx;
++    start = ConvI2X(start);
++    if (scale != 0)
++      start = _gvn.transform( new(C,3) LShiftXNode( start, intcon(scale) ));
++    start = _gvn.transform( new(C,3) AddXNode(start, MakeConX(abase)) );
++    if ((bump_bit | clear_low) != 0) {
++      int to_clear = (bump_bit | clear_low);
++      // Align up mod 8, then store a jint zero unconditionally
++      // just before the mod-8 boundary.
++      // This would only fail if the first array element were immediately
++      // after the length field, and were also at an even offset mod 8.
++      assert(((abase + bump_bit) & ~to_clear) - BytesPerInt
++             >= arrayOopDesc::length_offset_in_bytes() + BytesPerInt,
++             "store must not trash length field");
++
++      // Bump 'start' up to (or past) the next jint boundary:
++      start = _gvn.transform( new(C,3) AddXNode(start, MakeConX(bump_bit)) );
++      // Round bumped 'start' down to jlong boundary in body of array.
++      start = _gvn.transform( new(C,3) AndXNode(start, MakeConX(~to_clear)) );
++      // Store a zero to the immediately preceding jint:
++      Node* x1 = _gvn.transform( new(C,3) AddXNode(start, MakeConX(-BytesPerInt)) );
++      Node* p1 = basic_plus_adr(dest, x1);
++      mem = StoreNode::make(C, control(), mem, p1, adr_type, intcon(0), T_INT);
++      mem = _gvn.transform(mem);
++    }
++
++    Node* end = dest_size; // pre-rounded
++    mem = ClearArrayNode::clear_memory(control(), mem, dest,
++                                       start, end, &_gvn);
++  } else {
++    // Non-constant start, unrounded non-constant end.
++    // (Nobody zeroes a random midsection of an array using this routine.)
++    ShouldNotReachHere();       // fix caller
++  }
++
++  // Done.
++  set_memory(mem, adr_type);
++}
++
++
++bool
++LibraryCallKit::generate_block_arraycopy(const TypePtr* adr_type,
++                                         BasicType basic_elem_type,
++                                         AllocateNode* alloc,
++                                         Node* src,  Node* src_offset,
++                                         Node* dest, Node* dest_offset,
++                                         Node* dest_size) {
++  // See if there is an advantage from block transfer.
++  int scale = exact_log2(type2aelembytes[basic_elem_type]);
++  if (scale >= LogBytesPerLong)
++    return false;               // it is already a block transfer
++
++  // Look at the alignment of the starting offsets.
++  int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
++  const intptr_t BIG_NEG = -128;
++  assert(BIG_NEG + 2*abase < 0, "neg enough");
++
++  intptr_t src_off  = abase + ((intptr_t) find_int_con(src_offset, -1)  << scale);
++  intptr_t dest_off = abase + ((intptr_t) find_int_con(dest_offset, -1) << scale);
++  if (src_off < 0 || dest_off < 0)
++    // At present, we can only understand constants.
++    return false;
++
++  if (((src_off | dest_off) & (BytesPerLong-1)) != 0) {
++    // Non-aligned; too bad.
++    // One more chance:  Pick off an initial 32-bit word.
++    // This is a common case, since abase can be odd mod 8.
++    if (((src_off | dest_off) & (BytesPerLong-1)) == BytesPerInt &&
++        ((src_off ^ dest_off) & (BytesPerLong-1)) == 0) {
++      Node* sptr = basic_plus_adr(src,  src_off);
++      Node* dptr = basic_plus_adr(dest, dest_off);
++      Node* sval = make_load(control(), sptr, TypeInt::INT, T_INT, adr_type);
++      store_to_memory(control(), dptr, sval, T_INT, adr_type);
++      src_off += BytesPerInt;
++      dest_off += BytesPerInt;
++    } else {
++      return false;
++    }
++  }
++  assert(src_off % BytesPerLong == 0, "");
++  assert(dest_off % BytesPerLong == 0, "");
++
++  // Do this copy by giant steps.
++  Node* sptr  = basic_plus_adr(src,  src_off);
++  Node* dptr  = basic_plus_adr(dest, dest_off);
++  Node* countx = dest_size;
++  countx = _gvn.transform( new (C, 3) SubXNode(countx, MakeConX(dest_off)) );
++  countx = _gvn.transform( new (C, 3) URShiftXNode(countx, intcon(LogBytesPerLong)) );
++
++  bool disjoint_bases = true;   // since alloc != NULL
++  generate_unchecked_arraycopy(adr_type, T_LONG, disjoint_bases,
++                               sptr, NULL, dptr, NULL, countx);
++
++  return true;
++}
++
++
+ // Helper function; generates code for the slow case.
+ // We make a call to a runtime method which emulates the native method,
+ // but without the native wrapper overhead.
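(To make the boundary rounding in generate_clear_array above concrete, here is
a minimal standalone C++ sketch.  It reuses the patch's masks with hypothetical
values, a byte array with an assumed 16-byte header, and is illustration only,
not HotSpot code.)

    #include <cstdio>

    int main() {
        const int BytesPerInt  = 4;
        const int BytesPerLong = 8;
        const int scale = 0;   // log2(element size); 0 for a byte array
        const int abase = 16;  // assumed array-body offset, illustration only

        // Same masks as the patch, written as -(1 << scale) to avoid
        // left-shifting a negative value:
        int clear_low = -(1 << scale) & (BytesPerInt - 1);  // 3 for bytes
        int bump_bit  = -(1 << scale) & BytesPerInt;        // 4 for bytes

        long slice_idx = 5, slice_len = 21;  // clear elements [5, 26)
        long start = (abase + (slice_idx << scale)) & ~(long)clear_low;
        long end   = (abase + ((slice_idx + slice_len) << scale)
                      + (BytesPerLong - 1)) & ~(long)(BytesPerLong - 1);

        // start rounds DOWN to a jint boundary (21 -> 20); end rounds UP
        // to a jlong boundary (42 -> 48), mirroring round_to().  The slop
        // is safe only because the object is otherwise uninitialized.
        printf("clear_low=%d bump_bit=%d clear bytes [%ld, %ld)\n",
               clear_low, bump_bit, start, end);
        return 0;
    }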
+@@ -4333,7 +4831,7 @@
+                                              Node* src,  Node* src_offset,
+                                              Node* dest, Node* dest_offset,
+                                              Node* copy_length,
+-                                             int nargs) { 
++                                             int nargs) {
+   if (stopped())  return NULL;
+ 
+   address copyfunc_addr = StubRoutines::checkcast_arraycopy();
+@@ -4371,7 +4869,7 @@
+ 
+ // Helper function; generates code for cases requiring runtime checks.
+ Node*
+-LibraryCallKit::generate_generic_arraycopy(const TypePtr* adr_type, 
++LibraryCallKit::generate_generic_arraycopy(const TypePtr* adr_type,
+                                            Node* src,  Node* src_offset,
+                                            Node* dest, Node* dest_offset,
+                                            Node* copy_length,
+@@ -4391,11 +4889,10 @@
+   return _gvn.transform(new (C, 1) ProjNode(call, TypeFunc::Parms));
+ }
+ 
+-
+ // Helper function; generates the fast out-of-line call to an arraycopy stub.
+ void
+-LibraryCallKit::generate_unchecked_arraycopy(BasicType basic_elem_type,
+-                                             const TypePtr* adr_type,
++LibraryCallKit::generate_unchecked_arraycopy(const TypePtr* adr_type,
++                                             BasicType basic_elem_type,
+                                              bool disjoint_bases,
+                                              Node* src,  Node* src_offset,
+                                              Node* dest, Node* dest_offset,
+@@ -4422,4 +4919,3 @@
+                     copyfunc_addr, copyfunc_name, adr_type,
+                     src_start, dest_start, copy_length XTOP);
+ }
+-
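(Also illustrative: the alignment pick-off in generate_block_arraycopy above,
as a standalone C++ sketch over byte buffers.  memcpy stands in for the
T_INT/T_LONG loads and stores; total_end is assumed 8-byte aligned, as
dest_size is pre-rounded in the patch.  Not HotSpot code.)

    #include <cstdio>
    #include <cstring>
    #include <cstdint>

    static void block_copy(uint8_t* dst, size_t dest_off,
                           const uint8_t* src, size_t src_off,
                           size_t total_end) {
        // Offsets 4 mod 8 that agree mod 8: pick off one initial jint
        // so the rest of the copy is 8-byte aligned.
        if (((src_off | dest_off) & 7) == 4 &&
            ((src_off ^ dest_off) & 7) == 0) {
            memcpy(dst + dest_off, src + src_off, 4);
            src_off += 4; dest_off += 4;
        }
        size_t countx = (total_end - dest_off) >> 3;  // as in the patch
        for (size_t i = 0; i < countx; i++)           // giant (jlong) steps
            memcpy(dst + dest_off + 8 * i, src + src_off + 8 * i, 8);
    }

    int main() {
        uint8_t src[64], dst[64] = {0};
        for (int i = 0; i < 64; i++) src[i] = (uint8_t)i;
        block_copy(dst, 12, src, 12, 64);  // offsets are 4 mod 8
        printf("dst[12]=%d dst[63]=%d\n", dst[12], dst[63]);  // 12 63
        return 0;
    }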
+diff -ruN openjdk6/hotspot/src/share/vm/opto/live.cpp openjdk/hotspot/src/share/vm/opto/live.cpp
+--- openjdk6/hotspot/src/share/vm/opto/live.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/live.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)live.cpp	1.70 07/05/17 17:44:00 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -41,7 +38,7 @@
+ // remaining new live-out values are ANDed with what is locally defined.
+ // Leftover bits become the new live-in for the predecessor block, and the pred
+ // block is put on the worklist.
+-//   The locally live-in stuff is computed once and added to predecessor 
++//   The locally live-in stuff is computed once and added to predecessor
+ // live-out sets.  This separate computation is done in the outer loop below.
+ PhaseLive::PhaseLive( const PhaseCFG &cfg, LRG_List &names, Arena *arena ) : Phase(LIVE), _cfg(cfg), _names(names), _arena(arena), _live(0) {
+ }
+@@ -58,7 +55,7 @@
+     _live[i].initialize(_maxlrg);
+   }
+ 
+-  // Init the sparse arrays for delta-sets.  
++  // Init the sparse arrays for delta-sets.
+   ResourceMark rm;              // Nuke temp storage on exit
+ 
+   // Does the memory used by _defs and _deltas get reclaimed?  Does it matter?  TT
+@@ -124,12 +121,12 @@
+       add_liveout( p, use, first_pass );
+ 
+       // PhiNode uses go in the live-out set of prior blocks.
+-      for( uint k=i; k>0; k-- ) 
++      for( uint k=i; k>0; k-- )
+         add_liveout( p, _names[b->_nodes[k-1]->in(l)->_idx], first_pass );
+     }
+     freeset( b );
+     first_pass.set(b->_pre_order);
+-    
++
+     // Inner loop: blocks that picked up new live-out values to be propagated
+     while( _worklist->size() ) {
+         // !!!!!
+@@ -141,7 +138,7 @@
+       assert( delta->count(), "missing delta set" );
+ 
+       // Add new-live-in to predecessors live-out sets
+-      for( uint l=1; l<b->num_preds(); l++ ) 
++      for( uint l=1; l<b->num_preds(); l++ )
+         add_liveout( _cfg._bbs[b->pred(l)->_idx], delta, first_pass );
+ 
+       freeset(b);
+@@ -210,7 +207,7 @@
+   IndexSet *f = _deltas[p->_pre_order-1];
+   f->set_next(_free_IndexSet);
+   _free_IndexSet = f;           // Drop onto free list
+-  _deltas[p->_pre_order-1] = NULL;  
++  _deltas[p->_pre_order-1] = NULL;
+ }
+ 
+ //------------------------------add_liveout------------------------------------
+@@ -225,7 +222,7 @@
+       if( !_deltas[p->_pre_order-1] && // Not on worklist?
+           first_pass.test(p->_pre_order) )
+         _worklist->push(p);     // Actually go on worklist if already 1st pass
+-      getset(p)->insert(r);  
++      getset(p)->insert(r);
+     }
+   }
+ }
+@@ -269,7 +266,7 @@
+     tty->print("L%d/", _names[b->_nodes[i]->_idx] );
+     b->_nodes[i]->dump();
+   }
+-  tty->print("\n");  
++  tty->print("\n");
+ }
+ 
+ //------------------------------verify_base_ptrs-------------------------------
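(The worklist scheme described in the live.cpp comments above can be sketched
in self-contained C++ with toy bitsets; nothing here is PhaseLive itself.
live-in = use | (live-out & ~def), and a predecessor whose live-out grows is
pushed back on the worklist until a fixed point is reached.)

    #include <cstdio>
    #include <cstdint>
    #include <deque>
    #include <vector>

    struct Block { uint32_t use, def; std::vector<int> preds; };

    int main() {
        // Bits: 1 = v0, 2 = v1.  CFG: 0 -> 1 -> 2, back edge 2 -> 1.
        std::vector<Block> b = {
            { 0, 2, {}     },   // block 0 defines v1
            { 2, 1, {0, 2} },   // block 1 uses v1, defines v0
            { 1, 0, {1}    },   // block 2 uses v0
        };
        std::vector<uint32_t> livein(b.size(), 0), liveout(b.size(), 0);
        std::deque<int> work = {0, 1, 2};
        while (!work.empty()) {
            int i = work.front(); work.pop_front();
            livein[i] = b[i].use | (liveout[i] & ~b[i].def);
            for (int p : b[i].preds) {
                uint32_t grown = liveout[p] | livein[i];
                if (grown != liveout[p]) {  // pred picked up new live-out
                    liveout[p] = grown;
                    work.push_back(p);      // revisit it
                }
            }
        }
        for (size_t i = 0; i < b.size(); i++)
            printf("block %zu: livein=%x liveout=%x\n",
                   i, livein[i], liveout[i]);
        return 0;
    }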
+diff -ruN openjdk6/hotspot/src/share/vm/opto/live.hpp openjdk/hotspot/src/share/vm/opto/live.hpp
+--- openjdk6/hotspot/src/share/vm/opto/live.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/live.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)live.hpp	1.43 07/05/05 17:06:19 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class Block;
+diff -ruN openjdk6/hotspot/src/share/vm/opto/locknode.cpp openjdk/hotspot/src/share/vm/opto/locknode.cpp
+--- openjdk6/hotspot/src/share/vm/opto/locknode.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/locknode.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)locknode.cpp	1.49 07/05/17 15:59:05 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -33,7 +30,7 @@
+   return _inmask;
+ }
+ 
+-const RegMask &BoxLockNode::out_RegMask() const { 
++const RegMask &BoxLockNode::out_RegMask() const {
+   return *Matcher::idealreg2regmask[Op_RegP];
+ }
+ 
+@@ -53,7 +50,7 @@
+ }
+ 
+ OptoReg::Name BoxLockNode::stack_slot(Node* box_node) {
+-  // Chase down the BoxNode 
++  // Chase down the BoxNode
+   while (!box_node->is_BoxLock()) {
+     //    if (box_node->is_SpillCopy()) {
+     //      Node *m = box_node->in(1);
+@@ -99,14 +96,14 @@
+ //------------------------------do_monitor_enter-------------------------------
+ void Parse::do_monitor_enter() {
+   kill_dead_locals();
+- 
++
+   // Null check; get casted pointer.
+   Node *obj = do_null_check(peek(), T_OBJECT);
+   // Check for locking null object
+   if (stopped()) return;
+ 
+   // the monitor object is not part of debug info expression stack
+-  pop(); 
++  pop();
+ 
+   // Insert a FastLockNode which takes as arguments the current thread pointer,
+   // the obj pointer & the address of the stack slot pair used for the lock.
+@@ -122,7 +119,4 @@
+   // the matching Lock for this Unlock.  Hence we know there is no need
+   // for a null check on Unlock.
+   shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());
+-} 
+-
+-
+-
++}
+diff -ruN openjdk6/hotspot/src/share/vm/opto/locknode.hpp openjdk/hotspot/src/share/vm/opto/locknode.hpp
+--- openjdk6/hotspot/src/share/vm/opto/locknode.hpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/locknode.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)locknode.hpp	1.39 07/05/17 15:59:09 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //------------------------------BoxLockNode------------------------------------
+@@ -46,8 +43,8 @@
+   static OptoReg::Name stack_slot(Node* box_node);
+ 
+ #ifndef PRODUCT
+-  virtual void format( PhaseRegAlloc * ) const;
+-  virtual void dump_spec() const { tty->print("  Lock %d",_slot); }
++  virtual void format( PhaseRegAlloc *, outputStream *st ) const;
++  virtual void dump_spec(outputStream *st) const { st->print("  Lock %d",_slot); }
+ #endif
+ };
+ 
+@@ -98,4 +95,3 @@
+   const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC;}
+ 
+ };
+-
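(The dump_spec changes in this file, and the matching ones in loopnode.cpp and
loopnode.hpp below, all follow one pattern: the destination stream becomes a
parameter instead of the global tty, so a node can be dumped to any stream and
overrides stay composable.  A minimal sketch with stand-in types, not
HotSpot's outputStream:)

    #include <cstdio>

    struct Stream {                    // stand-in for outputStream
        virtual ~Stream() {}
        virtual void print(const char* s) { fputs(s, stdout); }
    };

    struct LoopNodeLike {
        virtual ~LoopNodeLike() {}
        virtual void dump_spec(Stream* st) const { st->print("inner "); }
    };

    struct CountedLoopLike : LoopNodeLike {
        void dump_spec(Stream* st) const override {
            LoopNodeLike::dump_spec(st);  // base writes to the same stream
            st->print("stride: 4 ");
        }
    };

    int main() {
        Stream out;                    // could equally wrap a file or buffer
        CountedLoopLike n;
        n.dump_spec(&out);             // prints: inner stride: 4
        printf("\n");
        return 0;
    }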
+diff -ruN openjdk6/hotspot/src/share/vm/opto/loopnode.cpp openjdk/hotspot/src/share/vm/opto/loopnode.cpp
+--- openjdk6/hotspot/src/share/vm/opto/loopnode.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/loopnode.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)loopnode.cpp	1.258 07/05/17 17:44:08 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,20 +19,34 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+ #include "incls/_loopnode.cpp.incl"
+ 
+ //=============================================================================
++//------------------------------is_loop_iv-------------------------------------
++// Determine if a node is a counted loop induction variable.
++// The method is declared in node.hpp.
++const Node* Node::is_loop_iv() const {
++  if (this->is_Phi() && !this->as_Phi()->is_copy() &&
++      this->as_Phi()->region()->is_CountedLoop() &&
++      this->as_Phi()->region()->as_CountedLoop()->phi() == this) {
++    return this;
++  } else {
++    return NULL;
++  }
++}
++
++//=============================================================================
+ //------------------------------dump_spec--------------------------------------
+ // Dump special per-node info
+ #ifndef PRODUCT
+-void LoopNode::dump_spec() const {
+-  if( is_inner_loop () ) tty->print( "inner " );
+-  if( is_partial_peel_loop () ) tty->print( "partial_peel " );
+-  if( partial_peel_has_failed () ) tty->print( "partial_peel_failed " );
++void LoopNode::dump_spec(outputStream *st) const {
++  if( is_inner_loop () ) st->print( "inner " );
++  if( is_partial_peel_loop () ) st->print( "partial_peel " );
++  if( partial_peel_has_failed () ) st->print( "partial_peel_failed " );
+ }
+ #endif
+ 
+@@ -267,9 +278,11 @@
+   // '>' for count-down loops.  If the condition is inverted and we will
+   // be rolling through MININT to MAXINT, then bail out.
+ 
++  C->print_method("Before CountedLoop", 3);
++
+   // Check for SafePoint on backedge and remove
+   Node *sfpt = x->in(LoopNode::LoopBackControl);
+-  if( sfpt->Opcode() == Op_SafePoint ) {
++  if( sfpt->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt)) {
+     lazy_replace( sfpt, iftrue );
+     loop->_tail = iftrue;
+   }
+@@ -460,11 +473,14 @@
+ 
+   // Check for immediately preceding SafePoint and remove
+   Node *sfpt2 = le->in(0);
+-  if( sfpt2->Opcode() == Op_SafePoint )
++  if( sfpt2->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt2))
+     lazy_replace( sfpt2, sfpt2->in(TypeFunc::Control));
+ 
+   // Free up intermediate goo
+   _igvn.remove_dead_node(hook);
++
++  C->print_method("After CountedLoop", 3);
++
+   // Return trip counter
+   return trip_count;
+ }
+@@ -492,16 +508,16 @@
+ //------------------------------dump_spec--------------------------------------
+ // Dump special per-node info
+ #ifndef PRODUCT
+-void CountedLoopNode::dump_spec() const {
+-  LoopNode::dump_spec();
++void CountedLoopNode::dump_spec(outputStream *st) const {
++  LoopNode::dump_spec(st);
+   if( stride_is_con() ) {
+-    tty->print("stride: %d ",stride_con());
++    st->print("stride: %d ",stride_con());
+   } else {
+-    tty->print("stride: not constant ");
++    st->print("stride: not constant ");
+   }
+-  if( is_pre_loop () ) tty->print("pre of N%d" , _main_idx );
+-  if( is_main_loop() ) tty->print("main of N%d", _idx );
+-  if( is_post_loop() ) tty->print("post of N%d", _main_idx );
++  if( is_pre_loop () ) st->print("pre of N%d" , _main_idx );
++  if( is_main_loop() ) st->print("main of N%d", _idx );
++  if( is_post_loop() ) st->print("post of N%d", _main_idx );
+ }
+ #endif
+ 
+@@ -714,16 +730,16 @@
+ //------------------------------dump_spec--------------------------------------
+ // Dump special per-node info
+ #ifndef PRODUCT
+-void CountedLoopEndNode::dump_spec() const {
++void CountedLoopEndNode::dump_spec(outputStream *st) const {
+   if( in(TestValue)->is_Bool() ) {
+     BoolTest bt( test_trip()); // Added this for g++.
+ 
+-    tty->print("[");
+-    bt.dump();
+-    tty->print("]");
++    st->print("[");
++    bt.dump_on(st);
++    st->print("]");
+   }
+-  tty->print(" ");
+-  IfNode::dump_spec();
++  st->print(" ");
++  IfNode::dump_spec(st);
+ }
+ #endif
+ 
+@@ -1044,6 +1060,8 @@
+   // Cache parts in locals for easy
+   PhaseIterGVN &igvn = phase->_igvn;
+ 
++  phase->C->print_method("Before beautify loops", 3);
++
+   igvn.hash_delete(_head);      // Yank from hash before hacking edges
+ 
+   // Check for multiple fall-in paths.  Peel off a landing pad if need be.
+@@ -1107,36 +1125,180 @@
+       phase->_igvn.add_users_to_worklist(l->fast_out(i));
+   }
+ 
++  phase->C->print_method("After beautify loops", 3);
++
+   // Now recursively beautify nested loops
+   if( _child ) result |= _child->beautify_loops( phase );
+   if( _next  ) result |= _next ->beautify_loops( phase );
+   return result;
+ }
+ 
+-//------------------------------check_inner_safepts----------------------------
+-// Given dominators, try to find inner loops with calls that must always be
+-// executed (call dominates loop tail).  These loops do not need a seperate
+-// safepoint.
+-void IdealLoopTree::check_inner_safepts( PhaseIdealLoop *phase ) {
+-
+-  // No safepoints found yet?  Not irreducible?
+-  if( !_has_sfpt && !_irreducible &&
+-      // Inner loop, or at least a short trip count to walk the loop?
+-      (!_child ||
+-       (phase->dom_depth(tail()) - phase->dom_depth(_head)) < 20 ) ) {
+-    // Look for a dominating call
+-    for (Node* n = tail(); n != _head; n = phase->idom(n)) {
+-      if( n->is_Call() &&       // Found a call on idom chain?
+-         n->as_Call()->guaranteed_safepoint() ) {
++//------------------------------allpaths_check_safepts----------------------------
++// Allpaths backwards scan from loop tail, terminating each path at first safepoint
++// encountered.  Helper for check_safepts.
++void IdealLoopTree::allpaths_check_safepts(VectorSet &visited, Node_List &stack) {
++  assert(stack.size() == 0, "empty stack");
++  stack.push(_tail);
++  visited.Clear();
++  visited.set(_tail->_idx);
++  while (stack.size() > 0) {
++    Node* n = stack.pop();
++    if (n->is_Call() && n->as_Call()->guaranteed_safepoint()) {
++      // Terminate this path
++    } else if (n->Opcode() == Op_SafePoint) {
++      if (_phase->get_loop(n) != this) {
++        if (_required_safept == NULL) _required_safept = new Node_List();
++        _required_safept->push(n);  // save the one closest to the tail
++      }
++      // Terminate this path
++    } else {
++      uint start = n->is_Region() ? 1 : 0;
++      uint end   = n->is_Region() && !n->is_Loop() ? n->req() : start + 1;
++      for (uint i = start; i < end; i++) {
++        Node* in = n->in(i);
++        assert(in->is_CFG(), "must be");
++        if (!visited.test_set(in->_idx) && is_member(_phase->get_loop(in))) {
++          stack.push(in);
++        }
++      }
++    }
++  }
++}
++
++//------------------------------check_safepts----------------------------
++// Given dominators, try to find loops with calls that must always be
++// executed (call dominates loop tail).  These loops do not need non-call
++// safepoints (ncsfpt).
++//
++// A complication is that a safepoint in an inner loop may be needed
++// by an outer loop. In the following, the inner loop sees it has a
++// call (block 3) on every path from the head (block 2) to the
++// backedge (arc 3->2).  So it deletes the ncsfpt (non-call safepoint)
++// in block 2, _but_ this leaves the outer loop without a safepoint.
++//
++//          entry  0
++//                 |
++//                 v
++// outer 1,2    +->1
++//              |  |
++//              |  v
++//              |  2<---+  ncsfpt in 2
++//              |_/|\   |
++//                 | v  |
++// inner 2,3      /  3  |  call in 3
++//               /   |  |
++//              v    +--+
++//        exit  4
++//
++//
++// This method creates, for each loop, a list (_required_safept) of ncsfpt
++// nodes that must be protected.  When an ncsfpt may be deleted, it
++// is first looked for in the lists for the outer loops of the current loop.
++//
++// The insights into the problem:
++//  A) counted loops are okay
++//  B) innermost loops are okay (only an inner loop can delete
++//     a ncsfpt needed by an outer loop)
++//  C) a loop is immune from an inner loop deleting a safepoint
++//     if the loop has a call on the idom-path
++//  D) a loop is also immune if it has a ncsfpt (non-call safepoint) on the
++//     idom-path that is not in a nested loop
++//  E) otherwise, an ncsfpt on the idom-path that is nested in an inner
++//     loop needs to be prevented from deletion by an inner loop
++//
++// There are two analyses:
++//  1) The first, and cheaper one, scans the loop body from
++//     tail to head following the idom (immediate dominator)
++//     chain, looking for the cases (C,D,E) above.
++//     Since inner loops are scanned before outer loops, there is summary
++//     information about inner loops.  Inner loops can be skipped over
++//     when the tail of an inner loop is encountered.
++//
++//  2) The second, invoked if the first fails to find a call or ncsfpt on
++//     the idom path (which is rare), scans all predecessor control paths
++//     from the tail to the head, terminating a path when a call or sfpt
++//     is encountered, to find the ncsfpts that are closest to the tail.
++//
++void IdealLoopTree::check_safepts(VectorSet &visited, Node_List &stack) {
++  // Bottom up traversal
++  IdealLoopTree* ch = _child;
++  while (ch != NULL) {
++    ch->check_safepts(visited, stack);
++    ch = ch->_next;
++  }
++
++  if (!_head->is_CountedLoop() && !_has_sfpt && _parent != NULL && !_irreducible) {
++    bool  has_call         = false; // call on dom-path
++    bool  has_local_ncsfpt = false; // ncsfpt on dom-path at this loop depth
++    Node* nonlocal_ncsfpt  = NULL;  // ncsfpt on dom-path at a deeper depth
++    // Scan the dom-path nodes from tail to head
++    for (Node* n = tail(); n != _head; n = _phase->idom(n)) {
++      if (n->is_Call() && n->as_Call()->guaranteed_safepoint()) {
++        has_call = true;
+         _has_sfpt = 1;          // Then no need for a safept!
+         break;
++      } else if (n->Opcode() == Op_SafePoint) {
++        if (_phase->get_loop(n) == this) {
++          has_local_ncsfpt = true;
++          break;
++        }
++        if (nonlocal_ncsfpt == NULL) {
++          nonlocal_ncsfpt = n; // save the one closest to the tail
++        }
++      } else {
++        IdealLoopTree* nlpt = _phase->get_loop(n);
++        if (this != nlpt) {
++          // If at an inner loop tail, see if the inner loop has already
++          // recorded seeing a call on the dom-path (and stop).  If not,
++          // jump to the head of the inner loop.
++          assert(is_member(nlpt), "nested loop");
++          Node* tail = nlpt->_tail;
++          if (tail->in(0)->is_If()) tail = tail->in(0);
++          if (n == tail) {
++            // If inner loop has call on dom-path, so does outer loop
++            if (nlpt->_has_sfpt) {
++              has_call = true;
++              _has_sfpt = 1;
++              break;
++            }
++            // Skip to head of inner loop
++            assert(_phase->is_dominator(_head, nlpt->_head), "inner head dominated by outer head");
++            n = nlpt->_head;
++          }
++        }
++      }
++    }
++    // Record safepts that this loop needs preserved when an
++    // inner loop attempts to delete its safepoints.
++    if (_child != NULL && !has_call && !has_local_ncsfpt) {
++      if (nonlocal_ncsfpt != NULL) {
++        if (_required_safept == NULL) _required_safept = new Node_List();
++        _required_safept->push(nonlocal_ncsfpt);
++      } else {
++        // Failed to find a suitable safept on the dom-path.  Now use
++        // an all paths walk from tail to head, looking for safepoints to preserve.
++        allpaths_check_safepts(visited, stack);
+       }
+     }
+   }
++}
+ 
+-  // Recursively
+-  if( _child ) _child->check_inner_safepts( phase );
+-  if( _next  ) _next ->check_inner_safepts( phase );
++//---------------------------is_deleteable_safept----------------------------
++// Is safept not required by an outer loop?
++bool PhaseIdealLoop::is_deleteable_safept(Node* sfpt) {
++  assert(sfpt->Opcode() == Op_SafePoint, "");
++  IdealLoopTree* lp = get_loop(sfpt)->_parent;
++  while (lp != NULL) {
++    Node_List* sfpts = lp->_required_safept;
++    if (sfpts != NULL) {
++      for (uint i = 0; i < sfpts->size(); i++) {
++        if (sfpt == sfpts->at(i))
++          return false;
++      }
++    }
++    lp = lp->_parent;
++  }
++  return true;
+ }
+ 
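(A toy rendering of the bookkeeping above, with plain structs rather than
IdealLoopTree: is_deleteable_safept asks whether any enclosing loop lists the
safepoint as required, walking parent links outward.)

    #include <cstdio>
    #include <vector>

    struct LoopT {
        LoopT* parent = nullptr;
        std::vector<int> required_safept;  // ids of protected safepoints
    };

    static bool is_deleteable_safept(const LoopT* lp, int sfpt_id) {
        for (lp = lp->parent; lp != nullptr; lp = lp->parent)
            for (int id : lp->required_safept)
                if (id == sfpt_id)
                    return false;          // an outer loop still needs it
        return true;
    }

    int main() {
        LoopT outer, inner;
        inner.parent = &outer;
        outer.required_safept.push_back(7);  // outer loop depends on sfpt 7
        printf("sfpt 7: %d, sfpt 9: %d\n",
               is_deleteable_safept(&inner, 7),   // 0: protected
               is_deleteable_safept(&inner, 9));  // 1: deleteable
        return 0;
    }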
+ //------------------------------counted_loop-----------------------------------
+@@ -1154,7 +1316,8 @@
+ 
+     // Look for a safepoint to remove
+     for (Node* n = tail(); n != _head; n = phase->idom(n))
+-      if( n->Opcode() == Op_SafePoint ) // Found a safept?
++      if (n->Opcode() == Op_SafePoint && phase->get_loop(n) == this &&
++          phase->is_deleteable_safept(n))
+         phase->lazy_replace(n,n->in(TypeFunc::Control));
+ 
+     CountedLoopNode *cl = _head->as_CountedLoop();
+@@ -1230,6 +1393,21 @@
+         continue;
+       }
+     }
++  } else if (_parent != NULL && !_irreducible) {
++    // Not a counted loop.
++    // Look for a safepoint on the idom-path to remove, preserving the first one
++    bool found = false;
++    Node* n = tail();
++    for (; n != _head && !found; n = phase->idom(n)) {
++      if (n->Opcode() == Op_SafePoint && phase->get_loop(n) == this)
++        found = true; // Found one
++    }
++    // Skip past it and delete the others
++    for (; n != _head; n = phase->idom(n)) {
++      if (n->Opcode() == Op_SafePoint && phase->get_loop(n) == this &&
++          phase->is_deleteable_safept(n))
++        phase->lazy_replace(n,n->in(TypeFunc::Control));
++    }
+   }
+ 
+   // Recursively
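(The branch added above keeps the safepoint closest to the tail and deletes
the rest along the idom chain.  A small sketch of that keep-first policy,
with strings standing in for nodes and the loop-membership and
is_deleteable_safept checks elided:)

    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
        // idom chain from tail to head, flattened for illustration
        std::vector<std::string> chain =
            {"sfpt#3", "if", "sfpt#2", "store", "sfpt#1"};
        bool found = false;
        for (auto& n : chain) {
            if (n.rfind("sfpt", 0) == 0) {               // a safepoint node?
                if (!found) { found = true; continue; }  // keep the first
                n = "(removed " + n + ")";               // delete later ones
            }
        }
        for (const auto& n : chain) printf("%s ", n.c_str());
        printf("\n");  // sfpt#3 if (removed sfpt#2) store (removed sfpt#1)
        return 0;
    }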
+@@ -1374,7 +1552,8 @@
+   // Given dominators, try to find inner loops with calls that must
+   // always be executed (call dominates loop tail).  These loops do
+   // not need a separate safepoint.
+-  _ltree_root->check_inner_safepts( this );
++  Node_List cisstack(a);
++  _ltree_root->check_safepts(visited, cisstack);
+ 
+   // Walk the DATA nodes and place into loops.  Find earliest control
+   // node.  For CFG nodes, the _nodes array starts out and remains
+@@ -1429,7 +1608,7 @@
+       if (!lpt->is_counted() || !lpt->is_inner()) continue;
+ 
+       lpt->reassociate_invariants(this);
+-      
++
+       // Because RCE opportunities can be masked by split_thru_phi,
+       // look for RCE candidates and inhibit split_thru_phi
+       // on just their loop-phi's for this pass of loop opts
+@@ -2119,7 +2298,8 @@
+           // (the old code here would yank a 2nd safepoint after seeing a
+           // first one, even though the 1st did not dominate in the loop body
+           // and thus could be avoided indefinitely)
+-          if( !verify_me && ilt->_has_sfpt && n->Opcode() == Op_SafePoint ) {
++          if( !verify_me && ilt->_has_sfpt && n->Opcode() == Op_SafePoint &&
++              is_deleteable_safept(n)) {
+             Node *in = n->in(TypeFunc::Control);
+             lazy_replace(n,in);       // Pull safepoint now
+             // Carry on with the recursion "as if" we are walking
+@@ -2694,7 +2874,7 @@
+   } else {
+     while (_curnt != _root && _curnt->_next == NULL) {
+       _curnt = _curnt->_parent;
+-    }      
++    }
+     if (_curnt == _root) {
+       _curnt = NULL;
+       assert(done(), "must be done.");
+@@ -2704,4 +2884,3 @@
+     }
+   }
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/opto/loopnode.hpp openjdk/hotspot/src/share/vm/opto/loopnode.hpp
+--- openjdk6/hotspot/src/share/vm/opto/loopnode.hpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/loopnode.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)loopnode.hpp	1.143 07/06/29 13:39:53 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class CmpNode;
+@@ -39,12 +36,12 @@
+ //                  I D E A L I Z E D   L O O P S
+ //
+ // Idealized loops are the set of loops I perform more interesting
+-// transformations on, beyond simple hoisting.  
++// transformations on, beyond simple hoisting.
+ 
+ //------------------------------LoopNode---------------------------------------
+ // Simple loop header.  Fall in path on left, loop-back path on right.
+ class LoopNode : public RegionNode {
+-  // Size is bigger to hold the flags.  However, the flags do not change 
++  // Size is bigger to hold the flags.  However, the flags do not change
+   // the semantics so it does not appear in the hash & cmp functions.
+   virtual uint size_of() const { return sizeof(*this); }
+ protected:
+@@ -87,7 +84,7 @@
+       in(2) != NULL && phase->type(in(2)) != Type::TOP;
+   }
+ #ifndef PRODUCT
+-  virtual void dump_spec() const;
++  virtual void dump_spec(outputStream *st) const;
+ #endif
+ };
+ 
+@@ -117,21 +114,21 @@
+ // CountedLoopNodes head simple counted loops.  CountedLoopNodes have as
+ // inputs the incoming loop-start control and the loop-back control, so they
+ // act like RegionNodes.  They also take in the initial trip counter, the
+-// loop-invariant stride and the loop-invariant limit value.  CountedLoopNodes 
+-// produce a loop-body control and the trip counter value.  Since 
++// loop-invariant stride and the loop-invariant limit value.  CountedLoopNodes
++// produce a loop-body control and the trip counter value.  Since
+ // CountedLoopNodes behave like RegionNodes I still have a standard CFG model.
+ 
+ class CountedLoopNode : public LoopNode {
+-  // Size is bigger to hold _main_idx.  However, _main_idx does not change 
++  // Size is bigger to hold _main_idx.  However, _main_idx does not change
+   // the semantics so it does not appear in the hash & cmp functions.
+   virtual uint size_of() const { return sizeof(*this); }
+ 
+   // For Pre- and Post-loops during debugging ONLY, this holds the index of
+   // the Main CountedLoop.  Used to assert that we understand the graph shape.
+   node_idx_t _main_idx;
+-  
++
+   // Known trip count calculated by policy_maximally_unroll
+-  int   _trip_count; 
++  int   _trip_count;
+ 
+   // Expected trip count from profile data
+   float _profile_trip_cnt;
+@@ -149,7 +146,7 @@
+       _profile_trip_cnt(COUNT_UNKNOWN), _unrolled_count_log2(0),
+       _node_count_before_unroll(0) {
+     init_class_id(Class_CountedLoop);
+-    // Initialize _trip_count to the largest possible value. 
++    // Initialize _trip_count to the largest possible value.
+     // Will be reset (lower) if the loop's trip count is known.
+   }
+ 
+@@ -214,7 +211,7 @@
+   int  node_count_before_unroll()           { return _node_count_before_unroll; }
+ 
+ #ifndef PRODUCT
+-  virtual void dump_spec() const;
++  virtual void dump_spec(outputStream *st) const;
+ #endif
+ };
+ 
+@@ -240,24 +237,24 @@
+   int stride_con() const;
+   bool stride_is_con() const        { Node *tmp = stride  (); return (tmp != NULL && tmp->is_Con()); }
+   BoolTest::mask test_trip() const  { return in(TestValue)->as_Bool()->_test._test; }
+-  CountedLoopNode *loopnode() const { 
++  CountedLoopNode *loopnode() const {
+     Node *ln = phi()->in(0);
+     assert( ln->Opcode() == Op_CountedLoop, "malformed loop" );
+     return (CountedLoopNode*)ln; }
+ 
+ #ifndef PRODUCT
+-  virtual void dump_spec() const;
++  virtual void dump_spec(outputStream *st) const;
+ #endif
+ };
+ 
+ 
+-inline CountedLoopEndNode *CountedLoopNode::loopexit() const { 
++inline CountedLoopEndNode *CountedLoopNode::loopexit() const {
+   Node *bc = back_control();
+   if( bc == NULL ) return NULL;
+   Node *le = bc->in(0);
+   if( le->Opcode() != Op_CountedLoopEnd )
+     return NULL;
+-  return (CountedLoopEndNode*)le; 
++  return (CountedLoopEndNode*)le;
+ }
+ inline Node *CountedLoopNode::init_trip() const { return loopexit() ? loopexit()->init_trip() : NULL; }
+ inline Node *CountedLoopNode::stride() const { return loopexit() ? loopexit()->stride() : NULL; }
+@@ -285,17 +282,20 @@
+   PhaseIdealLoop* _phase;
+ 
+   Node_List _body;              // Loop body for inner loops
+-  
++
+   uint8 _nest;                  // Nesting depth
+   uint8 _irreducible:1,         // True if irreducible
+         _has_call:1,            // True if has call safepoint
+         _has_sfpt:1,            // True if has non-call safepoint
+         _rce_candidate:1;       // True if candidate for range check elimination
+ 
++  Node_List* _required_safept;      // An inner loop cannot delete these safepts.
++
+   IdealLoopTree( PhaseIdealLoop* phase, Node *head, Node *tail )
+     : _parent(0), _next(0), _child(0),
+       _head(head), _tail(tail),
+       _phase(phase),
++      _required_safept(NULL),
+       _nest(0), _irreducible(0), _has_call(0), _has_sfpt(0), _rce_candidate(0)
+   { }
+ 
+@@ -305,13 +305,13 @@
+   // Set loop nesting depth.  Accumulate has_call bits.
+   int set_nest( uint depth );
+ 
+-  // Split out multiple fall-in edges from the loop header.  Move them to a 
++  // Split out multiple fall-in edges from the loop header.  Move them to a
+   // private RegionNode before the loop.  This becomes the loop landing pad.
+   void split_fall_in( PhaseIdealLoop *phase, int fall_in_cnt );
+ 
+   // Split out the outermost loop from this shared header.
+   void split_outer_loop( PhaseIdealLoop *phase );
+-  
++
+   // Merge all the backedges from the shared header into a private Region.
+   // Feed that region as the one backedge to this loop.
+   void merge_many_backedges( PhaseIdealLoop *phase );
+@@ -328,10 +328,14 @@
+   // Driver for various flavors of iteration splitting
+   void iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new );
+ 
+-  // Given dominators, try to find inner loops with calls that must
+-  // always be executed (call dominates loop tail).  These loops do
+-  // not need a seperate safepoint.
+-  void check_inner_safepts( PhaseIdealLoop *phase );
++  // Given dominators, try to find loops with calls that must always be
++  // executed (call dominates loop tail).  These loops do not need non-call
++  // safepoints (ncsfpt).
++  void check_safepts(VectorSet &visited, Node_List &stack);
++
++  // Allpaths backwards scan from loop tail, terminating each path at first safepoint
++  // encountered.
++  void allpaths_check_safepts(VectorSet &visited, Node_List &stack);
+ 
+   // Convert to counted loops where possible
+   void counted_loop( PhaseIdealLoop *phase );
+@@ -349,7 +353,7 @@
+   // Replace with a 1-in-10 exit guess.
+   void adjust_loop_exit_prob( PhaseIdealLoop *phase );
+ 
+-  // Return TRUE or FALSE if the loop should never be RCE'd or aligned.  
++  // Return TRUE or FALSE if the loop should never be RCE'd or aligned.
+   // Useful for unrolling loops with NO array accesses.
+   bool policy_peel_only( PhaseIdealLoop *phase ) const;
+ 
+@@ -361,7 +365,7 @@
+   bool policy_do_remove_empty_loop( PhaseIdealLoop *phase );
+ 
+   // Return TRUE or FALSE if the loop should be peeled or not.  Peel if we can
+-  // make some loop-invariant test (usually a null-check) happen before the 
++  // make some loop-invariant test (usually a null-check) happen before the
+   // loop.
+   bool policy_peeling( PhaseIdealLoop *phase ) const;
+ 
+@@ -369,7 +373,7 @@
+   // known trip count in the counted loop node.
+   bool policy_maximally_unroll( PhaseIdealLoop *phase ) const;
+ 
+-  // Return TRUE or FALSE if the loop should be unrolled or not.  Unroll if 
++  // Return TRUE or FALSE if the loop should be unrolled or not.  Unroll if
+   // the loop is a CountedLoop and the body is small enough.
+   bool policy_unroll( PhaseIdealLoop *phase ) const;
+ 
+@@ -448,7 +452,7 @@
+     memset(_preorders, 0, sizeof(uint) * _max_preorder);
+   }
+ 
+-  // Check to grow _preorders[] array for the case when build_loop_tree_impl() 
++  // Check to grow _preorders[] array for the case when build_loop_tree_impl()
+   // adds new nodes.
+   void check_grow_preorders( ) {
+     if ( _max_preorder < C->unique() ) {
+@@ -461,14 +465,14 @@
+   // Check for pre-visited.  Zero for NOT visited; non-zero for visited.
+   int is_visited( Node *n ) const { return _preorders[n->_idx]; }
+   // Pre-order numbers are written to the Nodes array as low-bit-set values.
+-  void set_preorder_visited( Node *n, int pre_order ) { 
++  void set_preorder_visited( Node *n, int pre_order ) {
+     assert( !is_visited( n ), "already set" );
+     _preorders[n->_idx] = (pre_order<<1);
+   };
+   // Return pre-order number.
+   int get_preorder( Node *n ) const { assert( is_visited(n), "" ); return _preorders[n->_idx]>>1; }
+ 
+-  // Check for being post-visited. 
++  // Check for being post-visited.
+   // Should be previsited already (checked with assert(is_visited(n))).
+   int is_postvisited( Node *n ) const { assert( is_visited(n), "" ); return _preorders[n->_idx]&1; }
+ 
+@@ -557,7 +561,7 @@
+   Node *get_ctrl_no_update( Node *i ) const {
+     assert( has_ctrl(i), "" );
+     Node *n = (Node*)(((intptr_t)_nodes[i->_idx]) & ~1);
+-    if (!n->in(0)) {        
++    if (!n->in(0)) {
+       // Skip dead CFG nodes
+       do {
+         n = (Node*)(((intptr_t)_nodes[n->_idx]) & ~1);
+@@ -569,7 +573,7 @@
+ 
+   // Check for loop being set
+   // "n" must be a control node. Returns true if "n" is known to be in a loop.
+-  bool has_loop( Node *n ) const { 
++  bool has_loop( Node *n ) const {
+     assert(!has_node(n) || !has_ctrl(n), "");
+     return has_node(n);
+   }
+@@ -619,11 +623,11 @@
+ private:
+   uint _idom_size;
+   Node **_idom;                 // Array of immediate dominators
+-  uint *_dom_depth;           // Used for fast LCA test    
++  uint *_dom_depth;           // Used for fast LCA test
+   GrowableArray<uint>* _dom_stk; // For recomputation of dom depth
+ 
+   Node* idom_no_update(Node* d) const {
+-    assert(d->_idx < _idom_size, "oob"); 
++    assert(d->_idx < _idom_size, "oob");
+     Node* n = _idom[d->_idx];
+     assert(n != NULL,"Bad immediate dominator info.");
+     while (n->in(0) == NULL) {  // Skip dead CFG nodes
+@@ -639,7 +643,7 @@
+     _idom[didx] = n;            // Lazily remove dead CFG nodes from table.
+     return n;
+   }
+-  uint dom_depth(Node* d) const {                        
++  uint dom_depth(Node* d) const {
+     assert(d->_idx < _idom_size, "");
+     return _dom_depth[d->_idx];
+   }
+@@ -649,6 +653,9 @@
+   // Recompute dom_depth
+   void recompute_dom_depth();
+ 
++  // Is safept not required by an outer loop?
++  bool is_deleteable_safept(Node* sfpt);
++
+ public:
+   // Dominators for the sea of nodes
+   void Dominators();
+@@ -677,7 +684,7 @@
+   }
+ 
+   // Is 'n' a (nested) member of 'loop'?
+-  int is_member( const IdealLoopTree *loop, Node *n ) const { 
++  int is_member( const IdealLoopTree *loop, Node *n ) const {
+     return loop->is_member(get_loop(n)); }
+ 
+   // This is the basic building block of the loop optimizations.  It clones an
+@@ -696,7 +703,7 @@
+   //      dominated by the passed in side_by_side_idom node.  Used in
+   //      construction of unswitched loops.
+   void clone_loop( IdealLoopTree *loop, Node_List &old_new, int dom_depth,
+-	           Node* side_by_side_idom = NULL);
++                   Node* side_by_side_idom = NULL);
+ 
+   // If we got the effect of peeling, either by actually peeling or by
+   // making a pre-loop which must execute at least once, we can remove
+@@ -752,7 +759,7 @@
+   // the pre-loop or the post-loop until the condition holds true in the main
+   // loop.  Scale_con, offset and limit are all loop invariant.
+   void add_constraint( int stride_con, int scale_con, Node *offset, Node *limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit );
+-  
++
+   // Partially peel loop up through last_peel node.
+   bool partial_peel( IdealLoopTree *loop, Node_List &old_new );
+ 
+@@ -799,13 +806,13 @@
+ 
+   // Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
+   // "Nearly" because all Nodes have been cloned from the original in the loop,
+-  // but the fall-in edges to the Cmp are different.  Clone bool/Cmp pairs 
++  // but the fall-in edges to the Cmp are different.  Clone bool/Cmp pairs
+   // through the Phi recursively, and return a Bool.
+   BoolNode *clone_iff( PhiNode *phi, IdealLoopTree *loop );
+   CmpNode *clone_bool( PhiNode *phi, IdealLoopTree *loop );
+ 
+ 
+-  // Rework addressing expressions to get the most loop-invariant stuff 
++  // Rework addressing expressions to get the most loop-invariant stuff
+   // moved out.  We'd like to do all associative operators, but it's especially
+   // important (common) to do address expressions.
+   Node *remix_address_expressions( Node *n );
+@@ -833,7 +840,7 @@
+   Node *split_thru_region( Node *n, Node *region );
+   // Split Node 'n' through merge point if there is enough win.
+   Node *split_thru_phi( Node *n, Node *region, int policy );
+-  // Found an If getting its condition-code input from a Phi in the 
++  // Found an If getting its condition-code input from a Phi in the
+   // same block.  Split thru the Region.
+   void do_split_if( Node *iff );
+ 
+@@ -910,4 +917,3 @@
+ 
+   IdealLoopTree* current() { return _curnt; }  // Return current value of iterator.
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/opto/loopopts.cpp openjdk/hotspot/src/share/vm/opto/loopopts.cpp
+--- openjdk6/hotspot/src/share/vm/opto/loopopts.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/loopopts.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)loopopts.cpp	1.220 07/06/29 13:39:54 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -603,6 +600,9 @@
+     }
+   }
+ 
++  // Use same limit as split_if_with_blocks_post
++  if( C->unique() > 35000 ) return n; // Method too big
++
+   // Split 'n' through the merge point if it is profitable
+   Node *phi = split_thru_phi( n, n_blk, policy );
+   if( !phi ) return n;
+@@ -711,7 +711,7 @@
+           get_ctrl(iff->in(3)) == n_ctrl )
+         return;                 // Inputs not yet split-up
+       if ( get_loop(n_ctrl) != get_loop(get_ctrl(iff)) ) {
+-	return;                 // Loop-invar test gates loop-varying CMOVE
++        return;                 // Loop-invar test gates loop-varying CMOVE
+       }
+     } else {
+       return;  // some other kind of node, such as an Allocate
+@@ -1238,20 +1238,26 @@
+         assert( dd_r >= dom_depth(dom_lca(newuse,use)), "" );
+ 
+         // The original user of 'use' uses 'r' instead.
+-        for (DUIterator_Last lmin, l = use->last_outs(lmin); l >= lmin; --l) {
++        for (DUIterator_Last lmin, l = use->last_outs(lmin); l >= lmin;) {
+           Node* useuse = use->last_out(l);
+           _igvn.hash_delete(useuse);
+           _igvn._worklist.push(useuse);
++          uint uses_found = 0;
+           if( useuse->in(0) == use ) {
+             useuse->set_req(0, r);
++            uses_found++;
+             if( useuse->is_CFG() ) {
+               assert( dom_depth(useuse) > dd_r, "" );
+               set_idom(useuse, r, dom_depth(useuse));
+             }
+           }
+-          for( uint k = 1; k < useuse->req(); k++ )
+-            if( useuse->in(k) == use )
++          for( uint k = 1; k < useuse->req(); k++ ) {
++            if( useuse->in(k) == use ) {
+               useuse->set_req(k, r);
++              uses_found++;
++            }
++          }
++          l -= uses_found;    // we deleted 1 or more copies of this edge
+         }
+ 
+         // Now finish up 'r'
+@@ -2669,4 +2675,3 @@
+   }
+ 
+ }
+-
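(The loopopts.cpp hunk above fixes use-list iteration when one user holds
several edges to the same node.  A self-contained miniature of the fixed
scheme with plain vectors, not Node/DUIterator_Last; the key line is the
final l -= uses_found:)

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct TNode {
        std::vector<TNode*> ins;   // input edges
        std::vector<TNode*> outs;  // one entry per input edge pointing here
    };

    static void rewrite_uses(TNode* use, TNode* r) {
        size_t l = use->outs.size();
        while (l > 0) {
            TNode* useuse = use->outs[l - 1];
            size_t uses_found = 0;
            for (TNode*& in : useuse->ins)
                if (in == use) { in = r; r->outs.push_back(useuse); uses_found++; }
            // every rewritten edge removes one copy of useuse from the list
            use->outs.erase(std::remove(use->outs.begin(), use->outs.end(),
                                        useuse),
                            use->outs.end());
            l -= uses_found;  // we deleted 1 or more copies of this edge
        }
    }

    int main() {
        TNode use, r, a, b;
        a.ins = { &use, &use };        // 'a' uses 'use' twice
        b.ins = { &use };
        use.outs = { &a, &a, &b };
        rewrite_uses(&use, &r);
        printf("use.outs=%zu r.outs=%zu a rewired: %d\n",
               use.outs.size(), r.outs.size(), a.ins[0] == &r);  // 0 3 1
        return 0;
    }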
+diff -ruN openjdk6/hotspot/src/share/vm/opto/loopTransform.cpp openjdk/hotspot/src/share/vm/opto/loopTransform.cpp
+--- openjdk6/hotspot/src/share/vm/opto/loopTransform.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/loopTransform.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)loopTransform.cpp	1.116 07/06/01 11:35:03 JVM"
+-#endif
+ /*
+  * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,14 +19,14 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+ #include "incls/_loopTransform.cpp.incl"
+ 
+ //------------------------------is_loop_exit-----------------------------------
+-// Given an IfNode, return the loop-exiting projection or NULL if both 
++// Given an IfNode, return the loop-exiting projection or NULL if both
+ // arms remain in the loop.
+ Node *IdealLoopTree::is_loop_exit(Node *iff) const {
+   if( iff->outcnt() != 2 ) return NULL; // Ignore partially dead tests
+@@ -175,7 +172,7 @@
+   Node* inv2_c = phase->get_ctrl(inv2);
+   Node* n_inv1;
+   if (neg_inv1) {
+-    Node *zero = phase->_igvn.intcon(0); 
++    Node *zero = phase->_igvn.intcon(0);
+     phase->set_ctrl(zero, phase->C->root());
+     n_inv1 = new (phase->C, 3) SubINode(zero, inv1);
+     phase->register_new_node(n_inv1, inv1_c);
+@@ -248,7 +245,7 @@
+ 
+ //------------------------------peeled_dom_test_elim---------------------------
+ // If we got the effect of peeling, either by actually peeling or by making
+-// a pre-loop which must execute at least once, we can remove all 
++// a pre-loop which must execute at least once, we can remove all
+ // loop-invariant dominated tests in the main body.
+ void PhaseIdealLoop::peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new ) {
+   bool progress = true;
+@@ -257,7 +254,7 @@
+     Node *prev = loop->_head->in(LoopNode::LoopBackControl);//loop->tail();
+     Node *test = prev->in(0);
+     while( test != loop->_head ) { // Scan till run off top of loop
+-      
++
+       int p_op = prev->Opcode();
+       if( (p_op == Op_IfFalse || p_op == Op_IfTrue) &&
+           test->is_If() &&      // Test?
+@@ -282,7 +279,7 @@
+ }
+ 
+ //------------------------------do_peeling-------------------------------------
+-// Peel the first iteration of the given loop.  
++// Peel the first iteration of the given loop.
+ // Step 1: Clone the loop body.  The clone becomes the peeled iteration.
+ //         The pre-loop illegally has 2 control users (old & new loops).
+ // Step 2: Make the old-loop fall-in edges point to the peeled iteration.
+@@ -340,7 +337,7 @@
+       old->set_req(LoopNode::EntryControl, new_exit_value);
+     }
+   }
+-  
++
+ 
+   // Step 3: Cut the backedge on the clone (so it's not a loop) and remove the
+   //         extra backedge user.
+@@ -371,7 +368,7 @@
+     }
+   }
+ 
+-  // Now force out all loop-invariant dominating tests.  The optimizer 
++  // Now force out all loop-invariant dominating tests.  The optimizer
+   // finds some, but we _know_ they are all useless.
+   peeled_dom_test_elim(loop,old_new);
+ 
+@@ -429,7 +426,7 @@
+ 
+ 
+ //------------------------------policy_unroll----------------------------------
+-// Return TRUE or FALSE if the loop should be unrolled or not.  Unroll if 
++// Return TRUE or FALSE if the loop should be unrolled or not.  Unroll if
+ // the loop is a CountedLoop and the body is small enough.
+ bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const {
+ 
+@@ -443,7 +440,7 @@
+   if( cl->trip_count() <= 1 ) return false;
+ 
+   int future_unroll_ct = cl->unrolled_count() * 2;
+- 
++
+   // Don't unroll if the next round of unrolling would push us
+   // over the expected trip count of the loop.  One is subtracted
+   // from the expected trip count because the pre-loop normally
+@@ -454,7 +451,7 @@
+       (float)future_unroll_ct > cl->profile_trip_cnt() - 1.0) {
+     return false;
+   }
+-  
++
+   // When unroll count is greater than LoopUnrollMin, don't unroll if:
+   //   the residual iterations are more than 10% of the trip count
+   //   and rounds of "unroll,optimize" are not making significant progress
+@@ -507,12 +504,12 @@
+   }
+ 
+   // Check for being too big
+-  if( body_size > (uint)LoopUnrollLimit ) { 
++  if( body_size > (uint)LoopUnrollLimit ) {
+     if( xors_in_loop >= 4 && body_size < (uint)LoopUnrollLimit*4) return true;
+     // Normal case: loop too big
+     return false;
+   }
+-  
++
+   // Check for stride being a small enough constant
+   if( abs(cl->stride_con()) > (1<<3) ) return false;
+ 
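
As a reading aid for the policy_unroll hunks above: the checks reduce to a trip-count guard, a body-size cap, and a small-constant-stride test. A condensed standalone C++ model follows; the function name and the loop_unroll_limit parameter are illustrative stand-ins, not HotSpot's real flags or API.

#include <cstdlib>

// Condensed model of the unroll-policy checks quoted above; not the
// real IdealLoopTree::policy_unroll, just its decision shape.
bool should_unroll(int trip_count, int unrolled_count,
                   int body_size, int stride_con,
                   int loop_unroll_limit /* stand-in for LoopUnrollLimit */) {
  if (trip_count <= 1) return false;           // nothing worth unrolling
  int future_unroll_ct = unrolled_count * 2;   // next round doubles the body
  if (future_unroll_ct > trip_count - 1)       // would overshoot the expected
    return false;                              // trip count (pre-loop takes 1)
  if (body_size > loop_unroll_limit)           // loop body too big
    return false;
  if (abs(stride_con) > (1 << 3))              // stride not a small constant
    return false;
  return true;
}
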
+@@ -558,14 +555,14 @@
+       Node *limit = cmp->in(2);
+ 
+       Node *limit_c = phase->get_ctrl(limit);
+-      if( limit_c == phase->C->top() ) 
++      if( limit_c == phase->C->top() )
+         return false;           // Found dead test on live IF?  No RCE!
+       if( is_member(phase->get_loop(limit_c) ) ) {
+         // Compare might have operands swapped; commute them
+         rc_exp = cmp->in(2);
+         limit  = cmp->in(1);
+         limit_c = phase->get_ctrl(limit);
+-        if( is_member(phase->get_loop(limit_c) ) ) 
++        if( is_member(phase->get_loop(limit_c) ) )
+           continue;             // Both inputs are loop varying; cannot RCE
+       }
+ 
+@@ -597,7 +594,7 @@
+ }
+ 
+ //------------------------------clone_up_backedge_goo--------------------------
+-// If Node n lives in the back_ctrl block and cannot float, we clone a private 
++// If Node n lives in the back_ctrl block and cannot float, we clone a private
+ // version of n in preheader_ctrl block and return that, otherwise return n.
+ Node *PhaseIdealLoop::clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n ) {
+   if( get_ctrl(n) != back_ctrl ) return n;
+@@ -863,7 +860,7 @@
+   post_head->set_profile_trip_cnt(4.0);
+   pre_head->set_profile_trip_cnt(4.0);
+ 
+-  // Now force out all loop-invariant dominating tests.  The optimizer 
++  // Now force out all loop-invariant dominating tests.  The optimizer
+   // finds some, but we _know_ they are all useless.
+   peeled_dom_test_elim(loop,old_new);
+ }
+@@ -970,7 +967,7 @@
+   }
+ 
+   // ---------
+-  // Step 4: Clone the loop body.  Move it inside the loop.  This loop body 
++  // Step 4: Clone the loop body.  Move it inside the loop.  This loop body
+   // represents the odd iterations; since the loop trips an even number of
+   // times its backedge is never taken.  Kill the backedge.
+   uint dd = dom_depth(loop_head);
+@@ -989,7 +986,7 @@
+       newphi->set_req(LoopNode::LoopBackControl, phi   ->in(LoopNode::LoopBackControl));
+       phi   ->set_req(LoopNode::LoopBackControl, C->top());
+     }
+-  }  
++  }
+   Node *clone_head = old_new[loop_head->_idx];
+   _igvn.hash_delete( clone_head );
+   loop_head ->set_req(LoopNode::   EntryControl, clone_head->in(LoopNode::LoopBackControl));
+@@ -1030,7 +1027,7 @@
+ 
+   // Now it's tripping an even number of times remaining.  Double loop body.
+   // Do not adjust pre-guards; they are not needed and do not exist.
+-  if( cl->trip_count() > 0 ) { 
++  if( cl->trip_count() > 0 ) {
+     do_unroll( loop, old_new, false );
+   }
+ }
+@@ -1047,8 +1044,8 @@
+ // Constrain the main loop iterations so the condition:
+ //    scale_con * I + offset  <  limit
+ // always holds true.  That is, either increase the number of iterations in
+-// the pre-loop or the post-loop until the condition holds true in the main 
+-// loop.  Stride, scale, offset and limit are all loop invariant.  Further, 
++// the pre-loop or the post-loop until the condition holds true in the main
++// loop.  Stride, scale, offset and limit are all loop invariant.  Further,
+ // stride and scale are constants (offset and limit often are).
+ void PhaseIdealLoop::add_constraint( int stride_con, int scale_con, Node *offset, Node *limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ) {
+ 
+@@ -1060,18 +1057,18 @@
+   Node *X = new (C, 3) DivINode( 0, con, scale );
+   register_new_node( X, pre_ctrl );
+ 
+-  // For positive stride, the pre-loop limit always uses a MAX function 
++  // For positive stride, the pre-loop limit always uses a MAX function
+   // and the main loop a MIN function.  For negative stride these are
+-  // reversed.  
+-  
+-  // Also for positive stride*scale the affine function is increasing, so the 
++  // reversed.
++
++  // Also for positive stride*scale the affine function is increasing, so the
+   // pre-loop must check for underflow and the post-loop for overflow.
+   // Negative stride*scale reverses this; pre-loop checks for overflow and
+   // post-loop for underflow.
+   if( stride_con*scale_con > 0 ) {
+     // Compute I < (limit-offset)/scale_con
+     // Adjust main-loop last iteration to be MIN/MAX(main_loop,X)
+-    *main_limit = (stride_con > 0) 
++    *main_limit = (stride_con > 0)
+       ? (Node*)(new (C, 3) MinINode( *main_limit, X ))
+       : (Node*)(new (C, 3) MaxINode( *main_limit, X ));
+     register_new_node( *main_limit, pre_ctrl );
+@@ -1090,7 +1087,7 @@
+     register_new_node( *pre_limit, pre_ctrl );
+ 
+ //   [++] Here's the algebra that justifies the pre-loop limit expression:
+-//   
++//
+ //   NOT( scale_con * I + offset  <  limit )
+ //      ==
+ //   scale_con * I + offset  >=  limit
+@@ -1103,12 +1100,12 @@
+ //      ==
+ //   ( if (scale_con > 0) /*common case*/
+ //       (limit-offset)/scale_con - 1  <  I
+-//     else  
++//     else
+ //       (limit-offset)/scale_con + 1  >  I
+ //    )
+ //   ( if (scale_con > 0) /*common case*/
+ //       (limit-offset)/scale_con + SGN(-scale_con)  <  I
+-//     else  
++//     else
+ //       (limit-offset)/scale_con + SGN(-scale_con)  >  I
+   }
+ }
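
The algebra block above justifies the pre-loop limit; the main-loop side is simpler and worth seeing in isolation. Here is a minimal standalone C++ sketch of the main-limit clamp for the stride_con*scale_con > 0 case, with illustrative names rather than HotSpot API:

#include <algorithm>

// Model of add_constraint's main-limit adjustment when
// stride_con * scale_con > 0: the last legal iteration satisfies
// scale_con * I + offset < limit, i.e. I < (limit - offset) / scale_con,
// so clamp with MIN for an up-counting loop and MAX for a down-counting one.
int adjust_main_limit(int stride_con, int scale_con,
                      int offset, int limit, int main_limit) {
  int x = (limit - offset) / scale_con;
  return (stride_con > 0) ? std::min(main_limit, x)
                          : std::max(main_limit, x);
}
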
+@@ -1153,7 +1150,7 @@
+ bool PhaseIdealLoop::is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth) {
+   if (is_scaled_iv(exp, iv, p_scale)) {
+     if (p_offset != NULL) {
+-      Node *zero = _igvn.intcon(0); 
++      Node *zero = _igvn.intcon(0);
+       set_ctrl(zero, C->root());
+       *p_offset = zero;
+     }
+@@ -1184,7 +1181,7 @@
+   } else if (opc == Op_SubI) {
+     if (is_scaled_iv(exp->in(1), iv, p_scale)) {
+       if (p_offset != NULL) {
+-        Node *zero = _igvn.intcon(0); 
++        Node *zero = _igvn.intcon(0);
+         set_ctrl(zero, C->root());
+         Node *ctrl_off = get_ctrl(exp->in(2));
+         Node* offset = new (C, 3) SubINode(zero, exp->in(2));
+@@ -1219,7 +1216,7 @@
+ 
+   // Find the trip counter; we are iteration splitting based on it
+   Node *trip_counter = cl->phi();
+-  // Find the main loop limit; we will trim its iterations 
++  // Find the main loop limit; we will trim its iterations
+   // to not ever trip end tests
+   Node *main_limit = cl->limit();
+   // Find the pre-loop limit; we will expand its iterations to
+@@ -1267,7 +1264,7 @@
+     return;
+   }
+   int stride_con = cl->stride_con();
+-  Node *zero = _igvn.intcon(0); 
++  Node *zero = _igvn.intcon(0);
+   Node *one  = _igvn.intcon(1);
+   set_ctrl(zero, C->root());
+   set_ctrl(one,  C->root());
+@@ -1316,7 +1313,7 @@
+         rc_exp = cmp->in(2);
+         limit  = cmp->in(1);
+         limit_c = get_ctrl(limit);
+-        if( loop->is_member(get_loop(limit_c) ) ) 
++        if( loop->is_member(get_loop(limit_c) ) )
+           continue;             // Both inputs are loop varying; cannot RCE
+       }
+       // Here we know 'limit' is loop invariant
+@@ -1348,9 +1345,9 @@
+ 
+       // At this point we have the expression as:
+       //   scale_con * trip_counter + offset :: limit
+-      // where scale_con, offset and limit are loop invariant.  Trip_counter 
+-      // monotonically increases by stride_con, a constant.  Both (or either) 
+-      // stride_con and scale_con can be negative which will flip about the 
++      // where scale_con, offset and limit are loop invariant.  Trip_counter
++      // monotonically increases by stride_con, a constant.  Both (or either)
++      // stride_con and scale_con can be negative which will flip about the
+       // sense of the test.
+ 
+       // Adjust pre and main loop limits to guard the correct iteration set
+@@ -1370,7 +1367,7 @@
+           }
+         } else {
+ #ifndef PRODUCT
+-          if( PrintOpto ) 
++          if( PrintOpto )
+             tty->print_cr("missed RCE opportunity");
+ #endif
+           continue;             // In release mode, ignore it
+@@ -1388,7 +1385,7 @@
+           limit = new (C, 3) AddINode( limit, one );
+           register_new_node( limit, pre_ctrl );
+           // Fall into LT case
+-        case BoolTest::lt: 
++        case BoolTest::lt:
+           add_constraint( stride_con, scale_con, offset, limit, pre_ctrl, &pre_limit, &main_limit );
+           if (!conditional_rc) {
+             conditional_rc = !loop->dominates_backedge(iff);
+@@ -1396,7 +1393,7 @@
+           break;
+         default:
+ #ifndef PRODUCT
+-          if( PrintOpto ) 
++          if( PrintOpto )
+             tty->print_cr("missed RCE opportunity");
+ #endif
+           continue;             // Unhandled case
+@@ -1492,11 +1489,11 @@
+ //------------------------------DCE_loop_body----------------------------------
+ // Remove simplistic dead code from loop body
+ void IdealLoopTree::DCE_loop_body() {
+-  for( uint i = 0; i < _body.size(); i++ ) 
+-    if( _body.at(i)->outcnt() == 0 ) 
++  for( uint i = 0; i < _body.size(); i++ )
++    if( _body.at(i)->outcnt() == 0 )
+       _body.map( i--, _body.pop() );
+ }
+-  
++
+ 
+ //------------------------------adjust_loop_exit_prob--------------------------
+ // Look for loop-exit tests with the 50/50 (or worse) guesses from the parsing stage.
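
The DCE_loop_body hunk above relies on a compact remove-by-swap idiom: a dead slot is overwritten with the popped last element and the same index is examined again (the i-- against the loop's i++). The same pattern over std::vector, as a sketch rather than HotSpot's Node_List:

#include <cstddef>
#include <vector>

// Remove elements matching is_dead in O(n), order not preserved:
// overwrite the dead slot with the last element, shrink, and
// re-examine the same index before advancing.
template <typename T, typename Pred>
void swap_remove_if(std::vector<T>& v, Pred is_dead) {
  std::size_t i = 0;
  while (i < v.size()) {
    if (is_dead(v[i])) {
      v[i] = v.back();   // swapped-in element gets checked next pass
      v.pop_back();
    } else {
      i++;
    }
  }
}
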
+@@ -1511,7 +1508,7 @@
+       IfNode *iff = test->in(0)->as_If();
+       if( iff->outcnt() == 2 ) {        // Ignore dead tests
+         Node *bol = iff->in(1);
+-        if( bol && bol->req() > 1 && bol->in(1) && 
++        if( bol && bol->req() > 1 && bol->in(1) &&
+             ((bol->in(1)->Opcode() == Op_StorePConditional ) ||
+              (bol->in(1)->Opcode() == Op_StoreLConditional ) ||
+              (bol->in(1)->Opcode() == Op_CompareAndSwapI ) ||
+@@ -1537,7 +1534,7 @@
+     test = phase->idom(test);
+   }
+ }
+-  
++
+ 
+ //------------------------------policy_do_remove_empty_loop--------------------
+ // Micro-benchmark spamming.  Policy is to always remove empty loops.
+@@ -1553,7 +1550,7 @@
+   if( !phase->is_member(this,phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue)) ) )
+     return false;             // Infinite loop
+ #ifndef PRODUCT
+-  if( PrintOpto ) 
++  if( PrintOpto )
+     tty->print_cr("Removing empty loop");
+ #endif
+ #ifdef ASSERT
+@@ -1579,13 +1576,13 @@
+   phase->C->set_major_progress();
+   return true;
+ }
+-  
++
+ 
+ //=============================================================================
+ //------------------------------iteration_split_impl---------------------------
+ void IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) {
+   // Check and remove empty loops (spam micro-benchmarks)
+-  if( policy_do_remove_empty_loop(phase) ) 
++  if( policy_do_remove_empty_loop(phase) )
+     return;                     // Here we removed an empty loop
+ 
+   bool should_peel = policy_peeling(phase); // Should we peel?
+@@ -1623,7 +1620,7 @@
+   if( cl->is_normal_loop() ) {
+     bool should_maximally_unroll =  policy_maximally_unroll(phase);
+     if( should_maximally_unroll ) {
+-      // Here we did some unrolling and peeling.  Eventually we will 
++      // Here we did some unrolling and peeling.  Eventually we will
+       // completely unroll this loop and it will no longer be a loop.
+       phase->do_maximally_unroll(this,old_new);
+       return;
+@@ -1639,16 +1636,16 @@
+   // front for RCE, and may want to align loop refs to a cache
+   // line.  Thus we clone a full loop up front whose trip count is
+   // at least 1 (if peeling), but may be several more.
+-        
++
+   // The main loop will start cache-line aligned with at least 1
+   // iteration of the unrolled body (zero-trip test required) and
+   // will have some range checks removed.
+-        
++
+   // A post-loop will finish any odd iterations (leftover after
+   // unrolling), plus any needed for RCE purposes.
+ 
+   bool should_unroll = policy_unroll(phase);
+-  
++
+   bool should_rce = policy_range_check(phase);
+ 
+   bool should_align = policy_align(phase);
+@@ -1657,7 +1654,7 @@
+   // need a pre-loop.  We may still need to peel an initial iteration but
+   // we will not be needing an unknown number of pre-iterations.
+   //
+-  // Basically, if may_rce_align reports FALSE first time through, 
++  // Basically, if may_rce_align reports FALSE first time through,
+   // we will not be able to later do RCE or Aligning on this loop.
+   bool may_rce_align = !policy_peel_only(phase) || should_rce || should_align;
+ 
+@@ -1671,7 +1668,7 @@
+     // Adjust the pre- and main-loop limits to let the pre and post loops run
+     // with full checks, but the main-loop with no checks.  Remove said
+     // checks from the main body.
+-    if( should_rce ) 
++    if( should_rce )
+       phase->do_range_check(this,old_new);
+ 
+     // Double loop body for unrolling.  Adjust the minimum-trip test (will do
+@@ -1679,7 +1676,7 @@
+     // an even number of trips).  If we are peeling, we might enable some RCE
+     // and we'd rather unroll the post-RCE'd loop SO... do not unroll if
+     // peeling.
+-    if( should_unroll && !should_peel ) 
++    if( should_unroll && !should_peel )
+       phase->do_unroll(this,old_new, true);
+ 
+     // Adjust the pre-loop limits to align the main body
+@@ -1706,8 +1703,8 @@
+ 
+   // Look for loop-exit tests with my 50/50 guesses from the Parsing stage.
+   // Replace with a 1-in-10 exit guess.
+-  if( _parent /*not the root loop*/ && 
+-      !_irreducible && 
++  if( _parent /*not the root loop*/ &&
++      !_irreducible &&
+       // Also ignore the occasional dead backedge
+       !tail()->is_top() ) {
+     adjust_loop_exit_prob(phase);
+@@ -1725,7 +1722,7 @@
+     }
+   }
+ 
+-  // Minor offset re-organization to remove loop-fallout uses of 
++  // Minor offset re-organization to remove loop-fallout uses of
+   // trip counter.
+   if( _head->is_CountedLoop() ) phase->reorg_offsets( this );
+   if( _next ) _next->iteration_split( phase, old_new );
+diff -ruN openjdk6/hotspot/src/share/vm/opto/loopUnswitch.cpp openjdk/hotspot/src/share/vm/opto/loopUnswitch.cpp
+--- openjdk6/hotspot/src/share/vm/opto/loopUnswitch.cpp	2008-02-28 05:02:39.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/loopUnswitch.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)loopUnswitch.cpp	1.6 07/06/29 13:39:53 JVM"
+-#endif
+ /*
+  * Copyright 2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/opto/machnode.cpp openjdk/hotspot/src/share/vm/opto/machnode.cpp
+--- openjdk6/hotspot/src/share/vm/opto/machnode.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/machnode.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)machnode.cpp	1.199 07/05/05 17:06:21 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -31,11 +28,11 @@
+ //=============================================================================
+ // Return the value requested
+ // result register lookup, corresponding to int_format
+-int MachOper::reg(PhaseRegAlloc *ra_, const Node *node) const { 
++int MachOper::reg(PhaseRegAlloc *ra_, const Node *node) const {
+   return (int)ra_->get_encode(node);
+ }
+ // input register lookup, corresponding to ext_format
+-int MachOper::reg(PhaseRegAlloc *ra_, const Node *node, int idx) const { 
++int MachOper::reg(PhaseRegAlloc *ra_, const Node *node, int idx) const {
+   return (int)(ra_->get_encode(node->in(idx)));
+ }
+ intptr_t  MachOper::constant() const { return 0x00; }
+@@ -73,15 +70,15 @@
+ }
+ 
+ //------------------------------in_RegMask-------------------------------------
+-const RegMask *MachOper::in_RegMask(int index) const { 
+-  ShouldNotReachHere(); 
+-  return NULL; 
++const RegMask *MachOper::in_RegMask(int index) const {
++  ShouldNotReachHere();
++  return NULL;
+ }
+ 
+ //------------------------------dump_spec--------------------------------------
+ // Print any per-operand special info
+ #ifndef PRODUCT
+-void MachOper::dump_spec() const { }
++void MachOper::dump_spec(outputStream *st) const { }
+ #endif
+ 
+ //------------------------------hash-------------------------------------------
+@@ -125,7 +122,7 @@
+ 
+ //=============================================================================
+ //------------------------------MachNode---------------------------------------
+-        
++
+ //------------------------------emit-------------------------------------------
+ void MachNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+   #ifdef ASSERT
+@@ -196,7 +193,7 @@
+     return *Compile::current()->matcher()->idealreg2spillmask[Op_RegP];
+   }
+   uint opcnt     = 1;                 // First operand
+-  uint num_edges = _opnds[1]->num_edges(); // leaves for first operand 
++  uint num_edges = _opnds[1]->num_edges(); // leaves for first operand
+   while( idx >= skipped+num_edges ) {
+     skipped += num_edges;
+     opcnt++;                          // Bump operand count
+@@ -313,7 +310,7 @@
+ 
+   // Direct addressing modes have no base node, simply an indirect
+   // offset, which is always to raw memory.
+-  // %%%%% Someday we'd like to allow constant oop offsets which 
++  // %%%%% Someday we'd like to allow constant oop offsets which
+   // would let Intel load from static globals in 1 instruction.
+   // Currently Intel requires 2 instructions and a register temp.
+   if (base == NULL) {
+@@ -395,7 +392,7 @@
+ }
+ 
+ //------------------------------method_set-------------------------------------
+-// Set the absolute address of a method 
++// Set the absolute address of a method
+ void MachNode::method_set( intptr_t addr ) {
+   ShouldNotCallThis();
+ }
+@@ -422,7 +419,7 @@
+   }
+ 
+   // Defining flags - can't spill these!  Must rematerialize.
+-  if( ideal_reg() == Op_RegFlags ) 
++  if( ideal_reg() == Op_RegFlags )
+     return true;
+ 
+   // Stretching lots of inputs - don't do it.
+@@ -444,35 +441,35 @@
+ #ifndef PRODUCT
+ //------------------------------dump_spec--------------------------------------
+ // Print any per-operand special info
+-void MachNode::dump_spec() const {
++void MachNode::dump_spec(outputStream *st) const {
+   uint cnt = num_opnds();
+   for( uint i=0; i<cnt; i++ )
+-    _opnds[i]->dump_spec();
++    _opnds[i]->dump_spec(st);
+   const TypePtr *t = adr_type();
+   if( t ) {
+     Compile* C = Compile::current();
+     if( C->alias_type(t)->is_volatile() )
+-      tty->print(" Volatile!");
++      st->print(" Volatile!");
+   }
+ }
+ 
+ //------------------------------dump_format------------------------------------
+ // access to virtual
+-void MachNode::dump_format(PhaseRegAlloc *ra) const {
+-  format(ra); // access to virtual
++void MachNode::dump_format(PhaseRegAlloc *ra, outputStream *st) const {
++  format(ra, st); // access to virtual
+ }
+ #endif
+ 
+ //=============================================================================
+ #ifndef PRODUCT
+-void MachTypeNode::dump_spec() const {
+-  _bottom_type->dump();
++void MachTypeNode::dump_spec(outputStream *st) const {
++  _bottom_type->dump_on(st);
+ }
+ #endif
+ 
+ //=============================================================================
+ #ifndef PRODUCT
+-void MachNullCheckNode::format( PhaseRegAlloc *ra_ ) const {
++void MachNullCheckNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
+   int reg = ra_->get_reg_first(in(1)->in(_vidx));
+   tty->print("%s %s", Name(), Matcher::regName[reg]);
+ }
+@@ -488,14 +485,14 @@
+ }
+ 
+ //=============================================================================
+-const Type *MachProjNode::bottom_type() const { 
++const Type *MachProjNode::bottom_type() const {
+   if( _ideal_reg == fat_proj ) return Type::BOTTOM;
+   // Try the normal mechanism first
+   const Type *t = in(0)->bottom_type();
+   if( t->base() == Type::Tuple ) {
+     const TypeTuple *tt = t->is_tuple();
+     if (_con < tt->cnt())
+-      return tt->field_at(_con); 
++      return tt->field_at(_con);
+   }
+   // Else use generic type from ideal register set
+   assert((uint)_ideal_reg < (uint)_last_machine_leaf && Type::mreg2type[_ideal_reg], "in bounds");
+@@ -517,19 +514,19 @@
+ }
+ 
+ #ifndef PRODUCT
+-void MachProjNode::dump_spec() const {
+-  ProjNode::dump_spec();
++void MachProjNode::dump_spec(outputStream *st) const {
++  ProjNode::dump_spec(st);
+   switch (_ideal_reg) {
+-  case unmatched_proj:  tty->print("/unmatched");                         break;
+-  case fat_proj:        tty->print("/fat"); if (WizardMode) _rout.dump(); break;
++  case unmatched_proj:  st->print("/unmatched");                         break;
++  case fat_proj:        st->print("/fat"); if (WizardMode) _rout.dump(); break;
+   }
+ }
+ #endif
+ 
+ //=============================================================================
+ #ifndef PRODUCT
+-void MachIfNode::dump_spec() const {
+-  tty->print("P=%f, C=%f",_prob, _fcnt);
++void MachIfNode::dump_spec(outputStream *st) const {
++  st->print("P=%f, C=%f",_prob, _fcnt);
+ }
+ #endif
+ 
+@@ -537,7 +534,7 @@
+ uint MachReturnNode::size_of() const { return sizeof(*this); }
+ 
+ //------------------------------Registers--------------------------------------
+-const RegMask &MachReturnNode::in_RegMask( uint idx ) const { 
++const RegMask &MachReturnNode::in_RegMask( uint idx ) const {
+   return _in_rms[idx];
+ }
+ 
+@@ -551,7 +548,7 @@
+ const Type *MachSafePointNode::bottom_type() const {  return TypeTuple::MEMBAR; }
+ 
+ //------------------------------Registers--------------------------------------
+-const RegMask &MachSafePointNode::in_RegMask( uint idx ) const { 
++const RegMask &MachSafePointNode::in_RegMask( uint idx ) const {
+   // Values in the domain use the user's calling convention, embodied in the
+   // _in_rms array of RegMasks.
+   if( idx < TypeFunc::Parms ) return _in_rms[idx];
+@@ -575,11 +572,11 @@
+ const Type *MachCallNode::Value(PhaseTransform *phase) const { return tf()->range(); }
+ 
+ #ifndef PRODUCT
+-void MachCallNode::dump_spec() const { 
+-  tty->print("# "); 
+-  tf()->dump();
+-  if (_cnt != COUNT_UNKNOWN)  tty->print(" C=%f",_cnt);
+-  if (jvms() != NULL)  jvms()->dump_spec();
++void MachCallNode::dump_spec(outputStream *st) const {
++  st->print("# ");
++  tf()->dump_on(st);
++  if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
++  if (jvms() != NULL)  jvms()->dump_spec(st);
+ }
+ #endif
+ 
+@@ -603,7 +600,7 @@
+ 
+ 
+ //------------------------------Registers--------------------------------------
+-const RegMask &MachCallNode::in_RegMask( uint idx ) const { 
++const RegMask &MachCallNode::in_RegMask( uint idx ) const {
+   // Values in the domain use the user's calling convention, embodied in the
+   // _in_rms array of RegMasks.
+   if (idx < tf()->domain()->cnt())  return _in_rms[idx];
+@@ -613,25 +610,25 @@
+ 
+ //=============================================================================
+ uint MachCallJavaNode::size_of() const { return sizeof(*this); }
+-uint MachCallJavaNode::cmp( const Node &n ) const { 
++uint MachCallJavaNode::cmp( const Node &n ) const {
+   MachCallJavaNode &call = (MachCallJavaNode&)n;
+-  return MachCallNode::cmp(call) && _method->equals(call._method); 
++  return MachCallNode::cmp(call) && _method->equals(call._method);
+ }
+ #ifndef PRODUCT
+-void MachCallJavaNode::dump_spec() const { 
++void MachCallJavaNode::dump_spec(outputStream *st) const {
+   if( _method ) {
+-    _method->print_short_name();
+-    tty->print(" ");
++    _method->print_short_name(st);
++    st->print(" ");
+   }
+-  MachCallNode::dump_spec();
++  MachCallNode::dump_spec(st);
+ }
+ #endif
+ 
+ //=============================================================================
+ uint MachCallStaticJavaNode::size_of() const { return sizeof(*this); }
+-uint MachCallStaticJavaNode::cmp( const Node &n ) const { 
++uint MachCallStaticJavaNode::cmp( const Node &n ) const {
+   MachCallStaticJavaNode &call = (MachCallStaticJavaNode&)n;
+-  return MachCallJavaNode::cmp(call) && _name == call._name; 
++  return MachCallJavaNode::cmp(call) && _name == call._name;
+ }
+ 
+ //----------------------------uncommon_trap_request----------------------------
+@@ -645,49 +642,49 @@
+ 
+ #ifndef PRODUCT
+ // Helper for summarizing uncommon_trap arguments.
+-void MachCallStaticJavaNode::dump_trap_args() const {
++void MachCallStaticJavaNode::dump_trap_args(outputStream *st) const {
+   int trap_req = uncommon_trap_request();
+   if (trap_req != 0) {
+     char buf[100];
+-    tty->print("(%s)",
++    st->print("(%s)",
+                Deoptimization::format_trap_request(buf, sizeof(buf),
+                                                    trap_req));
+   }
+ }
+ 
+-void MachCallStaticJavaNode::dump_spec() const { 
+-  tty->print("Static ");
++void MachCallStaticJavaNode::dump_spec(outputStream *st) const {
++  st->print("Static ");
+   if (_name != NULL) {
+-    tty->print("wrapper for: %s", _name );
+-    dump_trap_args();
+-    tty->print(" ");
++    st->print("wrapper for: %s", _name );
++    dump_trap_args(st);
++    st->print(" ");
+   }
+-  MachCallJavaNode::dump_spec();
++  MachCallJavaNode::dump_spec(st);
+ }
+ #endif
+ 
+ //=============================================================================
+ #ifndef PRODUCT
+-void MachCallDynamicJavaNode::dump_spec() const { 
+-  tty->print("Dynamic ");
+-  MachCallJavaNode::dump_spec();
++void MachCallDynamicJavaNode::dump_spec(outputStream *st) const {
++  st->print("Dynamic ");
++  MachCallJavaNode::dump_spec(st);
+ }
+ #endif
+ //=============================================================================
+ uint MachCallRuntimeNode::size_of() const { return sizeof(*this); }
+-uint MachCallRuntimeNode::cmp( const Node &n ) const { 
++uint MachCallRuntimeNode::cmp( const Node &n ) const {
+   MachCallRuntimeNode &call = (MachCallRuntimeNode&)n;
+   return MachCallNode::cmp(call) && !strcmp(_name,call._name);
+ }
+ #ifndef PRODUCT
+-void MachCallRuntimeNode::dump_spec() const { 
+-  tty->print("%s ",_name);
+-  MachCallNode::dump_spec();
++void MachCallRuntimeNode::dump_spec(outputStream *st) const {
++  st->print("%s ",_name);
++  MachCallNode::dump_spec(st);
+ }
+ #endif
+ //=============================================================================
+ // A shared JVMState for all HaltNodes.  Indicates the start of debug info
+-// is at TypeFunc::Parms.  Only required for SOE register spill handling - 
++// is at TypeFunc::Parms.  Only required for SOE register spill handling -
+ // to indicate where the stack-slot-only debug info inputs begin.
+ // There is no other JVM state needed here.
+ JVMState jvms_for_throw(0);
+@@ -697,14 +694,14 @@
+ 
+ //=============================================================================
+ #ifndef PRODUCT
+-void labelOper::int_format(PhaseRegAlloc *ra, const MachNode *node) const {
+-  tty->print("B%d", _block_num);
++void labelOper::int_format(PhaseRegAlloc *ra, const MachNode *node, outputStream *st) const {
++  st->print("B%d", _block_num);
+ }
+ #endif // PRODUCT
+ 
+ //=============================================================================
+ #ifndef PRODUCT
+-void methodOper::int_format(PhaseRegAlloc *ra, const MachNode *node) const {
+-  tty->print(INTPTR_FORMAT, _method);
++void methodOper::int_format(PhaseRegAlloc *ra, const MachNode *node, outputStream *st) const {
++  st->print(INTPTR_FORMAT, _method);
+ }
+ #endif // PRODUCT
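
Nearly every machnode.cpp hunk above makes the same mechanical change: the dump/format virtuals gain an explicit outputStream* parameter and print to it instead of the global tty, so callers can redirect node dumps (into a buffer, a file, and so on). A minimal sketch of the before/after shape, using stand-in types rather than HotSpot's real classes:

#include <cstdarg>
#include <cstdio>

struct outputStream {             // stand-in for HotSpot's outputStream
  void print(const char* fmt, ...) {
    va_list ap;
    va_start(ap, fmt);
    vprintf(fmt, ap);
    va_end(ap);
  }
};

struct MachIfNodeModel {
  float _prob = 0.5f;
  float _fcnt = 100.0f;
  // Before: void dump_spec() const      { tty->print("P=%f, C=%f", ...); }
  // After:  the destination is a parameter, not a global.
  void dump_spec(outputStream* st) const {
    st->print("P=%f, C=%f", _prob, _fcnt);
  }
};

The payoff shows up in callers: dump_spec(&stream) can target any sink, where the old form was pinned to tty.
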
+diff -ruN openjdk6/hotspot/src/share/vm/opto/machnode.hpp openjdk/hotspot/src/share/vm/opto/machnode.hpp
+--- openjdk6/hotspot/src/share/vm/opto/machnode.hpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/machnode.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)machnode.hpp	1.202 07/05/17 15:59:11 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class BufferBlob;
+@@ -54,13 +51,13 @@
+   // Allocate right next to the MachNodes in the same arena
+   void *operator new( size_t x, Compile* C ) { return C->node_arena()->Amalloc_D(x); }
+ 
+-  // Opcode 
++  // Opcode
+   virtual uint opcode() const = 0;
+ 
+   // Number of input edges.
+   // Generally at least 1
+   virtual uint num_edges() const { return 1; }
+-  // Array of Register masks 
++  // Array of Register masks
+   virtual const RegMask *in_RegMask(int index) const;
+ 
+   // Methods to output the encoding of the operand
+@@ -88,7 +85,7 @@
+     return ::as_FloatRegister(reg(ra_, node, idx));
+   }
+ 
+-#if defined(IA32)
++#if defined(IA32) || defined(AMD64)
+   XMMRegister  as_XMMRegister(PhaseRegAlloc *ra_, const Node *node)   const {
+     return ::as_XMMRegister(reg(ra_, node));
+   }
+@@ -118,7 +115,7 @@
+   virtual int  index_position() const;  // index edge position, or -1
+ 
+   // Access the TypeKlassPtr of operands with a base==RegI and disp==RegP
+-  // Only returns non-null value for i486.ad's indOffset32X 
++  // Only returns non-null value for i486.ad's indOffset32X
+   virtual const TypePtr *disp_as_type() const { return NULL; }
+ 
+   // Return the label
+@@ -145,10 +142,10 @@
+   virtual const char    *Name() const { return "???";}
+ 
+   // Methods to output the text version of the operand
+-  virtual void int_format(PhaseRegAlloc *,const MachNode *node) const = 0;
+-  virtual void ext_format(PhaseRegAlloc *,const MachNode *node,int idx) const=0;
++  virtual void int_format(PhaseRegAlloc *,const MachNode *node, outputStream *st) const = 0;
++  virtual void ext_format(PhaseRegAlloc *,const MachNode *node,int idx, outputStream *st) const=0;
+ 
+-  virtual void dump_spec() const; // Print per-operand info
++  virtual void dump_spec(outputStream *st) const; // Print per-operand info
+ #endif
+ };
+ 
+@@ -170,7 +167,7 @@
+ 
+   // Copy inputs and operands to new node of instruction.
+   // Called from cisc_version() and short_branch_version().
+-  // !!!! The method's body is defined in ad_<arch>.cpp file. 
++  // !!!! The method's body is defined in ad_<arch>.cpp file.
+   void fill_new_machnode(MachNode *n, Compile* C) const;
+ 
+   // Return an equivalent instruction using memory for cisc_operand position
+@@ -192,7 +189,7 @@
+   virtual const RegMask *cisc_RegMask() const { return NULL; }
+ 
+   // If this instruction is a 2-address instruction, then return the
+-  // index of the input which must match the output.  Not necessary 
++  // index of the input which must match the output.  Not necessary
+   // for instructions which bind the input and output register to the
+   // same singleton register (e.g., Intel IDIV which binds AX to be
+   // both an input and an output).  It is necessary when the input and
+@@ -243,7 +240,7 @@
+ 
+   // If this is a memory op, return the base pointer and fixed offset.
+   // If there are no such, return NULL.  If there are multiple addresses
+-  // or the address is indeterminate (rare cases) then return (Node*)-1, 
++  // or the address is indeterminate (rare cases) then return (Node*)-1,
+   // which serves as node bottom.
+   // If the offset is not statically determined, set it to Type::OffsetBot.
+   // This method is free to ignore stack slots if that helps.
+@@ -252,7 +249,7 @@
+   const Node* get_base_and_disp(intptr_t &offset, const TypePtr* &adr_type) const;
+ 
+   // Helper for get_base_and_disp: find the base and index input nodes.
+-  // Returns the MachOper as determined by memory_operand(), for use, if 
++  // Returns the MachOper as determined by memory_operand(), for use, if
+   // needed by the caller. If (MachOper *)-1 is returned, base and index
+   // are set to NodeSentinel. If (MachOper *) NULL is returned, base and
+   // index are set to NULL.
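
The two comment blocks above describe a tri-state return convention: NULL means "not a memory op", the sentinel value (Node*)-1 (NodeSentinel, or (MachOper*)-1 for memory_operand) means "multiple or indeterminate addresses", and any other pointer is the real result. The idiom in isolation, with stand-in types:

struct Node;  // opaque stand-in for HotSpot's Node

// "Node bottom": a value distinct from both nullptr and any real
// node, mirroring the NodeSentinel / (Node*)-1 convention above.
static Node* const NODE_SENTINEL = reinterpret_cast<Node*>(-1);

inline bool is_indeterminate_address(const Node* base) {
  return base == NODE_SENTINEL;   // multiple/unknown addresses
}
inline bool is_not_memory_op(const Node* base) {
  return base == nullptr;         // no base: not a memory operand
}
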
+@@ -280,11 +277,11 @@
+ 
+   // Set the branch inside jump MachNodes.  Error for non-branch Nodes.
+   virtual void label_set( Label& label, uint block_num );
+-  
++
+   // Adds the label for the case
+   virtual void add_case_label( int switch_val, Label* blockLabel);
+ 
+-  // Set the absolute address for methods 
++  // Set the absolute address for methods
+   virtual void method_set( intptr_t addr );
+ 
+   // Should we clone rather than spill this instruction?
+@@ -296,8 +293,8 @@
+ 
+ #ifndef PRODUCT
+   virtual const char *Name() const = 0; // Machine-specific name
+-  virtual void dump_spec() const; // Print per-node info
+-  void         dump_format(PhaseRegAlloc *ra) const; // access to virtual
++  virtual void dump_spec(outputStream *st) const; // Print per-node info
++  void         dump_format(PhaseRegAlloc *ra, outputStream *st) const; // access to virtual
+ #endif
+ };
+ 
+@@ -324,7 +321,7 @@
+ 
+   virtual const class Type *bottom_type() const { return _bottom_type; }
+ #ifndef PRODUCT
+-  virtual void dump_spec() const;
++  virtual void dump_spec(outputStream *st) const;
+ #endif
+ };
+ 
+@@ -338,7 +335,7 @@
+ 
+ #ifndef PRODUCT
+   virtual const char *Name() const { return "Breakpoint"; }
+-  virtual void format( PhaseRegAlloc * ) const;
++  virtual void format( PhaseRegAlloc *, outputStream *st ) const;
+ #endif
+ };
+ 
+@@ -352,7 +349,7 @@
+ 
+ #ifndef PRODUCT
+   virtual const char *Name() const { return "Unvalidated-Entry-Point"; }
+-  virtual void format( PhaseRegAlloc * ) const;
++  virtual void format( PhaseRegAlloc *, outputStream *st ) const;
+ #endif
+ };
+ 
+@@ -364,11 +361,10 @@
+   virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
+   virtual uint size(PhaseRegAlloc *ra_) const;
+   virtual int reloc() const;
+-  uint implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size ) const;
+ 
+ #ifndef PRODUCT
+   virtual const char *Name() const { return "Prolog"; }
+-  virtual void format( PhaseRegAlloc * ) const;
++  virtual void format( PhaseRegAlloc *, outputStream *st ) const;
+ #endif
+ };
+ 
+@@ -393,7 +389,7 @@
+ 
+ #ifndef PRODUCT
+   virtual const char *Name() const { return "Epilog"; }
+-  virtual void format( PhaseRegAlloc * ) const;
++  virtual void format( PhaseRegAlloc *, outputStream *st ) const;
+ #endif
+ };
+ 
+@@ -414,8 +410,8 @@
+   virtual const Pipeline *pipeline() const;
+ #ifndef PRODUCT
+   virtual const char *Name() const { return "Nop"; }
+-  virtual void format( PhaseRegAlloc * ) const;
+-  virtual void dump_spec() const { } // No per-operand info
++  virtual void format( PhaseRegAlloc *, outputStream *st ) const;
++  virtual void dump_spec(outputStream *st) const { } // No per-operand info
+ #endif
+ };
+ 
+@@ -427,14 +423,14 @@
+   const RegMask *_out;          // RegMask for output
+   const Type *_type;
+ public:
+-  MachSpillCopyNode( Node *n, const RegMask &in, const RegMask &out ) : 
++  MachSpillCopyNode( Node *n, const RegMask &in, const RegMask &out ) :
+     MachIdealNode(), _in(&in), _out(&out), _type(n->bottom_type()) {
+     init_class_id(Class_MachSpillCopy);
+     init_flags(Flag_is_Copy);
+     add_req(NULL);
+     add_req(n);
+   }
+-  virtual uint size_of() const { return sizeof(*this); } 
++  virtual uint size_of() const { return sizeof(*this); }
+   void set_out_RegMask(const RegMask &out) { _out = &out; }
+   void set_in_RegMask(const RegMask &in) { _in = &in; }
+   virtual const RegMask &out_RegMask() const { return *_out; }
+@@ -442,14 +438,14 @@
+   virtual const class Type *bottom_type() const { return _type; }
+   virtual uint ideal_reg() const { return Matcher::base2reg[_type->base()]; }
+   virtual uint oper_input_base() const { return 1; }
+-  uint implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size ) const;
++  uint implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const;
+ 
+   virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
+   virtual uint size(PhaseRegAlloc *ra_) const;
+ 
+ #ifndef PRODUCT
+   virtual const char *Name() const { return "MachSpillCopy"; }
+-  virtual void format( PhaseRegAlloc * ) const;
++  virtual void format( PhaseRegAlloc *, outputStream *st ) const;
+ #endif
+ };
+ 
+@@ -457,7 +453,7 @@
+ // Machine-dependent null-pointer-check Node.  Points to a real MachNode that is
+ // also some kind of memory op.  Turns the indicated MachNode into a
+ // conditional branch with good latency on the ptr-not-null path and awful
+-// latency on the pointer-is-null path.  
++// latency on the pointer-is-null path.
+ 
+ class MachNullCheckNode : public MachIdealNode {
+ public:
+@@ -478,7 +474,7 @@
+   virtual const RegMask &out_RegMask() const { return RegMask::Empty; }
+ #ifndef PRODUCT
+   virtual const char *Name() const { return "NullCheck"; }
+-  virtual void format( PhaseRegAlloc * ) const;
++  virtual void format( PhaseRegAlloc *, outputStream *st ) const;
+ #endif
+ };
+ 
+@@ -505,10 +501,10 @@
+   virtual const RegMask &in_RegMask(uint) const { return RegMask::Empty; }
+   virtual const RegMask &out_RegMask() const { return _rout; }
+   virtual uint  ideal_reg() const { return _ideal_reg; }
+-  // Need size_of() for virtual ProjNode::clone() 
++  // Need size_of() for virtual ProjNode::clone()
+   virtual uint  size_of() const { return sizeof(MachProjNode); }
+ #ifndef PRODUCT
+-  virtual void dump_spec() const;
++  virtual void dump_spec(outputStream *st) const;
+ #endif
+ };
+ 
+@@ -523,7 +519,7 @@
+     init_class_id(Class_MachIf);
+   }
+ #ifndef PRODUCT
+-  virtual void dump_spec() const;
++  virtual void dump_spec(outputStream *st) const;
+ #endif
+ };
+ 
+@@ -652,7 +648,7 @@
+   bool returns_long() const { return tf()->return_type() == T_LONG; }
+   bool return_value_is_used() const;
+ #ifndef PRODUCT
+-  virtual void dump_spec() const;
++  virtual void dump_spec(outputStream *st) const;
+ #endif
+ };
+ 
+@@ -670,7 +666,7 @@
+     init_class_id(Class_MachCallJava);
+   }
+ #ifndef PRODUCT
+-  virtual void dump_spec() const;
++  virtual void dump_spec(outputStream *st) const;
+ #endif
+ };
+ 
+@@ -690,8 +686,8 @@
+ 
+   virtual int ret_addr_offset();
+ #ifndef PRODUCT
+-  virtual void dump_spec() const;
+-  void dump_trap_args() const;
++  virtual void dump_spec(outputStream *st) const;
++  void dump_trap_args(outputStream *st) const;
+ #endif
+ };
+ 
+@@ -706,7 +702,7 @@
+   }
+   virtual int ret_addr_offset();
+ #ifndef PRODUCT
+-  virtual void dump_spec() const;
++  virtual void dump_spec(outputStream *st) const;
+ #endif
+ };
+ 
+@@ -722,7 +718,7 @@
+   }
+   virtual int ret_addr_offset();
+ #ifndef PRODUCT
+-  virtual void dump_spec() const;
++  virtual void dump_spec(outputStream *st) const;
+ #endif
+ };
+ 
+@@ -743,7 +739,7 @@
+ 
+ //------------------------------MachTempNode-----------------------------------
+ // Node used by the adlc to construct inputs to represent temporary registers
+-class MachTempNode : public MachNode { 
++class MachTempNode : public MachNode {
+ private:
+   MachOper *_opnd_array[1];
+ 
+@@ -762,7 +758,7 @@
+   virtual uint size_of() const { return sizeof(MachTempNode); }
+ 
+ #ifndef PRODUCT
+-  virtual void format(PhaseRegAlloc *ra) const {}
++  virtual void format(PhaseRegAlloc *, outputStream *st ) const {}
+   virtual const char *Name() const { return "MachTemp";}
+ #endif
+ };
+@@ -771,7 +767,7 @@
+ 
+ //------------------------------labelOper--------------------------------------
+ // Machine-independent version of label operand
+-class labelOper : public MachOper { 
++class labelOper : public MachOper {
+ private:
+   virtual uint           num_edges() const { return 0; }
+ public:
+@@ -797,15 +793,15 @@
+ #ifndef PRODUCT
+   virtual const char    *Name()   const { return "Label";}
+ 
+-  virtual void int_format(PhaseRegAlloc *ra, const MachNode *node) const;
+-  virtual void ext_format(PhaseRegAlloc *ra, const MachNode *node, int idx) const { int_format( ra, node ); }
++  virtual void int_format(PhaseRegAlloc *ra, const MachNode *node, outputStream *st) const;
++  virtual void ext_format(PhaseRegAlloc *ra, const MachNode *node, int idx, outputStream *st) const { int_format( ra, node, st ); }
+ #endif
+ };
+ 
+ 
+ //------------------------------methodOper--------------------------------------
+ // Machine-independent version of method operand
+-class methodOper : public MachOper { 
++class methodOper : public MachOper {
+ private:
+   virtual uint           num_edges() const { return 0; }
+ public:
+@@ -824,7 +820,7 @@
+ #ifndef PRODUCT
+   virtual const char    *Name()   const { return "Method";}
+ 
+-  virtual void int_format(PhaseRegAlloc *ra, const MachNode *node) const;
+-  virtual void ext_format(PhaseRegAlloc *ra, const MachNode *node, int idx) const { int_format( ra, node ); }
++  virtual void int_format(PhaseRegAlloc *ra, const MachNode *node, outputStream *st) const;
++  virtual void ext_format(PhaseRegAlloc *ra, const MachNode *node, int idx, outputStream *st) const { int_format( ra, node, st ); }
+ #endif
+ };
+diff -ruN openjdk6/hotspot/src/share/vm/opto/macro.cpp openjdk/hotspot/src/share/vm/opto/macro.cpp
+--- openjdk6/hotspot/src/share/vm/opto/macro.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/macro.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)macro.cpp	1.31 07/07/10 21:32:44 JVM"
+-#endif
+ /*
+  * Copyright 2005-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -71,14 +68,14 @@
+ }
+ 
+ Node* PhaseMacroExpand::opt_iff(Node* region, Node* iff) {
+-  IfNode *opt_iff = _igvn.register_new_node_with_optimizer(iff)->as_If();
++  IfNode *opt_iff = transform_later(iff)->as_If();
+ 
+   // Fast path taken; set region slot 2
+-  Node *fast_taken = _igvn.register_new_node_with_optimizer( new (C, 1) IfFalseNode(opt_iff) );
++  Node *fast_taken = transform_later( new (C, 1) IfFalseNode(opt_iff) );
+   region->init_req(2,fast_taken); // Capture fast-control
+ 
+   // Fast path not-taken, i.e. slow path
+-  Node *slow_taken = _igvn.register_new_node_with_optimizer( new (C, 1) IfTrueNode(opt_iff) );
++  Node *slow_taken = transform_later( new (C, 1) IfTrueNode(opt_iff) );
+   return slow_taken;
+ }
+ 
+@@ -109,7 +106,7 @@
+   call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
+   _igvn.hash_delete(oldcall);
+   _igvn.subsume_node(oldcall, call);
+-  _igvn.register_new_node_with_optimizer(call);
++  transform_later(call);
+ 
+   return call;
+ }
+@@ -170,11 +167,29 @@
+ }
+ 
+ 
++//---------------------------set_eden_pointers-------------------------
++void PhaseMacroExpand::set_eden_pointers(Node* &eden_top_adr, Node* &eden_end_adr) {
++  if (UseTLAB) {                // Private allocation: load from TLS
++    Node* thread = transform_later(new (C, 1) ThreadLocalNode());
++    int tlab_top_offset = in_bytes(JavaThread::tlab_top_offset());
++    int tlab_end_offset = in_bytes(JavaThread::tlab_end_offset());
++    eden_top_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_top_offset);
++    eden_end_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_end_offset);
++  } else {                      // Shared allocation: load from globals
++    CollectedHeap* ch = Universe::heap();
++    address top_adr = (address)ch->top_addr();
++    address end_adr = (address)ch->end_addr();
++    eden_top_adr = makecon(TypeRawPtr::make(top_adr));
++    eden_end_adr = basic_plus_adr(eden_top_adr, end_adr - top_adr);
++  }
++}
++
++
+ Node* PhaseMacroExpand::make_load(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt) {
+   Node* adr = basic_plus_adr(base, offset);
+   const TypePtr* adr_type = TypeRawPtr::BOTTOM;
+   Node* value = LoadNode::make(C, ctl, mem, adr, adr_type, value_type, bt);
+-  _igvn.register_new_node_with_optimizer(value);
++  transform_later(value);
+   return value;
+ }
+ 
+@@ -182,16 +197,16 @@
+ Node* PhaseMacroExpand::make_store(Node* ctl, Node* mem, Node* base, int offset, Node* value, BasicType bt) {
+   Node* adr = basic_plus_adr(base, offset);
+   mem = StoreNode::make(C, ctl, mem, adr, NULL, value, bt);
+-  _igvn.register_new_node_with_optimizer(mem);
++  transform_later(mem);
+   return mem;
+ }
+ 
+ //=============================================================================
+-// 
+-//                              A L L O C A T I O N 
++//
++//                              A L L O C A T I O N
+ //
+ // Allocation attempts to be fast in the case of frequent small objects.
+-// It breaks down like this: 
++// It breaks down like this:
+ //
+ // 1) Size in doublewords is computed.  This is a constant for objects and
+ // variable for most arrays.  Doubleword units are used to avoid size
+@@ -218,7 +233,7 @@
+ // 5) If NOT using TLABs, Store-Conditional the adjusted heap top back
+ // down.  If contended, repeat at step 3.  If using TLABs normal-store
+ // adjusted heap top back down; there is no contention.
+-// 
++//
+ // 6) If !ZeroTLAB then Bulk-clear the object/array.  Fill in klass & mark
+ // fields.
+ //
+@@ -254,8 +269,10 @@
+   Node* size_in_bytes     = alloc->in(AllocateNode::AllocSize);
+   Node* klass_node        = alloc->in(AllocateNode::KlassNode);
+   Node* initial_slow_test = alloc->in(AllocateNode::InitialTest);
+-  Node* eden_top_adr      = alloc->in(AllocateNode::EdenTop);
+-  Node* eden_end_adr      = alloc->in(AllocateNode::EdenEnd);
++
++  Node* eden_top_adr;
++  Node* eden_end_adr;
++  set_eden_pointers(eden_top_adr, eden_end_adr);
+ 
+   uint raw_idx = C->get_alias_index(TypeRawPtr::BOTTOM);
+   assert(ctrl != NULL, "must have control");
+@@ -310,13 +327,13 @@
+     // might be a TRUE for finalizers or a fancy class check for
+     // newInstance0.
+     IfNode *toobig_iff = new (C, 2) IfNode(ctrl, initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
+-    _igvn.register_new_node_with_optimizer(toobig_iff);
++    transform_later(toobig_iff);
+     // Plug the failing-too-big test into the slow-path region
+     Node *toobig_true = new (C, 1) IfTrueNode( toobig_iff );
+-    _igvn.register_new_node_with_optimizer(toobig_true);
++    transform_later(toobig_true);
+     slow_region    ->init_req( too_big_or_final_path, toobig_true );
+     toobig_false = new (C, 1) IfFalseNode( toobig_iff );
+-    _igvn.register_new_node_with_optimizer(toobig_false);
++    transform_later(toobig_false);
+   } else {         // No initial test, just fall into next case
+     toobig_false = ctrl;
+     debug_only(slow_region = NodeSentinel);
+@@ -345,42 +362,42 @@
+       // loop-back merge point.
+       contended_region    ->init_req( fall_in_path, toobig_false );
+       contended_phi_rawmem->init_req( fall_in_path, mem );
+-      _igvn.register_new_node_with_optimizer(contended_region);
+-      _igvn.register_new_node_with_optimizer(contended_phi_rawmem);
++      transform_later(contended_region);
++      transform_later(contended_phi_rawmem);
+     }
+-  
+-    // Load(-locked) the heap top.  
++
++    // Load(-locked) the heap top.
+     // See note above concerning the control input when using a TLAB
+-    Node *old_eden_top = UseTLAB 
++    Node *old_eden_top = UseTLAB
+       ? new (C, 3) LoadPNode     ( ctrl, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM )
+       : new (C, 3) LoadPLockedNode( contended_region, contended_phi_rawmem, eden_top_adr );
+ 
+-    _igvn.register_new_node_with_optimizer(old_eden_top);
++    transform_later(old_eden_top);
+     // Add to heap top to get a new heap top
+     Node *new_eden_top = new (C, 4) AddPNode( top(), old_eden_top, size_in_bytes );
+-    _igvn.register_new_node_with_optimizer(new_eden_top);
++    transform_later(new_eden_top);
+     // Check for needing a GC; compare against heap end
+     Node *needgc_cmp = new (C, 3) CmpPNode( new_eden_top, eden_end );
+-    _igvn.register_new_node_with_optimizer(needgc_cmp);
++    transform_later(needgc_cmp);
+     Node *needgc_bol = new (C, 2) BoolNode( needgc_cmp, BoolTest::ge );
+-    _igvn.register_new_node_with_optimizer(needgc_bol);
++    transform_later(needgc_bol);
+     IfNode *needgc_iff = new (C, 2) IfNode(contended_region, needgc_bol, PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN );
+-    _igvn.register_new_node_with_optimizer(needgc_iff);
+-    
++    transform_later(needgc_iff);
++
+     // Plug the failing-heap-space-need-gc test into the slow-path region
+     Node *needgc_true = new (C, 1) IfTrueNode( needgc_iff );
+-    _igvn.register_new_node_with_optimizer(needgc_true);
++    transform_later(needgc_true);
+     if( initial_slow_test ) {
+       slow_region    ->init_req( need_gc_path, needgc_true );
+       // This completes all paths into the slow merge point
+-      _igvn.register_new_node_with_optimizer(slow_region);
++      transform_later(slow_region);
+     } else {                      // No initial slow path needed!
+       // Just fall from the need-GC path straight into the VM call.
+       slow_region    = needgc_true;
+     }
+     // No need for a GC.  Setup for the Store-Conditional
+     Node *needgc_false = new (C, 1) IfFalseNode( needgc_iff );
+-    _igvn.register_new_node_with_optimizer(needgc_false);
++    transform_later(needgc_false);
+ 
+     // Grab regular I/O before optional prefetch may change it.
+     // Slow-path does no I/O so just set it to the original I/O.
+@@ -396,28 +413,28 @@
+     Node *fast_oop_ctrl;
+     if( UseTLAB ) {
+       store_eden_top = new (C, 4) StorePNode( needgc_false, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, new_eden_top );
+-      _igvn.register_new_node_with_optimizer(store_eden_top);
++      transform_later(store_eden_top);
+       fast_oop_ctrl = needgc_false; // No contention, so this is the fast path
+     } else {
+       store_eden_top = new (C, 5) StorePConditionalNode( needgc_false, contended_phi_rawmem, eden_top_adr, new_eden_top, old_eden_top );
+-      _igvn.register_new_node_with_optimizer(store_eden_top);
++      transform_later(store_eden_top);
+       Node *contention_check = new (C, 2) BoolNode( store_eden_top, BoolTest::ne );
+-      _igvn.register_new_node_with_optimizer(contention_check);
++      transform_later(contention_check);
+       store_eden_top = new (C, 1) SCMemProjNode(store_eden_top);
+-      _igvn.register_new_node_with_optimizer(store_eden_top);
++      transform_later(store_eden_top);
+ 
+       // If not using TLABs, check to see if there was contention.
+       IfNode *contention_iff = new (C, 2) IfNode ( needgc_false, contention_check, PROB_MIN, COUNT_UNKNOWN );
+-      _igvn.register_new_node_with_optimizer(contention_iff);
++      transform_later(contention_iff);
+       Node *contention_true = new (C, 1) IfTrueNode( contention_iff );
+-      _igvn.register_new_node_with_optimizer(contention_true);
++      transform_later(contention_true);
+       // If contention, loopback and try again.
+       contended_region->init_req( contended_loopback_path, contention_true );
+       contended_phi_rawmem->init_req( contended_loopback_path, store_eden_top );
+ 
+       // Fast-path succeeded with no contention!
+       Node *contention_false = new (C, 1) IfFalseNode( contention_iff );
+-      _igvn.register_new_node_with_optimizer(contention_false);
++      transform_later(contention_false);
+       fast_oop_ctrl = contention_false;
+     }
+ 
+@@ -438,7 +455,7 @@
+ 
+       // Get base of thread-local storage area
+       Node* thread = new (C, 1) ThreadLocalNode();
+-      _igvn.register_new_node_with_optimizer(thread);
++      transform_later(thread);
+ 
+       call->init_req(TypeFunc::Parms+0, thread);
+       call->init_req(TypeFunc::Parms+1, fast_oop);
+@@ -447,11 +464,11 @@
+       call->init_req( TypeFunc::Memory , fast_oop_rawmem );
+       call->init_req( TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr) );
+       call->init_req( TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr) );
+-      _igvn.register_new_node_with_optimizer(call);
++      transform_later(call);
+       fast_oop_ctrl = new (C, 1) ProjNode(call,TypeFunc::Control);
+-      _igvn.register_new_node_with_optimizer(fast_oop_ctrl);
++      transform_later(fast_oop_ctrl);
+       fast_oop_rawmem = new (C, 1) ProjNode(call,TypeFunc::Memory);
+-      _igvn.register_new_node_with_optimizer(fast_oop_rawmem);
++      transform_later(fast_oop_rawmem);
+     }
+ 
+     // Plug in the successful fast-path into the result merge point
+@@ -476,8 +493,9 @@
+   call->init_req( TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr) );
+ 
+   call->init_req(TypeFunc::Parms+0, klass_node);
+-  if (length != NULL)
++  if (length != NULL) {
+     call->init_req(TypeFunc::Parms+1, length);
++  }
+ 
+   // Copy debug information and adjust JVMState information, then replace
+   // allocate node with the call
+@@ -487,7 +505,7 @@
+   }
+   _igvn.hash_delete(alloc);
+   _igvn.subsume_node(alloc, call);
+-  _igvn.register_new_node_with_optimizer(call);
++  transform_later(call);
+ 
+   // Identify the output projections from the allocate node and
+   // adjust any references to them.
+@@ -518,7 +536,7 @@
+   if (_memproj_catchall != NULL ) {
+     if (_memproj_fallthrough == NULL) {
+       _memproj_fallthrough = new (C, 1) ProjNode(call, TypeFunc::Memory);
+-      _igvn.register_new_node_with_optimizer(_memproj_fallthrough);
++      transform_later(_memproj_fallthrough);
+     }
+     for (DUIterator_Fast imax, i = _memproj_catchall->fast_outs(imax); i < imax; i++) {
+       Node *use = _memproj_catchall->fast_out(i);
+@@ -536,7 +554,7 @@
+   // Replace uses of the control i_o projection with result_phi_i_o (unless we are only generating a slow call)
+   if (_ioproj_fallthrough == NULL) {
+     _ioproj_fallthrough = new (C, 1) ProjNode(call, TypeFunc::I_O);
+-    _igvn.register_new_node_with_optimizer(_ioproj_fallthrough);
++    transform_later(_ioproj_fallthrough);
+   } else if (!always_slow) {
+     for (DUIterator_Fast imax, i = _ioproj_fallthrough->fast_outs(imax); i < imax; i++) {
+       Node *use = _ioproj_fallthrough->fast_out(i);
+@@ -568,7 +586,7 @@
+ 
+   if (_fallthroughcatchproj != NULL) {
+     ctrl = _fallthroughcatchproj->clone();
+-    _igvn.register_new_node_with_optimizer(ctrl);
++    transform_later(ctrl);
+     _igvn.hash_delete(_fallthroughcatchproj);
+     _igvn.subsume_node(_fallthroughcatchproj, result_region);
+   } else {
+@@ -580,7 +598,7 @@
+     slow_result = top();
+   } else {
+     slow_result = _resproj->clone();
+-    _igvn.register_new_node_with_optimizer(slow_result);
++    transform_later(slow_result);
+     _igvn.hash_delete(_resproj);
+     _igvn.subsume_node(_resproj, result_phi_rawoop);
+   }
+@@ -589,13 +607,14 @@
+   result_region    ->init_req( slow_result_path, ctrl );
+   result_phi_rawoop->init_req( slow_result_path, slow_result);
+   result_phi_rawmem->init_req( slow_result_path, _memproj_fallthrough );
+-  _igvn.register_new_node_with_optimizer(result_region);
+-  _igvn.register_new_node_with_optimizer(result_phi_rawoop);
+-  _igvn.register_new_node_with_optimizer(result_phi_rawmem);
+-  _igvn.register_new_node_with_optimizer(result_phi_i_o);
++  transform_later(result_region);
++  transform_later(result_phi_rawoop);
++  transform_later(result_phi_rawmem);
++  transform_later(result_phi_i_o);
+   // This completes all paths into the result merge point
+ }
+ 
++
+ // Helper for PhaseMacroExpand::expand_allocate_common.
+ // Initializes the newly-allocated storage.
+ Node*
+@@ -603,6 +622,7 @@
+                                     Node* control, Node* rawmem, Node* object,
+                                     Node* klass_node, Node* length,
+                                     Node* size_in_bytes) {
++  InitializeNode* init = alloc->initialization();
+   // Store the klass & mark bits
+   Node* mark_node = NULL;
+   // For now only enable fast locking for non-array types
+@@ -625,72 +645,93 @@
+       header_size = Klass::layout_helper_header_size(k->layout_helper());
+   }
+ 
+-  // Now bulk-clear the object body.  There may be a padding word after the
+-  // length, but it doesn't need to be initialized.  Optimizer will expand
+-  // this to a series of Stores if it's short and fixed size.
+-  if (!ZeroTLAB) {
+-    rawmem = ClearArrayNode::clear_memory(control, rawmem, object,
+-                                          header_size, size_in_bytes,
+-                                          &_igvn);
++  // Clear the object body, if necessary.
++  if (init == NULL) {
++    // The init has somehow disappeared; be cautious and clear everything.
++    //
++    // This can happen if a node is allocated but an uncommon trap occurs
++    // immediately.  In this case, the Initialize gets associated with the
++    // trap, and may be placed in a different (outer) loop, if the Allocate
++    // is in a loop.  If (this is rare) the inner loop gets unrolled, then
++    // there can be two Allocates to one Initialize.  The answer in all these
++    // edge cases is safety first.  It is always safe to clear immediately
++    // within an Allocate, and then (maybe or maybe not) clear some more later.
++    if (!ZeroTLAB)
++      rawmem = ClearArrayNode::clear_memory(control, rawmem, object,
++                                            header_size, size_in_bytes,
++                                            &_igvn);
++  } else {
++    if (!init->is_complete()) {
++      // Try to win by zeroing only what the init does not store.
++      // We can also try to do some peephole optimizations,
++      // such as combining some adjacent subword stores.
++      rawmem = init->complete_stores(control, rawmem, object,
++                                     header_size, size_in_bytes, &_igvn);
++    }
++
++    // We have no more use for this link, since the AllocateNode goes away:
++    init->set_req(InitializeNode::RawAddress, top());
++    // (If we keep the link, it just confuses the register allocator,
++    // who thinks he sees a real use of the address by the membar.)
+   }
+ 
+   return rawmem;
+ }
+ 
+ // Generate prefetch instructions for next allocations.
+-Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false, 
+-                                        Node*& contended_phi_rawmem, 
+-                                        Node* old_eden_top, Node* new_eden_top, 
++Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false,
++                                        Node*& contended_phi_rawmem,
++                                        Node* old_eden_top, Node* new_eden_top,
+                                         Node* length) {
+-   if( UseTLAB && AllocatePrefetchStyle == 2 ) { 
++   if( UseTLAB && AllocatePrefetchStyle == 2 ) {
+       // Generate prefetch allocation with watermark check.
+-      // As an allocation hits the watermark, we will prefetch starting 
++      // As an allocation hits the watermark, we will prefetch starting
+       // at a "distance" away from watermark.
+       enum { fall_in_path = 1, pf_path = 2 };
+ 
+       Node *pf_region = new (C, 3) RegionNode(3);
+-      Node *pf_phi_rawmem = new (C, 3) PhiNode( pf_region, Type::MEMORY, 
++      Node *pf_phi_rawmem = new (C, 3) PhiNode( pf_region, Type::MEMORY,
+                                                 TypeRawPtr::BOTTOM );
+       // I/O is used for Prefetch
+-      Node *pf_phi_abio = new (C, 3) PhiNode( pf_region, Type::ABIO ); 
++      Node *pf_phi_abio = new (C, 3) PhiNode( pf_region, Type::ABIO );
+ 
+       Node *thread = new (C, 1) ThreadLocalNode();
+-      _igvn.register_new_node_with_optimizer(thread);
++      transform_later(thread);
+ 
+-      Node *eden_pf_adr = new (C, 4) AddPNode( top()/*not oop*/, thread, 
++      Node *eden_pf_adr = new (C, 4) AddPNode( top()/*not oop*/, thread,
+                    _igvn.MakeConX(in_bytes(JavaThread::tlab_pf_top_offset())) );
+-      _igvn.register_new_node_with_optimizer(eden_pf_adr);
++      transform_later(eden_pf_adr);
+ 
+-      Node *old_pf_wm = new (C, 3) LoadPNode( needgc_false, 
+-                                   contended_phi_rawmem, eden_pf_adr, 
++      Node *old_pf_wm = new (C, 3) LoadPNode( needgc_false,
++                                   contended_phi_rawmem, eden_pf_adr,
+                                    TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM );
+-      _igvn.register_new_node_with_optimizer(old_pf_wm);
++      transform_later(old_pf_wm);
+ 
+       // check against new_eden_top
+       Node *need_pf_cmp = new (C, 3) CmpPNode( new_eden_top, old_pf_wm );
+-      _igvn.register_new_node_with_optimizer(need_pf_cmp);
++      transform_later(need_pf_cmp);
+       Node *need_pf_bol = new (C, 2) BoolNode( need_pf_cmp, BoolTest::ge );
+-      _igvn.register_new_node_with_optimizer(need_pf_bol);
+-      IfNode *need_pf_iff = new (C, 2) IfNode( needgc_false, need_pf_bol, 
++      transform_later(need_pf_bol);
++      IfNode *need_pf_iff = new (C, 2) IfNode( needgc_false, need_pf_bol,
+                                        PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN );
+-      _igvn.register_new_node_with_optimizer(need_pf_iff);
+-      
++      transform_later(need_pf_iff);
++
+       // true node, add prefetchdistance
+       Node *need_pf_true = new (C, 1) IfTrueNode( need_pf_iff );
+-      _igvn.register_new_node_with_optimizer(need_pf_true);
++      transform_later(need_pf_true);
+ 
+       Node *need_pf_false = new (C, 1) IfFalseNode( need_pf_iff );
+-      _igvn.register_new_node_with_optimizer(need_pf_false);
++      transform_later(need_pf_false);
+ 
+-      Node *new_pf_wmt = new (C, 4) AddPNode( top(), old_pf_wm, 
++      Node *new_pf_wmt = new (C, 4) AddPNode( top(), old_pf_wm,
+                                     _igvn.MakeConX(AllocatePrefetchDistance) );
+-      _igvn.register_new_node_with_optimizer(new_pf_wmt );
++      transform_later(new_pf_wmt );
+       new_pf_wmt->set_req(0, need_pf_true);
+ 
+-      Node *store_new_wmt = new (C, 4) StorePNode( need_pf_true, 
+-                                       contended_phi_rawmem, eden_pf_adr, 
++      Node *store_new_wmt = new (C, 4) StorePNode( need_pf_true,
++                                       contended_phi_rawmem, eden_pf_adr,
+                                        TypeRawPtr::BOTTOM, new_pf_wmt );
+-      _igvn.register_new_node_with_optimizer(store_new_wmt);
++      transform_later(store_new_wmt);
+ 
+       // adding prefetches
+       pf_phi_abio->init_req( fall_in_path, i_o );
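
A note on the initialize_object rewrite in the hunk above: the unconditional bulk
clear is replaced by a split.  If the Initialize node survives, only the slices
its captured stores leave untouched are zeroed (init->complete_stores); if it
has vanished, everything is cleared as before.  A minimal standalone sketch of
the gap-zeroing idea, assuming the object is viewed as a raw byte buffer and the
captured stores are known as (offset, size) pairs; CapturedStore and
clear_uninitialized_gaps are illustrative names, not HotSpot's:

    #include <algorithm>
    #include <cstddef>
    #include <cstring>
    #include <vector>

    struct CapturedStore { std::size_t offset, size; };  // one store the init performs

    // Zero only the bytes of [header_size, total_size) that no captured store
    // covers -- "zero only what the init does not store".
    void clear_uninitialized_gaps(char* obj, std::size_t header_size,
                                  std::size_t total_size,
                                  std::vector<CapturedStore> stores) {
        std::sort(stores.begin(), stores.end(),
                  [](const CapturedStore& a, const CapturedStore& b) {
                      return a.offset < b.offset;
                  });
        std::size_t pos = header_size;        // header is initialized separately
        for (const CapturedStore& s : stores) {
            if (s.offset > pos)               // gap before this store: clear it
                std::memset(obj + pos, 0, s.offset - pos);
            if (s.offset + s.size > pos)
                pos = s.offset + s.size;
        }
        if (pos < total_size)                 // trailing gap past the last store
            std::memset(obj + pos, 0, total_size - pos);
    }

In the patch itself this happens symbolically on the ideal graph; the byte-level
loop is only the shape of the computation.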
+@@ -702,11 +743,11 @@
+       uint distance = 0;
+ 
+       for ( uint i = 0; i < lines; i++ ) {
+-        prefetch_adr = new (C, 4) AddPNode( old_pf_wm, new_pf_wmt, 
++        prefetch_adr = new (C, 4) AddPNode( old_pf_wm, new_pf_wmt,
+                                             _igvn.MakeConX(distance) );
+-        _igvn.register_new_node_with_optimizer(prefetch_adr);
++        transform_later(prefetch_adr);
+         prefetch = new (C, 3) PrefetchWriteNode( i_o, prefetch_adr );
+-        _igvn.register_new_node_with_optimizer(prefetch);
++        transform_later(prefetch);
+         distance += step_size;
+         i_o = prefetch;
+       }
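
The loop in this hunk threads one PrefetchWriteNode per cache line through the
I/O chain, starting AllocatePrefetchDistance past the old watermark and stepping
by AllocatePrefetchStepSize.  A rough userland analogue, assuming GCC/Clang's
__builtin_prefetch; the constants are placeholders standing in for the
AllocatePrefetch* flags, not their real defaults:

    // Touch `lines` cache lines ahead of the allocation top so upcoming
    // bump-pointer allocations land in warm cache.
    inline void prefetch_ahead(const char* alloc_top) {
        const unsigned lines = 3, step_size = 64, distance = 256;
        unsigned off = distance;
        for (unsigned i = 0; i < lines; i++) {
            __builtin_prefetch(alloc_top + off, /*rw=*/1, /*locality=*/1);
            off += step_size;
        }
    }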
+@@ -718,9 +759,9 @@
+       pf_phi_rawmem->init_req( fall_in_path, contended_phi_rawmem );
+       pf_phi_rawmem->init_req( pf_path, store_new_wmt );
+ 
+-      _igvn.register_new_node_with_optimizer(pf_region);
+-      _igvn.register_new_node_with_optimizer(pf_phi_rawmem);
+-      _igvn.register_new_node_with_optimizer(pf_phi_abio);
++      transform_later(pf_region);
++      transform_later(pf_phi_rawmem);
++      transform_later(pf_phi_abio);
+ 
+       needgc_false = pf_region;
+       contended_phi_rawmem = pf_phi_rawmem;
+@@ -734,16 +775,16 @@
+       uint step_size = AllocatePrefetchStepSize;
+       uint distance = AllocatePrefetchDistance;
+       for ( uint i = 0; i < lines; i++ ) {
+-        prefetch_adr = new (C, 4) AddPNode( old_eden_top, new_eden_top, 
++        prefetch_adr = new (C, 4) AddPNode( old_eden_top, new_eden_top,
+                                             _igvn.MakeConX(distance) );
+-        _igvn.register_new_node_with_optimizer(prefetch_adr);
++        transform_later(prefetch_adr);
+         prefetch = new (C, 3) PrefetchWriteNode( i_o, prefetch_adr );
+-        // Do not let it float too high, since if eden_top == eden_end, 
++        // Do not let it float too high, since if eden_top == eden_end,
+         // both might be null.
+         if( i == 0 ) { // Set control for first prefetch, next follows it
+           prefetch->init_req(0, needgc_false);
+         }
+-        _igvn.register_new_node_with_optimizer(prefetch);
++        transform_later(prefetch);
+         distance += step_size;
+         i_o = prefetch;
+       }
+@@ -812,12 +853,12 @@
+ 
+   // Make the merge point
+   Node *region = new (C, 3) RegionNode(3);
+-  
+-  Node *bol = _igvn.register_new_node_with_optimizer(new (C, 2) BoolNode(flock,BoolTest::ne));
++
++  Node *bol = transform_later(new (C, 2) BoolNode(flock,BoolTest::ne));
+   Node *iff = new (C, 2) IfNode( ctrl, bol, PROB_MIN, COUNT_UNKNOWN );
+   // Optimize test; set region slot 2
+   Node *slow_path = opt_iff(region,iff);
+-  
++
+   // Make slow path call
+   CallNode *call = make_slow_call( (CallNode *) lock, OptoRuntime::complete_monitor_enter_Type(), OptoRuntime::complete_monitor_locking_Java(), NULL, slow_path, obj, box );
+ 
+@@ -834,20 +875,20 @@
+   // disconnect fall-through projection from call and create a new one
+   // hook up users of fall-through projection to region
+   Node *slow_ctrl = _fallthroughproj->clone();
+-  _igvn.register_new_node_with_optimizer(slow_ctrl);
++  transform_later(slow_ctrl);
+   _igvn.hash_delete(_fallthroughproj);
+   _fallthroughproj->disconnect_inputs(NULL);
+   region->init_req(1, slow_ctrl);
+   // region inputs are now complete
+-  _igvn.register_new_node_with_optimizer(region);
++  transform_later(region);
+   _igvn.subsume_node(_fallthroughproj, region);
+ 
+   // create a Phi for the memory state
+   Node *mem_phi = new (C, 3) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
+-  Node *memproj = _igvn.register_new_node_with_optimizer( new (C, 1) ProjNode(call, TypeFunc::Memory) );
++  Node *memproj = transform_later( new (C, 1) ProjNode(call, TypeFunc::Memory) );
+   mem_phi->init_req(1, memproj );
+   mem_phi->init_req(2, mem);
+-  _igvn.register_new_node_with_optimizer(mem_phi);
++  transform_later(mem_phi);
+     _igvn.hash_delete(_memproj_fallthrough);
+   _igvn.subsume_node(_memproj_fallthrough, mem_phi);
+ 
+@@ -874,14 +915,14 @@
+   RegionNode *region = new (C, 3) RegionNode(3);
+ 
+   FastUnlockNode *funlock = new (C, 3) FastUnlockNode( ctrl, obj, box );
+-  funlock = _igvn.register_new_node_with_optimizer( funlock )->as_FastUnlock();
+-  Node *bol = _igvn.register_new_node_with_optimizer(new (C, 2) BoolNode(funlock,BoolTest::ne));
++  funlock = transform_later( funlock )->as_FastUnlock();
++  Node *bol = transform_later(new (C, 2) BoolNode(funlock,BoolTest::ne));
+   Node *iff = new (C, 2) IfNode( ctrl, bol, PROB_MIN, COUNT_UNKNOWN );
+   // Optimize test; set region slot 2
+   Node *slow_path = opt_iff(region,iff);
+ 
+   CallNode *call = make_slow_call( (CallNode *) unlock, OptoRuntime::complete_monitor_exit_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), "complete_monitor_unlocking_C", slow_path, obj, box );
+-  
++
+   extract_call_projections(call);
+ 
+   assert ( _ioproj_fallthrough == NULL && _ioproj_catchall == NULL &&
+@@ -892,20 +933,20 @@
+   // disconnect fall-through projection from call and create a new one
+   // hook up users of fall-through projection to region
+   Node *slow_ctrl = _fallthroughproj->clone();
+-  _igvn.register_new_node_with_optimizer(slow_ctrl);
++  transform_later(slow_ctrl);
+   _igvn.hash_delete(_fallthroughproj);
+   _fallthroughproj->disconnect_inputs(NULL);
+   region->init_req(1, slow_ctrl);
+   // region inputs are now complete
+-  _igvn.register_new_node_with_optimizer(region);
++  transform_later(region);
+   _igvn.subsume_node(_fallthroughproj, region);
+ 
+   // create a Phi for the memory state
+   Node *mem_phi = new (C, 3) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
+-  Node *memproj = _igvn.register_new_node_with_optimizer( new(C, 1) ProjNode(call, TypeFunc::Memory) );
++  Node *memproj = transform_later( new(C, 1) ProjNode(call, TypeFunc::Memory) );
+   mem_phi->init_req(1, memproj );
+   mem_phi->init_req(2, mem);
+-  _igvn.register_new_node_with_optimizer(mem_phi);
++  transform_later(mem_phi);
+     _igvn.hash_delete(_memproj_fallthrough);
+   _igvn.subsume_node(_memproj_fallthrough, mem_phi);
+ 
+@@ -952,4 +993,3 @@
+   _igvn.optimize();
+   return false;
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/opto/macro.hpp openjdk/hotspot/src/share/vm/opto/macro.hpp
+--- openjdk6/hotspot/src/share/vm/opto/macro.hpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/macro.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)macro.hpp	1.10 07/07/02 18:45:21 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class  AllocateNode;
+@@ -43,11 +40,22 @@
+   Node* basic_plus_adr(Node* base, int offset) {
+     return (offset == 0)? base: basic_plus_adr(base, MakeConX(offset));
+   }
++  Node* basic_plus_adr(Node* base, Node* ptr, int offset) {
++    return (offset == 0)? ptr: basic_plus_adr(base, ptr, MakeConX(offset));
++  }
+   Node* basic_plus_adr(Node* base, Node* offset) {
+-    Node* adr = new (C, 4) AddPNode(base, base, offset);
+-    _igvn.register_new_node_with_optimizer(adr);
+-    return adr;
++    return basic_plus_adr(base, base, offset);
++  }
++  Node* basic_plus_adr(Node* base, Node* ptr, Node* offset) {
++    Node* adr = new (C, 4) AddPNode(base, ptr, offset);
++    return transform_later(adr);
++  }
++  Node* transform_later(Node* n) {
++    // equivalent to _gvn.transform in GraphKit, Ideal, etc.
++    _igvn.register_new_node_with_optimizer(n);
++    return n;
+   }
++  void set_eden_pointers(Node* &eden_top_adr, Node* &eden_end_adr);
+   Node* make_load( Node* ctl, Node* mem, Node* base, int offset,
+                    const Type* value_type, BasicType bt);
+   Node* make_store(Node* ctl, Node* mem, Node* base, int offset,
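
This macro.hpp hunk is the pivot of the whole patch: transform_later wraps
register_new_node_with_optimizer and returns its argument, so node creation and
registration compose in one expression (as in Node *bol = transform_later(new
BoolNode(...)) earlier), and basic_plus_adr gains base/ptr overloads built on
it.  A minimal sketch of the wrapper idiom with stand-in Graph/Node types
(names are illustrative):

    #include <memory>
    #include <vector>

    struct Node { };

    class Graph {
        std::vector<std::unique_ptr<Node>> nodes_;  // owns registered nodes
    public:
        // Analogue of _igvn.register_new_node_with_optimizer(n).
        void register_new_node(Node* n) { nodes_.emplace_back(n); }

        // The wrapper: register, then hand the node straight back so the
        // call chains inside larger expressions.
        Node* transform_later(Node* n) {
            register_new_node(n);
            return n;
        }
    };

    int main() {
        Graph g;
        // One expression creates and registers, as at the patched call sites.
        Node* bol = g.transform_later(new Node());
        (void)bol;
    }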
+@@ -77,9 +85,9 @@
+   int replace_input(Node *use, Node *oldref, Node *newref);
+   void copy_call_debug_info(CallNode *oldcall, CallNode * newcall);
+   Node* opt_iff(Node* region, Node* iff);
+-  void copy_predefined_input_for_runtime_call(Node * ctrl, CallNode* oldcall, CallNode* call); 
++  void copy_predefined_input_for_runtime_call(Node * ctrl, CallNode* oldcall, CallNode* call);
+   CallNode* make_slow_call(CallNode *oldcall, const TypeFunc* slow_call_type, address slow_call,
+-                       const char* leaf_name, Node* slow_path, Node* parm0, Node* parm1); 
++                       const char* leaf_name, Node* slow_path, Node* parm0, Node* parm1);
+   void extract_call_projections(CallNode *call);
+ 
+   Node* initialize_object(AllocateNode* alloc,
+@@ -88,7 +96,7 @@
+                           Node* size_in_bytes);
+ 
+   Node* prefetch_allocation(Node* i_o,
+-                            Node*& needgc_false, Node*& contended_phi_rawmem, 
++                            Node*& needgc_false, Node*& contended_phi_rawmem,
+                             Node* old_eden_top, Node* new_eden_top,
+                             Node* length);
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/opto/matcher.cpp openjdk/hotspot/src/share/vm/opto/matcher.cpp
+--- openjdk6/hotspot/src/share/vm/opto/matcher.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/matcher.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)matcher.cpp	1.386 07/05/05 17:06:19 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -33,12 +30,12 @@
+ 
+ 
+ const int Matcher::base2reg[Type::lastype] = {
+-  Node::NotAMachineReg,0,0, Op_RegI, Op_RegL, 0, 
++  Node::NotAMachineReg,0,0, Op_RegI, Op_RegL, 0,
+   Node::NotAMachineReg, Node::NotAMachineReg, /* tuple, array */
+   Op_RegP, Op_RegP, Op_RegP, Op_RegP, Op_RegP, Op_RegP, /* the pointers */
+   0, 0/*abio*/,
+   Op_RegP /* Return address */, 0, /* the memories */
+-  Op_RegF, Op_RegF, Op_RegF, Op_RegD, Op_RegD, Op_RegD, 
++  Op_RegF, Op_RegF, Op_RegF, Op_RegD, Op_RegD, Op_RegD,
+   0  /*bottom*/
+ };
+ 
+@@ -56,15 +53,15 @@
+   _old2new_map(C->comp_arena()),
+ #endif
+   _shared_constants(C->comp_arena()),
+-  _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp), 
+-  _swallowed(swallowed), 
+-  _begin_inst_chain_rule(_BEGIN_INST_CHAIN_RULE), 
+-  _end_inst_chain_rule(_END_INST_CHAIN_RULE), 
++  _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp),
++  _swallowed(swallowed),
++  _begin_inst_chain_rule(_BEGIN_INST_CHAIN_RULE),
++  _end_inst_chain_rule(_END_INST_CHAIN_RULE),
+   _must_clone(must_clone), _proj_list(proj_list),
+   _register_save_policy(register_save_policy),
+   _c_reg_save_policy(c_reg_save_policy),
+   _register_save_type(register_save_type),
+-  _ruleName(ruleName), 
++  _ruleName(ruleName),
+   _allocation_started(false),
+   _states_arena(Chunk::medium_size),
+   _visited(&_states_arena),
+@@ -139,7 +136,7 @@
+ 
+ 
+ //---------------------------match---------------------------------------------
+-void Matcher::match( ) { 
++void Matcher::match( ) {
+   // One-time initialization of some register masks.
+   init_spill_mask( C->root()->in(1) );
+   _return_addr_mask = return_addr();
+@@ -186,7 +183,7 @@
+   const StartNode *start = C->start();
+   start->calling_convention( sig_bt, vm_parm_regs, argcnt );
+ #ifdef ASSERT
+-  // Sanity check users' calling convention.  Real handy while trying to 
++  // Sanity check users' calling convention.  Real handy while trying to
+   // get the initial port correct.
+   { for (uint i = 0; i<argcnt; i++) {
+       if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
+@@ -204,7 +201,7 @@
+                "parameters in register must be preserved by runtime stubs");
+       }
+       for (uint j = 0; j < i; j++) {
+-        assert(parm_reg != vm_parm_regs[j].first(), 
++        assert(parm_reg != vm_parm_regs[j].first(),
+                "calling conv. must produce distinct regs");
+       }
+     }
+@@ -246,10 +243,10 @@
+   }
+ 
+   // Finally, make sure the incoming arguments take up an even number of
+-  // words, in case the arguments or locals need to contain doubleword stack 
+-  // slots.  The rest of the system assumes that stack slot pairs (in 
+-  // particular, in the spill area) which look aligned will in fact be 
+-  // aligned relative to the stack pointer in the target machine.  Double 
++  // words, in case the arguments or locals need to contain doubleword stack
++  // slots.  The rest of the system assumes that stack slot pairs (in
++  // particular, in the spill area) which look aligned will in fact be
++  // aligned relative to the stack pointer in the target machine.  Double
+   // stack slots will always be allocated aligned.
+   _new_SP = OptoReg::Name(round_to(_in_arg_limit, RegMask::SlotsPerLong));
+ 
+@@ -272,7 +269,9 @@
+   find_shared( C->root() );
+   find_shared( C->top() );
+ 
+-  // Swap out to old-space; emptying new-space 
++  C->print_method("Before Matching", 2);
++
++  // Swap out to old-space; emptying new-space
+   Arena *old = C->node_arena()->move_contents(C->old_arena());
+ 
+   // Save debug and profile information for nodes in old space:
+@@ -341,11 +340,11 @@
+ 
+ 
+ //------------------------------Fixup_Save_On_Entry----------------------------
+-// The stated purpose of this routine is to take care of save-on-entry 
++// The stated purpose of this routine is to take care of save-on-entry
+ // registers.  However, the overall goal of the Match phase is to convert into
+ // machine-specific instructions which have RegMasks to guide allocation.
+ // So what this procedure really does is put a valid RegMask on each input
+-// to the machine-specific variations of all Return, TailCall and Halt 
++// to the machine-specific variations of all Return, TailCall and Halt
+ // instructions.  It also adds edgs to define the save-on-entry values (and of
+ // course gives them a mask).
+ 
+@@ -400,15 +399,15 @@
+   C->FIRST_STACK_mask().set_AllStack();
+ 
+   // Make spill masks.  Registers for their class, plus FIRST_STACK_mask.
+-  *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI]; 
++  *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
+    idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
+-  *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL]; 
++  *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
+    idealreg2spillmask[Op_RegL]->OR(C->FIRST_STACK_mask());
+-  *idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF]; 
++  *idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF];
+    idealreg2spillmask[Op_RegF]->OR(C->FIRST_STACK_mask());
+-  *idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD]; 
++  *idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD];
+    idealreg2spillmask[Op_RegD]->OR(C->FIRST_STACK_mask());
+-  *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP]; 
++  *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
+    idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());
+ 
+   // Make up debug masks.  Any spill slot plus callee-save registers.
+@@ -425,22 +424,22 @@
+   bool exclude_soe = !Compile::current()->is_method_compilation();
+ 
+   for( i=OptoReg::Name(0); i<OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i,1) ) {
+-    // registers the caller has to save do not work 
+-    if( _register_save_policy[i] == 'C' ||  
++    // registers the caller has to save do not work
++    if( _register_save_policy[i] == 'C' ||
+         _register_save_policy[i] == 'A' ||
+         (_register_save_policy[i] == 'E' && exclude_soe) ) {
+-      idealreg2debugmask[Op_RegI]->Remove(i); // Exclude save-on-call 
++      idealreg2debugmask[Op_RegI]->Remove(i); // Exclude save-on-call
+       idealreg2debugmask[Op_RegL]->Remove(i); // registers from debug
+       idealreg2debugmask[Op_RegF]->Remove(i); // masks
+       idealreg2debugmask[Op_RegD]->Remove(i);
+-      idealreg2debugmask[Op_RegP]->Remove(i); 
++      idealreg2debugmask[Op_RegP]->Remove(i);
+     }
+   }
+ }
+ 
+ //---------------------------is_save_on_entry----------------------------------
+ bool Matcher::is_save_on_entry( int reg ) {
+-  return 
++  return
+     _register_save_policy[reg] == 'E' ||
+     _register_save_policy[reg] == 'A' || // Save-on-entry register?
+     // Also save argument registers in the trampolining stubs
+@@ -451,19 +450,19 @@
+ void Matcher::Fixup_Save_On_Entry( ) {
+   init_first_stack_mask();
+ 
+-  Node *root = C->root();       // Short name for root  
+-  // Count number of save-on-entry registers.  
++  Node *root = C->root();       // Short name for root
++  // Count number of save-on-entry registers.
+   uint soe_cnt = number_of_saved_registers();
+   uint i;
+ 
+   // Find the procedure Start Node
+   StartNode *start = C->start();
+   assert( start, "Expect a start node" );
+-  
++
+   // Save argument registers in the trampolining stubs
+-  if( C->save_argument_registers() ) 
+-    for( i = 0; i < _last_Mach_Reg; i++ ) 
+-      if( is_spillable_arg(i) ) 
++  if( C->save_argument_registers() )
++    for( i = 0; i < _last_Mach_Reg; i++ )
++      if( is_spillable_arg(i) )
+         soe_cnt++;
+ 
+   // Input RegMask array shared by all Returns.
+@@ -471,11 +470,11 @@
+   // there is only 1 returned value
+   uint ret_edge_cnt = TypeFunc::Parms + ((C->tf()->range()->cnt() == TypeFunc::Parms) ? 0 : 1);
+   RegMask *ret_rms  = init_input_masks( ret_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
+-  // Returns have 0 or 1 returned values depending on call signature. 
+-  // Return register is specified by return_value in the AD file.  
++  // Returns have 0 or 1 returned values depending on call signature.
++  // Return register is specified by return_value in the AD file.
+   if (ret_edge_cnt > TypeFunc::Parms)
+     ret_rms[TypeFunc::Parms+0] = _return_value_mask;
+-  
++
+   // Input RegMask array shared by all Rethrows.
+   uint reth_edge_cnt = TypeFunc::Parms+1;
+   RegMask *reth_rms  = init_input_masks( reth_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
+@@ -485,7 +484,7 @@
+   // Need two slots for ptrs in 64-bit land
+   reth_rms[TypeFunc::Parms].Insert(OptoReg::add(OptoReg::Name(find_receiver(false)),1));
+ #endif
+-  
++
+   // Input RegMask array shared by all TailCalls
+   uint tail_call_edge_cnt = TypeFunc::Parms+2;
+   RegMask *tail_call_rms = init_input_masks( tail_call_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
+@@ -538,10 +537,10 @@
+   }
+ 
+   // Next unused projection number from Start.
+-  int proj_cnt = C->tf()->domain()->cnt();  
+-  
+-  // Do all the save-on-entry registers.  Make projections from Start for 
+-  // them, and give them a use at the exit points.  To the allocator, they 
++  int proj_cnt = C->tf()->domain()->cnt();
++
++  // Do all the save-on-entry registers.  Make projections from Start for
++  // them, and give them a use at the exit points.  To the allocator, they
+   // look like incoming register arguments.
+   for( i = 0; i < _last_Mach_Reg; i++ ) {
+     if( is_save_on_entry(i) ) {
+@@ -571,7 +570,7 @@
+         halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
+         mproj = new (C, 1) MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegD );
+         proj_cnt += 2;          // Skip 2 for doubles
+-      } 
++      }
+       else if( (i&1) == 1 &&    // Else check for high half of double
+                _register_save_type[i-1] == Op_RegF &&
+                _register_save_type[i  ] == Op_RegF &&
+@@ -582,7 +581,7 @@
+         tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
+         halt_rms     [     halt_edge_cnt] = RegMask::Empty;
+         mproj = C->top();
+-      } 
++      }
+       // Is this a RegI low half of a RegL?  Double up 2 adjacent RegI's
+       // into a single RegL.
+       else if( (i&1) == 0 &&
+@@ -597,7 +596,7 @@
+         halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
+         mproj = new (C, 1) MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegL );
+         proj_cnt += 2;          // Skip 2 for longs
+-      } 
++      }
+       else if( (i&1) == 1 &&    // Else check for high half of long
+                _register_save_type[i-1] == Op_RegI &&
+                _register_save_type[i  ] == Op_RegI &&
+@@ -620,8 +619,8 @@
+       halt_edge_cnt ++;
+ 
+       // Add a use of the SOE register to all exit paths
+-      for( uint j=1; j < root->req(); j++ ) 
+-        root->in(j)->add_req(mproj); 
++      for( uint j=1; j < root->req(); j++ )
++        root->in(j)->add_req(mproj);
+     } // End of if a save-on-entry register
+   } // End of for all machine registers
+ }
+@@ -667,8 +666,8 @@
+   MachNode *spillF  = match_tree(new (C, 3) LoadFNode(NULL,mem,fp,atp));
+   MachNode *spillD  = match_tree(new (C, 3) LoadDNode(NULL,mem,fp,atp));
+   MachNode *spillP  = match_tree(new (C, 3) LoadPNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM));
+-  assert(spillI != NULL && spillL != NULL && spillF != NULL && 
+-	 spillD != NULL && spillP != NULL, "");
++  assert(spillI != NULL && spillL != NULL && spillF != NULL &&
++         spillD != NULL && spillP != NULL, "");
+ 
+   // Get the ADLC notion of the right regmask, for each basic type.
+   idealreg2regmask[Op_RegI] = &spillI->out_RegMask();
+@@ -880,7 +879,7 @@
+       // And now walk his children, and convert his inputs to new-space.
+       for( ; i >= 0; --i ) { // For all normal inputs do
+         Node *m = n->in(i);  // Get input
+-        if(m != NULL) 
++        if(m != NULL)
+           mstack.push(m, Visit, n, i);
+       }
+ 
+@@ -917,7 +916,7 @@
+     // Keep track of the largest numbered stack slot used for an arg.
+     // Largest used slot per call-site indicates the amount of stack
+     // that is killed by the call.
+-    if( warped >= out_arg_limit_per_call ) 
++    if( warped >= out_arg_limit_per_call )
+       out_arg_limit_per_call = OptoReg::add(warped,1);
+     if (!RegMask::can_represent(warped)) {
+       C->record_method_not_compilable_all_tiers("unsupported calling sequence");
+@@ -957,21 +956,21 @@
+     mcall->set_tf(         call->tf());
+     mcall->set_entry_point(call->entry_point());
+     mcall->set_cnt(        call->cnt());
+-  
++
+     if( mcall->is_MachCallJava() ) {
+       MachCallJavaNode *mcall_java  = mcall->as_MachCallJava();
+       const CallJavaNode *call_java =  call->as_CallJava();
+       method = call_java->method();
+       mcall_java->_method = method;
+       mcall_java->_bci = call_java->_bci;
+-      mcall_java->_optimized_virtual = call_java->is_optimized_virtual(); 
+-      if( mcall_java->is_MachCallStaticJava() ) 
+-        mcall_java->as_MachCallStaticJava()->_name = 
++      mcall_java->_optimized_virtual = call_java->is_optimized_virtual();
++      if( mcall_java->is_MachCallStaticJava() )
++        mcall_java->as_MachCallStaticJava()->_name =
+          call_java->as_CallStaticJava()->_name;
+-      if( mcall_java->is_MachCallDynamicJava() ) 
+-        mcall_java->as_MachCallDynamicJava()->_vtable_index = 
++      if( mcall_java->is_MachCallDynamicJava() )
++        mcall_java->as_MachCallDynamicJava()->_vtable_index =
+          call_java->as_CallDynamicJava()->_vtable_index;
+-    } 
++    }
+     else if( mcall->is_MachCallRuntime() ) {
+       mcall->as_MachCallRuntime()->_name = call->as_CallRuntime()->_name;
+     }
+@@ -1002,7 +1001,7 @@
+   // Place first outgoing argument can possibly be put.
+   OptoReg::Name begin_out_arg_area = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
+   assert( is_even(begin_out_arg_area), "" );
+-  // Compute max outgoing register number per call site.  
++  // Compute max outgoing register number per call site.
+   OptoReg::Name out_arg_limit_per_call = begin_out_arg_area;
+   // Calls to C may hammer extra stack slots above and beyond any arguments.
+   // These are usually backing store for register arguments for varargs.
+@@ -1023,7 +1022,7 @@
+     call->calling_convention( sig_bt, parm_regs, argcnt );
+ 
+ #ifdef ASSERT
+-    // Sanity check users' calling convention.  Really handy during 
++    // Sanity check users' calling convention.  Really handy during
+     // the initial porting effort.  Fairly expensive otherwise.
+     { for (int i = 0; i<argcnt; i++) {
+       if( !parm_regs[i].first()->is_valid() &&
+@@ -1078,10 +1077,10 @@
+     mcall->_argsize = out_arg_limit_per_call - begin_out_arg_area;
+   }
+ 
+-  // Compute the max stack slot killed by any call.  These will not be 
+-  // available for debug info, and will be used to adjust FIRST_STACK_mask 
++  // Compute the max stack slot killed by any call.  These will not be
++  // available for debug info, and will be used to adjust FIRST_STACK_mask
+   // after all call sites have been visited.
+-  if( _out_arg_limit < out_arg_limit_per_call) 
++  if( _out_arg_limit < out_arg_limit_per_call)
+     _out_arg_limit = out_arg_limit_per_call;
+ 
+   if (mcall) {
+@@ -1147,14 +1146,14 @@
+   // Label the input tree, allocating labels from top-level arena
+   Label_Root( n, s, n->in(0), mem );
+   if (C->failing())  return NULL;
+-  
++
+   // The minimum cost match for the whole tree is found at the root State
+   uint mincost = max_juint;
+   uint cost = max_juint;
+   uint i;
+   for( i = 0; i < NUM_OPERANDS; i++ ) {
+     if( s->valid(i) &&                // valid entry and
+-        s->_cost[i] < cost &&         // low cost and 
++        s->_cost[i] < cost &&         // low cost and
+         s->_rule[i] >= NUM_OPERANDS ) // not an operand
+       cost = s->_cost[mincost=i];
+   }
+@@ -1171,7 +1170,7 @@
+ #ifdef ASSERT
+   _old2new_map.map(n->_idx, m);
+ #endif
+-  
++
+   // Add any Matcher-ignored edges
+   uint cnt = n->req();
+   uint start = 1;
+@@ -1185,7 +1184,7 @@
+       if( i < m->req() )
+         m->ins_req( i, n->in(i) );
+       else
+-        m->add_req( n->in(i) ); 
++        m->add_req( n->in(i) );
+     }
+   }
+ 
+@@ -1212,7 +1211,7 @@
+     if( control && m->in(0) && control != m->in(0) ) {
+ 
+       // Actually, we can live with the most conservative control we
+-      // find, if it post-dominates the others.  This allows us to 
++      // find, if it post-dominates the others.  This allows us to
+       // pick up load/op/store trees where the load can float a little
+       // above the store.
+       Node *x = control;
+@@ -1239,16 +1238,16 @@
+ // Label method walks a "tree" of nodes, using the ADLC generated DFA to match
+ // ideal nodes to machine instructions.  Trees are delimited by shared Nodes,
+ // things the Matcher does not match (e.g., Memory), and things with different
+-// Controls (hence forced into different blocks).  We pass in the Control 
++// Controls (hence forced into different blocks).  We pass in the Control
+ // selected for this entire State tree.
+ 
+ // The Matcher works on Trees, but an Intel add-to-memory requires a DAG: the
+-// Store and the Load must have identical Memories (as well as identical 
+-// pointers).  Since the Matcher does not have anything for Memory (and 
++// Store and the Load must have identical Memories (as well as identical
++// pointers).  Since the Matcher does not have anything for Memory (and
+ // does not handle DAGs), I have to match the Memory input myself.  If the
+ // Tree root is a Store, I require all Loads to have the identical memory.
+ Node *Matcher::Label_Root( const Node *n, State *svec, Node *control, const Node *mem){
+-  // Since Label_Root is a recursive function, its possible that we might run 
++  // Since Label_Root is a recursive function, it's possible that we might run
+   // out of stack space.  See bugs 6272980 & 6227033 for more info.
+   LabelRootDepth++;
+   if (LabelRootDepth > MaxLabelRootDepth) {
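
The comment block above names the one DAG shape the matcher tolerates: an
add-to-memory instruction may fold a Load under a Store-rooted tree only when
both observe the identical memory state, with NodeSentinel marking a conflict.
A toy version of the per-child memory scan, using stand-in node types and
(Node*)1 as the sentinel just as the source does:

    #include <vector>

    struct Node {
        bool is_load = false;
        Node* memory = nullptr;     // memory input (loads only)
        std::vector<Node*> inputs;  // match-edge children
    };

    // One pass over the children, as in Label_Root: remember the common
    // memory state of all Load children, or a sentinel if they disagree.
    Node* common_load_memory(const Node& root) {
        Node* sentinel = reinterpret_cast<Node*>(1);  // NodeSentinel's role
        Node* input_mem = nullptr;
        for (Node* m : root.inputs) {
            if (m == nullptr || !m->is_load) continue;
            if (input_mem == nullptr)        input_mem = m->memory;
            else if (input_mem != m->memory) input_mem = sentinel;
        }
        return input_mem;  // nullptr: no loads; sentinel: conflicting memory
    }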
+@@ -1261,17 +1260,17 @@
+ 
+   // Examine children for memory state
+   // Can only subsume a child into your match-tree if that child's memory state
+-  // is not modified along the path to another input.  
++  // is not modified along the path to another input.
+   // It is unsafe even if the other inputs are separate roots.
+   Node *input_mem = NULL;
+-  for( i = 1; i < cnt; i++ ) { 
++  for( i = 1; i < cnt; i++ ) {
+     if( !n->match_edge(i) ) continue;
+     Node *m = n->in(i);         // Get ith input
+     assert( m, "expect non-null children" );
+     if( m->is_Load() ) {
+       if( input_mem == NULL ) {
+         input_mem = m->in(MemNode::Memory);
+-      } else if( input_mem != m->in(MemNode::Memory) ) { 
++      } else if( input_mem != m->in(MemNode::Memory) ) {
+         input_mem = NodeSentinel;
+       }
+     }
+@@ -1290,11 +1289,11 @@
+     s->_kids[1] = NULL;
+     s->_leaf = m;
+ 
+-    // Check for leaves of the State Tree; things that cannot be a part of 
+-    // the current tree.  If it finds any, that value is matched as a 
++    // Check for leaves of the State Tree; things that cannot be a part of
++    // the current tree.  If it finds any, that value is matched as a
+     // register operand.  If not, then the normal matching is used.
+     if( match_into_reg(n, m, control, i, is_shared(m)) ||
+-        // 
++        //
+         // Stop recursion if this is LoadNode and the root of this tree is a
+         // StoreNode and the load & store have different memories.
+         ((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) ||
+@@ -1303,15 +1302,15 @@
+         (input_mem == NodeSentinel) ) {
+ #ifndef PRODUCT
+       // Print when we exclude matching due to different memory states at input-loads
+-      if( PrintOpto && (Verbose && WizardMode) && (input_mem == NodeSentinel) 
+-        && !((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) ) { 
+-        tty->print_cr("invalid input_mem"); 
++      if( PrintOpto && (Verbose && WizardMode) && (input_mem == NodeSentinel)
++        && !((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) ) {
++        tty->print_cr("invalid input_mem");
+       }
+ #endif
+       // Switch to a register-only opcode; this value must be in a register
+       // and cannot be subsumed as part of a larger instruction.
+       s->DFA( m->ideal_reg(), m );
+-    
++
+     } else {
+       // If match tree has no control and we do, adopt it for entire tree
+       if( control == NULL && m->in(0) != NULL && m->req() > 1 )
+@@ -1322,7 +1321,7 @@
+     }
+   }
+ 
+-  
++
+   // Call DFA to match this node, and return
+   svec->DFA( n->Opcode(), n );
+ 
+@@ -1379,15 +1378,15 @@
+ 
+ //------------------------------ReduceInst-------------------------------------
+ // Reduce a State tree (with given Control) into a tree of MachNodes.
+-// This routine (and it's cohort ReduceOper) convert Ideal Nodes into 
++// This routine (and its cohort ReduceOper) converts Ideal Nodes into
+ // complicated machine Nodes.  Each MachNode covers some tree of Ideal Nodes.
+-// Each MachNode has a number of complicated MachOper operands; each 
+-// MachOper also covers a further tree of Ideal Nodes.  
++// Each MachNode has a number of complicated MachOper operands; each
++// MachOper also covers a further tree of Ideal Nodes.
+ 
+ // The root of the Ideal match tree is always an instruction, so we enter
+ // the recursion here.  After building the MachNode, we need to recurse
+ // the tree checking for these cases:
+-// (1) Child is an instruction - 
++// (1) Child is an instruction -
+ //     Build the instruction (recursively), add it as an edge.
+ //     Build a simple operand (register) to hold the result of the instruction.
+ // (2) Child is an interior part of an instruction -
+@@ -1499,7 +1498,7 @@
+     mem = mem2;
+   }
+   if( s->_leaf->in(0) != NULL && s->_leaf->req() > 1) {
+-    if( mach->in(0) == NULL ) 
++    if( mach->in(0) == NULL )
+       mach->set_req(0, s->_leaf->in(0));
+   }
+ 
+@@ -1558,7 +1557,7 @@
+ //     and instruction as an input to the MachNode
+ void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
+   assert( rule < _LAST_MACH_OPER, "called with operand rule" );
+-  State *kid = s->_kids[0]; 
++  State *kid = s->_kids[0];
+   assert( kid == NULL || s->_leaf->in(0) == NULL, "internal operands have no control" );
+ 
+   // Leaf?  And not subsumed?
+@@ -1572,7 +1571,7 @@
+     mem = s->_leaf->in(MemNode::Memory);
+   }
+   if( s->_leaf->in(0) && s->_leaf->req() > 1) {
+-    if( !mach->in(0) ) 
++    if( !mach->in(0) )
+       mach->set_req(0,s->_leaf->in(0));
+     else {
+       assert( s->_leaf->in(0) == mach->in(0), "same instruction, differing controls?" );
+@@ -1624,14 +1623,14 @@
+ // Set bits if Node is shared or otherwise a root
+ void Matcher::find_shared( Node *n ) {
+   // Allocate stack of size C->unique() * 2 to avoid frequent realloc
+-  MStack mstack(C->unique() * 2); 
++  MStack mstack(C->unique() * 2);
+   mstack.push(n, Visit);     // Don't need to pre-visit root node
+   while (mstack.is_nonempty()) {
+     n = mstack.node();       // Leave node on stack
+     Node_State nstate = mstack.state();
+     if (nstate == Pre_Visit) {
+       if (is_visited(n)) {   // Visited already?
+-        // Node is shared and has no reason to clone.  Flag it as shared.  
++        // Node is shared and has no reason to clone.  Flag it as shared.
+         // This causes it to match into a register for the sharing.
+         set_shared(n);       // Flag as shared and
+         mstack.pop();        // remove node from stack
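
find_shared walks the graph with an explicit stack and flags any node reached a
second time as shared, so it matches into a register instead of being cloned
into two match trees.  Stripped of HotSpot's long list of opcode special cases,
the marking scheme reduces to this sketch (stand-in Node type):

    #include <stack>
    #include <unordered_set>
    #include <vector>

    struct Node { std::vector<Node*> inputs; };

    // Nodes popped while already visited are shared; they are flagged and
    // not walked a second time.
    std::unordered_set<const Node*> find_shared(const Node* root) {
        std::unordered_set<const Node*> visited, shared;
        std::stack<const Node*> work;
        work.push(root);
        while (!work.empty()) {
            const Node* n = work.top();
            work.pop();
            if (!visited.insert(n).second) {  // visited already?
                shared.insert(n);             // flag as shared, stop here
                continue;
            }
            for (const Node* in : n->inputs)
                if (in != nullptr) work.push(in);
        }
        return shared;
    }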
+@@ -1658,7 +1657,7 @@
+         // with matching cmp/branch in 1 instruction.  The Matcher needs the
+         // Bool and CmpX side-by-side, because it can only get at constants
+         // that are at the leaves of Match trees, and the Bool's condition acts
+-        // as a constant here.  
++        // as a constant here.
+         mstack.push(n->in(1), Visit);         // Clone the Bool
+         mstack.push(n->in(0), Pre_Visit);     // Visit control input
+         continue; // while (mstack.is_nonempty())
+@@ -1691,7 +1690,7 @@
+       case Op_Jump:
+         mstack.push(n->in(1), Visit);         // Switch Value
+         mstack.push(n->in(0), Pre_Visit);     // Visit Control input
+-        continue;                             // while (mstack.is_nonempty())                   
++        continue;                             // while (mstack.is_nonempty())
+       case Op_StrComp:
+         set_shared(n); // Force result into register (it will be anyways)
+         break;
+@@ -1782,7 +1781,7 @@
+         if( _must_clone[mop] ) {
+           mstack.push(m, Visit);
+           continue; // for(int i = ...)
+-        } 
++        }
+ 
+         // Clone addressing expressions as they are "free" in most instructions
+         if( mem_op && i == MemNode::Address && mop == Op_AddP ) {
+@@ -1799,10 +1798,20 @@
+               set_visited(adr);  // Flag as visited now
+               Node *shift = adr->in(AddPNode::Offset);
+               // Check for shift by small constant as well
+-              if( shift->Opcode() == Op_LShiftX && shift->in(2)->is_Con() && 
+-                  shift->in(2)->get_int() <= 3 ) { 
++              if( shift->Opcode() == Op_LShiftX && shift->in(2)->is_Con() &&
++                  shift->in(2)->get_int() <= 3 ) {
+                 set_visited(shift);  // Flag as visited now
+                 mstack.push(shift->in(2), Visit);
++#ifdef _LP64
++                // Allow Matcher to match the rule which bypass
++                // ConvI2L operation for an array index on LP64
++                // if the index value is positive.
++                if( shift->in(1)->Opcode() == Op_ConvI2L &&
++                    shift->in(1)->as_Type()->type()->is_long()->_lo >= 0 ) {
++                  set_visited(shift->in(1));  // Flag as visited now
++                  mstack.push(shift->in(1)->in(1), Pre_Visit);
++                } else
++#endif
+                 mstack.push(shift->in(1), Pre_Visit);
+               } else {
+                 mstack.push(shift, Pre_Visit);
+@@ -1826,7 +1835,7 @@
+       mstack.pop(); // Remove node from stack
+       // We cannot remove the Cmp input from the Bool here, as the Bool may be
+       // shared and all users of the Bool need to move the Cmp in parallel.
+-      // This leaves both the Bool and the If pointing at the Cmp.  To 
++      // This leaves both the Bool and the If pointing at the Cmp.  To
+       // prevent the Matcher from trying to Match the Cmp along both paths
+       // BoolNode::match_edge always returns a zero.
+ 
+@@ -1926,7 +1935,7 @@
+     Node *test = _null_check_tests[i];
+     Node *val = _null_check_tests[i+1];
+     if (has_new_node(val)) {
+-      // Is a match-tree root, so replace with the matched value 
++      // Is a match-tree root, so replace with the matched value
+       _null_check_tests.map(i+1, new_node(val));
+     } else {
+       // Yank from candidate list
+@@ -1936,7 +1945,7 @@
+       _null_check_tests.pop();
+       i-=2;
+     }
+-  }  
++  }
+ }
+ 
+ 
+@@ -1962,7 +1971,7 @@
+   Compile *C = Compile::current();
+   assert( rel->Opcode() == Op_MemBarRelease, "" );
+   const MemBarReleaseNode *mem = (const MemBarReleaseNode*)rel;
+-  DUIterator_Fast imax, i = mem->fast_outs(imax); 
++  DUIterator_Fast imax, i = mem->fast_outs(imax);
+   Node *ctrl = NULL;
+   while( true ) {
+     ctrl = mem->fast_out(i);            // Throw out-of-bounds if proj not found
+@@ -1975,8 +1984,8 @@
+   }
+   Node *iff = NULL;
+   for( DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++ ) {
+-    Node *x = ctrl->fast_out(j); 
+-    if( x->is_If() && x->req() > 1 && 
++    Node *x = ctrl->fast_out(j);
++    if( x->is_If() && x->req() > 1 &&
+         !C->node_arena()->contains(x) ) { // Unmatched old-space only
+       iff = x;
+       break;
+@@ -2002,29 +2011,29 @@
+ 
+   // Get the Proj node, ctrl, that can be used to iterate forward
+   Node *ctrl = NULL;
+-  DUIterator_Fast imax, i = mem->fast_outs(imax); 
++  DUIterator_Fast imax, i = mem->fast_outs(imax);
+   while( true ) {
+-    ctrl = mem->fast_out(i);		// Throw out-of-bounds if proj not found
++    ctrl = mem->fast_out(i);            // Throw out-of-bounds if proj not found
+     assert( ctrl->is_Proj(), "only projections here" );
+     ProjNode *proj = (ProjNode*)ctrl;
+     if( proj->_con == TypeFunc::Control &&
+-	!C->node_arena()->contains(ctrl) ) // Unmatched old-space only
++        !C->node_arena()->contains(ctrl) ) // Unmatched old-space only
+       break;
+     i++;
+   }
+ 
+   for( DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++ ) {
+-    Node *x = ctrl->fast_out(j); 
++    Node *x = ctrl->fast_out(j);
+     int xop = x->Opcode();
+ 
+     // We don't need current barrier if we see another or a lock
+-    // before seeing volatile load. 
++    // before seeing volatile load.
+     //
+     // Op_Fastunlock previously appeared in the Op_* list below.
+     // With the advent of 1-0 lock operations we're no longer guaranteed
+-    // that a monitor exit operation contains a serializing instruction. 
+-    
+-    if (xop == Op_MemBarVolatile || 
++    // that a monitor exit operation contains a serializing instruction.
++
++    if (xop == Op_MemBarVolatile ||
+         xop == Op_FastLock ||
+         xop == Op_CompareAndSwapL ||
+         xop == Op_CompareAndSwapP ||
+@@ -2034,17 +2043,17 @@
+     if (x->is_MemBar()) {
+       // We must retain this membar if there is an upcoming volatile
+       // load, which will be preceded by acquire membar.
+-      if (xop == Op_MemBarAcquire) 
++      if (xop == Op_MemBarAcquire)
+         return false;
+       // For other kinds of barriers, check by pretending we
+       // are them, and seeing if we can be removed.
+-      else 
++      else
+         return post_store_load_barrier((const MemBarNode*)x);
+     }
+ 
+     // Delicate code to detect case of an upcoming fastlock block
+-    if( x->is_If() && x->req() > 1 && 
+-	!C->node_arena()->contains(x) ) { // Unmatched old-space only
++    if( x->is_If() && x->req() > 1 &&
++        !C->node_arena()->contains(x) ) { // Unmatched old-space only
+       Node *iff = x;
+       Node *bol = iff->in(1);
+       // The iff might be some random subclass of If or bol might be Con-Top
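
post_store_load_barrier, whose body spans the hunks above, scans forward from a
volatile store's control projection: hitting another MemBarVolatile, a
FastLock, or a compare-and-swap before any MemBarAcquire means the trailing
StoreLoad barrier is redundant.  The decision reduces to a forward scan like
this sketch (op set abbreviated; the real code also recurses through other
MemBar kinds and pattern-matches the FastLock If):

    #include <vector>

    enum class Op { MemBarVolatile, FastLock, CompareAndSwap,
                    MemBarAcquire, Other };

    // True if the StoreLoad barrier after a volatile store may be removed:
    // a later serializing op appears before any acquire (volatile load).
    bool post_store_load_barrier(const std::vector<Op>& following_ops) {
        for (Op op : following_ops) {
            switch (op) {
            case Op::MemBarVolatile:
            case Op::FastLock:
            case Op::CompareAndSwap:
                return true;   // something later already serializes
            case Op::MemBarAcquire:
                return false;  // upcoming volatile load still needs ours
            default:
                break;         // keep scanning
            }
        }
        return false;          // ran out of block: keep the barrier
    }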
+@@ -2084,12 +2093,12 @@
+ 
+ #ifndef PRODUCT
+ //---------------------------dump----------------------------------------------
+-void State::dump() { 
++void State::dump() {
+   tty->print("\n");
+-  dump(0); 
++  dump(0);
+ }
+ 
+-void State::dump(int depth) { 
++void State::dump(int depth) {
+   for( int j = 0; j < depth; j++ )
+     tty->print("   ");
+   tty->print("--N: ");
+@@ -2102,7 +2111,7 @@
+         tty->print("   ");
+         assert(_cost[i] != max_juint, "cost must be a valid value");
+         assert(_rule[i] < _last_Mach_Node, "rule[i] must be valid rule");
+-        tty->print_cr("%s  %d  %s", 
++        tty->print_cr("%s  %d  %s",
+                       ruleName[i], _cost[i], ruleName[_rule[i]] );
+       }
+   tty->print_cr("");
+@@ -2112,4 +2121,3 @@
+       _kids[i]->dump(depth+1);
+ }
+ #endif
+-
+diff -ruN openjdk6/hotspot/src/share/vm/opto/matcher.hpp openjdk/hotspot/src/share/vm/opto/matcher.hpp
+--- openjdk6/hotspot/src/share/vm/opto/matcher.hpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/matcher.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)matcher.hpp	1.187 07/05/05 17:06:19 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class Compile;
+@@ -60,11 +57,11 @@
+ 
+   // Map dense opcode number to info on when rule is swallowed constant.
+   const bool *_swallowed;
+-  
++
+   // Map dense rule number to determine if this is an instruction chain rule
+   const uint _begin_inst_chain_rule;
+   const uint _end_inst_chain_rule;
+-  
++
+   // We want to clone constants and possible CmpI-variants.
+   // If we do not clone CmpI, then we can have many instances of
+   // condition codes alive at once.  This is OK on some chips and
+@@ -79,7 +76,7 @@
+ 
+   // Node labeling iterator for instruction selection
+   Node *Label_Root( const Node *n, State *svec, Node *control, const Node *mem );
+-  
++
+   Node *transform( Node *dummy );
+ 
+   Node_List &_proj_list;        // For Machine nodes killing many values
+@@ -234,6 +231,11 @@
+   // Vector ideal reg
+   static const uint vector_ideal_reg(void);
+ 
++  // Used to determine a "low complexity" 64-bit constant.  (Zero is simple.)
++  // The standard of comparison is one (StoreL ConL) vs. two (StoreI ConI).
++  // Depends on the details of 64-bit constant generation on the CPU.
++  static const bool isSimpleConstant64(jlong con);
++
+   // These calls are all generated by the ADLC
+ 
+   // TRUE - grows up, FALSE - grows down (Intel)
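
isSimpleConstant64, declared above, asks whether one StoreL of a 64-bit
constant beats two StoreI halves; the answer is CPU-specific and supplied by
each port's AD file.  A plausible x86-flavored sketch, offered as an assumption
rather than any port's actual rule: a constant is simple when a single 32-bit
immediate sign-extends to it, and zero trivially qualifies:

    #include <cstdint>

    bool is_simple_constant64(std::int64_t con) {
        // Cheap to materialize if the low 32 bits sign-extend to the value.
        return con == static_cast<std::int64_t>(static_cast<std::int32_t>(con));
    }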
+@@ -251,7 +253,7 @@
+     return stack_alignment_in_bytes() / (VMRegImpl::stack_slot_size);
+   }
+ 
+-  // Array mapping arguments to registers.  Argument 0 is usually the 'this' 
++  // Array mapping arguments to registers.  Argument 0 is usually the 'this'
+   // pointer.  Registers can include stack-slots and regular registers.
+   static void calling_convention( BasicType *, VMRegPair *, uint len, bool is_outgoing );
+ 
+@@ -284,7 +286,7 @@
+   // Java-Interpreter calling convention
+   // (what you use when calling between compiled-Java and Interpreted-Java
+ 
+-  // Number of callee-save + always-save registers 
++  // Number of callee-save + always-save registers
+   // Ignores frame pointer and "special" registers
+   static int  number_of_saved_registers();
+ 
+@@ -303,10 +305,10 @@
+   static OptoReg::Name  interpreter_frame_pointer_reg();
+   static const RegMask &interpreter_frame_pointer_reg_mask();
+ 
+-  // Java-Native calling convention 
++  // Java-Native calling convention
+   // (what you use when intercalling between Java and C++ code)
+ 
+-  // Array mapping arguments to registers.  Argument 0 is usually the 'this' 
++  // Array mapping arguments to registers.  Argument 0 is usually the 'this'
+   // pointer.  Registers can include stack-slots and regular registers.
+   static void c_calling_convention( BasicType*, VMRegPair *, uint );
+   // Frame pointer. The frame pointer is kept at the base of the stack
+@@ -321,11 +323,18 @@
+   // Is this branch offset small enough to be addressed by a short branch?
+   bool is_short_branch_offset(int offset);
+ 
++  // Optional scaling for the parameter to the ClearArray/CopyArray node.
++  static const bool init_array_count_is_in_bytes;
++
++  // Threshold small size (in bytes) for a ClearArray/CopyArray node.
++  // Anything this size or smaller may get converted to discrete scalar stores.
++  static const int init_array_short_size;
++
+   // Should the Matcher clone shifts on addressing modes, expecting them to
+   // be subsumed into complex addressing expressions or compute them into
+   // registers?  True for Intel but false for most RISCs
+   static const bool clone_shift_expressions;
+-  
++
+   // Is it better to copy float constants, or load them directly from memory?
+   // Intel can load a float constant from a direct address, requiring no
+   // extra registers.  Most RISCs will have to materialize an address into a
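
The two new Matcher constants above steer ClearArray/CopyArray expansion:
init_array_count_is_in_bytes says whether the node's count is scaled in bytes
or elements on this port, and anything at or below init_array_short_size may be
converted to discrete scalar stores rather than a clear loop.  A sketch of a
consumer of these knobs; the values are placeholders, not any port's real
settings:

    #include <cstddef>
    #include <cstring>

    const bool init_array_count_is_in_bytes = false;  // port-specific
    const int  init_array_short_size        = 64;     // bytes; placeholder

    // Clear `count` units at p, scalarizing short clears as the matcher would.
    void clear_array(char* p, long count, int unit_bytes) {
        long bytes = init_array_count_is_in_bytes ? count : count * unit_bytes;
        if (bytes <= init_array_short_size) {
            for (long i = 0; i < bytes; i++)  // stands in for the discrete
                p[i] = 0;                     // scalar stores
        } else {
            std::memset(p, 0, static_cast<std::size_t>(bytes));
        }
    }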
+diff -ruN openjdk6/hotspot/src/share/vm/opto/memnode.cpp openjdk/hotspot/src/share/vm/opto/memnode.cpp
+--- openjdk6/hotspot/src/share/vm/opto/memnode.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/memnode.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)memnode.cpp	1.237 07/05/17 15:59:18 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Portions of code courtesy of Clifford Click
+@@ -43,7 +40,7 @@
+ }
+ 
+ #ifndef PRODUCT
+-void MemNode::dump_spec() const {
++void MemNode::dump_spec(outputStream *st) const {
+   if (in(Address) == NULL)  return; // node is dead
+ #ifndef ASSERT
+   // fake the missing field
+@@ -51,37 +48,37 @@
+   if (in(Address) != NULL)
+     _adr_type = in(Address)->bottom_type()->isa_ptr();
+ #endif
+-  dump_adr_type(this, _adr_type);
++  dump_adr_type(this, _adr_type, st);
+ 
+   Compile* C = Compile::current();
+   if( C->alias_type(_adr_type)->is_volatile() )
+-    tty->print(" Volatile!");
++    st->print(" Volatile!");
+ }
+ 
+-void MemNode::dump_adr_type(const Node* mem, const TypePtr* adr_type) {
+-  tty->print(" @");
++void MemNode::dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st) {
++  st->print(" @");
+   if (adr_type == NULL) {
+-    tty->print("NULL");
++    st->print("NULL");
+   } else {
+-    adr_type->dump();
++    adr_type->dump_on(st);
+     Compile* C = Compile::current();
+     Compile::AliasType* atp = NULL;
+     if (C->have_alias_type(adr_type))  atp = C->alias_type(adr_type);
+     if (atp == NULL)
+-      tty->print(", idx=?\?;");
++      st->print(", idx=?\?;");
+     else if (atp->index() == Compile::AliasIdxBot)
+-      tty->print(", idx=Bot;");
++      st->print(", idx=Bot;");
+     else if (atp->index() == Compile::AliasIdxTop)
+-      tty->print(", idx=Top;");
++      st->print(", idx=Top;");
+     else if (atp->index() == Compile::AliasIdxRaw)
+-      tty->print(", idx=Raw;");
++      st->print(", idx=Raw;");
+     else {
+       ciField* field = atp->field();
+       if (field) {
+-        tty->print(", name=");
+-        field->print_name_on(tty);
++        st->print(", name=");
++        field->print_name_on(st);
+       }
+-      tty->print(", idx=%d;", atp->index());
++      st->print(", idx=%d;", atp->index());
+     }
+   }
+ }
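
This memnode.cpp hunk threads an explicit outputStream* through dump_spec and
dump_adr_type instead of writing to the global tty, so the same dump code can
target any sink.  The same refactor in portable C++ terms, with std::ostream
standing in for outputStream and std::cout for tty:

    #include <iostream>

    // Before: the dump was hard-wired to one global stream.  After: the
    // caller chooses the destination, defaulting to the old behavior.
    void dump_spec(std::ostream& st = std::cout) {
        st << " @idx=Raw;";  // illustrative payload only
    }

    int main() {
        dump_spec();           // legacy behavior (tty's analogue: stdout)
        dump_spec(std::cerr);  // same dump, redirected
    }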
+@@ -96,7 +93,7 @@
+ Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
+   // If our control input is a dead region, kill all below the region
+   Node *ctl = in(MemNode::Control);
+-  if (ctl && remove_dead_region(phase, can_reshape)) 
++  if (ctl && remove_dead_region(phase, can_reshape))
+     return this;
+ 
+   // Ignore if memory is dead, or self-loop
+@@ -111,9 +108,23 @@
+   // Avoid independent memory operations
+   Node* old_mem = mem;
+ 
++  if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
++    InitializeNode* init = mem->in(0)->as_Initialize();
++    if (init->is_complete()) {  // i.e., after macro expansion
++      const TypePtr* tp = t_adr->is_ptr();
++      uint alias_idx = phase->C->get_alias_index(tp);
++      // Free this slice from the init.  It was hooked, temporarily,
++      // by GraphKit::set_output_for_allocation.
++      if (alias_idx > Compile::AliasIdxRaw) {
++        mem = init->memory(alias_idx);
++        // ...but not with the raw-pointer slice.
++      }
++    }
++  }
++
+   if (mem->is_MergeMem()) {
+     MergeMemNode* mmem = mem->as_MergeMem();
+-    const TypePtr *tp = t_adr->is_ptr(); 
++    const TypePtr *tp = t_adr->is_ptr();
+     uint alias_idx = phase->C->get_alias_index(tp);
+ #ifdef ASSERT
+     {
+@@ -122,9 +133,9 @@
+       const TypePtr *adr_t =  adr_type();
+       bool consistent =  adr_t == NULL || adr_t->empty() || phase->C->must_alias(adr_t, alias_idx );
+       // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
+-      if( !consistent && adr_t != NULL && !adr_t->empty() && 
++      if( !consistent && adr_t != NULL && !adr_t->empty() &&
+              tp->isa_aryptr() &&    tp->offset() == Type::OffsetBot &&
+-          adr_t->isa_aryptr() && adr_t->offset() != Type::OffsetBot && 
++          adr_t->isa_aryptr() && adr_t->offset() != Type::OffsetBot &&
+           ( adr_t->offset() == arrayOopDesc::length_offset_in_bytes() ||
+             adr_t->offset() == oopDesc::klass_offset_in_bytes() ||
+             adr_t->offset() == oopDesc::mark_offset_in_bytes() ) ) {
+@@ -167,6 +178,65 @@
+   return NULL;
+ }
+ 
++// Helper function for proving some simple control dominations.
++// Attempt to prove that control input 'dom' dominates (or equals) 'sub'.
++// Already assumes that 'dom' is available at 'sub', and that 'sub'
++// is not a constant (dominated by the method's StartNode).
++// Used by MemNode::find_previous_store to prove that the
++// control input of a memory operation predates (dominates)
++// an allocation it wants to look past.
++bool MemNode::detect_dominating_control(Node* dom, Node* sub) {
++  if (dom == NULL)      return false;
++  if (dom->is_Proj())   dom = dom->in(0);
++  if (dom->is_Start())  return true; // anything inside the method
++  if (dom->is_Root())   return true; // dom 'controls' a constant
++  int cnt = 20;                      // detect cycle or too much effort
++  while (sub != NULL) {              // walk 'sub' up the chain to 'dom'
++    if (--cnt < 0)   return false;   // in a cycle or too complex
++    if (sub == dom)  return true;
++    if (sub->is_Start())  return false;
++    if (sub->is_Root())   return false;
++    Node* up = sub->in(0);
++    if (sub == up && sub->is_Region()) {
++      for (uint i = 1; i < sub->req(); i++) {
++        Node* in = sub->in(i);
++        if (in != NULL && !in->is_top() && in != sub) {
++          up = in; break;            // take any path on the way up to 'dom'
++        }
++      }
++    }
++    if (sub == up)  return false;    // some kind of tight cycle
++    sub = up;
++  }
++  return false;
++}
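
The bounded walk above is the whole trick: step from 'sub' to its control
input (taking any non-self path out of a Region), succeed on meeting 'dom'
or the method start, and give up after a fixed budget so cycles and very
deep graphs cost nothing. A minimal stand-alone sketch of that pattern over
a toy node type; TNode and dominates are hypothetical, not HotSpot API:

    struct TNode {
      TNode* ctrl;                       // in(0): immediate control input
      bool   is_start;                   // method entry; dominates everything
    };

    // Walk at most 20 control edges up from 'sub' looking for 'dom',
    // mirroring detect_dominating_control's effort limit.
    bool dominates(TNode* dom, TNode* sub) {
      if (dom == nullptr)  return false;
      if (dom->is_start)   return true;  // anything inside the method
      for (int budget = 20; sub != nullptr; --budget) {
        if (budget < 0)    return false; // cycle or too much effort
        if (sub == dom)    return true;
        if (sub->is_start) return false; // reached the top without meeting dom
        TNode* up = sub->ctrl;
        if (up == sub)     return false; // some kind of tight cycle
        sub = up;
      }
      return false;
    }
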
++
++//---------------------detect_ptr_independence---------------------------------
++// Used by MemNode::find_previous_store to prove that two base
++// pointers are never equal.
++// The pointers are accompanied by their associated allocations,
++// if any, which have been previously discovered by the caller.
++bool MemNode::detect_ptr_independence(Node* p1, AllocateNode* a1,
++                                      Node* p2, AllocateNode* a2,
++                                      PhaseTransform* phase) {
++  // Attempt to prove that these two pointers cannot be aliased.
++  // They may both manifestly be allocations, and they should differ.
++  // Or, if they are not both allocations, they can be distinct constants.
++  // Otherwise, one is an allocation and the other a pre-existing value.
++  if (a1 == NULL && a2 == NULL) {           // neither an allocation
++    return (p1 != p2) && p1->is_Con() && p2->is_Con();
++  } else if (a1 != NULL && a2 != NULL) {    // both allocations
++    return (a1 != a2);
++  } else if (a1 != NULL) {                  // one allocation a1
++    // (Note:  p2->is_Con implies p2->in(0)->is_Root, which dominates.)
++    return detect_dominating_control(p2->in(0), a1->in(0));
++  } else { //(a2 != NULL)                   // one allocation a2
++    return detect_dominating_control(p1->in(0), a2->in(0));
++  }
++  return false;
++}
++
++
+ // The logic for reordering loads and stores uses four steps:
+ // (a) Walk carefully past stores and initializations which we
+ //     can prove are independent of this load.
+@@ -218,6 +288,15 @@
+           continue;           // (a) advance through independent store memory
+         }
+       }
++      if (st_base != base &&
++          detect_ptr_independence(base, alloc,
++                                  st_base,
++                                  AllocateNode::Ideal_allocation(st_base, phase),
++                                  phase)) {
++        // Success:  The bases are provably independent.
++        mem = mem->in(MemNode::Memory);
++        continue;           // (a) advance through independent store memory
++      }
+ 
+       // (b) At this point, if the bases or offsets do not agree, we lose,
+       // since we have not managed to prove 'this' and 'mem' independent.
+@@ -225,6 +304,41 @@
+         return mem;         // let caller handle steps (c), (d)
+       }
+ 
++    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
++      InitializeNode* st_init = mem->in(0)->as_Initialize();
++      AllocateNode*  st_alloc = st_init->allocation();
++      if (st_alloc == NULL)
++        break;              // something degenerated
++      bool known_identical = false;
++      bool known_independent = false;
++      if (alloc == st_alloc)
++        known_identical = true;
++      else if (alloc != NULL)
++        known_independent = true;
++      else if (ctrl != NULL &&
++               detect_dominating_control(ctrl, st_alloc->in(0)))
++        known_independent = true;
++
++      if (known_independent) {
++        // The bases are provably independent: Either they are
++        // manifestly distinct allocations, or else the control
++        // of this load dominates the store's allocation.
++        int alias_idx = phase->C->get_alias_index(adr_type());
++        if (alias_idx == Compile::AliasIdxRaw) {
++          mem = st_alloc->in(TypeFunc::Memory);
++        } else {
++          mem = st_init->memory(alias_idx);
++        }
++        continue;           // (a) advance through independent store memory
++      }
++
++      // (b) at this point, if we are not looking at a store initializing
++      // the same allocation we are loading from, we lose.
++      if (known_identical) {
++        // From caller, can_see_stored_value will consult find_captured_store.
++        return mem;         // let caller handle steps (c), (d)
++      }
++
+     }
+ 
+     // Unless there is an explicit 'continue', we must bail out here,
+@@ -273,17 +387,17 @@
+ }
+ 
+ //------------------------adr_phi_is_loop_invariant----------------------------
+-// A helper function for Ideal_DU_postCCP to check if a Phi in a counted 
+-// loop is loop invariant. Make a quick traversal of Phi and associated 
++// A helper function for Ideal_DU_postCCP to check if a Phi in a counted
++// loop is loop invariant. Make a quick traversal of Phi and associated
+ // CastPP nodes, looking to see if they are a closed group within the loop.
+ bool MemNode::adr_phi_is_loop_invariant(Node* adr_phi, Node* cast) {
+   // The idea is that the phi-nest must boil down to only CastPP nodes
+-  // with the same data. This implies that any path into the loop already 
+-  // includes such a CastPP, and so the original cast, whatever its input, 
++  // with the same data. This implies that any path into the loop already
++  // includes such a CastPP, and so the original cast, whatever its input,
+   // must be covered by an equivalent cast, with an earlier control input.
+   ResourceMark rm;
+ 
+-  // The loop entry input of the phi should be the unique dominating 
++  // The loop entry input of the phi should be the unique dominating
+   // node for every Phi/CastPP in the loop.
+   Unique_Node_List closure;
+   closure.push(adr_phi->in(LoopNode::EntryControl));
+@@ -343,7 +457,7 @@
+   Node *mem = in(MemNode::Memory);
+   Node *adr = in(MemNode::Address);
+   Node *skipped_cast = NULL;
+-  // Need a null check?  Regular static accesses do not because they are 
++  // Need a null check?  Regular static accesses do not because they are
+   // from constant addresses.  Array ops are gated by the range check (which
+   // always includes a NULL check).  Just check field ops.
+   if( !ctr ) {
+@@ -371,7 +485,7 @@
+         set_req(MemNode::Control, adr->in(0));
+         ccp->hash_insert(this);
+         return this;
+-        
++
+       case Op_Phi:
+         // Attempt to float above a Phi to some dominating point.
+         if (adr->in(0) != NULL && adr->in(0)->is_CountedLoop()) {
+@@ -394,8 +508,8 @@
+         // These usually stick around to change address type, however a
+         // useless one can be elided and we still need to pick up a control edge
+         if (adr->in(0) == NULL) {
+-          // This CheckCastPP node has NO control and is likely useless. But we 
+-          // need check further up the ancestor chain for a control input to keep 
++          // This CheckCastPP node has NO control and is likely useless. But we
++          // need to check further up the ancestor chain for a control input to keep
+           // the node in place. 4959717.
+           skipped_cast = adr;
+           adr = adr->in(1);
+@@ -405,7 +519,7 @@
+         set_req(MemNode::Control, adr->in(0));
+         ccp->hash_insert(this);
+         return this;
+-        
++
+         // List of "safe" opcodes; those that implicitly block the memory
+         // op below any null check.
+       case Op_CastX2P:          // no null checks on native pointers
+@@ -455,16 +569,16 @@
+ uint LoadNode::cmp( const Node &n ) const
+ { return !Type::cmp( _type, ((LoadNode&)n)._type ); }
+ const Type *LoadNode::bottom_type() const { return _type; }
+-uint LoadNode::ideal_reg() const { 
++uint LoadNode::ideal_reg() const {
+   return Matcher::base2reg[_type->base()];
+ }
+ 
+ #ifndef PRODUCT
+-void LoadNode::dump_spec() const { 
+-  MemNode::dump_spec();
++void LoadNode::dump_spec(outputStream *st) const {
++  MemNode::dump_spec(st);
+   if( !Verbose && !WizardMode ) {
+     // standard dump does this in Verbose and WizardMode
+-    tty->print(" #"); _type->dump();
++    st->print(" #"); _type->dump_on(st);
+   }
+ }
+ #endif
+@@ -520,16 +634,65 @@
+ Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const {
+   Node* ld_adr = in(MemNode::Address);
+ 
+-  if (st->is_Store()) {
+-    Node* st_adr = st->in(MemNode::Address);
+-    if (!phase->eqv(st_adr, ld_adr)) {
+-      return NULL;
+-    }
+-    // Now prove that we have a LoadQ matched to a StoreQ, for some Q.
+-    if (store_Opcode() != st->Opcode()) {
+-      return NULL;
++  // Loop around twice in the case Load -> Initialize -> Store.
++  // (See PhaseIterGVN::add_users_to_worklist, which knows about this case.)
++  for (int trip = 0; trip <= 1; trip++) {
++
++    if (st->is_Store()) {
++      Node* st_adr = st->in(MemNode::Address);
++      if (!phase->eqv(st_adr, ld_adr)) {
++        // Try harder before giving up...  Match raw and non-raw pointers.
++        intptr_t st_off = 0;
++        AllocateNode* alloc = AllocateNode::Ideal_allocation(st_adr, phase, st_off);
++        if (alloc == NULL)       return NULL;
++        intptr_t ld_off = 0;
++        AllocateNode* allo2 = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
++        if (alloc != allo2)      return NULL;
++        if (ld_off != st_off)    return NULL;
++        // At this point we have proven something like this setup:
++        //  A = Allocate(...)
++        //  L = LoadQ(,  AddP(CastPP(, A.Parm),, #Off))
++        //  S = StoreQ(, AddP(,        A.Parm  , #Off), V)
++        // (Actually, we haven't yet proven the Q's are the same.)
++        // In other words, we are loading from a casted version of
++        // the same pointer-and-offset that we stored to.
++        // Thus, we are able to replace L by V.
++      }
++      // Now prove that we have a LoadQ matched to a StoreQ, for some Q.
++      if (store_Opcode() != st->Opcode())
++        return NULL;
++      return st->in(MemNode::ValueIn);
++    }
++
++    intptr_t offset = 0;  // scratch
++
++    // A load from a freshly-created object always returns zero.
++    // (This can happen after LoadNode::Ideal resets the load's memory input
++    // to find_captured_store, which returned InitializeNode::zero_memory.)
++    if (st->is_Proj() && st->in(0)->is_Allocate() &&
++        st->in(0) == AllocateNode::Ideal_allocation(ld_adr, phase, offset) &&
++        offset >= st->in(0)->as_Allocate()->minimum_header_size()) {
++      // return a zero value for the load's basic type
++      // (This is one of the few places where a generic PhaseTransform
++      // can create new nodes.  Think of it as lazily manifesting
++      // virtually pre-existing constants.)
++      return phase->zerocon(memory_type());
++    }
++
++    // A load from an initialization barrier can match a captured store.
++    if (st->is_Proj() && st->in(0)->is_Initialize()) {
++      InitializeNode* init = st->in(0)->as_Initialize();
++      AllocateNode* alloc = init->allocation();
++      if (alloc != NULL &&
++          alloc == AllocateNode::Ideal_allocation(ld_adr, phase, offset)) {
++        // examine a captured store value
++        st = init->find_captured_store(offset, memory_size(), phase);
++        if (st != NULL)
++          continue;             // take one more trip around
++      }
+     }
+-    return st->in(MemNode::ValueIn);
++
++    break;
+   }
+ 
+   return NULL;
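
The new two-trip loop is the heart of this hunk: a direct store answers
immediately (after the harder raw/non-raw address match), a projection of a
fresh Allocate answers zero, and an Initialize barrier may hand back a
captured store which is then examined on the second trip. A minimal model
of just that control flow, with toy record types standing in for the IR
nodes (everything here is hypothetical, not HotSpot API):

    #include <optional>

    struct Store { long addr; int value; };
    struct Init  { const Store* captured; };   // an initialization barrier

    // Resolve the value a load at 'ld_addr' would see, looping at most
    // twice for the Load -> Initialize -> captured Store case.
    std::optional<int> can_see(const Store* st, const Init* init, long ld_addr) {
      for (int trip = 0; trip <= 1; trip++) {
        if (st != nullptr) {
          if (st->addr != ld_addr) return std::nullopt;
          return st->value;                    // LoadQ matched to a StoreQ
        }
        if (init != nullptr && init->captured != nullptr) {
          st = init->captured;                 // take one more trip around
          continue;
        }
        break;
      }
      return std::nullopt;                     // nothing visible
    }
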
+@@ -563,6 +726,8 @@
+ //------------------------------Ideal------------------------------------------
+ // If the load is from Field memory and the pointer is non-null, we can
+ // zero out the control input.
++// If the offset is constant and the base is an object allocation,
++// try to hook me up to the exact initializing store.
+ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+   Node* p = MemNode::Ideal_common(phase, can_reshape);
+   if (p)  return (p == NodeSentinel) ? NULL : p;
+@@ -572,22 +737,25 @@
+ 
+   // Skip up past a SafePoint control.  Cannot do this for Stores because
+   // pointer stores & cardmarks must stay on the same side of a SafePoint.
+-  if( ctrl != NULL && ctrl->Opcode() == Op_SafePoint && 
++  if( ctrl != NULL && ctrl->Opcode() == Op_SafePoint &&
+       phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw ) {
+     ctrl = ctrl->in(0);
+     set_req(MemNode::Control,ctrl);
+   }
+ 
+-  // Check for useless memory edge in some common special cases
+-  if( in(MemNode::Control) ) {
+-    Node *adr = address->is_AddP() ? address->in(AddPNode::Base) : address;
+-    if( adr->is_Proj() && adr->as_Proj()->_con == TypeFunc::Parms && 
+-        adr->in(0)->is_Start() && phase->type(adr)->is_ptr()->_ptr == TypePtr::NotNull ) {
++  // Check for useless control edge in some common special cases
++  if (in(MemNode::Control) != NULL) {
++    intptr_t ignore = 0;
++    Node*    base   = AddPNode::Ideal_base_and_offset(address, phase, ignore);
++    if (base != NULL
++        && phase->type(base)->higher_equal(TypePtr::NOTNULL)
++        && detect_dominating_control(base->in(0), phase->C->start())) {
++      // A method-invariant, non-null address (constant or 'this' argument).
+       set_req(MemNode::Control, NULL);
+     }
+   }
+ 
+-  // Check for prior array store with a different offset; make Load
++  // Check for prior store with a different base or offset; make Load
+   // independent.  Skip through any number of them.  Bail out if the stores
+   // are in an endless dead cycle and report no progress.  This is a key
+   // transform for Reflection.  However, if after skipping through the Stores
+@@ -658,7 +826,7 @@
+   if (tp->base() == Type::AryPtr) {
+     const Type *t = tp->is_aryptr()->elem();
+     // Don't do this for integer types. There is only potential profit if
+-    // the element type t is lower than _type; that is, for int types, if _type is 
++    // the element type t is lower than _type; that is, for int types, if _type is
+     // more restrictive than t.  This only happens here if one is short and the other
+     // char (both 16 bits), and in those cases we've made an intentional decision
+     // to use one kind of load over the other. See AndINode::Ideal and 4965907.
+@@ -707,7 +875,7 @@
+             tp->is_klassptr()->klass()->is_java_lang_Object() ||
+             // also allow array-loading from the primary supertype
+             // array during subtype checks
+-            Opcode() == Op_LoadKlass, 
++            Opcode() == Op_LoadKlass,
+             "Field accesses must be precise" );
+     // For klass/static loads, we expect the _type to be precise
+   }
+@@ -794,7 +962,14 @@
+     }
+   }
+ 
+-  // (If loading from a freshly-allocated object, could produce zero here.)
++  // If we are loading from a freshly-allocated object, produce a zero,
++  // if the load is provably beyond the header of the object.
++  // (Also allow a variable load from a fresh array to produce zero.)
++  if (ReduceFieldZeroing) {
++    Node* value = can_see_stored_value(mem,phase);
++    if (value != NULL && value->is_Con())
++      return value->bottom_type();
++  }
+ 
+   return _type;
+ }
+@@ -833,7 +1008,7 @@
+ Node *LoadCNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+   Node* mem = in(MemNode::Memory);
+   Node* value = can_see_stored_value(mem,phase);
+-  if( value && !phase->type(value)->higher_equal( _type ) ) 
++  if( value && !phase->type(value)->higher_equal( _type ) )
+     return new (phase->C, 3) AndINode(value,phase->intcon(0xFFFF));
+   // Identity call will handle the case where truncation is not needed.
+   return LoadNode::Ideal(phase, can_reshape);
+@@ -966,7 +1141,7 @@
+     if( klass->is_obj_array_klass() &&
+         (uint)tkls->offset() == objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)) {
+       ciKlass* elem = klass->as_obj_array_klass()->element_klass();
+-      // // Always returning precise element type is incorrect, 
++      // // Always returning precise element type is incorrect,
+       // // e.g., element type could be object and array may contain strings
+       // return TypeKlassPtr::make(TypePtr::Constant, elem, 0);
+ 
+@@ -974,6 +1149,13 @@
+       // according to the element type's subclassing.
+       return TypeKlassPtr::make(tkls->ptr(), elem, 0/*offset*/);
+     }
++    if( klass->is_instance_klass() && tkls->klass_is_exact() &&
++        (uint)tkls->offset() == Klass::super_offset_in_bytes() + sizeof(oopDesc)) {
++      ciKlass* sup = klass->as_instance_klass()->super();
++      // The field is Klass::_super.  Return its (constant) value.
++      // (Folds up the 2nd indirection in aClassConstant.getSuperClass().)
++      return sup ? TypeKlassPtr::make(sup) : TypePtr::NULL_PTR;
++    }
+   }
+ 
+   // Bailout case
+@@ -1082,8 +1264,8 @@
+   }
+ 
+   return this;
+-}
+ 
++}
+ //=============================================================================
+ //---------------------------StoreNode::make-----------------------------------
+ // Polymorphic factory method:
+@@ -1126,6 +1308,8 @@
+ 
+ //------------------------------Ideal------------------------------------------
+ // Change back-to-back Store(, p, x) -> Store(m, p, y) to Store(m, p, x).
++// When a store immediately follows a relevant allocation/initialization,
++// try to capture it into the initialization, or hoist it above.
+ Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+   Node* p = MemNode::Ideal_common(phase, can_reshape);
+   if (p)  return (p == NodeSentinel) ? NULL : p;
+@@ -1135,7 +1319,7 @@
+ 
+   // Back-to-back stores to same address?  Fold em up.
+   // Generally unsafe if I have intervening uses...
+-  if (can_reshape && mem->is_Store() && phase->eqv( mem->in(MemNode::Address), address )) {
++  if (mem->is_Store() && phase->eqv_uncast(mem->in(MemNode::Address), address)) {
+     // Looking at a dead closed cycle of memory?
+     assert(mem != mem->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
+ 
+@@ -1163,6 +1347,24 @@
+     }
+   }
+ 
++  // Capture an unaliased, unconditional, simple store into an initializer.
++  // Or, if it is independent of the allocation, hoist it above the allocation.
++  if (ReduceFieldZeroing && /*can_reshape &&*/
++      mem->is_Proj() && mem->in(0)->is_Initialize()) {
++    InitializeNode* init = mem->in(0)->as_Initialize();
++    intptr_t offset = init->can_capture_store(this, phase);
++    if (offset > 0) {
++      Node* moved = init->capture_store(this, offset, phase);
++      // If the InitializeNode captured me, it made a raw copy of me,
++      // and I need to disappear.
++      if (moved != NULL) {
++        // %%% hack to ensure that Ideal returns a new node:
++        mem = MergeMemNode::make(phase->C, mem);
++        return mem;             // fold me away
++      }
++    }
++  }
++
+   return NULL;                  // No further progress
+ }
+ 
+@@ -1181,6 +1383,7 @@
+ //------------------------------Identity---------------------------------------
+ // Remove redundant stores:
+ //   Store(m, p, Load(m, p)) changes to m.
++//   Store(, p, x) -> Store(m, p, x) changes to Store(m, p, x).
+ Node *StoreNode::Identity( PhaseTransform *phase ) {
+   Node* mem = in(MemNode::Memory);
+   Node* adr = in(MemNode::Address);
+@@ -1188,12 +1391,42 @@
+ 
+   // Load then Store?  Then the Store is useless
+   if (val->is_Load() &&
+-      val->as_Load()->memory_size() == this->memory_size() &&
+-      phase->eqv( val->in(MemNode::Address), adr ) &&
+-      phase->eqv( val->in(MemNode::Memory ), mem )) {
++      phase->eqv_uncast( val->in(MemNode::Address), adr ) &&
++      phase->eqv_uncast( val->in(MemNode::Memory ), mem ) &&
++      val->as_Load()->store_Opcode() == Opcode()) {
+     return mem;
+   }
+ 
++  // Two stores in a row of the same value?
++  if (mem->is_Store() &&
++      phase->eqv_uncast( mem->in(MemNode::Address), adr ) &&
++      phase->eqv_uncast( mem->in(MemNode::ValueIn), val ) &&
++      mem->Opcode() == Opcode()) {
++    return mem;
++  }
++
++  // Store of zero anywhere into a freshly-allocated object?
++  // Then the store is useless.
++  // (It must already have been captured by the InitializeNode.)
++  if (ReduceFieldZeroing && phase->type(val)->is_zero_type()) {
++    // a newly allocated object is already all-zeroes everywhere
++    if (mem->is_Proj() && mem->in(0)->is_Allocate()) {
++      return mem;
++    }
++
++    // the store may also apply to zero-bits in an earlier object
++    Node* prev_mem = find_previous_store(phase);
++    // Steps (a), (b):  Walk past independent stores to find an exact match.
++    if (prev_mem != NULL) {
++      Node* prev_val = can_see_stored_value(prev_mem, phase);
++      if (prev_val != NULL && phase->eqv(prev_val, val)) {
++        // prev_val and val might differ by a cast; it would be good
++        // to keep the more informative of the two.
++        return mem;
++      }
++    }
++  }
++
+   return this;
+ }
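
All three tests added to Identity reduce a store to its own memory input
when it provably changes nothing: a store of the value just loaded, a
second store of the same value to the same address, and a store of zero
into freshly allocated (already zeroed) memory. The middle rule in
isolation, over a toy store record with eqv_uncast approximated by plain
equality (a sketch, not HotSpot API):

    struct StoreRec {
      long            addr;
      int             val;
      const StoreRec* prev;   // the memory input: the store before this one
    };

    // "Two stores in a row of the same value?"  Then the later one is
    // redundant and its Identity is its own memory input.
    const StoreRec* identity(const StoreRec* st) {
      const StoreRec* mem = st->prev;
      if (mem != nullptr && mem->addr == st->addr && mem->val == st->val)
        return mem;           // fold the second store away
      return st;              // no progress
    }
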
+ 
+@@ -1206,14 +1439,14 @@
+ //------------------------------cmp--------------------------------------------
+ // Do not common stores up together.  They generally have to be split
+ // back up anyways, so do not bother.
+-uint StoreNode::cmp( const Node &n ) const { 
++uint StoreNode::cmp( const Node &n ) const {
+   return (&n == this);          // Always fail except on self
+ }
+ 
+ //------------------------------Ideal_masked_input-----------------------------
+ // Check for a useless mask before a partial-word store
+ // (StoreB ... (AndI valIn conIa) )
+-// If (conIa & mask == mask) this simplifies to   
++// If (conIa & mask == mask) this simplifies to
+ // (StoreB ... (valIn) )
+ Node *StoreNode::Ideal_masked_input(PhaseGVN *phase, uint mask) {
+   Node *val = in(MemNode::ValueIn);
+@@ -1369,16 +1602,20 @@
+ //------------------------------Idealize---------------------------------------
+ // Clearing a short array is faster with stores
+ Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape){
+-  const TypeInt *t = phase->type(in(2))->isa_int();
+-  if( !t ) return NULL;
+-  if( !t->is_con() ) return NULL;
+-  int con = t->get_con();       // Length is in doublewords
+-  // Length too long; use fast hardware clear
+-  if( con > 8 ) return NULL;
++  const int unit = BytesPerLong;
++  const TypeX* t = phase->type(in(2))->isa_intptr_t();
++  if (!t)  return NULL;
++  if (!t->is_con())  return NULL;
++  intptr_t raw_count = t->get_con();
++  intptr_t size = raw_count;
++  if (!Matcher::init_array_count_is_in_bytes) size *= unit;
+   // Clearing nothing uses the Identity call.
+   // Negative clears are possible on dead ClearArrays
+   // (see jck test stmt114.stmt11402.val).
+-  if( con <= 0 ) return NULL;
++  if (size <= 0 || size % unit != 0)  return NULL;
++  intptr_t count = size / unit;
++  // Length too long; use fast hardware clear
++  if (size > Matcher::init_array_short_size)  return NULL;
+   Node *mem = in(1);
+   if( phase->type(mem)==Type::TOP ) return NULL;
+   Node *adr = in(3);
+@@ -1395,8 +1632,8 @@
+   Node *zero = phase->makecon(TypeLong::ZERO);
+   Node *off  = phase->MakeConX(BytesPerLong);
+   mem = new (phase->C, 4) StoreLNode(in(0),mem,adr,atp,zero);
+-  con--;
+-  while( con-- ) {
++  count--;
++  while( count-- ) {
+     mem = phase->transform(mem);
+     adr = phase->transform(new (phase->C, 4) AddPNode(base,adr,off));
+     mem = new (phase->C, 4) StoreLNode(in(0),mem,adr,atp,zero);
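
The rewritten length test works in bytes rather than doublewords: the
incoming count is scaled up when the platform counts in words, rejected
unless it is a positive multiple of eight, and compared against a platform
threshold before being unrolled into individual 64-bit zero stores. Just
that decision as a runnable sketch; kCountIsInBytes and kShortSize are
hypothetical stand-ins for the two Matcher fields used above:

    #include <cstdint>
    #include <cstdio>

    const int      kBytesPerLong   = 8;
    const bool     kCountIsInBytes = false;  // Matcher::init_array_count_is_in_bytes
    const intptr_t kShortSize      = 64;     // Matcher::init_array_short_size (bytes)

    // Returns how many StoreL(0)s to emit, or -1 to keep the ClearArray
    // (too long, empty, or not a whole number of doublewords).
    intptr_t unrolled_clear_count(intptr_t raw_count) {
      intptr_t size = raw_count;
      if (!kCountIsInBytes) size *= kBytesPerLong;          // scale words to bytes
      if (size <= 0 || size % kBytesPerLong != 0) return -1;
      if (size > kShortSize) return -1;                     // use the bulk clear
      return size / kBytesPerLong;
    }

    int main() {
      printf("%ld\n", (long)unrolled_clear_count(4));    // 4 words -> 4 stores
      printf("%ld\n", (long)unrolled_clear_count(100));  // 800 bytes -> -1
    }
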
+@@ -1414,8 +1651,7 @@
+   intptr_t offset = start_offset;
+ 
+   int unit = BytesPerLong;
+-
+-  if (unit == BytesPerLong && (offset % BytesPerLong) != 0) {
++  if ((offset % unit) != 0) {
+     Node* adr = new (C, 4) AddPNode(dest, dest, phase->MakeConX(offset));
+     adr = phase->transform(adr);
+     const TypePtr* atp = TypeRawPtr::BOTTOM;
+@@ -1426,20 +1662,60 @@
+   assert((offset % unit) == 0, "");
+ 
+   // Initialize the remaining stuff, if any, with a ClearArray.
+-  Node* zbase = phase->MakeConX(offset);
+-  Node* zsize = phase->transform( new (C, 3) SubXNode(end_offset, zbase) );
+-  Node* zinit = phase->zerocon((unit == BytesPerLong) ? T_LONG : T_INT);
++  return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, phase);
++}
++
++Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
++                                   Node* start_offset,
++                                   Node* end_offset,
++                                   PhaseGVN* phase) {
++  Compile* C = phase->C;
++  int unit = BytesPerLong;
++  Node* zbase = start_offset;
++  Node* zend  = end_offset;
+ 
+   // Scale to the unit required by the CPU:
+-  Node* shift = phase->intcon(exact_log2(unit));
+-  zsize = phase->transform( new (C, 3) URShiftXNode(zsize, shift) );
++  if (!Matcher::init_array_count_is_in_bytes) {
++    Node* shift = phase->intcon(exact_log2(unit));
++    zbase = phase->transform( new(C,3) URShiftXNode(zbase, shift) );
++    zend  = phase->transform( new(C,3) URShiftXNode(zend,  shift) );
++  }
++
++  Node* zsize = phase->transform( new(C,3) SubXNode(zend, zbase) );
++  Node* zinit = phase->zerocon((unit == BytesPerLong) ? T_LONG : T_INT);
+ 
+   // Bulk clear double-words
+-  Node* adr = phase->transform( new (C, 4) AddPNode(dest, dest, zbase) );
++  Node* adr = phase->transform( new(C,4) AddPNode(dest, dest, start_offset) );
+   mem = new (C, 4) ClearArrayNode(ctl, mem, zsize, adr);
+   return phase->transform(mem);
+ }
+ 
++Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
++                                   intptr_t start_offset,
++                                   intptr_t end_offset,
++                                   PhaseGVN* phase) {
++  Compile* C = phase->C;
++  assert((end_offset % BytesPerInt) == 0, "odd end offset");
++  intptr_t done_offset = end_offset;
++  if ((done_offset % BytesPerLong) != 0) {
++    done_offset -= BytesPerInt;
++  }
++  if (done_offset > start_offset) {
++    mem = clear_memory(ctl, mem, dest,
++                       start_offset, phase->MakeConX(done_offset), phase);
++  }
++  if (done_offset < end_offset) { // emit the final 32-bit store
++    Node* adr = new (C, 4) AddPNode(dest, dest, phase->MakeConX(done_offset));
++    adr = phase->transform(adr);
++    const TypePtr* atp = TypeRawPtr::BOTTOM;
++    mem = StoreNode::make(C, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT);
++    mem = phase->transform(mem);
++    done_offset += BytesPerInt;
++  }
++  assert(done_offset == end_offset, "");
++  return mem;
++}
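
Stripped of IR construction, the offset-splitting overload above is plain
arithmetic: clear the largest 8-byte-aligned prefix of [start, end) in
doublewords, then finish with a single 32-bit store when the end offset is
only 4-byte aligned. The same logic on a raw byte buffer, as a
self-contained sketch (clear_range is hypothetical):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    void clear_range(char* base, intptr_t start, intptr_t end) {
      assert(start % 8 == 0 && end % 4 == 0);    // the alignment asserted above
      intptr_t done = end - (end % 8);           // largest doubleword-aligned end
      for (intptr_t off = start; off < done; off += 8) {
        uint64_t zero64 = 0;
        memcpy(base + off, &zero64, 8);          // the bulk doubleword clears
      }
      if (done < end) {                          // emit the final 32-bit store
        uint32_t zero32 = 0;
        memcpy(base + done, &zero32, 4);
      }
    }
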
++
+ //=============================================================================
+ // Do we match on this edge? No memory edges
+ uint StrCompNode::match_edge(uint idx) const {
+@@ -1447,7 +1723,7 @@
+ }
+ 
+ //------------------------------Ideal------------------------------------------
+-// Return a node which is more "ideal" than the current node.  Strip out 
++// Return a node which is more "ideal" than the current node.  Strip out
+ // control copies
+ Node *StrCompNode::Ideal(PhaseGVN *phase, bool can_reshape){
+   return remove_dead_region(phase, can_reshape) ? this : NULL;
+@@ -1470,7 +1746,7 @@
+ 
+ //------------------------------cmp--------------------------------------------
+ uint MemBarNode::hash() const { return NO_HASH; }
+-uint MemBarNode::cmp( const Node &n ) const { 
++uint MemBarNode::cmp( const Node &n ) const {
+   return (&n == this);          // Always fail except on self
+ }
+ 
+@@ -1482,12 +1758,13 @@
+   case Op_MemBarRelease:   return new(C, len) MemBarReleaseNode(C,  atp, pn);
+   case Op_MemBarVolatile:  return new(C, len) MemBarVolatileNode(C, atp, pn);
+   case Op_MemBarCPUOrder:  return new(C, len) MemBarCPUOrderNode(C, atp, pn);
++  case Op_Initialize:      return new(C, len) InitializeNode(C,     atp, pn);
+   default:                 ShouldNotReachHere(); return NULL;
+   }
+ }
+ 
+ //------------------------------Ideal------------------------------------------
+-// Return a node which is more "ideal" than the current node.  Strip out 
++// Return a node which is more "ideal" than the current node.  Strip out
+ // control copies
+ Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+   if (remove_dead_region(phase, can_reshape))  return this;
+@@ -1514,8 +1791,901 @@
+   return NULL;
+ }
+ 
+-//=============================================================================
+-// 
++//===========================InitializeNode====================================
++// SUMMARY:
++// This node acts as a memory barrier on raw memory, after some raw stores.
++// The 'cooked' oop value feeds from the Initialize, not the Allocation.
++// The Initialize can 'capture' suitably constrained stores as raw inits.
++// It can coalesce related raw stores into larger units (called 'tiles').
++// It can avoid zeroing new storage for memory units which have raw inits.
++// At macro-expansion, it is marked 'complete', and does not optimize further.
++//
++// EXAMPLE:
++// The object 'new short[2]' occupies 16 bytes in a 32-bit machine.
++//   ctl = incoming control; mem* = incoming memory
++// (Note:  A star * on a memory edge denotes I/O and other standard edges.)
++// First allocate uninitialized memory and fill in the header:
++//   alloc = (Allocate ctl mem* 16 #short[].klass ...)
++//   ctl := alloc.Control; mem* := alloc.Memory*
++//   rawmem = alloc.Memory; rawoop = alloc.RawAddress
++// Then initialize to zero the non-header parts of the raw memory block:
++//   init = (Initialize alloc.Control alloc.Memory* alloc.RawAddress)
++//   ctl := init.Control; mem.SLICE(#short[*]) := init.Memory
++// After the initialize node executes, the object is ready for service:
++//   oop := (CheckCastPP init.Control alloc.RawAddress #short[])
++// Suppose its body is immediately initialized as {1,2}:
++//   store1 = (StoreC init.Control init.Memory (+ oop 12) 1)
++//   store2 = (StoreC init.Control store1      (+ oop 14) 2)
++//   mem.SLICE(#short[*]) := store2
++//
++// DETAILS:
++// An InitializeNode collects and isolates object initialization after
++// an AllocateNode and before the next possible safepoint.  As a
++// memory barrier (MemBarNode), it keeps critical stores from drifting
++// down past any safepoint or any publication of the allocation.
++// Before this barrier, a newly-allocated object may have uninitialized bits.
++// After this barrier, it may be treated as a real oop, and GC is allowed.
++//
++// The semantics of the InitializeNode include an implicit zeroing of
++// the new object from object header to the end of the object.
++// (The object header and end are determined by the AllocateNode.)
++//
++// Certain stores may be added as direct inputs to the InitializeNode.
++// These stores must update raw memory, and they must be to addresses
++// derived from the raw address produced by AllocateNode, and with
++// a constant offset.  They must be ordered by increasing offset.
++// The first one is at in(RawStores), the last at in(req()-1).
++// Unlike most memory operations, they are not linked in a chain,
++// but are displayed in parallel as users of the rawmem output of
++// the allocation.
++//
++// (See comments in InitializeNode::capture_store, which continue
++// the example given above.)
++//
++// When the associated Allocate is macro-expanded, the InitializeNode
++// may be rewritten to optimize collected stores.  A ClearArrayNode
++// may also be created at that point to represent any required zeroing.
++// The InitializeNode is then marked 'complete', prohibiting further
++// capturing of nearby memory operations.
++//
++// During macro-expansion, all captured initializations which store
++// constant values of 32 bits or smaller are coalesced (if advantageous)
++// into larger 'tiles' of 32 or 64 bits.  This allows an object to be
++// initialized in fewer memory operations.  Memory words which are
++// covered by neither tiles nor non-constant stores are pre-zeroed
++// by explicit stores of zero.  (The code shape happens to do all
++// zeroing first, then all other stores, with both sequences occurring
++// in order of ascending offsets.)
++//
++// Alternatively, code may be inserted between an AllocateNode and its
++// InitializeNode, to perform arbitrary initialization of the new object.
++// E.g., the object copying intrinsics insert complex data transfers here.
++// The initialization must then be marked as 'complete' to disable the
++// built-in zeroing semantics and the collection of initializing stores.
++//
++// While an InitializeNode is incomplete, reads from the memory state
++// produced by it are optimizable if they match the control edge and
++// new oop address associated with the allocation/initialization.
++// They return a stored value (if the offset matches) or else zero.
++// A write to the memory state, if it matches control and address,
++// and if it is to a constant offset, may be 'captured' by the
++// InitializeNode.  It is cloned as a raw memory operation and rewired
++// inside the initialization, to the raw oop produced by the allocation.
++// Operations on addresses which are provably distinct (e.g., to
++// other AllocateNodes) are allowed to bypass the initialization.
++//
++// The effect of all this is to consolidate object initialization
++// (both arrays and non-arrays, both piecewise and bulk) into a
++// single location, where it can be optimized as a unit.
++//
++// Only stores with an offset less than TrackedInitializationLimit words
++// will be considered for capture by an InitializeNode.  This puts a
++// reasonable limit on the complexity of optimized initializations.
++
++//---------------------------InitializeNode------------------------------------
++InitializeNode::InitializeNode(Compile* C, int adr_type, Node* rawoop)
++  : _is_complete(false),
++    MemBarNode(C, adr_type, rawoop)
++{
++  init_class_id(Class_Initialize);
++
++  assert(adr_type == Compile::AliasIdxRaw, "only valid atp");
++  assert(in(RawAddress) == rawoop, "proper init");
++  // Note:  allocation() can be NULL, for secondary initialization barriers
++}
++
++// Since this node is not matched, it will be processed by the
++// register allocator.  Declare that there are no constraints
++// on the allocation of the RawAddress edge.
++const RegMask &InitializeNode::in_RegMask(uint idx) const {
++  // This edge should be set to top, by the set_complete.  But be conservative.
++  if (idx == InitializeNode::RawAddress)
++    return *(Compile::current()->matcher()->idealreg2spillmask[in(idx)->ideal_reg()]);
++  return RegMask::Empty;
++}
++
++Node* InitializeNode::memory(uint alias_idx) {
++  Node* mem = in(Memory);
++  if (mem->is_MergeMem()) {
++    return mem->as_MergeMem()->memory_at(alias_idx);
++  } else {
++    // incoming raw memory is not split
++    return mem;
++  }
++}
++
++bool InitializeNode::is_non_zero() {
++  if (is_complete())  return false;
++  remove_extra_zeroes();
++  return (req() > RawStores);
++}
++
++void InitializeNode::set_complete(PhaseGVN* phase) {
++  assert(!is_complete(), "caller responsibility");
++  _is_complete = true;
++
++  // After this node is complete, it contains a bunch of
++  // raw-memory initializations.  There is no need for
++  // it to have anything to do with non-raw memory effects.
++  // Therefore, tell all non-raw users to re-optimize themselves,
++  // after skipping the memory effects of this initialization.
++  PhaseIterGVN* igvn = phase->is_IterGVN();
++  if (igvn)  igvn->add_users_to_worklist(this);
++}
++
++// convenience function
++// return false if the init contains any stores already
++bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
++  InitializeNode* init = initialization();
++  if (init == NULL || init->is_complete())  return false;
++  init->remove_extra_zeroes();
++  // for now, if this allocation has already collected any inits, bail:
++  if (init->is_non_zero())  return false;
++  init->set_complete(phase);
++  return true;
++}
++
++void InitializeNode::remove_extra_zeroes() {
++  if (req() == RawStores)  return;
++  Node* zmem = zero_memory();
++  uint fill = RawStores;
++  for (uint i = fill; i < req(); i++) {
++    Node* n = in(i);
++    if (n->is_top() || n == zmem)  continue;  // skip
++    if (fill < i)  set_req(fill, n);          // compact
++    ++fill;
++  }
++  // delete any empty spaces created:
++  while (fill < req()) {
++    del_req(fill);
++  }
++}
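
The compaction above is the familiar remove-and-trim idiom applied to the
node's edge list: slide the surviving stores down over skipped slots, then
delete the tail. The same shape on a plain vector, with an integer
sentinel standing in for the zero_memory projection (a sketch, not the
Node API):

    #include <vector>

    void remove_extra_zeroes(std::vector<int>& edges, int zmem) {
      size_t fill = 0;
      for (size_t i = 0; i < edges.size(); i++) {
        if (edges[i] == zmem) continue;   // skip placeholder zero stores
        edges[fill++] = edges[i];         // compact survivors downward
      }
      edges.resize(fill);                 // delete any empty spaces created
    }
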
++
++// Helper for remembering which stores go with which offsets.
++intptr_t InitializeNode::get_store_offset(Node* st, PhaseTransform* phase) {
++  if (!st->is_Store())  return -1;  // can happen to dead code via subsume_node
++  intptr_t offset = -1;
++  Node* base = AddPNode::Ideal_base_and_offset(st->in(MemNode::Address),
++                                               phase, offset);
++  if (base == NULL)     return -1;  // something is dead,
++  if (offset < 0)       return -1;  //        dead, dead
++  return offset;
++}
++
++// Helper for proving that an initialization expression is
++// "simple enough" to be folded into an object initialization.
++// Attempts to prove that a store's initial value 'n' can be captured
++// within the initialization without creating a vicious cycle, such as:
++//     { Foo p = new Foo(); p.next = p; }
++// True for constants and parameters and small combinations thereof.
++bool InitializeNode::detect_init_independence(Node* n,
++                                              bool st_is_pinned,
++                                              int& count) {
++  if (n == NULL)      return true;   // (can this really happen?)
++  if (n->is_Proj())   n = n->in(0);
++  if (n == this)      return false;  // found a cycle
++  if (n->is_Con())    return true;
++  if (n->is_Start())  return true;   // params, etc., are OK
++  if (n->is_Root())   return true;   // even better
++
++  Node* ctl = n->in(0);
++  if (ctl != NULL && !ctl->is_top()) {
++    if (ctl->is_Proj())  ctl = ctl->in(0);
++    if (ctl == this)  return false;
++
++    // If we already know that the enclosing memory op is pinned right after
++    // the init, then any control flow that the store has picked up
++    // must have preceded the init, or else be equal to the init.
++    // Even after loop optimizations (which might change control edges)
++    // a store is never pinned *before* the availability of its inputs.
++    if (!MemNode::detect_dominating_control(ctl, this->in(0)))
++      return false;                  // failed to prove a good control
++
++  }
++
++  // Check data edges for possible dependencies on 'this'.
++  if ((count += 1) > 20)  return false;  // complexity limit
++  for (uint i = 1; i < n->req(); i++) {
++    Node* m = n->in(i);
++    if (m == NULL || m == n || m->is_top())  continue;
++    uint first_i = n->find_edge(m);
++    if (i != first_i)  continue;  // process duplicate edge just once
++    if (!detect_init_independence(m, st_is_pinned, count)) {
++      return false;
++    }
++  }
++
++  return true;
++}
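
Stripped of IR detail, detect_init_independence is a depth-first scan of
the candidate value's inputs that fails on reaching the barrier itself
(the would-be cycle in { Foo p = new Foo(); p.next = p; }) or on blowing
a small node budget. A toy version of that scan (types hypothetical):

    #include <vector>

    struct N { std::vector<N*> in; bool is_con = false; };

    bool independent(const N* barrier, const N* n, int& count) {
      if (n == nullptr)  return true;
      if (n == barrier)  return false;  // found a cycle through the init
      if (n->is_con)     return true;   // constants are safe (Start/Root
                                        // would be handled likewise)
      if (++count > 20)  return false;  // complexity limit, as above
      for (const N* m : n->in)
        if (!independent(barrier, m, count)) return false;
      return true;
    }
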
++
++// Here are all the checks a Store must pass before it can be moved into
++// an initialization.  Returns zero if a check fails.
++// On success, returns the (constant) offset to which the store applies,
++// within the initialized memory.
++intptr_t InitializeNode::can_capture_store(StoreNode* st, PhaseTransform* phase) {
++  const int FAIL = 0;
++  if (st->req() != MemNode::ValueIn + 1)
++    return FAIL;                // an inscrutable StoreNode (card mark?)
++  Node* ctl = st->in(MemNode::Control);
++  if (!(ctl != NULL && ctl->is_Proj() && ctl->in(0) == this))
++    return FAIL;                // must be unconditional after the initialization
++  Node* mem = st->in(MemNode::Memory);
++  if (!(mem->is_Proj() && mem->in(0) == this))
++    return FAIL;                // must not be preceded by other stores
++  Node* adr = st->in(MemNode::Address);
++  intptr_t offset;
++  AllocateNode* alloc = AllocateNode::Ideal_allocation(adr, phase, offset);
++  if (alloc == NULL)
++    return FAIL;                // inscrutable address
++  if (alloc != allocation())
++    return FAIL;                // wrong allocation!  (store needs to float up)
++  Node* val = st->in(MemNode::ValueIn);
++  int complexity_count = 0;
++  if (!detect_init_independence(val, true, complexity_count))
++    return FAIL;                // stored value must be 'simple enough'
++
++  return offset;                // success
++}
++
++// Find the captured store in(i) which corresponds to the range
++// [start..start+size) in the initialized object.
++// If there is one, return its index i.  If there isn't, return the
++// negative of the index where it should be inserted.
++// Return 0 if the queried range overlaps an initialization boundary
++// or if dead code is encountered.
++// If size_in_bytes is zero, do not bother with overlap checks.
++int InitializeNode::captured_store_insertion_point(intptr_t start,
++                                                   int size_in_bytes,
++                                                   PhaseTransform* phase) {
++  const int FAIL = 0, MAX_STORE = BytesPerLong;
++
++  if (is_complete())
++    return FAIL;                // arraycopy got here first; punt
++
++  assert(allocation() != NULL, "must be present");
++
++  // no negatives, no header fields:
++  if (start < (intptr_t) sizeof(oopDesc))  return FAIL;
++  if (start < (intptr_t) sizeof(arrayOopDesc) &&
++      start < (intptr_t) allocation()->minimum_header_size())  return FAIL;
++
++  // after a certain size, we bail out on tracking all the stores:
++  intptr_t ti_limit = (TrackedInitializationLimit * HeapWordSize);
++  if (start >= ti_limit)  return FAIL;
++
++  for (uint i = InitializeNode::RawStores, limit = req(); ; ) {
++    if (i >= limit)  return -(int)i; // not found; here is where to put it
++
++    Node*    st     = in(i);
++    intptr_t st_off = get_store_offset(st, phase);
++    if (st_off < 0) {
++      if (st != zero_memory()) {
++        return FAIL;            // bail out if there is dead garbage
++      }
++    } else if (st_off > start) {
++      // ...we are done, since stores are ordered
++      if (st_off < start + size_in_bytes) {
++        return FAIL;            // the next store overlaps
++      }
++      return -(int)i;           // not found; here is where to put it
++    } else if (st_off < start) {
++      if (size_in_bytes != 0 &&
++          start < st_off + MAX_STORE &&
++          start < st_off + st->as_Store()->memory_size()) {
++        return FAIL;            // the previous store overlaps
++      }
++    } else {
++      if (size_in_bytes != 0 &&
++          st->as_Store()->memory_size() != size_in_bytes) {
++        return FAIL;            // mismatched store size
++      }
++      return i;
++    }
++
++    ++i;
++  }
++}
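
The return convention deserves a stand-alone look: a positive result is
the index of an exact-fit slot, a negative result encodes where a new
store belongs, and 0 (never a valid slot, since real stores start at
RawStores > 0) means overlap or dead garbage. A sketch over (offset, size)
records kept sorted by offset; slot 0 is a reserved dummy playing the role
of the non-store edges, and overlap checks are always on:

    #include <vector>

    struct Rec { long off; int size; };

    // Find 'start' in v[1..): matching index, -(insertion index), or 0.
    int insertion_point(const std::vector<Rec>& v, long start, int size) {
      for (size_t i = 1; ; i++) {
        if (i >= v.size()) return -(int)i;          // not found; put it here
        const Rec& r = v[i];
        if (r.off > start) {                        // done: records are ordered
          if (r.off < start + size) return 0;       // the next record overlaps
          return -(int)i;
        } else if (r.off < start) {
          if (start < r.off + r.size) return 0;     // the previous one overlaps
        } else {
          return (r.size == size) ? (int)i : 0;     // exact slot or size clash
        }
      }
    }
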
++
++// Look for a captured store which initializes at the offset 'start'
++// with the given size.  If there is no such store, and no other
++// initialization interferes, then return zero_memory (the memory
++// projection of the AllocateNode).
++Node* InitializeNode::find_captured_store(intptr_t start, int size_in_bytes,
++                                          PhaseTransform* phase) {
++  assert(stores_are_sane(phase), "");
++  int i = captured_store_insertion_point(start, size_in_bytes, phase);
++  if (i == 0) {
++    return NULL;                // something is dead
++  } else if (i < 0) {
++    return zero_memory();       // just primordial zero bits here
++  } else {
++    Node* st = in(i);           // here is the store at this position
++    assert(get_store_offset(st->as_Store(), phase) == start, "sanity");
++    return st;
++  }
++}
++
++// Create, as a raw pointer, an address within my new object at 'offset'.
++Node* InitializeNode::make_raw_address(intptr_t offset,
++                                       PhaseTransform* phase) {
++  Node* addr = in(RawAddress);
++  if (offset != 0) {
++    Compile* C = phase->C;
++    addr = phase->transform( new (C, 4) AddPNode(C->top(), addr,
++                                                 phase->MakeConX(offset)) );
++  }
++  return addr;
++}
++
++// Clone the given store, converting it into a raw store
++// initializing a field or element of my new object.
++// Caller is responsible for retiring the original store,
++// with subsume_node or the like.
++//
++// From the example above InitializeNode::InitializeNode,
++// here are the old stores to be captured:
++//   store1 = (StoreC init.Control init.Memory (+ oop 12) 1)
++//   store2 = (StoreC init.Control store1      (+ oop 14) 2)
++//
++// Here is the changed code; note the extra edges on init:
++//   alloc = (Allocate ...)
++//   rawoop = alloc.RawAddress
++//   rawstore1 = (StoreC alloc.Control alloc.Memory (+ rawoop 12) 1)
++//   rawstore2 = (StoreC alloc.Control alloc.Memory (+ rawoop 14) 2)
++//   init = (Initialize alloc.Control alloc.Memory rawoop
++//                      rawstore1 rawstore2)
++//
++Node* InitializeNode::capture_store(StoreNode* st, intptr_t start,
++                                    PhaseTransform* phase) {
++  assert(stores_are_sane(phase), "");
++
++  if (start < 0)  return NULL;
++  assert(can_capture_store(st, phase) == start, "sanity");
++
++  Compile* C = phase->C;
++  int size_in_bytes = st->memory_size();
++  int i = captured_store_insertion_point(start, size_in_bytes, phase);
++  if (i == 0)  return NULL;     // bail out
++  Node* prev_mem = NULL;        // raw memory for the captured store
++  if (i > 0) {
++    prev_mem = in(i);           // there is a pre-existing store under this one
++    set_req(i, C->top());       // temporarily disconnect it
++    // See StoreNode::Ideal 'st->outcnt() == 1' for the reason to disconnect.
++  } else {
++    i = -i;                     // no pre-existing store
++    prev_mem = zero_memory();   // a slice of the newly allocated object
++    if (i > InitializeNode::RawStores && in(i-1) == prev_mem)
++      set_req(--i, C->top());   // reuse this edge; it has been folded away
++    else
++      ins_req(i, C->top());     // build a new edge
++  }
++  Node* new_st = st->clone();
++  new_st->set_req(MemNode::Control, in(Control));
++  new_st->set_req(MemNode::Memory,  prev_mem);
++  new_st->set_req(MemNode::Address, make_raw_address(start, phase));
++  new_st = phase->transform(new_st);
++
++  // At this point, new_st might have swallowed a pre-existing store
++  // at the same offset, or perhaps new_st might have disappeared,
++  // if it redundantly stored the same value (or zero to fresh memory).
++
++  // In any case, wire it in:
++  set_req(i, new_st);
++
++  // The caller may now kill the old guy.
++  DEBUG_ONLY(Node* check_st = find_captured_store(start, size_in_bytes, phase));
++  assert(check_st == new_st || check_st == NULL, "must be findable");
++  assert(!is_complete(), "");
++  return new_st;
++}
++
++static bool store_constant(jlong* tiles, int num_tiles,
++                           intptr_t st_off, int st_size,
++                           jlong con) {
++  if ((st_off & (st_size-1)) != 0)
++    return false;               // strange store offset (assume size==2**N)
++  address addr = (address)tiles + st_off;
++  assert(st_off >= 0 && addr+st_size <= (address)&tiles[num_tiles], "oob");
++  switch (st_size) {
++  case sizeof(jbyte):  *(jbyte*) addr = (jbyte) con; break;
++  case sizeof(jchar):  *(jchar*) addr = (jchar) con; break;
++  case sizeof(jint):   *(jint*)  addr = (jint)  con; break;
++  case sizeof(jlong):  *(jlong*) addr = (jlong) con; break;
++  default: return false;        // strange store size (detect size!=2**N here)
++  }
++  return true;                  // return success to caller
++}
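
To see the tile model concretely, replay the running example: two short
stores {1,2} at offsets 12 and 14 of a 16-byte object land in the second
8-byte tile, after which one 32-bit store could replace both. A runnable
little-endian demonstration; memcpy of the low bytes plays the role of the
typed stores in the switch above (store_con and the layout are
illustrative only):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static bool store_con(int64_t* tiles, int num_tiles,
                          intptr_t off, int size, int64_t con) {
      if (off % size != 0) return false;            // strange store offset
      char* addr = (char*)tiles + off;
      assert(off >= 0 && addr + size <= (char*)&tiles[num_tiles]);
      memcpy(addr, &con, size);                     // truncating write (LE host)
      return true;
    }

    int main() {
      int64_t tiles[2] = {0, 0};                    // model of 'new short[2]'
      store_con(tiles, 2, 12, sizeof(int16_t), 1);  // store1: a[0] = 1
      store_con(tiles, 2, 14, sizeof(int16_t), 2);  // store2: a[1] = 2
      // On this little-endian host the pair is the single int 0x00020001
      // at offset 12 -- the coalesced 'tile' a StoreI would write.
      printf("tile[1] = 0x%016llx\n", (unsigned long long)tiles[1]);
    }
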
++
++// Coalesce subword constants into int constants and possibly
++// into long constants.  The goal, if the CPU permits,
++// is to initialize the object with a small number of 64-bit tiles.
++// Also, convert floating-point constants to bit patterns.
++// Non-constants are not relevant to this pass.
++//
++// In terms of the running example on InitializeNode::InitializeNode
++// and InitializeNode::capture_store, here is the transformation
++// of rawstore1 and rawstore2 into rawstore12:
++//   alloc = (Allocate ...)
++//   rawoop = alloc.RawAddress
++//   tile12 = 0x00010002
++//   rawstore12 = (StoreI alloc.Control alloc.Memory (+ rawoop 12) tile12)
++//   init = (Initialize alloc.Control alloc.Memory rawoop rawstore12)
++//
++void
++InitializeNode::coalesce_subword_stores(intptr_t header_size,
++                                        Node* size_in_bytes,
++                                        PhaseGVN* phase) {
++  Compile* C = phase->C;
++
++  assert(stores_are_sane(phase), "");
++  // Note:  After this pass, they are not completely sane,
++  // since there may be some overlaps.
++
++  int old_subword = 0, old_long = 0, new_int = 0, new_long = 0;
++
++  intptr_t ti_limit = (TrackedInitializationLimit * HeapWordSize);
++  intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, ti_limit);
++  size_limit = MIN2(size_limit, ti_limit);
++  size_limit = align_size_up(size_limit, BytesPerLong);
++  int num_tiles = size_limit / BytesPerLong;
++
++  // allocate space for the tile map:
++  const int small_len = DEBUG_ONLY(true ? 3 :) 30; // keep stack frames small
++  jlong  tiles_buf[small_len];
++  Node*  nodes_buf[small_len];
++  jlong  inits_buf[small_len];
++  jlong* tiles = ((num_tiles <= small_len) ? &tiles_buf[0]
++                  : NEW_RESOURCE_ARRAY(jlong, num_tiles));
++  Node** nodes = ((num_tiles <= small_len) ? &nodes_buf[0]
++                  : NEW_RESOURCE_ARRAY(Node*, num_tiles));
++  jlong* inits = ((num_tiles <= small_len) ? &inits_buf[0]
++                  : NEW_RESOURCE_ARRAY(jlong, num_tiles));
++  // tiles: exact bitwise model of all primitive constants
++  // nodes: last constant-storing node subsumed into the tiles model
++  // inits: which bytes (in each tile) are touched by any initializations
++
++  //// Pass A: Fill in the tile model with any relevant stores.
++
++  Copy::zero_to_bytes(tiles, sizeof(tiles[0]) * num_tiles);
++  Copy::zero_to_bytes(nodes, sizeof(nodes[0]) * num_tiles);
++  Copy::zero_to_bytes(inits, sizeof(inits[0]) * num_tiles);
++  Node* zmem = zero_memory(); // initially zero memory state
++  for (uint i = InitializeNode::RawStores, limit = req(); i < limit; i++) {
++    Node* st = in(i);
++    intptr_t st_off = get_store_offset(st, phase);
++
++    // Figure out the store's offset and constant value:
++    if (st_off < header_size)             continue; //skip (ignore header)
++    if (st->in(MemNode::Memory) != zmem)  continue; //skip (odd store chain)
++    int st_size = st->as_Store()->memory_size();
++    if (st_off + st_size > size_limit)    break;
++
++    // Record which bytes are touched, whether by constant or not.
++    if (!store_constant(inits, num_tiles, st_off, st_size, (jlong) -1))
++      continue;                 // skip (strange store size)
++
++    const Type* val = phase->type(st->in(MemNode::ValueIn));
++    if (!val->singleton())                continue; //skip (non-con store)
++    BasicType type = val->basic_type();
++
++    jlong con = 0;
++    switch (type) {
++    case T_INT:    con = val->is_int()->get_con();  break;
++    case T_LONG:   con = val->is_long()->get_con(); break;
++    case T_FLOAT:  con = jint_cast(val->getf());    break;
++    case T_DOUBLE: con = jlong_cast(val->getd());   break;
++    default:                              continue; //skip (odd store type)
++    }
++
++    if (type == T_LONG && Matcher::isSimpleConstant64(con) &&
++        st->Opcode() == Op_StoreL) {
++      continue;                 // This StoreL is already optimal.
++    }
++
++    // Store down the constant.
++    store_constant(tiles, num_tiles, st_off, st_size, con);
++
++    intptr_t j = st_off >> LogBytesPerLong;
++
++    if (type == T_INT && st_size == BytesPerInt
++        && (st_off & BytesPerInt) == BytesPerInt) {
++      jlong lcon = tiles[j];
++      if (!Matcher::isSimpleConstant64(lcon) &&
++          st->Opcode() == Op_StoreI) {
++        // This StoreI is already optimal by itself.
++        jint* intcon = (jint*) &tiles[j];
++        intcon[1] = 0;  // undo the store_constant()
++
++        // If the previous store is also optimal by itself, back up and
++        // undo the action of the previous loop iteration... if we can.
++        // But if we can't, just let the previous half take care of itself.
++        st = nodes[j];
++        st_off -= BytesPerInt;
++        con = intcon[0];
++        if (con != 0 && st != NULL && st->Opcode() == Op_StoreI) {
++          assert(st_off >= header_size, "still ignoring header");
++          assert(get_store_offset(st, phase) == st_off, "must be");
++          assert(in(i-1) == zmem, "must be");
++          DEBUG_ONLY(const Type* tcon = phase->type(st->in(MemNode::ValueIn)));
++          assert(con == tcon->is_int()->get_con(), "must be");
++          // Undo the effects of the previous loop trip, which swallowed st:
++          intcon[0] = 0;        // undo store_constant()
++          set_req(i-1, st);     // undo set_req(i, zmem)
++          nodes[j] = NULL;      // undo nodes[j] = st
++          --old_subword;        // undo ++old_subword
++        }
++        continue;               // This StoreI is already optimal.
++      }
++    }
++
++    // This store is not needed.
++    set_req(i, zmem);
++    nodes[j] = st;              // record for the moment
++    if (st_size < BytesPerLong) // something has changed
++          ++old_subword;        // includes int/float, but who's counting...
++    else  ++old_long;
++  }
++
++  if ((old_subword + old_long) == 0)
++    return;                     // nothing more to do
++
++  //// Pass B: Convert any non-zero tiles into optimal constant stores.
++  // Be sure to insert them before overlapping non-constant stores.
++  // (E.g., byte[] x = { 1,2,y,4 }  =>  x[int 0] = 0x01020004, x[2]=y.)
++  for (int j = 0; j < num_tiles; j++) {
++    jlong con  = tiles[j];
++    jlong init = inits[j];
++    if (con == 0)  continue;
++    jint con0,  con1;           // split the constant, address-wise
++    jint init0, init1;          // split the init map, address-wise
++    { union { jlong con; jint intcon[2]; } u;
++      u.con = con;
++      con0  = u.intcon[0];
++      con1  = u.intcon[1];
++      u.con = init;
++      init0 = u.intcon[0];
++      init1 = u.intcon[1];
++    }
++
++    Node* old = nodes[j];
++    assert(old != NULL, "need the prior store");
++    intptr_t offset = (j * BytesPerLong);
++
++    bool split = !Matcher::isSimpleConstant64(con);
++
++    if (offset < header_size) {
++      assert(offset + BytesPerInt >= header_size, "second int counts");
++      assert(*(jint*)&tiles[j] == 0, "junk in header");
++      split = true;             // only the second word counts
++      // Example:  int a[] = { 42 ... }
++    } else if (con0 == 0 && init0 == -1) {
++      split = true;             // first word is covered by full inits
++      // Example:  int a[] = { ... foo(), 42 ... }
++    } else if (con1 == 0 && init1 == -1) {
++      split = true;             // second word is covered by full inits
++      // Example:  int a[] = { ... 42, foo() ... }
++    }
++
++    // Here's a case where init0 is neither 0 nor -1:
++    //   byte a[] = { ... 0,0,foo(),0,  0,0,0,42 ... }
++    // Assuming big-endian memory, init0, init1 are 0x0000FF00, 0x000000FF.
++    // In this case the tile is not split; it is (jlong)42.
++    // The big tile is stored down, and then the foo() value is inserted.
++    // (If there were foo(),foo() instead of foo(),0, init0 would be -1.)
++
++    Node* ctl = old->in(MemNode::Control);
++    Node* adr = make_raw_address(offset, phase);
++    const TypePtr* atp = TypeRawPtr::BOTTOM;
++
++    // One or two coalesced stores to plop down.
++    Node*    st[2];
++    intptr_t off[2];
++    int  nst = 0;
++    if (!split) {
++      ++new_long;
++      off[nst] = offset;
++      st[nst++] = StoreNode::make(C, ctl, zmem, adr, atp,
++                                  phase->longcon(con), T_LONG);
++    } else {
++      // Omit either if it is a zero.
++      if (con0 != 0) {
++        ++new_int;
++        off[nst]  = offset;
++        st[nst++] = StoreNode::make(C, ctl, zmem, adr, atp,
++                                    phase->intcon(con0), T_INT);
++      }
++      if (con1 != 0) {
++        ++new_int;
++        offset += BytesPerInt;
++        adr = make_raw_address(offset, phase);
++        off[nst]  = offset;
++        st[nst++] = StoreNode::make(C, ctl, zmem, adr, atp,
++                                    phase->intcon(con1), T_INT);
++      }
++    }
++
++    // Insert second store first, then the first before the second.
++    // Insert each one just before any overlapping non-constant stores.
++    while (nst > 0) {
++      Node* st1 = st[--nst];
++      C->copy_node_notes_to(st1, old);
++      st1 = phase->transform(st1);
++      offset = off[nst];
++      assert(offset >= header_size, "do not smash header");
++      int ins_idx = captured_store_insertion_point(offset, /*size:*/0, phase);
++      guarantee(ins_idx != 0, "must re-insert constant store");
++      if (ins_idx < 0)  ins_idx = -ins_idx;  // never overlap
++      if (ins_idx > InitializeNode::RawStores && in(ins_idx-1) == zmem)
++        set_req(--ins_idx, st1);
++      else
++        ins_req(ins_idx, st1);
++    }
++  }
++
++  if (PrintCompilation && WizardMode)
++    tty->print_cr("Changed %d/%d subword/long constants into %d/%d int/long",
++                  old_subword, old_long, new_int, new_long);
++  if (C->log() != NULL)
++    C->log()->elem("comment that='%d/%d subword/long to %d/%d int/long'",
++                   old_subword, old_long, new_int, new_long);
++
++  // Clean up any remaining occurrences of zmem:
++  remove_extra_zeroes();
++}
++
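The pass above communicates with Pass B through 64-bit "tiles": Pass A smears each small constant store into a jlong slot of the tiles array, and Pass B splits every nonzero tile address-wise to decide between one StoreL and up to two StoreI nodes. A minimal standalone sketch of that split, outside the VM and assuming a little-endian layout (the names here are illustrative, not HotSpot's; it type-puns through a union exactly as the patch code itself does):

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Suppose Pass A coalesced the byte stores {1,2,0,4} into one tile.
        int64_t tile = 0;
        const uint8_t bytes[4] = {1, 2, 0, 4};
        for (int i = 0; i < 4; i++)
            tile |= (int64_t)bytes[i] << (8 * i);      // little-endian placement

        // Pass B splits the tile exactly like the union in the code above.
        union { int64_t con; int32_t intcon[2]; } u;
        u.con = tile;
        printf("con0=0x%08x con1=0x%08x\n",
               (unsigned)u.intcon[0], (unsigned)u.intcon[1]);
        // con1 == 0 here, so only one StoreI would be emitted for this word.
        return 0;
    }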
++// Explore forward from in(start) to find the first fully initialized
++// word, and return its offset.  Skip groups of subword stores which
++// together initialize full words.  If in(start) is itself part of a
++// fully initialized word, return the offset of in(start).  If there
++// are no following full-word stores, or if something is fishy, return
++// a negative value.
++intptr_t InitializeNode::find_next_fullword_store(uint start, PhaseGVN* phase) {
++  int       int_map = 0;
++  intptr_t  int_map_off = 0;
++  const int FULL_MAP = right_n_bits(BytesPerInt);  // the int_map we hope for
++
++  for (uint i = start, limit = req(); i < limit; i++) {
++    Node* st = in(i);
++
++    intptr_t st_off = get_store_offset(st, phase);
++    if (st_off < 0)  break;  // return conservative answer
++
++    int st_size = st->as_Store()->memory_size();
++    if (st_size >= BytesPerInt && (st_off % BytesPerInt) == 0) {
++      return st_off;            // we found a complete word init
++    }
++
++    // update the map:
++
++    intptr_t this_int_off = align_size_down(st_off, BytesPerInt);
++    if (this_int_off != int_map_off) {
++      // reset the map:
++      int_map = 0;
++      int_map_off = this_int_off;
++    }
++
++    int subword_off = st_off - this_int_off;
++    int_map |= right_n_bits(st_size) << subword_off;
++    if ((int_map & FULL_MAP) == FULL_MAP) {
++      return this_int_off;      // we found a complete word init
++    }
++
++    // Did this store hit or cross the word boundary?
++    intptr_t next_int_off = align_size_down(st_off + st_size, BytesPerInt);
++    if (next_int_off == this_int_off + BytesPerInt) {
++      // We passed the current int, without fully initializing it.
++      int_map_off = next_int_off;
++      int_map >>= BytesPerInt;
++    } else if (next_int_off > this_int_off + BytesPerInt) {
++      // We passed the current and next int.
++      return this_int_off + BytesPerInt;
++    }
++  }
++
++  return -1;
++}
++
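The bookkeeping in find_next_fullword_store is a four-bit occupancy map per 32-bit word: each subword store of size st_size at offset st_off sets st_size bits at position (st_off mod 4), and the word counts as fully initialized once all four bits are set. A hedged re-derivation in plain C++ (right_n_bits is reproduced as an ordinary function for small n; in the VM it is a macro):

    #include <cstdio>

    static int right_n_bits(int n) { return (1 << n) - 1; }  // n low-order bits

    int main() {
        const int BytesPerInt = 4;
        const int FULL_MAP = right_n_bits(BytesPerInt);       // 0b1111
        int int_map = 0;
        // Two byte stores at offsets 12 and 13, then a short store at 14:
        const struct { int off, size; } stores[] = {{12, 1}, {13, 1}, {14, 2}};
        for (const auto& s : stores) {
            int subword_off = s.off % BytesPerInt;
            int_map |= right_n_bits(s.size) << subword_off;
            if ((int_map & FULL_MAP) == FULL_MAP)
                printf("word fully initialized after store at offset %d\n", s.off);
        }
        return 0;
    }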
++
++// Called when the associated AllocateNode is expanded into CFG.
++// At this point, we may perform additional optimizations.
++// Linearize the stores by ascending offset, to make memory
++// activity as coherent as possible.
++Node* InitializeNode::complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
++                                      intptr_t header_size,
++                                      Node* size_in_bytes,
++                                      PhaseGVN* phase) {
++  assert(!is_complete(), "not already complete");
++  assert(stores_are_sane(phase), "");
++  assert(allocation() != NULL, "must be present");
++
++  remove_extra_zeroes();
++
++  if (ReduceFieldZeroing || ReduceBulkZeroing)
++    // reduce instruction count for common initialization patterns
++    coalesce_subword_stores(header_size, size_in_bytes, phase);
++
++  Node* zmem = zero_memory();   // initially zero memory state
++  Node* inits = zmem;           // accumulating a linearized chain of inits
++  #ifdef ASSERT
++  intptr_t last_init_off = sizeof(oopDesc);  // previous init offset
++  intptr_t last_init_end = sizeof(oopDesc);  // previous init offset+size
++  intptr_t last_tile_end = sizeof(oopDesc);  // previous tile offset+size
++  #endif
++  intptr_t zeroes_done = header_size;
++
++  bool do_zeroing = true;       // we might give up if inits are very sparse
++  int  big_init_gaps = 0;       // how many large gaps have we seen?
++
++  if (ZeroTLAB)  do_zeroing = false;
++  if (!ReduceFieldZeroing && !ReduceBulkZeroing)  do_zeroing = false;
++
++  for (uint i = InitializeNode::RawStores, limit = req(); i < limit; i++) {
++    Node* st = in(i);
++    intptr_t st_off = get_store_offset(st, phase);
++    if (st_off < 0)
++      break;                    // unknown junk in the inits
++    if (st->in(MemNode::Memory) != zmem)
++      break;                    // complicated store chains somehow in list
++
++    int st_size = st->as_Store()->memory_size();
++    intptr_t next_init_off = st_off + st_size;
++
++    if (do_zeroing && zeroes_done < next_init_off) {
++      // See if this store needs a zero before it or under it.
++      intptr_t zeroes_needed = st_off;
++
++      if (st_size < BytesPerInt) {
++        // Look for subword stores which only partially initialize words.
++        // If we find some, we must lay down some word-level zeroes first,
++        // underneath the subword stores.
++        //
++        // Examples:
++        //   byte[] a = { p,q,r,s }  =>  a[0]=p,a[1]=q,a[2]=r,a[3]=s
++        //   byte[] a = { x,y,0,0 }  =>  a[0..3] = 0, a[0]=x,a[1]=y
++        //   byte[] a = { 0,0,z,0 }  =>  a[0..3] = 0, a[2]=z
++        //
++        // Note:  coalesce_subword_stores may have already done this,
++        // if it was prompted by constant non-zero subword initializers.
++        // But this case can still arise with non-constant stores.
++
++        intptr_t next_full_store = find_next_fullword_store(i, phase);
++
++        // In the examples above:
++        //   in(i)          p   q   r   s     x   y     z
++        //   st_off        12  13  14  15    12  13    14
++        //   st_size        1   1   1   1     1   1     1
++        //   next_full_s.  12  16  16  16    16  16    16
++        //   z's_done      12  16  16  16    12  16    12
++        //   z's_needed    12  16  16  16    16  16    16
++        //   zsize          0   0   0   0     4   0     4
++        if (next_full_store < 0) {
++          // Conservative tack:  Zero to end of current word.
++          zeroes_needed = align_size_up(zeroes_needed, BytesPerInt);
++        } else {
++          // Zero to beginning of next fully initialized word.
++          // Or, don't zero at all, if we are already in that word.
++          assert(next_full_store >= zeroes_needed, "must go forward");
++          assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
++          zeroes_needed = next_full_store;
++        }
++      }
++
++      if (zeroes_needed > zeroes_done) {
++        intptr_t zsize = zeroes_needed - zeroes_done;
++        // Do some incremental zeroing on rawmem, in parallel with inits.
++        zeroes_done = align_size_down(zeroes_done, BytesPerInt);
++        rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
++                                              zeroes_done, zeroes_needed,
++                                              phase);
++        zeroes_done = zeroes_needed;
++        if (zsize > Matcher::init_array_short_size && ++big_init_gaps > 2)
++          do_zeroing = false;   // leave the hole, next time
++      }
++    }
++
++    // Collect the store and move on:
++    st->set_req(MemNode::Memory, inits);
++    inits = st;                 // put it on the linearized chain
++    set_req(i, zmem);           // unhook from previous position
++
++    if (zeroes_done == st_off)
++      zeroes_done = next_init_off;
++
++    assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");
++
++    #ifdef ASSERT
++    // Various order invariants.  Weaker than stores_are_sane because
++    // a large constant tile can be filled in by smaller non-constant stores.
++    assert(st_off >= last_init_off, "inits do not reverse");
++    last_init_off = st_off;
++    const Type* val = NULL;
++    if (st_size >= BytesPerInt &&
++        (val = phase->type(st->in(MemNode::ValueIn)))->singleton() &&
++        (int)val->basic_type() < (int)T_OBJECT) {
++      assert(st_off >= last_tile_end, "tiles do not overlap");
++      assert(st_off >= last_init_end, "tiles do not overwrite inits");
++      last_tile_end = MAX2(last_tile_end, next_init_off);
++    } else {
++      intptr_t st_tile_end = align_size_up(next_init_off, BytesPerLong);
++      assert(st_tile_end >= last_tile_end, "inits stay with tiles");
++      assert(st_off      >= last_init_end, "inits do not overlap");
++      last_init_end = next_init_off;  // it's a non-tile
++    }
++    #endif //ASSERT
++  }
++
++  remove_extra_zeroes();        // clear out all the zmems left over
++  add_req(inits);
++
++  if (!ZeroTLAB) {
++    // If anything remains to be zeroed, zero it all now.
++    zeroes_done = align_size_down(zeroes_done, BytesPerInt);
++    // if it is the last unused 4 bytes of an instance, forget about it
++    intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
++    if (zeroes_done + BytesPerLong >= size_limit) {
++      assert(allocation() != NULL, "");
++      Node* klass_node = allocation()->in(AllocateNode::KlassNode);
++      ciKlass* k = phase->type(klass_node)->is_klassptr()->klass();
++      if (zeroes_done == k->layout_helper())
++        zeroes_done = size_limit;
++    }
++    if (zeroes_done < size_limit) {
++      rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
++                                            zeroes_done, size_in_bytes, phase);
++    }
++  }
++
++  set_complete(phase);
++  return rawmem;
++}
++
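The zeroing decision in the loop above is easiest to see in isolation: given how far incremental zeroing has progressed (zeroes_done) and where the next fully initialized word begins, compute the range that must be cleared underneath a subword store. A standalone sketch of just that arithmetic, matching the "z" column of the worked table in the comments (align_size_up/_down stand in for the VM helpers):

    #include <cstdio>

    static long align_size_up(long x, long a)   { return (x + a - 1) & ~(a - 1); }
    static long align_size_down(long x, long a) { return x & ~(a - 1); }

    int main() {
        const long BytesPerInt = 4;
        long zeroes_done     = 12;   // zeroing has reached offset 12
        long st_off          = 14;   // a lone byte store: the "z" example
        long st_size         = 1;
        long next_full_store = 16;   // next fully initialized word starts here

        long zeroes_needed = st_off;
        if (st_size < BytesPerInt)
            zeroes_needed = (next_full_store < 0)
                ? align_size_up(zeroes_needed, BytesPerInt)  // conservative tack
                : next_full_store;                           // zero up to that word
        if (zeroes_needed > zeroes_done) {
            zeroes_done = align_size_down(zeroes_done, BytesPerInt);
            printf("clear_memory over [%ld..%ld)\n", zeroes_done, zeroes_needed);
        }
        return 0;
    }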
++
++#ifdef ASSERT
++bool InitializeNode::stores_are_sane(PhaseTransform* phase) {
++  if (is_complete())
++    return true;                // stores could be anything at this point
++  intptr_t last_off = sizeof(oopDesc);
++  for (uint i = InitializeNode::RawStores; i < req(); i++) {
++    Node* st = in(i);
++    intptr_t st_off = get_store_offset(st, phase);
++    if (st_off < 0)  continue;  // ignore dead garbage
++    if (last_off > st_off) {
++      tty->print_cr("*** bad store offset at %d: %d > %d", i, last_off, st_off);
++      this->dump(2);
++      assert(false, "ascending store offsets");
++      return false;
++    }
++    last_off = st_off + st->as_Store()->memory_size();
++  }
++  return true;
++}
++#endif //ASSERT
++
++
++
++
++//============================MergeMemNode=====================================
++//
+ // SEMANTICS OF MEMORY MERGES:  A MergeMem is a memory state assembled from several
+ // contributing store or call operations.  Each contributor provides the memory
+ // state for a particular "alias type" (see Compile::alias_type).  For example,
+@@ -1523,54 +2693,54 @@
+ // to alias category #6 may use X as its memory state input, as an exact equivalent
+ // to using the MergeMem as a whole.
+ //   Load<6>( MergeMem(<6>: X, ...), p ) <==> Load<6>(X,p)
+-// 
++//
+ // (Here, the <N> notation gives the index of the relevant adr_type.)
+-// 
++//
+ // In one special case (and more cases in the future), alias categories overlap.
+ // The special alias category "Bot" (Compile::AliasIdxBot) includes all memory
+ // states.  Therefore, if a MergeMem has only one contributing input W for Bot,
+ // it is exactly equivalent to that state W:
+ //   MergeMem(<Bot>: W) <==> W
+-// 
++//
+ // Usually, the merge has more than one input.  In that case, where inputs
+ // overlap (i.e., one is Bot), the narrower alias type determines the memory
+ // state for that type, and the wider alias type (Bot) fills in everywhere else:
+ //   Load<5>( MergeMem(<Bot>: W, <6>: X), p ) <==> Load<5>(W,p)
+ //   Load<6>( MergeMem(<Bot>: W, <6>: X), p ) <==> Load<6>(X,p)
+-// 
++//
+ // A merge can take a "wide" memory state as one of its narrow inputs.
+ // This simply means that the merge observes out only the relevant parts of
+ // the wide input.  That is, wide memory states arriving at narrow merge inputs
+ // are implicitly "filtered" or "sliced" as necessary.  (This is rare.)
+-// 
++//
+ // These rules imply that MergeMem nodes may cascade (via their <Bot> links),
+ // and that memory slices "leak through":
+ //   MergeMem(<Bot>: MergeMem(<Bot>: W, <7>: Y)) <==> MergeMem(<Bot>: W, <7>: Y)
+-// 
++//
+ // But, in such a cascade, repeated memory slices can "block the leak":
+ //   MergeMem(<Bot>: MergeMem(<Bot>: W, <7>: Y), <7>: Y') <==> MergeMem(<Bot>: W, <7>: Y')
+-// 
++//
+ // In the last example, Y is not part of the combined memory state of the
+ // outermost MergeMem.  The system must, of course, prevent unschedulable
+ // memory states from arising, so you can be sure that the state Y is somehow
+ // a precursor to state Y'.
+-// 
+-// 
++//
++//
+ // REPRESENTATION OF MEMORY MERGES: The indexes used to address the Node::in array
+ // of each MergeMemNode array are exactly the numerical alias indexes, including
+ // but not limited to AliasIdxTop, AliasIdxBot, and AliasIdxRaw.  The functions
+ // Compile::alias_type (and kin) produce and manage these indexes.
+-// 
++//
+ // By convention, the value of in(AliasIdxTop) (i.e., in(1)) is always the top node.
+ // (Note that this provides quick access to the top node inside MergeMem methods,
+ // without the need to reach out via TLS to Compile::current.)
+-// 
++//
+ // As a consequence of what was just described, a MergeMem that represents a full
+ // memory state has an edge in(AliasIdxBot) which is a "wide" memory state,
+ // containing all alias categories.
+-// 
++//
+ // MergeMem nodes never (?) have control inputs, so in(0) is NULL.
+-// 
++//
+ // All other edges in(N) (including in(AliasIdxRaw), which is in(3)) are either
+ // a memory state for the alias type <N>, or else the top node, meaning that
+ // there is no particular input for that alias type.  Note that the length of
+@@ -1580,37 +2750,37 @@
+ //
+ // This use of top is named "empty_memory()", or "empty_mem" (no-memory) as a variable.
+ // (Top was chosen because it works smoothly with passes like GCM.)
+-// 
++//
+ // For convenience, we hardwire the alias index for TypeRawPtr::BOTTOM.  (It is
+ // the type of random VM bits like TLS references.)  Since it is always the
+ // first non-Bot memory slice, some low-level loops use it to initialize an
+ // index variable:  for (i = AliasIdxRaw; i < req(); i++).
+ //
+-// 
++//
+ // ACCESSORS:  There is a special accessor MergeMemNode::base_memory which returns
+ // the distinguished "wide" state.  The accessor MergeMemNode::memory_at(N) returns
+ // the memory state for alias type <N>, or (if there is no particular slice at <N>,
+ // it returns the base memory.  To prevent bugs, memory_at does not accept <Top>
+ // or <Bot> indexes.  The iterator MergeMemStream provides robust iteration over
+ // MergeMem nodes or pairs of such nodes, ensuring that the non-top edges are visited.
+-// 
++//
+ // %%%% We may get rid of base_memory as a separate accessor at some point; it isn't
+ // really that different from the other memory inputs.  An abbreviation called
+ // "bot_memory()" for "memory_at(AliasIdxBot)" would keep code tidy.
+-// 
+-// 
++//
++//
+ // PARTIAL MEMORY STATES:  During optimization, MergeMem nodes may arise that represent
+ // partial memory states.  When a Phi splits through a MergeMem, the copy of the Phi
+ // that "emerges though" the base memory will be marked as excluding the alias types
+ // of the other (narrow-memory) copies which "emerged through" the narrow edges:
+-// 
++//
+ //   Phi<Bot>(U, MergeMem(<Bot>: W, <8>: Y))
+ //     ==Ideal=>  MergeMem(<Bot>: Phi<Bot-8>(U, W), Phi<8>(U, Y))
+-// 
++//
+ // This strange "subtraction" effect is necessary to ensure IGVN convergence.
+ // (It is currently unimplemented.)  As you can see, the resulting merge is
+ // actually a disjoint union of memory states, rather than an overlay.
+-// 
++//
+ 
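Those accessor rules reduce to a simple lookup: a narrow slice answers for its own alias index, and the wide <Bot> state answers for everything else. A toy model of that behavior (illustrative names only; the real memory_at also rejects <Top>/<Bot> indexes and handles the empty-memory sentinel):

    #include <map>
    #include <string>
    #include <iostream>

    // Toy MergeMem: one wide "base" state plus sparse narrow slices.
    struct ToyMergeMem {
        std::string base;                    // state for AliasIdxBot (everything)
        std::map<int, std::string> slices;   // narrow states by alias index

        const std::string& memory_at(int alias_idx) const {
            auto it = slices.find(alias_idx);
            return it != slices.end() ? it->second : base;  // fall back to wide
        }
    };

    int main() {
        ToyMergeMem mm{"W", {{6, "X"}}};
        std::cout << mm.memory_at(6) << "\n";  // X: Load<6> sees the narrow slice
        std::cout << mm.memory_at(5) << "\n";  // W: no slice at 5, Bot answers
    }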
+ //------------------------------MergeMemNode-----------------------------------
+ Node* MergeMemNode::make_empty_memory() {
+@@ -1621,7 +2791,7 @@
+ 
+ MergeMemNode::MergeMemNode(Node *new_base) : Node(1+Compile::AliasIdxRaw) {
+   init_class_id(Class_MergeMem);
+-  // all inputs are nullified in Node::Node(int) 
++  // all inputs are nullified in Node::Node(int)
+   // set_input(0, NULL);  // no control input
+ 
+   // Initialize the edges uniformly to top, for starters.
+@@ -1651,13 +2821,13 @@
+ 
+ //------------------------------cmp--------------------------------------------
+ uint MergeMemNode::hash() const { return NO_HASH; }
+-uint MergeMemNode::cmp( const Node &n ) const { 
++uint MergeMemNode::cmp( const Node &n ) const {
+   return (&n == this);          // Always fail except on self
+ }
+ 
+ //------------------------------Identity---------------------------------------
+ Node* MergeMemNode::Identity(PhaseTransform *phase) {
+-  // Identity if this merge point does not record any interesting memory 
++  // Identity if this merge point does not record any interesting memory
+   // disambiguations.
+   Node* base_mem = base_memory();
+   Node* empty_mem = empty_memory();
+@@ -1731,7 +2901,7 @@
+   // mechanisms.  This method repairs that damage.
+ 
+   assert(!old_mbase || old_mbase->is_empty_memory(empty_mem), "consistent sentinels");
+-  
++
+   // Look at each slice.
+   for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
+     Node* old_in = in(i);
+@@ -1816,7 +2986,7 @@
+   if( base_memory()->is_MergeMem() ) {
+     MergeMemNode *new_mbase = base_memory()->as_MergeMem();
+     Node *m = phase->transform(new_mbase);  // Rollup any cycles
+-    if( m != NULL && (m->is_top() || 
++    if( m != NULL && (m->is_top() ||
+         m->is_MergeMem() && m->as_MergeMem()->base_memory() == empty_mem) ) {
+       // propagate rollup of dead cycle to self
+       set_req(Compile::AliasIdxBot, empty_mem);
+@@ -1873,16 +3043,16 @@
+ 
+ //------------------------------dump_spec--------------------------------------
+ #ifndef PRODUCT
+-void MergeMemNode::dump_spec() const {
+-  tty->print(" {");
++void MergeMemNode::dump_spec(outputStream *st) const {
++  st->print(" {");
+   Node* base_mem = base_memory();
+   for( uint i = Compile::AliasIdxRaw; i < req(); i++ ) {
+     Node* mem = memory_at(i);
+-    if (mem == base_mem) { tty->print(" -"); continue; }
+-    tty->print( " N%d:", mem->_idx );
+-    Compile::current()->get_adr_type(i)->dump();
++    if (mem == base_mem) { st->print(" -"); continue; }
++    st->print( " N%d:", mem->_idx );
++    Compile::current()->get_adr_type(i)->dump_on(st);
+   }
+-  tty->print(" }");
++  st->print(" }");
+ }
+ #endif // !PRODUCT
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/opto/memnode.hpp openjdk/hotspot/src/share/vm/opto/memnode.hpp
+--- openjdk6/hotspot/src/share/vm/opto/memnode.hpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/memnode.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)memnode.hpp	1.118 07/05/05 17:06:18 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Portions of code courtesy of Clifford Click
+@@ -48,7 +45,7 @@
+   };
+ protected:
+   MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
+-    : Node(c0,c1,c2   ) { 
++    : Node(c0,c1,c2   ) {
+     init_class_id(Class_Mem);
+     debug_only(_adr_type=at; adr_type();)
+   }
+@@ -63,14 +60,21 @@
+     debug_only(_adr_type=at; adr_type();)
+   }
+ 
++  // Helpers for the optimizer.  Documented in memnode.cpp.
++  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
++                                      Node* p2, AllocateNode* a2,
++                                      PhaseTransform* phase);
+   static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);
+ 
+ public:
++  // This one should probably be a phase-specific function:
++  static bool detect_dominating_control(Node* dom, Node* sub);
++
+   // Is this Node a MemNode or some descendent?  Default is YES.
+   virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );
+ 
+   virtual const class TypePtr *adr_type() const;  // returns bottom_type of address
+-  
++
+   // Shared code for Ideal methods:
+   Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.
+ 
+@@ -79,7 +83,7 @@
+ 
+   // Raw access function, to allow copying of adr_type efficiently in
+   // product builds and retain the debug info for debug builds.
+-  const TypePtr *raw_adr_type() const { 
++  const TypePtr *raw_adr_type() const {
+ #ifdef ASSERT
+     return _adr_type;
+ #else
+@@ -105,8 +109,8 @@
+   Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;
+ 
+ #ifndef PRODUCT
+-  static void dump_adr_type(const Node* mem, const TypePtr* adr_type);
+-  virtual void dump_spec() const;
++  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
++  virtual void dump_spec(outputStream *st) const;
+ #endif
+ };
+ 
+@@ -118,7 +122,7 @@
+   virtual uint size_of() const; // Size is bigger
+   const Type* const _type;      // What kind of value is loaded?
+ public:
+-         
++
+   LoadNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt )
+     : MemNode(c,mem,adr,at), _type(rt) {
+     init_class_id(Class_Load);
+@@ -160,7 +164,7 @@
+   virtual int store_Opcode() const = 0;
+ 
+ #ifndef PRODUCT
+-  virtual void dump_spec() const;
++  virtual void dump_spec(outputStream *st) const;
+ #endif
+ protected:
+   const Type* load_array_final_field(const TypeKlassPtr *tkls,
+@@ -184,7 +188,7 @@
+ // Load a char (16 bits, unsigned) from memory
+ class LoadCNode : public LoadNode {
+ public:
+-  LoadCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::CHAR ) 
++  LoadCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::CHAR )
+     : LoadNode(c,mem,adr,at,ti) {}
+   virtual int Opcode() const;
+   virtual uint ideal_reg() const { return Op_RegI; }
+@@ -197,7 +201,7 @@
+ // Load an integer from memory
+ class LoadINode : public LoadNode {
+ public:
+-  LoadINode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::INT ) 
++  LoadINode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::INT )
+     : LoadNode(c,mem,adr,at,ti) {}
+   virtual int Opcode() const;
+   virtual uint ideal_reg() const { return Op_RegI; }
+@@ -241,9 +245,9 @@
+   bool require_atomic_access() { return _require_atomic_access; }
+   static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt);
+ #ifndef PRODUCT
+-  virtual void dump_spec() const {
+-    LoadNode::dump_spec();
+-    if (_require_atomic_access)  tty->print(" Atomic!");
++  virtual void dump_spec(outputStream *st) const {
++    LoadNode::dump_spec(st);
++    if (_require_atomic_access)  st->print(" Atomic!");
+   }
+ #endif
+ };
+@@ -252,7 +256,7 @@
+ // Load a long from unaligned memory
+ class LoadL_unalignedNode : public LoadLNode {
+ public:
+-  LoadL_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at ) 
++  LoadL_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
+     : LoadLNode(c,mem,adr,at) {}
+   virtual int Opcode() const;
+ };
+@@ -261,7 +265,7 @@
+ // Load a float (32 bits) from memory
+ class LoadFNode : public LoadNode {
+ public:
+-  LoadFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::FLOAT ) 
++  LoadFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::FLOAT )
+     : LoadNode(c,mem,adr,at,t) {}
+   virtual int Opcode() const;
+   virtual uint ideal_reg() const { return Op_RegF; }
+@@ -273,7 +277,7 @@
+ // Load a double (64 bits) from memory
+ class LoadDNode : public LoadNode {
+ public:
+-  LoadDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::DOUBLE ) 
++  LoadDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::DOUBLE )
+     : LoadNode(c,mem,adr,at,t) {}
+   virtual int Opcode() const;
+   virtual uint ideal_reg() const { return Op_RegD; }
+@@ -285,7 +289,7 @@
+ // Load a double from unaligned memory
+ class LoadD_unalignedNode : public LoadDNode {
+ public:
+-  LoadD_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at ) 
++  LoadD_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
+     : LoadDNode(c,mem,adr,at) {}
+   virtual int Opcode() const;
+ };
+@@ -294,7 +298,7 @@
+ // Load a pointer from memory (either object or array)
+ class LoadPNode : public LoadNode {
+ public:
+-  LoadPNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t ) 
++  LoadPNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t )
+     : LoadNode(c,mem,adr,at,t) {}
+   virtual int Opcode() const;
+   virtual uint ideal_reg() const { return Op_RegP; }
+@@ -309,7 +313,7 @@
+   // results (new raw memory state) inside of loops preventing all manner of
+   // other optimizations).  Basically, it's ugly but so is the alternative.
+   // See comment in macro.cpp, around line 125 expand_allocate_common().
+-  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; } 
++  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
+ };
+ 
+ //------------------------------LoadKlassNode----------------------------------
+@@ -321,14 +325,14 @@
+   virtual int Opcode() const;
+   virtual const Type *Value( PhaseTransform *phase ) const;
+   virtual Node *Identity( PhaseTransform *phase );
+-  virtual bool depends_only_on_test() const { return true; } 
++  virtual bool depends_only_on_test() const { return true; }
+ };
+ 
+ //------------------------------LoadSNode--------------------------------------
+ // Load a short (16 bits, signed) from memory
+ class LoadSNode : public LoadNode {
+ public:
+-  LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT ) 
++  LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
+     : LoadNode(c,mem,adr,at,ti) {}
+   virtual int Opcode() const;
+   virtual uint ideal_reg() const { return Op_RegI; }
+@@ -436,9 +440,9 @@
+   bool require_atomic_access() { return _require_atomic_access; }
+   static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val);
+ #ifndef PRODUCT
+-  virtual void dump_spec() const {
+-    StoreNode::dump_spec();
+-    if (_require_atomic_access)  tty->print(" Atomic!");
++  virtual void dump_spec(outputStream *st) const {
++    StoreNode::dump_spec(st);
++    if (_require_atomic_access)  st->print(" Atomic!");
+   }
+ #endif
+ };
+@@ -493,7 +497,7 @@
+     : LoadPNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM) {}
+   virtual int Opcode() const;
+   virtual int store_Opcode() const { return Op_StorePConditional; }
+-  virtual bool depends_only_on_test() const { return true; } 
++  virtual bool depends_only_on_test() const { return true; }
+ };
+ 
+ //------------------------------LoadLLockedNode---------------------------------
+@@ -521,7 +525,7 @@
+   virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
+   virtual const Type *Value( PhaseTransform *phase ) const;
+ #ifndef PRODUCT
+-  virtual void dump_spec() const {};
++  virtual void dump_spec(outputStream *st) const {};
+ #endif
+ };
+ 
+@@ -596,11 +600,19 @@
+   virtual uint match_edge(uint idx) const;
+ 
+   // Clear the given area of an object or array.
+-  // Assume that the end_offset is aligned mod BytesPerLong.
+-  // The start_offset (e.g., 8 or 12) must be aligned at least mod BytesPerInt.
++  // The start offset must always be aligned mod BytesPerInt.
++  // The end offset must always be aligned mod BytesPerLong.
+   // Return the new memory.
+   static Node* clear_memory(Node* control, Node* mem, Node* dest,
+                             intptr_t start_offset,
++                            intptr_t end_offset,
++                            PhaseGVN* phase);
++  static Node* clear_memory(Node* control, Node* mem, Node* dest,
++                            intptr_t start_offset,
++                            Node* end_offset,
++                            PhaseGVN* phase);
++  static Node* clear_memory(Node* control, Node* mem, Node* dest,
++                            Node* start_offset,
+                             Node* end_offset,
+                             PhaseGVN* phase);
+ };
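The two alignment contracts on clear_memory (start aligned mod BytesPerInt, end aligned mod BytesPerLong) are what let the implementation clear at most one leading int by hand and then blast the rest in whole 8-byte words. A rough sketch of the arithmetic those contracts imply, not the VM code itself:

    #include <cstdio>

    int main() {
        const long BytesPerInt = 4, BytesPerLong = 8;
        long start = 12, end = 32;   // contract: start % 4 == 0, end % 8 == 0
        if (start % BytesPerLong != 0) {
            printf("store one zero jint at offset %ld\n", start);
            start += BytesPerInt;    // start is now 8-byte aligned
        }
        printf("bulk-clear whole longs over [%ld..%ld)\n", start, end);
        return 0;
    }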
+@@ -705,6 +717,92 @@
+   virtual uint ideal_reg() const { return 0; } // not matched in the AD file
+ };
+ 
++// Isolation of object setup after an AllocateNode and before next safepoint.
++// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
++class InitializeNode: public MemBarNode {
++  friend class AllocateNode;
++
++  bool _is_complete;
++
++public:
++  enum {
++    Control    = TypeFunc::Control,
++    Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
++    RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
++    RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
++  };
++
++  InitializeNode(Compile* C, int adr_type, Node* rawoop);
++  virtual int Opcode() const;
++  virtual uint size_of() const { return sizeof(*this); }
++  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
++  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress
++
++  // Manage incoming memory edges via a MergeMem on in(Memory):
++  Node* memory(uint alias_idx);
++
++  // The raw memory edge coming directly from the Allocation.
++  // The contents of this memory are *always* all-zero-bits.
++  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }
++
++  // Return the corresponding allocation for this initialization (or null if none).
++  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
++  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
++  AllocateNode* allocation();
++
++  // Anything other than zeroing in this init?
++  bool is_non_zero();
++
++  // An InitializeNode must be completed before macro expansion is done.
++  // Completion requires that the AllocateNode must be followed by
++  // initialization of the new memory to zero, then to any initializers.
++  bool is_complete() { return _is_complete; }
++
++  // Mark complete.  (Must not yet be complete.)
++  void set_complete(PhaseGVN* phase);
++
++#ifdef ASSERT
++  // ensure all non-degenerate stores are ordered and non-overlapping
++  bool stores_are_sane(PhaseTransform* phase);
++#endif //ASSERT
++
++  // See if this store can be captured; return offset where it initializes.
++  // Return 0 if the store cannot be moved (any sort of problem).
++  intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase);
++
++  // Capture another store; reformat it to write my internal raw memory.
++  // Return the captured copy, else NULL if there is some sort of problem.
++  Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase);
++
++  // Find captured store which corresponds to the range [start..start+size).
++  // Return my own memory projection (meaning the initial zero bits)
++  // if there is no such store.  Return NULL if there is a problem.
++  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);
++
++  // Called when the associated AllocateNode is expanded into CFG.
++  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
++                        intptr_t header_size, Node* size_in_bytes,
++                        PhaseGVN* phase);
++
++ private:
++  void remove_extra_zeroes();
++
++  // Find out where a captured store should be placed (or already is placed).
++  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
++                                     PhaseTransform* phase);
++
++  static intptr_t get_store_offset(Node* st, PhaseTransform* phase);
++
++  Node* make_raw_address(intptr_t offset, PhaseTransform* phase);
++
++  bool detect_init_independence(Node* n, bool st_is_pinned, int& count);
++
++  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
++                               PhaseGVN* phase);
++
++  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
++};
++
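The capture machinery declared above keeps the RawStores edges sorted by offset, and captured_store_insertion_point reports where a new store belongs, flagging overlap by negating the index (callers strip the sign, as seen earlier in coalesce_subword_stores). A toy model of that search under those assumptions; the real method walks Node edges and returns 0 on any problem:

    #include <vector>
    #include <utility>
    #include <cstdio>

    // Toy: stores kept sorted by offset.  Returns the insertion index,
    // negated when [start..start+size) overlaps an existing store.
    static int insertion_point(const std::vector<std::pair<long,int>>& stores,
                               long start, int size) {
        int i = 0;
        for (; i < (int)stores.size(); i++) {
            long off = stores[i].first;
            int  sz  = stores[i].second;
            if (off >= start + size) break;         // strictly after our range
            if (off + sz > start) return -(i + 1);  // overlap detected
        }
        return i + 1;                               // 1-based, like RawStores edges
    }

    int main() {
        std::vector<std::pair<long,int>> stores = {{8, 4}, {16, 8}};  // offset,size
        printf("%d\n", insertion_point(stores, 12, 4));  // 2: fits between them
        printf("%d\n", insertion_point(stores, 14, 4));  // -2: overlaps {16,8}
        return 0;
    }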
+ //------------------------------MergeMem---------------------------------------
+ // (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
+ class MergeMemNode: public Node {
+@@ -747,7 +845,7 @@
+   void grow_to_match(const MergeMemNode* other);
+   bool verify_sparse() const PRODUCT_RETURN0;
+ #ifndef PRODUCT
+-  virtual void dump_spec() const;
++  virtual void dump_spec(outputStream *st) const;
+ #endif
+ };
+ 
+@@ -962,4 +1060,3 @@
+   virtual uint match_edge(uint idx) const { return idx==2; }
+   virtual const Type *bottom_type() const { return Type::ABIO; }
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/opto/mulnode.cpp openjdk/hotspot/src/share/vm/opto/mulnode.cpp
+--- openjdk6/hotspot/src/share/vm/opto/mulnode.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/mulnode.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)mulnode.cpp	1.133 07/05/05 17:06:16 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Portions of code courtesy of Clifford Click
+@@ -33,7 +30,7 @@
+ 
+ //=============================================================================
+ //------------------------------hash-------------------------------------------
+-// Hash function over MulNodes.  Needs to be commutative; i.e., I swap 
++// Hash function over MulNodes.  Needs to be commutative; i.e., I swap
+ // (commute) inputs to MulNodes willy-nilly so the hash function must return
+ // the same value in the presence of edge swapping.
+ uint MulNode::hash() const {
+@@ -51,7 +48,7 @@
+ }
+ 
+ //------------------------------Ideal------------------------------------------
+-// We also canonicalize the Node, moving constants to the right input, 
++// We also canonicalize the Node, moving constants to the right input,
+ // and flatten expressions (so that 1+x+2 becomes x+3).
+ Node *MulNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+   const Type *t1 = phase->type( in(1) );
+@@ -77,14 +74,14 @@
+   uint op = Opcode();
+   if( t2->singleton() &&        // Right input is a constant?
+       op != Op_MulF &&          // Float & double cannot reassociate
+-      op != Op_MulD ) { 
++      op != Op_MulD ) {
+     if( t2 == Type::TOP ) return NULL;
+     Node *mul1 = in(1);
+ #ifdef ASSERT
+     // Check for dead loop
+     int   op1 = mul1->Opcode();
+     if( phase->eqv( mul1, this ) || phase->eqv( in(2), this ) ||
+-        ( op1 == mul_opcode() || op1 == add_opcode() ) && 
++        ( op1 == mul_opcode() || op1 == add_opcode() ) &&
+         ( phase->eqv( mul1->in(1), this ) || phase->eqv( mul1->in(2), this ) ||
+           phase->eqv( mul1->in(1), mul1 ) || phase->eqv( mul1->in(2), mul1 ) ) )
+       assert(false, "dead loop in MulNode::Ideal");
+@@ -92,7 +89,7 @@
+ 
+     if( mul1->Opcode() == mul_opcode() ) {  // Left input is a multiply?
+       // Mul of a constant?
+-      const Type *t12 = phase->type( mul1->in(2) ); 
++      const Type *t12 = phase->type( mul1->in(2) );
+       if( t12->singleton() && t12 != Type::TOP) { // Left input is an add of a constant?
+         // Compute new constant; check for overflow
+         const Type *tcon01 = mul1->as_Mul()->mul_ring(t2,t12);
+@@ -105,18 +102,18 @@
+         }
+       }
+     }
+-    // If the right input is a constant, and the left input is an add of a 
++    // If the right input is a constant, and the left input is an add of a
+     // constant, flatten the tree: (X+con1)*con0 ==> X*con0 + con1*con0
+     const Node *add1 = in(1);
+     if( add1->Opcode() == add_opcode() ) {      // Left input is an add?
+       // Add of a constant?
+-      const Type *t12 = phase->type( add1->in(2) ); 
++      const Type *t12 = phase->type( add1->in(2) );
+       if( t12->singleton() && t12 != Type::TOP ) { // Left input is an add of a constant?
+         assert( add1->in(1) != add1, "dead loop in MulNode::Ideal" );
+         // Compute new constant; check for overflow
+         const Type *tcon01 = mul_ring(t2,t12);
+         if( tcon01->singleton() ) {
+-        
++
+         // Convert (X+con1)*con0 into X*con0
+           Node *mul = clone();    // mul = ()*con0
+           mul->set_req(1,add1->in(1));  // mul = X*con0
+@@ -171,7 +168,7 @@
+   } else if ((con = in(2)->find_int_con(0)) == 0) {
+     return MulNode::Ideal(phase, can_reshape);
+   }
+- 
++
+   // Now we have a constant Node on the right and the constant in con
+   if( con == 0 ) return NULL;   // By zero is handled by Value call
+   if( con == 1 ) return NULL;   // By one  is handled by Identity call
+@@ -267,7 +264,7 @@
+   } else if ((con = in(2)->find_long_con(0)) == 0) {
+     return MulNode::Ideal(phase, can_reshape);
+   }
+- 
++
+   // Now we have a constant Node on the right and the constant in con
+   if( con == CONST64(0) ) return NULL;  // By zero is handled by Value call
+   if( con == CONST64(1) ) return NULL;  // By one  is handled by Identity call
+@@ -379,7 +376,7 @@
+   int widen = MAX2(r0->_widen,r1->_widen);
+ 
+   // If either input is a constant, might be able to trim cases
+-  if( !r0->is_con() && !r1->is_con() ) 
++  if( !r0->is_con() && !r1->is_con() )
+     return TypeInt::INT;        // No constants to be had
+ 
+   // Both constants?  Return bits
+@@ -410,11 +407,18 @@
+   const TypeInt *t2 = phase->type( in(2) )->isa_int();
+   if( t2 && t2->is_con() ) {
+     int con = t2->get_con();
+-    uint lop = load->Opcode();      
++    // Masking off high bits which are always zero is useless.
++    const TypeInt* t1 = phase->type( in(1) )->isa_int();
++    if (t1 != NULL && t1->_lo >= 0) {
++      jint t1_support = ((jint)1 << (1 + log2_intptr(t1->_hi))) - 1;
++      if ((t1_support & con) == t1_support)
++        return load;
++    }
++    uint lop = load->Opcode();
+     if( lop == Op_LoadC &&
+         con == 0x0000FFFF )     // Already zero-extended
+       return load;
+-    // Masking off the high bits of a unsigned-shift-right is not 
++    // Masking off the high bits of a unsigned-shift-right is not
+     // needed either.
+     if( lop == Op_URShiftI ) {
+       const TypeInt *t12 = phase->type( load->in(2) )->isa_int();
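The new early-out in this hunk rests on one fact: if the left input's type is a nonnegative range [0..hi], every bit it can ever produce lies inside t1_support = (1 << (1 + floor(log2(hi)))) - 1, so a mask covering all of those bits is an identity. A scalar re-derivation (log2_floor is a stand-in for the VM's log2_intptr; hi > 0 assumed):

    #include <cstdio>

    static int log2_floor(unsigned x) {          // floor(log2(x)), x > 0
        int n = -1;
        while (x) { x >>= 1; n++; }
        return n;
    }

    int main() {
        int hi  = 200;                           // type of left input: [0..200]
        int con = 0xFF;                          // the AND mask
        int support = (1 << (1 + log2_floor(hi))) - 1;   // 0xFF covers [0..255]
        if ((support & con) == support)
            printf("AndI is redundant: mask 0x%x keeps every possible bit 0x%x\n",
                   con, support);
        return 0;
    }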
+@@ -477,7 +481,7 @@
+       int shift = t12->get_con();
+       shift &= BitsPerJavaInteger-1;  // semantics of Java shifts
+       const int sign_bits_mask = ~right_n_bits(BitsPerJavaInteger - shift);
+-      // If the AND'ing of the 2 masks has no bits, then only original shifted 
++      // If the AND'ing of the 2 masks has no bits, then only original shifted
+       // bits survive.  NO sign-extension bits survive the maskings.
+       if( (sign_bits_mask & mask) == 0 ) {
+         // Use zero-fill shift instead
+@@ -490,7 +494,7 @@
+   // Check for 'negate/and-1', a pattern emitted when someone asks for
+   // 'mod 2'.  Negate leaves the low order bit unchanged (think: complement
+   // plus 1) and the mask is of the low order bit.  Skip the negate.
+-  if( lop == Op_SubI && mask == 1 && load->in(1) && 
++  if( lop == Op_SubI && mask == 1 && load->in(1) &&
+       phase->type(load->in(1)) == TypeInt::ZERO )
+     return new (phase->C, 3) AndINode( load->in(2), in(2) );
+ 
+@@ -509,7 +513,7 @@
+   int widen = MAX2(r0->_widen,r1->_widen);
+ 
+   // If either input is a constant, might be able to trim cases
+-  if( !r0->is_con() && !r1->is_con() ) 
++  if( !r0->is_con() && !r1->is_con() )
+     return TypeLong::LONG;      // No constants to be had
+ 
+   // Both constants?  Return bits
+@@ -536,8 +540,15 @@
+   const TypeLong *t2 = phase->type( in(2) )->isa_long();
+   if( t2 && t2->is_con() ) {
+     jlong con = t2->get_con();
++    // Masking off high bits which are always zero is useless.
++    const TypeLong* t1 = phase->type( in(1) )->isa_long();
++    if (t1 != NULL && t1->_lo >= 0) {
++      jlong t1_support = ((jlong)1 << (1 + log2_long(t1->_hi))) - 1;
++      if ((t1_support & con) == t1_support)
++        return usr;
++    }
+     uint lop = usr->Opcode();
+-    // Masking off the high bits of a unsigned-shift-right is not 
++    // Masking off the high bits of a unsigned-shift-right is not
+     // needed either.
+     if( lop == Op_URShiftL ) {
+       const TypeInt *t12 = phase->type( usr->in(2) )->isa_int();
+@@ -569,7 +580,7 @@
+       int shift = t12->get_con();
+       shift &= (BitsPerJavaInteger*2)-1;  // semantics of Java shifts
+       const jlong sign_bits_mask = ~(((jlong)CONST64(1) << (jlong)(BitsPerJavaInteger*2 - shift)) -1);
+-      // If the AND'ing of the 2 masks has no bits, then only original shifted 
++      // If the AND'ing of the 2 masks has no bits, then only original shifted
+       // bits survive.  NO sign-extension bits survive the maskings.
+       if( (sign_bits_mask & mask) == 0 ) {
+         // Use zero-fill shift instead
+@@ -590,7 +601,7 @@
+ }
+ 
+ //------------------------------Ideal------------------------------------------
+-// If the right input is a constant, and the left input is an add of a 
++// If the right input is a constant, and the left input is an add of a
+ // constant, flatten the tree: (X+con1)<<con0 ==> X<<con0 + con1<<con0
+ Node *LShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) {
+   const Type *t  = phase->type( in(2) );
+@@ -608,7 +619,7 @@
+     assert( add1 != add1->in(1), "dead loop in LShiftINode::Ideal" );
+     const TypeInt *t12 = phase->type(add1->in(2))->isa_int();
+     if( t12 && t12->is_con() ){ // Left input is an add of a con?
+-      // Transform is legal, but check for profit.  Avoid breaking 'i2s' 
++      // Transform is legal, but check for profit.  Avoid breaking 'i2s'
+       // and 'i2b' patterns which typically fold into 'StoreC/StoreB'.
+       if( con < 16 ) {
+         // Compute X << con0
+@@ -621,7 +632,7 @@
+ 
+   // Check for "(x>>c0)<<c0" which just masks off low bits
+   if( (add1_op == Op_RShiftI || add1_op == Op_URShiftI ) &&
+-      add1->in(2) == in(2) ) 
++      add1->in(2) == in(2) )
+     // Convert to "(x & -(1<<c0))"
+     return new (phase->C, 3) AndINode(add1->in(1),phase->intcon( -(1<<con)));
+ 
+@@ -663,17 +674,33 @@
+ 
+   // Either input is BOTTOM ==> the result is BOTTOM
+   if( (t1 == TypeInt::INT) || (t2 == TypeInt::INT) ||
+-      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) ) 
++      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) )
+     return TypeInt::INT;
+ 
+   const TypeInt *r1 = t1->is_int(); // Handy access
+   const TypeInt *r2 = t2->is_int(); // Handy access
+ 
+-  if( !r1->is_con() || !r2->is_con() )
++  if (!r2->is_con())
+     return TypeInt::INT;
+ 
+   uint shift = r2->get_con();
+   shift &= BitsPerJavaInteger-1;  // semantics of Java shifts
++  // Shift by a multiple of 32 does nothing:
++  if (shift == 0)  return t1;
++
++  // If the shift is a constant, shift the bounds of the type,
++  // unless this could lead to an overflow.
++  if (!r1->is_con()) {
++    jint lo = r1->_lo, hi = r1->_hi;
++    if (((lo << shift) >> shift) == lo &&
++        ((hi << shift) >> shift) == hi) {
++      // No overflow.  The range shifts up cleanly.
++      return TypeInt::make((jint)lo << (jint)shift,
++                           (jint)hi << (jint)shift,
++                           MAX2(r1->_widen,r2->_widen));
++    }
++    return TypeInt::INT;
++  }
+ 
+   return TypeInt::make( (jint)r1->get_con() << (jint)shift );
+ }
+@@ -686,7 +713,7 @@
+ }
+ 
+ //------------------------------Ideal------------------------------------------
+-// If the right input is a constant, and the left input is an add of a 
++// If the right input is a constant, and the left input is an add of a
+ // constant, flatten the tree: (X+con1)<<con0 ==> X<<con0 + con1<<con0
+ Node *LShiftLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+   const Type *t  = phase->type( in(2) );
+@@ -714,7 +741,7 @@
+ 
+   // Check for "(x>>c0)<<c0" which just masks off low bits
+   if( (add1_op == Op_RShiftL || add1_op == Op_URShiftL ) &&
+-      add1->in(2) == in(2) ) 
++      add1->in(2) == in(2) )
+     // Convert to "(x & -(1<<c0))"
+     return new (phase->C, 3) AndLNode(add1->in(1),phase->longcon( -(CONST64(1)<<con)));
+ 
+@@ -756,17 +783,33 @@
+ 
+   // Either input is BOTTOM ==> the result is BOTTOM
+   if( (t1 == TypeLong::LONG) || (t2 == TypeInt::INT) ||
+-      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) ) 
++      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) )
+     return TypeLong::LONG;
+ 
+   const TypeLong *r1 = t1->is_long(); // Handy access
+   const TypeInt  *r2 = t2->is_int();  // Handy access
+ 
+-  if( !r1->is_con() || !r2->is_con() )
++  if (!r2->is_con())
+     return TypeLong::LONG;
+ 
+   uint shift = r2->get_con();
+   shift &= (BitsPerJavaInteger*2)-1;  // semantics of Java shifts
++  // Shift by a multiple of 64 does nothing:
++  if (shift == 0)  return t1;
++
++  // If the shift is a constant, shift the bounds of the type,
++  // unless this could lead to an overflow.
++  if (!r1->is_con()) {
++    jlong lo = r1->_lo, hi = r1->_hi;
++    if (((lo << shift) >> shift) == lo &&
++        ((hi << shift) >> shift) == hi) {
++      // No overflow.  The range shifts up cleanly.
++      return TypeLong::make((jlong)lo << (jint)shift,
++                            (jlong)hi << (jint)shift,
++                            MAX2(r1->_widen,r2->_widen));
++    }
++    return TypeLong::LONG;
++  }
+ 
+   return TypeLong::make( (jlong)r1->get_con() << (jint)shift );
+ }
+@@ -843,10 +886,10 @@
+       set_req(2, phase->intcon(0));
+       return this;
+     }
+-    else if( ld->Opcode() == Op_LoadC ) 
++    else if( ld->Opcode() == Op_LoadC )
+       // Replace zero-extension-load with sign-extension-load
+-      return new (phase->C, 3) LoadSNode( ld->in(MemNode::Control), 
+-                                ld->in(MemNode::Memory), 
++      return new (phase->C, 3) LoadSNode( ld->in(MemNode::Control),
++                                ld->in(MemNode::Memory),
+                                 ld->in(MemNode::Address),
+                                 ld->adr_type());
+   }
+@@ -887,7 +930,7 @@
+ 
+   if (t2 == TypeInt::INT)
+     return TypeInt::INT;
+-      
++
+   const TypeInt *r1 = t1->is_int(); // Handy access
+   const TypeInt *r2 = t2->is_int(); // Handy access
+ 
+@@ -949,7 +992,7 @@
+ 
+   if (t2 == TypeInt::INT)
+     return TypeLong::LONG;
+-      
++
+   const TypeLong *r1 = t1->is_long(); // Handy access
+   const TypeInt  *r2 = t2->is_int (); // Handy access
+ 
+@@ -986,18 +1029,18 @@
+   const TypeInt *ti = phase->type( in(2) )->isa_int();
+   if ( ti && ti->is_con() && ( ti->get_con() & ( BitsPerInt - 1 ) ) == 0 ) return in(1);
+ 
+-  // Check for "((x << LogBytesPerWord) + (wordSize-1)) >> LogBytesPerWord" which is just "x". 
++  // Check for "((x << LogBytesPerWord) + (wordSize-1)) >> LogBytesPerWord" which is just "x".
+   // Happens during new-array length computation.
+   // Safe if 'x' is in the range [0..(max_int>>LogBytesPerWord)]
+   Node *add = in(1);
+   if( add->Opcode() == Op_AddI ) {
+     const TypeInt *t2  = phase->type(add->in(2))->isa_int();
+-    if( t2 && t2->is_con(wordSize - 1) && 
++    if( t2 && t2->is_con(wordSize - 1) &&
+         add->in(1)->Opcode() == Op_LShiftI ) {
+       // Check that shift_counts are LogBytesPerWord
+       Node          *lshift_count   = add->in(1)->in(2);
+       const TypeInt *t_lshift_count = phase->type(lshift_count)->isa_int();
+-      if( t_lshift_count && t_lshift_count->is_con(LogBytesPerWord) && 
++      if( t_lshift_count && t_lshift_count->is_con(LogBytesPerWord) &&
+           t_lshift_count == phase->type(in(2)) ) {
+         Node          *x   = add->in(1)->in(1);
+         const TypeInt *t_x = phase->type(x)->isa_int();
+@@ -1021,7 +1064,7 @@
+   const int mask = right_n_bits(BitsPerJavaInteger - con);
+ 
+   int in1_op = in(1)->Opcode();
+-  
++
+   // Check for ((x>>>a)>>>b) and replace with (x>>>(a+b)) when a+b < 32
+   if( in1_op == Op_URShiftI ) {
+     const TypeInt *t12 = phase->type( in(1)->in(2) )->isa_int();
+@@ -1033,7 +1076,7 @@
+         return new (phase->C, 3) URShiftINode( in(1)->in(1), phase->intcon(con3) );
+     }
+   }
+-  
++
+   // Check for ((x << z) + Y) >>> z.  Replace with x + con>>>z
+   // The idiom for rounding to a power of 2 is "(Q+(2^z-1)) >>> z".
+   // If Q is "X << z" the rounding is useless.  Look for patterns like
+@@ -1056,16 +1099,21 @@
+   if( in1_op == Op_AndI ) {
+     const TypeInt *t3 = phase->type( andi->in(2) )->isa_int();
+     if( t3 && t3->is_con() ) { // Right input is a constant
+-      const jint mask2 = t3->get_con();
++      jint mask2 = t3->get_con();
++      mask2 >>= con;  // *signed* shift downward (high-order zeroes do not help)
+       Node *newshr = phase->transform( new (phase->C, 3) URShiftINode(andi->in(1), in(2)) );
+-      return new (phase->C, 3) AndINode(newshr, phase->intcon( (mask2 >> con) & mask ));
++      return new (phase->C, 3) AndINode(newshr, phase->intcon(mask2));
++      // The negative values are easier to materialize than positive ones.
++      // A typical case from address arithmetic is ((x & ~15) >> 4).
++      // It's better to change that to ((x >> 4) & ~0) versus
++      // ((x >> 4) & 0x0FFFFFFF).  The difference is greatest in LP64.
+     }
+   }
+ 
+   // Check for "(X << z ) >>> z" which simply zero-extends
+   Node *shl = in(1);
+-  if( in1_op == Op_LShiftI && 
+-      phase->type(shl->in(2)) == t2 ) 
++  if( in1_op == Op_LShiftI &&
++      phase->type(shl->in(2)) == t2 )
+     return new (phase->C, 3) AndINode( shl->in(1), phase->intcon(mask) );
+ 
+   return NULL;
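The switch from `(mask2 >> con) & mask` to a plain signed shift in this hunk is deliberate: for masks like ~15 the arithmetic shift keeps the high-order ones, so the rewritten AND uses a small negative constant instead of a wide positive one. A two-line demonstration:

    #include <cstdio>

    int main() {
        int con  = 4;                 // shift count z
        int mask = ~15;               // typical address-arithmetic mask: ...11110000
        int signed_down   = mask >> con;            // arithmetic: stays ~0 == -1
        unsigned logical  = (unsigned)mask >> con;  // logical: 0x0FFFFFFF
        printf("signed: %d (0x%x), logical: 0x%x\n",
               signed_down, (unsigned)signed_down, logical);
        // ((x & ~15) >> 4)  ==>  (x >> 4) & -1, so the AND disappears entirely.
        return 0;
    }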
+@@ -1092,7 +1140,7 @@
+ 
+   if (t2 == TypeInt::INT)
+     return TypeInt::INT;
+-      
++
+   const TypeInt *r1 = t1->is_int();     // Handy access
+   const TypeInt *r2 = t2->is_int();     // Handy access
+ 
+@@ -1127,18 +1175,18 @@
+     return ti;
+   }
+ 
+-  // 
++  //
+   // Do not support shifted oops in info for GC
+-  // 
++  //
+   // else if( t1->base() == Type::InstPtr ) {
+-  //   
++  //
+   //   const TypeInstPtr *o = t1->is_instptr();
+-  //   if( t1->singleton() ) 
++  //   if( t1->singleton() )
+   //     return TypeInt::make( ((uint32)o->const_oop() + o->_offset) >> shift );
+   // }
+   // else if( t1->base() == Type::KlassPtr ) {
+   //   const TypeKlassPtr *o = t1->is_klassptr();
+-  //   if( t1->singleton() ) 
++  //   if( t1->singleton() )
+   //     return TypeInt::make( ((uint32)o->const_oop() + o->_offset) >> shift );
+   // }
+ 
+@@ -1184,16 +1232,17 @@
+   if( andi->Opcode() == Op_AndL ) {
+     const TypeLong *t3 = phase->type( andi->in(2) )->isa_long();
+     if( t3 && t3->is_con() ) { // Right input is a constant
+-      const jlong mask2 = t3->get_con();
++      jlong mask2 = t3->get_con();
++      mask2 >>= con;  // *signed* shift downward (high-order zeroes do not help)
+       Node *newshr = phase->transform( new (phase->C, 3) URShiftLNode(andi->in(1), in(2)) );
+-      return new (phase->C, 3) AndLNode(newshr, phase->longcon((mask2 >> con) & mask));
++      return new (phase->C, 3) AndLNode(newshr, phase->longcon(mask2));
+     }
+   }
+ 
+   // Check for "(X << z ) >>> z" which simply zero-extends
+   Node *shl = in(1);
+-  if( shl->Opcode() == Op_LShiftL && 
+-      phase->type(shl->in(2)) == t2 ) 
++  if( shl->Opcode() == Op_LShiftL &&
++      phase->type(shl->in(2)) == t2 )
+     return new (phase->C, 3) AndLNode( shl->in(1), phase->longcon(mask) );
+ 
+   return NULL;
+@@ -1220,7 +1269,7 @@
+ 
+   if (t2 == TypeInt::INT)
+     return TypeLong::LONG;
+-      
++
+   const TypeLong *r1 = t1->is_long(); // Handy access
+   const TypeInt  *r2 = t2->is_int (); // Handy access
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/opto/mulnode.hpp openjdk/hotspot/src/share/vm/opto/mulnode.hpp
+--- openjdk6/hotspot/src/share/vm/opto/mulnode.hpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/mulnode.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)mulnode.hpp	1.53 07/05/05 17:06:18 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Portions of code courtesy of Clifford Click
+@@ -42,10 +39,10 @@
+   }
+ 
+   // Handle algebraic identities here.  If we have an identity, return the Node
+-  // we are equivalent to.  We look for "add of zero" as an identity.  
++  // we are equivalent to.  We look for "add of zero" as an identity.
+   virtual Node *Identity( PhaseTransform *phase );
+ 
+-  // We also canonicalize the Node, moving constants to the right input, 
++  // We also canonicalize the Node, moving constants to the right input,
+   // and flatten expressions (so that 1+x+2 becomes x+3).
+   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+ 
+@@ -248,4 +245,3 @@
+   const Type *bottom_type() const { return TypeLong::LONG; }
+   virtual uint ideal_reg() const { return Op_RegL; }
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/opto/multnode.cpp openjdk/hotspot/src/share/vm/opto/multnode.cpp
+--- openjdk6/hotspot/src/share/vm/opto/multnode.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/multnode.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)multnode.cpp	1.60 07/05/05 17:06:23 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -30,7 +27,7 @@
+ 
+ //=============================================================================
+ //------------------------------MultiNode--------------------------------------
+-const RegMask &MultiNode::out_RegMask() const { 
++const RegMask &MultiNode::out_RegMask() const {
+   return RegMask::Empty;
+ }
+ 
+@@ -58,9 +55,9 @@
+ 
+ //=============================================================================
+ //------------------------------ProjNode---------------------------------------
+-uint ProjNode::hash() const { 
++uint ProjNode::hash() const {
+   // only one input
+-  return (uintptr_t)in(TypeFunc::Control) + (_con << 1) + (_is_io_use ? 1 : 0); 
++  return (uintptr_t)in(TypeFunc::Control) + (_con << 1) + (_is_io_use ? 1 : 0);
+ }
+ uint ProjNode::cmp( const Node &n ) const { return _con == ((ProjNode&)n)._con && ((ProjNode&)n)._is_io_use == _is_io_use; }
+ uint ProjNode::size_of() const { return sizeof(ProjNode); }
+@@ -71,13 +68,13 @@
+   return (_con == TypeFunc::Control && def->is_CFG());
+ }
+ 
+-const Type *ProjNode::bottom_type() const { 
++const Type *ProjNode::bottom_type() const {
+   if (in(0) == NULL)  return Type::TOP;
+   const Type *tb = in(0)->bottom_type();
+   if( tb == Type::TOP ) return Type::TOP;
+   if( tb == Type::BOTTOM ) return Type::BOTTOM;
+   const TypeTuple *t = tb->is_tuple();
+-  return t->field_at(_con); 
++  return t->field_at(_con);
+ }
+ 
+ const TypePtr *ProjNode::adr_type() const {
+@@ -96,7 +93,7 @@
+ 
+ bool ProjNode::pinned() const { return in(0)->pinned(); }
+ #ifndef PRODUCT
+-void ProjNode::dump_spec() const { tty->print("#%d",_con); if(_is_io_use) tty->print(" (i_o_use)");}
++void ProjNode::dump_spec(outputStream *st) const { st->print("#%d",_con); if(_is_io_use) st->print(" (i_o_use)");}
+ #endif
+ 
+ //----------------------------check_con----------------------------------------
+@@ -127,8 +124,6 @@
+ }
+ 
+ //------------------------------ideal_reg--------------------------------------
+-uint ProjNode::ideal_reg() const { 
++uint ProjNode::ideal_reg() const {
+   return Matcher::base2reg[bottom_type()->base()];
+ }
+-
+-
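The dump_spec change above is one instance of a refactor this patch repeats across the optimizer: each debug-print routine gains an explicit outputStream* instead of writing to the global tty, so a dump can be redirected. A hypothetical, simplified illustration of the before/after shape (stand-in types, not HotSpot code):

    #include <cstdio>

    struct outputStream {                      // stand-in for HotSpot's class
      void print(const char* fmt, int v) { std::printf(fmt, v); }
    };
    outputStream tty_stream;
    outputStream* tty = &tty_stream;

    struct ProjLike {
      int _con;
      // old shape: void dump_spec() const { tty->print("#%d", _con); }
      void dump_spec(outputStream* st) const { st->print("#%d", _con); }
    };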
+diff -ruN openjdk6/hotspot/src/share/vm/opto/multnode.hpp openjdk/hotspot/src/share/vm/opto/multnode.hpp
+--- openjdk6/hotspot/src/share/vm/opto/multnode.hpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/multnode.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)multnode.hpp	1.46 07/05/05 17:06:24 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class Matcher;
+@@ -68,7 +65,7 @@
+   }
+   const uint _con;              // The field in the tuple we are projecting
+   const bool _is_io_use;        // Used to distinguish between the projections
+-                                // used on the control and io paths from a macro node 
++                                // used on the control and io paths from a macro node
+   virtual int Opcode() const;
+   virtual bool      is_CFG() const;
+   virtual bool depends_only_on_test() const { return false; }
+@@ -79,7 +76,6 @@
+   virtual uint ideal_reg() const;
+   virtual const RegMask &out_RegMask() const;
+ #ifndef PRODUCT
+-  virtual void dump_spec() const;
++  virtual void dump_spec(outputStream *st) const;
+ #endif
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/opto/node.cpp openjdk/hotspot/src/share/vm/opto/node.cpp
+--- openjdk6/hotspot/src/share/vm/opto/node.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/node.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)node.cpp	1.227 07/05/21 15:45:56 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -454,7 +451,7 @@
+ 
+ 
+ //------------------------------clone------------------------------------------
+-// Clone a Node.  
++// Clone a Node.
+ Node *Node::clone() const {
+   Compile *compile = Compile::current();
+   uint s = size_of();           // Size of inherited Node
+@@ -473,7 +470,7 @@
+   uint i;
+   for( i = 0; i < len(); i++ ) {
+     Node *x = in(i);
+-    n->_in[i] = x; 
++    n->_in[i] = x;
+     if (x != NULL) x->add_out(n);
+   }
+   if (is_macro())
+@@ -493,11 +490,11 @@
+   if (this->is_Mach() && (nopnds = this->as_Mach()->num_opnds()) > 0) {
+     MachNode *mach  = n->as_Mach();
+     MachNode *mthis = this->as_Mach();
+-    // Get address of _opnd_array. 
++    // Get address of _opnd_array.
+     // It should be the same offset since it is the clone of this node.
+     MachOper **from = mthis->_opnds;
+-    MachOper **to = (MachOper **)((size_t)(&mach->_opnds) + 
+-                    pointer_delta((const void*)from, 
++    MachOper **to = (MachOper **)((size_t)(&mach->_opnds) +
++                    pointer_delta((const void*)from,
+                                   (const void*)(&mthis->_opnds), 1));
+     mach->_opnds = to;
+     for ( uint i = 0; i < nopnds; ++i ) {
+@@ -533,7 +530,7 @@
+ extern int reclaim_idx ;
+ extern int reclaim_in  ;
+ extern int reclaim_node;
+-void Node::destruct() { 
++void Node::destruct() {
+   // Eagerly reclaim unique Node numberings
+   Compile* compile = Compile::current();
+   if ((uint)_idx+1 == compile->unique()) {
+@@ -561,7 +558,7 @@
+   char *out_edge_end = out_array + out_edge_size;
+   int node_size = size_of();
+ 
+-  // Free the output edge array 
++  // Free the output edge array
+   if (out_edge_size > 0) {
+ #ifdef ASSERT
+     if( out_edge_end == compile->node_arena()->hwm() )
+@@ -580,23 +577,23 @@
+ #else
+     // It was; free the input array and object all in one hit
+     compile->node_arena()->Afree(_in,edge_size+node_size);
+-#endif 
++#endif
+   } else {
+ 
+     // Free just the input array
+ #ifdef ASSERT
+-    if( edge_end == compile->node_arena()->hwm() ) 
++    if( edge_end == compile->node_arena()->hwm() )
+       reclaim_in  += edge_size;
+-#endif 
++#endif
+     compile->node_arena()->Afree(_in,edge_size);
+ 
+     // Free just the object
+ #ifdef ASSERT
+-    if( ((char*)this) + node_size == compile->node_arena()->hwm() ) 
++    if( ((char*)this) + node_size == compile->node_arena()->hwm() )
+       reclaim_node+= node_size;
+ #else
+     compile->node_arena()->Afree(this,node_size);
+-#endif 
++#endif
+   }
+   if (is_macro()) {
+     compile->remove_macro_node(this);
+@@ -663,7 +660,7 @@
+ #ifdef ASSERT
+ //------------------------------is_dead----------------------------------------
+ bool Node::is_dead() const {
+-  // Mach and pinch point nodes may look like dead. 
++  // Mach and pinch point nodes may look like dead.
+   if( is_top() || is_Mach() || (Opcode() == Op_Node && _outcnt > 0) )
+     return false;
+   for( uint i = 0; i < _max; i++ )
+@@ -672,7 +669,7 @@
+   dump();
+   return true;
+ }
+-#endif 
++#endif
+ 
+ //------------------------------add_req----------------------------------------
+ // Add a new required input at the end
+@@ -680,13 +677,13 @@
+   assert( is_not_dead(n), "can not use dead node");
+ 
+   // Look to see if I can move precedence down one without reallocating
+-  if( (_cnt >= _max) || (in(_max-1) != NULL) ) 
++  if( (_cnt >= _max) || (in(_max-1) != NULL) )
+     grow( _max+1 );
+ 
+   // Find a precedence edge to move
+   if( in(_cnt) != NULL ) {       // Next precedence edge is busy?
+     uint i;
+-    for( i=_cnt; i<_max; i++ ) 
++    for( i=_cnt; i<_max; i++ )
+       if( in(i) == NULL )       // Find the NULL at end of prec edge list
+         break;                  // There must be one, since we grew the array
+     _in[i] = in(_cnt);          // Move prec over, making space for req edge
+@@ -707,13 +704,13 @@
+   }
+ 
+   // Look to see if I can move precedence down one without reallocating
+-  if( (_cnt+m) > _max || _in[_max-m] ) 
++  if( (_cnt+m) > _max || _in[_max-m] )
+     grow( _max+m );
+ 
+   // Find a precedence edge to move
+   if( _in[_cnt] != NULL ) {     // Next precedence edge is busy?
+     uint i;
+-    for( i=_cnt; i<_max; i++ ) 
++    for( i=_cnt; i<_max; i++ )
+       if( _in[i] == NULL )      // Find the NULL at end of prec edge list
+         break;                  // There must be one, since we grew the array
+     // Slide all the precs over by m positions (assume #prec << m).
+@@ -840,13 +837,13 @@
+ }
+ 
+ //------------------------------add_prec---------------------------------------
+-// Add a new precedence input.  Precedence inputs are unordered, with 
++// Add a new precedence input.  Precedence inputs are unordered, with
+ // duplicates removed and NULLs packed down at the end.
+ void Node::add_prec( Node *n ) {
+   assert( is_not_dead(n), "can not use dead node");
+ 
+   // Check for NULL at end
+-  if( _cnt >= _max || in(_max-1) ) 
++  if( _cnt >= _max || in(_max-1) )
+     grow( _max+1 );
+ 
+   // Find a precedence edge to move
+@@ -857,13 +854,13 @@
+ }
+ 
+ //------------------------------rm_prec----------------------------------------
+-// Remove a precedence input.  Precedence inputs are unordered, with 
++// Remove a precedence input.  Precedence inputs are unordered, with
+ // duplicates removed and NULLs packed down at the end.
+ void Node::rm_prec( uint j ) {
+ 
+   // Find end of precedence list to pack NULLs
+   uint i;
+-  for( i=j; i<_max; i++ ) 
++  for( i=j; i<_max; i++ )
+     if( !_in[i] )               // Find the NULL at end of prec edge list
+       break;
+   if (_in[j] != NULL) _in[j]->del_out((Node *)this);
+@@ -898,17 +895,17 @@
+ 
+ //------------------------------format-----------------------------------------
+ // Print as assembly
+-void Node::format( PhaseRegAlloc * ) const {}
++void Node::format( PhaseRegAlloc *, outputStream *st ) const {}
+ //------------------------------emit-------------------------------------------
+-// Emit bytes starting at parameter 'ptr'.  
+-void Node::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {} 
++// Emit bytes starting at parameter 'ptr'.
++void Node::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {}
+ //------------------------------size-------------------------------------------
+ // Size of instruction in bytes
+ uint Node::size(PhaseRegAlloc *ra_) const { return 0; }
+ 
+ //------------------------------CFG Construction-------------------------------
+ // Nodes that end basic blocks, e.g. IfTrue/IfFalse, JumpProjNode, Root,
+-// Goto and Return.  
++// Goto and Return.
+ const Node *Node::is_block_proj() const { return 0; }
+ 
+ // Minimum guaranteed type
+@@ -961,7 +958,7 @@
+ // return the 'this' pointer instead of NULL.
+ //
+ // You cannot return an OLD Node, except for the 'this' pointer.  Use the
+-// Identity call to return an old Node; basically if Identity can find 
++// Identity call to return an old Node; basically if Identity can find
+ // another Node have the Ideal call make no change and return NULL.
+ // Example: AddINode::Ideal must check for add of zero; in this case it
+ // returns NULL instead of doing any graph reshaping.
+@@ -976,14 +973,14 @@
+ // def-use info.  If you are making a new Node (either as the new root or
+ // some new internal piece) you must NOT use set_req with def-use info.
+ // You can make a new Node with either 'new' or 'clone'.  In either case,
+-// def-use info is (correctly) not generated.  
++// def-use info is (correctly) not generated.
+ // Example: reshape "(X+3)+4" into "X+7":
+ //    set_req(1,in(1)->in(1) /* grab X */, du /* must use DU on 'this' */);
+ //    set_req(2,phase->intcon(7),du);
+ //    return this;
+ // Example: reshape "X*4" into "X<<1"
+ //    return new (C,3) LShiftINode( in(1), phase->intcon(1) );
+-// 
++//
+ // You must call 'phase->transform(X)' on any new Nodes X you make, except
+ // for the returned root node.  Example: reshape "X*31" with "(X<<5)-1".
+ //    Node *shift=phase->transform(new(C,3)LShiftINode(in(1),phase->intcon(5)));
+@@ -994,7 +991,7 @@
+ // The Right Thing with def-use info.
+ //
+ // You cannot bury the 'this' Node inside of a graph reshape.  If the reshaped
+-// graph uses the 'this' Node it must be the root.  If you want a Node with 
++// graph uses the 'this' Node it must be the root.  If you want a Node with
+ // the same Opcode as the 'this' pointer use 'clone'.
+ //
+ Node *Node::Ideal(PhaseGVN *phase, bool can_reshape) {
+@@ -1079,11 +1076,11 @@
+           if (n->outcnt() == 0) {   // Input also goes dead?
+             if (!n->is_Con())
+               nstack.push(n);       // Clear it out as well
+-          } else if (n->outcnt() == 1 && 
++          } else if (n->outcnt() == 1 &&
+                      n->has_special_unique_user()) {
+             igvn->add_users_to_worklist( n );
+           } else if (n->outcnt() <= 2 && n->is_Store()) {
+-            // Push store's uses on worklist to enable folding optimization for 
++            // Push store's uses on worklist to enable folding optimization for
+             // store/store and store/load to the same address.
+             // The restriction (outcnt() <= 2) is the same as in set_req_X()
+             // and remove_globally_dead_node().
+@@ -1137,7 +1134,7 @@
+ 
+ //------------------------------rematerialize-----------------------------------
+ // Should we clone rather than spill this instruction?
+-bool Node::rematerialize() const { 
++bool Node::rematerialize() const {
+   if ( is_Mach() )
+     return this->as_Mach()->rematerialize();
+   else
+@@ -1147,7 +1144,7 @@
+ //------------------------------needs_anti_dependence_check---------------------
+ // Nodes which use memory without consuming it, hence need antidependences.
+ bool Node::needs_anti_dependence_check() const {
+-  if( req() < 2 || (_flags & Flag_needs_anti_dependence_check) == 0 ) 
++  if( req() < 2 || (_flags & Flag_needs_anti_dependence_check) == 0 )
+     return false;
+   else
+     return in(1)->bottom_type()->has_memory();
+@@ -1187,16 +1184,16 @@
+ 
+ // Get a double constant from a ConstNode.
+ // Returns the constant if it is a double ConstNode
+-jdouble Node::getd() const { 
+-  assert( Opcode() == Op_ConD, "" ); 
+-  return ((ConDNode*)this)->type()->is_double_constant()->getd(); 
++jdouble Node::getd() const {
++  assert( Opcode() == Op_ConD, "" );
++  return ((ConDNode*)this)->type()->is_double_constant()->getd();
+ }
+ 
+ // Get a float constant from a ConstNode.
+ // Returns the constant if it is a float ConstNode
+-jfloat Node::getf() const { 
+-  assert( Opcode() == Op_ConF, "" ); 
+-  return ((ConFNode*)this)->type()->is_float_constant()->getf(); 
++jfloat Node::getf() const {
++  assert( Opcode() == Op_ConF, "" );
++  return ((ConFNode*)this)->type()->is_float_constant()->getf();
+ }
+ 
+ #ifndef PRODUCT
+@@ -1214,7 +1211,7 @@
+ //------------------------------find------------------------------------------
+ // Find a neighbor of this Node with the given _idx
+ // If idx is negative, find its absolute value, following both _in and _out.
+-static void find_recur( Node* &result, Node *n, int idx, bool only_ctrl, 
++static void find_recur( Node* &result, Node *n, int idx, bool only_ctrl,
+                         VectorSet &old_space, VectorSet &new_space ) {
+   int node_idx = (idx >= 0) ? idx : -idx;
+   if (NotANode(n))  return;  // Gracefully handle NULL, -1, 0xabababab, etc.
+@@ -1369,7 +1366,7 @@
+   }
+ 
+   // Dump node-specific info
+-  dump_spec();
++  dump_spec(tty);
+ #ifdef ASSERT
+   // Dump the non-reset _debug_idx
+   if( Verbose && WizardMode ) {
+@@ -1393,7 +1390,7 @@
+     t->dump();
+   } else if( t == Type::MEMORY ) {
+     tty->print("  Memory:");
+-    MemNode::dump_adr_type(this, adr_type());
++    MemNode::dump_adr_type(this, adr_type(), tty);
+   } else if( Verbose || WizardMode ) {
+     tty->print("  Type:");
+     if( t ) {
+@@ -1408,7 +1405,7 @@
+     if (nn != NULL && !nn->is_clear()) {
+       if (nn->jvms() != NULL) {
+         tty->print(" !jvms:");
+-        nn->jvms()->dump_spec();
++        nn->jvms()->dump_spec(tty);
+       }
+     }
+   }
+@@ -1644,7 +1641,7 @@
+     if (true /*VerifyDefUse*/) {
+       // Count use-def edges from n to x
+       int cnt = 0;
+-      for( uint j = 0; j < n->len(); j++ ) 
++      for( uint j = 0; j < n->len(); j++ )
+         if( n->in(j) == x )
+           cnt++;
+       // Count def-use edges from x to n
+@@ -1678,7 +1675,7 @@
+ // Graph walk, with both pre-order and post-order functions
+ void Node::walk(NFunc pre, NFunc post, void *env) {
+   VectorSet visited(Thread::current()->resource_area()); // Setup for local walk
+-  walk_(pre, post, env, visited); 
++  walk_(pre, post, env, visited);
+ }
+ 
+ void Node::walk_(NFunc pre, NFunc post, void *env, VectorSet &visited) {
+@@ -1700,12 +1697,12 @@
+ }
+ 
+ // Register classes are defined for specific machines
+-const RegMask &Node::out_RegMask() const { 
++const RegMask &Node::out_RegMask() const {
+   ShouldNotCallThis();
+   return *(new RegMask());
+ }
+ 
+-const RegMask &Node::in_RegMask(uint) const { 
++const RegMask &Node::in_RegMask(uint) const {
+   ShouldNotCallThis();
+   return *(new RegMask());
+ }
+@@ -1727,7 +1724,7 @@
+ 
+ //-----------------------------------------------------------------------------
+ void Node_Array::grow( uint i ) {
+-  if( !_max ) { 
++  if( !_max ) {
+     _max = 1;
+     _nodes = (Node**)_a->Amalloc( _max * sizeof(Node*) );
+     _nodes[0] = NULL;
+@@ -1848,7 +1845,7 @@
+ #ifndef PRODUCT
+   for( uint i = 0; i < _cnt; i++ )
+     if( _nodes[i] ) {
+-      tty->print("%5d--> ",i); 
++      tty->print("%5d--> ",i);
+       _nodes[i]->dump();
+     }
+ #endif
+@@ -1901,14 +1898,14 @@
+ //=============================================================================
+ uint TypeNode::size_of() const { return sizeof(*this); }
+ #ifndef PRODUCT
+-void TypeNode::dump_spec() const { 
++void TypeNode::dump_spec(outputStream *st) const {
+   if( !Verbose && !WizardMode ) {
+     // standard dump does this in Verbose and WizardMode
+-    tty->print(" #"); _type->dump();
++    st->print(" #"); _type->dump_on(st);
+   }
+ }
+ #endif
+-uint TypeNode::hash() const { 
++uint TypeNode::hash() const {
+   return Node::hash() + _type->hash();
+ }
+ uint TypeNode::cmp( const Node &n ) const
+@@ -1917,8 +1914,6 @@
+ const Type *TypeNode::Value( PhaseTransform * ) const { return _type; }
+ 
+ //------------------------------ideal_reg--------------------------------------
+-uint TypeNode::ideal_reg() const { 
++uint TypeNode::ideal_reg() const {
+   return Matcher::base2reg[_type->base()];
+ }
+-
+-
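One non-mechanical spot in the node.cpp section above is clone()'s _opnds fixup: after the raw copy, the clone's interior pointer is rebased by the byte offset it had inside the original object. A minimal sketch of that idiom (hypothetical Packed type; plain pointer arithmetic stands in for pointer_delta):

    #include <cstddef>

    struct Packed {
      int** opnds;                    // may point into buf, i.e. into *this
      int*  buf[4];
    };

    Packed* clone_packed(const Packed* self) {
      Packed* n = new Packed(*self);  // member-wise copy, like Node::clone
      // Give the clone the same interior offset the original had:
      std::ptrdiff_t off = (const char*)self->opnds - (const char*)self;
      n->opnds = (int**)((char*)n + off);
      return n;
    }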
+diff -ruN openjdk6/hotspot/src/share/vm/opto/node.hpp openjdk/hotspot/src/share/vm/opto/node.hpp
+--- openjdk6/hotspot/src/share/vm/opto/node.hpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/node.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)node.hpp	1.221 07/05/17 17:44:27 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Portions of code courtesy of Clifford Click
+@@ -59,6 +56,7 @@
+ class FastLockNode;
+ class FastUnlockNode;
+ class IfNode;
++class InitializeNode;
+ class JVMState;
+ class JumpNode;
+ class JumpProjNode;
+@@ -119,7 +117,7 @@
+ class IfFalseNode;
+ typedef void (*NFunc)(Node&,void*);
+ extern "C" {
+-  typedef int (*C_sort_func_t)(const void *, const void *); 
++  typedef int (*C_sort_func_t)(const void *, const void *);
+ }
+ 
+ // The type of all node counts and indexes.
+@@ -183,9 +181,9 @@
+   // from.  This should allow fast access to node creation & deletion.  This
+   // field is a local cache of a value defined in some "program fragment" for
+   // which these Nodes are just a part of.
+-  
++
+   // New Operator that takes a Compile pointer, this will eventually
+-  // be the "new" New operator. 
++  // be the "new" New operator.
+   inline void* operator new( size_t x, Compile* C) {
+     Node* n = (Node*)C->node_arena()->Amalloc_D(x);
+ #ifdef ASSERT
+@@ -196,8 +194,8 @@
+   }
+ 
+   // New Operator that takes a Compile pointer, this will eventually
+-  // be the "new" New operator. 
+-  inline void* operator new( size_t x, Compile* C, int y) { 
++  // be the "new" New operator.
++  inline void* operator new( size_t x, Compile* C, int y) {
+     Node* n = (Node*)C->node_arena()->Amalloc_D(x + y*sizeof(void*));
+     n->_in = (Node**)(((char*)n) + x);
+ #ifdef ASSERT
+@@ -205,7 +203,7 @@
+ #endif
+     n->_out = (Node**)C;
+     return (void*)n;
+-  }  
++  }
+ 
+   // Delete is a NOP
+   void operator delete( void *ptr ) {}
+@@ -217,7 +215,7 @@
+   Node( uint required );
+ 
+   // Create a new Node with given input edges.
+-  // This version requires use of the "edge-count" new.  
++  // This version requires use of the "edge-count" new.
+   // E.g.  new (C,3) FooNode( C, NULL, left, right );
+   Node( Node *n0 );
+   Node( Node *n0, Node *n1 );
+@@ -348,9 +346,9 @@
+   // Return the unique out edge.
+   Node* unique_out() const { assert(_outcnt==1,"not unique"); return _out[0]; }
+   // Delete out edge at position 'i' by moving last out edge to position 'i'
+-  void  raw_del_out(uint i) { 
+-    assert(i < _outcnt,"oob"); 
+-    assert(_outcnt > 0,"oob"); 
++  void  raw_del_out(uint i) {
++    assert(i < _outcnt,"oob");
++    assert(_outcnt > 0,"oob");
+     #if OPTO_DU_ITERATOR_ASSERT
+     // Record that a change happened here.
+     debug_only(_last_del = _out[i]; ++_del_tick);
+@@ -363,7 +361,7 @@
+ #ifdef ASSERT
+   bool is_dead() const;
+ #define is_not_dead(n) ((n) == NULL || !VerifyIterativeGVN || !((n)->is_dead()))
+-#endif 
++#endif
+ 
+   // Set a required input edge, also updates corresponding output edge
+   void add_req( Node *n ); // Append a NEW required input
+@@ -382,7 +380,7 @@
+   }
+   // Light version of set_req() to init inputs after node creation.
+   void init_req( uint i, Node *n ) {
+-    assert( i == 0 && this == n || 
++    assert( i == 0 && this == n ||
+             is_not_dead(n), "can not use dead node");
+     assert( i < _cnt, "oob");
+     assert( !VerifyHashTableKeys || _hash_lock == 0,
+@@ -446,7 +444,7 @@
+   void rm_prec( uint i );
+   void set_prec( uint i, Node *n ) {
+     assert( is_not_dead(n), "can not use dead node");
+-    assert( i >= _cnt, "not a precedence edge"); 
++    assert( i >= _cnt, "not a precedence edge");
+     if (_in[i] != NULL) _in[i]->del_out((Node *)this);
+     _in[i] = n;
+     if (n != NULL) n->add_out((Node *)this);
+@@ -476,7 +474,7 @@
+ 
+   // Generate class id for some ideal nodes to avoid virtual query
+   // methods is_<Node>().
+-  // Class id is the set of bits corresponded to the node class and all its 
++  // Class id is the set of bits corresponded to the node class and all its
+   // super classes so that queries for super classes are also valid.
+   // Subclasses of the same super class have different assigned bit
+   // (the third parameter in the macro DEFINE_CLASS_ID).
+@@ -489,7 +487,7 @@
+   //
+   //  Class_MachCall=30, ClassMask_MachCall=31
+   // 12               8               4               0
+-  //  0   0   0   0   0   0   0   0   1   1   1   1   0 
++  //  0   0   0   0   0   0   0   0   1   1   1   1   0
+   //                                  |   |   |   |
+   //                                  |   |   |   Bit_Mach=2
+   //                                  |   |   Bit_MachReturn=4
+@@ -498,7 +496,7 @@
+   //
+   //  Class_CountedLoop=56, ClassMask_CountedLoop=63
+   // 12               8               4               0
+-  //  0   0   0   0   0   0   0   1   1   1   0   0   0 
++  //  0   0   0   0   0   0   0   1   1   1   0   0   0
+   //                              |   |   |
+   //                              |   |   Bit_Region=8
+   //                              |   Bit_Loop=16
+@@ -507,7 +505,7 @@
+   #define DEFINE_CLASS_ID(cl, supcl, subn) \
+   Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn) , \
+   Class_##cl = Class_##supcl + Bit_##cl , \
+-  ClassMask_##cl = ((Bit_##cl << 1) - 1) , 
++  ClassMask_##cl = ((Bit_##cl << 1) - 1) ,
+ 
+   // This enum is used only for C2 ideal and mach nodes with is_<node>() methods
+   // so that it's values fits into 16 bits.
+@@ -538,6 +536,7 @@
+         DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2)
+       DEFINE_CLASS_ID(Start,       Multi, 2)
+       DEFINE_CLASS_ID(MemBar,      Multi, 3)
++        DEFINE_CLASS_ID(Initialize,    MemBar, 0)
+ 
+     DEFINE_CLASS_ID(Mach,  Node, 1)
+       DEFINE_CLASS_ID(MachReturn, Mach, 0)
+@@ -637,14 +636,14 @@
+ 
+   // Return a dense integer opcode number
+   virtual int Opcode() const;
+-  
++
+   // Virtual inherited Node size
+   virtual uint size_of() const;
+ 
+   // Other interesting Node properties
+ 
+   // Special case: is_Call() returns true for both CallNode and MachCallNode.
+-  bool is_Call() const { 
++  bool is_Call() const {
+     return (_flags & Flag_is_Call) != 0;
+   }
+ 
+@@ -687,6 +686,7 @@
+   DEFINE_CLASS_QUERY(If)
+   DEFINE_CLASS_QUERY(IfFalse)
+   DEFINE_CLASS_QUERY(IfTrue)
++  DEFINE_CLASS_QUERY(Initialize)
+   DEFINE_CLASS_QUERY(Jump)
+   DEFINE_CLASS_QUERY(JumpProj)
+   DEFINE_CLASS_QUERY(Load)
+@@ -727,14 +727,14 @@
+   #undef DEFINE_CLASS_QUERY
+ 
+   // duplicate of is_MachSpillCopy()
+-  bool is_SpillCopy () const { 
++  bool is_SpillCopy () const {
+     return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy);
+   }
+ 
+   bool is_Con () const { return (_flags & Flag_is_Con) != 0; }
+   bool is_Goto() const { return (_flags & Flag_is_Goto) != 0; }
+   // The data node which is safe to leave in dead loop during IGVN optimization.
+-  bool is_dead_loop_safe() const { 
++  bool is_dead_loop_safe() const {
+     return is_Phi() || is_Proj() ||
+            (_flags & (Flag_is_dead_loop_safe | Flag_is_Con)) != 0;
+   }
+@@ -759,7 +759,7 @@
+   // Nodes, next block selector Nodes (block enders), and next block
+   // projections.  These calls need to work on their machine equivalents.  The
+   // Ideal beginning Nodes are RootNode, RegionNode and StartNode.
+-  bool is_block_start() const { 
++  bool is_block_start() const {
+     if ( is_Region() )
+       return this == (const Node*)in(0);
+     else
+@@ -793,14 +793,14 @@
+   virtual const class TypePtr *adr_type() const { return NULL; }
+ 
+   // Return an existing node which computes the same function as this node.
+-  // The optimistic combined algorithm requires this to return a Node which 
++  // The optimistic combined algorithm requires this to return a Node which
+   // is a small number of steps away (e.g., one of my inputs).
+   virtual Node *Identity( PhaseTransform *phase );
+ 
+   // Return the set of values this Node can take on at runtime.
+   virtual const Type *Value( PhaseTransform *phase ) const;
+ 
+-  // Return a node which is more "ideal" than the current node.  
++  // Return a node which is more "ideal" than the current node.
+   // The invariants on this call are subtle.  If in doubt, read the
+   // treatise in node.cpp above the default implemention AND TEST WITH
+   // +VerifyIterativeGVN!
+@@ -840,6 +840,10 @@
+   // value, if it appears (by local graph inspection) to be computed by a simple conditional.
+   bool is_iteratively_computed();
+ 
++  // Determine if a node is Counted loop induction variable.
++  // The method is defined in loopnode.cpp.
++  const Node* is_loop_iv() const;
++
+   // Return a node with opcode "opc" and same inputs as "this" if one can
+   // be found; Otherwise return NULL;
+   Node* find_similar(int opc);
+@@ -870,9 +874,9 @@
+   virtual JVMState* jvms() const;
+ 
+   // Print as assembly
+-  virtual void format( PhaseRegAlloc * ) const;
++  virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const;
+   // Emit bytes starting at parameter 'ptr'
+-  // Bump 'ptr' by the number of output bytes 
++  // Bump 'ptr' by the number of output bytes
+   virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
+   // Size of instruction in bytes
+   virtual uint size(PhaseRegAlloc *ra_) const;
+@@ -945,7 +949,7 @@
+   virtual void dump_req() const;     // Print required-edge info
+   virtual void dump_prec() const;    // Print precedence-edge info
+   virtual void dump_out() const;     // Print the output edge info
+-  virtual void dump_spec() const {}; // Print per-node info
++  virtual void dump_spec(outputStream *st) const {}; // Print per-node info
+   void verify_edges(Unique_Node_List &visited); // Verify bi-directional edges
+   void verify() const;               // Check Def-Use info for my subgraph
+   static void verify_recur(const Node *n, int verify_depth, VectorSet &old_space, VectorSet &new_space);
+@@ -1223,7 +1227,7 @@
+ //-----------------------------------------------------------------------------
+ // Map dense integer indices to Nodes.  Uses classic doubling-array trick.
+ // Abstractly provides an infinite array of Node*'s, initialized to NULL.
+-// Note that the constructor just zeros things, and since I use Arena 
++// Note that the constructor just zeros things, and since I use Arena
+ // allocation I do not need a destructor to reclaim storage.
+ class Node_Array : public ResourceObj {
+ protected:
+@@ -1283,8 +1287,8 @@
+   bool member( Node *n ) { return _in_worklist.test(n->_idx) != 0; }
+   VectorSet &member_set(){ return _in_worklist; }
+ 
+-  void push( Node *b ) { 
+-    if( !_in_worklist.test_set(b->_idx) ) 
++  void push( Node *b ) {
++    if( !_in_worklist.test_set(b->_idx) )
+       Node_List::push(b);
+   }
+   Node *pop() {
+@@ -1334,15 +1338,15 @@
+   Arena *_a;         // Arena to allocate in
+   void grow();
+ public:
+-  Node_Stack(int size) { 
++  Node_Stack(int size) {
+     size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
+     _a = Thread::current()->resource_area();
+     _inodes = NEW_ARENA_ARRAY( _a, INode, max );
+     _inode_max = _inodes + max;
+     _inode_top = _inodes - 1; // stack is empty
+   }
+-  
+-  Node_Stack(Arena *a, int size) : _a(a) { 
++
++  Node_Stack(Arena *a, int size) : _a(a) {
+     size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
+     _inodes = NEW_ARENA_ARRAY( _a, INode, max );
+     _inode_max = _inodes + max;
+@@ -1397,7 +1401,7 @@
+ 
+   JVMState* jvms()            { return _jvms; }
+   void  set_jvms(JVMState* x) {        _jvms = x; }
+-  
++
+   // True if there is nothing here.
+   bool is_clear() {
+     return (_jvms == NULL);
+@@ -1483,7 +1487,6 @@
+   virtual const Type *bottom_type() const;
+   virtual       uint  ideal_reg() const;
+ #ifndef PRODUCT
+-  virtual void dump_spec() const;
++  virtual void dump_spec(outputStream *st) const;
+ #endif
+ };
+-
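The class-id scheme documented in node.hpp above comes with worked values; a self-contained check (trimmed to the four classes of the MachCall example, C++11 static_assert) that the DEFINE_CLASS_ID macro really yields Class_MachCall=30 and ClassMask_MachCall=31:

    #define DEFINE_CLASS_ID(cl, supcl, subn) \
      Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn), \
      Class_##cl = Class_##supcl + Bit_##cl, \
      ClassMask_##cl = ((Bit_##cl << 1) - 1),

    enum {
      Class_Node = 0, Bit_Node = 0,
      DEFINE_CLASS_ID(Mach,          Node,          1)  // Bit_Mach          =  2
      DEFINE_CLASS_ID(MachReturn,    Mach,          0)  // Bit_MachReturn    =  4
      DEFINE_CLASS_ID(MachSafePoint, MachReturn,    0)  // Bit_MachSafePoint =  8
      DEFINE_CLASS_ID(MachCall,      MachSafePoint, 0)  // Bit_MachCall      = 16
    };

    static_assert(Class_MachCall == 30,     "2 + 4 + 8 + 16, as in the diagram");
    static_assert(ClassMask_MachCall == 31, "superclass bits plus its own");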
+diff -ruN openjdk6/hotspot/src/share/vm/opto/opcodes.cpp openjdk/hotspot/src/share/vm/opto/opcodes.cpp
+--- openjdk6/hotspot/src/share/vm/opto/opcodes.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/opcodes.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)opcodes.cpp	1.15 07/05/05 17:06:24 JVM"
+-#endif
+ /*
+  * Copyright 1998-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ----------------------------------------------------------------------------
+@@ -43,4 +40,3 @@
+   "_last_class_name",
+ };
+ #undef macro
+-
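opcodes.cpp above is the string half of an X-macro pair: one master class list expands into dense enum indices in opcodes.hpp and into the parallel name table here, so names and opcode numbers cannot drift apart. A condensed sketch of the technique with a made-up three-entry list:

    #define ALL_CLASSES(macro) macro(AddI) macro(SubI) macro(MulI)

    #define macro(x) Op_##x,
    enum Opcodes { Op_Node = 0, ALL_CLASSES(macro) _last_opcode };
    #undef macro

    #define macro(x) #x,
    const char* NodeClassNames[] = { "Node", ALL_CLASSES(macro) "_last_class_name" };
    #undef macro
    // NodeClassNames[Op_AddI] == "AddI": table and enum stay in lock step.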
+diff -ruN openjdk6/hotspot/src/share/vm/opto/opcodes.hpp openjdk/hotspot/src/share/vm/opto/opcodes.hpp
+--- openjdk6/hotspot/src/share/vm/opto/opcodes.hpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/opcodes.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)opcodes.hpp	1.31 07/05/05 17:06:24 JVM"
+-#endif
+ /*
+  * Copyright 1997-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Build a big enum of class names to give them dense integer indices
+diff -ruN openjdk6/hotspot/src/share/vm/opto/optoreg.hpp openjdk/hotspot/src/share/vm/opto/optoreg.hpp
+--- openjdk6/hotspot/src/share/vm/opto/optoreg.hpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/optoreg.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)optoreg.hpp	1.6 07/05/05 17:06:18 JVM"
+-#endif
+ /*
+  * Copyright 2006-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //------------------------------OptoReg----------------------------------------
+@@ -45,17 +42,17 @@
+ // if the value is outside the enumeration's valid range. OptoReg::Name is
+ // typedef'ed as int, because it needs to be able to represent spill-slots.
+ //
+-class OptoReg VALUE_OBJ_CLASS_SPEC { 
++class OptoReg VALUE_OBJ_CLASS_SPEC {
+ 
+  friend class C2Compiler;
+  public:
+   typedef int Name;
+   enum {
+     // Chunk 0
+-    Physical = AdlcVMDeps::Physical, // Start of physical regs 
++    Physical = AdlcVMDeps::Physical, // Start of physical regs
+     // A few oddballs at the edge of the world
+-    Special = -2,		// All special (not allocated) values
+-    Bad = -1			// Not a register
++    Special = -2,               // All special (not allocated) values
++    Bad = -1                    // Not a register
+   };
+ 
+  private:
+diff -ruN openjdk6/hotspot/src/share/vm/opto/output.cpp openjdk/hotspot/src/share/vm/opto/output.cpp
+--- openjdk6/hotspot/src/share/vm/opto/output.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/output.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)output.cpp	1.289 07/05/17 15:59:26 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -64,7 +61,7 @@
+   entry->_nodes.map( 0, prolog );
+   bbs.map( prolog->_idx, entry );
+   bbs.map( start->_idx, NULL ); // start is no longer in any block
+-  
++
+   // Virtual methods need an unverified entry point
+ 
+   if( is_osr_compilation() ) {
+@@ -74,9 +71,9 @@
+     }
+   } else {
+     if( _method && !_method->flags().is_static() ) {
+-      // Insert unvalidated entry point 
++      // Insert unvalidated entry point
+       _cfg->insert( broot, 0, new (this) MachUEPNode() );
+-    } 
++    }
+ 
+   }
+ 
+@@ -89,13 +86,13 @@
+     ||(OptoBreakpointC2R && !_method)
+ #endif
+     ) {
+-    // checking for _method means that OptoBreakpoint does not apply to 
++    // checking for _method means that OptoBreakpoint does not apply to
+     // runtime stubs or frame converters
+     _cfg->insert( entry, 1, new (this) MachBreakpointNode() );
+   }
+ 
+   // Insert epilogs before every return
+-  for( uint i=0; i<_cfg->_num_blocks; i++ ) { 
++  for( uint i=0; i<_cfg->_num_blocks; i++ ) {
+     Block *b = _cfg->_blocks[i];
+     if( !b->is_connector() && b->non_connector_successor(0) == _cfg->_broot ) { // Found a program exit point?
+       Node *m = b->end();
+@@ -111,7 +108,7 @@
+ # ifdef ENABLE_ZAP_DEAD_LOCALS
+   if ( ZapDeadCompiledLocals )  Insert_zap_nodes();
+ # endif
+-  
++
+   ScheduleAndBundle();
+ 
+ #ifndef PRODUCT
+@@ -143,7 +140,7 @@
+   // Determine if we need to generate a stack overflow check.
+   // Do it if the method is not a stub function and
+   // has java calls or has frame size > vm_page_size/8.
+-  return (stub_function() == NULL && 
++  return (stub_function() == NULL &&
+           (has_java_calls() || frame_size_in_bytes > os::vm_page_size()>>3));
+ }
+ 
+@@ -151,7 +148,7 @@
+   // Determine if we need to generate a register stack overflow check.
+   // This is only used on architectures which have split register
+   // and memory stacks (ie. IA64).
+-  // Bang if the method is not a stub function and has java calls 
++  // Bang if the method is not a stub function and has java calls
+   return (stub_function() == NULL && has_java_calls());
+ }
+ 
+@@ -191,12 +188,12 @@
+ 
+   if ( skip )  return;
+ 
+-   
++
+   if ( _method == NULL )
+     return; // no safepoints/oopmaps emitted for calls in stubs,so we don't care
+ 
+   // Insert call to zap runtime stub before every node with an oop map
+-  for( uint i=0; i<_cfg->_num_blocks; i++ ) { 
++  for( uint i=0; i<_cfg->_num_blocks; i++ ) {
+     Block *b = _cfg->_blocks[i];
+     for ( uint j = 0;  j < b->_nodes.size();  ++j ) {
+       Node *n = b->_nodes[j];
+@@ -216,13 +213,12 @@
+           MachCallNode* call = n->as_MachCall();
+           if (call->entry_point() == OptoRuntime::new_instance_Java() ||
+               call->entry_point() == OptoRuntime::new_array_Java() ||
+-              call->entry_point() == OptoRuntime::multianewarray1_Java() ||
+               call->entry_point() == OptoRuntime::multianewarray2_Java() ||
+               call->entry_point() == OptoRuntime::multianewarray3_Java() ||
+               call->entry_point() == OptoRuntime::multianewarray4_Java() ||
+               call->entry_point() == OptoRuntime::multianewarray5_Java() ||
+               call->entry_point() == OptoRuntime::slow_arraycopy_Java() ||
+-              call->entry_point() == OptoRuntime::complete_monitor_locking_Java() 
++              call->entry_point() == OptoRuntime::complete_monitor_locking_Java()
+               ) {
+             insert = false;
+           }
+@@ -241,9 +237,9 @@
+ 
+ Node* Compile::call_zap_node(MachSafePointNode* node_to_check, int block_no) {
+   const TypeFunc *tf = OptoRuntime::zap_dead_locals_Type();
+-  CallStaticJavaNode* ideal_node = 
+-    new (this, tf->domain()->cnt()) CallStaticJavaNode( tf, 
+-         OptoRuntime::zap_dead_locals_stub(_method->flags().is_native()), 
++  CallStaticJavaNode* ideal_node =
++    new (this, tf->domain()->cnt()) CallStaticJavaNode( tf,
++         OptoRuntime::zap_dead_locals_stub(_method->flags().is_native()),
+                             "call zap dead locals stub", 0, TypePtr::BOTTOM);
+   // We need to copy the OopMap from the site we're zapping at.
+   // We have to make a copy, because the zap site might not be
+@@ -266,7 +262,7 @@
+ # endif // ENABLE_ZAP_DEAD_LOCALS
+ 
+ //------------------------------compute_loop_first_inst_sizes------------------
+-// Compute the size of first NumberOfLoopInstrToAlign instructions at head 
++// Compute the size of first NumberOfLoopInstrToAlign instructions at head
+ // of a loop. When aligning a loop we need to provide enough instructions
+ // in cpu's fetch buffer to feed decoders. The loop alignment could be
+ // avoided if we have enough instructions in fetch buffer at the head of a loop.
+@@ -274,7 +270,7 @@
+ // a loop will be aligned if the size is not reset here.
+ //
+ // Note: Mach instructions could contain several HW instructions
+-// so the size is estimated only. 
++// so the size is estimated only.
+ //
+ void Compile::compute_loop_first_inst_sizes() {
+   // The next condition is used to gate the loop alignment optimization.
+@@ -287,7 +283,7 @@
+     for( uint i=1; i <= last_block; i++ ) {
+       Block *b = _cfg->_blocks[i];
+       // Check the first loop's block which requires an alignment.
+-      if( b->head()->is_Loop() && 
++      if( b->head()->is_Loop() &&
+           b->code_alignment() > (uint)relocInfo::addr_unit() ) {
+         uint sum_size = 0;
+         uint inst_cnt = NumberOfLoopInstrToAlign;
+@@ -310,7 +306,7 @@
+                   !_cfg->_blocks[i+1]->head()->is_Loop() ) {
+               i++;
+               nb = _cfg->_blocks[i];
+-              inst_cnt  = nb->compute_first_inst_size(sum_size, inst_cnt, 
++              inst_cnt  = nb->compute_first_inst_size(sum_size, inst_cnt,
+                                                       _regalloc);
+             } // while( inst_cnt > 0 && i < last_block  )
+           } // if( bx != b )
+@@ -383,7 +379,7 @@
+             reloc_size += reloc_java_to_interp();
+           }
+         } else if (mach->is_MachSafePoint()) {
+-          // If call/safepoint are adjacent, account for possible 
++          // If call/safepoint are adjacent, account for possible
+           // nop to disambiguate the two safepoints.
+           if (min_offset_from_last_call == 0) {
+             blk_size += nop_size;
+@@ -434,7 +430,7 @@
+     // Find the branch; ignore trailing NOPs.
+     for( j = b->_nodes.size()-1; j>=0; j-- ) {
+       nj = b->_nodes[j];
+-      if( !nj->is_Mach() || nj->as_Mach()->ideal_Opcode() != Op_Con ) 
++      if( !nj->is_Mach() || nj->as_Mach()->ideal_Opcode() != Op_Con )
+         break;
+     }
+ 
+@@ -450,7 +446,7 @@
+             // We've got a winner.  Replace this branch.
+             MachNode *replacement = mach->short_branch_version(this);
+             b->_nodes.map(j, replacement);
+-          
++
+             // Update the jmp_end size to save time in our
+             // next pass.
+             jmp_end[i] -= (mach->size(_regalloc) - replacement->size(_regalloc));
+@@ -466,7 +462,7 @@
+     }
+   }
+ 
+-  // Compute the size of first NumberOfLoopInstrToAlign instructions at head 
++  // Compute the size of first NumberOfLoopInstrToAlign instructions at head
+   // of a loop. It is used to determine the padding for loop alignment.
+   compute_loop_first_inst_sizes();
+ 
+@@ -555,7 +551,7 @@
+ //------------------------------FillLocArray-----------------------------------
+ // Create a bit of debug info and append it to the array.  The mapping is from
+ // Java local or expression stack to constant, register or stack-slot.  For
+-// doubles, insert 2 mappings and return 1 (to tell the caller that the next 
++// doubles, insert 2 mappings and return 1 (to tell the caller that the next
+ // entry has been taken care of and caller should skip it).
+ static LocationValue *new_loc_value( PhaseRegAlloc *ra, OptoReg::Name regnum, Location::Type l_type ) {
+   // This should never have accepted Bad before
+@@ -615,14 +611,14 @@
+     }
+ #else //_LP64
+ #ifdef SPARC
+-    if (t->base() == Type::Long && OptoReg::is_reg(regnum)) { 
+-      // For SPARC we have to swap high and low words for 
++    if (t->base() == Type::Long && OptoReg::is_reg(regnum)) {
++      // For SPARC we have to swap high and low words for
+       // long values stored in a single-register (g0-g7).
+       array->append(new_loc_value( _regalloc,              regnum   , Location::normal ));
+       array->append(new_loc_value( _regalloc, OptoReg::add(regnum,1), Location::normal ));
+     } else
+ #endif //SPARC
+-    if( t->base() == Type::DoubleBot || t->base() == Type::DoubleCon || t->base() == Type::Long ) { 
++    if( t->base() == Type::DoubleBot || t->base() == Type::DoubleCon || t->base() == Type::Long ) {
+       // Repack the double/long as two jints.
+       // The convention the interpreter uses is that the second local
+       // holds the first raw word of the native double representation.
+@@ -635,11 +631,11 @@
+     }
+ #endif //_LP64
+     else if( (t->base() == Type::FloatBot || t->base() == Type::FloatCon) &&
+-               OptoReg::is_reg(regnum) ) { 
+-      array->append(new_loc_value( _regalloc, regnum, Matcher::float_in_double 
++               OptoReg::is_reg(regnum) ) {
++      array->append(new_loc_value( _regalloc, regnum, Matcher::float_in_double
+                                    ? Location::float_in_dbl : Location::normal ));
+-    } else if( t->base() == Type::Int && OptoReg::is_reg(regnum) ) { 
+-      array->append(new_loc_value( _regalloc, regnum, Matcher::int_in_long 
++    } else if( t->base() == Type::Int && OptoReg::is_reg(regnum) ) {
++      array->append(new_loc_value( _regalloc, regnum, Matcher::int_in_long
+                                    ? Location::int_in_long : Location::normal ));
+     } else {
+       array->append(new_loc_value( _regalloc, regnum, _regalloc->is_oop(local) ? Location::oop : Location::normal ));
+@@ -655,13 +651,13 @@
+   case Type::AnyPtr:
+     array->append(new ConstantOopWriteValue(NULL));
+     break;
+-  case Type::AryPtr: 
+-  case Type::InstPtr: 
++  case Type::AryPtr:
++  case Type::InstPtr:
+   case Type::KlassPtr:          // fall through
+     array->append(new ConstantOopWriteValue(t->isa_oopptr()->const_oop()->encoding()));
+     break;
+-  case Type::Int:    
+-    array->append(new ConstantIntValue(t->is_int()->get_con())); 
++  case Type::Int:
++    array->append(new ConstantIntValue(t->is_int()->get_con()));
+     break;
+   case Type::RawPtr:
+     // A return address (T_ADDRESS).
+@@ -717,12 +713,12 @@
+     break;
+   }
+   case Type::Top:               // Add an illegal value here
+-    array->append(new LocationValue(Location())); 
++    array->append(new LocationValue(Location()));
+     break;
+   default:
+     ShouldNotReachHere();
+     break;
+-  }                     
++  }
+ }
+ 
+ // Determine if this node starts a bundle
+@@ -788,15 +784,15 @@
+ 
+     // Add in mappings of the monitors
+     assert( !method ||
+-            !method->is_synchronized() || 
+-            method->is_native() || 
+-            num_mon > 0 || 
+-            !GenerateSynchronizationCode, 
++            !method->is_synchronized() ||
++            method->is_native() ||
++            num_mon > 0 ||
++            !GenerateSynchronizationCode,
+             "monitors must always exist for synchronized methods");
+ 
+     // Build the growable array of ScopeValues for exp stack
+     GrowableArray<MonitorValue*> *monarray = new GrowableArray<MonitorValue*>(num_mon);
+-            
++
+     // Loop over monitors and insert into array
+     for(idx = 0; idx < num_mon; idx++) {
+       // Grab the node that defines this monitor
+@@ -816,7 +812,7 @@
+ 
+       OptoReg::Name box_reg = BoxLockNode::stack_slot(box_node);
+       monarray->append(new MonitorValue(scval, Location::new_stk_loc(Location::normal,_regalloc->reg2offset(box_reg))));
+-    }     
++    }
+ 
+     // Build first class objects to pass to scope
+     DebugToken *locvals = debug_info()->create_scope_values(locarray);
+@@ -926,7 +922,7 @@
+     C->record_failure("excessive request to CodeCache");
+   } else {
+     UseInterpreter            = true;
+-    UseCompiler               = false;    
++    UseCompiler               = false;
+     AlwaysCompileLoopMethods  = false;
+     C->record_failure("CodeCache is full");
+     warning("CodeCache is full. Compiling has been disabled");
+@@ -1016,7 +1012,7 @@
+   MachNode *_nop_list[Bundle::_nop_count];
+   Bundle::initialize_nops(_nop_list, this);
+ 
+-  // Create oopmap set. 
++  // Create oopmap set.
+   _oop_map_set = new OopMapSet();
+ 
+   // !!!!! This preserves old handling of oopmaps for now
+@@ -1060,14 +1056,14 @@
+     // If this block needs to start aligned (i.e, can be reached other
+     // than by falling-thru from the previous block), then force the
+     // start of a new bundle.
+-    if( Pipeline::requires_bundling() && starts_bundle(head) ) 
++    if( Pipeline::requires_bundling() && starts_bundle(head) )
+       cb->flush_bundle(true);
+ 
+     // Define the label at the beginning of the basic block
+     if( labels_not_set )
+       MacroAssembler(cb).bind( blk_labels[b->_pre_order] );
+ 
+-    else 
++    else
+       assert( blk_labels[b->_pre_order].loc_pos() == cb->code_size(),
+               "label position does not match code offset" );
+ 
+@@ -1092,7 +1088,7 @@
+ 
+       // If this starts a new instruction group, then flush the current one
+       // (but allow split bundles)
+-      if( Pipeline::requires_bundling() && starts_bundle(n) ) 
++      if( Pipeline::requires_bundling() && starts_bundle(n) )
+         cb->flush_bundle(false);
+ 
+       // The following logic is duplicated in the code ifdeffed for
+@@ -1118,7 +1114,7 @@
+         // Make sure safepoint node for polling is distinct from a call's
+         // return by adding a nop if needed.
+         if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset ) {
+-          padding = nop_size; 
++          padding = nop_size;
+         }
+         assert( labels_not_set || padding == 0, "instruction should already be aligned")
+ 
+@@ -1142,7 +1138,7 @@
+           mcall->method_set((intptr_t)mcall->entry_point());
+ 
+           // Save the return address
+-          call_returns[b->_pre_order] = current_offset + mcall->ret_addr_offset(); 
++          call_returns[b->_pre_order] = current_offset + mcall->ret_addr_offset();
+ 
+           if (!mcall->is_safepoint_node()) {
+             is_mcall = false;
+@@ -1217,12 +1213,12 @@
+ 
+         else if( !n->is_Proj() ) {
+           // Remember the begining of the previous instruction, in case
+-          // it's followed by a flag-kill and a null-check.  Happens on 
++          // it's followed by a flag-kill and a null-check.  Happens on
+           // Intel all the time, with add-to-memory kind of opcodes.
+           previous_offset = current_offset;
+         }
+       }
+-      
++
+       // Verify that there is sufficient space remaining
+       cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
+       if (cb->blob() == NULL) {
+@@ -1255,11 +1251,11 @@
+       // See if this instruction has a delay slot
+       if ( valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) {
+         assert(delay_slot != NULL, "expecting delay slot node");
+-  
++
+         // Back up 1 instruction
+         cb->set_code_end(
+           cb->code_end()-Pipeline::instr_unit_size());
+-  
++
+         // Save the offset for the listing
+ #ifndef PRODUCT
+         if( node_offsets && delay_slot->_idx < node_offset_limit )
+@@ -1389,7 +1385,7 @@
+     // Find the branch; ignore trailing NOPs.
+     for( j = b->_nodes.size()-1; j>=0; j-- ) {
+       n = b->_nodes[j];
+-      if( !n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con ) 
++      if( !n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con )
+         break;
+     }
+ 
+@@ -1409,7 +1405,7 @@
+ #endif
+       // last instruction is a CatchNode, find it's CatchProjNodes
+       int nof_succs = b->_num_succs;
+-      // allocate space 
++      // allocate space
+       GrowableArray<intptr_t> handler_bcis(nof_succs);
+       GrowableArray<intptr_t> handler_pcos(nof_succs);
+       // iterate through all successors
+@@ -1465,9 +1461,9 @@
+ // Initializer for class Scheduling
+ 
+ Scheduling::Scheduling(Arena *arena, Compile &compile)
+-  : _arena(arena), 
+-    _cfg(compile.cfg()), 
+-    _bbs(compile.cfg()->_bbs), 
++  : _arena(arena),
++    _cfg(compile.cfg()),
++    _bbs(compile.cfg()->_bbs),
+     _regalloc(compile.regalloc()),
+     _reg_node(arena),
+     _bundle_instr_count(0),
+@@ -1808,11 +1804,11 @@
+   for ( uint i=0; i < n->len(); i++ ) {
+     Node *def = n->in(i);
+     if (!def) continue;
+-    if( def->is_Proj() )        // If this is a machine projection, then 
++    if( def->is_Proj() )        // If this is a machine projection, then
+       def = def->in(0);         // propagate usage thru to the base instruction
+ 
+     if( _bbs[def->_idx] != bb ) // Ignore if not block-local
+-      continue;                 
++      continue;
+ 
+     // Compute the latency
+     uint l = _bundle_cycle_number + n->latency(i);
+@@ -2016,7 +2012,7 @@
+       (op != Op_Node &&         // Not an unused antidepedence node and
+        // not an unallocated boxlock
+        (OptoReg::is_valid(_regalloc->get_reg_first(n)) || op != Op_BoxLock)) ) {
+-    
++
+     // Push any trailing projections
+     if( bb->_nodes[bb->_nodes.size()-1] != n ) {
+       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+@@ -2025,7 +2021,7 @@
+           _scheduled.push(foi);
+       }
+     }
+-  
++
+     // Put the instruction in the schedule list
+     _scheduled.push(n);
+   }
+@@ -2086,13 +2082,13 @@
+       assert(inp != n, "no cycles allowed" );
+       if( _bbs[inp->_idx] == bb ) { // Block-local use?
+         if( inp->is_Proj() )    // Skip through Proj's
+-          inp = inp->in(0); 
++          inp = inp->in(0);
+         ++_uses[inp->_idx];     // Count 1 block-local use
+       }
+     }
+ 
+     // If this instruction has a 0 use count, then it is available
+-    if (!_uses[n->_idx]) { 
++    if (!_uses[n->_idx]) {
+       _current_latency[n->_idx] = _bundle_cycle_number;
+       AddNodeToAvailableList(n);
+     }
+@@ -2140,7 +2136,7 @@
+       continue;
+ 
+     // Skip empty, connector blocks
+-    if (bb->is_connector()) 
++    if (bb->is_connector())
+       continue;
+ 
+     // If the following block is not the sole successor of
+@@ -2180,7 +2176,7 @@
+     // have their delay slots filled in the template expansions, so we don't
+     // bother scheduling them.
+     Node *last = bb->_nodes[_bb_end];
+-    if( last->is_Catch() || 
++    if( last->is_Catch() ||
+        (last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) {
+       // There must be a prior call.  Skip it.
+       while( !bb->_nodes[--_bb_end]->is_Call() ) {
+@@ -2224,7 +2220,7 @@
+       Node *n = bb->_nodes[l];
+       uint m;
+       for( m = 0; m < _bb_end-_bb_start; m++ )
+-        if( _scheduled[m] == n ) 
++        if( _scheduled[m] == n )
+           break;
+       assert( m < _bb_end-_bb_start, "instruction missing in schedule" );
+     }
+@@ -2260,7 +2256,7 @@
+   if (_cfg->C->trace_opto_output())
+     tty->print("# <- DoScheduling\n");
+ #endif
+-  
++
+   // Record final node-bundling array location
+   _regalloc->C->set_node_bundling_base(_node_bundling_base);
+ 
+@@ -2412,13 +2408,13 @@
+       if( _regalloc->get_reg_first(uses->in(i)) == def_reg ||
+           _regalloc->get_reg_second(uses->in(i)) == def_reg ) {
+         // Yes, found a use/kill pinch-point
+-        pinch->set_req(0,NULL);  // 
++        pinch->set_req(0,NULL);  //
+         pinch->replace_by(kill); // Move anti-dep edges up
+         pinch = kill;
+         _reg_node.map(def_reg,pinch);
+         return;
+       }
+-    }    
++    }
+   }
+ 
+   // Add edge from kill to pinch-point
+@@ -2436,7 +2432,7 @@
+       _bbs[use->_idx] == b ) {
+     if( pinch->Opcode() == Op_Node && // Real pinch-point (not optimistic?)
+         pinch->req() == 1 ) {   // pinch not yet in block?
+-      pinch->del_req(0);        // yank pointer to later-def, also set flag 
++      pinch->del_req(0);        // yank pointer to later-def, also set flag
+       // Insert the pinch-point in the block just after the last use
+       b->_nodes.insert(b->find_node(use)+1,pinch);
+       _bb_end++;                // Increase size scheduled region in block
+@@ -2479,7 +2475,7 @@
+   // We put edges from the prior and current DEF/KILLs to the pinch point.
+   // We put the pinch point in _reg_node.  If there's already a pinch point
+   // we merely add an edge from the current DEF/KILL to the pinch point.
+-                               
++
+   // After doing the DEF/KILLs, we handle USEs.  For each used register, we
+   // put an edge from the pinch point to the USE.
+ 
+@@ -2498,7 +2494,7 @@
+     int is_def = n->outcnt();   // def if some uses prior to adding precedence edges
+     if( n->Opcode() == Op_MachProj && n->ideal_reg() == MachProjNode::fat_proj ) {
+       // Fat-proj kills a slew of registers
+-      // This can add edges to 'n' and obscure whether or not it was a def, 
++      // This can add edges to 'n' and obscure whether or not it was a def,
+       // hence the is_def flag.
+       fat_proj_seen = true;
+       RegMask rm = n->out_RegMask();// Make local copy
+@@ -2529,7 +2525,7 @@
+     Node *m = b->_nodes[i];
+ 
+     // Add precedence edge from following safepoint to use of derived pointer
+-    if( last_safept_node != end_node && 
++    if( last_safept_node != end_node &&
+         m != last_safept_node) {
+       for (uint k = 1; k < m->req(); k++) {
+         const Type *t = m->in(k)->bottom_type();
+diff -ruN openjdk6/hotspot/src/share/vm/opto/output.hpp openjdk/hotspot/src/share/vm/opto/output.hpp
+--- openjdk6/hotspot/src/share/vm/opto/output.hpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/output.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)output.hpp	1.28 07/05/05 17:06:24 JVM"
+-#endif
+ /*
+  * Copyright 2000-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class Arena;
+@@ -84,7 +81,7 @@
+ 
+   // Mapping from register to Node
+   Node_List _reg_node;
+-  
++
+   // Free list for pinch nodes.
+   Node_List _pinch_free_list;
+ 
+@@ -216,4 +213,3 @@
+ #endif
+ 
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/opto/parse1.cpp openjdk/hotspot/src/share/vm/opto/parse1.cpp
+--- openjdk6/hotspot/src/share/vm/opto/parse1.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/parse1.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)parse1.cpp	1.493 07/05/17 15:59:31 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -90,14 +87,14 @@
+                                      Node *local_addrs_base) {
+   Node *mem = memory(Compile::AliasIdxRaw);
+   Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
+-  
++
+   // Very similar to LoadNode::make, except we handle un-aligned longs and
+   // doubles on Sparc.  Intel can handle them just fine directly.
+   Node *l;
+   switch( bt ) {                // Signature is flattened
+   case T_INT:     l = new (C, 3) LoadINode( 0, mem, adr, TypeRawPtr::BOTTOM ); break;
+   case T_FLOAT:   l = new (C, 3) LoadFNode( 0, mem, adr, TypeRawPtr::BOTTOM ); break;
+-  case T_ADDRESS: 
++  case T_ADDRESS:
+   case T_OBJECT:  l = new (C, 3) LoadPNode( 0, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM ); break;
+   case T_LONG:
+   case T_DOUBLE: {
+@@ -105,11 +102,11 @@
+     // refers to the back half of the long/double.  Recompute adr.
+     adr = basic_plus_adr( local_addrs_base, local_addrs, -(index+1)*wordSize );
+     if( Matcher::misaligned_doubles_ok ) {
+-      l = (bt == T_DOUBLE) 
++      l = (bt == T_DOUBLE)
+         ? (Node*)new (C, 3) LoadDNode( 0, mem, adr, TypeRawPtr::BOTTOM )
+         : (Node*)new (C, 3) LoadLNode( 0, mem, adr, TypeRawPtr::BOTTOM );
+     } else {
+-      l = (bt == T_DOUBLE) 
++      l = (bt == T_DOUBLE)
+         ? (Node*)new (C, 3) LoadD_unalignedNode( 0, mem, adr, TypeRawPtr::BOTTOM )
+         : (Node*)new (C, 3) LoadL_unalignedNode( 0, mem, adr, TypeRawPtr::BOTTOM );
+     }
+@@ -216,7 +213,7 @@
+     // Try and copy the displaced header to the BoxNode
+     Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf);
+ 
+-   
++
+     store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw);
+ 
+     // Build a bogus FastLockNode (no code will be generated) and push the
+@@ -296,7 +293,7 @@
+ 
+   // End the OSR migration
+   make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
+-                    CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end), 
++                    CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
+                     "OSR_migration_end", TypeRawPtr::BOTTOM,
+                     osr_buf);
+ 
+@@ -466,7 +463,7 @@
+     assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");
+     if (C->tf() != tf()) {
+       MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
+-      assert(C->env()->system_dictionary_modification_counter_changed(), 
++      assert(C->env()->system_dictionary_modification_counter_changed(),
+              "Must invalidate if TypeFuncs differ");
+     }
+   } else {
+@@ -477,7 +474,7 @@
+   methods_parsed++;
+ #ifndef PRODUCT
+   // add method size here to guarantee that inlined methods are added too
+-  if (TimeCompiler) 
++  if (TimeCompiler)
+     _total_bytes_compiled += method()->code_size();
+ 
+   show_parse_info();
+@@ -560,8 +557,8 @@
+   do_exits();
+ 
+   // Collect a few more statistics.
+-  parse_idx += C->unique(); 
+-  parse_arena += C->node_arena()->used(); 
++  parse_idx += C->unique();
++  parse_arena += C->node_arena()->used();
+ 
+   if (log)  log->done("parse nodes='%d' memory='%d'",
+                       C->unique(), C->node_arena()->used());
+@@ -694,7 +691,7 @@
+   if (tf()->range()->cnt() > TypeFunc::Parms) {
+     const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
+     // Don't "bind" an unloaded return klass to the ret_phi. If the klass
+-    // becomes loaded during the subsequent parsing, the loaded and unloaded 
++    // becomes loaded during the subsequent parsing, the loaded and unloaded
+     // types will not join when we transform and push in do_exits().
+     const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
+     if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
+@@ -763,7 +760,7 @@
+ //--------------------------return_values--------------------------------------
+ void Compile::return_values(JVMState* jvms) {
+   GraphKit kit(jvms);
+-  Node* ret = new (this, TypeFunc::Parms) ReturnNode(TypeFunc::Parms, 
++  Node* ret = new (this, TypeFunc::Parms) ReturnNode(TypeFunc::Parms,
+                              kit.control(),
+                              kit.i_o(),
+                              kit.reset_memory(),
+@@ -839,7 +836,7 @@
+   case Bytecodes::_checkcast:
+   case Bytecodes::_instanceof:
+   case Bytecodes::_athrow:
+-  case Bytecodes::_anewarray: 
++  case Bytecodes::_anewarray:
+   case Bytecodes::_newarray:
+   case Bytecodes::_multianewarray:
+   case Bytecodes::_new:
+@@ -850,7 +847,7 @@
+ 
+   case Bytecodes::_invokestatic:
+   case Bytecodes::_invokespecial:
+-  case Bytecodes::_invokevirtual: 
++  case Bytecodes::_invokevirtual:
+   case Bytecodes::_invokeinterface:
+     return false;
+     break;
+@@ -976,7 +973,7 @@
+   if (do_synch || DTraceMethodProbes) {
+     // First move the exception list out of _exits:
+     GraphKit kit(_exits.transfer_exceptions_into_jvms());
+-    SafePointNode* normal_map = kit.map();  // keep this guy safe 
++    SafePointNode* normal_map = kit.map();  // keep this guy safe
+     // Now re-collect the exceptions into _exits:
+     SafePointNode* ex_map;
+     while ((ex_map = kit.pop_exception_state()) != NULL) {
+@@ -1024,9 +1021,9 @@
+ // For OSR, the map contains a single RawPtr parameter.
+ // Initial monitor locking for sync. methods is performed by do_method_entry.
+ SafePointNode* Parse::create_entry_map() {
+-  // Check for really stupid bail-out cases.  
++  // Check for really stupid bail-out cases.
+   uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
+-  if (len >= 32760) { 
++  if (len >= 32760) {
+     C->record_method_not_compilable_all_tiers("too many local variables");
+     return NULL;
+   }
+@@ -1093,13 +1090,13 @@
+   set_sp(0);                      // Java Stack Pointer
+ 
+   NOT_PRODUCT( count_compiled_calls(true/*at_method_entry*/, false/*is_inline*/); )
+-  
++
+   if (DTraceMethodProbes) {
+     make_dtrace_method_entry(method());
+   }
+ 
+   // If the method is synchronized, we need to construct a lock node, attach
+-  // it to the Start node, and pin it there.  
++  // it to the Start node, and pin it there.
+   if (method()->is_synchronized()) {
+     // Insert a FastLockNode right after the Start which takes as arguments
+     // the current thread pointer, the "this" pointer & the address of the
+@@ -1115,7 +1112,7 @@
+       ciInstance* mirror = _method->holder()->java_mirror();
+       const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
+       lock_obj = makecon(t_lock);
+-    } else {                  // Else pass the "this" pointer, 
++    } else {                  // Else pass the "this" pointer,
+       lock_obj = local(0);    // which is Parm0 from StartNode
+     }
+     // Clear out dead values from the debug info.
+@@ -1204,7 +1201,7 @@
+ 
+   // Note: We never call next_path_num along exception paths, so they
+   // never get processed as "ready".  Also, the input phis of exception
+-  // handlers get specially processed, so that 
++  // handlers get specially processed, so that
+ }
+ 
+ //---------------------------successor_for_bci---------------------------------
+@@ -1293,7 +1290,7 @@
+ 
+ //----------------------------record_change--------------------------------
+ // Record results of parsing one bytecode
+-void Parse::BytecodeParseHistogram::record_change() { 
++void Parse::BytecodeParseHistogram::record_change() {
+   if( PrintParseStatistics && !_parser->is_osr_parse() ) {
+     ++_bytecodes_parsed[_initial_bytecode];
+     _nodes_constructed [_initial_bytecode] += (_compiler->unique() - _initial_node_count);
+@@ -1503,7 +1500,7 @@
+ void Parse::handle_missing_successor(int target_bci) {
+ #ifndef PRODUCT
+   Block* b = block();
+-  int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1; 
++  int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
+   tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->pre_order(), trap_bci);
+ #endif
+   ShouldNotReachHere();
+@@ -1829,7 +1826,7 @@
+     map->set_req(idx, top());
+     return NULL;
+   }
+- 
++
+   // Do not create phis for top either.
+   // A top on a non-null control flow must be an unused even after the.phi.
+   if (t == Type::TOP || t == Type::HALF) {
+@@ -1947,7 +1944,7 @@
+     set_all_memory( _gvn.transform(mem_phi) );
+     set_i_o(        _gvn.transform(io_phi) );
+   }
+-  
++
+   set_control( _gvn.transform(result_rgn) );
+ }
+ 
+@@ -1983,7 +1980,7 @@
+     }
+     mms.memory()->add_req(mms.memory2());
+   }
+-  
++
+   // frame pointer is always same, already captured
+   if (value != NULL) {
+     // If returning oops to an interface-return, there is a silent free
+@@ -2052,7 +2049,7 @@
+   Node* mem = MergeMemNode::make(C, map()->memory());
+ 
+   mem = _gvn.transform(mem);
+-  
++
+   // Pass control through the safepoint
+   sfpnt->init_req(TypeFunc::Control  , control());
+   // Fix edges normally used by a call
+diff -ruN openjdk6/hotspot/src/share/vm/opto/parse2.cpp openjdk/hotspot/src/share/vm/opto/parse2.cpp
+--- openjdk6/hotspot/src/share/vm/opto/parse2.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/parse2.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)parse2.cpp	1.359 07/05/05 17:06:23 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,13 +19,13 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+ #include "incls/_parse2.cpp.incl"
+ 
+-extern int explicit_null_checks_inserted, 
++extern int explicit_null_checks_inserted,
+            explicit_null_checks_elided;
+ 
+ //---------------------------------array_load----------------------------------
+@@ -80,7 +77,7 @@
+     }
+   }
+ 
+-  // Check for big class initializers with all constant offsets 
++  // Check for big class initializers with all constant offsets
+   // feeding into a known-size array.
+   const TypeInt* idxtype = _gvn.type(idx)->is_int();
+   // See if the highest idx value is less than the lowest array bound,
+@@ -234,11 +231,11 @@
+     return false;
+   }
+ 
+-  void set (jint value, int dest, int table_index) {           
+-    setRange(value, value, dest, table_index); 
++  void set (jint value, int dest, int table_index) {
++    setRange(value, value, dest, table_index);
+   }
+-  bool adjoin(jint value, int dest, int table_index) { 
+-    return adjoinRange(value, value, dest, table_index); 
++  bool adjoin(jint value, int dest, int table_index) {
++    return adjoinRange(value, value, dest, table_index);
+   }
+ 
+   void print(ciEnv* env) {
+@@ -290,14 +287,14 @@
+   }
+   jint highest = lo_index+(len-1);
+   assert(ranges[rp].hi() == highest, "");
+-  if (highest != max_jint 
++  if (highest != max_jint
+       && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex)) {
+     ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
+   }
+   assert(rp < len+2, "not too many ranges");
+ 
+   // Safepoint in case if backward branch observed
+-  if( makes_backward_branch && UseLoopSafepoints )      
++  if( makes_backward_branch && UseLoopSafepoints )
+     add_safepoint();
+ 
+   jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
+@@ -346,14 +343,14 @@
+   }
+   jint highest = table[2*(len-1)];
+   assert(ranges[rp].hi() == highest, "");
+-  if( highest != max_jint 
++  if( highest != max_jint
+       && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex) ) {
+     ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
+   }
+   assert(rp < rnum, "not too many ranges");
+ 
+   // Safepoint in case backward branch observed
+-  if( makes_backward_branch && UseLoopSafepoints )      
++  if( makes_backward_branch && UseLoopSafepoints )
+     add_safepoint();
+ 
+   jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
+@@ -363,23 +360,23 @@
+ bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
+   // Are jumptables enabled
+   if (!UseJumpTables)  return false;
+-  
++
+   // Are jumptables supported
+   if (!Matcher::has_match_rule(Op_Jump))  return false;
+-  
++
+   // Don't make jump table if profiling
+   if (method_data_update())  return false;
+-  
++
+   // Decide if a guard is needed to lop off big ranges at either (or
+   // both) end(s) of the input set. We'll call this the default target
+   // even though we can't be sure that it is the true "default".
+-  
++
+   bool needs_guard = false;
+-  int default_dest; 
++  int default_dest;
+   int64 total_outlier_size = 0;
+   int64 hi_size = ((int64)hi->hi()) - ((int64)hi->lo()) + 1;
+   int64 lo_size = ((int64)lo->hi()) - ((int64)lo->lo()) + 1;
+-  
++
+   if (lo->dest() == hi->dest()) {
+     total_outlier_size = hi_size + lo_size;
+     default_dest = lo->dest();
+@@ -390,7 +387,7 @@
+     total_outlier_size = hi_size;
+     default_dest = hi->dest();
+   }
+-   
++
+   // If a guard test will eliminate very sparse end ranges, then
+   // it is worth the cost of an extra jump.
+   if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
+@@ -398,21 +395,21 @@
+     if (default_dest == lo->dest()) lo++;
+     if (default_dest == hi->dest()) hi--;
+   }
+-   
++
+   // Find the total number of cases and ranges
+   int64 num_cases = ((int64)hi->hi()) - ((int64)lo->lo()) + 1;
+   int num_range = hi - lo + 1;
+- 
++
+   // Don't create table if: too large, too small, or too sparse.
+-  if (num_cases < MinJumpTableSize || num_cases > MaxJumpTableSize)  
++  if (num_cases < MinJumpTableSize || num_cases > MaxJumpTableSize)
+     return false;
+-  if (num_cases > (MaxJumpTableSparseness * num_range))  
++  if (num_cases > (MaxJumpTableSparseness * num_range))
+     return false;
+-   
++
+   // Normalize table lookups to zero
+   int lowval = lo->lo();
+   key_val = _gvn.transform( new (C, 3) SubINode(key_val, _gvn.intcon(lowval)) );
+-   
++
+   // Generate a guard to protect against input keyvals that aren't
+   // in the switch domain.
+   if (needs_guard) {
+@@ -422,7 +419,7 @@
+     IfNode* iff = create_and_map_if( control(), tst, PROB_FAIR, COUNT_UNKNOWN);
+     jump_if_true_fork(iff, default_dest, NullTableIndex);
+   }
+- 
++
+   // Create an ideal node JumpTable that has projections
+   // of all possible ranges for a switch statement
+   // The key_val input must be converted to a pointer offset and scaled.
+@@ -433,14 +430,14 @@
+   const TypeLong* lkeytype = TypeLong::make(CONST64(0), num_cases-1, Type::WidenMin);
+   key_val       = _gvn.transform( new (C, 2) ConvI2LNode(key_val, lkeytype) );
+ #endif
+-  // Shift the value by wordsize so we have an index into the table, rather 
++  // Shift the value by wordsize so we have an index into the table, rather
+   // than a switch value
+   Node *shiftWord = _gvn.MakeConX(wordSize);
+   key_val = _gvn.transform( new (C, 3) MulXNode( key_val, shiftWord));
+ 
+   // Create the JumpNode
+   Node* jtn = _gvn.transform( new (C, 2) JumpNode(control(), key_val, num_cases) );
+-   
++
+   // These are the switch destinations hanging off the jumpnode
+   int i = 0;
+   for (SwitchRange* r = lo; r <= hi; r++) {
+@@ -651,16 +648,16 @@
+   Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 0));
+ 
+   push(res);
+-} 
++}
+ 
+ void Parse::do_irem() {
+   // Must keep both values on the expression-stack during null-check
+-  do_null_check(peek(), T_INT); 
++  do_null_check(peek(), T_INT);
+   // Compile-time detect of null-exception?
+   if (stopped())  return;
+ 
+   Node* b = pop();
+-  Node* a = pop();  
++  Node* a = pop();
+ 
+   const Type *t = _gvn.type(b);
+   if (t != Type::TOP) {
+@@ -897,6 +894,11 @@
+       tty->print_cr("Never-taken backedge stops compilation at bci %d",bci());
+ #endif
+     repush_if_args(); // to gather stats on loop
++    // We need to mark this branch as taken so that if we recompile we will
++    // see that it is possible. In the tiered system the interpreter doesn't
++    // do profiling and by the time we get to the lower tier from the interpreter
++    // the path may be cold again. Make sure it doesn't look untaken
++    profile_taken_branch(target_bci, !ProfileInterpreter);
+     uncommon_trap(Deoptimization::Reason_unreached,
+                   Deoptimization::Action_reinterpret,
+                   NULL, "cold");
+@@ -910,7 +912,7 @@
+ 
+   explicit_null_checks_inserted++;
+   Node* a = null();
+-  Node* b = pop();  
++  Node* b = pop();
+   Node* c = _gvn.transform( new (C, 3) CmpPNode(b, a) );
+ 
+   // Make a cast-away-nullness that is control dependent on the test
+@@ -945,7 +947,7 @@
+   // False branch
+   Node* iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff) );
+   set_control(iffalse);
+-  
++
+   if (stopped()) {              // Path is dead?
+     explicit_null_checks_elided++;
+   } else  {                     // Path is live.
+@@ -970,6 +972,11 @@
+       tty->print_cr("Never-taken backedge stops compilation at bci %d",bci());
+ #endif
+     repush_if_args(); // to gather stats on loop
++    // We need to mark this branch as taken so that if we recompile we will
++    // see that it is possible. In the tiered system the interpreter doesn't
++    // do profiling and by the time we get to the lower tier from the interpreter
++    // the path may be cold again. Make sure it doesn't look untaken
++    profile_taken_branch(target_bci, !ProfileInterpreter);
+     uncommon_trap(Deoptimization::Reason_unreached,
+                   Deoptimization::Action_reinterpret,
+                   NULL, "cold");
+@@ -984,7 +991,7 @@
+   if (!BoolTest(btest).is_canonical()) {
+     btest         = BoolTest(btest).negate();
+     taken_if_true = false;
+-    // prob is NOT updated here; it remains the probability of the taken 
++    // prob is NOT updated here; it remains the probability of the taken
+     // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
+   }
+   assert(btest != BoolTest::eq, "!= is the only canonical exact test");
+@@ -1067,6 +1074,15 @@
+     // We do not simply inspect for a null constant, since a node may
+     // optimize to 'null' later on.
+     repush_if_args();
++    // We need to mark this branch as taken so that if we recompile we will
++    // see that it is possible. In the tiered system the interpreter doesn't
++    // do profiling and by the time we get to the lower tier from the interpreter
++    // the path may be cold again. Make sure it doesn't look untaken
++    if (is_fallthrough) {
++      profile_not_taken_branch(!ProfileInterpreter);
++    } else {
++      profile_taken_branch(iter().get_dest(), !ProfileInterpreter);
++    }
+     uncommon_trap(Deoptimization::Reason_unreached,
+                   Deoptimization::Action_reinterpret,
+                   NULL,
+@@ -1296,7 +1312,7 @@
+     push( local(3) );
+     break;
+   case Bytecodes::_fload:
+-  case Bytecodes::_iload:    
++  case Bytecodes::_iload:
+     push( local(iter().get_index()) );
+     break;
+   case Bytecodes::_lload_0:
+@@ -1349,13 +1365,13 @@
+   case Bytecodes::_istore_3:
+   case Bytecodes::_astore_3:
+     set_local( 3, pop() );
+-    break; 
++    break;
+   case Bytecodes::_fstore:
+   case Bytecodes::_istore:
+   case Bytecodes::_astore:
+     set_local( iter().get_index(), pop() );
+-    break; 
+-  // long stores 
++    break;
++  // long stores
+   case Bytecodes::_lstore_0:
+     set_pair_local( 0, pop_pair() );
+     break;
+@@ -1372,7 +1388,7 @@
+     set_pair_local( iter().get_index(), pop_pair() );
+     break;
+ 
+-  // double stores 
++  // double stores
+   case Bytecodes::_dstore_0:
+     set_pair_local( 0, dstore_rounding(pop_pair()) );
+     break;
+@@ -1391,7 +1407,7 @@
+ 
+   case Bytecodes::_pop:  _sp -= 1;   break;
+   case Bytecodes::_pop2: _sp -= 2;   break;
+-  case Bytecodes::_swap: 
++  case Bytecodes::_swap:
+     a = pop();
+     b = pop();
+     push(a);
+@@ -1402,14 +1418,14 @@
+     push(a);
+     push(a);
+     break;
+-  case Bytecodes::_dup_x1: 
++  case Bytecodes::_dup_x1:
+     a = pop();
+     b = pop();
+     push( a );
+     push( b );
+     push( a );
+     break;
+-  case Bytecodes::_dup_x2: 
++  case Bytecodes::_dup_x2:
+     a = pop();
+     b = pop();
+     c = pop();
+@@ -1418,7 +1434,7 @@
+     push( b );
+     push( a );
+     break;
+-  case Bytecodes::_dup2: 
++  case Bytecodes::_dup2:
+     a = pop();
+     b = pop();
+     push( b );
+@@ -1458,13 +1474,13 @@
+ 
+   case Bytecodes::_arraylength: {
+     // Must do null-check with value on expression stack
+-    Node *ary = do_null_check(peek(), T_ARRAY); 
++    Node *ary = do_null_check(peek(), T_ARRAY);
+     // Compile-time detect of null-exception?
+     if (stopped())  return;
+     a = pop();
+-    push(load_array_length(a)); 
++    push(load_array_length(a));
+     break;
+-  } 
++  }
+ 
+   case Bytecodes::_baload: array_load(T_BYTE);   break;
+   case Bytecodes::_caload: array_load(T_CHAR);   break;
+@@ -1498,9 +1514,9 @@
+     c = pop();                  // Oop to store
+     b = pop();                  // index (already used)
+     a = pop();                  // the array itself
++    const Type* elemtype  = _gvn.type(a)->is_aryptr()->elem();
+     const TypeAryPtr* adr_type = TypeAryPtr::OOPS;
+-    Node* store = store_to_memory(control(), d, c, T_OBJECT, adr_type);
+-    store_barrier(store, T_ARRAY, a, d, c);
++    Node* store = store_oop_to_array(control(), a, d, adr_type, c, elemtype, T_OBJECT);
+     break;
+   }
+   case Bytecodes::_lastore: {
+@@ -1541,19 +1557,19 @@
+     break;
+   case Bytecodes::_idiv:
+     // Must keep both values on the expression-stack during null-check
+-    do_null_check(peek(), T_INT); 
++    do_null_check(peek(), T_INT);
+     // Compile-time detect of null-exception?
+     if (stopped())  return;
+     b = pop();
+-    a = pop();  
++    a = pop();
+     push( _gvn.transform( new (C, 3) DivINode(control(),a,b) ) );
+     break;
+   case Bytecodes::_imul:
+-    b = pop(); a = pop();  
++    b = pop(); a = pop();
+     push( _gvn.transform( new (C, 3) MulINode(a,b) ) );
+     break;
+   case Bytecodes::_iadd:
+-    b = pop(); a = pop();  
++    b = pop(); a = pop();
+     push( _gvn.transform( new (C, 3) AddINode(a,b) ) );
+     break;
+   case Bytecodes::_ineg:
+@@ -1561,19 +1577,19 @@
+     push( _gvn.transform( new (C, 3) SubINode(_gvn.intcon(0),a)) );
+     break;
+   case Bytecodes::_isub:
+-    b = pop(); a = pop();  
++    b = pop(); a = pop();
+     push( _gvn.transform( new (C, 3) SubINode(a,b) ) );
+     break;
+   case Bytecodes::_iand:
+-    b = pop(); a = pop();  
++    b = pop(); a = pop();
+     push( _gvn.transform( new (C, 3) AndINode(a,b) ) );
+     break;
+   case Bytecodes::_ior:
+-    b = pop(); a = pop();  
++    b = pop(); a = pop();
+     push( _gvn.transform( new (C, 3) OrINode(a,b) ) );
+     break;
+   case Bytecodes::_ixor:
+-    b = pop(); a = pop();  
++    b = pop(); a = pop();
+     push( _gvn.transform( new (C, 3) XorINode(a,b) ) );
+     break;
+   case Bytecodes::_ishl:
+@@ -1589,31 +1605,31 @@
+     push( _gvn.transform( new (C, 3) URShiftINode(a,b) ) );
+     break;
+ 
+-  case Bytecodes::_fneg:  
++  case Bytecodes::_fneg:
+     a = pop();
+     b = _gvn.transform(new (C, 2) NegFNode (a));
+     push(b);
+     break;
+ 
+   case Bytecodes::_fsub:
+-    b = pop();  
+-    a = pop();  
++    b = pop();
++    a = pop();
+     c = _gvn.transform( new (C, 3) SubFNode(a,b) );
+     d = precision_rounding(c);
+     push( d );
+     break;
+ 
+   case Bytecodes::_fadd:
+-    b = pop();  
+-    a = pop();  
++    b = pop();
++    a = pop();
+     c = _gvn.transform( new (C, 3) AddFNode(a,b) );
+     d = precision_rounding(c);
+     push( d );
+     break;
+ 
+   case Bytecodes::_fmul:
+-    b = pop();  
+-    a = pop();  
++    b = pop();
++    a = pop();
+     c = _gvn.transform( new (C, 3) MulFNode(a,b) );
+     d = precision_rounding(c);
+     push( d );
+@@ -1621,7 +1637,7 @@
+ 
+   case Bytecodes::_fdiv:
+     b = pop();
+-    a = pop();  
++    a = pop();
+     c = _gvn.transform( new (C, 3) DivFNode(0,a,b) );
+     d = precision_rounding(c);
+     push( d );
+@@ -1631,7 +1647,7 @@
+     if (Matcher::has_match_rule(Op_ModF)) {
+       // Generate a ModF node.
+       b = pop();
+-      a = pop();  
++      a = pop();
+       c = _gvn.transform( new (C, 3) ModFNode(0,a,b) );
+       d = precision_rounding(c);
+       push( d );
+@@ -1641,11 +1657,11 @@
+       modf();
+     }
+     break;
+-    
++
+   case Bytecodes::_fcmpl:
+     b = pop();
+     a = pop();
+-    c = _gvn.transform( new (C, 3) CmpF3Node( a, b)); 
++    c = _gvn.transform( new (C, 3) CmpF3Node( a, b));
+     push(c);
+     break;
+   case Bytecodes::_fcmpg:
+@@ -1657,7 +1673,7 @@
+     // as well by using CmpF3 which implements unordered-lesser instead of
+     // unordered-greater semantics.  Finally, commute the result bits.  Result
+     // is same as using a CmpF3Greater except we did it with CmpF3 alone.
+-    c = _gvn.transform( new (C, 3) CmpF3Node( b, a)); 
++    c = _gvn.transform( new (C, 3) CmpF3Node( b, a));
+     c = _gvn.transform( new (C, 3) SubINode(_gvn.intcon(0),c) );
+     push(c);
+     break;
+@@ -1689,7 +1705,7 @@
+ 
+   case Bytecodes::_l2f:
+     if (Matcher::convL2FSupported()) {
+-      a = pop_pair(); 
++      a = pop_pair();
+       b = _gvn.transform( new (C, 2) ConvL2FNode(a));
+       // For i486.ad, FILD doesn't restrict precision to 24 or 53 bits.
+       // Rather than storing the result into an FP register then pushing
+@@ -1704,7 +1720,7 @@
+     break;
+ 
+   case Bytecodes::_l2d:
+-    a = pop_pair(); 
++    a = pop_pair();
+     b = _gvn.transform( new (C, 2) ConvL2DNode(a));
+     // For i486.ad, rounding is always necessary (see _l2f above).
+     // c = dprecision_rounding(b);
+@@ -1725,8 +1741,8 @@
+     break;
+ 
+   case Bytecodes::_dsub:
+-    b = pop_pair();  
+-    a = pop_pair();  
++    b = pop_pair();
++    a = pop_pair();
+     c = _gvn.transform( new (C, 3) SubDNode(a,b) );
+     d = dprecision_rounding(c);
+     push_pair( d );
+@@ -1741,8 +1757,8 @@
+     break;
+ 
+   case Bytecodes::_dmul:
+-    b = pop_pair();  
+-    a = pop_pair();  
++    b = pop_pair();
++    a = pop_pair();
+     c = _gvn.transform( new (C, 3) MulDNode(a,b) );
+     d = dprecision_rounding(c);
+     push_pair( d );
+@@ -1750,13 +1766,13 @@
+ 
+   case Bytecodes::_ddiv:
+     b = pop_pair();
+-    a = pop_pair();  
++    a = pop_pair();
+     c = _gvn.transform( new (C, 3) DivDNode(0,a,b) );
+     d = dprecision_rounding(c);
+     push_pair( d );
+     break;
+ 
+-  case Bytecodes::_dneg: 
++  case Bytecodes::_dneg:
+     a = pop_pair();
+     b = _gvn.transform(new (C, 2) NegDNode (a));
+     push_pair(b);
+@@ -1766,9 +1782,9 @@
+     if (Matcher::has_match_rule(Op_ModD)) {
+       // Generate a ModD node.
+       b = pop_pair();
+-      a = pop_pair();  
++      a = pop_pair();
+       // a % b
+-    
++
+       c = _gvn.transform( new (C, 3) ModDNode(0,a,b) );
+       d = dprecision_rounding(c);
+       push_pair( d );
+@@ -1778,11 +1794,11 @@
+       modd();
+     }
+     break;
+-    
++
+   case Bytecodes::_dcmpl:
+     b = pop_pair();
+     a = pop_pair();
+-    c = _gvn.transform( new (C, 3) CmpD3Node( a, b)); 
++    c = _gvn.transform( new (C, 3) CmpD3Node( a, b));
+     push(c);
+     break;
+ 
+@@ -1791,16 +1807,16 @@
+     a = pop_pair();
+     // Same as dcmpl but need to flip the unordered case.
+     // Commute the inputs, which negates the result sign except for unordered.
+-    // Flip the unordered as well by using CmpD3 which implements 
++    // Flip the unordered as well by using CmpD3 which implements
+     // unordered-lesser instead of unordered-greater semantics.
+-    // Finally, negate the result bits.  Result is same as using a 
++    // Finally, negate the result bits.  Result is same as using a
+     // CmpD3Greater except we did it with CmpD3 alone.
+-    c = _gvn.transform( new (C, 3) CmpD3Node( b, a)); 
++    c = _gvn.transform( new (C, 3) CmpD3Node( b, a));
+     c = _gvn.transform( new (C, 3) SubINode(_gvn.intcon(0),c) );
+     push(c);
+     break;
+ 
+-   
++
+     // Note for longs -> lo word is on TOS, hi word is on TOS - 1
+   case Bytecodes::_land:
+     b = pop_pair();
+@@ -1833,23 +1849,23 @@
+     c = _gvn.transform( new (C, 3) RShiftLNode(a,b) );
+     push_pair(c);
+     break;
+-  case Bytecodes::_lushr: 
++  case Bytecodes::_lushr:
+     b = pop();                  // the shift count
+     a = pop_pair();             // value to be shifted
+     c = _gvn.transform( new (C, 3) URShiftLNode(a,b) );
+     push_pair(c);
+     break;
+-  case Bytecodes::_lmul: 
++  case Bytecodes::_lmul:
+     b = pop_pair();
+     a = pop_pair();
+     c = _gvn.transform( new (C, 3) MulLNode(a,b) );
+     push_pair(c);
+     break;
+ 
+-  case Bytecodes::_lrem: 
++  case Bytecodes::_lrem:
+     // Must keep both values on the expression-stack during null-check
+     assert(peek(0) == top(), "long word order");
+-    do_null_check(peek(1), T_LONG); 
++    do_null_check(peek(1), T_LONG);
+     // Compile-time detect of null-exception?
+     if (stopped())  return;
+     b = pop_pair();
+@@ -1858,10 +1874,10 @@
+     push_pair(c);
+     break;
+ 
+-  case Bytecodes::_ldiv: 
++  case Bytecodes::_ldiv:
+     // Must keep both values on the expression-stack during null-check
+     assert(peek(0) == top(), "long word order");
+-    do_null_check(peek(1), T_LONG); 
++    do_null_check(peek(1), T_LONG);
+     // Compile-time detect of null-exception?
+     if (stopped())  return;
+     b = pop_pair();
+@@ -1870,19 +1886,19 @@
+     push_pair(c);
+     break;
+ 
+-  case Bytecodes::_ladd: 
++  case Bytecodes::_ladd:
+     b = pop_pair();
+     a = pop_pair();
+     c = _gvn.transform( new (C, 3) AddLNode(a,b) );
+     push_pair(c);
+     break;
+-  case Bytecodes::_lsub: 
++  case Bytecodes::_lsub:
+     b = pop_pair();
+     a = pop_pair();
+     c = _gvn.transform( new (C, 3) SubLNode(a,b) );
+     push_pair(c);
+     break;
+-  case Bytecodes::_lcmp: 
++  case Bytecodes::_lcmp:
+     // Safepoints are now inserted _before_ branches.  The long-compare
+     // bytecode painfully produces a 3-way value (-1,0,+1) which requires a
+     // slew of control flow.  These are usually followed by a CmpI vs zero and
+@@ -1904,17 +1920,17 @@
+       case Bytecodes::_ifle:
+       case Bytecodes::_ifne:
+       case Bytecodes::_ifeq:
+-        // If this is a backwards branch in the bytecodes, add Safepoint        
+-        maybe_add_safepoint(iter().next_get_dest());        
++        // If this is a backwards branch in the bytecodes, add Safepoint
++        maybe_add_safepoint(iter().next_get_dest());
+       }
+     }
+     b = pop_pair();
+     a = pop_pair();
+-    c = _gvn.transform( new (C, 3) CmpL3Node( a, b )); 
++    c = _gvn.transform( new (C, 3) CmpL3Node( a, b ));
+     push(c);
+     break;
+ 
+-  case Bytecodes::_lneg: 
++  case Bytecodes::_lneg:
+     a = pop_pair();
+     b = _gvn.transform( new (C, 3) SubLNode(longcon(0),a));
+     push_pair(b);
+@@ -1983,7 +1999,7 @@
+ 
+   case Bytecodes::_athrow:
+     // null exception oop throws NULL pointer exception
+-    do_null_check(peek(), T_OBJECT); 
++    do_null_check(peek(), T_OBJECT);
+     if (stopped())  return;
+     if (JvmtiExport::can_post_exceptions()) {
+       // "Full-speed throwing" is not necessary here,
+@@ -2040,8 +2056,8 @@
+   handle_if_acmp:
+     // If this is a backwards branch in the bytecodes, add Safepoint
+     maybe_add_safepoint(iter().get_dest());
+-    a = pop(); 
+-    b = pop();  
++    a = pop();
++    b = pop();
+     c = _gvn.transform( new (C, 3) CmpPNode(b, a) );
+     do_if(btest, c);
+     break;
+@@ -2056,7 +2072,7 @@
+     // If this is a backwards branch in the bytecodes, add Safepoint
+     maybe_add_safepoint(iter().get_dest());
+     a = _gvn.intcon(0);
+-    b = pop();  
++    b = pop();
+     c = _gvn.transform( new (C, 3) CmpINode(b, a) );
+     do_if(btest, c);
+     break;
+@@ -2067,11 +2083,11 @@
+   case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp;
+   case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp;
+   case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp;
+-  handle_if_icmp: 
++  handle_if_icmp:
+     // If this is a backwards branch in the bytecodes, add Safepoint
+     maybe_add_safepoint(iter().get_dest());
+-    a = pop(); 
+-    b = pop();  
++    a = pop();
++    b = pop();
+     c = _gvn.transform( new (C, 3) CmpINode( b, a ) );
+     do_if(btest, c);
+     break;
+@@ -2086,7 +2102,7 @@
+ 
+   case Bytecodes::_invokestatic:
+   case Bytecodes::_invokespecial:
+-  case Bytecodes::_invokevirtual: 
++  case Bytecodes::_invokevirtual:
+   case Bytecodes::_invokeinterface:
+     do_call();
+     break;
+@@ -2096,7 +2112,7 @@
+   case Bytecodes::_instanceof:
+     do_instanceof();
+     break;
+-  case Bytecodes::_anewarray: 
++  case Bytecodes::_anewarray:
+     do_anewarray();
+     break;
+   case Bytecodes::_newarray:
+@@ -2113,7 +2129,7 @@
+   case Bytecodes::_jsr_w:
+     do_jsr();
+     break;
+-      
++
+   case Bytecodes::_ret:
+     do_ret();
+     break;
+@@ -2140,4 +2156,16 @@
+     tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) );
+     ShouldNotReachHere();
+   }
++
++#ifndef PRODUCT
++  IdealGraphPrinter *printer = IdealGraphPrinter::printer();
++  if(printer) {
++    char buffer[256];
++    sprintf(buffer, "Bytecode %d: %s", bci(), Bytecodes::name(bc()));
++    bool old = printer->traverse_outs();
++    printer->set_traverse_outs(true);
++    printer->print_method(C, buffer, 3);
++    printer->set_traverse_outs(old);
++  }
++#endif
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/opto/parse3.cpp openjdk/hotspot/src/share/vm/opto/parse3.cpp
+--- openjdk6/hotspot/src/share/vm/opto/parse3.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/parse3.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)parse3.cpp	1.267 07/11/21 11:31:54 JVM"
+-#endif
+ /*
+  * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -35,13 +32,13 @@
+   // Could be the field_holder's <clinit> method, or <clinit> for a subklass.
+   // Better to check now than to Deoptimize as soon as we execute
+   assert( field->is_static(), "Only check if field is static");
+-  // is_being_initialized() is too generous.  It allows access to statics 
++  // is_being_initialized() is too generous.  It allows access to statics
+   // by threads that are not running the <clinit> before the <clinit> finishes.
+   // return field->holder()->is_being_initialized();
+ 
+   // The following restriction is correct but conservative.
+   // It is also desirable to allow compilation of methods called from <clinit>
+-  // but this generated code will need to be made safe for execution by 
++  // but this generated code will need to be made safe for execution by
+   // other threads, or the transition from interpreted to compiled code would
+   // need to be guarded.
+   ciInstanceKlass *field_holder = field->holder();
+@@ -161,13 +158,13 @@
+   } else {
+     type = Type::get_const_basic_type(bt);
+   }
+-  // Build the load.  
++  // Build the load.
+   Node* ld = make_load(NULL, adr, type, bt, adr_type, is_vol);
+ 
+   // Adjust Java stack
+-  if (type2size[bt] == 1) 
++  if (type2size[bt] == 1)
+     push(ld);
+-  else 
++  else
+     push_pair(ld);
+ 
+   if (must_assert_null) {
+@@ -222,11 +219,19 @@
+   // Round doubles before storing
+   if (bt == T_DOUBLE)  val = dstore_rounding(val);
+ 
+-  // Store the value.  
+-  Node* store = store_to_memory( control(), adr, val, bt, adr_type, is_vol );
+-
+-  // Object-writes need a store-barrier
+-  if (bt == T_OBJECT)  store_barrier(store, T_OBJECT, obj, adr, val);
++  // Store the value.
++  Node* store;
++  if (bt == T_OBJECT) {
++    const TypePtr* field_type;
++    if (!field->type()->is_loaded()) {
++      field_type = TypeInstPtr::BOTTOM;
++    } else {
++      field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
++    }
++    store = store_oop_to_object( control(), obj, adr, adr_type, val, field_type, bt);
++  } else {
++    store = store_to_memory( control(), adr, val, bt, adr_type, is_vol );
++  }
+ 
+   // If reference is volatile, prevent following volatiles ops from
+   // floating up before the volatile write.
+@@ -237,12 +242,12 @@
+     int adr_idx = C->get_alias_index(adr_type);
+     insert_mem_bar_volatile(Op_MemBarVolatile, adr_idx);
+ 
+-    // Now place a membar for AliasIdxBot for the unknown yet-to-be-parsed 
++    // Now place a membar for AliasIdxBot for the unknown yet-to-be-parsed
+     // volatile alias indices. Skip this if the membar is redundant.
+     if (adr_idx != Compile::AliasIdxBot) {
+       insert_mem_bar_volatile(Op_MemBarVolatile, Compile::AliasIdxBot);
+     }
+-      
++
+     // Finally, place alias-index-specific membars for each volatile index
+     // that isn't the adr_idx membar. Typically there's only 1 or 2.
+     for( int i = Compile::AliasIdxRaw; i < C->num_alias_types(); i++ ) {
+@@ -315,7 +320,7 @@
+   // we need the loaded class for the rest of graph; do not
+   // initialize the container class (see Java spec)!!!
+   assert(will_link, "anewarray: typeflow responsibility");
+-  
++
+   ciObjArrayKlass* array_klass = ciObjArrayKlass::make(klass);
+   // Check that array_klass object is loaded
+   if (!array_klass->is_loaded()) {
+@@ -325,7 +330,7 @@
+                   array_klass);
+     return;
+   }
+-  
++
+   kill_dead_locals();
+ 
+   const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass);
+@@ -345,6 +350,28 @@
+   push(obj);
+ }
+ 
++// Expand simple expressions like new int[3][5] and new Object[2][nonConLen].
++// Also handle the degenerate 1-dimensional case of anewarray.
++Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions) {
++  Node* length = lengths[0];
++  assert(length != NULL, "");
++  Node* array = new_array(makecon(TypeKlassPtr::make(array_klass)), length);
++  if (ndimensions > 1) {
++    jint length_con = find_int_con(length, -1);
++    guarantee(length_con >= 0, "non-constant multianewarray");
++    ciArrayKlass* array_klass_1 = array_klass->as_obj_array_klass()->element_klass()->as_array_klass();
++    const TypePtr* adr_type = TypeAryPtr::OOPS;
++    const Type*    elemtype = _gvn.type(array)->is_aryptr()->elem();
++    const intptr_t header   = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
++    for (jint i = 0; i < length_con; i++) {
++      Node*    elem   = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1);
++      intptr_t offset = header + ((intptr_t)i << LogBytesPerWord);
++      Node*    eaddr  = basic_plus_adr(array, offset);
++      store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT);
++    }
++  }
++  return array;
++}
+ 
+ void Parse::do_multianewarray() {
+   int ndimensions = iter().get_dimensions();
+@@ -356,7 +383,8 @@
+ 
+   // Note:  Array classes are always initialized; no is_initialized check.
+ 
+-  if (ndimensions > 5) {
++  enum { MAX_DIMENSION = 5 };
++  if (ndimensions > MAX_DIMENSION || ndimensions <= 0) {
+     uncommon_trap(Deoptimization::Reason_unhandled,
+                   Deoptimization::Action_none);
+     return;
+@@ -364,80 +392,46 @@
+ 
+   kill_dead_locals();
+ 
+-  // Can use _multianewarray instead of _anewarray or _newarray
+-  // if only one dimension
+-  if( ndimensions == 1 && array_klass->is_type_array_klass() ) {
+-    // If this is for a basic type, call code for do_newarray instead
+-    BasicType element_type = array_klass->as_type_array_klass()->element_type();
+-    do_newarray(element_type);
+-    return;
+-  }
+-
+-  ciObjArrayKlass* obj_array_klass = array_klass->as_obj_array_klass();
+-
+-  // find the element type (etype)
+-  ciKlass* element_klass = obj_array_klass->base_element_klass();
+-  // Base_element is either an instance-klass or a type-array but NOT
+-  // a basic type.  We really wanted the klass of a basic type; since that's
+-  // not available we have to test for type-array here.
+-  const Type* element_type = element_klass->is_type_array_klass()
+-    ? Type::get_const_basic_type(element_klass->as_type_array_klass()->element_type())
+-    : TypeInstPtr::make(TypePtr::BotPTR, element_klass->as_instance_klass());
+-
+-  int mdimensions = obj_array_klass->dimension();
+-
+   // get the lengths from the stack (first dimension is on top)
+-  Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
++  Node* length[MAX_DIMENSION+1];
+   length[ndimensions] = NULL;  // terminating null for make_runtime_call
+-  for (int j = ndimensions-1; j >= 0 ; j--) length[j] = pop();
++  int j;
++  for (j = ndimensions-1; j >= 0 ; j--) length[j] = pop();
+ 
+-  // construct the array type
+-  const Type* prev_type  = element_type;
+-  ciKlass*    prev_array = element_klass->is_type_array_klass() ? element_klass : NULL;
+-
+-  // fill the lowest dimensions with unknown sizes
+-  for (int index = 0; index < mdimensions - ndimensions; index++) {
+-    const TypeAry* arr0 = TypeAry::make(prev_type, TypeInt::POS);
+-    prev_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, prev_array, true, 0);
+-    prev_array = NULL; // array klasses can be lazy, except the first
+-  }
+-
+-  // Fill in the dimensions with known sizes (passed in the JVM stack)
+-  for (int i = 0; i < ndimensions; i++) {
+-    const Type* count_type = TypeInt::POS;
+-    TypePtr::PTR ptr = TypePtr::BotPTR;
+-    bool    is_exact = false;
+-    // For the outermost dimension, try to get a better type than POS for the
+-    // size.  We don't do this for inner dimmensions because we lack the 
+-    // support to invalidate the refined type when the base array is modified
+-    // by an aastore, or when it aliased via certain uses of an aaload.
+-    if (i == ndimensions - 1) {
+-      const Type* count_range_type = length[0]->bottom_type()->join(count_type);
+-      // Only improve the type if the array length is non-negative.
+-      if (!count_range_type->empty()) {
+-        count_type = count_range_type;
+-        ptr = TypePtr::NotNull;
+-      }
+-      // Only the outermost type is exact (4957832, 6587132),
+-      // since rows of the array can be either nulled out or replaced
+-      // by subarrays of sharper types.
+-      is_exact = true;
+-    } 
+-    assert(count_type->is_int(), "must be integer");
+-    const TypeAry* arr0 = TypeAry::make(prev_type, (TypeInt*)count_type);
+-    prev_type = TypeAryPtr::make(ptr, arr0, prev_array, is_exact, 0);
+-    prev_array = NULL; // array klasses can be lazy, except the first
++  // The original expression was of this form: new T[length0][length1]...
++  // It is often the case that the lengths are small (except the last).
++  // If that happens, use the fast 1-d creator a constant number of times.
++  const jint expand_limit = MIN2((juint)MultiArrayExpandLimit, (juint)100);
++  jint expand_count = 1;        // count of allocations in the expansion
++  jint expand_fanout = 1;       // running total fanout
++  for (j = 0; j < ndimensions-1; j++) {
++    jint dim_con = find_int_con(length[j], -1);
++    expand_fanout *= dim_con;
++    expand_count  += expand_fanout; // count the level-J sub-arrays
++    if (dim_con < 0
++        || dim_con > expand_limit
++        || expand_count > expand_limit) {
++      expand_count = 0;
++      break;
++    }
++  }
++
++  // Can use multianewarray instead of [a]newarray if only one dimension,
++  // or if all non-final dimensions are small constants.
++  if (expand_count == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
++    Node* obj = expand_multianewarray(array_klass, &length[0], ndimensions);
++    push(obj);
++    return;
+   }
+-  const TypeAryPtr* arr = (const TypeAryPtr*)prev_type;
+ 
+   address fun = NULL;
+   switch (ndimensions) {
+-   case 1: fun = OptoRuntime::multianewarray1_Java(); break;
+-   case 2: fun = OptoRuntime::multianewarray2_Java(); break;
+-   case 3: fun = OptoRuntime::multianewarray3_Java(); break;
+-   case 4: fun = OptoRuntime::multianewarray4_Java(); break;
+-   case 5: fun = OptoRuntime::multianewarray5_Java(); break;
+-   default: ShouldNotReachHere();
++  //case 1: Actually, there is no case 1.  It's handled by new_array.
++  case 2: fun = OptoRuntime::multianewarray2_Java(); break;
++  case 3: fun = OptoRuntime::multianewarray3_Java(); break;
++  case 4: fun = OptoRuntime::multianewarray4_Java(); break;
++  case 5: fun = OptoRuntime::multianewarray5_Java(); break;
++  default: ShouldNotReachHere();
+   };
+ 
+   Node* c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
+@@ -447,6 +441,23 @@
+                               length[0], length[1], length[2],
+                               length[3], length[4]);
+   Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms));
+-  Node *cast = _gvn.transform( new (C, 2) CheckCastPPNode(control(), res, arr) );
+-  push( cast );
++
++  const Type* type = TypeOopPtr::make_from_klass_raw(array_klass);
++
++  // Improve the type:  We know it's not null, exact, and of a given length.
++  type = type->is_ptr()->cast_to_ptr_type(TypePtr::NotNull);
++  type = type->is_aryptr()->cast_to_exactness(true);
++
++  const TypeInt* ltype = _gvn.find_int_type(length[0]);
++  if (ltype != NULL)
++    type = type->is_aryptr()->cast_to_size(ltype);
++
++  // We cannot sharpen the nested sub-arrays, since the top level is mutable.
++
++  Node* cast = _gvn.transform( new (C, 2) CheckCastPPNode(control(), res, type) );
++  push(cast);
++
++  // Possible improvements:
++  // - Make a fast path for small multi-arrays.  (W/ implicit init. loops.)
++  // - Issue CastII against length[*] values, to TypeInt::POS.
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/opto/parseHelper.cpp openjdk/hotspot/src/share/vm/opto/parseHelper.cpp
+--- openjdk6/hotspot/src/share/vm/opto/parseHelper.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/parseHelper.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)parseHelper.cpp	1.196 07/05/23 17:37:28 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -33,7 +30,7 @@
+ void GraphKit::make_dtrace_method_entry_exit(ciMethod* method, bool is_entry) {
+   const TypeFunc *call_type    = OptoRuntime::dtrace_method_entry_exit_Type();
+   address         call_address = is_entry ? CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry) :
+-                                            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit); 
++                                            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit);
+   const char     *call_name    = is_entry ? "dtrace_method_entry" : "dtrace_method_exit";
+ 
+   // Get base of thread-local storage area
+@@ -48,7 +45,7 @@
+   // For some reason, this call reads only raw memory.
+   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
+   make_runtime_call(RC_LEAF | RC_NARROW_MEM,
+-                    call_type, call_address, 
++                    call_type, call_address,
+                     call_name, raw_adr_type,
+                     thread, method_node);
+ }
+@@ -175,7 +172,7 @@
+     }
+     if (stopped()) {          // MUST uncommon-trap?
+       set_control(ctrl);      // Then Don't Do It, just fall into the normal checking
+-    } else {                  // Cast array klass to exactness: 
++    } else {                  // Cast array klass to exactness:
+       // Use the exact constant value we know it is.
+       replace_in_map(array_klass,con);
+       CompileLog* log = C->log();
+@@ -231,7 +228,7 @@
+ // Debug dump of the mapping from address types to MergeMemNode indices.
+ void Parse::dump_map_adr_mem() const {
+   tty->print_cr("--- Mapping from address types to memory Nodes ---");
+-  MergeMemNode *mem = map() == NULL ? NULL : (map()->memory()->is_MergeMem() ? 
++  MergeMemNode *mem = map() == NULL ? NULL : (map()->memory()->is_MergeMem() ?
+                                       map()->memory()->as_MergeMem() : NULL);
+   for (uint i = 0; i < (uint)C->num_alias_types(); i++) {
+     C->alias_type(i)->print_on(tty);
+@@ -257,9 +254,9 @@
+ void Parse::test_counter_against_threshold(Node* cnt, int limit) {
+   // Test the counter against the limit and uncommon trap if greater.
+ 
+-  // This code is largely copied from the range check code in 
++  // This code is largely copied from the range check code in
+   // array_addressing()
+-  
++
+   // Test invocation count vs threshold
+   Node *threshold = makecon(TypeInt::make(limit));
+   Node *chk   = _gvn.transform( new (C, 3) CmpUNode( cnt, threshold) );
+@@ -292,25 +289,25 @@
+   store_to_memory( NULL, adr_node, incr, T_INT, adr_type );
+ }
+ 
+-//----------------------------method_data_addressing--------------------------- 
+-Node* Parse::method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) { 
++//----------------------------method_data_addressing---------------------------
++Node* Parse::method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
+   // Get offset within methodDataOop of the data array
+   ByteSize data_offset = methodDataOopDesc::data_offset();
+-  
++
+   // Get cell offset of the ProfileData within data array
+   int cell_offset = md->dp_to_di(data->dp());
+ 
+   // Add in counter_offset, the # of bytes into the ProfileData of counter or flag
+   int offset = in_bytes(data_offset) + cell_offset + in_bytes(counter_offset);
+ 
+-  const TypePtr* adr_type = TypeOopPtr::make_from_constant(md); 
+-  Node* mdo = makecon(adr_type); 
+-  Node* ptr = basic_plus_adr(mdo, mdo, offset); 
+- 
+-  if (stride != 0) { 
+-    Node* str = _gvn.MakeConX(stride); 
+-    Node* scale = _gvn.transform( new (C, 3) MulXNode( idx, str ) ); 
+-    ptr   = _gvn.transform( new (C, 4) AddPNode( mdo, ptr, scale ) ); 
++  const TypePtr* adr_type = TypeOopPtr::make_from_constant(md);
++  Node* mdo = makecon(adr_type);
++  Node* ptr = basic_plus_adr(mdo, mdo, offset);
++
++  if (stride != 0) {
++    Node* str = _gvn.MakeConX(stride);
++    Node* scale = _gvn.transform( new (C, 3) MulXNode( idx, str ) );
++    ptr   = _gvn.transform( new (C, 4) AddPNode( mdo, ptr, scale ) );
+   }
+ 
+   return ptr;
+@@ -347,10 +344,10 @@
+ }
+ 
+ //----------------------------profile_taken_branch-----------------------------
+-void Parse::profile_taken_branch(int target_bci) {
++void Parse::profile_taken_branch(int target_bci, bool force_update) {
+   // This is a potential osr_site if we have a backedge.
+   int cur_bci = bci();
+-  bool osr_site = 
++  bool osr_site =
+     (target_bci <= cur_bci) && count_invocations() && UseOnStackReplacement;
+ 
+   // If we are going to OSR, restart at the target bytecode.
+@@ -359,15 +356,22 @@
+   // To do: factor out the the limit calculations below. These duplicate
+   // the similar limit calculations in the interpreter.
+ 
+-  if (method_data_update()) {
++  if (method_data_update() || force_update) {
+     ciMethodData* md = method()->method_data();
+     assert(md != NULL, "expected valid ciMethodData");
+     ciProfileData* data = md->bci_to_data(cur_bci);
+     assert(data->is_JumpData(), "need JumpData for taken branch");
+     increment_md_counter_at(md, data, JumpData::taken_offset());
+-    
++  }
++
++  // In the new tiered system this is all we need to do. In the old
++  // (c2 based) tiered sytem we must do the code below.
++#ifndef TIERED
++  if (method_data_update()) {
++    ciMethodData* md = method()->method_data();
+     if (osr_site) {
+-      int limit = (CompileThreshold 
++      ciProfileData* data = md->bci_to_data(cur_bci);
++      int limit = (CompileThreshold
+                    * (OnStackReplacePercentage - InterpreterProfilePercentage)) / 100;
+       test_for_osr_md_counter_at(md, data, JumpData::taken_offset(), limit);
+     }
+@@ -379,34 +383,37 @@
+       increment_and_test_invocation_counter(limit);
+     }
+   }
++#endif // TIERED
+ 
+   // Restore the original bytecode.
+   set_bci(cur_bci);
+ }
+ 
+ //--------------------------profile_not_taken_branch---------------------------
+-void Parse::profile_not_taken_branch() {
+-  if (!method_data_update()) return;
++void Parse::profile_not_taken_branch(bool force_update) {
++
++  if (method_data_update() || force_update) {
++    ciMethodData* md = method()->method_data();
++    assert(md != NULL, "expected valid ciMethodData");
++    ciProfileData* data = md->bci_to_data(bci());
++    assert(data->is_BranchData(), "need BranchData for not taken branch");
++    increment_md_counter_at(md, data, BranchData::not_taken_offset());
++  }
+ 
+-  ciMethodData* md = method()->method_data();
+-  assert(md != NULL, "expected valid ciMethodData");
+-  ciProfileData* data = md->bci_to_data(bci());
+-  assert(data->is_BranchData(), "need BranchData for not taken branch");
+-  increment_md_counter_at(md, data, BranchData::not_taken_offset());
+ }
+ 
+ //---------------------------------profile_call--------------------------------
+ void Parse::profile_call(Node* receiver) {
+   if (!method_data_update()) return;
+ 
+-  profile_generic_call(); 
+- 
++  profile_generic_call();
++
+   switch (bc()) {
+   case Bytecodes::_invokevirtual:
+   case Bytecodes::_invokeinterface:
+     profile_receiver_type(receiver);
+     break;
+-  case Bytecodes::_invokestatic:  
++  case Bytecodes::_invokestatic:
+   case Bytecodes::_invokespecial:
+     break;
+   default: fatal("unexpected call bytecode");
+@@ -511,5 +518,3 @@
+     increment_md_counter_at(md, data, MultiBranchData::default_count_offset());
+   }
+ }
+-
+-
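+
+The first hunk above reworks Parse::method_data_addressing, which computes the
+address of a profile counter inside the methodDataOop. A rough standalone
+sketch of that offset arithmetic follows; the names are hypothetical and this
+is not the HotSpot code, just the same sum:
+
+    #include <cstdint>
+
+    // counter address = mdo base + data_offset + cell_offset + counter_offset
+    //                   (+ idx * stride for array-style counters)
+    static intptr_t md_counter_offset(intptr_t data_offset,    // start of data array
+                                      intptr_t cell_offset,    // ProfileData within array
+                                      intptr_t counter_offset, // field within ProfileData
+                                      intptr_t idx = 0,
+                                      intptr_t stride = 0) {
+      intptr_t off = data_offset + cell_offset + counter_offset;
+      if (stride != 0) off += idx * stride;  // mirrors the MulX/AddP node pair
+      return off;
+    }
+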
+diff -ruN openjdk6/hotspot/src/share/vm/opto/parse.hpp openjdk/hotspot/src/share/vm/opto/parse.hpp
+--- openjdk6/hotspot/src/share/vm/opto/parse.hpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/parse.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)parse.hpp	1.269 07/05/05 17:06:25 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class BytecodeParseHistogram;
+@@ -41,7 +38,7 @@
+   // Call-site count / interpreter invocation count, scaled recursively.
+   // Always between 0.0 and 1.0.  Represents the percentage of the method's
+   // total execution time used at this call site.
+-  const float _site_invoke_ratio; 
++  const float _site_invoke_ratio;
+   float compute_callee_frequency( int caller_bci ) const;
+ 
+   GrowableArray<InlineTree*> _subtrees;
+@@ -75,9 +72,9 @@
+ 
+   // InlineTree enum
+   enum InlineStyle {
+-    Inline_do_not_inline             =   0, // 
+-    Inline_cha_is_monomorphic        =   1, // 
+-    Inline_type_profile_monomorphic  =   2  // 
++    Inline_do_not_inline             =   0, //
++    Inline_cha_is_monomorphic        =   1, //
++    Inline_type_profile_monomorphic  =   2  //
+   };
+ 
+   // See if it is OK to inline.
+@@ -107,6 +104,7 @@
+   // Debug information collected during parse
+   uint        count_inlines()     const { return _count_inlines; };
+ #endif
++  GrowableArray<InlineTree*> subtrees() { return _subtrees; }
+ };
+ 
+ 
+@@ -132,6 +130,7 @@
+     Block**            _successors;
+ 
+     // Use init_node/init_graph to initialize Blocks.
++    // Block() : _live_locals((uintptr_t*)NULL,0) { ShouldNotReachHere(); }
+     Block() : _live_locals(NULL,0) { ShouldNotReachHere(); }
+ 
+    public:
+@@ -152,7 +151,7 @@
+     SafePointNode* start_map() const       { assert(is_merged(),"");   return _start_map; }
+     void set_start_map(SafePointNode* m)   { assert(!is_merged(), ""); _start_map = m; }
+ 
+-    // True after any predecessor flows control into this block 
++    // True after any predecessor flows control into this block
+     bool is_merged() const                 { return _start_map != NULL; }
+ 
+     // True when all non-exception predecessors have been parsed.
+@@ -430,9 +429,9 @@
+   bool can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass *klass);
+ 
+   // Helper function to identify inlining potential at call-site
+-  ciMethod* optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass, 
+-                              ciMethod *dest_method, const TypeInstPtr* receiver_type);
+-    
++  ciMethod* optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
++                              ciMethod *dest_method, const TypeOopPtr* receiver_type);
++
+   // Helper function to setup for type-profile based inlining
+   bool prepare_type_profile_inline(ciInstanceKlass* prof_klass, ciMethod* prof_method);
+ 
+@@ -470,6 +469,7 @@
+   void do_newarray(BasicType elemtype);
+   void do_anewarray();
+   void do_multianewarray();
++  Node* expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions);
+ 
+   // implementation of jsr/ret
+   void do_jsr();
+@@ -499,14 +499,14 @@
+   // helper functions for methodData style profiling
+   void test_counter_against_threshold(Node* cnt, int limit);
+   void increment_and_test_invocation_counter(int limit);
+-  void test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, int limit); 
+-  Node* method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0); 
+-  void increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0); 
+-  void set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant); 
++  void test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, int limit);
++  Node* method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
++  void increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
++  void set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant);
+ 
+   void profile_method_entry();
+-  void profile_taken_branch(int target_bci);
+-  void profile_not_taken_branch();
++  void profile_taken_branch(int target_bci, bool force_update = false);
++  void profile_not_taken_branch(bool force_update = false);
+   void profile_call(Node* receiver);
+   void profile_generic_call();
+   void profile_receiver_type(Node* receiver);
+@@ -532,7 +532,7 @@
+   // The call is either a Java call or the VM's rethrow stub
+   void catch_call_exceptions(ciExceptionHandlerStream&);
+ 
+-  // Handle all exceptions thrown by the inlined method.  
++  // Handle all exceptions thrown by the inlined method.
+   // Also handles exceptions for individual bytecodes.
+   void catch_inline_exceptions(SafePointNode* ex_map);
+ 
+@@ -553,4 +553,3 @@
+   void dump_bci(int bci);
+ #endif
+ };
+-
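+
+The parse.hpp hunk above gives profile_taken_branch and profile_not_taken_branch
+a force_update parameter defaulting to false, so existing call sites compile
+unchanged while the tiered code can bump MDO counters even when normal
+profiling is off. A minimal sketch of that gate, with the Parse machinery
+reduced to stubs (hypothetical names throughout):
+
+    #include <cstdio>
+
+    static bool g_profiling_enabled = false;  // stands in for method_data_update()
+    static long g_taken = 0;                  // stands in for the MDO taken counter
+
+    static void profile_taken_branch_sketch(bool force_update = false) {
+      if (g_profiling_enabled || force_update) {
+        ++g_taken;  // counted when profiling is on, or when forced
+      }
+    }
+
+    int main() {
+      profile_taken_branch_sketch();      // no-op: profiling off, not forced
+      profile_taken_branch_sketch(true);  // counted: the forced tiered path
+      std::printf("taken = %ld\n", g_taken);  // prints: taken = 1
+    }
+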
+diff -ruN openjdk6/hotspot/src/share/vm/opto/phase.cpp openjdk/hotspot/src/share/vm/opto/phase.cpp
+--- openjdk6/hotspot/src/share/vm/opto/phase.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/phase.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)phase.cpp	1.59 07/05/17 16:00:26 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -57,11 +54,11 @@
+ elapsedTimer Phase::_t_temporaryTimer1;
+ elapsedTimer Phase::_t_temporaryTimer2;
+ 
+-// Subtimers for _t_optimizer 
++// Subtimers for _t_optimizer
+ elapsedTimer   Phase::_t_iterGVN;
+ elapsedTimer   Phase::_t_iterGVN2;
+ 
+-// Subtimers for _t_registerAllocation 
++// Subtimers for _t_registerAllocation
+ elapsedTimer   Phase::_t_ctorChaitin;
+ elapsedTimer   Phase::_t_buildIFGphysical;
+ elapsedTimer   Phase::_t_computeLive;
+@@ -69,13 +66,13 @@
+ elapsedTimer   Phase::_t_postAllocCopyRemoval;
+ elapsedTimer   Phase::_t_fixupSpills;
+ 
+-// Subtimers for _t_output 
++// Subtimers for _t_output
+ elapsedTimer   Phase::_t_instrSched;
+ elapsedTimer   Phase::_t_buildOopMaps;
+ #endif
+ 
+ //------------------------------Phase------------------------------------------
+-Phase::Phase( PhaseNumber pnum ) : _pnum(pnum), C( pnum == Compiler ? NULL : Compile::current()) { 
++Phase::Phase( PhaseNumber pnum ) : _pnum(pnum), C( pnum == Compiler ? NULL : Compile::current()) {
+   // Poll for requests from the shutdown mechanism to quiesce the compiler (4448539, 4448544).
+   // This is an effective place to poll, since the compiler is full of phases.
+   // In particular, every inlining site uses a recursively created Parse phase.
+@@ -107,8 +104,8 @@
+     tty->print_cr ("      ccp          : %3.3f sec", Phase::_t_ccp.seconds());
+     tty->print_cr ("      iterGVN2     : %3.3f sec", Phase::_t_iterGVN2.seconds());
+     tty->print_cr ("      graphReshape : %3.3f sec", Phase::_t_graphReshaping.seconds());
+-    double optimizer_subtotal = Phase::_t_iterGVN.seconds() + 
+-      Phase::_t_idealLoop.seconds() + Phase::_t_ccp.seconds() + 
++    double optimizer_subtotal = Phase::_t_iterGVN.seconds() +
++      Phase::_t_idealLoop.seconds() + Phase::_t_ccp.seconds() +
+       Phase::_t_graphReshaping.seconds();
+     double percent_of_optimizer = ((optimizer_subtotal == 0.0) ? 0.0 : (optimizer_subtotal / Phase::_t_optimizer.seconds() * 100.0));
+     tty->print_cr ("      subtotal     : %3.3f sec,  %3.2f %%", optimizer_subtotal, percent_of_optimizer);
+@@ -123,8 +120,8 @@
+     tty->print_cr ("      regAllocSplit: %3.3f sec", Phase::_t_regAllocSplit.seconds());
+     tty->print_cr ("      postAllocCopyRemoval: %3.3f sec", Phase::_t_postAllocCopyRemoval.seconds());
+     tty->print_cr ("      fixupSpills  : %3.3f sec", Phase::_t_fixupSpills.seconds());
+-    double regalloc_subtotal = Phase::_t_ctorChaitin.seconds() + 
+-      Phase::_t_buildIFGphysical.seconds() + Phase::_t_computeLive.seconds() + 
++    double regalloc_subtotal = Phase::_t_ctorChaitin.seconds() +
++      Phase::_t_buildIFGphysical.seconds() + Phase::_t_computeLive.seconds() +
+       Phase::_t_regAllocSplit.seconds()    + Phase::_t_fixupSpills.seconds() +
+       Phase::_t_postAllocCopyRemoval.seconds();
+     double percent_of_regalloc = ((regalloc_subtotal == 0.0) ? 0.0 : (regalloc_subtotal / Phase::_t_registerAllocation.seconds() * 100.0));
+@@ -136,20 +133,20 @@
+   tty->print_cr ("    codeGen      : %3.3f sec", Phase::_t_codeGeneration.seconds());
+   tty->print_cr ("    install_code : %3.3f sec", Phase::_t_registerMethod.seconds());
+   tty->print_cr ("    ------------ : ----------");
+-  double phase_subtotal = Phase::_t_parser.seconds() + 
++  double phase_subtotal = Phase::_t_parser.seconds() +
+     (DoEscapeAnalysis ? Phase::_t_escapeAnalysis.seconds() : 0.0) +
+-    Phase::_t_optimizer.seconds() + Phase::_t_graphReshaping.seconds() + 
+-    Phase::_t_matcher.seconds() + Phase::_t_scheduler.seconds() + 
++    Phase::_t_optimizer.seconds() + Phase::_t_graphReshaping.seconds() +
++    Phase::_t_matcher.seconds() + Phase::_t_scheduler.seconds() +
+     Phase::_t_registerAllocation.seconds() + Phase::_t_removeEmptyBlocks.seconds() +
+-    Phase::_t_macroExpand.seconds() + Phase::_t_peephole.seconds() + 
++    Phase::_t_macroExpand.seconds() + Phase::_t_peephole.seconds() +
+     Phase::_t_codeGeneration.seconds() + Phase::_t_registerMethod.seconds();
+   double percent_of_method_compile = ((phase_subtotal == 0.0) ? 0.0 : phase_subtotal / Phase::_t_methodCompilation.seconds()) * 100.0;
+   // counters inside Compile::CodeGen include time for adapters and stubs
+   // so phase-total can be greater than 100%
+   tty->print_cr ("    total        : %3.3f sec,  %3.2f %%", phase_subtotal, percent_of_method_compile);
+ 
+-  assert( percent_of_method_compile > expected_method_compile_coverage || 
+-          phase_subtotal < minimum_meaningful_method_compile, 
++  assert( percent_of_method_compile > expected_method_compile_coverage ||
++          phase_subtotal < minimum_meaningful_method_compile,
+           "Must account for method compilation");
+ 
+   if( Phase::_t_temporaryTimer1.seconds() > minimum_reported_time ) {
+diff -ruN openjdk6/hotspot/src/share/vm/opto/phase.hpp openjdk/hotspot/src/share/vm/opto/phase.hpp
+--- openjdk6/hotspot/src/share/vm/opto/phase.hpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/phase.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)phase.hpp	1.53 07/05/17 16:00:29 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class Compile;
+@@ -30,7 +27,7 @@
+ //------------------------------Phase------------------------------------------
+ // Most optimizations are done in Phases.  Creating a phase does any long
+ // running analysis required, and caches the analysis in internal data
+-// structures.  Later the analysis is queried using transform() calls to 
++// structures.  Later the analysis is queried using transform() calls to
+ // guide transforming the program.  When the Phase is deleted, so is any
+ // cached analysis info.  This basic Phase class mostly contains timing and
+ // memory management code.
+@@ -91,11 +88,11 @@
+   static elapsedTimer _t_temporaryTimer1;
+   static elapsedTimer _t_temporaryTimer2;
+ 
+-// Subtimers for _t_optimizer 
++// Subtimers for _t_optimizer
+   static elapsedTimer   _t_iterGVN;
+   static elapsedTimer   _t_iterGVN2;
+ 
+-// Subtimers for _t_registerAllocation 
++// Subtimers for _t_registerAllocation
+   static elapsedTimer   _t_ctorChaitin;
+   static elapsedTimer   _t_buildIFGphysical;
+   static elapsedTimer   _t_computeLive;
+@@ -103,7 +100,7 @@
+   static elapsedTimer   _t_postAllocCopyRemoval;
+   static elapsedTimer   _t_fixupSpills;
+ 
+-// Subtimers for _t_output 
++// Subtimers for _t_output
+   static elapsedTimer   _t_instrSched;
+   static elapsedTimer   _t_buildOopMaps;
+ #endif
+@@ -114,4 +111,3 @@
+   static void print_timers();
+ #endif
+ };
+-
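+
+The phase.cpp hunks above keep the same reporting pattern throughout
+print_timers: sum a group of subtimers, then print the subtotal and its share
+of the parent timer, guarding the division when nothing was measured. A
+reduced sketch of that pattern, with hypothetical numbers and names:
+
+    #include <cstdio>
+
+    static double percent_of(double subtotal, double total) {
+      // Matches the source's guard: report 0% when the subtotal is zero
+      // (which also covers a zero parent timer) instead of dividing by zero.
+      return (subtotal == 0.0) ? 0.0 : (subtotal / total) * 100.0;
+    }
+
+    int main() {
+      double iterGVN = 0.12, idealLoop = 0.30, ccp = 0.05, reshape = 0.03;
+      double optimizer_total = 0.55;  // hypothetical _t_optimizer reading
+      double subtotal = iterGVN + idealLoop + ccp + reshape;
+      std::printf("subtotal : %3.3f sec,  %3.2f %%\n",
+                  subtotal, percent_of(subtotal, optimizer_total));
+    }
+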
+diff -ruN openjdk6/hotspot/src/share/vm/opto/phaseX.cpp openjdk/hotspot/src/share/vm/opto/phaseX.cpp
+--- openjdk6/hotspot/src/share/vm/opto/phaseX.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/phaseX.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)phaseX.cpp	1.261 07/05/05 17:06:24 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -31,13 +28,13 @@
+ //=============================================================================
+ #define NODE_HASH_MINIMUM_SIZE    255
+ //------------------------------NodeHash---------------------------------------
+-NodeHash::NodeHash(uint est_max_size) : 
++NodeHash::NodeHash(uint est_max_size) :
+   _max( round_up(est_max_size < NODE_HASH_MINIMUM_SIZE ? NODE_HASH_MINIMUM_SIZE : est_max_size) ),
+   _a(Thread::current()->resource_area()),
+-  _table( NEW_ARENA_ARRAY( _a , Node* , _max ) ), // (Node**)_a->Amalloc(_max * sizeof(Node*)) ), 
++  _table( NEW_ARENA_ARRAY( _a , Node* , _max ) ), // (Node**)_a->Amalloc(_max * sizeof(Node*)) ),
+   _inserts(0), _insert_limit( insert_limit() ),
+-  _look_probes(0), _lookup_hits(0), _lookup_misses(0), 
+-  _total_insert_probes(0), _total_inserts(0), 
++  _look_probes(0), _lookup_hits(0), _lookup_misses(0),
++  _total_insert_probes(0), _total_inserts(0),
+   _insert_probes(0), _grows(0) {
+   // _sentinel must be in the current node space
+   _sentinel = new (Compile::current(), 1) ProjNode(NULL, TypeFunc::Control);
+@@ -45,14 +42,14 @@
+ }
+ 
+ //------------------------------NodeHash---------------------------------------
+-NodeHash::NodeHash(Arena *arena, uint est_max_size) : 
++NodeHash::NodeHash(Arena *arena, uint est_max_size) :
+   _max( round_up(est_max_size < NODE_HASH_MINIMUM_SIZE ? NODE_HASH_MINIMUM_SIZE : est_max_size) ),
+   _a(arena),
+-  _table( NEW_ARENA_ARRAY( _a , Node* , _max ) ), 
++  _table( NEW_ARENA_ARRAY( _a , Node* , _max ) ),
+   _inserts(0), _insert_limit( insert_limit() ),
+-  _look_probes(0), _lookup_hits(0), _lookup_misses(0), 
+-  _delete_probes(0), _delete_hits(0), _delete_misses(0), 
+-  _total_insert_probes(0), _total_inserts(0), 
++  _look_probes(0), _lookup_hits(0), _lookup_misses(0),
++  _delete_probes(0), _delete_hits(0), _delete_misses(0),
++  _total_insert_probes(0), _total_inserts(0),
+   _insert_probes(0), _grows(0) {
+   // _sentinel must be in the current node space
+   _sentinel = new (Compile::current(), 1) ProjNode(NULL, TypeFunc::Control);
+@@ -76,7 +73,7 @@
+     debug_only( _lookup_misses++ );
+     return NULL;
+   }
+-  uint key = hash & (_max-1); 
++  uint key = hash & (_max-1);
+   uint stride = key | 0x01;
+   debug_only( _look_probes++ );
+   Node *k = _table[key];        // Get hashed value
+@@ -84,7 +81,7 @@
+     debug_only( _lookup_misses++ );
+     return NULL;                // Miss!
+   }
+-  
++
+   int op = n->Opcode();
+   uint req = n->req();
+   while( 1 ) {                  // While probing hash table
+@@ -121,7 +118,7 @@
+     debug_only( _lookup_misses++ );
+     return NULL;
+   }
+-  uint key = hash & (_max-1); 
++  uint key = hash & (_max-1);
+   uint stride = key | 0x01;     // stride must be relatively prime to table size
+   uint first_sentinel = 0;      // replace a sentinel if seen.
+   debug_only( _look_probes++ );
+@@ -136,7 +133,7 @@
+   else if( k == _sentinel ) {
+     first_sentinel = key;      // Can insert here
+   }
+-  
++
+   int op = n->Opcode();
+   uint req = n->req();
+   while( 1 ) {                  // While probing hash table
+@@ -165,7 +162,7 @@
+     else if( first_sentinel == 0 && k == _sentinel ) {
+       first_sentinel = key;    // Can insert here
+     }
+-      
++
+   }
+   ShouldNotReachHere();
+   return NULL;
+@@ -182,9 +179,9 @@
+     return;
+   }
+   check_grow();
+-  uint key = hash & (_max-1); 
++  uint key = hash & (_max-1);
+   uint stride = key | 0x01;
+-  
++
+   while( 1 ) {                  // While probing hash table
+     debug_only( _insert_probes++ );
+     Node *k = _table[key];      // Get hashed value
+@@ -207,7 +204,7 @@
+     debug_only( _delete_misses++ );
+     return false;
+   }
+-  uint key = hash & (_max-1); 
++  uint key = hash & (_max-1);
+   uint stride = key | 0x01;
+   debug_only( uint counter = 0; );
+   for( ; /* (k != NULL) && (k != _sentinel) */; ) {
+@@ -308,7 +305,7 @@
+   }
+ }
+ 
+-#ifndef PRODUCT  
++#ifndef PRODUCT
+ //------------------------------dump-------------------------------------------
+ // Dump statistics for the hash table
+ void NodeHash::dump() {
+@@ -317,7 +314,7 @@
+   if( PrintCompilation && PrintOptoStatistics && Verbose && (_inserts > 0) ) { // PrintOptoGVN
+     if( PrintCompilation2 ) {
+       for( uint i=0; i<_max; i++ )
+-      if( _table[i] ) 
++      if( _table[i] )
+         tty->print("%d/%d/%d ",i,_table[i]->hash()&(_max-1),_table[i]->_idx);
+     }
+     tty->print("\nGVN Hash stats:  %d grows to %d max_size\n", _grows, _max);
+@@ -403,7 +400,7 @@
+ 
+ //=============================================================================
+ //------------------------------PhaseTransform---------------------------------
+-PhaseTransform::PhaseTransform( PhaseNumber pnum ) : Phase(pnum), 
++PhaseTransform::PhaseTransform( PhaseNumber pnum ) : Phase(pnum),
+   _arena(Thread::current()->resource_area()),
+   _nodes(_arena),
+   _types(_arena)
+@@ -419,7 +416,7 @@
+ }
+ 
+ //------------------------------PhaseTransform---------------------------------
+-PhaseTransform::PhaseTransform( Arena *arena, PhaseNumber pnum ) : Phase(pnum), 
++PhaseTransform::PhaseTransform( Arena *arena, PhaseNumber pnum ) : Phase(pnum),
+   _arena(arena),
+   _nodes(arena),
+   _types(arena)
+@@ -436,7 +433,7 @@
+ 
+ //------------------------------PhaseTransform---------------------------------
+ // Initialize with previously generated type information
+-PhaseTransform::PhaseTransform( PhaseTransform *pt, PhaseNumber pnum ) : Phase(pnum), 
++PhaseTransform::PhaseTransform( PhaseTransform *pt, PhaseNumber pnum ) : Phase(pnum),
+   _arena(pt->_arena),
+   _nodes(pt->_nodes),
+   _types(pt->_types)
+@@ -553,7 +550,7 @@
+   if( PrintCompilation && Verbose && WizardMode ) {
+     tty->print("\n%sValues: %d nodes ---> %d/%d (%d)",
+       is_IterGVN() ? "Iter" : "    ", C->unique(), made_progress(), made_transforms(), made_new_values());
+-    if( made_transforms() != 0 ) { 
++    if( made_transforms() != 0 ) {
+       tty->print_cr("  ratio %f", made_progress()/(float)made_transforms() );
+     } else {
+       tty->cr();
+@@ -567,7 +564,7 @@
+   assert(t->singleton(), "must be a constant");
+   assert(!t->empty() || t == Type::TOP, "must not be vacuous range");
+   switch (t->base()) {  // fast paths
+-  case Type::Half: 
++  case Type::Half:
+   case Type::Top:  return (ConNode*) C->top();
+   case Type::Int:  return intcon( t->is_int()->get_con() );
+   case Type::Long: return longcon( t->is_long()->get_con() );
+@@ -619,7 +616,7 @@
+ }
+ 
+ //------------------------------longcon----------------------------------------
+-// Fast long constant.  
++// Fast long constant.
+ ConLNode* PhaseTransform::longcon(jlong l) {
+   // Small integer?  Check cache! Check that cached node is not dead
+   if (l >= _lcon_min && l <= _lcon_max) {
+@@ -651,12 +648,12 @@
+ //=============================================================================
+ //------------------------------transform--------------------------------------
+ // Return a node which computes the same function as this node, but in a
+-// faster or cheaper fashion.  The Node passed in here must have no other 
++// faster or cheaper fashion.  The Node passed in here must have no other
+ // pointers to it, as its storage will be reclaimed if the Node can be
+ // optimized away.
+ Node *PhaseGVN::transform( Node *n ) {
+   NOT_PRODUCT( set_transforms(); )
+-  
++
+   // Apply the Ideal call in a loop until it no longer applies
+   Node *k = n;
+   NOT_PRODUCT( uint loop_count = 0; )
+@@ -728,7 +725,7 @@
+ 
+ //------------------------------transform--------------------------------------
+ // Return a node which computes the same function as this node, but
+-// in a faster or cheaper fashion.  
++// in a faster or cheaper fashion.
+ Node *PhaseGVN::transform_no_reclaim( Node *n ) {
+   NOT_PRODUCT( set_transforms(); )
+ 
+@@ -827,14 +824,14 @@
+ 
+ //------------------------------PhaseIterGVN-----------------------------------
+ // Initialize with previous PhaseIterGVN info; used by PhaseCCP
+-PhaseIterGVN::PhaseIterGVN( PhaseIterGVN *igvn ) : PhaseGVN(igvn), 
++PhaseIterGVN::PhaseIterGVN( PhaseIterGVN *igvn ) : PhaseGVN(igvn),
+   _worklist( igvn->_worklist )
+ {
+ }
+ 
+ //------------------------------PhaseIterGVN-----------------------------------
+ // Initialize with previous PhaseGVN info from Parser
+-PhaseIterGVN::PhaseIterGVN( PhaseGVN *gvn ) : PhaseGVN(gvn), 
++PhaseIterGVN::PhaseIterGVN( PhaseGVN *gvn ) : PhaseGVN(gvn),
+   _worklist(*C->for_igvn())
+ {
+   uint max;
+@@ -853,7 +850,7 @@
+   }
+ 
+   // Any Phis or Regions on the worklist probably had uses that could not
+-  // make more progress because the uses were made while the Phis and Regions 
++  // make more progress because the uses were made while the Phis and Regions
+   // were in half-built states.  Put all uses of Phis and Regions on worklist.
+   max = _worklist.size();
+   for( uint j = 0; j < max; j++ ) {
+@@ -919,7 +916,7 @@
+   }
+ #endif
+ 
+-  // Pull from worklist; transform node; 
++  // Pull from worklist; transform node;
+   // If node has changed: update edge info and put uses on worklist.
+   while( _worklist.size() ) {
+     Node *n  = _worklist.pop();
+@@ -1004,7 +1001,7 @@
+       igvn2.set_allow_progress(false);
+       igvn2.optimize();
+       igvn2.set_allow_progress(true);
+-    } 
++    }
+   }
+   if ( VerifyIterativeGVN && PrintOpto ) {
+     if ( _verify_counter == _verify_full_passes )
+@@ -1085,7 +1082,7 @@
+     assert((i->_idx >= k->_idx) || i->is_top(), "Idealize should return new nodes, use Identity to return old nodes");
+     // Made a change; put users of original Node on worklist
+     add_users_to_worklist( k );
+-    // Replacing root of transform tree?  
++    // Replacing root of transform tree?
+     if( k != i ) {
+       // Make users of old Node now use new.
+       subsume_node( k, i );
+@@ -1107,7 +1104,7 @@
+   // See what kind of values 'k' takes on at runtime
+   const Type *t = k->Value(this);
+   assert(t != NULL, "value sanity");
+-  
++
+   // Since I just called 'Value' to compute the set of run-time values
+   // for this Node, and 'Value' is non-local (and therefore expensive) I'll
+   // cache Value.  Later requests for the local phase->type of this Node can
+@@ -1129,7 +1126,7 @@
+     add_users_to_worklist( k );
+     subsume_node( k, con );     // Everybody using k now uses con
+     return con;
+-  } 
++  }
+ 
+   // Now check for Identities
+   i = k->Identity(this);        // Look for a nearby replacement
+@@ -1179,7 +1176,7 @@
+         dead->set_req(i,NULL);  // Kill the edge
+         if (in->outcnt() == 0 && in != C->top()) {// Made input go dead?
+           remove_dead_node(in); // Recursively remove
+-        } else if (in->outcnt() == 1 && 
++        } else if (in->outcnt() == 1 &&
+                    in->has_special_unique_user()) {
+           _worklist.push(in->unique_out());
+         } else if (in->outcnt() <= 2 && dead->is_Phi()) {
+@@ -1275,7 +1272,7 @@
+ 
+     if( use->is_Multi() ||      // Multi-definer?  Push projs on worklist
+         use->is_Store() )       // Enable store/load same address
+-      add_users_to_worklist0(use); 
++      add_users_to_worklist0(use);
+ 
+     // If we changed the receiver type to a call, we need to revisit
+     // the Catch following the call.  It's looking for a non-NULL
+@@ -1284,7 +1281,7 @@
+     if (use->is_CallDynamicJava() && n == use->in(TypeFunc::Parms)) {
+       Node* p = use->as_CallDynamicJava()->proj_out(TypeFunc::Control);
+       if (p != NULL) {
+-        add_users_to_worklist0(p); 
++        add_users_to_worklist0(p);
+       }
+     }
+ 
+@@ -1309,7 +1306,7 @@
+         }
+       }
+     }
+-  
++
+     uint use_op = use->Opcode();
+     // If changed Cast input, check Phi users for simple cycles
+     if( use->is_ConstraintCast() || use->Opcode() == Op_CheckCastPP ) {
+@@ -1335,6 +1332,18 @@
+           _worklist.push(u);
+       }
+     }
++    // If changed initialization activity, check dependent Stores
++    if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
++      InitializeNode* init = use->as_Allocate()->initialization();
++      if (init != NULL) {
++        Node* imem = init->proj_out(TypeFunc::Memory);
++        if (imem != NULL)  add_users_to_worklist0(imem);
++      }
++    }
++    if (use_op == Op_Initialize) {
++      Node* imem = use->as_Initialize()->proj_out(TypeFunc::Memory);
++      if (imem != NULL)  add_users_to_worklist0(imem);
++    }
+   }
+ }
+ 
+@@ -1444,7 +1453,7 @@
+ // Convert any of his old-space children into new-space children.
+ Node *PhaseCCP::transform( Node *n ) {
+   Node *new_node = _nodes[n->_idx]; // Check for transformed node
+-  if( new_node != NULL ) 
++  if( new_node != NULL )
+     return new_node;                // Been there, done that, return old answer
+   new_node = transform_once(n);     // Check for constant
+   _nodes.map( n->_idx, new_node );  // Flag as having been cloned
+@@ -1487,8 +1496,8 @@
+         set_type(C->top(), Type::TOP);
+       }
+       nn = C->top();
+-    } 
+-    if( !n->is_Con() ) { 
++    }
++    if( !n->is_Con() ) {
+       if( t != Type::TOP ) {
+         nn = makecon(t);        // ConNode::make(t);
+         NOT_PRODUCT( inc_constants(); )
+@@ -1725,7 +1734,7 @@
+ //=============================================================================
+ //-----------------------------------------------------------------------------
+ void Type_Array::grow( uint i ) {
+-  if( !_max ) { 
++  if( !_max ) {
+     _max = 1;
+     _types = (const Type**)_a->Amalloc( _max * sizeof(Type*) );
+     _types[0] = NULL;
+diff -ruN openjdk6/hotspot/src/share/vm/opto/phaseX.hpp openjdk/hotspot/src/share/vm/opto/phaseX.hpp
+--- openjdk6/hotspot/src/share/vm/opto/phaseX.hpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/phaseX.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)phaseX.hpp	1.119 07/05/05 17:06:26 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class Compile;
+@@ -87,7 +84,7 @@
+ 
+   Node  *sentinel() { return _sentinel; }
+ 
+-#ifndef PRODUCT  
++#ifndef PRODUCT
+   Node  *find_index(uint idx);  // For debugging
+   void   dump();                // For debugging, dump statistics
+ #endif
+@@ -107,7 +104,7 @@
+ //-----------------------------------------------------------------------------
+ // Map dense integer indices to Types.  Uses classic doubling-array trick.
+ // Abstractly provides an infinite array of Type*'s, initialized to NULL.
+-// Note that the constructor just zeros things, and since I use Arena 
++// Note that the constructor just zeros things, and since I use Arena
+ // allocation I do not need a destructor to reclaim storage.
+ // Despite the general name, this class is customized for use by PhaseTransform.
+ class Type_Array : public StackObj {
+@@ -146,7 +143,7 @@
+ 
+ //------------------------------PhaseTransform---------------------------------
+ // Phases that analyze, then transform.  Constructing the Phase object does any
+-// global or slow analysis.  The results are cached later for a fast 
++// global or slow analysis.  The results are cached later for a fast
+ // transformation pass.  When the Phase object is deleted the cached analysis
+ // results are deleted.
+ class PhaseTransform : public Phase {
+@@ -240,10 +237,10 @@
+   ConNode* zerocon(BasicType bt);
+ 
+   // Return a node which computes the same function as this node, but
+-  // in a faster or cheaper fashion.  
++  // in a faster or cheaper fashion.
+   virtual Node *transform( Node *n ) = 0;
+ 
+-  // Return whether two Nodes are equivalent.  
++  // Return whether two Nodes are equivalent.
+   // Must not be recursive, since the recursive version is built from this.
+   // For pessimistic optimizations this is simply pointer equivalence.
+   bool eqv(const Node* n1, const Node* n2) const { return n1 == n2; }
+@@ -374,7 +371,7 @@
+   PhaseGVN( PhaseGVN *gvn, const char *dummy ) : PhaseValues( gvn, dummy ) {}
+ 
+   // Return a node which computes the same function as this node, but
+-  // in a faster or cheaper fashion.  
++  // in a faster or cheaper fashion.
+   Node  *transform( Node *n );
+   Node  *transform_no_reclaim( Node *n );
+ 
+@@ -406,11 +403,11 @@
+   PhaseIterGVN( PhaseGVN *gvn ); // Used after Parser
+   PhaseIterGVN( PhaseIterGVN *igvn, const char *dummy ); // Used after +VerifyOpto
+ 
+-  virtual PhaseIterGVN *is_IterGVN() { return this; } 
++  virtual PhaseIterGVN *is_IterGVN() { return this; }
+ 
+-  Unique_Node_List _worklist;       // Iterative worklist 
++  Unique_Node_List _worklist;       // Iterative worklist
+ 
+-  // Given def-use info and an initial worklist, apply Node::Ideal, 
++  // Given def-use info and an initial worklist, apply Node::Ideal,
+   // Node::Value, Node::Identity, hash-based value numbering, Node::Ideal_DU
+   // and dominator info to a fixed point.
+   void optimize();
+@@ -517,4 +514,3 @@
+   static void print_statistics();
+ #endif
+ };
+-
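+
+The phaseX.cpp hunks above touch NodeHash, whose lookup uses open addressing
+over a power-of-two table: the start slot is hash & (_max-1) and the stride is
+that key with the low bit forced on, so it is odd and therefore relatively
+prime to the table size; the probe sequence visits every slot before it
+repeats. A self-contained sketch of just that probe walk (not the HotSpot
+code; names are hypothetical):
+
+    #include <cstdint>
+    #include <cstdio>
+
+    static void dump_probe_sequence(uint32_t hash, uint32_t max /* power of 2 */) {
+      uint32_t key    = hash & (max - 1);  // start slot
+      uint32_t stride = key | 0x01;        // odd => coprime to a power-of-two max
+      for (uint32_t i = 0; i < max; i++) {
+        std::printf("%u ", key);
+        key = (key + stride) & (max - 1);  // wrap around the table
+      }
+      std::printf("\n");
+    }
+
+    int main() {
+      dump_probe_sequence(0x9e3779b9u, 8);  // prints each of the 8 slots once
+    }
+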
+diff -ruN openjdk6/hotspot/src/share/vm/opto/postaloc.cpp openjdk/hotspot/src/share/vm/opto/postaloc.cpp
+--- openjdk6/hotspot/src/share/vm/opto/postaloc.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/postaloc.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)postaloc.cpp	1.83 07/05/05 17:06:28 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -54,7 +51,7 @@
+   const int limit = 60;
+   int i;
+   for( i=0; i < limit; i++ ) {
+-    if( def->is_Proj() && def->in(0)->is_Start() && 
++    if( def->is_Proj() && def->in(0)->is_Start() &&
+         _matcher.is_save_on_entry(lrgs(n2lidx(def)).reg()) )
+       return true;              // Direct use of callee-save proj
+     if( def->is_Copy() )        // Copies carry value through
+@@ -63,7 +60,7 @@
+       def = def->in(1);
+     else
+       break;
+-    guarantee(def != NULL, "must not resurrect dead copy"); 
++    guarantee(def != NULL, "must not resurrect dead copy");
+   }
+   // If we reached the end and didn't find a callee save proj
+   // then this may be a callee save proj so we return true
+@@ -111,7 +108,7 @@
+ 
+   // Not every pair of physical registers are assignment compatible,
+   // e.g. on sparc floating point registers are not assignable to integer
+-  // registers.  
++  // registers.
+   const LRG &def_lrg = lrgs(n2lidx(def));
+   OptoReg::Name def_reg = def_lrg.reg();
+   const RegMask &use_mask = n->in_RegMask(idx);
+@@ -157,7 +154,7 @@
+   int idx = c->is_Copy();
+   uint is_oop = lrgs(n2lidx(c))._is_oop;
+   while (idx != 0) {
+-    guarantee(c->in(idx) != NULL, "must not resurrect dead copy"); 
++    guarantee(c->in(idx) != NULL, "must not resurrect dead copy");
+     if (lrgs(n2lidx(c->in(idx)))._is_oop != is_oop)
+       break;  // casting copy, not the same value
+     c = c->in(idx);
+@@ -179,7 +176,7 @@
+   int idx;
+   while( (idx=x->is_Copy()) != 0 ) {
+     Node *copy = x->in(idx);
+-    guarantee(copy != NULL, "must not resurrect dead copy"); 
++    guarantee(copy != NULL, "must not resurrect dead copy");
+     if( lrgs(n2lidx(copy)).reg() != nk_reg ) break;
+     blk_adjust += use_prior_register(n,k,copy,current_block,value,regnd);
+     if( n->in(k) != copy ) break; // Failed for some cutout?
+@@ -187,10 +184,10 @@
+   }
+ 
+   // Phis and 2-address instructions cannot change registers so easily - their
+-  // outputs must match their input.  
++  // outputs must match their input.
+   if( !can_change_regs )
+     return blk_adjust;          // Only check stupid copies!
+-    
++
+   // Loop backedges won't have a value-mapping yet
+   if( &value == NULL ) return blk_adjust;
+ 
+@@ -205,7 +202,7 @@
+   bool single = is_single_register(val->ideal_reg());
+   uint val_idx = n2lidx(val);
+   OptoReg::Name val_reg = lrgs(val_idx).reg();
+-  
++
+   // See if it happens to already be in the correct register!
+   // (either Phi's direct register, or the common case of the name
+   // never-clobbered original-def register)
+@@ -221,7 +218,7 @@
+   // using a register to using the stack unless we know we can remove a
+   // copy-load.  Otherwise we might end up making a pile of Intel cisc-spill
+   // ops reading from memory instead of just loading once and using the
+-  // register. 
++  // register.
+ 
+   // Also handle duplicate copies here.
+   const Type *t = val->is_Con() ? val->bottom_type() : NULL;
+@@ -261,7 +258,7 @@
+                                               OptoReg::Name nreg, OptoReg::Name nreg2) {
+   if (value[nreg] != val && val->is_Con() &&
+       value[nreg] != NULL && value[nreg]->is_Con() &&
+-      (nreg2 == OptoReg::Bad || value[nreg] == value[nreg2]) && 
++      (nreg2 == OptoReg::Bad || value[nreg] == value[nreg2]) &&
+       value[nreg]->bottom_type() == val->bottom_type() &&
+       value[nreg]->as_Mach()->rule() == val->as_Mach()->rule()) {
+     // This code assumes that two MachNodes representing constants
+@@ -293,10 +290,10 @@
+ 
+ 
+ //------------------------------post_allocate_copy_removal---------------------
+-// Post-Allocation peephole copy removal.  We do this in 1 pass over the 
+-// basic blocks.  We maintain a mapping of registers to Nodes (an  array of 
+-// Nodes indexed by machine register or stack slot number).  NULL means that a 
+-// register is not mapped to any Node.  We can (want to have!) have several 
++// Post-Allocation peephole copy removal.  We do this in 1 pass over the
++// basic blocks.  We maintain a mapping of registers to Nodes (an array of
++// Nodes indexed by machine register or stack slot number).  NULL means that a
++// register is not mapped to any Node.  We can (and want to!) have several
+ // registers map to the same Node.  We walk forward over the instructions
+ // updating the mapping as we go.  At merge points we force a NULL if we have
+ // to merge 2 different Nodes into the same register.  Phi functions will give
+@@ -304,9 +301,9 @@
+ // arranged in some RPO, we will visit all parent blocks before visiting any
+ // successor blocks (except at loops).
+ //
+-// If we find a Copy we look to see if the Copy's source register is a stack 
+-// slot and that value has already been loaded into some machine register; if 
+-// so we use machine register directly.  This turns a Load into a reg-reg 
++// If we find a Copy we look to see if the Copy's source register is a stack
++// slot and that value has already been loaded into some machine register; if
++// so we use the machine register directly.  This turns a Load into a reg-reg
+ // Move.  We also look for reloads of identical constants.
+ //
+ // When we see a use from a reg-reg Copy, we will attempt to use the copy's
+@@ -460,13 +457,13 @@
+     for( j = phi_dex; j < b->_nodes.size(); j++ ) {
+       Node *n = b->_nodes[j];
+ 
+-      if( n->outcnt() == 0 &&   // Dead? 
+-          n != C->top() &&      // (ignore TOP, it has no du info) 
+-          !n->is_Proj() ) {     // fat-proj kills 
+-        j -= yank_if_dead(n,b,&value,&regnd); 
+-        continue; 
+-      } 
+-      
++      if( n->outcnt() == 0 &&   // Dead?
++          n != C->top() &&      // (ignore TOP, it has no du info)
++          !n->is_Proj() ) {     // fat-proj kills
++        j -= yank_if_dead(n,b,&value,&regnd);
++        continue;
++      }
++
+       // Improve reaching-def info.  Occasionally post-alloc's liveness gives
+       // up (at loop backedges, because we aren't doing a full flow pass).
+       // The presence of a live use essentially asserts that the use's def is
+@@ -477,7 +474,7 @@
+         Node *def = n->in(k);   // n->in(k) is a USE; def is the DEF for this USE
+         guarantee(def != NULL, "no disconnected nodes at this point");
+         uint useidx = n2lidx(def); // useidx is the live range index for this USE
+-        
++
+         if( useidx ) {
+           OptoReg::Name ureg = lrgs(useidx).reg();
+           if( !value[ureg] ) {
+@@ -503,9 +500,9 @@
+       }
+ 
+       const uint two_adr = n->is_Mach() ? n->as_Mach()->two_adr() : 0;
+-      
++
+       // Remove copies along input edges
+-      for( k = 1; k < n->req(); k++ ) 
++      for( k = 1; k < n->req(); k++ )
+         j -= elide_copy( n, k, b, value, regnd, two_adr!=k );
+ 
+       // Unallocated Nodes define no registers
+@@ -517,7 +514,7 @@
+       // Skip through all copies to the _value_ being defined.
+       // Do not change from int to pointer
+       Node *val = skip_copies(n);
+-      
++
+       uint n_ideal_reg = n->ideal_reg();
+       if( is_single_register(n_ideal_reg) ) {
+         // If Node 'n' does not change the value mapped by the register,
+@@ -567,7 +564,7 @@
+           j -= yank_if_dead(n,b,&value,&regnd);
+         }
+       }
+-      
++
+       // Fat projections kill many registers
+       if( n_ideal_reg == MachProjNode::fat_proj ) {
+         RegMask rm = n->out_RegMask();
+@@ -585,5 +582,3 @@
+ 
+   } // End for all blocks
+ }
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/opto/regalloc.cpp openjdk/hotspot/src/share/vm/opto/regalloc.cpp
+--- openjdk6/hotspot/src/share/vm/opto/regalloc.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/regalloc.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)regalloc.cpp	1.28 07/05/05 17:06:27 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -34,7 +31,7 @@
+ int PhaseRegAlloc::_num_allocators = 0;
+ #ifndef PRODUCT
+ int PhaseRegAlloc::_total_framesize = 0;
+-int PhaseRegAlloc::_max_framesize = 0; 
++int PhaseRegAlloc::_max_framesize = 0;
+ #endif
+ 
+ PhaseRegAlloc::PhaseRegAlloc( uint unique, PhaseCFG &cfg,
+@@ -67,7 +64,7 @@
+   // OptoReg::reg2stack(reg), in order to avoid asserts in the latter
+   // function.  This routine must remain unchecked, so that dump_frame()
+   // can do its work undisturbed.
+-  // %%% not really clear why reg2stack would assert here 
++  // %%% not really clear why reg2stack would assert here
+ 
+   return slot*VMRegImpl::stack_slot_size;
+ }
+@@ -87,10 +84,10 @@
+ //------------------------------offset2reg-------------------------------------
+ OptoReg::Name PhaseRegAlloc::offset2reg(int stk_offset) const {
+   int slot = stk_offset / jintSize;
+-  int reg = (slot < (int) _framesize) 
++  int reg = (slot < (int) _framesize)
+     ? slot + _matcher._new_SP
+     : OptoReg::stack2reg(slot) - _framesize;
+-  assert(stk_offset == reg2offset((OptoReg::Name) reg), 
++  assert(stk_offset == reg2offset((OptoReg::Name) reg),
+          "offset2reg does not invert properly");
+   return (OptoReg::Name) reg;
+ }
+@@ -128,4 +125,3 @@
+   }
+ }
+ #endif
+-
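+
+The regalloc.cpp hunk above includes the assert that offset2reg inverts
+reg2offset exactly. A toy round-trip of the same contract, with the
+register/stack split dropped and the slot size assumed to be 4 bytes
+(simplified, not the HotSpot mapping):
+
+    #include <cassert>
+
+    static const int kSlotSize = 4;  // stands in for jintSize
+
+    static int slot2offset(int slot)   { return slot * kSlotSize; }
+    static int offset2slot(int offset) { return offset / kSlotSize; }
+
+    int main() {
+      for (int slot = 0; slot < 16; slot++) {
+        // Mirrors "offset2reg does not invert properly": the round trip
+        // must be the identity for every slot.
+        assert(offset2slot(slot2offset(slot)) == slot);
+      }
+    }
+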
+diff -ruN openjdk6/hotspot/src/share/vm/opto/regalloc.hpp openjdk/hotspot/src/share/vm/opto/regalloc.hpp
+--- openjdk6/hotspot/src/share/vm/opto/regalloc.hpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/regalloc.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)regalloc.hpp	1.23 07/05/05 17:06:28 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class Node;
+@@ -114,7 +111,7 @@
+     assert( !OptoReg::is_valid(second) || second == first+1, "" );
+     assert(OptoReg::is_reg(first), "out of range");
+     return Matcher::_regEncode[first];
+-  }  
++  }
+ 
+   // Platform dependent hook for actions prior to allocation
+   void  pd_preallocate_hook();
+diff -ruN openjdk6/hotspot/src/share/vm/opto/regmask.cpp openjdk/hotspot/src/share/vm/opto/regmask.cpp
+--- openjdk6/hotspot/src/share/vm/opto/regmask.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/regmask.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)regmask.cpp	1.62 07/05/05 17:06:25 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -63,7 +60,7 @@
+ // Find highest 1, or return 32 if empty
+ int find_hihghest_bit( uint32 mask ) {
+   int n = 0;
+-  if( mask > 0xffff ) { 
++  if( mask > 0xffff ) {
+     mask >>= 16;
+     n += 16;
+   }
+@@ -122,7 +119,7 @@
+     if( _A[i] ) {               // Found some bits
+       int bit = _A[i] & -_A[i]; // Extract low bit
+       // Convert to bit number, return hi bit in pair
+-      return OptoReg::Name((i<<_LogWordBits)+find_lowest_bit(bit)+1); 
++      return OptoReg::Name((i<<_LogWordBits)+find_lowest_bit(bit)+1);
+     }
+   }
+   return OptoReg::Bad;
+@@ -228,8 +225,8 @@
+ uint RegMask::Size() const {
+   extern uint8 bitsInByte[256];
+   uint sum = 0;
+-  for( int i = 0; i < RM_SIZE; i++ ) 
+-    sum += 
++  for( int i = 0; i < RM_SIZE; i++ )
++    sum +=
+       bitsInByte[(_A[i]>>24) & 0xff] +
+       bitsInByte[(_A[i]>>16) & 0xff] +
+       bitsInByte[(_A[i]>> 8) & 0xff] +
+@@ -252,9 +249,9 @@
+     // Now I have printed an initial register.
+     // Print adjacent registers as "rX-rZ" instead of "rX,rY,rZ".
+     // Begin looping over the remaining registers.
+-    while( 1 ) {                // 
++    while( 1 ) {                //
+       OptoReg::Name reg = rm.find_first_elem(); // Get a register
+-      if( !OptoReg::is_valid(reg) ) 
++      if( !OptoReg::is_valid(reg) )
+         break;                  // Empty mask, end loop
+       rm.Remove(reg);           // Yank from mask
+ 
+@@ -272,10 +269,10 @@
+         }
+         tty->print(",");        // Separate start of new run
+         start = last = reg;     // Start a new register run
+-        OptoReg::dump(start); // Print register      
++        OptoReg::dump(start); // Print register
+       } // End of if ending a register run or not
+     } // End of while regmask not empty
+-                                
++
+     if( start == last ) {       // 1-register run; no special printing
+     } else if( start+1 == last ) {
+       tty->print(",");          // 2-register run; print as "rX,rY"
+@@ -289,5 +286,3 @@
+   tty->print("]");
+ }
+ #endif
+-
+-
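+
+RegMask::Size() above counts set bits a byte at a time through the external
+bitsInByte[256] table. A self-contained sketch of the same table-driven
+popcount, building the table locally instead of linking HotSpot's:
+
+    #include <cstdint>
+    #include <cstdio>
+
+    static uint8_t bits_in_byte[256];
+
+    static void init_table() {
+      for (int v = 0; v < 256; v++) {
+        int n = 0;
+        for (int b = v; b != 0; b >>= 1) n += b & 1;  // naive count per byte
+        bits_in_byte[v] = (uint8_t)n;
+      }
+    }
+
+    static unsigned popcount32(uint32_t w) {
+      return bits_in_byte[(w >> 24) & 0xff] + bits_in_byte[(w >> 16) & 0xff] +
+             bits_in_byte[(w >>  8) & 0xff] + bits_in_byte[ w        & 0xff];
+    }
+
+    int main() {
+      init_table();
+      std::printf("%u\n", popcount32(0xF0F0F0F0u));  // prints 16
+    }
+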
+diff -ruN openjdk6/hotspot/src/share/vm/opto/regmask.hpp openjdk/hotspot/src/share/vm/opto/regmask.hpp
+--- openjdk6/hotspot/src/share/vm/opto/regmask.hpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/regmask.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)regmask.hpp	1.65 07/05/05 17:06:26 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Some fun naming (textual) substitutions:
+@@ -50,7 +47,7 @@
+ //------------------------------RegMask----------------------------------------
+ // The ADL file describes how to print the machine-specific registers, as well
+ // as any notion of register classes.  We provide a register mask, which is
+-// just a collection of Register numbers.  
++// just a collection of Register numbers.
+ 
+ // The ADLC defines 2 macros, RM_SIZE and FORALL_BODY.
+ // RM_SIZE is the size of a register mask in words.
+@@ -62,7 +59,7 @@
+ class RegMask VALUE_OBJ_CLASS_SPEC {
+   union {
+     double _dummy_force_double_alignment[RM_SIZE>>1];
+-    // Array of Register Mask bits.  This array is large enough to cover 
++    // Array of Register Mask bits.  This array is large enough to cover
+     // all the machine registers and all parameters that need to be passed
+     // on the stack (stack registers) up to some interesting limit.  Methods
+     // that need more parameters will NOT be compiled.  On Intel, the limit
+@@ -95,9 +92,9 @@
+ 
+   // A constructor only used by the ADLC output.  All mask fields are filled
+   // in directly.  Calls to this look something like RM(1,2,3,4);
+-  RegMask( 
++  RegMask(
+ #   define BODY(I) int a##I,
+-    FORALL_BODY   
++    FORALL_BODY
+ #   undef BODY
+     int dummy = 0 ) {
+ #   define BODY(I) _A[I] = a##I;
+@@ -125,7 +122,7 @@
+   }
+ 
+   // The last bit in the register mask indicates that the mask should repeat
+-  // indefinitely with ONE bits.  Returns TRUE if mask is infinite or 
++  // indefinitely with ONE bits.  Returns TRUE if mask is infinite or
+   // unbounded in size.  Returns FALSE if mask is finite size.
+   int is_AllStack() const { return _A[RM_SIZE-1] >> (_WordBits-1); }
+ 
+@@ -147,22 +144,22 @@
+   }
+ 
+   // Find lowest-numbered register from mask, or BAD if mask is empty.
+-  OptoReg::Name find_first_elem() const { 
++  OptoReg::Name find_first_elem() const {
+     int base, bits;
+ #   define BODY(I) if( (bits = _A[I]) != 0 ) base = I<<_LogWordBits; else
+     FORALL_BODY
+ #   undef BODY
+       { base = OptoReg::Bad; bits = 1<<0; }
+-    return OptoReg::Name(base + find_lowest_bit(bits)); 
++    return OptoReg::Name(base + find_lowest_bit(bits));
+   }
+   // Get highest-numbered register from mask, or BAD if mask is empty.
+-  OptoReg::Name find_last_elem() const { 
++  OptoReg::Name find_last_elem() const {
+     int base, bits;
+ #   define BODY(I) if( (bits = _A[RM_SIZE-1-I]) != 0 ) base = (RM_SIZE-1-I)<<_LogWordBits; else
+     FORALL_BODY
+ #   undef BODY
+       { base = OptoReg::Bad; bits = 1<<0; }
+-    return OptoReg::Name(base + find_hihghest_bit(bits)); 
++    return OptoReg::Name(base + find_hihghest_bit(bits));
+   }
+ 
+   // Find the lowest-numbered register pair in the mask.  Return the
+@@ -188,7 +185,7 @@
+ 
+   // Fast overlap test.  Non-zero if any registers in common.
+   int overlap( const RegMask &rm ) const {
+-    return 
++    return
+ #   define BODY(I) (_A[I] & rm._A[I]) |
+     FORALL_BODY
+ #   undef BODY
+diff -ruN openjdk6/hotspot/src/share/vm/opto/reg_split.cpp openjdk/hotspot/src/share/vm/opto/reg_split.cpp
+--- openjdk6/hotspot/src/share/vm/opto/reg_split.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/reg_split.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)reg_split.cpp	1.81 07/05/05 17:06:27 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -96,7 +93,7 @@
+ void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) {
+   // Skip intervening ProjNodes.  Do not insert between a ProjNode and
+   // its definer.
+-  while( i < b->_nodes.size() && 
++  while( i < b->_nodes.size() &&
+          (b->_nodes[i]->is_Proj() ||
+           b->_nodes[i]->is_Phi() ) )
+     i++;
+@@ -141,7 +138,7 @@
+   // If we are spilling the memory op for an implicit null check, at the
+   // null check location (ie - null check is in HRP block) we need to do
+   // the null-check first, then spill-down in the following block.
+-  // (The implicit_null_check function ensures the use is also dominated 
++  // (The implicit_null_check function ensures the use is also dominated
+   // by the branch-not-taken block.)
+   Node *be = b->end();
+   if( be->is_MachNullCheck() && be->in(1) == def && def == b->_nodes[loc] ) {
+@@ -183,14 +180,14 @@
+   JVMState* jvms = use->jvms();
+   uint debug_start = jvms ? jvms->debug_start() : 999999;
+   uint debug_end   = jvms ? jvms->debug_end()   : 999999;
+-  
++
+   //-------------------------------------------
+   // Check for use of debug info
+   if (useidx >= debug_start && useidx < debug_end) {
+     // Actually it's perfectly legal for constant debug info to appear,
+     // just unlikely.  In this case the optimizer left a ConI of 4
+     // as both inputs to a Phi with only a debug use.  It's a single-def
+-    // live range of a rematerializable value.  The live range spills, 
++    // live range of a rematerializable value.  The live range spills,
+     // rematerializes and now the ConI directly feeds into the debug info.
+     // assert(!def->is_Con(), "constant debug info already constructed directly");
+ 
+@@ -209,7 +206,7 @@
+         // DEF is UP, so must copy it DOWN and hook in USE
+         // Insert SpillCopy before the USE, which uses DEF as its input,
+         // and defs a new live range, which is used by this node.
+-        Node *spill = get_spillcopy_wide(def,use,useidx); 
++        Node *spill = get_spillcopy_wide(def,use,useidx);
+         // did we fail to split?
+         if (!spill) {
+           // Bail
+@@ -274,7 +271,7 @@
+ }
+ 
+ //------------------------------split_Rematerialize----------------------------
+-// Clone a local copy of the def.  
++// Clone a local copy of the def.
+ Node *PhaseChaitin::split_Rematerialize( Node *def, Block *b, uint insidx, uint &maxlrg, GrowableArray<uint> splits, int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru ) {
+   // The input live ranges will be stretched to the site of the new
+   // instruction.  They might be stretched past a def and will thus
+@@ -306,7 +303,7 @@
+     return 0;
+   }
+ 
+-  // See if any inputs are currently being spilled, and take the 
++  // See if any inputs are currently being spilled, and take the
+   // latest copy of spilled inputs.
+   if( spill->req() > 1 ) {
+     for( uint i = 1; i < spill->req(); i++ ) {
+@@ -370,7 +367,7 @@
+   // got "freed up" and that num_regs will become INT_PRESSURE.
+   int bound_pres = lrg->_is_float ? FLOATPRESSURE : INTPRESSURE;
+   // Effective register pressure limit.
+-  int lrg_pres = (lrg->get_invalid_mask_size() > lrg->num_regs()) 
++  int lrg_pres = (lrg->get_invalid_mask_size() > lrg->num_regs())
+     ? (lrg->get_invalid_mask_size() >> (lrg->num_regs()-1)) : bound_pres;
+   // High pressure if block pressure requires more register freedom
+   // than live range has.
+@@ -390,7 +387,7 @@
+     if( n->is_Phi() ) continue;
+     for( uint j = 1; j < n->req(); j++ )
+       if( Find_id(n->in(j)) == lidx )
+-        return true;          // Found 1st use!  
++        return true;          // Found 1st use!
+     if( n->out_RegMask().is_NotEmpty() ) return false;
+   }
+   return false;
+@@ -431,7 +428,7 @@
+   phis = new Node_List();
+   // Gather info on which LRG's are spilling, and build maps
+   for( bidx = 1; bidx < _maxlrg; bidx++ ) {
+-    if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) { 
++    if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) {
+       assert(!lrgs(bidx).mask().is_AllStack(),"AllStack should color");
+       lrg2reach[bidx] = spill_cnt;
+       spill_cnt++;
+@@ -441,7 +438,7 @@
+       splits.append(0);
+ #endif
+ #ifndef PRODUCT
+-      if( PrintOpto && WizardMode && lrgs(bidx)._was_spilled1 ) 
++      if( PrintOpto && WizardMode && lrgs(bidx)._was_spilled1 )
+         tty->print_cr("Warning, 2nd spill of L%d",bidx);
+ #endif
+     }
+@@ -475,7 +472,7 @@
+   }
+ 
+   // Initialize to array of empty vectorsets
+-  for( slidx = 0; slidx < spill_cnt; slidx++ ) 
++  for( slidx = 0; slidx < spill_cnt; slidx++ )
+     UP_entry[slidx] = new VectorSet(Thread::current()->resource_area());
+ 
+   //----------PASS 1----------
+@@ -564,7 +561,7 @@
+           u3 = u2;
+         }
+       }  // End for all potential Phi inputs
+-      
++
+       // If a phi is needed, check for it
+       if( needs_phi ) {
+         // check block for appropriate phinode & update edges
+@@ -615,7 +612,7 @@
+           UPblock[slidx] = false;
+         // If we are not split up/down and all inputs are down, then we
+         // are down
+-        if( !needs_split && !u3 ) 
++        if( !needs_split && !u3 )
+           UPblock[slidx] = false;
+       }  // end if phi is needed
+ 
+@@ -702,12 +699,12 @@
+         }
+         continue;
+       }
+-      assert( insidx > b->_ihrp_index || 
++      assert( insidx > b->_ihrp_index ||
+               (b->_reg_pressure < (uint)INTPRESSURE) ||
+               b->_ihrp_index > 4000000 ||
+               b->_ihrp_index >= b->end_idx() ||
+               !b->_nodes[b->_ihrp_index]->is_Proj(), "" );
+-      assert( insidx > b->_fhrp_index || 
++      assert( insidx > b->_fhrp_index ||
+               (b->_freg_pressure < (uint)FLOATPRESSURE) ||
+               b->_fhrp_index > 4000000 ||
+               b->_fhrp_index >= b->end_idx() ||
+@@ -726,7 +723,7 @@
+           if( UPblock[slidx] ) {
+             // set location to insert spills at
+             // SPLIT DOWN HERE - NO CISC SPILL
+-            if( is_high_pressure( b, &lrgs(lidx), insidx ) && 
++            if( is_high_pressure( b, &lrgs(lidx), insidx ) &&
+                 !n1->rematerialize() ) {
+               // If there is already a valid stack definition available, use it
+               if( debug_defs[slidx] != NULL ) {
+@@ -803,8 +800,8 @@
+         uint old_last = cnt - 1;
+         for( inpidx = 1; inpidx < cnt; inpidx++ ) {
+           // Derived/base pairs may be added to our inputs during this loop.
+-          // If inpidx > old_last, then one of these new inputs is being 
+-          // handled. Skip the derived part of the pair, but process 
++          // If inpidx > old_last, then one of these new inputs is being
++          // handled. Skip the derived part of the pair, but process
+           // the base like any other input.
+           if( inpidx > old_last && ((inpidx - oopoff) & 1) == DERIVED ) {
+             continue;  // skip derived_debug added below
+@@ -833,7 +830,7 @@
+               }
+               continue;
+             }
+- 
++
+             // Rematerializable?  Then clone def at use site instead
+             // of store/load
+             if( def->rematerialize() ) {
+@@ -844,12 +841,12 @@
+             }
+ 
+             MachNode *mach = n->is_Mach() ? n->as_Mach() : NULL;
+-            // Base pointers and oopmap references do not care where they live. 
+-            if ((inpidx >= oopoff) || 
++            // Base pointers and oopmap references do not care where they live.
++            if ((inpidx >= oopoff) ||
+                 (mach && mach->ideal_Opcode() == Op_AddP && inpidx == AddPNode::Base)) {
+               if (def->rematerialize() && lrgs(useidx)._was_spilled2) {
+-                // This def has been rematerialized a couple of times without 
+-                // progress. It doesn't care if it lives UP or DOWN, so 
++                // This def has been rematerialized a couple of times without
++                // progress. It doesn't care if it lives UP or DOWN, so
+                 // spill it down now.
+                 maxlrg = split_USE(def,b,n,inpidx,maxlrg,false,false,splits,slidx);
+                 // If it wasn't split bail
+@@ -871,7 +868,7 @@
+                 Node *derived_debug = debug_defs[slidx];
+                 if( ((inpidx - oopoff) & 1) == DERIVED && // derived vs base?
+                     mach && mach->ideal_Opcode() != Op_Halt &&
+-                    derived_debug != NULL && 
++                    derived_debug != NULL &&
+                     derived_debug != def ) { // Actual 2nd value appears
+                   // We have already set 'def' as a derived value.
+                   // Also set debug_defs[slidx] as a derived value.
+@@ -881,9 +878,9 @@
+                       break;      // Found an instance of debug derived
+                   if( k == cnt ) {// No instance of debug_defs[slidx]
+                     // Add a derived/base pair to cover the debug info.
+-                    // We have to process the added base later since it is not 
++                    // We have to process the added base later since it is not
+                     // handled yet at this point but skip derived part.
+-                    assert(((n->req() - oopoff) & 1) == DERIVED, 
++                    assert(((n->req() - oopoff) & 1) == DERIVED,
+                            "must match skip condition above");
+                     n->add_req( derived_debug );   // this will be skipped above
+                     n->add_req( n->in(inpidx+1) ); // this will be processed
+@@ -923,11 +920,11 @@
+             bool uup = umask.is_UP();
+ 
+             // Need special logic to handle bound USES. Insert a split at this
+-            // bound use if we can't rematerialize the def, or if we need the 
++            // bound use if we can't rematerialize the def, or if we need the
+             // split to form a misaligned pair.
+-            if( !umask.is_AllStack() && 
+-                (int)umask.Size() <= lrgs(useidx).num_regs() && 
+-                (!def->rematerialize() || 
++            if( !umask.is_AllStack() &&
++                (int)umask.Size() <= lrgs(useidx).num_regs() &&
++                (!def->rematerialize() ||
+                  umask.is_misaligned_Pair())) {
+               // These need a Split regardless of overlap or pressure
+               // SPLIT - NO DEF - NO CISC SPILL
+@@ -1070,7 +1067,7 @@
+         if( !n->rematerialize() &&
+             (((dmask.is_bound1() || dmask.is_bound2() || dmask.is_misaligned_Pair()) &&
+              (deflrg._direct_conflict || deflrg._must_spill)) ||
+-             // Check for LRG being up in a register and we are inside a high 
++             // Check for LRG being up in a register and we are inside a high
+              // pressure area.  Spill it down immediately.
+              (defup && is_high_pressure(b,&deflrg,insidx))) ) {
+           assert( !n->rematerialize(), "" );
+@@ -1205,8 +1202,8 @@
+     // goes in.
+ 
+     // Walk the predecessor blocks and assign the reaching def to the Phi.
+-    // Split Phi nodes by placing USE side splits wherever the reaching 
+-    // DEF has the wrong UP/DOWN value.  
++    // Split Phi nodes by placing USE side splits wherever the reaching
++    // DEF has the wrong UP/DOWN value.
+     for( uint i = 1; i < b->num_preds(); i++ ) {
+       // Get predecessor block pre-order number
+       Block *pred = _cfg._bbs[b->pred(i)->_idx];
+@@ -1215,7 +1212,7 @@
+       Node *def = Reaches[pidx][slidx];
+       assert( def, "must have reaching def" );
+       // If input up/down sense and reg-pressure DISagree
+-      if( def->rematerialize() ) { 
++      if( def->rematerialize() ) {
+         def = split_Rematerialize( def, pred, pred->end_idx(), maxlrg, splits, slidx, lrg2reach, Reachblock, false );
+         if( !def ) return 0;    // Bail out
+       }
+@@ -1266,7 +1263,7 @@
+       uint lr2 = Find(n1->in(twoidx));
+       if( lr1 < lr2 )
+         Union(n1, n1->in(twoidx));
+-      else if( lr1 > lr2 ) 
++      else if( lr1 > lr2 )
+         Union(n1->in(twoidx), n1);
+     }  // End if two address
+   }  // End for all defs
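The reg_split.cpp hunks above are almost entirely trailing-whitespace normalization against the 7b24 sources; the one functional idiom worth calling out is the closing loop, which unions a two-address def with its tied input so both ends share a single live range. As a minimal sketch of the path-compressing union-find that Find()/Union() conceptually implement (types and names here are illustrative, not HotSpot's PhaseChaitin code):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Union-find over live-range ids with path halving.  The ordering below
    // keeps the smaller id as the root, matching the diff's
    // "if( lr1 < lr2 ) Union(n1, n1->in(twoidx))" convention.
    struct LiveRangeUF {
      std::vector<uint32_t> parent;
      explicit LiveRangeUF(uint32_t n) : parent(n) {
        for (uint32_t i = 0; i < n; ++i) parent[i] = i;
      }
      uint32_t find(uint32_t x) {
        while (parent[x] != x) {
          parent[x] = parent[parent[x]];  // path halving
          x = parent[x];
        }
        return x;
      }
      void unite(uint32_t a, uint32_t b) {
        a = find(a);
        b = find(b);
        if (a != b) parent[std::max(a, b)] = std::min(a, b);
      }
    };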
+diff -ruN openjdk6/hotspot/src/share/vm/opto/rootnode.cpp openjdk/hotspot/src/share/vm/opto/rootnode.cpp
+--- openjdk6/hotspot/src/share/vm/opto/rootnode.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/rootnode.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)rootnode.cpp	1.77 07/05/05 17:06:28 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -56,9 +53,9 @@
+ }
+ 
+ //=============================================================================
+-HaltNode::HaltNode( Node *ctrl, Node *frameptr ) : Node(TypeFunc::Parms) { 
++HaltNode::HaltNode( Node *ctrl, Node *frameptr ) : Node(TypeFunc::Parms) {
+   Node* top = Compile::current()->top();
+-  init_req(TypeFunc::Control,  ctrl        ); 
++  init_req(TypeFunc::Control,  ctrl        );
+   init_req(TypeFunc::I_O,      top);
+   init_req(TypeFunc::Memory,   top);
+   init_req(TypeFunc::FramePtr, frameptr    );
+@@ -73,14 +70,12 @@
+ }
+ 
+ //------------------------------Value------------------------------------------
+-const Type *HaltNode::Value( PhaseTransform *phase ) const { 
++const Type *HaltNode::Value( PhaseTransform *phase ) const {
+   return ( phase->type(in(TypeFunc::Control)) == Type::TOP)
+     ? Type::TOP
+     : Type::BOTTOM;
+ }
+ 
+-const RegMask &HaltNode::out_RegMask() const { 
++const RegMask &HaltNode::out_RegMask() const {
+   return RegMask::Empty;
+ }
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/opto/rootnode.hpp openjdk/hotspot/src/share/vm/opto/rootnode.hpp
+--- openjdk6/hotspot/src/share/vm/opto/rootnode.hpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/rootnode.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)rootnode.hpp	1.48 07/05/05 17:06:28 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //------------------------------RootNode---------------------------------------
+@@ -63,4 +60,3 @@
+   virtual uint ideal_reg() const { return NotAMachineReg; }
+   virtual uint match_edge(uint idx) const { return 0; }
+ };
+-
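rootnode.cpp and rootnode.hpp show the boilerplate half of this patch, repeated for every file it touches: the SCCS-style ident block is deleted (OpenJDK 7 dropped it) and trailing whitespace is stripped. For reference, the removed idiom is purely informational and has no effect on generated code:

    // The guard removed at the top of every file in this patch; when the
    // macro is defined it only embeds a version string in the object file.
    #ifdef USE_PRAGMA_IDENT_SRC            // USE_PRAGMA_IDENT_HDR in headers
    #pragma ident "@(#)file.cpp 1.0 00/00/00 JVM"
    #endif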
+diff -ruN openjdk6/hotspot/src/share/vm/opto/runtime.cpp openjdk/hotspot/src/share/vm/opto/runtime.cpp
+--- openjdk6/hotspot/src/share/vm/opto/runtime.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/runtime.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)runtime.cpp	1.458 07/05/17 16:00:35 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -43,7 +40,6 @@
+ // Compiled code entry points
+ address OptoRuntime::_new_instance_Java                           = NULL;
+ address OptoRuntime::_new_array_Java                              = NULL;
+-address OptoRuntime::_multianewarray1_Java                        = NULL;
+ address OptoRuntime::_multianewarray2_Java                        = NULL;
+ address OptoRuntime::_multianewarray3_Java                        = NULL;
+ address OptoRuntime::_multianewarray4_Java                        = NULL;
+@@ -64,12 +60,12 @@
+ // This should be called in an assertion at the start of OptoRuntime routines
+ // which are entered from compiled code (all of them)
+ #ifndef PRODUCT
+-static bool check_compiled_frame(JavaThread* thread) {  
++static bool check_compiled_frame(JavaThread* thread) {
+   assert(thread->last_frame().is_runtime_frame(), "cannot call runtime directly from compiled code");
+ #ifdef ASSERT
+   RegisterMap map(thread, false);
+   frame caller = thread->last_frame().sender(&map);
+-  assert(caller.is_compiled_frame(), "not being called from compiled like code");  
++  assert(caller.is_compiled_frame(), "not being called from compiled like code");
+ #endif  /* ASSERT */
+   return true;
+ }
+@@ -80,7 +76,7 @@
+   var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, save_arg_regs, return_pc)
+ 
+ void OptoRuntime::generate(ciEnv* env) {
+-  
++
+   generate_exception_blob();
+ 
+   // Note: tls: Means fetching the return oop out of the thread-local storage
+@@ -89,7 +85,6 @@
+   // -------------------------------------------------------------------------------------------------------------------------------
+   gen(env, _new_instance_Java              , new_instance_Type            , new_instance_C                  ,    0 , true , false, false);
+   gen(env, _new_array_Java                 , new_array_Type               , new_array_C                     ,    0 , true , false, false);
+-  gen(env, _multianewarray1_Java           , multianewarray1_Type         , multianewarray1_C               ,    0 , true , false, false);  
+   gen(env, _multianewarray2_Java           , multianewarray2_Type         , multianewarray2_C               ,    0 , true , false, false);
+   gen(env, _multianewarray3_Java           , multianewarray3_Type         , multianewarray3_C               ,    0 , true , false, false);
+   gen(env, _multianewarray4_Java           , multianewarray4_Type         , multianewarray4_C               ,    0 , true , false, false);
+@@ -98,22 +93,22 @@
+   gen(env, _rethrow_Java                   , rethrow_Type                 , rethrow_C                       ,    2 , true , false, true );
+ 
+   gen(env, _slow_arraycopy_Java            , slow_arraycopy_Type          , SharedRuntime::slow_arraycopy_C ,    0 , false, false, false);
+-  gen(env, _register_finalizer_Java        , register_finalizer_Type      , register_finalizer              ,    0 , false, false, false);  
++  gen(env, _register_finalizer_Java        , register_finalizer_Type      , register_finalizer              ,    0 , false, false, false);
+ 
+-# ifdef ENABLE_ZAP_DEAD_LOCALS                                                                                              
++# ifdef ENABLE_ZAP_DEAD_LOCALS
+   gen(env, _zap_dead_Java_locals_Java      , zap_dead_locals_Type         , zap_dead_Java_locals_C          ,    0 , false, true , false );
+   gen(env, _zap_dead_native_locals_Java    , zap_dead_locals_Type         , zap_dead_native_locals_C        ,    0 , false, true , false );
+ # endif
+-  
+-} 
++
++}
+ 
+ #undef gen
+ 
+ 
+ // Helper method to do generation of RunTimeStub's
+ address OptoRuntime::generate_stub( ciEnv* env,
+-                                    TypeFunc_generator gen, address C_function, 
+-                                    const char *name, int is_fancy_jump, 
++                                    TypeFunc_generator gen, address C_function,
++                                    const char *name, int is_fancy_jump,
+                                     bool pass_tls,
+                                     bool save_argument_registers,
+                                     bool return_pc ) {
+@@ -122,10 +117,10 @@
+   return  C.stub_entry_point();
+ }
+ 
+-const char* OptoRuntime::stub_name(address entry) { 
+-#ifndef PRODUCT  
++const char* OptoRuntime::stub_name(address entry) {
++#ifndef PRODUCT
+   CodeBlob* cb = CodeCache::find_blob(entry);
+-  RuntimeStub* rs =(RuntimeStub *)cb;  
++  RuntimeStub* rs =(RuntimeStub *)cb;
+   assert(rs != NULL && rs->is_runtime_stub(), "not a runtime stub");
+   return rs->name();
+ #else
+@@ -144,6 +139,21 @@
+ // We failed the fast-path allocation.  Now we need to do a scavenge or GC
+ // and try allocation again.
+ 
++void OptoRuntime::do_eager_card_mark(JavaThread* thread) {
++  // After any safepoint, just before going back to compiled code,
++  // we perform a card mark.  This lets the compiled code omit
++  // card marks for initialization of new objects.
++  // Keep this code consistent with GraphKit::store_barrier.
++
++  oop new_obj = thread->vm_result();
++  if (new_obj == NULL)  return;
++
++  assert(Universe::heap()->can_elide_tlab_store_barriers(),
++         "compiler must check this first");
++  new_obj = Universe::heap()->new_store_barrier(new_obj);
++  thread->set_vm_result(new_obj);
++}
++
+ // object allocation
+ JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(klassOopDesc* klass, JavaThread* thread))
+   JRT_BLOCK;
+@@ -173,7 +183,7 @@
+     oop result = instanceKlass::cast(klass)->allocate_instance(THREAD);
+     thread->set_vm_result(result);
+ 
+-    // Pass oops back through thread local storage.  Our apparent type to Java 
++    // Pass oops back through thread local storage.  Our apparent type to Java
+     // is that we return an oop, but we can block on exit from this routine and
+     // a GC can trash the oop in C's return register.  The generated stub will
+     // fetch the oop from TLS after any possible GC.
+@@ -182,6 +192,10 @@
+   deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
+   JRT_BLOCK_END;
+ 
++  if (GraphKit::use_ReduceInitialCardMarks()) {
++    // do them now so we don't have to do them on the fast path
++    do_eager_card_mark(thread);
++  }
+ JRT_END
+ 
+ 
+@@ -209,7 +223,7 @@
+     result = oopFactory::new_objArray(elem_type, len, THREAD);
+   }
+ 
+-  // Pass oops back through thread local storage.  Our apparent type to Java 
++  // Pass oops back through thread local storage.  Our apparent type to Java
+   // is that we return an oop, but we can block on exit from this routine and
+   // a GC can trash the oop in C's return register.  The generated stub will
+   // fetch the oop from TLS after any possible GC.
+@@ -217,21 +231,13 @@
+   thread->set_vm_result(result);
+   JRT_BLOCK_END;
+ 
++  if (GraphKit::use_ReduceInitialCardMarks()) {
++    // do them now so we don't have to do them on the fast path
++    do_eager_card_mark(thread);
++  }
+ JRT_END
+ 
+-// multianewarray for one dimension
+-JRT_ENTRY(void, OptoRuntime::multianewarray1_C(klassOopDesc* elem_type, int len1, JavaThread *thread))
+-#ifndef PRODUCT
+-  SharedRuntime::_multi1_ctr++;                // multianewarray for 1 dimension
+-#endif
+-  assert(check_compiled_frame(thread), "incorrect caller");
+-  assert(oop(elem_type)->is_klass(), "not a class");
+-  jint dims[1];
+-  dims[0] = len1;
+-  oop obj = arrayKlass::cast(elem_type)->multi_allocate(1, dims, THREAD);
+-  deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
+-  thread->set_vm_result(obj);
+-JRT_END
++// Note: multianewarray for one dimension is handled inline by GraphKit::new_array.
+ 
+ // multianewarray for 2 dimensions
+ JRT_ENTRY(void, OptoRuntime::multianewarray2_C(klassOopDesc* elem_type, int len1, int len2, JavaThread *thread))
+@@ -363,10 +369,6 @@
+   return TypeFunc::make(domain, range);
+ }
+ 
+-const TypeFunc *OptoRuntime::multianewarray1_Type() {
+-  return multianewarray_Type(1);
+-}
+-
+ const TypeFunc *OptoRuntime::multianewarray2_Type() {
+   return multianewarray_Type(2);
+ }
+@@ -387,7 +389,7 @@
+   // create input type (domain)
+   const Type **fields = TypeTuple::fields(1);
+   // symbolOop name of class to be loaded
+-  fields[TypeFunc::Parms+0] = TypeInt::INT; 
++  fields[TypeFunc::Parms+0] = TypeInt::INT;
+   const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
+ 
+   // create result type (range)
+@@ -451,12 +453,12 @@
+ const TypeFunc* OptoRuntime::flush_windows_Type() {
+   // create input type (domain)
+   const Type** fields = TypeTuple::fields(1);
+-  fields[TypeFunc::Parms+0] = NULL; // void 
++  fields[TypeFunc::Parms+0] = NULL; // void
+   const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields);
+ 
+   // create result type
+   fields = TypeTuple::fields(1);
+-  fields[TypeFunc::Parms+0] = NULL; // void 
++  fields[TypeFunc::Parms+0] = NULL; // void
+   const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
+ 
+   return TypeFunc::make(domain, range);
+@@ -496,8 +498,8 @@
+   // create input type (domain)
+   const Type **fields = TypeTuple::fields(2);
+   // symbolOop name of class to be loaded
+-  fields[TypeFunc::Parms+0] = Type::DOUBLE; 
+-  fields[TypeFunc::Parms+1] = Type::HALF; 
++  fields[TypeFunc::Parms+0] = Type::DOUBLE;
++  fields[TypeFunc::Parms+1] = Type::HALF;
+   const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
+ 
+   // create result type (range)
+@@ -620,9 +622,9 @@
+   // create result type
+   fields = TypeTuple::fields(1);
+   // fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // locked oop
+-  fields[TypeFunc::Parms+0] = NULL; // void 
++  fields[TypeFunc::Parms+0] = NULL; // void
+   const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
+-  return TypeFunc::make(domain, range);  
++  return TypeFunc::make(domain, range);
+ }
+ 
+ //-------------- methodData update helpers
+@@ -636,7 +638,7 @@
+ 
+   // create result type
+   fields = TypeTuple::fields(1);
+-  fields[TypeFunc::Parms+0] = NULL; // void 
++  fields[TypeFunc::Parms+0] = NULL; // void
+   const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
+   return TypeFunc::make(domain,range);
+ }
+@@ -675,7 +677,7 @@
+ JRT_END
+ 
+ //-----------------------------------------------------------------------------
+-// implicit exception support. 
++// implicit exception support.
+ 
+ static void report_null_exception_in_code_cache(address exception_pc) {
+   ResourceMark rm;
+@@ -690,7 +692,7 @@
+       methodOop method = ((nmethod*)n)->method();
+       tty->print_cr("# Method where it happened %s.%s ", Klass::cast(method->method_holder())->name()->as_C_string(), method->name()->as_C_string());
+       tty->print_cr("#");
+-      if (ShowMessageBoxOnError && UpdateHotSpotCompilerFileOnError) { 
++      if (ShowMessageBoxOnError && UpdateHotSpotCompilerFileOnError) {
+         const char* title    = "HotSpot Runtime Error";
+         const char* question = "Do you want to exclude compilation of this method in future runs?";
+         if (os::message_box(title, question)) {
+@@ -705,7 +707,7 @@
+       }
+       fatal("Implicit null exception happened in compiled method");
+     } else {
+-      n->print(); 
++      n->print();
+       fatal("Implicit null exception happened in generated stub");
+     }
+   }
+@@ -730,7 +732,7 @@
+ 
+ //-----------------------------------------------------------------------
+ // Exceptions
+-// 
++//
+ 
+ static void trace_exception(oop exception_oop, address exception_pc, const char* msg) PRODUCT_RETURN;
+ 
+@@ -743,16 +745,16 @@
+   // is only used to pass arguments into the method. Not for general
+   // exception handling.  DO NOT CHANGE IT to use pending_exception, since
+   // the runtime stubs checks this on exit.
+-  assert(thread->exception_oop() != NULL, "exception oop is found");  
++  assert(thread->exception_oop() != NULL, "exception oop is found");
+   address handler_address = NULL;
+ 
+   Handle exception(thread, thread->exception_oop());
+ 
+   if (TraceExceptions) {
+-    trace_exception(exception(), thread->exception_pc(), ""); 
++    trace_exception(exception(), thread->exception_pc(), "");
+   }
+   // for AbortVMOnException flag
+-  NOT_PRODUCT(Exceptions::debug_check_abort(exception));  
++  NOT_PRODUCT(Exceptions::debug_check_abort(exception));
+ 
+   #ifdef ASSERT
+     if (!(exception->is_a(SystemDictionary::throwable_klass()))) {
+@@ -838,10 +840,10 @@
+ // will do the normal VM entry. We do it this way so that we can see if the nmethod
+ // we looked up the handler for has been deoptimized in the meantime. If it has been
+ // we must not use the handler and instread return the deopt blob.
+-address OptoRuntime::handle_exception_C(JavaThread* thread) { 
++address OptoRuntime::handle_exception_C(JavaThread* thread) {
+ //
+ // We are in Java not VM and in debug mode we have a NoHandleMark
+-// 
++//
+ #ifndef PRODUCT
+   SharedRuntime::_find_handler_ctr++;          // find exception handler
+ #endif
+@@ -849,7 +851,7 @@
+   nmethod* nm = NULL;
+   address handler_address = NULL;
+   {
+-    // Enter the VM 
++    // Enter the VM
+ 
+     ResetNoHandleMark rnhm;
+     handler_address = handle_exception_C_helper(thread, nm);
+@@ -874,16 +876,16 @@
+ }
+ 
+ //------------------------------rethrow----------------------------------------
+-// We get here after compiled code has executed a 'RethrowNode'.  The callee 
++// We get here after compiled code has executed a 'RethrowNode'.  The callee
+ // is either throwing or rethrowing an exception.  The callee-save registers
+ // have been restored, synchronized objects have been unlocked and the callee
+ // stack frame has been removed.  The return address was passed in.
+-// Exception oop is passed as the 1st argument.  This routine is then called 
+-// from the stub.  On exit, we know where to jump in the caller's code.  
+-// After this C code exits, the stub will pop his frame and end in a jump 
++// Exception oop is passed as the 1st argument.  This routine is then called
++// from the stub.  On exit, we know where to jump in the caller's code.
++// After this C code exits, the stub will pop his frame and end in a jump
+ // (instead of a return).  We enter the caller's default handler.
+ //
+-// This must be JRT_LEAF: 
++// This must be JRT_LEAF:
+ //     - caller will not change its state as we cannot block on exit,
+ //       therefore raw_exception_handler_for_return_address is all it takes
+ //       to handle deoptimized blobs
+@@ -891,10 +893,10 @@
+ // However, there needs to be a safepoint check in the middle!  So compiled
+ // safepoints are completely watertight.
+ //
+-// Thus, it cannot be a leaf since it contains the No_GC_Verifier. 
+-//  
++// Thus, it cannot be a leaf since it contains the No_GC_Verifier.
++//
+ // *THIS IS NOT RECOMMENDED PROGRAMMING STYLE*
+-// 
++//
+ address OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc) {
+ #ifndef PRODUCT
+   SharedRuntime::_rethrow_ctr++;               // count rethrows
+@@ -915,7 +917,7 @@
+ 
+ const TypeFunc *OptoRuntime::rethrow_Type() {
+   // create input type (domain)
+-  const Type **fields = TypeTuple::fields(1);  
++  const Type **fields = TypeTuple::fields(1);
+   fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop
+   const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields);
+ 
+@@ -935,7 +937,7 @@
+     RegisterMap reg_map(thread);
+     frame stub_frame = thread->last_frame();
+     assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
+-    frame caller_frame = stub_frame.sender(&reg_map); 
++    frame caller_frame = stub_frame.sender(&reg_map);
+ 
+     VM_DeoptimizeFrame deopt(thread, caller_frame.id());
+     VMThread::execute(&deopt);
+@@ -1050,7 +1052,7 @@
+ 
+ NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCounter::CounterTag tag) {
+   int max_depth = youngest_jvms->depth();
+-  
++
+   // Visit scopes from youngest to oldest.
+   bool first = true;
+   stringStream st;
+@@ -1114,7 +1116,7 @@
+ // Called from call sites in compiled code with oop maps (actually safepoints)
+ // Zaps dead locals in first java frame.
+ // Is entry because may need to lock to generate oop maps
+-// Currently, only used for compiler frames, but someday may be used 
++// Currently, only used for compiler frames, but someday may be used
+ // for interpreter frames, too.
+ 
+ int OptoRuntime::ZapDeadCompiledLocals_count = 0;
+@@ -1124,10 +1126,10 @@
+ static bool is_native_frame(frame* f) { return f->is_native_frame(); }
+ 
+ 
+-void OptoRuntime::zap_dead_java_or_native_locals(JavaThread* thread, 
++void OptoRuntime::zap_dead_java_or_native_locals(JavaThread* thread,
+                                                 bool (*is_this_the_right_frame_to_zap)(frame*)) {
+   assert(JavaThread::current() == thread, "is this needed?");
+- 
++
+   if ( !ZapDeadCompiledLocals )  return;
+ 
+   bool skip = false;
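That ends the runtime.cpp changes, which carry two of this patch's real behavioural deltas: the one-dimensional multianewarray1 slow path is gone (the added comment notes that GraphKit::new_array now handles that case inline), and do_eager_card_mark() lets the slow allocation paths perform the card mark once, so compiled code built with ReduceInitialCardMarks can omit per-initialization marks. As a rough illustration of what "dirtying a card" means, assuming a byte-per-512-bytes card table (the constants and names are invented for the sketch, not HotSpot's):

    #include <cstddef>
    #include <cstdint>

    static const int     kCardShift = 9;        // 512-byte cards
    static const uint8_t kDirtyCard = 0;
    static uint8_t       card_table[1u << 15];  // toy table
    static uint8_t       heap[1u << 24];        // toy "heap"

    // Mark the card covering a freshly allocated object as dirty so a later
    // collection scans that 512-byte window for interesting pointers.
    inline void dirty_card_for(const void* obj) {
      size_t offset = static_cast<const uint8_t*>(obj) - heap;
      card_table[offset >> kCardShift] = kDirtyCard;
    }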
+diff -ruN openjdk6/hotspot/src/share/vm/opto/runtime.hpp openjdk/hotspot/src/share/vm/opto/runtime.hpp
+--- openjdk6/hotspot/src/share/vm/opto/runtime.hpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/runtime.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)runtime.hpp	1.199 07/05/17 16:01:38 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //------------------------------OptoRuntime------------------------------------
+@@ -32,14 +29,14 @@
+ // Java calling convention.  Internally they call C++.  They are made once at
+ // startup time and Opto compiles calls to them later.
+ // Things are broken up into quads: the signature they will be called with,
+-// the address of the generated code, the corresponding C++ code and an 
++// the address of the generated code, the corresponding C++ code and an
+ // nmethod.
+ 
+-// The signature (returned by "xxx_Type()") is used at startup time by the 
+-// Generator to make the generated code "xxx_Java".  Opto compiles calls 
+-// to the generated code "xxx_Java".  When the compiled code gets executed, 
+-// it calls the C++ code "xxx_C".  The generated nmethod is saved in the 
+-// CodeCache.  Exception handlers use the nmethod to get the callee-save 
++// The signature (returned by "xxx_Type()") is used at startup time by the
++// Generator to make the generated code "xxx_Java".  Opto compiles calls
++// to the generated code "xxx_Java".  When the compiled code gets executed,
++// it calls the C++ code "xxx_C".  The generated nmethod is saved in the
++// CodeCache.  Exception handlers use the nmethod to get the callee-save
+ // register OopMaps.
+ class CallInfo;
+ 
+@@ -107,12 +104,11 @@
+   // References to generated stubs
+   static address _new_instance_Java;
+   static address _new_array_Java;
+-  static address _multianewarray1_Java;
+   static address _multianewarray2_Java;
+   static address _multianewarray3_Java;
+   static address _multianewarray4_Java;
+   static address _multianewarray5_Java;
+-  static address _vtable_must_compile_Java;    
++  static address _vtable_must_compile_Java;
+   static address _complete_monitor_locking_Java;
+   static address _rethrow_Java;
+ 
+@@ -120,36 +116,38 @@
+   static address _register_finalizer_Java;
+ 
+ # ifdef ENABLE_ZAP_DEAD_LOCALS
+-  static address _zap_dead_Java_locals_Java; 
+-  static address _zap_dead_native_locals_Java; 
++  static address _zap_dead_Java_locals_Java;
++  static address _zap_dead_native_locals_Java;
+ # endif
+ 
+ 
+   //
+   // Implementation of runtime methods
+   // =================================
+-  
+-  // Allocate storage for a Java instance.  
++
++  // Allocate storage for a Java instance.
+   static void new_instance_C(klassOopDesc* instance_klass, JavaThread *thread);
+-  
++
+   // Allocate storage for a objArray or typeArray
+   static void new_array_C(klassOopDesc* array_klass, int len, JavaThread *thread);
+ 
++  // Post-allocation step for implementing ReduceInitialCardMarks:
++  static void do_eager_card_mark(JavaThread* thread);
++
+   // Allocate storage for a multi-dimensional arrays
+-  // Note: needs to be fixed for arbitrary number of dimensions  
+-  static void multianewarray1_C(klassOopDesc* klass, int len1, JavaThread *thread);  
+-  static void multianewarray2_C(klassOopDesc* klass, int len1, int len2, JavaThread *thread);  
+-  static void multianewarray3_C(klassOopDesc* klass, int len1, int len2, int len3, JavaThread *thread);  
+-  static void multianewarray4_C(klassOopDesc* klass, int len1, int len2, int len3, int len4, JavaThread *thread);  
++  // Note: needs to be fixed for arbitrary number of dimensions
++  static void multianewarray2_C(klassOopDesc* klass, int len1, int len2, JavaThread *thread);
++  static void multianewarray3_C(klassOopDesc* klass, int len1, int len2, int len3, JavaThread *thread);
++  static void multianewarray4_C(klassOopDesc* klass, int len1, int len2, int len3, int len4, JavaThread *thread);
+   static void multianewarray5_C(klassOopDesc* klass, int len1, int len2, int len3, int len4, int len5, JavaThread *thread);
+-  
++
+ public:
+-  // Slow-path Locking and Unlocking    
+-  static void complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* thread);  
++  // Slow-path Locking and Unlocking
++  static void complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* thread);
+   static void complete_monitor_unlocking_C(oopDesc* obj, BasicLock* lock);
+ 
+ private:
+-    
++
+   // Implicit exception support
+   static void throw_null_exception_C(JavaThread* thread);
+ 
+@@ -178,7 +176,7 @@
+    static int ZapDeadCompiledLocals_count;
+ 
+ # endif
+-  
++
+ 
+  public:
+ 
+@@ -189,17 +187,16 @@
+ 
+   // Returns the name of a stub
+   static const char* stub_name(address entry);
+-  
++
+   // access to runtime stubs entry points for java code
+   static address new_instance_Java()                     { return _new_instance_Java; }
+   static address new_array_Java()                        { return _new_array_Java; }
+-  static address multianewarray1_Java()                  { return _multianewarray1_Java; }
+   static address multianewarray2_Java()                  { return _multianewarray2_Java; }
+   static address multianewarray3_Java()                  { return _multianewarray3_Java; }
+   static address multianewarray4_Java()                  { return _multianewarray4_Java; }
+   static address multianewarray5_Java()                  { return _multianewarray5_Java; }
+-  static address vtable_must_compile_stub()              { return _vtable_must_compile_Java; }  
+-  static address complete_monitor_locking_Java()         { return _complete_monitor_locking_Java;   }  
++  static address vtable_must_compile_stub()              { return _vtable_must_compile_Java; }
++  static address complete_monitor_locking_Java()         { return _complete_monitor_locking_Java;   }
+ 
+   static address slow_arraycopy_Java()                   { return _slow_arraycopy_Java; }
+   static address register_finalizer_Java()               { return _register_finalizer_Java; }
+@@ -217,13 +214,13 @@
+   // Leaf routines helping with method data update
+   static void profile_receiver_type_C(DataLayout* data, oopDesc* receiver);
+ 
+-  // Implicit exception support  
+-  static void throw_div0_exception_C      (JavaThread* thread);  
+-  static void throw_stack_overflow_error_C(JavaThread* thread);    
++  // Implicit exception support
++  static void throw_div0_exception_C      (JavaThread* thread);
++  static void throw_stack_overflow_error_C(JavaThread* thread);
+ 
+   // Exception handling
+   static address rethrow_stub()             { return _rethrow_Java; }
+-  
++
+ 
+   // Type functions
+   // ======================================================
+@@ -231,7 +228,6 @@
+   static const TypeFunc* new_instance_Type(); // object allocation (slow case)
+   static const TypeFunc* new_array_Type ();   // [a]newarray (slow case)
+   static const TypeFunc* multianewarray_Type(int ndim); // multianewarray
+-  static const TypeFunc* multianewarray1_Type(); // multianewarray
+   static const TypeFunc* multianewarray2_Type(); // multianewarray
+   static const TypeFunc* multianewarray3_Type(); // multianewarray
+   static const TypeFunc* multianewarray4_Type(); // multianewarray
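The runtime.hpp hunks above mirror the .cpp side: the multianewarray1 declarations disappear and do_eager_card_mark() is declared. The surviving header comment also spells out the "quad" convention, where each OptoRuntime entry pairs a signature (xxx_Type), a generated entry point (xxx_Java), a C++ slow path (xxx_C) and a stub in the code cache. Reduced to plain C++, the shape is roughly this (a sketch only; the real generator emits machine code rather than storing a function pointer):

    #include <cstdio>

    typedef void (*entry_t)(int);            // the "xxx_Type" role

    static void new_array_C(int len) {       // the "xxx_C" slow path
      std::printf("slow-path allocation, length %d\n", len);
    }

    static entry_t _new_array_Java = nullptr; // the "xxx_Java" entry point

    static void generate() {                  // OptoRuntime::generate analogue
      _new_array_Java = &new_array_C;         // real code: emit a stub here
    }

    int main() {
      generate();
      _new_array_Java(3);                     // compiled code jumps here
      return 0;
    }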
+diff -ruN openjdk6/hotspot/src/share/vm/opto/split_if.cpp openjdk/hotspot/src/share/vm/opto/split_if.cpp
+--- openjdk6/hotspot/src/share/vm/opto/split_if.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/split_if.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)split_if.cpp	1.68 07/05/05 17:06:29 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -30,7 +27,7 @@
+ 
+ 
+ //------------------------------split_thru_region------------------------------
+-// Split Node 'n' through merge point.  
++// Split Node 'n' through merge point.
+ Node *PhaseIdealLoop::split_thru_region( Node *n, Node *region ) {
+   uint wins = 0;
+   assert( n->is_CFG(), "" );
+@@ -43,7 +40,7 @@
+     if( in0->in(0) == region ) x->set_req( 0, in0->in(i) );
+     for( uint j = 1; j < n->req(); j++ ) {
+       Node *in = n->in(j);
+-      if( get_ctrl(in) == region ) 
++      if( get_ctrl(in) == region )
+         x->set_req( j, in->in(i) );
+     }
+     _igvn.register_new_node_with_optimizer(x);
+@@ -51,7 +48,7 @@
+     set_idom(x, x->in(0), dom_depth(x->in(0))+1);
+     r->init_req(i, x);
+   }
+-  
++
+   // Record region
+   r->set_req(0,region);         // Not a TRUE RegionNode
+   _igvn.register_new_node_with_optimizer(r);
+@@ -68,7 +65,7 @@
+     assert( n->in(0) != blk1, "Lousy candidate for split-if" );
+     return false;
+   }
+-  if( get_ctrl(n) != blk1 && get_ctrl(n) != blk2 ) 
++  if( get_ctrl(n) != blk1 && get_ctrl(n) != blk2 )
+     return false;               // Not block local
+   if( n->is_Phi() ) return false; // Local PHIs are expected
+ 
+@@ -76,7 +73,7 @@
+   for (uint i = 1; i < n->req(); i++) {
+     if( split_up( n->in(i), blk1, blk2 ) ) {
+       // Got split recursively and self went dead?
+-      if (n->outcnt() == 0) 
++      if (n->outcnt() == 0)
+         _igvn.remove_dead_node(n);
+       return true;
+     }
+@@ -102,7 +99,7 @@
+           (cmov = bol->unique_out()->as_CMove()) &&
+           (get_ctrl(cmov) == blk1 ||
+            get_ctrl(cmov) == blk2) ) ) {
+-      
++
+       // Must clone down
+ #ifndef PRODUCT
+       if( PrintOpto && VerifyLoopOptimizations ) {
+@@ -128,7 +125,7 @@
+             tty->print("Cloning down: ");
+             bol->dump();
+           }
+-#endif  
++#endif
+           for (DUIterator_Last jmin, j = bol->last_outs(jmin); j >= jmin; --j) {
+             // Uses are either IfNodes or CMoves
+             Node* iff = bol->last_out(j);
+@@ -156,7 +153,7 @@
+         _igvn._worklist.push(bol);
+       }
+       _igvn.remove_dead_node( n );
+-      
++
+       return true;
+     }
+   }
+@@ -169,7 +166,7 @@
+   if( n->is_Store() && n->in(MemNode::Memory)->in(0) == n->in(0) ) {
+     // Get store's memory slice
+     int alias_idx = C->get_alias_index(_igvn.type(n->in(MemNode::Address))->is_ptr());
+-   
++
+     // Get memory-phi anti-dep loads will be using
+     Node *memphi = n->in(MemNode::Memory);
+     assert( memphi->is_Phi(), "" );
+@@ -182,7 +179,7 @@
+     }
+   }
+ 
+-  // Found some other Node; must clone it up    
++  // Found some other Node; must clone it up
+ #ifndef PRODUCT
+   if( PrintOpto && VerifyLoopOptimizations ) {
+     tty->print("Cloning up: ");
+@@ -194,7 +191,7 @@
+   Node *phi = PhiNode::make_blank(blk1, n);
+   for( uint j = 1; j < blk1->req(); j++ ) {
+     Node *x = n->clone();
+-    if( n->in(0) && n->in(0) == blk1 ) 
++    if( n->in(0) && n->in(0) == blk1 )
+       x->set_req( 0, blk1->in(j) );
+     for( uint i = 1; i < n->req(); i++ ) {
+       Node *m = n->in(i);
+@@ -243,7 +240,7 @@
+ 
+ // We must be at the merge point which post-dominates 'new_false' and
+ // 'new_true'.  Figure out which edges into the RegionNode eventually lead up
+-// to false and which to true.  Put in a PhiNode to merge values; plug in 
++// to false and which to true.  Put in a PhiNode to merge values; plug in
+ // the appropriate false-arm or true-arm values.  If some path leads to the
+ // original IF, then insert a Phi recursively.
+ Node *PhaseIdealLoop::spinup( Node *iff_dom, Node *new_false, Node *new_true, Node *use_blk, Node *def, small_cache *cache ) {
+@@ -277,7 +274,7 @@
+     } else {
+       assert( def->is_Phi(), "" );
+       assert( prior_n->is_Region(), "must be a post-dominating merge point" );
+-  
++
+       // Need a Phi here
+       phi_post = PhiNode::make_blank(prior_n, def);
+       // Search for both true and false on all paths till find one.
+@@ -316,7 +313,7 @@
+ // along the corresponding path.
+ Node *PhaseIdealLoop::find_use_block( Node *use, Node *def, Node *old_false, Node *new_false, Node *old_true, Node *new_true ) {
+   // CFG uses are their own block
+-  if( use->is_CFG() ) 
++  if( use->is_CFG() )
+     return use;
+ 
+   if( use->is_Phi() ) {         // Phi uses in prior block
+@@ -324,12 +321,12 @@
+     // Each will be handled as a seperate iteration of
+     // the "while( phi->outcnt() )" loop.
+     uint j;
+-    for( j = 1; j < use->req(); j++ ) 
+-      if( use->in(j) == def ) 
++    for( j = 1; j < use->req(); j++ )
++      if( use->in(j) == def )
+         break;
+     assert( j < use->req(), "def should be among use's inputs" );
+     return use->in(0)->in(j);
+-  } 
++  }
+   // Normal (non-phi) use
+   Node *use_blk = get_ctrl(use);
+   // Some uses are directly attached to the old (and going away)
+@@ -342,7 +339,7 @@
+     use_blk = new_true;
+     set_ctrl(use, new_true);
+   }
+-  
++
+   if (use_blk == NULL) {        // He's dead, Jim
+     _igvn.hash_delete(use);
+     _igvn.subsume_node(use, C->top());
+@@ -371,11 +368,11 @@
+   // Walk up the dominator tree until I hit either the old IfFalse, the old
+   // IfTrue or the old If.  Insert Phis where needed.
+   Node *new_def = spinup( region_dom, new_false, new_true, use_blk, def, cache );
+-  
++
+   // Found where this USE goes.  Re-point him.
+   uint i;
+   for( i = 0; i < use->req(); i++ )
+-    if( use->in(i) == def ) 
++    if( use->in(i) == def )
+       break;
+   assert( i < use->req(), "def should be among use's inputs" );
+   _igvn.hash_delete(use);
+@@ -388,7 +385,7 @@
+ // Split thru the Region.
+ void PhaseIdealLoop::do_split_if( Node *iff ) {
+ #ifndef PRODUCT
+-  if( PrintOpto && VerifyLoopOptimizations ) 
++  if( PrintOpto && VerifyLoopOptimizations )
+     tty->print_cr("Split-if");
+ #endif
+   C->set_major_progress();
+@@ -396,7 +393,7 @@
+   Node *region_dom = idom(region);
+ 
+   // We are going to clone this test (and the control flow with it) up through
+-  // the incoming merge point.  We need to empty the current basic block. 
++  // the incoming merge point.  We need to empty the current basic block.
+   // Clone any instructions which must be in this block up through the merge
+   // point.
+   DUIterator i, j;
+@@ -503,7 +500,7 @@
+     if( phi->is_Phi() ) {
+       // Need a per-def cache.  Phi represents a def, so make a cache
+       small_cache phi_cache;
+-      
++
+       // Inspect all Phi uses to make the Phi go dead
+       for (DUIterator_Last lmin, l = phi->last_outs(lmin); l >= lmin; --l) {
+         Node* use = phi->last_out(l);
+@@ -514,7 +511,7 @@
+         // 2-element cache to handle multiple uses from the same block.
+         handle_use( use, phi, &phi_cache, region_dom, new_false, new_true, old_false, old_true );
+       } // End of while phi has uses
+-      
++
+       // Because handle_use might relocate region->_out,
+       // we must refresh the iterator.
+       k = region->last_outs(kmin);
+@@ -529,7 +526,7 @@
+ 
+   } // End of while merge point has phis
+ 
+-  // Any leftover bits in the splitting block must not have depended on local 
++  // Any leftover bits in the splitting block must not have depended on local
+   // Phi inputs (these have already been split-up).  Hence it's safe to hoist
+   // these guys to the dominating point.
+   lazy_replace( region, region_dom );
+@@ -537,4 +534,3 @@
+   if( VerifyLoopOptimizations ) verify();
+ #endif
+ }
+-
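The subnode.cpp diff that follows is the most substantial piece in this stretch: the loop-increment/induction-variable guard is factored into a shared ok_to_convert() helper, the existing "(x+c0) - y ==> (x-y) + c0" rewrite is rephrased in terms of it, and a mirrored "x - (y+c0) ==> (x-y) - c0" rewrite is added for both SubINode and SubLNode, materializing -c0 via intcon/longcon; SubNode::Value also starts looking through casts with eqv_uncast. The identities themselves are easy to check on concrete values (illustrative only; the real transform rewrites ideal-graph nodes, and the guard exists to avoid stretching loop-carried live ranges, not for correctness):

    #include <cassert>

    int main() {
      int x = 17, y = 5;
      const int c0 = 3;
      assert((x + c0) - y == (x - y) + c0);    // existing SubINode rule
      assert(x - (y + c0) == (x - y) - c0);    // rule added by this patch
      assert(x - (y + c0) == (x - y) + (-c0)); // how neg_c0 realizes it
      return 0;
    }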
+diff -ruN openjdk6/hotspot/src/share/vm/opto/subnode.cpp openjdk/hotspot/src/share/vm/opto/subnode.cpp
+--- openjdk6/hotspot/src/share/vm/opto/subnode.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/subnode.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)subnode.cpp	1.159 07/05/05 17:06:27 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Portions of code courtesy of Clifford Click
+@@ -35,7 +32,7 @@
+ 
+ //=============================================================================
+ //------------------------------Identity---------------------------------------
+-// If right input is a constant 0, return the left input.  
++// If right input is a constant 0, return the left input.
+ Node *SubNode::Identity( PhaseTransform *phase ) {
+   assert(in(1) != this, "Must already have called Value");
+   assert(in(2) != this, "Must already have called Value");
+@@ -66,7 +63,7 @@
+ }
+ 
+ //------------------------------Value------------------------------------------
+-// A subtract node differences it's two inputs.  
++// A subtract node differences its two inputs.
+ const Type *SubNode::Value( PhaseTransform *phase ) const {
+   const Node* in1 = in(1);
+   const Node* in2 = in(2);
+@@ -78,10 +75,10 @@
+ 
+   // Not correct for SubFnode and AddFNode (must check for infinity)
+   // Equal?  Subtract is zero
+-  if( phase->eqv(in1, in2) ) return add_id();
++  if (phase->eqv_uncast(in1, in2))  return add_id();
+ 
+   // Either input is BOTTOM ==> the result is the local BOTTOM
+-  if( t1 == Type::BOTTOM || t2 == Type::BOTTOM ) 
++  if( t1 == Type::BOTTOM || t2 == Type::BOTTOM )
+     return bottom_type();
+ 
+   return sub(t1,t2);            // Local flavor of type subtraction
+@@ -89,6 +86,31 @@
+ }
+ 
+ //=============================================================================
++
++//------------------------------Helper function--------------------------------
++static bool ok_to_convert(Node* inc, Node* iv) {
++    // Do not collapse (x+c0)-y if "+" is a loop increment, because the
++    // "-" is loop invariant and collapsing extends the live-range of "x"
++    // to overlap with the "+", forcing another register to be used in
++    // the loop.
++    // This test will be clearer with '&&' (apply DeMorgan's rule)
++    // but I like the early cutouts that happen here.
++    const PhiNode *phi;
++    if( ( !inc->in(1)->is_Phi() ||
++          !(phi=inc->in(1)->as_Phi()) ||
++          phi->is_copy() ||
++          !phi->region()->is_CountedLoop() ||
++          inc != phi->region()->as_CountedLoop()->incr() )
++       &&
++        // Do not collapse (x+c0)-iv if "iv" is a loop induction variable,
++        // because "x" maybe invariant.
++        ( !iv->is_loop_iv() )
++      ) {
++      return true;
++    } else {
++      return false;
++    }
++}
+ //------------------------------Ideal------------------------------------------
+ Node *SubINode::Ideal(PhaseGVN *phase, bool can_reshape){
+   Node *in1 = in(1);
+@@ -99,7 +121,7 @@
+ #ifdef ASSERT
+   // Check for dead loop
+   if( phase->eqv( in1, this ) || phase->eqv( in2, this ) ||
+-      ( op1 == Op_AddI || op1 == Op_SubI ) && 
++      ( op1 == Op_AddI || op1 == Op_SubI ) &&
+       ( phase->eqv( in1->in(1), this ) || phase->eqv( in1->in(2), this ) ||
+         phase->eqv( in1->in(1), in1  ) || phase->eqv( in1->in(2), in1 ) ) )
+     assert(false, "dead loop in SubINode::Ideal");
+@@ -115,33 +137,27 @@
+   }
+ 
+   // Convert "(x+c0) - y" into (x-y) + c0"
+-  if( op1 == Op_AddI ) {
+-    // Do not collapse (x+y)-y if "+" is a loop increment, because the
+-    // "-" is loop invariant and collapsing extends the live-range of "x"
+-    // to overlap with the "+", forcing another register to be used in
+-    // the loop.
+-    const PhiNode *phi;
+-    // This test will be clearer with '&&' (apply DeMorgan's rule)
+-    // but I like the early cutouts that happen here.
+-    if( ( !in1->in(1)->is_Phi() ||
+-          !(phi=in1->in(1)->as_Phi()) ||
+-          phi->is_copy() ||
+-          !phi->region()->is_CountedLoop() ||
+-          in1 != phi->region()->as_CountedLoop()->incr() )
+-       &&
+-        // Do not collapse (x+c0)-iv if "iv" is a loop induction variable,
+-        // because "x" maybe invariant.
+-        ( !in2->is_Phi() ||
+-          !(phi=in2->as_Phi()) ||
+-          phi->is_copy() ||
+-          !phi->region()->is_CountedLoop() ||
+-          (Node*)phi != phi->region()->as_CountedLoop()->phi() )
+-      ) { 
+-      const Type *tadd = phase->type( in1->in(2) );
+-      if( tadd->singleton() && tadd != Type::TOP ) {
+-        Node *sub2 = phase->transform( new (phase->C, 3) SubINode( in1->in(1), in2 ));
+-        return new (phase->C, 3) AddINode( sub2, in1->in(2) );
+-      }
++  // Do not collapse (x+c0)-y if "+" is a loop increment or
++  // if "y" is a loop induction variable.
++  if( op1 == Op_AddI && ok_to_convert(in1, in2) ) {
++    const Type *tadd = phase->type( in1->in(2) );
++    if( tadd->singleton() && tadd != Type::TOP ) {
++      Node *sub2 = phase->transform( new (phase->C, 3) SubINode( in1->in(1), in2 ));
++      return new (phase->C, 3) AddINode( sub2, in1->in(2) );
++    }
++  }
++
++
++  // Convert "x - (y+c0)" into "(x-y) - c0"
++  // Need the same check as in above optimization but reversed.
++  if (op2 == Op_AddI && ok_to_convert(in2, in1)) {
++    Node* in21 = in2->in(1);
++    Node* in22 = in2->in(2);
++    const TypeInt* tcon = phase->type(in22)->isa_int();
++    if (tcon != NULL && tcon->is_con()) {
++      Node* sub2 = phase->transform( new (phase->C, 3) SubINode(in1, in21) );
++      Node* neg_c0 = phase->intcon(- tcon->get_con());
++      return new (phase->C, 3) AddINode(sub2, neg_c0);
+     }
+   }
+ 
+@@ -150,7 +166,7 @@
+ 
+ #ifdef ASSERT
+   // Check for dead loop
+-  if( ( op2 == Op_AddI || op2 == Op_SubI ) && 
++  if( ( op2 == Op_AddI || op2 == Op_SubI ) &&
+       ( phase->eqv( in2->in(1), this ) || phase->eqv( in2->in(2), this ) ||
+         phase->eqv( in2->in(1), in2  ) || phase->eqv( in2->in(2), in2  ) ) )
+     assert(false, "dead loop in SubINode::Ideal");
+@@ -170,7 +186,7 @@
+     return new (phase->C, 3) SubINode( phase->intcon(0),in2->in(1));
+ 
+   // Convert "0 - (x-y)" into "y-x"
+-  if( t1 == TypeInt::ZERO && op2 == Op_SubI ) 
++  if( t1 == TypeInt::ZERO && op2 == Op_SubI )
+     return new (phase->C, 3) SubINode( in2->in(2), in2->in(1) );
+ 
+   // Convert "0 - (x+con)" into "-con-x"
+@@ -198,7 +214,7 @@
+ }
+ 
+ //------------------------------sub--------------------------------------------
+-// A subtract node differences it's two inputs.  
++// A subtract node differences its two inputs.
+ const Type *SubINode::sub( const Type *t1, const Type *t2 ) const {
+   const TypeInt *r0 = t1->is_int(); // Handy access
+   const TypeInt *r1 = t2->is_int();
+@@ -227,7 +243,7 @@
+ #ifdef ASSERT
+   // Check for dead loop
+   if( phase->eqv( in1, this ) || phase->eqv( in2, this ) ||
+-      ( op1 == Op_AddL || op1 == Op_SubL ) && 
++      ( op1 == Op_AddL || op1 == Op_SubL ) &&
+       ( phase->eqv( in1->in(1), this ) || phase->eqv( in1->in(2), this ) ||
+         phase->eqv( in1->in(1), in1  ) || phase->eqv( in1->in(2), in1  ) ) )
+     assert(false, "dead loop in SubLNode::Ideal");
+@@ -241,31 +257,36 @@
+     return new (phase->C, 3) AddLNode(in1, phase->longcon(-i->get_con()));
+ 
+   // Convert "(x+c0) - y" into (x-y) + c0"
+-  if( op1 == Op_AddL ) {
+-    // Do not collapse (x+y)-y if "+" is a loop increment, because the
+-    // "-" is loop invariant and collapsing extends the live-range of "x"
+-    // to overlap with the "+", forcing another register to be used in
+-    // the loop.
++  // Do not collapse (x+c0)-y if "+" is a loop increment or
++  // if "y" is a loop induction variable.
++  if( op1 == Op_AddL && ok_to_convert(in1, in2) ) {
+     Node *in11 = in1->in(1);
+-    const PhiNode *phi = in11->is_Phi() ? in11->as_Phi() : NULL;
+-    if( phi == NULL ||
+-        phi->is_copy() ||
+-        !phi->region()->is_CountedLoop() ||
+-        in1 != phi->region()->as_CountedLoop()->incr() ) {
+-      const Type *tadd = phase->type( in1->in(2) );
+-      if( tadd->singleton() && tadd != Type::TOP ) {
+-        Node *sub2 = phase->transform( new (phase->C, 3) SubLNode( in11, in2 ));
+-        return new (phase->C, 3) AddLNode( sub2, in1->in(2) );
+-      }
++    const Type *tadd = phase->type( in1->in(2) );
++    if( tadd->singleton() && tadd != Type::TOP ) {
++      Node *sub2 = phase->transform( new (phase->C, 3) SubLNode( in11, in2 ));
++      return new (phase->C, 3) AddLNode( sub2, in1->in(2) );
++    }
++  }
++
++  // Convert "x - (y+c0)" into "(x-y) - c0"
++  // Need the same check as in above optimization but reversed.
++  if (op2 == Op_AddL && ok_to_convert(in2, in1)) {
++    Node* in21 = in2->in(1);
++    Node* in22 = in2->in(2);
++    const TypeLong* tcon = phase->type(in22)->isa_long();
++    if (tcon != NULL && tcon->is_con()) {
++      Node* sub2 = phase->transform( new (phase->C, 3) SubLNode(in1, in21) );
++      Node* neg_c0 = phase->longcon(- tcon->get_con());
++      return new (phase->C, 3) AddLNode(sub2, neg_c0);
+     }
+   }
+-                       
++
+   const Type *t1 = phase->type( in1 );
+   if( t1 == Type::TOP ) return NULL;
+ 
+ #ifdef ASSERT
+   // Check for dead loop
+-  if( ( op2 == Op_AddL || op2 == Op_SubL ) && 
++  if( ( op2 == Op_AddL || op2 == Op_SubL ) &&
+       ( phase->eqv( in2->in(1), this ) || phase->eqv( in2->in(2), this ) ||
+         phase->eqv( in2->in(1), in2  ) || phase->eqv( in2->in(2), in2  ) ) )
+     assert(false, "dead loop in SubLNode::Ideal");
+@@ -281,7 +302,7 @@
+     return new (phase->C, 3) SubLNode( phase->makecon(TypeLong::ZERO),in2->in(1));
+ 
+   // Convert "0 - (x-y)" into "y-x"
+-  if( phase->type( in1 ) == TypeLong::ZERO && op2 == Op_SubL ) 
++  if( phase->type( in1 ) == TypeLong::ZERO && op2 == Op_SubL )
+     return new (phase->C, 3) SubLNode( in2->in(2), in2->in(1) );
+ 
+   // Convert "(X+A) - (X+B)" into "A - B"
+@@ -302,7 +323,7 @@
+ }
+ 
+ //------------------------------sub--------------------------------------------
+-// A subtract node differences it's two inputs.  
++// A subtract node differences its two inputs.
+ const Type *SubLNode::sub( const Type *t1, const Type *t2 ) const {
+   const TypeLong *r0 = t1->is_long(); // Handy access
+   const TypeLong *r1 = t2->is_long();
+@@ -322,7 +343,7 @@
+ 
+ //=============================================================================
+ //------------------------------Value------------------------------------------
+-// A subtract node differences its two inputs.  
++// A subtract node differences its two inputs.
+ const Type *SubFPNode::Value( PhaseTransform *phase ) const {
+   const Node* in1 = in(1);
+   const Node* in2 = in(2);
+@@ -341,7 +362,7 @@
+   // Either input is BOTTOM ==> the result is the local BOTTOM
+   const Type *bot = bottom_type();
+   if( (t1 == bot) || (t2 == bot) ||
+-      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) ) 
++      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) )
+     return bot;
+ 
+   return sub(t1,t2);            // Local flavor of type subtraction
+@@ -356,12 +377,12 @@
+   if( t2->base() == Type::FloatCon ) {  // Might be bottom or top...
+     // return new (phase->C, 3) AddFNode(in(1), phase->makecon( TypeF::make(-t2->getf()) ) );
+   }
+-                       
++
+   // Not associative because of boundary conditions (infinity)
+   if( IdealizedNumerics && !phase->C->method()->is_strict() ) {
+     // Convert "x - (x+y)" into "-y"
+     if( in(2)->is_Add() &&
+-        phase->eqv(in(1),in(2)->in(1) ) ) 
++        phase->eqv(in(1),in(2)->in(1) ) )
+       return new (phase->C, 3) SubFNode( phase->makecon(TypeF::ZERO),in(2)->in(2));
+   }
+ 
+@@ -374,18 +395,18 @@
+ }
+ 
+ //------------------------------sub--------------------------------------------
+-// A subtract node differences its two inputs.  
++// A subtract node differences its two inputs.
+ const Type *SubFNode::sub( const Type *t1, const Type *t2 ) const {
+   // no folding if one of operands is infinity or NaN, do not do constant folding
+   if( g_isfinite(t1->getf()) && g_isfinite(t2->getf()) ) {
+     return TypeF::make( t1->getf() - t2->getf() );
+-  } 
++  }
+   else if( g_isnan(t1->getf()) ) {
+     return t1;
+-  } 
++  }
+   else if( g_isnan(t2->getf()) ) {
+     return t2;
+-  } 
++  }
+   else {
+     return Type::FLOAT;
+   }
+@@ -399,12 +420,12 @@
+   if( t2->base() == Type::DoubleCon ) { // Might be bottom or top...
+     // return new (phase->C, 3) AddDNode(in(1), phase->makecon( TypeD::make(-t2->getd()) ) );
+   }
+-                       
++
+   // Not associative because of boundary conditions (infinity)
+-  if( IdealizedNumerics && !phase->C->method()->is_strict() ) { 
++  if( IdealizedNumerics && !phase->C->method()->is_strict() ) {
+     // Convert "x - (x+y)" into "-y"
+     if( in(2)->is_Add() &&
+-        phase->eqv(in(1),in(2)->in(1) ) ) 
++        phase->eqv(in(1),in(2)->in(1) ) )
+       return new (phase->C, 3) SubDNode( phase->makecon(TypeD::ZERO),in(2)->in(2));
+   }
+ 
+@@ -417,7 +438,7 @@
+ }
+ 
+ //------------------------------sub--------------------------------------------
+-// A subtract node differences its two inputs.  
++// A subtract node differences its two inputs.
+ const Type *SubDNode::sub( const Type *t1, const Type *t2 ) const {
+   // no folding if one of operands is infinity or NaN, do not do constant folding
+   if( g_isfinite(t1->getd()) && g_isfinite(t2->getd()) ) {
+@@ -446,7 +467,7 @@
+ //=============================================================================
+ //------------------------------cmp--------------------------------------------
+ // Simplify a CmpI (compare 2 integers) node, based on local information.
+-// If both inputs are constants, compare them.  
++// If both inputs are constants, compare them.
+ const Type *CmpINode::sub( const Type *t1, const Type *t2 ) const {
+   const TypeInt *r0 = t1->is_int(); // Handy access
+   const TypeInt *r1 = t2->is_int();
+@@ -467,7 +488,7 @@
+ }
+ 
+ // Simplify a CmpU (compare 2 integers) node, based on local information.
+-// If both inputs are constants, compare them.  
++// If both inputs are constants, compare them.
+ const Type *CmpUNode::sub( const Type *t1, const Type *t2 ) const {
+   assert(!t1->isa_ptr(), "obsolete usage of CmpU");
+ 
+@@ -509,7 +530,7 @@
+       return TypeInt::CC_GE;
+     } else if (hi0 <= lo1) {
+       // Check for special case in Hashtable::get.  (See below.)
+-      if ((jint)lo0 >= 0 && (jint)lo1 >= 0 && 
++      if ((jint)lo0 >= 0 && (jint)lo1 >= 0 &&
+           in(1)->Opcode() == Op_ModI &&
+           in(1)->in(2) == in(2) )
+         return TypeInt::CC_LT;
+@@ -522,7 +543,7 @@
+   // to be positive.
+   // (This is a gross hack, since the sub method never
+   // looks at the structure of the node in any other case.)
+-  if ((jint)lo0 >= 0 && (jint)lo1 >= 0 && 
++  if ((jint)lo0 >= 0 && (jint)lo1 >= 0 &&
+       in(1)->Opcode() == Op_ModI &&
+       in(1)->in(2)->uncast() == in(2)->uncast())
+     return TypeInt::CC_LT;
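
Note: the two hunks above touch a deliberate special case in CmpUNode::sub. When both ranges are known non-negative and the first input is x % y with the same y as the second input, the compare folds straight to CC_LT, because 0 <= x % y < y for such operands; the comment cites Hashtable::get, where this removes the bounds check on hash % table.length. A minimal standalone sketch of the identity being relied on (the jint typedef is a stand-in for this sketch, not part of the patch):

    #include <cassert>
    typedef int jint;                  // stand-in for HotSpot's jint

    // For non-negative x and positive y the remainder is a valid index,
    // which is exactly what lets CmpU fold the range check to CC_LT.
    jint table_index(jint hash, jint length) {
      assert(hash >= 0 && length > 0);
      jint i = hash % length;
      assert(0 <= i && i < length);
      return i;
    }

    int main() { return table_index(12345, 16) == 9 ? 0 : 1; }
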
+@@ -551,7 +572,7 @@
+ 
+ //=============================================================================
+ // Simplify a CmpL (compare 2 longs ) node, based on local information.
+-// If both inputs are constants, compare them.  
++// If both inputs are constants, compare them.
+ const Type *CmpLNode::sub( const Type *t1, const Type *t2 ) const {
+   const TypeLong *r0 = t1->is_long(); // Handy access
+   const TypeLong *r1 = t2->is_long();
+@@ -574,14 +595,14 @@
+ //=============================================================================
+ //------------------------------sub--------------------------------------------
+ // Simplify an CmpP (compare 2 pointers) node, based on local information.
+-// If both inputs are constants, compare them.  
++// If both inputs are constants, compare them.
+ const Type *CmpPNode::sub( const Type *t1, const Type *t2 ) const {
+   const TypePtr *r0 = t1->is_ptr(); // Handy access
+   const TypePtr *r1 = t2->is_ptr();
+-        
++
+   // Undefined inputs makes for an undefined result
+   if( TypePtr::above_centerline(r0->_ptr) ||
+-      TypePtr::above_centerline(r1->_ptr) ) 
++      TypePtr::above_centerline(r1->_ptr) )
+     return Type::TOP;
+ 
+   if (r0 == r1 && r0->singleton()) {
+@@ -625,13 +646,13 @@
+   // Unknown inputs makes an unknown result
+   if( r0->singleton() ) {
+     intptr_t bits0 = r0->get_con();
+-    if( r1->singleton() ) 
++    if( r1->singleton() )
+       return bits0 == r1->get_con() ? TypeInt::CC_EQ : TypeInt::CC_GT;
+     return ( r1->_ptr == TypePtr::NotNull && bits0==0 ) ? TypeInt::CC_GT : TypeInt::CC;
+   } else if( r1->singleton() ) {
+     intptr_t bits1 = r1->get_con();
+     return ( r0->_ptr == TypePtr::NotNull && bits1==0 ) ? TypeInt::CC_GT : TypeInt::CC;
+-  } else 
++  } else
+     return TypeInt::CC;
+ }
+ 
+@@ -700,8 +721,8 @@
+       phase->C->dependencies()->assert_leaf_type(ik);
+     }
+   }
+-  
+-  // Bypass the dependent load, and compare directly 
++
++  // Bypass the dependent load, and compare directly
+   this->set_req(1,ldk2);
+ 
+   return this;
+@@ -710,7 +731,7 @@
+ //=============================================================================
+ //------------------------------Value------------------------------------------
+ // Simplify an CmpF (compare 2 floats ) node, based on local information.
+-// If both inputs are constants, compare them.  
++// If both inputs are constants, compare them.
+ const Type *CmpFNode::Value( PhaseTransform *phase ) const {
+   const Node* in1 = in(1);
+   const Node* in2 = in(2);
+@@ -720,7 +741,7 @@
+   const Type* t2 = (in2 == this) ? Type::TOP : phase->type(in2);
+   if( t2 == Type::TOP ) return Type::TOP;
+ 
+-  // Not constants?  Don't know squat - even if they are the same 
++  // Not constants?  Don't know squat - even if they are the same
+   // value!  If they are NaN's they compare to LT instead of EQ.
+   const TypeF *tf1 = t1->isa_float_constant();
+   const TypeF *tf2 = t2->isa_float_constant();
+@@ -740,7 +761,7 @@
+ //=============================================================================
+ //------------------------------Value------------------------------------------
+ // Simplify an CmpD (compare 2 doubles ) node, based on local information.
+-// If both inputs are constants, compare them.  
++// If both inputs are constants, compare them.
+ const Type *CmpDNode::Value( PhaseTransform *phase ) const {
+   const Node* in1 = in(1);
+   const Node* in2 = in(2);
+@@ -750,7 +771,7 @@
+   const Type* t2 = (in2 == this) ? Type::TOP : phase->type(in2);
+   if( t2 == Type::TOP ) return Type::TOP;
+ 
+-  // Not constants?  Don't know squat - even if they are the same 
++  // Not constants?  Don't know squat - even if they are the same
+   // value!  If they are NaN's they compare to LT instead of EQ.
+   const TypeD *td1 = t1->isa_double_constant();
+   const TypeD *td2 = t2->isa_double_constant();
+@@ -774,10 +795,10 @@
+   // Valid when 'value' does not lose precision as a float.
+   // Benefits: eliminates conversion, does not require 24-bit mode
+ 
+-  // NaNs prevent commuting operands.  This transform works regardless of the 
++  // NaNs prevent commuting operands.  This transform works regardless of the
+   // order of ConD and ConvF2D inputs by preserving the original order.
+   int idx_f2d = 1;              // ConvF2D on left side?
+-  if( in(idx_f2d)->Opcode() != Op_ConvF2D ) 
++  if( in(idx_f2d)->Opcode() != Op_ConvF2D )
+     idx_f2d = 2;                // No, swap to check for reversed args
+   int idx_con = 3-idx_f2d;      // Check for the constant on other input
+ 
+@@ -797,8 +818,8 @@
+         new_in1 = new_in2;
+         new_in2 = tmp;
+       }
+-      CmpFNode *new_cmp = (Opcode() == Op_CmpD3) 
+-        ? new (phase->C, 3) CmpF3Node( new_in1, new_in2 ) 
++      CmpFNode *new_cmp = (Opcode() == Op_CmpD3)
++        ? new (phase->C, 3) CmpF3Node( new_in1, new_in2 )
+         : new (phase->C, 3) CmpFNode ( new_in1, new_in2 ) ;
+       return new_cmp;           // Changed to CmpFNode
+     }
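
Note: the surrounding Ideal hunk (whitespace-only in this patch) carries the CmpD-to-CmpF strength reduction its comments describe: CmpD(ConvF2D(x), ConD(c)) becomes CmpF(x, ConF(c)) whenever the double constant loses no precision as a float, and the operand order is preserved because NaN comparisons cannot be commuted. A minimal sketch of the exactness test, under the assumption that a lossless round-trip is the criterion (NaN fails the round-trip, conservatively keeping CmpD):

    #include <cassert>
    // Not part of the patch: checks whether a double constant can be
    // compared in float precision without changing the result.
    bool representable_as_float(double c) {
      float f = (float)c;        // narrow to float
      return (double)f == c;     // exact round-trip; false for NaN
    }
    int main() {
      assert(representable_as_float(0.5));    // exact in float
      assert(!representable_as_float(0.1));   // not exact in float
      return 0;
    }
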
+@@ -817,11 +838,11 @@
+   const TypeInt *ti = CC->is_int();
+   if( ti->is_con() ) {          // Only 1 kind of condition codes set?
+     // Match low order 2 bits
+-    int tmp = ((ti->get_con()&3) == (_test&3)) ? 1 : 0; 
++    int tmp = ((ti->get_con()&3) == (_test&3)) ? 1 : 0;
+     if( _test & 4 ) tmp = 1-tmp;     // Optionally complement result
+     return TypeInt::make(tmp);       // Boolean result
+   }
+- 
++
+   if( CC == TypeInt::CC_GE ) {
+     if( _test == ge ) return TypeInt::ONE;
+     if( _test == lt ) return TypeInt::ZERO;
+@@ -837,14 +858,14 @@
+ //------------------------------dump_spec-------------------------------------
+ // Print special per-node info
+ #ifndef PRODUCT
+-void BoolTest::dump() const {
++void BoolTest::dump_on(outputStream *st) const {
+   const char *msg[] = {"eq","gt","??","lt","ne","le","??","ge"};
+-  tty->print(msg[_test]);
++  st->print(msg[_test]);
+ }
+ #endif
+ 
+ //=============================================================================
+-uint BoolNode::hash() const { return (Node::hash() << 3)|(_test._test+1); } 
++uint BoolNode::hash() const { return (Node::hash() << 3)|(_test._test+1); }
+ uint BoolNode::size_of() const { return sizeof(BoolNode); }
+ 
+ //------------------------------operator==-------------------------------------
+@@ -952,7 +973,7 @@
+     Node *ncmp = phase->transform(new (phase->C, 3) CmpINode(j_xor->in(1),cmp2));
+     return new (phase->C, 2) BoolNode( ncmp, _test.negate() );
+   }
+-  
++
+   // Change "bool eq/ne (cmp (Conv2B X) 0)" into "bool eq/ne (cmp X 0)".
+   // This is a standard idiom for branching on a boolean value.
+   Node *c2b = cmp1;
+@@ -990,19 +1011,19 @@
+     return new (phase->C, 2) BoolNode( ncmp, _test.commute() );
+   }
+ 
+-  //  The transformation below is not valid for either signed or unsigned 
+-  //  comparisons due to wraparound concerns at MAX_VALUE and MIN_VALUE.  
+-  //  This transformation can be resurrected when we are able to  
++  //  The transformation below is not valid for either signed or unsigned
++  //  comparisons due to wraparound concerns at MAX_VALUE and MIN_VALUE.
++  //  This transformation can be resurrected when we are able to
+   //  make inferences about the range of values being subtracted from
+   //  (or added to) relative to the wraparound point.
+   //
+-  //    // Remove +/-1's if possible.  
++  //    // Remove +/-1's if possible.
+   //    // "X <= Y-1" becomes "X <  Y"
+   //    // "X+1 <= Y" becomes "X <  Y"
+   //    // "X <  Y+1" becomes "X <= Y"
+   //    // "X-1 <  Y" becomes "X <= Y"
+   //    // Do not this to compares off of the counted-loop-end.  These guys are
+-  //    // checking the trip counter and they want to use the post-incremented 
++  //    // checking the trip counter and they want to use the post-incremented
+   //    // counter.  If they use the PRE-incremented counter, then the counter has
+   //    // to be incremented in a private block on a loop backedge.
+   //    if( du && du->cnt(this) && du->out(this)[0]->Opcode() == Op_CountedLoopEnd )
+@@ -1021,7 +1042,7 @@
+   //    int cmp2_op = cmp2->Opcode();
+   //    if( _test._test == BoolTest::le ) {
+   //      if( cmp1_op == Op_AddI &&
+-  //          phase->type( cmp1->in(2) ) == TypeInt::ONE ) 
++  //          phase->type( cmp1->in(2) ) == TypeInt::ONE )
+   //        return clone_cmp( cmp, cmp1->in(1), cmp2, phase, BoolTest::lt );
+   //      else if( cmp2_op == Op_AddI &&
+   //         phase->type( cmp2->in(2) ) == TypeInt::MINUS_1 )
+@@ -1034,7 +1055,7 @@
+   //         phase->type( cmp2->in(2) ) == TypeInt::ONE )
+   //        return clone_cmp( cmp, cmp1, cmp2->in(1), phase, BoolTest::le );
+   //    }
+-    
++
+   return NULL;
+ }
+ 
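
Note: the block of commented-out rewrites above is kept dead for the reason its header states: the +/-1 adjustments are unsound at the ends of the jint range. A concrete instance, with the caveat that jint arithmetic wraps in the VM while signed overflow is formally undefined in portable C++, so the wrap below is routed through unsigned arithmetic (illustration only, not part of the patch):

    #include <cstdint>
    // "X <= Y-1" is not equivalent to "X < Y" once Y-1 can wrap:
    // with y == INT32_MIN, y-1 wraps to INT32_MAX, so the original
    // test is always true while the rewritten test is always false.
    bool original (int32_t x, int32_t y) { return x <= (int32_t)((uint32_t)y - 1u); }
    bool rewritten(int32_t x, int32_t y) { return x < y; }
    int main() {
      return (original(0, INT32_MIN) && !rewritten(0, INT32_MIN)) ? 0 : 1;
    }
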
+@@ -1048,10 +1069,10 @@
+ //------------------------------dump_spec--------------------------------------
+ // Dump special per-node info
+ #ifndef PRODUCT
+-void BoolNode::dump_spec() const {
+-  tty->print("[");
+-  _test.dump();
+-  tty->print("]");
++void BoolNode::dump_spec(outputStream *st) const {
++  st->print("[");
++  _test.dump_on(st);
++  st->print("]");
+ }
+ #endif
+ 
+@@ -1183,4 +1204,3 @@
+   if( d2 < 0.0 ) return Type::DOUBLE;
+   return TypeD::make( SharedRuntime::dpow( d1, d2 ) );
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/opto/subnode.hpp openjdk/hotspot/src/share/vm/opto/subnode.hpp
+--- openjdk6/hotspot/src/share/vm/opto/subnode.hpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/subnode.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)subnode.hpp	1.85 07/05/05 17:06:29 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Portions of code courtesy of Clifford Click
+@@ -30,7 +27,7 @@
+ //------------------------------SUBNode----------------------------------------
+ // Class SUBTRACTION functionality.  This covers all the usual 'subtract'
+ // behaviors.  Subtract-integer, -float, -double, binary xor, compare-integer,
+-// -float, and -double are all inherited from this class.  The compare 
++// -float, and -double are all inherited from this class.  The compare
+ // functions behave like subtract functions, except that all negative answers
+ // are compressed into -1, and all positive answers compressed to 1.
+ class SubNode : public Node {
+@@ -40,7 +37,7 @@
+   }
+ 
+   // Handle algebraic identities here.  If we have an identity, return the Node
+-  // we are equivalent to.  We look for "add of zero" as an identity.  
++  // we are equivalent to.  We look for "add of zero" as an identity.
+   virtual Node *Identity( PhaseTransform *phase );
+ 
+   // Compute a new Type for this node.  Basically we just do the pre-check,
+@@ -259,7 +256,7 @@
+   mask negate( ) const { return mask(_test^4); }
+   bool is_canonical( ) const { return (_test == BoolTest::ne || _test == BoolTest::lt || _test == BoolTest::le); }
+ #ifndef PRODUCT
+-  void dump() const;
++  void dump_on(outputStream *st) const;
+ #endif
+ };
+ 
+@@ -289,7 +286,7 @@
+ 
+   bool is_counted_loop_exit_test();
+ #ifndef PRODUCT
+-  virtual void dump_spec() const;
++  virtual void dump_spec(outputStream *st) const;
+ #endif
+ };
+ 
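
Note: the subnode.cpp and subnode.hpp hunks above convert the debug printers from writing to the global tty to taking an explicit stream: BoolTest::dump() becomes BoolTest::dump_on(outputStream *st), BoolNode::dump_spec() gains the same parameter, and every tty->print becomes st->print. A minimal standalone sketch of the pattern (the outputStream here is an illustrative stand-in, not HotSpot's class):

    #include <cstdio>
    struct outputStream {                      // stand-in, not HotSpot's class
      void print(const char* s) { std::fputs(s, stdout); }
    };
    static outputStream tty_stream;            // plays the role of the global tty
    struct BoolTestSketch {
      int _test;
      void dump_on(outputStream* st) const {   // stream chosen by the caller
        const char* msg[] = {"eq","gt","??","lt","ne","le","??","ge"};
        st->print(msg[_test & 7]);
      }
    };
    int main() {
      BoolTestSketch t = { 3 };                // 3 == "lt" in the table above
      t.dump_on(&tty_stream);                  // old behavior: pass the tty stream
      return 0;
    }
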
+diff -ruN openjdk6/hotspot/src/share/vm/opto/superword.cpp openjdk/hotspot/src/share/vm/opto/superword.cpp
+--- openjdk6/hotspot/src/share/vm/opto/superword.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/superword.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)superword.cpp	1.6 07/09/25 22:02:47 JVM"
+-#endif
+ /*
+  * Copyright 2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -51,10 +48,10 @@
+   _n_idx_list(arena(), 8),                // scratch list of (node,index) pairs
+   _stk(arena(), 8, 0, NULL),              // scratch stack of nodes
+   _nlist(arena(), 8, 0, NULL),            // scratch list of nodes
+-  _lpt(NULL),          	                  // loop tree node
+-  _lp(NULL),           	                  // LoopNode
+-  _bb(NULL),           	                  // basic block
+-  _iv(NULL)           	                  // induction var
++  _lpt(NULL),                             // loop tree node
++  _lp(NULL),                              // LoopNode
++  _bb(NULL),                              // basic block
++  _iv(NULL)                               // induction var
+ {}
+ 
+ //------------------------------transform_loop---------------------------
+@@ -82,7 +79,7 @@
+   set_lpt(lpt);
+   set_lp(cl);
+ 
+- // For now, define one block which is the entire loop body 
++ // For now, define one block which is the entire loop body
+   set_bb(cl);
+ 
+   assert(_packset.length() == 0, "packset must be empty");
+@@ -484,7 +481,7 @@
+         }
+       }
+     }
+-  } 
++  }
+   return false;
+ }
+ 
+@@ -958,7 +955,7 @@
+       _igvn.hash_delete(ld);
+       ld->set_req(MemNode::Memory, first_mem);
+       _igvn._worklist.push(ld);
+-    }        
++    }
+   }
+ }
+ 
+@@ -1154,7 +1151,7 @@
+ // Construct reverse postorder list of block members
+ void SuperWord::construct_bb() {
+   Node* entry = bb();
+-  
++
+   assert(_stk.length() == 0,            "stk is empty");
+   assert(_block.length() == 0,          "block is empty");
+   assert(_data_entry.length() == 0,     "data_entry is empty");
+@@ -1283,10 +1280,10 @@
+   // Make room
+   for (int i = _block.length() - 1; i >= n_pos; i--) {
+     _block.at_put_grow(i+1, _block.at(i));
+-  }    
++  }
+   for (int j = _node_info.length() - 1; j >= n_pos; j--) {
+     _node_info.at_put_grow(j+1, _node_info.at(j));
+-  }    
++  }
+   // Set value
+   _block.at_put_grow(n_pos, n);
+   _node_info.at_put_grow(n_pos, SWNodeInfo::initial);
+@@ -1350,7 +1347,7 @@
+     const Type* vt = container_type(t);
+     set_velt_type(n, vt);
+   }
+-  
++
+   // Propagate narrowed type backwards through operations
+   // that don't depend on higher order bits
+   for (int i = _block.length() - 1; i >= 0; i--) {
+@@ -1589,7 +1586,7 @@
+       limk = new (_phase->C, 3) AddINode(limk, aref);
+     } else {
+       limk = new (_phase->C, 3) SubINode(limk, aref);
+-    }      
++    }
+     _phase->_igvn.register_new_node_with_optimizer(limk);
+     _phase->set_ctrl(limk, pre_ctrl);
+   }
+@@ -1734,6 +1731,12 @@
+   assert(valid(), "Usable");
+ }
+ 
++// Following is used to create a temporary object during
++// the pattern match of an address expression.
++SWPointer::SWPointer(SWPointer* p) :
++  _mem(p->_mem), _slp(p->_slp),  _base(NULL),  _adr(NULL),
++  _scale(0), _offset(0), _invar(NULL), _negate_invar(false) {}
++
+ //------------------------scaled_iv_plus_offset--------------------
+ // Match: k*iv + offset
+ // where: k is a constant that maybe zero, and
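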
+@@ -1789,6 +1792,25 @@
+       _scale = 1 << n->in(2)->get_int();
+       return true;
+     }
++  } else if (opc == Op_ConvI2L) {
++    if (scaled_iv_plus_offset(n->in(1))) {
++      return true;
++    }
++  } else if (opc == Op_LShiftL) {
++    if (!has_iv() && _invar == NULL) {
++      // Need to preserve the current _offset value, so
++      // create a temporary object for this expression subtree.
++      // Hacky, so should re-engineer the address pattern match.
++      SWPointer tmp(this);
++      if (tmp.scaled_iv_plus_offset(n->in(1))) {
++        if (tmp._invar == NULL) {
++          int mult = 1 << n->in(2)->get_int();
++          _scale   = tmp._scale  * mult;
++          _offset += tmp._offset * mult;
++          return true;
++        }
++      }
++    }
+   }
+   return false;
+ }
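
Note: the two new cases above extend SWPointer's match of k*iv + offset to 64-bit address math: ConvI2L is transparent, and LShiftL distributes over an already-matched subtree, since (k*iv + c) << s equals (k*2^s)*iv + c*2^s. The temporary SWPointer added in the previous hunk exists so the recursive match of n->in(1) accumulates into a scratch object, and the outer _scale and _offset are only updated if the whole subtree matches. A quick check of the algebra (not part of the patch):

    #include <cassert>
    int main() {
      const long iv = 7, scale = 4, offset = 12;  // an already-matched k*iv + c
      const int  s  = 3;                          // shift distance
      const long mult = 1L << s;                  // mirrors 1 << n->in(2)->get_int()
      assert(((scale * iv + offset) << s) ==
             (scale * mult) * iv + offset * mult);
      return 0;
    }
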
+@@ -1801,6 +1823,16 @@
+   if (opc == Op_ConI) {
+     _offset += negate ? -(n->get_int()) : n->get_int();
+     return true;
++  } else if (opc == Op_ConL) {
++    // Okay if value fits into an int
++    const TypeLong* t = n->find_long_type();
++    if (t->higher_equal(TypeLong::INT)) {
++      jlong loff = n->get_long();
++      jint  off  = (jint)loff;
++      _offset += negate ? -off : loff;
++      return true;
++    }
++    return false;
+   }
+   if (_invar != NULL) return false; // already have an invariant
+   if (opc == Op_AddI) {
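
Note: the Op_ConL case above folds a long constant into the 32-bit _offset only when the node's type fits the int range (t->higher_equal(TypeLong::INT)); under that guard (jint)loff and loff denote the same value, so the mixed -off/loff expression is harmless. A sketch of the same idea, using a direct value check where the patch uses the type lattice (an assumption of this sketch, not the patch's mechanism):

    #include <cstdint>
    // Fold a 64-bit constant into a 32-bit offset only if narrowing is lossless.
    bool fold_offset(int64_t loff, int32_t& offset, bool negate) {
      int32_t off = (int32_t)loff;
      if ((int64_t)off != loff) return false;  // would truncate: reject
      offset += negate ? -off : off;           // matches "negate ? -off : loff"
      return true;
    }
    int main() {
      int32_t offset = 0;
      return (fold_offset(100, offset, false) && offset == 100 &&
              !fold_offset(int64_t(1) << 40, offset, false)) ? 0 : 1;
    }
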
+diff -ruN openjdk6/hotspot/src/share/vm/opto/superword.hpp openjdk/hotspot/src/share/vm/opto/superword.hpp
+--- openjdk6/hotspot/src/share/vm/opto/superword.hpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/superword.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)superword.hpp	1.6 07/05/17 16:01:57 JVM"
+-#endif
+ /*
+  * Copyright 2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -37,11 +34,11 @@
+ //   Samuel Larsen and Saman Amarasighe
+ //   MIT Laboratory for Computer Science
+ // date
+-//   May 2000           
++//   May 2000
+ // published in
+ //   ACM SIGPLAN Notices
+-//   Proceedings of ACM PLDI '00,  Volume 35 Issue 5 
+-// 
++//   Proceedings of ACM PLDI '00,  Volume 35 Issue 5
++//
+ // Definition 3.1 A Pack is an n-tuple, <s1, ...,sn>, where
+ // s1,...,sn are independent isomorphic statements in a basic
+ // block.
+@@ -205,9 +202,9 @@
+   enum consts { top_align = -1, bottom_align = -666 };
+ 
+   GrowableArray<Node_List*> _packset;    // Packs for the current block
+-   
+-  GrowableArray<int> _bb_idx;            // Map from Node _idx to index within block 
+-     
++
++  GrowableArray<int> _bb_idx;            // Map from Node _idx to index within block
++
+   GrowableArray<Node*> _block;           // Nodes in current block
+   GrowableArray<Node*> _data_entry;      // Nodes with all inputs from outside
+   GrowableArray<Node*> _mem_slice_head;  // Memory slice head nodes
+@@ -246,10 +243,10 @@
+ 
+   // Accessors
+   Arena* arena()                   { return _arena; }
+-      				   
++
+   Node* bb()                       { return _bb; }
+   void  set_bb(Node* bb)           { _bb = bb; }
+-      				   
++
+   void set_lpt(IdealLoopTree* lpt) { _lpt = lpt; }
+ 
+   LoopNode* lp()                   { return _lp; }
+@@ -443,6 +440,9 @@
+   };
+ 
+   SWPointer(MemNode* mem, SuperWord* slp);
++  // Following is used to create a temporary object during
++  // the pattern match of an address expression.
++  SWPointer(SWPointer* p);
+ 
+   bool valid()  { return _adr != NULL; }
+   bool has_iv() { return _scale != 0; }
+diff -ruN openjdk6/hotspot/src/share/vm/opto/type.cpp openjdk/hotspot/src/share/vm/opto/type.cpp
+--- openjdk6/hotspot/src/share/vm/opto/type.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/type.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)type.cpp	1.253 07/05/17 16:02:23 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Portions of code courtesy of Clifford Click
+@@ -173,7 +170,7 @@
+ 
+ //------------------------------cmp--------------------------------------------
+ int Type::cmp( const Type *const t1, const Type *const t2 ) {
+-  if( t1->_base != t2->_base ) 
++  if( t1->_base != t2->_base )
+     return 1;                   // Missed badly
+   assert(t1 != t2 || t1->eq(t2), "eq must be reflexive");
+   return !t1->eq(t2);           // Return ZERO if equal
+@@ -190,7 +187,7 @@
+   // compilations (stub compilations) occur serially.  If they are
+   // changed to proceed in parallel, then this section will need
+   // locking.
+-  
++
+   Arena* save = current->type_arena();
+   Arena* shared_type_arena = new Arena();
+ 
+@@ -207,7 +204,7 @@
+   ABIO    = make(Abio);         // State-of-machine only
+   RETURN_ADDRESS=make(Return_Address);
+   FLOAT   = make(FloatBot);     // All floats
+-  DOUBLE  = make(DoubleBot);    // All doubles                          
++  DOUBLE  = make(DoubleBot);    // All doubles
+   BOTTOM  = make(Bottom);       // Everything
+   HALF    = make(Half);         // Placeholder half of doublewide type
+ 
+@@ -354,8 +351,8 @@
+   _const_basic_type[T_SHORT]   = TypeInt::SHORT;
+   _const_basic_type[T_INT]     = TypeInt::INT;
+   _const_basic_type[T_LONG]    = TypeLong::LONG;
+-  _const_basic_type[T_FLOAT]   = Type::FLOAT; 
+-  _const_basic_type[T_DOUBLE]  = Type::DOUBLE; 
++  _const_basic_type[T_FLOAT]   = Type::FLOAT;
++  _const_basic_type[T_DOUBLE]  = Type::DOUBLE;
+   _const_basic_type[T_OBJECT]  = TypeInstPtr::BOTTOM;
+   _const_basic_type[T_ARRAY]   = TypeInstPtr::BOTTOM; // there is no separate bottom for arrays
+   _const_basic_type[T_VOID]    = TypePtr::NULL_PTR;   // reflection represents void this way
+@@ -368,8 +365,8 @@
+   _zero_type[T_SHORT]   = TypeInt::ZERO;     // 0x0000 == 0
+   _zero_type[T_INT]     = TypeInt::ZERO;
+   _zero_type[T_LONG]    = TypeLong::ZERO;
+-  _zero_type[T_FLOAT]   = TypeF::ZERO; 
+-  _zero_type[T_DOUBLE]  = TypeD::ZERO; 
++  _zero_type[T_FLOAT]   = TypeF::ZERO;
++  _zero_type[T_DOUBLE]  = TypeD::ZERO;
+   _zero_type[T_OBJECT]  = TypePtr::NULL_PTR;
+   _zero_type[T_ARRAY]   = TypePtr::NULL_PTR; // null array is null oop
+   _zero_type[T_ADDRESS] = TypePtr::NULL_PTR; // raw pointers use the same null
+@@ -427,7 +424,7 @@
+   _dual = xdual();              // Compute the dual
+   if( cmp(this,_dual)==0 ) {    // Handle self-symmetric
+     _dual = this;
+-    return this; 
++    return this;
+   }
+   assert( !_dual->_dual, "" );  // No reverse dual yet
+   assert( !(*tdic)[_dual], "" ); // Dual not in type system either
+@@ -459,7 +456,7 @@
+ bool Type::is_finite() const {
+   return false;
+ }
+-  
++
+ //------------------------------is_nan-----------------------------------------
+ // Is not a number (NaN)
+ bool Type::is_nan()    const {
+@@ -535,7 +532,7 @@
+   // Meeting BOTTOM with anything?
+   if( _base == Bottom ) return BOTTOM;
+ 
+-  // Current "this->_base" is one of: Bad, Multi, Control, Top, 
++  // Current "this->_base" is one of: Bad, Multi, Control, Top,
+   // Abio, Abstore, Floatxxx, Doublexxx, Bottom, lastype.
+   switch (t->base()) {  // Switch on original type
+ 
+@@ -584,7 +581,7 @@
+     return Type::BOTTOM;
+ 
+   // These next few cases must match exactly or it is a compile-time error.
+-  case Control:                 // Control of code 
++  case Control:                 // Control of code
+   case Abio:                    // State of world outside of program
+   case Memory:
+     if( _base == t->_base )  return this;
+@@ -616,7 +613,7 @@
+   Bad,          // Int - handled in v-call
+   Bad,          // Long - handled in v-call
+   Half,         // Half
+-  
++
+   Bad,          // Tuple - handled in v-call
+   Bad,          // Array - handled in v-call
+ 
+@@ -662,23 +659,23 @@
+ 
+ #ifndef PRODUCT
+ //------------------------------dump2------------------------------------------
+-void Type::dump2( Dict &d, uint depth ) const {
+-  tty->print(msg[_base]);
++void Type::dump2( Dict &d, uint depth, outputStream *st ) const {
++  st->print(msg[_base]);
+ }
+ 
+ //------------------------------dump-------------------------------------------
+-void Type::dump() const {
++void Type::dump_on(outputStream *st) const {
+   ResourceMark rm;
+   Dict d(cmpkey,hashkey);       // Stop recursive type dumping
+-  dump2(d,1);
++  dump2(d,1, st);
+ }
+ 
+ //------------------------------data-------------------------------------------
+ const char * const Type::msg[Type::lastype] = {
+-  "bad","control","top","int:","long:","half", 
+-  "tuple:", "aryptr", 
+-  "anyptr:", "rawptr:", "java:", "inst:", "ary:", "klass:", 
+-  "func", "abIO", "return_address", "memory", 
++  "bad","control","top","int:","long:","half",
++  "tuple:", "aryptr",
++  "anyptr:", "rawptr:", "java:", "inst:", "ary:", "klass:",
++  "func", "abIO", "return_address", "memory",
+   "float_top", "ftcon:", "float",
+   "double_top", "dblcon:", "double",
+   "bottom"
+@@ -802,10 +799,10 @@
+ 
+   case FloatCon:                // Float-constant vs Float-constant?
+     if( jint_cast(_f) != jint_cast(t->getf()) )         // unequal constants?
+-                                // must compare bitwise as positive zero, negative zero and NaN have 
++                                // must compare bitwise as positive zero, negative zero and NaN have
+                                 // all the same representation in C++
+       return FLOAT;             // Return generic float
+-                                // Equal constants 
++                                // Equal constants
+   case Top:
+   case FloatTop:
+     break;                      // Return the float constant
+@@ -822,7 +819,7 @@
+ //------------------------------eq---------------------------------------------
+ // Structural equality check for Type representations
+ bool TypeF::eq( const Type *t ) const {
+-  if( g_isnan(_f) || 
++  if( g_isnan(_f) ||
+       g_isnan(t->getf()) ) {
+     // One or both are NANs.  If both are NANs return true, else false.
+     return (g_isnan(_f) && g_isnan(t->getf()));
+@@ -849,7 +846,7 @@
+ bool TypeF::is_finite() const {
+   return g_isfinite(getf()) != 0;
+ }
+-  
++
+ //------------------------------is_nan-----------------------------------------
+ // Is not a number (NaN)
+ bool TypeF::is_nan()    const {
+@@ -859,9 +856,9 @@
+ //------------------------------dump2------------------------------------------
+ // Dump float constant Type
+ #ifndef PRODUCT
+-void TypeF::dump2( Dict &d, uint depth ) const {
+-  Type::dump2(d,depth);
+-  tty->print("%f", _f);
++void TypeF::dump2( Dict &d, uint depth, outputStream *st ) const {
++  Type::dump2(d,depth, st);
++  st->print("%f", _f);
+ }
+ #endif
+ 
+@@ -934,7 +931,7 @@
+ //------------------------------eq---------------------------------------------
+ // Structural equality check for Type representations
+ bool TypeD::eq( const Type *t ) const {
+-  if( g_isnan(_d) || 
++  if( g_isnan(_d) ||
+       g_isnan(t->getd()) ) {
+     // One or both are NANs.  If both are NANs return true, else false.
+     return (g_isnan(_d) && g_isnan(t->getd()));
+@@ -961,7 +958,7 @@
+ bool TypeD::is_finite() const {
+   return g_isfinite(getd()) != 0;
+ }
+-  
++
+ //------------------------------is_nan-----------------------------------------
+ // Is not a number (NaN)
+ bool TypeD::is_nan()    const {
+@@ -971,9 +968,9 @@
+ //------------------------------dump2------------------------------------------
+ // Dump double constant Type
+ #ifndef PRODUCT
+-void TypeD::dump2( Dict &d, uint depth ) const {
+-  Type::dump2(d,depth);
+-  tty->print("%f", _d);
++void TypeD::dump2( Dict &d, uint depth, outputStream *st ) const {
++  Type::dump2(d,depth,st);
++  st->print("%f", _d);
+ }
+ #endif
+ 
+@@ -1084,7 +1081,7 @@
+   const TypeInt *ot = old->is_int();
+ 
+   // If new guy is equal to old guy, no widening
+-  if( _lo == ot->_lo && _hi == ot->_hi ) 
++  if( _lo == ot->_lo && _hi == ot->_hi )
+     return old;
+ 
+   // If new guy contains old, then we widened
+@@ -1116,7 +1113,7 @@
+ 
+   // If old guy contains new, then we probably widened too far & dropped to
+   // bottom.  Return the wider fellow.
+-  if ( ot->_lo <= _lo && ot->_hi >= _hi ) 
++  if ( ot->_lo <= _lo && ot->_hi >= _hi )
+     return old;
+ 
+   //fatal("Integer value range is not subset");
+@@ -1187,7 +1184,7 @@
+ bool TypeInt::is_finite() const {
+   return true;
+ }
+-  
++
+ //------------------------------dump2------------------------------------------
+ // Dump TypeInt
+ #ifndef PRODUCT
+@@ -1205,29 +1202,29 @@
+   return buf;
+ }
+ 
+-void TypeInt::dump2( Dict &d, uint depth ) const {
++void TypeInt::dump2( Dict &d, uint depth, outputStream *st ) const {
+   char buf[40], buf2[40];
+   if (_lo == min_jint && _hi == max_jint)
+-    tty->print("int");
+-  else if (is_con()) 
+-    tty->print("int:%s", intname(buf, get_con()));
+-  else if (_lo == BOOL->_lo && _hi == BOOL->_hi) 
+-    tty->print("bool");
++    st->print("int");
++  else if (is_con())
++    st->print("int:%s", intname(buf, get_con()));
++  else if (_lo == BOOL->_lo && _hi == BOOL->_hi)
++    st->print("bool");
+   else if (_lo == BYTE->_lo && _hi == BYTE->_hi)
+-    tty->print("byte");
+-  else if (_lo == CHAR->_lo && _hi == CHAR->_hi) 
+-    tty->print("char");
+-  else if (_lo == SHORT->_lo && _hi == SHORT->_hi) 
+-    tty->print("short");
++    st->print("byte");
++  else if (_lo == CHAR->_lo && _hi == CHAR->_hi)
++    st->print("char");
++  else if (_lo == SHORT->_lo && _hi == SHORT->_hi)
++    st->print("short");
+   else if (_hi == max_jint)
+-    tty->print("int:>=%s", intname(buf, _lo));
++    st->print("int:>=%s", intname(buf, _lo));
+   else if (_lo == min_jint)
+-    tty->print("int:<=%s", intname(buf, _hi));
++    st->print("int:<=%s", intname(buf, _hi));
+   else
+-    tty->print("int:%s..%s", intname(buf, _lo), intname(buf2, _hi));
++    st->print("int:%s..%s", intname(buf, _lo), intname(buf2, _hi));
+ 
+   if (_widen != 0 && this != TypeInt::INT)
+-    tty->print(":%.*s", _widen, "wwww");
++    st->print(":%.*s", _widen, "wwww");
+ }
+ #endif
+ 
+@@ -1325,7 +1322,7 @@
+   const TypeLong *ot = old->is_long();
+ 
+   // If new guy is equal to old guy, no widening
+-  if( _lo == ot->_lo && _hi == ot->_hi ) 
++  if( _lo == ot->_lo && _hi == ot->_hi )
+     return old;
+ 
+   // If new guy contains old, then we widened
+@@ -1360,7 +1357,7 @@
+ 
+   // If old guy contains new, then we probably widened too far & dropped to
+   // bottom.  Return the wider fellow.
+-  if ( ot->_lo <= _lo && ot->_hi >= _hi ) 
++  if ( ot->_lo <= _lo && ot->_hi >= _hi )
+     return old;
+ 
+   //  fatal("Long value range is not subset");
+@@ -1431,7 +1428,7 @@
+ bool TypeLong::is_finite() const {
+   return true;
+ }
+-  
++
+ //------------------------------dump2------------------------------------------
+ // Dump TypeLong
+ #ifndef PRODUCT
+@@ -1469,27 +1466,27 @@
+   return buf;
+ }
+ 
+-void TypeLong::dump2( Dict &d, uint depth ) const {
++void TypeLong::dump2( Dict &d, uint depth, outputStream *st ) const {
+   char buf[80], buf2[80];
+   if (_lo == min_jlong && _hi == max_jlong)
+-    tty->print("long");
+-  else if (is_con()) 
+-    tty->print("long:%s", longname(buf, get_con()));
++    st->print("long");
++  else if (is_con())
++    st->print("long:%s", longname(buf, get_con()));
+   else if (_hi == max_jlong)
+-    tty->print("long:>=%s", longname(buf, _lo));
++    st->print("long:>=%s", longname(buf, _lo));
+   else if (_lo == min_jlong)
+-    tty->print("long:<=%s", longname(buf, _hi));
++    st->print("long:<=%s", longname(buf, _hi));
+   else
+-    tty->print("long:%s..%s", longname(buf, _lo), longname(buf2, _hi));
++    st->print("long:%s..%s", longname(buf, _lo), longname(buf2, _hi));
+ 
+   if (_widen != 0 && this != TypeLong::LONG)
+-    tty->print(":%.*s", _widen, "wwww");
++    st->print(":%.*s", _widen, "wwww");
+ }
+ #endif
+ 
+ //------------------------------singleton--------------------------------------
+ // TRUE if Type is a singleton type, FALSE otherwise.   Singletons are simple
+-// constants 
++// constants
+ bool TypeLong::singleton(void) const {
+   return _lo >= _hi;
+ }
+@@ -1525,7 +1522,7 @@
+     break;
+   case T_DOUBLE:
+     field_array[TypeFunc::Parms]   = Type::DOUBLE;
+-    field_array[TypeFunc::Parms+1] = Type::HALF;      
++    field_array[TypeFunc::Parms+1] = Type::HALF;
+     break;
+   case T_OBJECT:
+   case T_ARRAY:
+@@ -1571,7 +1568,7 @@
+       break;
+     case T_DOUBLE:
+       field_array[pos++] = Type::DOUBLE;
+-      field_array[pos++] = Type::HALF;      
++      field_array[pos++] = Type::HALF;
+       break;
+     case T_OBJECT:
+     case T_ARRAY:
+@@ -1582,7 +1579,7 @@
+     case T_SHORT:
+     case T_INT:
+       field_array[pos++] = get_const_type(type);
+-      break;   
++      break;
+     default:
+       ShouldNotReachHere();
+     }
+@@ -1592,7 +1589,7 @@
+ }
+ 
+ const TypeTuple *TypeTuple::make( uint cnt, const Type **fields ) {
+-  return (TypeTuple*)(new TypeTuple(cnt,fields))->hashcons();  
++  return (TypeTuple*)(new TypeTuple(cnt,fields))->hashcons();
+ }
+ 
+ //------------------------------fields-----------------------------------------
+@@ -1630,8 +1627,8 @@
+     for( uint i=0; i<_cnt; i++ )
+       fields[i] = field_at(i)->xmeet( x->field_at(i) );
+     return TypeTuple::make(_cnt,fields);
+-  }           
+-  case Top:    
++  }
++  case Top:
+     break;
+   }
+   return this;                  // Return the double constant
+@@ -1669,24 +1666,24 @@
+ //------------------------------dump2------------------------------------------
+ // Dump signature Type
+ #ifndef PRODUCT
+-void TypeTuple::dump2( Dict &d, uint depth ) const {
+-  tty->print("{");
++void TypeTuple::dump2( Dict &d, uint depth, outputStream *st ) const {
++  st->print("{");
+   if( !depth || d[this] ) {     // Check for recursive print
+-    tty->print("...}");
++    st->print("...}");
+     return;
+   }
+   d.Insert((void*)this, (void*)this);   // Stop recursion
+   if( _cnt ) {
+     uint i;
+     for( i=0; i<_cnt-1; i++ ) {
+-      tty->print("%d:", i);
+-      _fields[i]->dump2(d, depth-1);
+-      tty->print(", ");
++      st->print("%d:", i);
++      _fields[i]->dump2(d, depth-1, st);
++      st->print(", ");
+     }
+-    tty->print("%d:", i);
+-    _fields[i]->dump2(d, depth-1);
++    st->print("%d:", i);
++    _fields[i]->dump2(d, depth-1, st);
+   }
+-  tty->print("}");
++  st->print("}");
+ }
+ #endif
+ 
+@@ -1708,8 +1705,20 @@
+ //=============================================================================
+ // Convenience common pre-built types.
+ 
++inline const TypeInt* normalize_array_size(const TypeInt* size) {
++  // Certain normalizations keep us sane when comparing types.
++  // We do not want arrayOop variables to differ only by the wideness
++  // of their index types.  Pick minimum wideness, since that is the
++  // forced wideness of small ranges anyway.
++  if (size->_widen != Type::WidenMin)
++    return TypeInt::make(size->_lo, size->_hi, Type::WidenMin);
++  else
++    return size;
++}
++
+ //------------------------------make-------------------------------------------
+ const TypeAry *TypeAry::make( const Type *elem, const TypeInt *size) {
++  size = normalize_array_size(size);
+   return (TypeAry*)(new TypeAry(elem,size))->hashcons();
+ }
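
Note: normalize_array_size above canonicalizes the widen field of every array-size type to WidenMin before TypeAry::make hash-conses the result, so two array types whose index ranges differ only in widening state compare equal; the xdual hunk below applies the same normalization so a type and its dual stay consistent. A toy model of why the canonicalization matters for interning (not HotSpot code):

    #include <cassert>
    struct IntRange { int lo, hi, widen; };     // toy stand-in for TypeInt
    const int WidenMin = 0;
    IntRange normalize_array_size(IntRange s) {
      s.widen = WidenMin;                       // pick minimum wideness
      return s;
    }
    bool same_type(const IntRange& a, const IntRange& b) {
      return a.lo == b.lo && a.hi == b.hi && a.widen == b.widen;  // hash-cons key
    }
    int main() {
      IntRange a = {0, 100, 3}, b = {0, 100, 1};   // equal ranges, different widen
      assert(!same_type(a, b));                    // would intern as two types
      assert(same_type(normalize_array_size(a), normalize_array_size(b)));
      return 0;
    }
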
+ 
+@@ -1730,7 +1739,7 @@
+ 
+   case Array: {                 // Meeting 2 arrays?
+     const TypeAry *a = t->is_ary();
+-    return TypeAry::make(_elem->meet(a->_elem), 
++    return TypeAry::make(_elem->meet(a->_elem),
+                          _size->xmeet(a->_size)->is_int());
+   }
+   case Top:
+@@ -1742,7 +1751,9 @@
+ //------------------------------xdual------------------------------------------
+ // Dual: compute field-by-field dual
+ const Type *TypeAry::xdual() const {
+-  return new TypeAry( _elem->dual(), _size->dual()->is_int() );
++  const TypeInt* size_dual = _size->dual()->is_int();
++  size_dual = normalize_array_size(size_dual);
++  return new TypeAry( _elem->dual(), size_dual);
+ }
+ 
+ //------------------------------eq---------------------------------------------
+@@ -1761,11 +1772,11 @@
+ 
+ //------------------------------dump2------------------------------------------
+ #ifndef PRODUCT
+-void TypeAry::dump2( Dict &d, uint depth ) const {
+-  _elem->dump2(d, depth);
+-  tty->print("[");
+-  _size->dump2(d, depth);
+-  tty->print("]");
++void TypeAry::dump2( Dict &d, uint depth, outputStream *st ) const {
++  _elem->dump2(d, depth, st);
++  st->print("[");
++  _size->dump2(d, depth, st);
++  st->print("]");
+ }
+ #endif
+ 
+@@ -1872,7 +1883,7 @@
+     typerr(t);
+ 
+   }
+-  return this;                  
++  return this;
+ }
+ 
+ //------------------------------meet_offset------------------------------------
+@@ -1930,18 +1941,18 @@
+ };
+ 
+ #ifndef PRODUCT
+-void TypePtr::dump2( Dict &d, uint depth ) const {
+-  if( _ptr == Null ) tty->print("NULL");
+-  else tty->print("%s *", ptr_msg[_ptr]);
+-  if( _offset == OffsetTop ) tty->print("+top");
+-  else if( _offset == OffsetBot ) tty->print("+bot");
+-  else if( _offset ) tty->print("+%d", _offset);
++void TypePtr::dump2( Dict &d, uint depth, outputStream *st ) const {
++  if( _ptr == Null ) st->print("NULL");
++  else st->print("%s *", ptr_msg[_ptr]);
++  if( _offset == OffsetTop ) st->print("+top");
++  else if( _offset == OffsetBot ) st->print("+bot");
++  else if( _offset ) st->print("+%d", _offset);
+ }
+ #endif
+ 
+ //------------------------------singleton--------------------------------------
+ // TRUE if Type is a singleton type, FALSE otherwise.   Singletons are simple
+-// constants 
++// constants
+ bool TypePtr::singleton(void) const {
+   // TopPTR, Null, AnyNull, Constant are all singletons
+   return (_offset != OffsetBot) && !below_centerline(_ptr);
+@@ -1992,7 +2003,7 @@
+   // Current "this->_base" is RawPtr
+   switch( t->base() ) {         // switch on original type
+   case Bottom:                  // Ye Olde Default
+-    return t; 
++    return t;
+   case Top:
+     return this;
+   case AnyPtr:                  // Meeting to AnyPtrs
+@@ -2001,8 +2012,8 @@
+     enum PTR tptr = t->is_ptr()->ptr();
+     enum PTR ptr = meet_ptr( tptr );
+     if( ptr == Constant ) {     // Cannot be equal constants, so...
+-      if( tptr == Constant && _ptr != Constant)  return t; 
+-      if( _ptr == Constant && tptr != Constant)  return this; 
++      if( tptr == Constant && _ptr != Constant)  return t;
++      if( _ptr == Constant && tptr != Constant)  return this;
+       ptr = NotNull;            // Fall down in lattice
+     }
+     return make( ptr );
+@@ -2029,9 +2040,9 @@
+   case TypePtr::AnyNull:
+     if( _ptr == TypePtr::Constant) return this;
+     return make( meet_ptr(TypePtr::AnyNull) );
+-  default: ShouldNotReachHere();    
++  default: ShouldNotReachHere();
+   }
+-  return this;                  
++  return this;
+ }
+ 
+ //------------------------------xdual------------------------------------------
+@@ -2073,11 +2084,11 @@
+ 
+ //------------------------------dump2------------------------------------------
+ #ifndef PRODUCT
+-void TypeRawPtr::dump2( Dict &d, uint depth ) const {
+-  if( _ptr == Constant ) 
+-    tty->print(INTPTR_FORMAT, _bits);
++void TypeRawPtr::dump2( Dict &d, uint depth, outputStream *st ) const {
++  if( _ptr == Constant )
++    st->print(INTPTR_FORMAT, _bits);
+   else
+-    tty->print("rawptr:%s", ptr_msg[_ptr]);
++    st->print("rawptr:%s", ptr_msg[_ptr]);
+ }
+ #endif
+ 
+@@ -2086,7 +2097,7 @@
+ const TypeOopPtr *TypeOopPtr::BOTTOM;
+ 
+ //------------------------------make-------------------------------------------
+-const TypeOopPtr *TypeOopPtr::make(PTR ptr, 
++const TypeOopPtr *TypeOopPtr::make(PTR ptr,
+                                    int offset) {
+   assert(ptr != Constant, "no constant generic pointers");
+   ciKlass*  k = ciKlassKlass::make();
+@@ -2105,14 +2116,14 @@
+ 
+ //-----------------------------cast_to_instance-------------------------------
+ const TypeOopPtr *TypeOopPtr::cast_to_instance(int instance_id) const {
+-  // There are no instances of a general oop. 
++  // There are no instances of a general oop.
+   // Return self unchanged.
+   return this;
+ }
+ 
+ //-----------------------------cast_to_exactness-------------------------------
+ const Type *TypeOopPtr::cast_to_exactness(bool klass_is_exact) const {
+-  // There is no such thing as an exact general oop. 
++  // There is no such thing as an exact general oop.
+   // Return self unchanged.
+   return this;
+ }
+@@ -2168,7 +2179,7 @@
+     case Null:
+       if (ptr == Null)  return TypePtr::make(AnyPtr, ptr, offset);
+       // else fall through:
+-    case TopPTR: 
++    case TopPTR:
+     case AnyNull:
+       return make(ptr, offset);
+     case BotPTR:
+@@ -2177,7 +2188,7 @@
+     default: typerr(t);
+     }
+   }
+- 
++
+   case OopPtr: {                 // Meeting to other OopPtrs
+     const TypeOopPtr *tp = t->is_oopptr();
+     return make( meet_ptr(tp->ptr()), meet_offset(tp->offset()) );
+@@ -2226,7 +2237,7 @@
+       }
+       if (!klass_is_exact && try_for_exact
+           && deps != NULL && UseExactTypes) {
+-	if (!ik->is_interface() && !ik->has_subklass()) {
++        if (!ik->is_interface() && !ik->has_subklass()) {
+           // Add a dependence; if concrete subclass added we need to recompile
+           deps->assert_leaf_type(ik);
+           klass_is_exact = true;
+@@ -2240,7 +2251,7 @@
+     bool xk = etype->klass_is_exact();
+     const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
+     // We used to pass NotNull in here, asserting that the sub-arrays
+-    // are all not-null.  This is not true in generally, as code can 
++    // are all not-null.  This is not true in general, as code can
+     // slam NULLs down in the subarrays.
+     const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::BotPTR, arr0, klass, xk, 0);
+     return arr;
+@@ -2274,21 +2285,21 @@
+     assert(o->is_java_object(), "must be java language object");
+     assert(!o->is_null_object(), "null object not yet handled here.");
+     ciKlass *klass = o->klass();
+-    if (klass->is_instance_klass()) {       
++    if (klass->is_instance_klass()) {
+       // Element is an instance
+       if (!o->has_encoding()) {  // not a perm-space constant
+         // %%% remove this restriction by rewriting non-perm ConPNodes in a later phase
+         return TypeInstPtr::make(TypePtr::NotNull, klass, true, NULL, 0);
+       }
+-      return TypeInstPtr::make(o);    
++      return TypeInstPtr::make(o);
+     } else if (klass->is_obj_array_klass()) {
+       // Element is an object array. Recursively call ourself.
+       const Type *etype =
+         TypeOopPtr::make_from_klass_raw(klass->as_obj_array_klass()->element_klass());
+       const TypeAry* arr0 = TypeAry::make(etype, TypeInt::make(o->as_array()->length()));
+       // We used to pass NotNull in here, asserting that the sub-arrays
+-      // are all not-null.  This is not true in generally, as code can 
+-      // slam NULLs down in the subarrays.  
++      // are all not-null.  This is not true in generally, as code can
++      // slam NULLs down in the subarrays.
+       if (!o->has_encoding()) {  // not a perm-space constant
+         // %%% remove this restriction by rewriting non-perm ConPNodes in a later phase
+         return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, 0);
+@@ -2309,8 +2320,8 @@
+       const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0);
+       return arr;
+     }
+-  } 
+-    
++  }
++
+   ShouldNotReachHere();
+   return NULL;
+ }
+@@ -2319,7 +2330,7 @@
+ intptr_t TypeOopPtr::get_con() const {
+   assert( _ptr == Null || _ptr == Constant, "" );
+   assert( _offset >= 0, "" );
+-  
++
+   if (_offset != 0) {
+     // After being ported to the compiler interface, the compiler no longer
+     // directly manipulates the addresses of oops.  Rather, it only has a pointer
+@@ -2331,7 +2342,7 @@
+     tty->print_cr("Found oop constant with non-zero offset");
+     ShouldNotReachHere();
+   }
+-  
++
+   return (intptr_t)const_oop()->encoding();
+ }
+ 
+@@ -2366,7 +2377,7 @@
+   // join report an interface back out.  This isn't possible but happens
+   // because the type system doesn't interact well with interfaces.
+   if (ftip != NULL && ktip != NULL &&
+-      ftip->is_loaded() &&  ftip->klass()->is_interface() && 
++      ftip->is_loaded() &&  ftip->klass()->is_interface() &&
+       ktip->is_loaded() && !ktip->klass()->is_interface()) {
+     // Happens in a CTW of rt.jar, 320-341, no extra flags
+     return ktip->cast_to_ptr_type(ftip->ptr());
+@@ -2393,7 +2404,7 @@
+ //------------------------------hash-------------------------------------------
+ // Type-specific hashing function.
+ int TypeOopPtr::hash(void) const {
+-  return 
++  return
+     (const_oop() ? const_oop()->hash() : 0) +
+     _klass_is_exact +
+     _instance_id +
+@@ -2402,24 +2413,24 @@
+ 
+ //------------------------------dump2------------------------------------------
+ #ifndef PRODUCT
+-void TypeOopPtr::dump2( Dict &d, uint depth ) const {
+-  tty->print("oopptr:%s", ptr_msg[_ptr]);
+-  if( _klass_is_exact ) tty->print(":exact");
+-  if( const_oop() ) tty->print(INTPTR_FORMAT, const_oop());
++void TypeOopPtr::dump2( Dict &d, uint depth, outputStream *st ) const {
++  st->print("oopptr:%s", ptr_msg[_ptr]);
++  if( _klass_is_exact ) st->print(":exact");
++  if( const_oop() ) st->print(INTPTR_FORMAT, const_oop());
+   switch( _offset ) {
+-  case OffsetTop: tty->print("+top"); break;
+-  case OffsetBot: tty->print("+any"); break;
++  case OffsetTop: st->print("+top"); break;
++  case OffsetBot: st->print("+any"); break;
+   case         0: break;
+-  default:        tty->print("+%d",_offset); break;
++  default:        st->print("+%d",_offset); break;
+   }
+   if (_instance_id != UNKNOWN_INSTANCE)
+-    tty->print(",iid=%d",_instance_id);
++    st->print(",iid=%d",_instance_id);
+ }
+ #endif
+ 
+ //------------------------------singleton--------------------------------------
+ // TRUE if Type is a singleton type, FALSE otherwise.   Singletons are simple
+-// constants 
++// constants
+ bool TypeOopPtr::singleton(void) const {
+   // detune optimizer to not generate constant oop + constant offset as a constant!
+   // TopPTR, Null, AnyNull, Constant are all singletons
+@@ -2463,7 +2474,7 @@
+ const TypeInstPtr *TypeInstPtr::KLASS;
+ 
+ //------------------------------TypeInstPtr-------------------------------------
+-TypeInstPtr::TypeInstPtr(PTR ptr, ciKlass* k, bool xk, ciObject* o, int off, int instance_id) 
++TypeInstPtr::TypeInstPtr(PTR ptr, ciKlass* k, bool xk, ciObject* o, int off, int instance_id)
+  : TypeOopPtr(InstPtr, ptr, k, xk, o, off, instance_id), _name(k->name()) {
+    assert(k != NULL &&
+           (k->is_loaded() || o == NULL),
+@@ -2471,7 +2482,7 @@
+ };
+ 
+ //------------------------------make-------------------------------------------
+-const TypeInstPtr *TypeInstPtr::make(PTR ptr, 
++const TypeInstPtr *TypeInstPtr::make(PTR ptr,
+                                      ciKlass* k,
+                                      bool xk,
+                                      ciObject* o,
+@@ -2529,7 +2540,7 @@
+ const TypeOopPtr *TypeInstPtr::cast_to_instance(int instance_id) const {
+   if( instance_id == _instance_id) return this;
+   bool exact = (instance_id == UNKNOWN_INSTANCE) ? _klass_is_exact : true;
+-  
++
+   return make(ptr(), klass(), exact, const_oop(), _offset, instance_id);
+ }
+ 
+@@ -2543,7 +2554,7 @@
+     const TypeInstPtr *loaded    = is_loaded() ? this  : tinst;
+     const TypeInstPtr *unloaded  = is_loaded() ? tinst : this;
+     if( loaded->klass()->equals(ciEnv::current()->Object_klass()) ) {
+-      // 
++      //
+       // Meet unloaded class with java/lang/Object
+       //
+       // Meet
+@@ -2557,7 +2568,7 @@
+       //  BOTTOM  | ........................Object-BOTTOM ..................|
+       //
+       assert(loaded->ptr() != TypePtr::Null, "insanity check");
+-      // 
++      //
+       if(      loaded->ptr() == TypePtr::TopPTR ) { return unloaded; }
+       else if (loaded->ptr() == TypePtr::AnyNull) { return TypeInstPtr::make( ptr, unloaded->klass() ); }
+       else if (loaded->ptr() == TypePtr::BotPTR ) { return TypeInstPtr::BOTTOM; }
+@@ -2612,7 +2623,7 @@
+     PTR ptr = meet_ptr(tp->ptr());
+     int iid = meet_instance(tp->instance_id());
+     switch (ptr) {
+-    case TopPTR: 
++    case TopPTR:
+     case AnyNull:                // Fall 'down' to dual of object klass
+       if (klass()->equals(ciEnv::current()->Object_klass())) {
+         return TypeAryPtr::make(ptr, tp->ary(), tp->klass(), tp->klass_is_exact(), offset, iid);
+@@ -2648,12 +2659,12 @@
+     int offset = meet_offset(tp->offset());
+     PTR ptr = meet_ptr(tp->ptr());
+     switch (tp->ptr()) {
+-    case TopPTR: 
++    case TopPTR:
+     case AnyNull:
+       return make(ptr, klass(), klass_is_exact(),
+                   (ptr == Constant ? const_oop() : NULL), offset);
+     case NotNull:
+-    case BotPTR: 
++    case BotPTR:
+       return TypeOopPtr::make(ptr, offset);
+     default: typerr(t);
+     }
+@@ -2665,20 +2676,20 @@
+     int offset = meet_offset(tp->offset());
+     PTR ptr = meet_ptr(tp->ptr());
+     switch (tp->ptr()) {
+-    case Null: 
++    case Null:
+       if( ptr == Null ) return TypePtr::make( AnyPtr, ptr, offset );
+-    case TopPTR: 
++    case TopPTR:
+     case AnyNull:
+       return make( ptr, klass(), klass_is_exact(),
+                    (ptr == Constant ? const_oop() : NULL), offset );
+     case NotNull:
+-    case BotPTR: 
++    case BotPTR:
+       return TypePtr::make( AnyPtr, ptr, offset );
+     default: typerr(t);
+     }
+   }
+ 
+-  /*  
++  /*
+                  A-top         }
+                /   |   \       }  Tops
+            B-top A-any C-top   }
+@@ -2693,7 +2704,7 @@
+                \   |   /       }  Bottoms
+                  A-bot         }
+   */
+-  
++
+   case InstPtr: {                // Meeting 2 Oops?
+     // Found an InstPtr sub-type vs self-InstPtr type
+     const TypeInstPtr *tinst = t->is_instptr();
+@@ -2708,7 +2719,7 @@
+     if (ptr != Constant && klass()->equals(tinst->klass()) && klass_is_exact() == tinst->klass_is_exact()) {
+       return make( ptr, klass(), klass_is_exact(), NULL, off, instance_id );
+     }
+-      
++
+     // Classes require inspection in the Java klass hierarchy.  Must be loaded.
+     ciKlass* tinst_klass = tinst->klass();
+     ciKlass* this_klass  = this->klass();
+@@ -2782,7 +2793,7 @@
+     // If both are down and they do NOT subtype, "fall hard".
+     // Constants treated as down.
+ 
+-    // Now, reorder the above list; observe that both-down+subtype is also 
++    // Now, reorder the above list; observe that both-down+subtype is also
+     // "fall hard"; "fall hard" becomes the default case:
+     // If we split one up & one down AND they subtype, take the down man.
+     // If both are up and they subtype, take the subtype class.
+@@ -2796,7 +2807,7 @@
+     // If a proper supertype is exact, there can be no subtyping relationship!
+     // If both types are equal to the subtype, exactness is and-ed below the
+     // centerline and or-ed above it.  (N.B. Constants are always exact.)
+-    
++
+     // Check for subtyping:
+     ciKlass *subtype = NULL;
+     bool subtype_exact = false;
+@@ -2847,7 +2858,7 @@
+       }
+       return make( ptr, this_klass, this_xk, o, off, instance_id );
+     } // Else classes are not equal
+-               
++
+     // Since klasses are different, we require a LCA in the Java
+     // class hierarchy - which means we have to fall to at least NotNull.
+     if( ptr == TopPTR || ptr == AnyNull || ptr == Constant )
+@@ -2889,7 +2900,7 @@
+ // Structural equality check for Type representations
+ bool TypeInstPtr::eq( const Type *t ) const {
+   const TypeInstPtr *p = t->is_instptr();
+-  return 
++  return
+     klass()->equals(p->klass()) &&
+     TypeOopPtr::eq(p);          // Check sub-type stuff
+ }
+@@ -2904,38 +2915,38 @@
+ //------------------------------dump2------------------------------------------
+ // Dump oop Type
+ #ifndef PRODUCT
+-void TypeInstPtr::dump2( Dict &d, uint depth ) const {
++void TypeInstPtr::dump2( Dict &d, uint depth, outputStream *st ) const {
+   // Print the name of the klass.
+-  klass()->print_name();
++  klass()->print_name_on(st);
+ 
+   switch( _ptr ) {
+   case Constant:
+     // TO DO: Make CI print the hex address of the underlying oop.
+     if (WizardMode || Verbose) {
+-      const_oop()->print_oop();
++      const_oop()->print_oop(st);
+     }
+   case BotPTR:
+     if (!WizardMode && !Verbose) {
+-      if( _klass_is_exact ) tty->print(":exact");
++      if( _klass_is_exact ) st->print(":exact");
+       break;
+     }
+   case TopPTR:
+   case AnyNull:
+   case NotNull:
+-    tty->print(":%s", ptr_msg[_ptr]);
+-    if( _klass_is_exact ) tty->print(":exact");
++    st->print(":%s", ptr_msg[_ptr]);
++    if( _klass_is_exact ) st->print(":exact");
+     break;
+   }
+ 
+   if( _offset ) {               // Dump offset, if any
+-    if( _offset == OffsetBot )      tty->print("+any");
+-    else if( _offset == OffsetTop ) tty->print("+unknown");
+-    else tty->print("+%d", _offset);
++    if( _offset == OffsetBot )      st->print("+any");
++    else if( _offset == OffsetTop ) st->print("+unknown");
++    else st->print("+%d", _offset);
+   }
+ 
+-  tty->print(" *");
++  st->print(" *");
+   if (_instance_id != UNKNOWN_INSTANCE)
+-    tty->print(",iid=%d",_instance_id);
++    st->print(",iid=%d",_instance_id);
+ }
+ #endif
+ 
+@@ -3001,15 +3012,50 @@
+   return make(ptr(), const_oop(), _ary, klass(), exact, _offset, instance_id);
+ }
+ 
++//-----------------------------narrow_size_type-------------------------------
++// Local cache for arrayOopDesc::max_array_length(etype),
++// which is kind of slow (and cached elsewhere by other users).
++static jint max_array_length_cache[T_CONFLICT+1];
++static jint max_array_length(BasicType etype) {
++  jint& cache = max_array_length_cache[etype];
++  jint res = cache;
++  if (res == 0) {
++    switch (etype) {
++    case T_CONFLICT:
++    case T_ILLEGAL:
++    case T_VOID:
++      etype = T_BYTE;           // will produce conservatively high value
++    }
++    cache = res = arrayOopDesc::max_array_length(etype);
++  }
++  return res;
++}
++
++// Narrow the given size type to the index range for the given array base type.
++// Return NULL if the resulting int type becomes empty.
++const TypeInt* TypeAryPtr::narrow_size_type(const TypeInt* size, BasicType elem) {
++  jint hi = size->_hi;
++  jint lo = size->_lo;
++  jint min_lo = 0;
++  jint max_hi = max_array_length(elem);
++  //if (index_not_size)  --max_hi;     // type of a valid array index, FTR
++  bool chg = false;
++  if (lo < min_lo) { lo = min_lo; chg = true; }
++  if (hi > max_hi) { hi = max_hi; chg = true; }
++  if (lo > hi)
++    return NULL;
++  if (!chg)
++    return size;
++  return TypeInt::make(lo, hi, Type::WidenMin);
++}
++
+ //-------------------------------cast_to_size----------------------------------
+ const TypeAryPtr* TypeAryPtr::cast_to_size(const TypeInt* new_size) const {
+-  if (new_size == size() || new_size == NULL)  return this;
+-  if (new_size->_lo < 0) {
+-    new_size = new_size->join(TypeInt::POS)->is_int();
+-    if (new_size == size())  return this;
+-  }
+-  if (new_size->empty())      // Negative length arrays will produce weird
++  assert(new_size != NULL, "");
++  new_size = narrow_size_type(new_size, elem()->basic_type());
++  if (new_size == NULL)       // Negative length arrays will produce weird
+     new_size = TypeInt::ZERO; // intermediate dead fast-path goo
++  if (new_size == size())  return this;
+   const TypeAry* new_ary = TypeAry::make(elem(), new_size);
+   return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset);
+ }
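
The two hunks above add TypeAryPtr::narrow_size_type, which clamps a speculative array-length range into [0, max_array_length(elem)] and reports an empty result (the patch returns NULL, and cast_to_size then substitutes TypeInt::ZERO) when the clamped bounds cross. A minimal stand-alone sketch of the clamping step, with an assumed constant standing in for arrayOopDesc::max_array_length(etype):

#include <cstdint>
#include <cstdio>

// Stand-in for TypeInt: a closed interval of jint values.
struct IntRange { int32_t lo, hi; };

// Assumed stand-in for arrayOopDesc::max_array_length(etype); the real
// value depends on element size and heap layout, which is why the patch
// caches it per BasicType.
static int32_t max_array_length() { return 0x7ffffff0; }

// Clamp the range; false means it became empty (a dead fast-path).
static bool narrow_size(IntRange &r) {
  if (r.lo < 0) r.lo = 0;                                  // lengths are >= 0
  if (r.hi > max_array_length()) r.hi = max_array_length();
  return r.lo <= r.hi;
}

int main() {
  IntRange r = { -5, 100 };
  if (narrow_size(r))
    std::printf("narrowed to [%d, %d]\n", r.lo, r.hi);     // prints [0, 100]
  return 0;
}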
+@@ -3019,7 +3065,7 @@
+ // Structural equality check for Type representations
+ bool TypeAryPtr::eq( const Type *t ) const {
+   const TypeAryPtr *p = t->is_aryptr();
+-  return 
++  return
+     _ary == p->_ary &&  // Check array
+     TypeOopPtr::eq(p);  // Check sub-parts
+ }
+@@ -3061,7 +3107,7 @@
+     int offset = meet_offset(tp->offset());
+     PTR ptr = meet_ptr(tp->ptr());
+     switch (tp->ptr()) {
+-    case TopPTR: 
++    case TopPTR:
+     case AnyNull:
+       return make(ptr, (ptr == Constant ? const_oop() : NULL), _ary, _klass, _klass_is_exact, offset);
+     case BotPTR:
+@@ -3077,12 +3123,12 @@
+     int offset = meet_offset(tp->offset());
+     PTR ptr = meet_ptr(tp->ptr());
+     switch (tp->ptr()) {
+-    case TopPTR: 
++    case TopPTR:
+       return this;
+     case BotPTR:
+     case NotNull:
+       return TypePtr::make(AnyPtr, ptr, offset);
+-    case Null:   
++    case Null:
+       if( ptr == Null ) return TypePtr::make(AnyPtr, ptr, offset);
+     case AnyNull:
+       return make( ptr, (ptr == Constant ? const_oop() : NULL), _ary, _klass, _klass_is_exact, offset );
+@@ -3106,7 +3152,7 @@
+         lazy_klass = tap->_klass;
+       else if (tap->_klass == NULL || tap->_klass == _klass) {
+         lazy_klass = _klass;
+-      } else {  
++      } else {
+         // Something like byte[int+] meets char[int+].
+         // This must fall to bottom, not (int[-128..65535])[int+].
+         tary = TypeAry::make(Type::BOTTOM, tary->_size);
+@@ -3114,8 +3160,8 @@
+     }
+     bool xk;
+     switch (tap->ptr()) {
+-    case AnyNull: 
+-    case TopPTR:  
++    case AnyNull:
++    case TopPTR:
+       // Compute new klass on demand, do not use tap->_klass
+       xk = (tap->_klass_is_exact | this->_klass_is_exact);
+       return make( ptr, const_oop(), tary, lazy_klass, xk, off );
+@@ -3132,8 +3178,8 @@
+       xk = true;
+       return TypeAryPtr::make( ptr, o, tary, tap->_klass, xk, off );
+     }
+-    case NotNull: 
+-    case BotPTR:  
++    case NotNull:
++    case BotPTR:
+       // Compute new klass on demand, do not use tap->_klass
+       if (above_centerline(this->_ptr))
+             xk = tap->_klass_is_exact;
+@@ -3153,7 +3199,7 @@
+     PTR ptr = meet_ptr(tp->ptr());
+     int iid = meet_instance(tp->instance_id());
+     switch (ptr) {
+-    case TopPTR: 
++    case TopPTR:
+     case AnyNull:                // Fall 'down' to dual of object klass
+       if( tp->klass()->equals(ciEnv::current()->Object_klass()) ) {
+         return TypeAryPtr::make( ptr, _ary, _klass, _klass_is_exact, offset, iid );
+@@ -3198,33 +3244,33 @@
+ 
+ //------------------------------dump2------------------------------------------
+ #ifndef PRODUCT
+-void TypeAryPtr::dump2( Dict &d, uint depth ) const {
+-  _ary->dump2(d,depth);
++void TypeAryPtr::dump2( Dict &d, uint depth, outputStream *st ) const {
++  _ary->dump2(d,depth,st);
+   switch( _ptr ) {
+   case Constant:
+-    const_oop()->print();
++    const_oop()->print(st);
+     break;
+   case BotPTR:
+     if (!WizardMode && !Verbose) {
+-      if( _klass_is_exact ) tty->print(":exact");
++      if( _klass_is_exact ) st->print(":exact");
+       break;
+     }
+   case TopPTR:
+   case AnyNull:
+   case NotNull:
+-    tty->print(":%s", ptr_msg[_ptr]);
+-    if( _klass_is_exact ) tty->print(":exact");
++    st->print(":%s", ptr_msg[_ptr]);
++    if( _klass_is_exact ) st->print(":exact");
+     break;
+   }
+ 
+-  tty->print("*");
++  st->print("*");
+   if (_instance_id != UNKNOWN_INSTANCE)
+-    tty->print(",iid=%d",_instance_id);
++    st->print(",iid=%d",_instance_id);
+   if( !_offset ) return;
+-  if( _offset == OffsetTop )      tty->print("+undefined");
+-  else if( _offset == OffsetBot ) tty->print("+any");
+-  else if( _offset < 12 )         tty->print("+%d",_offset);
+-  else                            tty->print("[%d]", (_offset-12)/4 );
++  if( _offset == OffsetTop )      st->print("+undefined");
++  else if( _offset == OffsetBot ) st->print("+any");
++  else if( _offset < 12 )         st->print("+%d",_offset);
++  else                            st->print("[%d]", (_offset-12)/4 );
+ }
+ #endif
+ 
+@@ -3267,8 +3313,8 @@
+ // Structural equality check for Type representations
+ bool TypeKlassPtr::eq( const Type *t ) const {
+   const TypeKlassPtr *p = t->is_klassptr();
+-  return 
+-    klass()->equals(p->klass()) && 
++  return
++    klass()->equals(p->klass()) &&
+     TypeOopPtr::eq(p);
+ }
+ 
+@@ -3298,7 +3344,7 @@
+     // If element type is something like bottom[], k_elem will be null.
+     if (k_elem != NULL)
+       k_ary = ciObjArrayKlass::make(k_elem);
+-  } else if ((elem()->base() == Type::Top) || 
++  } else if ((elem()->base() == Type::Top) ||
+              (elem()->base() == Type::Bottom)) {
+     // element type of Bottom occurs from meet of basic type
+     // and object; Top occurs when doing join on Bottom.
+@@ -3311,7 +3357,7 @@
+     // Compute array klass directly from basic type
+     k_ary = ciTypeArrayKlass::make(elem()->basic_type());
+   }
+-  
++
+   if( this != TypeAryPtr::OOPS )
+     // The _klass field acts as a cache of the underlying
+     // ciKlass for this array type.  In order to set the field,
+@@ -3399,7 +3445,7 @@
+     int offset = meet_offset(tp->offset());
+     PTR ptr = meet_ptr(tp->ptr());
+     switch (tp->ptr()) {
+-    case TopPTR: 
++    case TopPTR:
+     case AnyNull:
+       return make(ptr, klass(), offset);
+     case BotPTR:
+@@ -3415,7 +3461,7 @@
+     int offset = meet_offset(tp->offset());
+     PTR ptr = meet_ptr(tp->ptr());
+     switch (tp->ptr()) {
+-    case TopPTR: 
++    case TopPTR:
+       return this;
+     case Null:
+       if( ptr == Null ) return TypePtr::make( AnyPtr, ptr, offset );
+@@ -3432,7 +3478,7 @@
+   case InstPtr:                 // Meet with InstPtr
+     return TypeInstPtr::BOTTOM;
+ 
+-  //  
++  //
+   //             A-top         }
+   //           /   |   \       }  Tops
+   //       B-top A-any C-top   }
+@@ -3447,7 +3493,7 @@
+   //           \   |   /       }  Bottoms
+   //             A-bot         }
+   //
+-  
++
+   case KlassPtr: {  // Meet two KlassPtr types
+     const TypeKlassPtr *tkls = t->is_klassptr();
+     int  off     = meet_offset(tkls->offset());
+@@ -3501,7 +3547,7 @@
+       }
+       return make( ptr, this_klass, off );
+     } // Else classes are not equal
+-               
++
+     // Since klasses are different, we require the LCA in the Java
+     // class hierarchy - which means we have to fall to at least NotNull.
+     if( ptr == TopPTR || ptr == AnyNull || ptr == Constant )
+@@ -3524,15 +3570,15 @@
+ //------------------------------dump2------------------------------------------
+ // Dump Klass Type
+ #ifndef PRODUCT
+-void TypeKlassPtr::dump2( Dict & d, uint depth ) const {
++void TypeKlassPtr::dump2( Dict & d, uint depth, outputStream *st ) const {
+   switch( _ptr ) {
+   case Constant:
+-    tty->print("precise ");
++    st->print("precise ");
+   case NotNull:
+     {
+       const char *name = klass()->name()->as_utf8();
+       if( name ) {
+-        tty->print("klass %s: " INTPTR_FORMAT, name, klass());
++        st->print("klass %s: " INTPTR_FORMAT, name, klass());
+       } else {
+         ShouldNotReachHere();
+       }
+@@ -3541,18 +3587,18 @@
+     if( !WizardMode && !Verbose && !_klass_is_exact ) break;
+   case TopPTR:
+   case AnyNull:
+-    tty->print(":%s", ptr_msg[_ptr]);
+-    if( _klass_is_exact ) tty->print(":exact");
++    st->print(":%s", ptr_msg[_ptr]);
++    if( _klass_is_exact ) st->print(":exact");
+     break;
+   }
+ 
+   if( _offset ) {               // Dump offset, if any
+-    if( _offset == OffsetBot )      { tty->print("+any"); }
+-    else if( _offset == OffsetTop ) { tty->print("+unknown"); }
+-    else                            { tty->print("+%d", _offset); }
++    if( _offset == OffsetBot )      { st->print("+any"); }
++    else if( _offset == OffsetTop ) { st->print("+unknown"); }
++    else                            { st->print("+%d", _offset); }
+   }
+ 
+-  tty->print(" *");
++  st->print(" *");
+ }
+ #endif
+ 
+@@ -3627,40 +3673,40 @@
+ //------------------------------dump2------------------------------------------
+ // Dump Function Type
+ #ifndef PRODUCT
+-void TypeFunc::dump2( Dict &d, uint depth ) const {
++void TypeFunc::dump2( Dict &d, uint depth, outputStream *st ) const {
+   if( _range->_cnt <= Parms )
+-    tty->print("void");
++    st->print("void");
+   else {
+     uint i;
+     for (i = Parms; i < _range->_cnt-1; i++) {
+-      _range->field_at(i)->dump2(d,depth);
+-      tty->print("/");
++      _range->field_at(i)->dump2(d,depth,st);
++      st->print("/");
+     }
+-    _range->field_at(i)->dump2(d,depth);
++    _range->field_at(i)->dump2(d,depth,st);
+   }
+-  tty->print(" ");
+-  tty->print("( ");
++  st->print(" ");
++  st->print("( ");
+   if( !depth || d[this] ) {     // Check for recursive dump
+-    tty->print("...)");
++    st->print("...)");
+     return;
+   }
+   d.Insert((void*)this,(void*)this);    // Stop recursion
+   if (Parms < _domain->_cnt)
+-    _domain->field_at(Parms)->dump2(d,depth-1);
++    _domain->field_at(Parms)->dump2(d,depth-1,st);
+   for (uint i = Parms+1; i < _domain->_cnt; i++) {
+-    tty->print(", ");
+-    _domain->field_at(i)->dump2(d,depth-1);
++    st->print(", ");
++    _domain->field_at(i)->dump2(d,depth-1,st);
+   }
+-  tty->print(" )");
++  st->print(" )");
+ }
+ 
+ //------------------------------print_flattened--------------------------------
+ // Print a 'flattened' signature
+ static const char * const flat_type_msg[Type::lastype] = {
+-  "bad","control","top","int","long","_", 
+-  "tuple:", "array:", 
+-  "ptr", "rawptr", "ptr", "ptr", "ptr", "ptr", 
+-  "func", "abIO", "return_address", "mem", 
++  "bad","control","top","int","long","_",
++  "tuple:", "array:",
++  "ptr", "rawptr", "ptr", "ptr", "ptr", "ptr",
++  "func", "abIO", "return_address", "mem",
+   "float_top", "ftcon:", "flt",
+   "double_top", "dblcon:", "dbl",
+   "bottom"
+@@ -3703,4 +3749,3 @@
+   }
+   return range()->field_at(TypeFunc::Parms)->basic_type();
+ }
+-
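
Nearly all of the type.cpp churn above follows one mechanical refactor: each dump2 method gains an outputStream *st parameter and prints to st instead of the global tty, so a Type can be dumped to any stream rather than only the terminal. A minimal sketch of the pattern, with a simplified class standing in for HotSpot's outputStream:

#include <cstdarg>
#include <cstdio>

// Simplified stand-in for HotSpot's outputStream.
struct outputStream {
  void print(const char *fmt, ...) {
    va_list ap;
    va_start(ap, fmt);
    std::vprintf(fmt, ap);
    va_end(ap);
  }
};

static outputStream stdout_stream;
static outputStream *tty = &stdout_stream;  // plays the role of the global tty

struct Type {
  virtual ~Type() {}
  // Before the patch, dump2 wrote to tty; now the stream is explicit.
  virtual void dump2(outputStream *st) const { st->print(":type"); }
  // The old entry points survive as thin wrappers, exactly like the
  // dump_on/dump pair the patch adds to type.hpp.
  void dump_on(outputStream *st) const { dump2(st); }
  void dump() const { dump_on(tty); }
};

int main() {
  Type t;
  t.dump();  // still reaches stdout through the tty wrapper
  return 0;
}

(The Dict &d, uint depth recursion-control arguments are dropped here for brevity.)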
+diff -ruN openjdk6/hotspot/src/share/vm/opto/type.hpp openjdk/hotspot/src/share/vm/opto/type.hpp
+--- openjdk6/hotspot/src/share/vm/opto/type.hpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/type.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)type.hpp	1.156 07/05/17 16:02:31 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Portions of code courtesy of Clifford Click
+@@ -35,7 +32,7 @@
+ // Basic types include RSD's (lower bound, upper bound, stride for integers),
+ // float & double precision constants, sets of data-labels and code-labels.
+ // The complete lattice is described below.  Subtypes have no relationship to
+-// up or down in the lattice; that is entirely determined by the behavior of 
++// up or down in the lattice; that is entirely determined by the behavior of
+ // the MEET/JOIN functions.
+ 
+ class Dict;
+@@ -143,10 +140,10 @@
+     compile->set_type_hwm(temp);
+     return temp;
+   }
+-  inline void operator delete( void* ptr ) {    
++  inline void operator delete( void* ptr ) {
+     Compile* compile = Compile::current();
+     compile->type_arena()->Afree(ptr,compile->type_last_size());
+-  }  
++  }
+ 
+   // Initialize the type system for a particular compilation.
+   static void Initialize(Compile* compile);
+@@ -181,9 +178,9 @@
+   virtual const Type *xmeet( const Type *t ) const;
+   virtual const Type *xdual() const;    // Compute dual right now.
+ 
+-  // JOIN operation; higher in lattice.  Done by finding the dual of the 
++  // JOIN operation; higher in lattice.  Done by finding the dual of the
+   // meet of the dual of the 2 inputs.
+-  const Type *join( const Type *t ) const { 
++  const Type *join( const Type *t ) const {
+     return dual()->meet(t->dual())->dual(); }
+ 
+   // Modified version of JOIN adapted to the needs Node::Value.
+@@ -215,7 +212,7 @@
+   const TypeInstPtr *isa_instptr() const;        // Returns NULL if not InstPtr
+   const TypeInstPtr *is_instptr() const;         // Instance
+   const TypeAryPtr *isa_aryptr() const;          // Returns NULL if not AryPtr
+-  const TypeAryPtr *is_aryptr() const;           // Array oop  
++  const TypeAryPtr *is_aryptr() const;           // Array oop
+   virtual bool      is_finite() const;           // Has a finite value
+   virtual bool      is_nan()    const;           // Is not a number (NaN)
+ 
+@@ -227,7 +224,7 @@
+ 
+   // Are you a pointer type or not?
+   bool isa_oop_ptr() const;
+-  
++
+   // TRUE if type is a singleton
+   virtual bool singleton(void) const;
+ 
+@@ -242,10 +239,13 @@
+   static const Type *mreg2type[];
+ 
+   // Printing, statistics
+-  static const char * const msg[lastype]; // Printable strings  
++  static const char * const msg[lastype]; // Printable strings
+ #ifndef PRODUCT
+-  void         dump() const;
+-  virtual void dump2( Dict &d, uint depth ) const;
++  void         dump_on(outputStream *st) const;
++  void         dump() const {
++    dump_on(tty);
++  }
++  virtual void dump2( Dict &d, uint depth, outputStream *st ) const;
+   static  void dump_stats();
+   static  void verify_lastype();          // Check that arrays match type enum
+ #endif
+@@ -261,7 +261,7 @@
+   BasicType array_element_basic_type() const;
+ 
+   // Create standard type for a ciType:
+-  static const Type* get_const_type(ciType* type); 
++  static const Type* get_const_type(ciType* type);
+ 
+   // Create standard zero value:
+   static const Type* get_zero_type(BasicType type) {
+@@ -284,7 +284,7 @@
+   static const Type *CONTROL;
+   static const Type *DOUBLE;
+   static const Type *FLOAT;
+-  static const Type *HALF;  
++  static const Type *HALF;
+   static const Type *MEMORY;
+   static const Type *MULTI;
+   static const Type *RETURN_ADDRESS;
+@@ -326,7 +326,7 @@
+   static const TypeF *ZERO; // positive zero only
+   static const TypeF *ONE;
+ #ifndef PRODUCT
+-  virtual void dump2(Dict &d, uint depth) const;
++  virtual void dump2( Dict &d, uint depth, outputStream *st ) const;
+ #endif
+ };
+ 
+@@ -353,12 +353,12 @@
+   static const TypeD *ZERO; // positive zero only
+   static const TypeD *ONE;
+ #ifndef PRODUCT
+-  virtual void dump2(Dict &d, uint depth) const;
++  virtual void dump2( Dict &d, uint depth, outputStream *st ) const;
+ #endif
+ };
+ 
+ //------------------------------TypeInt----------------------------------------
+-// Class of integer ranges, the set of integers between a lower bound and an 
++// Class of integer ranges, the set of integers between a lower bound and an
+ // upper bound, inclusive.
+ class TypeInt : public Type {
+   TypeInt( jint lo, jint hi, int w );
+@@ -407,13 +407,13 @@
+   static const TypeInt *INT;
+   static const TypeInt *SYMINT; // symmetric range [-max_jint..max_jint]
+ #ifndef PRODUCT
+-  virtual void dump2(Dict &d, uint depth) const;
++  virtual void dump2( Dict &d, uint depth, outputStream *st ) const;
+ #endif
+ };
+ 
+ 
+ //------------------------------TypeLong---------------------------------------
+-// Class of long integer ranges, the set of integers between a lower bound and 
++// Class of long integer ranges, the set of integers between a lower bound and
+ // an upper bound, inclusive.
+ class TypeLong : public Type {
+   TypeLong( jlong lo, jlong hi, int w );
+@@ -451,7 +451,7 @@
+   static const TypeLong *INT;    // 32-bit subrange [min_jint..max_jint]
+   static const TypeLong *UINT;   // 32-bit unsigned [0..max_juint]
+ #ifndef PRODUCT
+-  virtual void dump2( Dict &d, uint ) const;// Specialized per-Type dumping
++  virtual void dump2( Dict &d, uint, outputStream *st  ) const;// Specialized per-Type dumping
+ #endif
+ };
+ 
+@@ -503,7 +503,7 @@
+   static const TypeTuple *INT_PAIR;
+   static const TypeTuple *LONG_PAIR;
+ #ifndef PRODUCT
+-  virtual void dump2( Dict &d, uint ) const; // Specialized per-Type dumping
++  virtual void dump2( Dict &d, uint, outputStream *st  ) const; // Specialized per-Type dumping
+ #endif
+ };
+ 
+@@ -530,7 +530,7 @@
+   virtual const Type *xdual() const;    // Compute dual right now.
+   bool ary_must_be_exact() const;  // true if arrays of such are never generic
+ #ifndef PRODUCT
+-  virtual void dump2( Dict &d, uint ) const; // Specialized per-Type dumping
++  virtual void dump2( Dict &d, uint, outputStream *st  ) const; // Specialized per-Type dumping
+ #endif
+ };
+ 
+@@ -549,7 +549,7 @@
+   static const PTR ptr_meet[lastPTR][lastPTR];
+   static const PTR ptr_dual[lastPTR];
+   static const char * const ptr_msg[lastPTR];
+-  
++
+ public:
+   const int _offset;            // Offset into oop, with TOP & BOT
+   const PTR _ptr;               // Pointer equivalence class
+@@ -591,7 +591,7 @@
+   static const TypePtr *NOTNULL;
+   static const TypePtr *BOTTOM;
+ #ifndef PRODUCT
+-  virtual void dump2( Dict &d, uint depth ) const;
++  virtual void dump2( Dict &d, uint depth, outputStream *st  ) const;
+ #endif
+ };
+ 
+@@ -623,7 +623,7 @@
+   static const TypeRawPtr *BOTTOM;
+   static const TypeRawPtr *NOTNULL;
+ #ifndef PRODUCT
+-  virtual void dump2( Dict &d, uint depth ) const;
++  virtual void dump2( Dict &d, uint depth, outputStream *st  ) const;
+ #endif
+ };
+ 
+@@ -682,7 +682,7 @@
+   static const TypeOopPtr* make(PTR ptr, int offset);
+ 
+   ciObject* const_oop()    const { return _const_oop; }
+-  virtual ciKlass* klass() const { return _klass;     } 
++  virtual ciKlass* klass() const { return _klass;     }
+   bool klass_is_exact()    const { return _klass_is_exact; }
+   bool is_instance()       const { return _instance_id != UNKNOWN_INSTANCE; }
+   uint instance_id()       const { return _instance_id; }
+@@ -709,7 +709,7 @@
+   // Convenience common pre-built type.
+   static const TypeOopPtr *BOTTOM;
+ #ifndef PRODUCT
+-  virtual void dump2( Dict &d, uint depth ) const;
++  virtual void dump2( Dict &d, uint depth, outputStream *st ) const;
+ #endif
+ };
+ 
+@@ -780,7 +780,7 @@
+   static const TypeInstPtr *MARK;
+   static const TypeInstPtr *KLASS;
+ #ifndef PRODUCT
+-  virtual void dump2( Dict &d, uint ) const; // Specialized per-Type dumping
++  virtual void dump2( Dict &d, uint depth, outputStream *st ) const; // Specialized per-Type dumping
+ #endif
+ };
+ 
+@@ -837,8 +837,10 @@
+     return _array_body_type[elem];
+   }
+   static const TypeAryPtr *_array_body_type[T_CONFLICT+1];
++  // sharpen the type of an int which is used as an array size
++  static const TypeInt* narrow_size_type(const TypeInt* size, BasicType elem);
+ #ifndef PRODUCT
+-  virtual void dump2( Dict &d, uint ) const; // Specialized per-Type dumping
++  virtual void dump2( Dict &d, uint depth, outputStream *st ) const; // Specialized per-Type dumping
+ #endif
+ };
+ 
+@@ -875,7 +877,7 @@
+   static const TypeKlassPtr* OBJECT; // Not-null object klass or below
+   static const TypeKlassPtr* OBJECT_OR_NULL; // Maybe-null version of same
+ #ifndef PRODUCT
+-  virtual void dump2( Dict &d, uint ) const; // Specialized per-Type dumping
++  virtual void dump2( Dict &d, uint depth, outputStream *st ) const; // Specialized per-Type dumping
+ #endif
+ };
+ 
+@@ -889,11 +891,11 @@
+   virtual bool empty(void) const;        // TRUE if type is vacuous
+ public:
+   // Constants are shared among ADLC and VM
+-  enum { Control    = AdlcVMDeps::Control, 
+-         I_O        = AdlcVMDeps::I_O, 
+-         Memory     = AdlcVMDeps::Memory, 
+-         FramePtr   = AdlcVMDeps::FramePtr, 
+-         ReturnAdr  = AdlcVMDeps::ReturnAdr, 
++  enum { Control    = AdlcVMDeps::Control,
++         I_O        = AdlcVMDeps::I_O,
++         Memory     = AdlcVMDeps::Memory,
++         FramePtr   = AdlcVMDeps::FramePtr,
++         ReturnAdr  = AdlcVMDeps::ReturnAdr,
+          Parms      = AdlcVMDeps::Parms
+   };
+ 
+@@ -914,112 +916,112 @@
+   BasicType return_type() const;
+ 
+ #ifndef PRODUCT
+-  virtual void dump2( Dict &d, uint ) const; // Specialized per-Type dumping
++  virtual void dump2( Dict &d, uint depth, outputStream *st ) const; // Specialized per-Type dumping
+   void print_flattened() const; // Print a 'flattened' signature
+ #endif
+   // Convenience common pre-built types.
+ };
+ 
+ //------------------------------accessors--------------------------------------
+-inline float Type::getf() const { 
+-  assert( _base == FloatCon, "Not a FloatCon" ); 
++inline float Type::getf() const {
++  assert( _base == FloatCon, "Not a FloatCon" );
+   return ((TypeF*)this)->_f;
+ }
+ 
+-inline double Type::getd() const { 
+-  assert( _base == DoubleCon, "Not a DoubleCon" ); 
+-  return ((TypeD*)this)->_d; 
++inline double Type::getd() const {
++  assert( _base == DoubleCon, "Not a DoubleCon" );
++  return ((TypeD*)this)->_d;
+ }
+ 
+-inline const TypeF *Type::is_float_constant() const { 
+-  assert( _base == FloatCon, "Not a Float" ); 
+-  return (TypeF*)this; 
++inline const TypeF *Type::is_float_constant() const {
++  assert( _base == FloatCon, "Not a Float" );
++  return (TypeF*)this;
+ }
+ 
+ inline const TypeF *Type::isa_float_constant() const {
+   return ( _base == FloatCon ? (TypeF*)this : NULL);
+ }
+ 
+-inline const TypeD *Type::is_double_constant() const { 
+-  assert( _base == DoubleCon, "Not a Double" ); 
+-  return (TypeD*)this; 
++inline const TypeD *Type::is_double_constant() const {
++  assert( _base == DoubleCon, "Not a Double" );
++  return (TypeD*)this;
+ }
+ 
+ inline const TypeD *Type::isa_double_constant() const {
+   return ( _base == DoubleCon ? (TypeD*)this : NULL);
+ }
+ 
+-inline const TypeInt *Type::is_int() const { 
+-  assert( _base == Int, "Not an Int" ); 
+-  return (TypeInt*)this; 
++inline const TypeInt *Type::is_int() const {
++  assert( _base == Int, "Not an Int" );
++  return (TypeInt*)this;
+ }
+ 
+-inline const TypeInt *Type::isa_int() const { 
+-  return ( _base == Int ? (TypeInt*)this : NULL); 
++inline const TypeInt *Type::isa_int() const {
++  return ( _base == Int ? (TypeInt*)this : NULL);
+ }
+ 
+-inline const TypeLong *Type::is_long() const { 
+-  assert( _base == Long, "Not a Long" ); 
+-  return (TypeLong*)this; 
++inline const TypeLong *Type::is_long() const {
++  assert( _base == Long, "Not a Long" );
++  return (TypeLong*)this;
+ }
+ 
+-inline const TypeLong *Type::isa_long() const { 
++inline const TypeLong *Type::isa_long() const {
+   return ( _base == Long ? (TypeLong*)this : NULL);
+ }
+ 
+-inline const TypeTuple *Type::is_tuple() const { 
+-  assert( _base == Tuple, "Not a Tuple" ); 
+-  return (TypeTuple*)this; 
++inline const TypeTuple *Type::is_tuple() const {
++  assert( _base == Tuple, "Not a Tuple" );
++  return (TypeTuple*)this;
+ }
+ 
+-inline const TypeAry *Type::is_ary() const { 
+-  assert( _base == Array , "Not an Array" ); 
+-  return (TypeAry*)this; 
++inline const TypeAry *Type::is_ary() const {
++  assert( _base == Array , "Not an Array" );
++  return (TypeAry*)this;
+ }
+ 
+-inline const TypePtr *Type::is_ptr() const { 
++inline const TypePtr *Type::is_ptr() const {
+   // AnyPtr is the first Ptr and KlassPtr the last, with no non-ptrs between.
+   assert(_base >= AnyPtr && _base <= KlassPtr, "Not a pointer");
+-  return (TypePtr*)this; 
++  return (TypePtr*)this;
+ }
+ 
+-inline const TypePtr *Type::isa_ptr() const { 
++inline const TypePtr *Type::isa_ptr() const {
+   // AnyPtr is the first Ptr and KlassPtr the last, with no non-ptrs between.
+   return (_base >= AnyPtr && _base <= KlassPtr) ? (TypePtr*)this : NULL;
+ }
+ 
+-inline const TypeOopPtr *Type::is_oopptr() const { 
++inline const TypeOopPtr *Type::is_oopptr() const {
+   // OopPtr is the first and KlassPtr the last, with no non-oops between.
+   assert(_base >= OopPtr && _base <= KlassPtr, "Not a Java pointer" ) ;
+   return (TypeOopPtr*)this;
+ }
+ 
+-inline const TypeOopPtr *Type::isa_oopptr() const { 
++inline const TypeOopPtr *Type::isa_oopptr() const {
+   // OopPtr is the first and KlassPtr the last, with no non-oops between.
+   return (_base >= OopPtr && _base <= KlassPtr) ? (TypeOopPtr*)this : NULL;
+ }
+ 
+-inline const TypeRawPtr *Type::is_rawptr() const { 
+-  assert( _base == RawPtr, "Not a raw pointer" ); 
+-  return (TypeRawPtr*)this; 
++inline const TypeRawPtr *Type::is_rawptr() const {
++  assert( _base == RawPtr, "Not a raw pointer" );
++  return (TypeRawPtr*)this;
+ }
+ 
+-inline const TypeInstPtr *Type::isa_instptr() const { 
++inline const TypeInstPtr *Type::isa_instptr() const {
+   return (_base == InstPtr) ? (TypeInstPtr*)this : NULL;
+ }
+ 
+-inline const TypeInstPtr *Type::is_instptr() const { 
+-  assert( _base == InstPtr, "Not an object pointer" ); 
+-  return (TypeInstPtr*)this; 
++inline const TypeInstPtr *Type::is_instptr() const {
++  assert( _base == InstPtr, "Not an object pointer" );
++  return (TypeInstPtr*)this;
+ }
+ 
+-inline const TypeAryPtr *Type::isa_aryptr() const { 
++inline const TypeAryPtr *Type::isa_aryptr() const {
+   return (_base == AryPtr) ? (TypeAryPtr*)this : NULL;
+ }
+ 
+-inline const TypeAryPtr *Type::is_aryptr() const { 
+-  assert( _base == AryPtr, "Not an array pointer" ); 
+-  return (TypeAryPtr*)this; 
++inline const TypeAryPtr *Type::is_aryptr() const {
++  assert( _base == AryPtr, "Not an array pointer" );
++  return (TypeAryPtr*)this;
+ }
+ 
+ inline const TypeKlassPtr *Type::isa_klassptr() const {
+@@ -1054,7 +1056,7 @@
+ #define Type_X       Type::Long
+ #define TypeX_X      TypeLong::LONG
+ #define TypeX_ZERO   TypeLong::ZERO
+-// For 'ideal_reg' machine registers 
++// For 'ideal_reg' machine registers
+ #define Op_RegX      Op_RegL
+ // For phase->intcon variants
+ #define MakeConX     longcon
+@@ -1120,4 +1122,3 @@
+ #define ConvX2L(x)   ConvI2L(x)
+ 
+ #endif
+-
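
The inline accessors rewritten above all follow one HotSpot convention worth spelling out: is_X() asserts the type tag and casts unconditionally (for callers that already know the type), while isa_X() returns NULL on a mismatch (for callers that want to test). A tiny sketch of the pair over a hypothetical tag enum:

#include <cassert>
#include <cstddef>

enum Base { Int, Long };  // hypothetical tag enum

struct Type {
  Base _base;
  explicit Type(Base b) : _base(b) {}
};
struct TypeInt : Type { TypeInt() : Type(Int) {} };

// is_int: the caller guarantees the tag; misuse trips the assert.
inline const TypeInt *is_int(const Type *t) {
  assert(t->_base == Int && "Not an Int");
  return static_cast<const TypeInt *>(t);
}

// isa_int: the caller tests the result; NULL signals a mismatch.
inline const TypeInt *isa_int(const Type *t) {
  return t->_base == Int ? static_cast<const TypeInt *>(t) : NULL;
}

int main() {
  TypeInt ti;
  Type tl(Long);
  assert(is_int(&ti) != NULL);
  assert(isa_int(&tl) == NULL);  // mismatch is a testable NULL, not a crash
  return 0;
}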
+diff -ruN openjdk6/hotspot/src/share/vm/opto/vectornode.cpp openjdk/hotspot/src/share/vm/opto/vectornode.cpp
+--- openjdk6/hotspot/src/share/vm/opto/vectornode.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/vectornode.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)vectornode.cpp	1.5 07/05/17 16:02:33 JVM"
+-#endif
+ /*
+  * Copyright 2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -114,17 +111,17 @@
+   case T_BOOLEAN:
+   case T_BYTE:
+     return new (C, 2) PackBNode(s);
+-  case T_CHAR:     
++  case T_CHAR:
+     return new (C, 2) PackCNode(s);
+-  case T_SHORT:    
++  case T_SHORT:
+     return new (C, 2) PackSNode(s);
+-  case T_INT:      
++  case T_INT:
+     return new (C, 2) PackINode(s);
+-  case T_LONG:     
++  case T_LONG:
+     return new (C, 2) PackLNode(s);
+-  case T_FLOAT:    
++  case T_FLOAT:
+     return new (C, 2) PackFNode(s);
+-  case T_DOUBLE:   
++  case T_DOUBLE:
+     return new (C, 2) PackDNode(s);
+   }
+   ShouldNotReachHere();
+@@ -402,23 +399,23 @@
+   case Op_Load16B: return new (C, 3) Load16BNode(ctl, mem, adr, atyp);
+   case Op_Load8B:  return new (C, 3) Load8BNode(ctl, mem, adr, atyp);
+   case Op_Load4B:  return new (C, 3) Load4BNode(ctl, mem, adr, atyp);
+- 
++
+   case Op_Load8C:  return new (C, 3) Load8CNode(ctl, mem, adr, atyp);
+   case Op_Load4C:  return new (C, 3) Load4CNode(ctl, mem, adr, atyp);
+   case Op_Load2C:  return new (C, 3) Load2CNode(ctl, mem, adr, atyp);
+- 
++
+   case Op_Load8S:  return new (C, 3) Load8SNode(ctl, mem, adr, atyp);
+   case Op_Load4S:  return new (C, 3) Load4SNode(ctl, mem, adr, atyp);
+   case Op_Load2S:  return new (C, 3) Load2SNode(ctl, mem, adr, atyp);
+- 
++
+   case Op_Load4I:  return new (C, 3) Load4INode(ctl, mem, adr, atyp);
+   case Op_Load2I:  return new (C, 3) Load2INode(ctl, mem, adr, atyp);
+- 
++
+   case Op_Load2L:  return new (C, 3) Load2LNode(ctl, mem, adr, atyp);
+- 
++
+   case Op_Load4F:  return new (C, 3) Load4FNode(ctl, mem, adr, atyp);
+   case Op_Load2F:  return new (C, 3) Load2FNode(ctl, mem, adr, atyp);
+- 
++
+   case Op_Load2D:  return new (C, 3) Load2DNode(ctl, mem, adr, atyp);
+   }
+   ShouldNotReachHere();
+diff -ruN openjdk6/hotspot/src/share/vm/opto/vectornode.hpp openjdk/hotspot/src/share/vm/opto/vectornode.hpp
+--- openjdk6/hotspot/src/share/vm/opto/vectornode.hpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/opto/vectornode.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)vectornode.hpp	1.6 07/05/17 16:02:36 JVM"
+-#endif
+ /*
+  * Copyright 2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -1004,7 +1001,7 @@
+ };
+ 
+ //------------------------------PackFNode---------------------------------------
+-// Pack float scalars into vector 
++// Pack float scalars into vector
+ class PackFNode : public PackNode {
+  protected:
+   virtual BasicType elt_basic_type() const { return T_FLOAT; }
+diff -ruN openjdk6/hotspot/src/share/vm/prims/evmCompat.cpp openjdk/hotspot/src/share/vm/prims/evmCompat.cpp
+--- openjdk6/hotspot/src/share/vm/prims/evmCompat.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/evmCompat.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)evmCompat.cpp	1.12 07/05/05 17:06:31 JVM"
+-#endif
+ /*
+  * Copyright 1999-2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This file contains definitions for functions that exist
+diff -ruN openjdk6/hotspot/src/share/vm/prims/forte.cpp openjdk/hotspot/src/share/vm/prims/forte.cpp
+--- openjdk6/hotspot/src/share/vm/prims/forte.cpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/forte.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)forte.cpp	1.69 07/05/17 16:02:39 JVM"
+-#endif
+ /*
+  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -193,7 +190,7 @@
+   // This failure mode only occurs when the compiled frame's PC
+   // is in the code cache so we are okay for this check if the
+   // PC is not in the code cache.
+-  CodeBlob* cb = CodeCache::find_blob(fr->pc()); 
++  CodeBlob* cb = CodeCache::find_blob(fr->pc());
+   if (cb == NULL) {
+     return ret_value;
+   }
+@@ -403,40 +400,6 @@
+   } while (!fill_from_frame());
+ }
+ 
+-
+-// is_valid_method() exists in fprofiler.cpp and now here.
+-// We need one central version of this routine.
+-
+-bool forte_is_valid_method(methodOop method) {
+-
+-  if (method == NULL || 
+-      // The methodOop is extracted via an offset from the current
+-      // interpreter frame. With AsyncGetCallTrace() the interpreter
+-      // frame may still be under construction so we need to make
+-      // sure that we got an aligned oop before we try to use it.
+-      !Space::is_aligned(method) ||
+-      !Universe::heap()->is_in((void*)method) ||
+-      // See if GC became active after we entered AsyncGetCallTrace()
+-      // and before we try to use the methodOop. This routine is
+-      // used in validation of the top_frame so we don't have any
+-      // other data to flush if we bail due to GC here.
+-      // Yes, there is still a window after this check and before
+-      // we use methodOop below, but we can't lock out GC so that
+-      // has to be an acceptable risk.
+-      Universe::heap()->is_gc_active() ||
+-      //
+-      // is_perm_and_alloced() needs to be checked before klass() because you can
+-      // get a method pointing into the unmapped part of the heap's
+-      // reserved area (e.g., a very high address just within bounds),
+-      // and the instruction which loads the class will SIGSEGV.
+-      !method->is_perm_and_alloced() || 
+-      method->klass() != Universe::methodKlassObj()) {
+-    return false;   // doesn't look good
+-  }
+-  return true;      // hopefully this is a method indeed
+-}
+-
+-
+ // Determine if 'fr' is a walkable, compiled frame.
+ // *is_compiled_p is set to true if the frame is compiled and if it
+ // is, then *is_walkable_p is set to true if it is also walkable.
+@@ -446,8 +409,8 @@
+   *is_compiled_p = false;
+   *is_walkable_p = false;
+ 
+-  CodeBlob* cb = CodeCache::find_blob(fr->pc()); 
+-  if (cb != NULL && 
++  CodeBlob* cb = CodeCache::find_blob(fr->pc());
++  if (cb != NULL &&
+       cb->is_nmethod() &&
+       ((nmethod*)cb)->is_java_method()) {
+     // frame is compiled and executing a Java method
+@@ -498,14 +461,14 @@
+   methodOop* method_p, int* bci_p) {
+   assert(fr->is_interpreted_frame(), "just checking");
+ 
+-  // top frame is an interpreted frame 
++  // top frame is an interpreted frame
+   // check if it is walkable (i.e. valid methodOop and valid bci)
+   if (fr->is_interpreted_frame_valid()) {
+     if (fr->fp() != NULL) {
+       // access address in order not to trigger asserts that
+       // are built in interpreter_frame_method function
+       methodOop method = *fr->interpreter_frame_method_addr();
+-      if (forte_is_valid_method(method)) {
++      if (Universe::heap()->is_valid_method(method)) {
+         intptr_t bcx = fr->interpreter_frame_bcx();
+         int      bci = method->validate_bci_from_bcx(bcx);
+         // note: bci is set to -1 if not a valid bci
+@@ -680,6 +643,8 @@
+     return;
+   }
+ 
++  CollectedHeap* ch = Universe::heap();
++
+   if (method != NULL) {
+     // The method is not stored GC safe so see if GC became active
+     // after we entered AsyncGetCallTrace() and before we try to
+@@ -687,7 +652,7 @@
+     // Yes, there is still a window after this check and before
+     // we use methodOop below, but we can't lock out GC so that
+     // has to be an acceptable risk.
+-    if (!forte_is_valid_method(method)) {
++    if (!ch->is_valid_method(method)) {
+       trace->num_frames = -2;
+       return;
+     }
+@@ -705,7 +670,7 @@
+         trace->frames[0].lineno = -3;
+       }
+     }
+-  } 
++  }
+ 
+   // check has_last_Java_frame() after looking at the top frame
+   // which may be an interpreted Java frame.
+@@ -725,7 +690,7 @@
+     // Yes, there is still a window after this check and before
+     // we use methodOop below, but we can't lock out GC so that
+     // has to be an acceptable risk.
+-    if (!forte_is_valid_method(method)) {
++    if (!ch->is_valid_method(method)) {
+       // we throw away everything we've gathered in this sample since
+       // none of it is safe
+       trace->num_frames = -2;
+@@ -750,7 +715,7 @@
+ // Async-safe version of GetCallTrace being called from a signal handler
+ // when a LWP gets interrupted by SIGPROF but the stack traces are filled
+ // with different content (see below).
+-// 
++//
+ // This function must only be called when JVM/TI
+ // CLASS_LOAD events have been enabled since agent startup. The enabled
+ // event will cause the jmethodIDs to be allocated at class load time.
+@@ -759,16 +724,16 @@
+ //
+ // void (*AsyncGetCallTrace)(ASGCT_CallTrace *trace, jint depth, void* ucontext)
+ //
+-// Called by the profiler to obtain the current method call stack trace for 
+-// a given thread. The thread is identified by the env_id field in the 
+-// ASGCT_CallTrace structure. The profiler agent should allocate a ASGCT_CallTrace 
+-// structure with enough memory for the requested stack depth. The VM fills in 
+-// the frames buffer and the num_frames field. 
++// Called by the profiler to obtain the current method call stack trace for
++// a given thread. The thread is identified by the env_id field in the
++// ASGCT_CallTrace structure. The profiler agent should allocate a ASGCT_CallTrace
++// structure with enough memory for the requested stack depth. The VM fills in
++// the frames buffer and the num_frames field.
+ //
+-// Arguments: 
++// Arguments:
+ //
+-//   trace    - trace data structure to be filled by the VM. 
+-//   depth    - depth of the call stack trace. 
++//   trace    - trace data structure to be filled by the VM.
++//   depth    - depth of the call stack trace.
+ //   ucontext - ucontext_t of the LWP
+ //
+ // ASGCT_CallTrace:
+@@ -779,18 +744,18 @@
+ //   } ASGCT_CallTrace;
+ //
+ // Fields:
+-//   env_id     - ID of thread which executed this trace. 
+-//   num_frames - number of frames in the trace. 
++//   env_id     - ID of thread which executed this trace.
++//   num_frames - number of frames in the trace.
+ //                (< 0 indicates the frame is not walkable).
+ //   frames     - the ASGCT_CallFrames that make up this trace. Callee followed by callers.
+ //
+ //  ASGCT_CallFrame:
+ //    typedef struct {
+-//        jint lineno;                     
+-//        jmethodID method_id;              
++//        jint lineno;
++//        jmethodID method_id;
+ //    } ASGCT_CallFrame;
+ //
+-//  Fields: 
++//  Fields:
+ //    1) For Java frame (interpreted and compiled),
+ //       lineno    - bci of the method being executed or -1 if bci is not available
+ //       method_id - jmethodID of the method being executed
+@@ -830,7 +795,7 @@
+     return;
+   }
+ 
+-  assert(JavaThread::current() == thread, 
++  assert(JavaThread::current() == thread,
+          "AsyncGetCallTrace must be called by the current interrupted thread");
+ 
+   if (!JvmtiExport::should_post_class_load()) {
+@@ -859,7 +824,7 @@
+   case _thread_in_vm_trans:
+     {
+       frame fr;
+-      
++
+       // param isInJava == false - indicate we aren't in Java code
+       if (!thread->pd_get_top_frame_for_signal_handler(&fr, ucontext, false)) {
+         if (!thread->has_last_Java_frame()) {
+@@ -870,11 +835,11 @@
+       } else {
+         trace->num_frames = -4;    // non walkable frame by default
+         forte_fill_call_trace_given_top(thread, trace, depth, fr);
+-      }      
++      }
+     }
+     break;
+-  case _thread_in_Java: 
+-  case _thread_in_Java_trans: 
++  case _thread_in_Java:
++  case _thread_in_Java_trans:
+     {
+       frame fr;
+ 
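
The forte.cpp hunks delete the file-local forte_is_valid_method and route every async-profiler caller through a single Universe::heap()->is_valid_method(method), so the sanity checks (oop alignment, heap membership, GC activity, klass identity) are maintained in one place instead of being duplicated per file. A schematic sketch of the consolidation; the names mirror the patch, but the check bodies are illustrative stubs:

#include <cstddef>

struct methodOop_t {};  // opaque stand-in for HotSpot's methodOop

struct CollectedHeap {
  // Stubs for the real VM queries.
  bool is_in(const void *) const { return true; }  // heap membership
  bool is_gc_active() const { return false; }      // GC in progress
  // The single shared validator replacing per-file copies like
  // forte_is_valid_method(); the real version also checks alignment,
  // perm-gen allocation, and the method's klass.
  bool is_valid_method(methodOop_t *m) const {
    return m != NULL && is_in(m) && !is_gc_active();
  }
};

int main() {
  CollectedHeap heap;
  methodOop_t m;
  return heap.is_valid_method(&m) ? 0 : 1;
}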
+diff -ruN openjdk6/hotspot/src/share/vm/prims/forte.hpp openjdk/hotspot/src/share/vm/prims/forte.hpp
+--- openjdk6/hotspot/src/share/vm/prims/forte.hpp	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/forte.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)forte.hpp	1.7 07/05/05 17:06:31 JVM"
+-#endif
+ /*
+  * Copyright 2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,13 +19,14 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Interface to Forte support.
+ 
+ class Forte : AllStatic {
+  public:
+-   static void register_stub(const char* name, address start, address end);    
++   static void register_stub(const char* name, address start, address end)
++                                                 KERNEL_RETURN;
+                                                  // register internal VM stub
+ };
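
forte.hpp's one substantive change appends KERNEL_RETURN to register_stub, the HotSpot macro idiom that lets a single declaration serve both VM flavors: in the stripped-down kernel VM it expands to an empty inline body, otherwise to nothing, so the trailing semicolon ends an ordinary declaration. A sketch of the idiom, assuming the KERNEL configuration define (the expansions are an assumed reconstruction of HotSpot's macros.hpp, not quoted from it):

// Assumed reconstruction of the KERNEL_RETURN idiom.
#ifdef KERNEL
#define KERNEL_RETURN {}   // feature compiled out: empty inline body
#else
#define KERNEL_RETURN      // full VM: declaration only, defined in forte.cpp
#endif

typedef unsigned char *address;  // stand-in for HotSpot's address type

class Forte {
 public:
  static void register_stub(const char *name, address start, address end)
      KERNEL_RETURN;  // kernel build: empty body; full build: declaration
};

int main() { return 0; }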
+diff -ruN openjdk6/hotspot/src/share/vm/prims/hpi_imported.h openjdk/hotspot/src/share/vm/prims/hpi_imported.h
+--- openjdk6/hotspot/src/share/vm/prims/hpi_imported.h	2008-02-28 05:02:40.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/hpi_imported.h	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)hpi_imported.h	1.17 07/05/05 17:06:31 JVM"
+-#endif
+ /*
+  * Copyright 1998-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ /*
+@@ -79,7 +76,7 @@
+     /*
+      * Free must allow ptr == NULL to be a no-op.
+      */
+-  void	  (*Free)(void *ptr);
++  void    (*Free)(void *ptr);
+     /*
+      * Calloc must return a unique pointer for if
+      * n_item == 0 || item_size == 0.
+@@ -102,7 +99,7 @@
+    */
+   void *  (*DecommitMem)(void *ptr, size_t size, size_t *actual);
+ 
+-#define HPI_PAGE_ALIGNMENT	    (64 * 1024)
++#define HPI_PAGE_ALIGNMENT          (64 * 1024)
+ 
+   void *  (*AllocBlock)(size_t size, void **headP);
+   void    (*FreeBlock)(void *head);
+@@ -112,7 +109,7 @@
+  * dynamic linking libraries
+  */
+ typedef struct {
+-  void	 (*BuildLibName)(char *buf, int buf_len, char *path, const char *name);
++  void   (*BuildLibName)(char *buf, int buf_len, char *path, const char *name);
+   int    (*BuildFunName)(char *name, int name_len, int arg_size, int en_idx);
+ 
+   void * (*LoadLibrary)(const char *name, char *err_buf, int err_buflen);
+@@ -133,7 +130,7 @@
+ 
+ typedef struct {
+   HPI_SysInfo *    (*GetSysInfo)(void);
+-  long 	           (*GetMilliTicks)(void);
++  long             (*GetMilliTicks)(void);
+   jlong            (*TimeMillis)(void);
+ 
+   signal_handler_t (*Signal)(int sig, signal_handler_t handler);
+@@ -144,7 +141,7 @@
+   int              (*Shutdown)(void);
+ 
+   int              (*SetLoggingLevel)(int level);
+-  bool_t           (*SetMonitoringOn)(bool_t on);  
++  bool_t           (*SetMonitoringOn)(bool_t on);
+   int              (*GetLastErrorString)(char *buf, int len);
+ } HPI_SystemInterface;
+ 
+@@ -154,16 +151,16 @@
+ typedef struct  sys_thread sys_thread_t;
+ typedef struct  sys_mon sys_mon_t;
+ 
+-#define HPI_OK	        0
+-#define HPI_ERR	       -1
++#define HPI_OK          0
++#define HPI_ERR        -1
+ #define HPI_INTRPT     -2    /* Operation was interrupted */
+ #define HPI_TIMEOUT    -3    /* A timer ran out */
+ #define HPI_NOMEM      -5    /* Ran out of memory */
+ #define HPI_NORESOURCE -6    /* Ran out of some system resource */
+ 
+ /* There are three basic states: RUNNABLE, MONITOR_WAIT, and CONDVAR_WAIT.
+- * When the thread is suspended in any of these states, the 
+- * HPI_THREAD_SUSPENDED bit will be set 
++ * When the thread is suspended in any of these states, the
++ * HPI_THREAD_SUSPENDED bit will be set
+  */
+ enum {
+     HPI_THREAD_RUNNABLE = 1,
+@@ -171,9 +168,9 @@
+     HPI_THREAD_CONDVAR_WAIT
+ };
+ 
+-#define HPI_MINIMUM_PRIORITY	    1
+-#define HPI_MAXIMUM_PRIORITY	    10
+-#define HPI_NORMAL_PRIORITY	    5
++#define HPI_MINIMUM_PRIORITY        1
++#define HPI_MAXIMUM_PRIORITY        10
++#define HPI_NORMAL_PRIORITY         5
+ 
+ #define HPI_THREAD_SUSPENDED        0x8000
+ #define HPI_THREAD_INTERRUPTED      0x4000
+@@ -190,28 +187,28 @@
+ } sys_mon_info;
+ 
+ typedef struct {
+-  int 	         (*ThreadBootstrap)(sys_thread_t **tidP,
+-				    sys_mon_t **qlockP,
+-				    int nReservedBytes);
+-  int 	         (*ThreadCreate)(sys_thread_t **tidP,
+-				 long stk_size,
+-				 void (*func)(void *),
+-				 void *arg);
++  int            (*ThreadBootstrap)(sys_thread_t **tidP,
++                                    sys_mon_t **qlockP,
++                                    int nReservedBytes);
++  int            (*ThreadCreate)(sys_thread_t **tidP,
++                                 long stk_size,
++                                 void (*func)(void *),
++                                 void *arg);
+   sys_thread_t * (*ThreadSelf)(void);
+   void           (*ThreadYield)(void);
+-  int	         (*ThreadSuspend)(sys_thread_t *tid);
+-  int	         (*ThreadResume)(sys_thread_t *tid);
+-  int	         (*ThreadSetPriority)(sys_thread_t *tid, int prio);
+-  int	         (*ThreadGetPriority)(sys_thread_t *tid, int *prio);
+-  void *         (*ThreadStackPointer)(sys_thread_t *tid); 
+-  void *	 (*ThreadStackTop)(sys_thread_t *tid);
++  int            (*ThreadSuspend)(sys_thread_t *tid);
++  int            (*ThreadResume)(sys_thread_t *tid);
++  int            (*ThreadSetPriority)(sys_thread_t *tid, int prio);
++  int            (*ThreadGetPriority)(sys_thread_t *tid, int *prio);
++  void *         (*ThreadStackPointer)(sys_thread_t *tid);
++  void *         (*ThreadStackTop)(sys_thread_t *tid);
+   long *         (*ThreadRegs)(sys_thread_t *tid, int *regs);
+-  int	         (*ThreadSingle)(void);
+-  void	         (*ThreadMulti)(void);
++  int            (*ThreadSingle)(void);
++  void           (*ThreadMulti)(void);
+   int            (*ThreadEnumerateOver)(int (*func)(sys_thread_t *, void *),
+-					void *arg);
++                                        void *arg);
+   int            (*ThreadCheckStack)(void);
+-  void	         (*ThreadPostException)(sys_thread_t *tid, void *arg);
++  void           (*ThreadPostException)(sys_thread_t *tid, void *arg);
+   void           (*ThreadInterrupt)(sys_thread_t *tid);
+   int            (*ThreadIsInterrupted)(sys_thread_t *tid, int clear);
+   int            (*ThreadAlloc)(sys_thread_t **tidP);
+@@ -220,18 +217,18 @@
+   int            (*ThreadGetStatus)(sys_thread_t *tid, sys_mon_t **monitor);
+   void *         (*ThreadInterruptEvent)(void);
+   void *         (*ThreadNativeID)(sys_thread_t *tid);
+-  
++
+   /* These three functions are used by the CPU time profiler.
+-   * sysThreadIsRunning determines whether the thread is running (not just 
++   * sysThreadIsRunning determines whether the thread is running (not just
+    * runnable). It is only safe to call this function after calling
+    * sysThreadProfSuspend.
+    */
+   bool_t         (*ThreadIsRunning)(sys_thread_t *tid);
+   void           (*ThreadProfSuspend)(sys_thread_t *tid);
+   void           (*ThreadProfResume)(sys_thread_t *tid);
+-  
++
+   int            (*AdjustTimeSlice)(int ms);
+-  
++
+   size_t         (*MonitorSizeof)(void);
+   int            (*MonitorInit)(sys_mon_t *mid);
+   int            (*MonitorDestroy)(sys_mon_t *mid);
+@@ -240,11 +237,11 @@
+   int            (*MonitorExit)(sys_thread_t *self, sys_mon_t *mid);
+   int            (*MonitorNotify)(sys_thread_t *self, sys_mon_t *mid);
+   int            (*MonitorNotifyAll)(sys_thread_t *self, sys_mon_t *mid);
+-  int 	         (*MonitorWait)(sys_thread_t *self, sys_mon_t *mid, jlong ms);
++  int            (*MonitorWait)(sys_thread_t *self, sys_mon_t *mid, jlong ms);
+   bool_t         (*MonitorInUse)(sys_mon_t *mid);
+   sys_thread_t * (*MonitorOwner)(sys_mon_t *mid);
+   int            (*MonitorGetInfo)(sys_mon_t *mid, sys_mon_info *info);
+-  
++
+ } HPI_ThreadInterface;
+ 
+ /*
+@@ -281,13 +278,13 @@
+   int              (*Connect)(int fd, struct sockaddr *him, int len);
+   int              (*Accept)(int fd, struct sockaddr *him, int *len);
+   int              (*SendTo)(int fd, char *buf, int len, int flags,
+-			     struct sockaddr *to, int tolen);
++                             struct sockaddr *to, int tolen);
+   int              (*RecvFrom)(int fd, char *buf, int nbytes, int flags,
+-			       struct sockaddr *from, int *fromlen);
++                               struct sockaddr *from, int *fromlen);
+   int              (*Listen)(int fd, long count);
+   int              (*Recv)(int fd, char *buf, int nBytes, int flags);
+   int              (*Send)(int fd, char *buf, int nBytes, int flags);
+-  int              (*Timeout)(int fd, long timeout); 
++  int              (*Timeout)(int fd, long timeout);
+   struct hostent * (*GetHostByName)(char *hostname);
+   int              (*Socket)(int domain, int type, int protocol);
+   int              (*SocketShutdown)(int fd, int howto);
+@@ -318,4 +315,3 @@
+ #endif
+ 
+ #endif /* !_JAVASOFT_HPI_H_ */
+-
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jniCheck.cpp openjdk/hotspot/src/share/vm/prims/jniCheck.cpp
+--- openjdk6/hotspot/src/share/vm/prims/jniCheck.cpp	2008-02-28 05:02:41.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jniCheck.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)jniCheck.cpp	1.49 07/05/05 17:06:30 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -58,7 +55,7 @@
+ static struct JNINativeInterface_ * unchecked_jni_NativeInterface;
+ 
+ 
+-/* 
++/*
+  * MACRO DEFINITIONS
+  */
+ 
+@@ -147,9 +144,9 @@
+ }
+ 
+ 
+- 
+ 
+-/* 
++
++/*
+  * SUPPORT FUNCTIONS
+  */
+ 
+@@ -237,6 +234,10 @@
+   }
+   klassOop k_oop = oopObj->klass();
+ 
++  if (!jfieldIDWorkaround::is_valid_jfieldID(k_oop, fid)) {
++    ReportJNIFatalError(thr, fatal_wrong_field);
++  }
++
+   /* make sure the field exists */
+   int offset = jfieldIDWorkaround::from_instance_jfieldID(k_oop, fid);
+   if (!instanceKlass::cast(k_oop)->contains_field_offset(offset))
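
Amid the whitespace normalization, this jniCheck.cpp hunk adds a real check: the incoming jfieldID is validated against the receiver's klass before its offset is extracted, so a stale or foreign field ID fails fast with ReportJNIFatalError instead of yielding a bogus offset. A schematic sketch of the guard; the helper names follow the patch, the bodies are stand-ins:

#include <cstdio>
#include <cstdlib>

typedef void *jfieldID;
struct klassOop_t {};  // stand-in for klassOop

// Stand-in: the real check verifies the ID decodes to a field offset
// that actually belongs to this klass.
static bool is_valid_jfieldID(klassOop_t *, jfieldID fid) {
  return fid != NULL;
}

static void ReportJNIFatalError(const char *msg) {
  std::fprintf(stderr, "FATAL ERROR in native method: %s\n", msg);
  std::abort();
}

// Checked-JNI accessor: reject a bad field ID before using it.
static void check_instance_field_id(klassOop_t *k, jfieldID fid) {
  if (!is_valid_jfieldID(k, fid))
    ReportJNIFatalError("wrong field ID passed to JNI call");
  // ...offset extraction and field-type check follow in the real wrapper...
}

int main() {
  klassOop_t k;
  jfieldID good = &k;
  check_instance_field_id(&k, good);  // passes the guard
  return 0;
}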
+@@ -244,7 +245,7 @@
+ 
+   /* check for proper field type */
+   if (!instanceKlass::cast(k_oop)->find_field_from_offset(offset,
+-							      false, &fd))
++                                                              false, &fd))
+     ReportJNIFatalError(thr, fatal_instance_field_not_found);
+ 
+   if ((fd.field_type() != ftype) &&
+@@ -312,7 +313,7 @@
+ 
+ oop jniCheck::validate_object(JavaThread* thr, jobject obj) {
+     if (!obj)
+-	return NULL;
++        return NULL;
+     ASSERT_OOPS_ALLOWED;
+     oop oopObj = jniCheck::validate_handle(thr, obj);
+     if (!oopObj) {
+@@ -326,7 +327,7 @@
+ // array descriptors.
+ void jniCheck::validate_class_descriptor(JavaThread* thr, const char* name) {
+   if (name == NULL) return;  // implementation accepts NULL so just return
+-  
++
+   size_t len = strlen(name);
+ 
+   if (len >= 2 &&
+@@ -382,7 +383,7 @@
+ }
+ 
+ 
+-/* 
++/*
+  * IMPLEMENTATION OF FUNCTIONS IN CHECKED TABLE
+  */
+ 
+@@ -448,7 +449,7 @@
+       jniCheck::validate_jmethod_id(thr, methodID);
+     )
+     jobject result = UNCHECKED()->ToReflectedMethod(env, cls, methodID,
+-						    isStatic);
++                                                    isStatic);
+     functionExit(env);
+     return result;
+ JNI_END
+@@ -557,7 +558,7 @@
+     functionExit(env);
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(jint, 
++JNI_ENTRY_CHECKED(jint,
+   checked_jni_PushLocalFrame(JNIEnv *env,
+                              jint capacity))
+     functionEnterExceptionAllowed(thr);
+@@ -944,7 +945,7 @@
+ WRAPPER_CallNonvirtualMethod(jfloat,Float)
+ WRAPPER_CallNonvirtualMethod(jdouble,Double)
+ 
+-JNI_ENTRY_CHECKED(void, 
++JNI_ENTRY_CHECKED(void,
+   checked_jni_CallNonvirtualVoidMethod(JNIEnv *env,
+                                        jobject obj,
+                                        jclass clazz,
+@@ -977,7 +978,7 @@
+     functionExit(env);
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(void, 
++JNI_ENTRY_CHECKED(void,
+   checked_jni_CallNonvirtualVoidMethodA(JNIEnv *env,
+                                         jobject obj,
+                                         jclass clazz,
+@@ -985,7 +986,7 @@
+                                         const jvalue * args))
+     functionEnter(thr);
+     IN_VM(
+-      jniCheck::validate_call_object(thr, obj, methodID); 
++      jniCheck::validate_call_object(thr, obj, methodID);
+       jniCheck::validate_call_class(thr, clazz, methodID);
+     )
+     UNCHECKED()->CallNonvirtualVoidMethodA(env,obj,clazz,methodID,args);
+@@ -1055,7 +1056,7 @@
+ WRAPPER_SetField(jdouble,  Double,  T_DOUBLE)
+ 
+ 
+-JNI_ENTRY_CHECKED(jmethodID, 
++JNI_ENTRY_CHECKED(jmethodID,
+   checked_jni_GetStaticMethodID(JNIEnv *env,
+                                 jclass clazz,
+                                 const char *name,
+@@ -1137,7 +1138,7 @@
+ WRAPPER_CallStaticMethod(jfloat,Float)
+ WRAPPER_CallStaticMethod(jdouble,Double)
+ 
+-JNI_ENTRY_CHECKED(void, 
++JNI_ENTRY_CHECKED(void,
+   checked_jni_CallStaticVoidMethod(JNIEnv *env,
+                                    jclass cls,
+                                    jmethodID methodID,
+@@ -1154,7 +1155,7 @@
+     functionExit(env);
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(void, 
++JNI_ENTRY_CHECKED(void,
+   checked_jni_CallStaticVoidMethodV(JNIEnv *env,
+                                     jclass cls,
+                                     jmethodID methodID,
+@@ -1182,7 +1183,7 @@
+     functionExit(env);
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(jfieldID, 
++JNI_ENTRY_CHECKED(jfieldID,
+   checked_jni_GetStaticFieldID(JNIEnv *env,
+                                jclass clazz,
+                                const char *name,
+@@ -1249,7 +1250,7 @@
+ WRAPPER_SetStaticField(jdouble,  Double,  T_DOUBLE)
+ 
+ 
+-JNI_ENTRY_CHECKED(jstring, 
++JNI_ENTRY_CHECKED(jstring,
+   checked_jni_NewString(JNIEnv *env,
+                         const jchar *unicode,
+                         jsize len))
+@@ -1259,7 +1260,7 @@
+     return result;
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(jsize, 
++JNI_ENTRY_CHECKED(jsize,
+   checked_jni_GetStringLength(JNIEnv *env,
+                               jstring str))
+     functionEnter(thr);
+@@ -1271,7 +1272,7 @@
+     return result;
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(const jchar *, 
++JNI_ENTRY_CHECKED(const jchar *,
+   checked_jni_GetStringChars(JNIEnv *env,
+                              jstring str,
+                              jboolean *isCopy))
+@@ -1284,7 +1285,7 @@
+     return result;
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(void, 
++JNI_ENTRY_CHECKED(void,
+   checked_jni_ReleaseStringChars(JNIEnv *env,
+                                  jstring str,
+                                  const jchar *chars))
+@@ -1300,7 +1301,7 @@
+     functionExit(env);
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(jstring, 
++JNI_ENTRY_CHECKED(jstring,
+   checked_jni_NewStringUTF(JNIEnv *env,
+                            const char *utf))
+     functionEnter(thr);
+@@ -1309,7 +1310,7 @@
+     return result;
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(jsize, 
++JNI_ENTRY_CHECKED(jsize,
+   checked_jni_GetStringUTFLength(JNIEnv *env,
+                                  jstring str))
+     functionEnter(thr);
+@@ -1321,7 +1322,7 @@
+     return result;
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(const char *, 
++JNI_ENTRY_CHECKED(const char *,
+   checked_jni_GetStringUTFChars(JNIEnv *env,
+                                 jstring str,
+                                 jboolean *isCopy))
+@@ -1334,7 +1335,7 @@
+     return result;
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(void, 
++JNI_ENTRY_CHECKED(void,
+   checked_jni_ReleaseStringUTFChars(JNIEnv *env,
+                                     jstring str,
+                                     const char* chars))
+@@ -1350,7 +1351,7 @@
+     functionExit(env);
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(jsize, 
++JNI_ENTRY_CHECKED(jsize,
+   checked_jni_GetArrayLength(JNIEnv *env,
+                              jarray array))
+     functionEnter(thr);
+@@ -1362,7 +1363,7 @@
+     return result;
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(jobjectArray, 
++JNI_ENTRY_CHECKED(jobjectArray,
+   checked_jni_NewObjectArray(JNIEnv *env,
+                              jsize len,
+                              jclass clazz,
+@@ -1373,7 +1374,7 @@
+     return result;
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(jobject, 
++JNI_ENTRY_CHECKED(jobject,
+   checked_jni_GetObjectArrayElement(JNIEnv *env,
+                                     jobjectArray array,
+                                     jsize index))
+@@ -1386,7 +1387,7 @@
+     return result;
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(void, 
++JNI_ENTRY_CHECKED(void,
+   checked_jni_SetObjectArrayElement(JNIEnv *env,
+                                     jobjectArray array,
+                                     jsize index,
+@@ -1520,7 +1521,7 @@
+ WRAPPER_SetScalarArrayRegion(T_FLOAT,   jfloat,   Float)
+ WRAPPER_SetScalarArrayRegion(T_DOUBLE,  jdouble,  Double)
+ 
+-JNI_ENTRY_CHECKED(jint, 
++JNI_ENTRY_CHECKED(jint,
+   checked_jni_RegisterNatives(JNIEnv *env,
+                               jclass clazz,
+                               const JNINativeMethod *methods,
+@@ -1531,7 +1532,7 @@
+     return result;
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(jint, 
++JNI_ENTRY_CHECKED(jint,
+   checked_jni_UnregisterNatives(JNIEnv *env,
+                                 jclass clazz))
+     functionEnter(thr);
+@@ -1540,7 +1541,7 @@
+     return result;
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(jint, 
++JNI_ENTRY_CHECKED(jint,
+   checked_jni_MonitorEnter(JNIEnv *env,
+                            jobject obj))
+     functionEnter(thr);
+@@ -1552,7 +1553,7 @@
+     return result;
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(jint, 
++JNI_ENTRY_CHECKED(jint,
+   checked_jni_MonitorExit(JNIEnv *env,
+                           jobject obj))
+     functionEnterExceptionAllowed(thr);
+@@ -1564,7 +1565,7 @@
+     return result;
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(jint, 
++JNI_ENTRY_CHECKED(jint,
+   checked_jni_GetJavaVM(JNIEnv *env,
+                         JavaVM **vm))
+     functionEnter(thr);
+@@ -1573,7 +1574,7 @@
+     return result;
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(void, 
++JNI_ENTRY_CHECKED(void,
+   checked_jni_GetStringRegion(JNIEnv *env,
+                               jstring str,
+                               jsize start,
+@@ -1587,7 +1588,7 @@
+     functionExit(env);
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(void, 
++JNI_ENTRY_CHECKED(void,
+   checked_jni_GetStringUTFRegion(JNIEnv *env,
+                                  jstring str,
+                                  jsize start,
+@@ -1601,7 +1602,7 @@
+     functionExit(env);
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(void *, 
++JNI_ENTRY_CHECKED(void *,
+   checked_jni_GetPrimitiveArrayCritical(JNIEnv *env,
+                                         jarray array,
+                                         jboolean *isCopy))
+@@ -1614,7 +1615,7 @@
+     return result;
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(void, 
++JNI_ENTRY_CHECKED(void,
+   checked_jni_ReleasePrimitiveArrayCritical(JNIEnv *env,
+                                             jarray array,
+                                             void *carray,
+@@ -1630,7 +1631,7 @@
+     functionExit(env);
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(const jchar*, 
++JNI_ENTRY_CHECKED(const jchar*,
+   checked_jni_GetStringCritical(JNIEnv *env,
+                                 jstring string,
+                                 jboolean *isCopy))
+@@ -1643,7 +1644,7 @@
+     return result;
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(void, 
++JNI_ENTRY_CHECKED(void,
+   checked_jni_ReleaseStringCritical(JNIEnv *env,
+                                     jstring str,
+                                     const jchar *chars))
+@@ -1658,7 +1659,7 @@
+     functionExit(env);
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(jweak, 
++JNI_ENTRY_CHECKED(jweak,
+   checked_jni_NewWeakGlobalRef(JNIEnv *env,
+                                jobject obj))
+     functionEnter(thr);
+@@ -1672,7 +1673,7 @@
+     return result;
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(void, 
++JNI_ENTRY_CHECKED(void,
+   checked_jni_DeleteWeakGlobalRef(JNIEnv *env,
+                                   jweak ref))
+     functionEnterExceptionAllowed(thr);
+@@ -1680,7 +1681,7 @@
+     functionExit(env);
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(jboolean, 
++JNI_ENTRY_CHECKED(jboolean,
+   checked_jni_ExceptionCheck(JNIEnv *env))
+     functionEnterExceptionAllowed(thr);
+     jboolean result = UNCHECKED()->ExceptionCheck(env);
+@@ -1688,7 +1689,7 @@
+     return result;
+ JNI_END
+ 
+-JNI_ENTRY_CHECKED(jobject, 
++JNI_ENTRY_CHECKED(jobject,
+   checked_jni_NewDirectByteBuffer(JNIEnv *env,
+                                   void *address,
+                                   jlong capacity))
+@@ -1696,7 +1697,7 @@
+     jobject result = UNCHECKED()->NewDirectByteBuffer(env, address, capacity);
+     functionExit(env);
+     return result;
+-JNI_END    
++JNI_END
+ 
+ JNI_ENTRY_CHECKED(void *,
+   checked_jni_GetDirectBufferAddress(JNIEnv *env,
+@@ -1705,7 +1706,7 @@
+     void* result = UNCHECKED()->GetDirectBufferAddress(env, buf);
+     functionExit(env);
+     return result;
+-JNI_END    
++JNI_END
+ 
+ JNI_ENTRY_CHECKED(jlong,
+   checked_jni_GetDirectBufferCapacity(JNIEnv *env,
+@@ -1726,7 +1727,7 @@
+     )
+     jobjectRefType result = UNCHECKED()->GetObjectRefType(env, obj);
+     functionExit(env);
+-    return result; 
++    return result;
+ JNI_END
+ 
+ 
+@@ -2036,9 +2037,9 @@
+   debug_only(int *lastPtr = (int *)((char *)&checked_jni_NativeInterface + \
+              sizeof(*unchecked_jni_NativeInterface) - sizeof(char *));)
+   assert(*lastPtr != 0,
+-	 "Mismatched JNINativeInterface tables, check for new entries");
++         "Mismatched JNINativeInterface tables, check for new entries");
+ 
+-  // with -verbose:jni this message will print 
++  // with -verbose:jni this message will print
+   if (PrintJNIResolving) {
+     tty->print_cr("Checked JNI functions are being used to " \
+                   "validate JNI usage");
+@@ -2046,5 +2047,3 @@
+ 
+   return &checked_jni_NativeInterface;
+ }
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jniCheck.hpp openjdk/hotspot/src/share/vm/prims/jniCheck.hpp
+--- openjdk6/hotspot/src/share/vm/prims/jniCheck.hpp	2008-02-28 05:02:41.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jniCheck.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jniCheck.hpp	1.11 07/05/05 17:06:32 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jni.cpp openjdk/hotspot/src/share/vm/prims/jni.cpp
+--- openjdk6/hotspot/src/share/vm/prims/jni.cpp	2008-02-28 05:02:41.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jni.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)jni.cpp	1.435 07/06/28 16:50:01 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -38,7 +35,7 @@
+ // since those macros can cause an immedate uninstrumented return.
+ //
+ // In order to get the return value, a reference to the variable containing
+-// the return value must be passed to the contructor of the object, and 
++// the return value must be passed to the contructor of the object, and
+ // the return value must be set before return (since the mark object has
+ // a reference to it).
+ //
+@@ -85,7 +82,7 @@
+ 
+ 
+ // Use these to select distinct code for floating-point vs. non-floating point
+-// situations.  Used from within common macros where we need slightly 
++// situations.  Used from within common macros where we need slightly
+ // different behavior for Float/Double
+ #define FP_SELECT_Boolean(intcode, fpcode) intcode
+ #define FP_SELECT_Byte(intcode, fpcode)    intcode
+@@ -101,7 +98,7 @@
+ 
+ #define COMMA ,
+ 
+-// Choose DT_RETURN_MARK macros  based on the type: float/double -> void 
++// Choose DT_RETURN_MARK macros  based on the type: float/double -> void
+ // (dtrace doesn't do FP yet)
+ #define DT_RETURN_MARK_DECL_FOR(TypeName, name, type) \
+   FP_SELECT(TypeName, \
+@@ -272,7 +269,7 @@
+   jclass cls = NULL;
+   DT_RETURN_MARK(DefineClass, jclass, (const jclass&)cls);
+ 
+-  // Since exceptions can be thrown, class initialization can take place  
++  // Since exceptions can be thrown, class initialization can take place
+   // if name is NULL no check for class name in .class stream has to be made.
+   symbolHandle class_name;
+   if (name != NULL) {
+@@ -288,12 +285,12 @@
+   ResourceMark rm(THREAD);
+   ClassFileStream st((u1*) buf, bufLen, NULL);
+   Handle class_loader (THREAD, JNIHandles::resolve(loaderRef));
+-  
++
+   if (UsePerfData && !class_loader.is_null()) {
+     // check whether the current caller thread holds the lock or not.
+     // If not, increment the corresponding counter
+     if (ObjectSynchronizer::
+-        query_lock_ownership((JavaThread*)THREAD, class_loader) != 
++        query_lock_ownership((JavaThread*)THREAD, class_loader) !=
+         ObjectSynchronizer::owner_self) {
+       ClassLoader::sync_JNIDefineClassLockFreeCounter()->inc();
+     }
+@@ -500,7 +497,7 @@
+   DTRACE_PROBE3(hotspot_jni, IsAssignableFrom__entry, env, sub, super);
+   oop sub_mirror   = JNIHandles::resolve_non_null(sub);
+   oop super_mirror = JNIHandles::resolve_non_null(super);
+-  if (java_lang_Class::is_primitive(sub_mirror) || 
++  if (java_lang_Class::is_primitive(sub_mirror) ||
+       java_lang_Class::is_primitive(super_mirror)) {
+     jboolean ret = (sub_mirror == super_mirror);
+     DTRACE_PROBE1(hotspot_jni, IsAssignableFrom__return, ret);
+@@ -509,7 +506,7 @@
+   klassOop sub_klass   = java_lang_Class::as_klassOop(sub_mirror);
+   klassOop super_klass = java_lang_Class::as_klassOop(super_mirror);
+   assert(sub_klass != NULL && super_klass != NULL, "invalid arguments to jni_IsAssignableFrom");
+-  jboolean ret = Klass::cast(sub_klass)->is_subtype_of(super_klass) ? 
++  jboolean ret = Klass::cast(sub_klass)->is_subtype_of(super_klass) ?
+                    JNI_TRUE : JNI_FALSE;
+   DTRACE_PROBE1(hotspot_jni, IsAssignableFrom__return, ret);
+   return ret;
+@@ -529,13 +526,13 @@
+ 
+ DT_RETURN_MARK_DECL(ThrowNew, jint);
+ 
+-JNI_ENTRY(jint, jni_ThrowNew(JNIEnv *env, jclass clazz, const char *message))  
+-  JNIWrapper("ThrowNew");  
++JNI_ENTRY(jint, jni_ThrowNew(JNIEnv *env, jclass clazz, const char *message))
++  JNIWrapper("ThrowNew");
+   DTRACE_PROBE3(hotspot_jni, ThrowNew__entry, env, clazz, message);
+   jint ret = JNI_OK;
+   DT_RETURN_MARK(ThrowNew, jint, (const jint&)ret);
+ 
+-  instanceKlass* k = instanceKlass::cast(java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(clazz)));  
++  instanceKlass* k = instanceKlass::cast(java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(clazz)));
+   symbolHandle name = symbolHandle(THREAD, k->name());
+   Handle class_loader (THREAD,  k->class_loader());
+   Handle protection_domain (THREAD, k->protection_domain());
+@@ -569,7 +566,7 @@
+ JNI_END
+ 
+ 
+-JNI_ENTRY_NO_PRESERVE(void, jni_ExceptionDescribe(JNIEnv *env))  
++JNI_ENTRY_NO_PRESERVE(void, jni_ExceptionDescribe(JNIEnv *env))
+   JNIWrapper("ExceptionDescribe");
+   DTRACE_PROBE1(hotspot_jni, ExceptionDescribe__entry, env);
+   if (thread->has_pending_exception()) {
+@@ -614,13 +611,13 @@
+ JNI_QUICK_ENTRY(void, jni_ExceptionClear(JNIEnv *env))
+   JNIWrapper("ExceptionClear");
+   DTRACE_PROBE1(hotspot_jni, ExceptionClear__entry, env);
+-  
+-  // The jni code might be using this API to clear java thrown exception. 
+-  // So just mark jvmti thread exception state as exception caught. 
+-  JvmtiThreadState *state = JavaThread::current()->jvmti_thread_state(); 
+-  if (state != NULL && state->is_exception_detected()) { 
+-    state->set_exception_caught(); 
+-  } 
++
++  // The jni code might be using this API to clear java thrown exception.
++  // So just mark jvmti thread exception state as exception caught.
++  JvmtiThreadState *state = JavaThread::current()->jvmti_thread_state();
++  if (state != NULL && state->is_exception_detected()) {
++    state->set_exception_caught();
++  }
+   thread->clear_pending_exception();
+   DTRACE_PROBE(hotspot_jni, ExceptionClear__return);
+ JNI_END
+@@ -737,7 +734,7 @@
+   JNIWrapper("GetObjectRefType");
+   DTRACE_PROBE2(hotspot_jni, GetObjectRefType__entry, env, obj);
+   jobjectRefType ret;
+-  if (JNIHandles::is_local_handle(thread, obj) || 
++  if (JNIHandles::is_local_handle(thread, obj) ||
+       JNIHandles::is_frame_handle(thread, obj))
+     ret = JNILocalRefType;
+   else if (JNIHandles::is_global_handle(obj))
+@@ -1029,7 +1026,7 @@
+     klassOop holder = m->method_holder();
+     if (!(Klass::cast(holder))->is_interface()) {
+       // non-interface call -- for that little speed boost, don't handlize
+-      debug_only(No_Safepoint_Verifier nosafepoint;) 
++      debug_only(No_Safepoint_Verifier nosafepoint;)
+       if (call_type == JNI_VIRTUAL) {
+         // jni_GetMethodID makes sure class is linked and initialized
+         // so m should have a valid vtable index.
+@@ -1048,7 +1045,7 @@
+         // JNI_NONVIRTUAL call
+         selected_method = m;
+       }
+-    } else {  
++    } else {
+       // interface call
+       KlassHandle h_holder(THREAD, holder);
+ 
+@@ -1114,7 +1111,7 @@
+ 
+ DT_RETURN_MARK_DECL(NewObjectA, jobject);
+ 
+-JNI_ENTRY(jobject, jni_NewObjectA(JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args))  
++JNI_ENTRY(jobject, jni_NewObjectA(JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args))
+   JNIWrapper("NewObjectA");
+   DTRACE_PROBE3(hotspot_jni, NewObjectA__entry, env, clazz, methodID);
+   jobject obj = NULL;
+@@ -1130,7 +1127,7 @@
+ 
+ DT_RETURN_MARK_DECL(NewObjectV, jobject);
+ 
+-JNI_ENTRY(jobject, jni_NewObjectV(JNIEnv *env, jclass clazz, jmethodID methodID, va_list args))  
++JNI_ENTRY(jobject, jni_NewObjectV(JNIEnv *env, jclass clazz, jmethodID methodID, va_list args))
+   JNIWrapper("NewObjectV");
+   DTRACE_PROBE3(hotspot_jni, NewObjectV__entry, env, clazz, methodID);
+   jobject obj = NULL;
+@@ -1146,7 +1143,7 @@
+ 
+ DT_RETURN_MARK_DECL(NewObject, jobject);
+ 
+-JNI_ENTRY(jobject, jni_NewObject(JNIEnv *env, jclass clazz, jmethodID methodID, ...))  
++JNI_ENTRY(jobject, jni_NewObject(JNIEnv *env, jclass clazz, jmethodID methodID, ...))
+   JNIWrapper("NewObject");
+   DTRACE_PROBE3(hotspot_jni, NewObject__entry, env, clazz, methodID);
+   jobject obj = NULL;
+@@ -1248,7 +1245,7 @@
+ }
+ 
+ 
+-JNI_ENTRY(jmethodID, jni_GetMethodID(JNIEnv *env, jclass clazz, 
++JNI_ENTRY(jmethodID, jni_GetMethodID(JNIEnv *env, jclass clazz,
+           const char *name, const char *sig))
+   JNIWrapper("GetMethodID");
+   DTRACE_PROBE4(hotspot_jni, GetMethodID__entry, env, clazz, name, sig);
+@@ -1258,7 +1255,7 @@
+ JNI_END
+ 
+ 
+-JNI_ENTRY(jmethodID, jni_GetStaticMethodID(JNIEnv *env, jclass clazz, 
++JNI_ENTRY(jmethodID, jni_GetStaticMethodID(JNIEnv *env, jclass clazz,
+           const char *name, const char *sig))
+   JNIWrapper("GetStaticMethodID");
+   DTRACE_PROBE4(hotspot_jni, GetStaticMethodID__entry, env, clazz, name, sig);
+@@ -1348,7 +1345,7 @@
+ DT_VOID_RETURN_MARK_DECL(CallVoidMethodV);
+ DT_VOID_RETURN_MARK_DECL(CallVoidMethodA);
+ 
+-JNI_ENTRY(void, jni_CallVoidMethod(JNIEnv *env, jobject obj, jmethodID methodID, ...))  
++JNI_ENTRY(void, jni_CallVoidMethod(JNIEnv *env, jobject obj, jmethodID methodID, ...))
+   JNIWrapper("CallVoidMethod");
+   DTRACE_PROBE3(hotspot_jni, CallVoidMethod__entry, env, obj, methodID);
+   DT_VOID_RETURN_MARK(CallVoidMethod);
+@@ -1360,9 +1357,9 @@
+   jni_invoke_nonstatic(env, &jvalue, obj, JNI_VIRTUAL, methodID, &ap, CHECK);
+   va_end(args);
+ JNI_END
+-                                                
+-                                                   
+-JNI_ENTRY(void, jni_CallVoidMethodV(JNIEnv *env, jobject obj, jmethodID methodID, va_list args))  
++
++
++JNI_ENTRY(void, jni_CallVoidMethodV(JNIEnv *env, jobject obj, jmethodID methodID, va_list args))
+   JNIWrapper("CallVoidMethodV");
+   DTRACE_PROBE3(hotspot_jni, CallVoidMethodV__entry, env, obj, methodID);
+   DT_VOID_RETURN_MARK(CallVoidMethodV);
+@@ -1371,9 +1368,9 @@
+   JNI_ArgumentPusherVaArg ap(THREAD, methodID, args);
+   jni_invoke_nonstatic(env, &jvalue, obj, JNI_VIRTUAL, methodID, &ap, CHECK);
+ JNI_END
+-                                  
+-                                     
+-JNI_ENTRY(void, jni_CallVoidMethodA(JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args))  
++
++
++JNI_ENTRY(void, jni_CallVoidMethodA(JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args))
+   JNIWrapper("CallVoidMethodA");
+   DTRACE_PROBE3(hotspot_jni, CallVoidMethodA__entry, env, obj, methodID);
+   DT_VOID_RETURN_MARK(CallVoidMethodA);
+@@ -1456,10 +1453,10 @@
+ DT_VOID_RETURN_MARK_DECL(CallNonvirtualVoidMethodV);
+ DT_VOID_RETURN_MARK_DECL(CallNonvirtualVoidMethodA);
+ 
+-JNI_ENTRY(void, jni_CallNonvirtualVoidMethod(JNIEnv *env, jobject obj, jclass cls, jmethodID methodID, ...))  
++JNI_ENTRY(void, jni_CallNonvirtualVoidMethod(JNIEnv *env, jobject obj, jclass cls, jmethodID methodID, ...))
+   JNIWrapper("CallNonvirtualVoidMethod");
+ 
+-  DTRACE_PROBE4(hotspot_jni, CallNonvirtualVoidMethod__entry, 
++  DTRACE_PROBE4(hotspot_jni, CallNonvirtualVoidMethod__entry,
+                env, obj, cls, methodID);
+   DT_VOID_RETURN_MARK(CallNonvirtualVoidMethod);
+ 
+@@ -1470,12 +1467,12 @@
+   jni_invoke_nonstatic(env, &jvalue, obj, JNI_NONVIRTUAL, methodID, &ap, CHECK);
+   va_end(args);
+ JNI_END
+-                                                 
+-                                                   
+-JNI_ENTRY(void, jni_CallNonvirtualVoidMethodV(JNIEnv *env, jobject obj, jclass cls, jmethodID methodID, va_list args))  
++
++
++JNI_ENTRY(void, jni_CallNonvirtualVoidMethodV(JNIEnv *env, jobject obj, jclass cls, jmethodID methodID, va_list args))
+   JNIWrapper("CallNonvirtualVoidMethodV");
+ 
+-  DTRACE_PROBE4(hotspot_jni, CallNonvirtualVoidMethodV__entry, 
++  DTRACE_PROBE4(hotspot_jni, CallNonvirtualVoidMethodV__entry,
+                env, obj, cls, methodID);
+   DT_VOID_RETURN_MARK(CallNonvirtualVoidMethodV);
+ 
+@@ -1485,9 +1482,9 @@
+ JNI_END
+ 
+ 
+-JNI_ENTRY(void, jni_CallNonvirtualVoidMethodA(JNIEnv *env, jobject obj, jclass cls, jmethodID methodID, const jvalue *args))  
++JNI_ENTRY(void, jni_CallNonvirtualVoidMethodA(JNIEnv *env, jobject obj, jclass cls, jmethodID methodID, const jvalue *args))
+   JNIWrapper("CallNonvirtualVoidMethodA");
+-  DTRACE_PROBE4(hotspot_jni, CallNonvirtualVoidMethodA__entry, 
++  DTRACE_PROBE4(hotspot_jni, CallNonvirtualVoidMethodA__entry,
+                 env, obj, cls, methodID);
+   DT_VOID_RETURN_MARK(CallNonvirtualVoidMethodA);
+   JavaValue jvalue(T_VOID);
+@@ -1568,7 +1565,7 @@
+ DT_VOID_RETURN_MARK_DECL(CallStaticVoidMethodV);
+ DT_VOID_RETURN_MARK_DECL(CallStaticVoidMethodA);
+ 
+-JNI_ENTRY(void, jni_CallStaticVoidMethod(JNIEnv *env, jclass cls, jmethodID methodID, ...))  
++JNI_ENTRY(void, jni_CallStaticVoidMethod(JNIEnv *env, jclass cls, jmethodID methodID, ...))
+   JNIWrapper("CallStaticVoidMethod");
+   DTRACE_PROBE3(hotspot_jni, CallStaticVoidMethod__entry, env, cls, methodID);
+   DT_VOID_RETURN_MARK(CallStaticVoidMethod);
+@@ -1580,9 +1577,9 @@
+   jni_invoke_static(env, &jvalue, NULL, JNI_STATIC, methodID, &ap, CHECK);
+   va_end(args);
+ JNI_END
+-                                                 
+-                                                   
+-JNI_ENTRY(void, jni_CallStaticVoidMethodV(JNIEnv *env, jclass cls, jmethodID methodID, va_list args))  
++
++
++JNI_ENTRY(void, jni_CallStaticVoidMethodV(JNIEnv *env, jclass cls, jmethodID methodID, va_list args))
+   JNIWrapper("CallStaticVoidMethodV");
+   DTRACE_PROBE3(hotspot_jni, CallStaticVoidMethodV__entry, env, cls, methodID);
+   DT_VOID_RETURN_MARK(CallStaticVoidMethodV);
+@@ -1591,9 +1588,9 @@
+   JNI_ArgumentPusherVaArg ap(THREAD, methodID, args);
+   jni_invoke_static(env, &jvalue, NULL, JNI_STATIC, methodID, &ap, CHECK);
+ JNI_END
+-                                  
+-                                     
+-JNI_ENTRY(void, jni_CallStaticVoidMethodA(JNIEnv *env, jclass cls, jmethodID methodID, const jvalue *args))  
++
++
++JNI_ENTRY(void, jni_CallStaticVoidMethodA(JNIEnv *env, jclass cls, jmethodID methodID, const jvalue *args))
+   JNIWrapper("CallStaticVoidMethodA");
+   DTRACE_PROBE3(hotspot_jni, CallStaticVoidMethodA__entry, env, cls, methodID);
+   DT_VOID_RETURN_MARK(CallStaticVoidMethodA);
+@@ -1611,7 +1608,7 @@
+ 
+ DT_RETURN_MARK_DECL(GetFieldID, jfieldID);
+ 
+-JNI_ENTRY(jfieldID, jni_GetFieldID(JNIEnv *env, jclass clazz, 
++JNI_ENTRY(jfieldID, jni_GetFieldID(JNIEnv *env, jclass clazz,
+           const char *name, const char *sig))
+   JNIWrapper("GetFieldID");
+   DTRACE_PROBE4(hotspot_jni, GetFieldID__entry, env, clazz, name, sig);
+@@ -1722,9 +1719,9 @@
+ }
+ 
+ JNI_QUICK_ENTRY(void, jni_SetObjectField(JNIEnv *env, jobject obj, jfieldID fieldID, jobject value))
+-  JNIWrapper("SetObjectField"); 
++  JNIWrapper("SetObjectField");
+   DTRACE_PROBE4(hotspot_jni, SetObjectField__entry, env, obj, fieldID, value);
+-  oop o = JNIHandles::resolve_non_null(obj); 
++  oop o = JNIHandles::resolve_non_null(obj);
+   klassOop k = o->klass();
+   int offset = jfieldIDWorkaround::from_instance_jfieldID(k, fieldID);
+   // Keep JVMTI addition small and only check enabled flag here.
+@@ -1777,7 +1774,7 @@
+ 
+ JNI_ENTRY(jobject, jni_ToReflectedField(JNIEnv *env, jclass cls, jfieldID fieldID, jboolean isStatic))
+   JNIWrapper("ToReflectedField");
+-  DTRACE_PROBE4(hotspot_jni, ToReflectedField__entry, 
++  DTRACE_PROBE4(hotspot_jni, ToReflectedField__entry,
+                 env, cls, fieldID, isStatic);
+   jobject ret = NULL;
+   DT_RETURN_MARK(ToReflectedField, jobject, (const jobject&)ret);
+@@ -1810,7 +1807,7 @@
+ //
+ DT_RETURN_MARK_DECL(GetStaticFieldID, jfieldID);
+ 
+-JNI_ENTRY(jfieldID, jni_GetStaticFieldID(JNIEnv *env, jclass clazz, 
++JNI_ENTRY(jfieldID, jni_GetStaticFieldID(JNIEnv *env, jclass clazz,
+           const char *name, const char *sig))
+   JNIWrapper("GetStaticFieldID");
+   DTRACE_PROBE4(hotspot_jni, GetStaticFieldID__entry, env, clazz, name, sig);
+@@ -1854,7 +1851,9 @@
+ JNI_ENTRY(jobject, jni_GetStaticObjectField(JNIEnv *env, jclass clazz, jfieldID fieldID))
+   JNIWrapper("GetStaticObjectField");
+   DTRACE_PROBE3(hotspot_jni, GetStaticObjectField__entry, env, clazz, fieldID);
++#ifndef JNICHECK_KERNEL
+   DEBUG_ONLY(klassOop param_k = jniCheck::validate_class(thread, clazz);)
++#endif // JNICHECK_KERNEL
+   JNIid* id = jfieldIDWorkaround::from_static_jfieldID(fieldID);
+   assert(id->is_static_field_id(), "invalid static field id");
+   // Keep JVMTI addition small and only check enabled flag here.
+@@ -1901,8 +1900,8 @@
+ JNI_ENTRY(void, jni_SetStaticObjectField(JNIEnv *env, jclass clazz, jfieldID fieldID, jobject value))
+   JNIWrapper("SetStaticObjectField");
+   DTRACE_PROBE4(hotspot_jni, SetStaticObjectField__entry, env, clazz, fieldID, value);
+-  JNIid* id = jfieldIDWorkaround::from_static_jfieldID(fieldID); 
+-  assert(id->is_static_field_id(), "invalid static field id"); 
++  JNIid* id = jfieldIDWorkaround::from_static_jfieldID(fieldID);
++  assert(id->is_static_field_id(), "invalid static field id");
+   // Keep JVMTI addition small and only check enabled flag here.
+   // jni_SetField_probe() assumes that is okay to create handles.
+   if (JvmtiExport::should_post_field_modification()) {
+@@ -2311,10 +2310,10 @@
+ //
+ 
+ // The RegisterNatives call being attempted tried to register with a method that
+-// is not native.  Ask JVM TI what prefixes have been specified.  Then check 
+-// to see if the native method is now wrapped with the prefixes.  See the 
++// is not native.  Ask JVM TI what prefixes have been specified.  Then check
++// to see if the native method is now wrapped with the prefixes.  See the
+ // SetNativeMethodPrefix(es) functions in the JVM TI Spec for details.
+-static methodOop find_prefixed_native(KlassHandle k, 
++static methodOop find_prefixed_native(KlassHandle k,
+                                       symbolHandle name, symbolHandle signature, TRAPS) {
+   ResourceMark rm(THREAD);
+   methodOop method;
+@@ -2388,9 +2387,9 @@
+ 
+ DT_RETURN_MARK_DECL(RegisterNatives, jint);
+ 
+-JNI_ENTRY(jint, jni_RegisterNatives(JNIEnv *env, jclass clazz, 
+-                                    const JNINativeMethod *methods, 
+-                                    jint nMethods))  
++JNI_ENTRY(jint, jni_RegisterNatives(JNIEnv *env, jclass clazz,
++                                    const JNINativeMethod *methods,
++                                    jint nMethods))
+   JNIWrapper("RegisterNatives");
+   DTRACE_PROBE4(hotspot_jni, RegisterNatives__entry, env, clazz, methods, nMethods);
+   jint ret = 0;
+@@ -2428,7 +2427,7 @@
+ JNI_END
+ 
+ 
+-JNI_ENTRY(jint, jni_UnregisterNatives(JNIEnv *env, jclass clazz))  
++JNI_ENTRY(jint, jni_UnregisterNatives(JNIEnv *env, jclass clazz))
+   JNIWrapper("UnregisterNatives");
+   DTRACE_PROBE2(hotspot_jni, UnregisterNatives__entry, env, clazz);
+   klassOop k   = java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(clazz));
+@@ -3145,7 +3144,11 @@
+ 
+ // Returns the function structure
+ struct JNINativeInterface_* jni_functions() {
++#ifndef JNICHECK_KERNEL
+   if (CheckJNICalls) return jni_functions_check();
++#else  // JNICHECK_KERNEL
++  if (CheckJNICalls) warning("-Xcheck:jni is not supported in kernel vm.");
++#endif // JNICHECK_KERNEL
+   return &jni_NativeInterface;
+ }
+ 
+@@ -3256,7 +3259,7 @@
+     // Check if we should compile all classes on bootclasspath
+     NOT_PRODUCT(if (CompileTheWorld) ClassLoader::compile_the_world();)
+     // Since this is not a JVM_ENTRY we have to set the thread state manually before leaving.
+-    ThreadStateTransition::transition_and_fence(thread, _thread_in_vm, _thread_in_native); 
++    ThreadStateTransition::transition_and_fence(thread, _thread_in_vm, _thread_in_native);
+   } else {
+     if (can_try_again) {
+       // reset safe_to_recreate_vm to 1 so that retrial would be possible
+@@ -3266,7 +3269,7 @@
+     // Creation failed. We must reset vm_created
+     *vm = 0;
+     *(JNIEnv**)penv = 0;
+-    // reset vm_created last to avoid race condition. Use OrderAccess to 
++    // reset vm_created last to avoid race condition. Use OrderAccess to
+     // control both compiler and architectural-based reordering.
+     OrderAccess::release_store(&vm_created, 0);
+   }
+@@ -3442,7 +3445,7 @@
+ 
+ jint JNICALL jni_AttachCurrentThread(JavaVM *vm, void **penv, void *_args) {
+   DTRACE_PROBE3(hotspot_jni, AttachCurrentThread__entry, vm, penv, _args);
+-  if (!vm_created) { 
++  if (!vm_created) {
+     DTRACE_PROBE1(hotspot_jni, AttachCurrentThread__return, JNI_ERR);
+     return JNI_ERR;
+   }
+@@ -3552,7 +3555,7 @@
+ 
+ jint JNICALL jni_AttachCurrentThreadAsDaemon(JavaVM *vm, void **penv, void *_args) {
+   DTRACE_PROBE3(hotspot_jni, AttachCurrentThreadAsDaemon__entry, vm, penv, _args);
+-  if (!vm_created) { 
++  if (!vm_created) {
+     DTRACE_PROBE1(hotspot_jni, AttachCurrentThreadAsDaemon__return, JNI_ERR);
+     return JNI_ERR;
+   }
+@@ -3577,4 +3580,3 @@
+     jni_GetEnv,
+     jni_AttachCurrentThreadAsDaemon
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jniFastGetField.cpp openjdk/hotspot/src/share/vm/prims/jniFastGetField.cpp
+--- openjdk6/hotspot/src/share/vm/prims/jniFastGetField.cpp	2008-02-28 05:02:41.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jniFastGetField.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)jniFastGetField.cpp	1.8 07/05/05 17:06:32 JVM"
+-#endif
+ /*
+  * Copyright 2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -40,4 +37,3 @@
+   }
+   return (address)-1;
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jniFastGetField.hpp openjdk/hotspot/src/share/vm/prims/jniFastGetField.hpp
+--- openjdk6/hotspot/src/share/vm/prims/jniFastGetField.hpp	2008-02-28 05:02:41.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jniFastGetField.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jniFastGetField.hpp	1.8 07/05/05 17:06:32 JVM"
+-#endif
+ /*
+  * Copyright 2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Basic logic of a fast version of jni_Get<Primitive>Field:
+@@ -44,12 +41,12 @@
+ //
+ // LoadLoad membars to maintain the load order may be necessary
+ // for some platforms.
+-// 
++//
+ // The fast versions don't check for pending suspension request.
+ // This is fine since it's totally read-only and doesn't create new race.
+ //
+ // There is a hypothetical safepoint counter wraparound. But it's not
+-// a practical concern.  
++// a practical concern.
+ 
+ class JNI_FastGetField : AllStatic {
+  private:
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jni.h openjdk/hotspot/src/share/vm/prims/jni.h
+--- openjdk6/hotspot/src/share/vm/prims/jni.h	2008-02-28 05:02:41.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jni.h	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jni.h	1.45 07/05/05 17:06:31 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -57,11 +54,11 @@
+ 
+ #ifndef JNI_TYPES_ALREADY_DEFINED_IN_JNI_MD_H
+ 
+-typedef unsigned char	jboolean;
+-typedef unsigned short	jchar;
+-typedef short		jshort;
+-typedef float		jfloat;
+-typedef double		jdouble;
++typedef unsigned char   jboolean;
++typedef unsigned short  jchar;
++typedef short           jshort;
++typedef float           jfloat;
++typedef double          jdouble;
+ 
+ typedef jint            jsize;
+ 
+@@ -143,7 +140,7 @@
+      JNIInvalidRefType    = 0,
+      JNILocalRefType      = 1,
+      JNIGlobalRefType     = 2,
+-     JNIWeakGlobalRefType = 3 
++     JNIWeakGlobalRefType = 3
+ } jobjectRefType;
+ 
+ 
+@@ -790,7 +787,7 @@
+         return functions->GetVersion(this);
+     }
+     jclass DefineClass(const char *name, jobject loader, const jbyte *buf,
+-		       jsize len) {
++                       jsize len) {
+         return functions->DefineClass(this, name, loader, buf, len);
+     }
+     jclass FindClass(const char *name) {
+@@ -870,18 +867,18 @@
+     }
+     jobject NewObject(jclass clazz, jmethodID methodID, ...) {
+         va_list args;
+-	jobject result;
+-	va_start(args, methodID);
++        jobject result;
++        va_start(args, methodID);
+         result = functions->NewObjectV(this,clazz,methodID,args);
+-	va_end(args);
+-	return result;
++        va_end(args);
++        return result;
+     }
+     jobject NewObjectV(jclass clazz, jmethodID methodID,
+-		       va_list args) {
++                       va_list args) {
+         return functions->NewObjectV(this,clazz,methodID,args);
+     }
+     jobject NewObjectA(jclass clazz, jmethodID methodID,
+-		       const jvalue *args) {
++                       const jvalue *args) {
+         return functions->NewObjectA(this,clazz,methodID,args);
+     }
+ 
+@@ -893,392 +890,392 @@
+     }
+ 
+     jmethodID GetMethodID(jclass clazz, const char *name,
+-			  const char *sig) {
++                          const char *sig) {
+         return functions->GetMethodID(this,clazz,name,sig);
+     }
+ 
+     jobject CallObjectMethod(jobject obj, jmethodID methodID, ...) {
+         va_list args;
+-	jobject result;
+-	va_start(args,methodID);
+-	result = functions->CallObjectMethodV(this,obj,methodID,args);
+-	va_end(args);
+-	return result;
++        jobject result;
++        va_start(args,methodID);
++        result = functions->CallObjectMethodV(this,obj,methodID,args);
++        va_end(args);
++        return result;
+     }
+     jobject CallObjectMethodV(jobject obj, jmethodID methodID,
+-			va_list args) {
++                        va_list args) {
+         return functions->CallObjectMethodV(this,obj,methodID,args);
+     }
+     jobject CallObjectMethodA(jobject obj, jmethodID methodID,
+-			const jvalue * args) {
++                        const jvalue * args) {
+         return functions->CallObjectMethodA(this,obj,methodID,args);
+     }
+ 
+     jboolean CallBooleanMethod(jobject obj,
+-			       jmethodID methodID, ...) {
++                               jmethodID methodID, ...) {
+         va_list args;
+-	jboolean result;
+-	va_start(args,methodID);
+-	result = functions->CallBooleanMethodV(this,obj,methodID,args);
+-	va_end(args);
+-	return result;
++        jboolean result;
++        va_start(args,methodID);
++        result = functions->CallBooleanMethodV(this,obj,methodID,args);
++        va_end(args);
++        return result;
+     }
+     jboolean CallBooleanMethodV(jobject obj, jmethodID methodID,
+-				va_list args) {
++                                va_list args) {
+         return functions->CallBooleanMethodV(this,obj,methodID,args);
+     }
+     jboolean CallBooleanMethodA(jobject obj, jmethodID methodID,
+-				const jvalue * args) {
++                                const jvalue * args) {
+         return functions->CallBooleanMethodA(this,obj,methodID, args);
+     }
+ 
+     jbyte CallByteMethod(jobject obj, jmethodID methodID, ...) {
+         va_list args;
+-	jbyte result;
+-	va_start(args,methodID);
+-	result = functions->CallByteMethodV(this,obj,methodID,args);
+-	va_end(args);
+-	return result;
++        jbyte result;
++        va_start(args,methodID);
++        result = functions->CallByteMethodV(this,obj,methodID,args);
++        va_end(args);
++        return result;
+     }
+     jbyte CallByteMethodV(jobject obj, jmethodID methodID,
+-			  va_list args) {
++                          va_list args) {
+         return functions->CallByteMethodV(this,obj,methodID,args);
+     }
+     jbyte CallByteMethodA(jobject obj, jmethodID methodID,
+-			  const jvalue * args) {
++                          const jvalue * args) {
+         return functions->CallByteMethodA(this,obj,methodID,args);
+     }
+ 
+     jchar CallCharMethod(jobject obj, jmethodID methodID, ...) {
+         va_list args;
+-	jchar result;
+-	va_start(args,methodID);
+-	result = functions->CallCharMethodV(this,obj,methodID,args);
+-	va_end(args);
+-	return result;
++        jchar result;
++        va_start(args,methodID);
++        result = functions->CallCharMethodV(this,obj,methodID,args);
++        va_end(args);
++        return result;
+     }
+     jchar CallCharMethodV(jobject obj, jmethodID methodID,
+-			  va_list args) {
++                          va_list args) {
+         return functions->CallCharMethodV(this,obj,methodID,args);
+     }
+     jchar CallCharMethodA(jobject obj, jmethodID methodID,
+-			  const jvalue * args) {
++                          const jvalue * args) {
+         return functions->CallCharMethodA(this,obj,methodID,args);
+     }
+ 
+     jshort CallShortMethod(jobject obj, jmethodID methodID, ...) {
+         va_list args;
+-	jshort result;
+-	va_start(args,methodID);
+-	result = functions->CallShortMethodV(this,obj,methodID,args);
+-	va_end(args);
+-	return result;
++        jshort result;
++        va_start(args,methodID);
++        result = functions->CallShortMethodV(this,obj,methodID,args);
++        va_end(args);
++        return result;
+     }
+     jshort CallShortMethodV(jobject obj, jmethodID methodID,
+-			    va_list args) {
++                            va_list args) {
+         return functions->CallShortMethodV(this,obj,methodID,args);
+     }
+     jshort CallShortMethodA(jobject obj, jmethodID methodID,
+-			    const jvalue * args) {
++                            const jvalue * args) {
+         return functions->CallShortMethodA(this,obj,methodID,args);
+     }
+ 
+     jint CallIntMethod(jobject obj, jmethodID methodID, ...) {
+         va_list args;
+-	jint result;
+-	va_start(args,methodID);
+-	result = functions->CallIntMethodV(this,obj,methodID,args);
+-	va_end(args);
+-	return result;
++        jint result;
++        va_start(args,methodID);
++        result = functions->CallIntMethodV(this,obj,methodID,args);
++        va_end(args);
++        return result;
+     }
+     jint CallIntMethodV(jobject obj, jmethodID methodID,
+-			va_list args) {
++                        va_list args) {
+         return functions->CallIntMethodV(this,obj,methodID,args);
+     }
+     jint CallIntMethodA(jobject obj, jmethodID methodID,
+-			const jvalue * args) {
++                        const jvalue * args) {
+         return functions->CallIntMethodA(this,obj,methodID,args);
+     }
+ 
+     jlong CallLongMethod(jobject obj, jmethodID methodID, ...) {
+         va_list args;
+-	jlong result;
+-	va_start(args,methodID);
+-	result = functions->CallLongMethodV(this,obj,methodID,args);
+-	va_end(args);
+-	return result;
++        jlong result;
++        va_start(args,methodID);
++        result = functions->CallLongMethodV(this,obj,methodID,args);
++        va_end(args);
++        return result;
+     }
+     jlong CallLongMethodV(jobject obj, jmethodID methodID,
+-			  va_list args) {
++                          va_list args) {
+         return functions->CallLongMethodV(this,obj,methodID,args);
+     }
+     jlong CallLongMethodA(jobject obj, jmethodID methodID,
+-			  const jvalue * args) {
++                          const jvalue * args) {
+         return functions->CallLongMethodA(this,obj,methodID,args);
+     }
+ 
+     jfloat CallFloatMethod(jobject obj, jmethodID methodID, ...) {
+         va_list args;
+-	jfloat result;
+-	va_start(args,methodID);
+-	result = functions->CallFloatMethodV(this,obj,methodID,args);
+-	va_end(args);
+-	return result;
++        jfloat result;
++        va_start(args,methodID);
++        result = functions->CallFloatMethodV(this,obj,methodID,args);
++        va_end(args);
++        return result;
+     }
+     jfloat CallFloatMethodV(jobject obj, jmethodID methodID,
+-			    va_list args) {
++                            va_list args) {
+         return functions->CallFloatMethodV(this,obj,methodID,args);
+     }
+     jfloat CallFloatMethodA(jobject obj, jmethodID methodID,
+-			    const jvalue * args) {
++                            const jvalue * args) {
+         return functions->CallFloatMethodA(this,obj,methodID,args);
+     }
+ 
+     jdouble CallDoubleMethod(jobject obj, jmethodID methodID, ...) {
+         va_list args;
+-	jdouble result;
+-	va_start(args,methodID);
+-	result = functions->CallDoubleMethodV(this,obj,methodID,args);
+-	va_end(args);
+-	return result;
++        jdouble result;
++        va_start(args,methodID);
++        result = functions->CallDoubleMethodV(this,obj,methodID,args);
++        va_end(args);
++        return result;
+     }
+     jdouble CallDoubleMethodV(jobject obj, jmethodID methodID,
+-			va_list args) {
++                        va_list args) {
+         return functions->CallDoubleMethodV(this,obj,methodID,args);
+     }
+     jdouble CallDoubleMethodA(jobject obj, jmethodID methodID,
+-			const jvalue * args) {
++                        const jvalue * args) {
+         return functions->CallDoubleMethodA(this,obj,methodID,args);
+     }
+ 
+     void CallVoidMethod(jobject obj, jmethodID methodID, ...) {
+         va_list args;
+-	va_start(args,methodID);
+-	functions->CallVoidMethodV(this,obj,methodID,args);
+-	va_end(args);
++        va_start(args,methodID);
++        functions->CallVoidMethodV(this,obj,methodID,args);
++        va_end(args);
+     }
+     void CallVoidMethodV(jobject obj, jmethodID methodID,
+-			 va_list args) {
++                         va_list args) {
+         functions->CallVoidMethodV(this,obj,methodID,args);
+     }
+     void CallVoidMethodA(jobject obj, jmethodID methodID,
+-			 const jvalue * args) {
++                         const jvalue * args) {
+         functions->CallVoidMethodA(this,obj,methodID,args);
+     }
+ 
+     jobject CallNonvirtualObjectMethod(jobject obj, jclass clazz,
+-				       jmethodID methodID, ...) {
++                                       jmethodID methodID, ...) {
+         va_list args;
+-	jobject result;
+-	va_start(args,methodID);
+-	result = functions->CallNonvirtualObjectMethodV(this,obj,clazz,
+-							methodID,args);
+-	va_end(args);
+-	return result;
++        jobject result;
++        va_start(args,methodID);
++        result = functions->CallNonvirtualObjectMethodV(this,obj,clazz,
++                                                        methodID,args);
++        va_end(args);
++        return result;
+     }
+     jobject CallNonvirtualObjectMethodV(jobject obj, jclass clazz,
+-					jmethodID methodID, va_list args) {
++                                        jmethodID methodID, va_list args) {
+         return functions->CallNonvirtualObjectMethodV(this,obj,clazz,
+-						      methodID,args);
++                                                      methodID,args);
+     }
+     jobject CallNonvirtualObjectMethodA(jobject obj, jclass clazz,
+-					jmethodID methodID, const jvalue * args) {
++                                        jmethodID methodID, const jvalue * args) {
+         return functions->CallNonvirtualObjectMethodA(this,obj,clazz,
+-						      methodID,args);
++                                                      methodID,args);
+     }
+ 
+     jboolean CallNonvirtualBooleanMethod(jobject obj, jclass clazz,
+-					 jmethodID methodID, ...) {
++                                         jmethodID methodID, ...) {
+         va_list args;
+-	jboolean result;
+-	va_start(args,methodID);
+-	result = functions->CallNonvirtualBooleanMethodV(this,obj,clazz,
+-							 methodID,args);
+-	va_end(args);
+-	return result;
++        jboolean result;
++        va_start(args,methodID);
++        result = functions->CallNonvirtualBooleanMethodV(this,obj,clazz,
++                                                         methodID,args);
++        va_end(args);
++        return result;
+     }
+     jboolean CallNonvirtualBooleanMethodV(jobject obj, jclass clazz,
+-					  jmethodID methodID, va_list args) {
++                                          jmethodID methodID, va_list args) {
+         return functions->CallNonvirtualBooleanMethodV(this,obj,clazz,
+-						       methodID,args);
++                                                       methodID,args);
+     }
+     jboolean CallNonvirtualBooleanMethodA(jobject obj, jclass clazz,
+-					  jmethodID methodID, const jvalue * args) {
++                                          jmethodID methodID, const jvalue * args) {
+         return functions->CallNonvirtualBooleanMethodA(this,obj,clazz,
+-						       methodID, args);
++                                                       methodID, args);
+     }
+ 
+     jbyte CallNonvirtualByteMethod(jobject obj, jclass clazz,
+-				   jmethodID methodID, ...) {
++                                   jmethodID methodID, ...) {
+         va_list args;
+-	jbyte result;
+-	va_start(args,methodID);
+-	result = functions->CallNonvirtualByteMethodV(this,obj,clazz,
+-						      methodID,args);
+-	va_end(args);
+-	return result;
++        jbyte result;
++        va_start(args,methodID);
++        result = functions->CallNonvirtualByteMethodV(this,obj,clazz,
++                                                      methodID,args);
++        va_end(args);
++        return result;
+     }
+     jbyte CallNonvirtualByteMethodV(jobject obj, jclass clazz,
+-				    jmethodID methodID, va_list args) {
++                                    jmethodID methodID, va_list args) {
+         return functions->CallNonvirtualByteMethodV(this,obj,clazz,
+-						    methodID,args);
++                                                    methodID,args);
+     }
+     jbyte CallNonvirtualByteMethodA(jobject obj, jclass clazz,
+-				    jmethodID methodID, const jvalue * args) {
++                                    jmethodID methodID, const jvalue * args) {
+         return functions->CallNonvirtualByteMethodA(this,obj,clazz,
+-						    methodID,args);
++                                                    methodID,args);
+     }
+ 
+     jchar CallNonvirtualCharMethod(jobject obj, jclass clazz,
+-				   jmethodID methodID, ...) {
++                                   jmethodID methodID, ...) {
+         va_list args;
+-	jchar result;
+-	va_start(args,methodID);
+-	result = functions->CallNonvirtualCharMethodV(this,obj,clazz,
+-						      methodID,args);
+-	va_end(args);
+-	return result;
++        jchar result;
++        va_start(args,methodID);
++        result = functions->CallNonvirtualCharMethodV(this,obj,clazz,
++                                                      methodID,args);
++        va_end(args);
++        return result;
+     }
+     jchar CallNonvirtualCharMethodV(jobject obj, jclass clazz,
+-				    jmethodID methodID, va_list args) {
++                                    jmethodID methodID, va_list args) {
+         return functions->CallNonvirtualCharMethodV(this,obj,clazz,
+-						    methodID,args);
++                                                    methodID,args);
+     }
+     jchar CallNonvirtualCharMethodA(jobject obj, jclass clazz,
+-				    jmethodID methodID, const jvalue * args) {
++                                    jmethodID methodID, const jvalue * args) {
+         return functions->CallNonvirtualCharMethodA(this,obj,clazz,
+-						    methodID,args);
++                                                    methodID,args);
+     }
+ 
+     jshort CallNonvirtualShortMethod(jobject obj, jclass clazz,
+-				     jmethodID methodID, ...) {
++                                     jmethodID methodID, ...) {
+         va_list args;
+-	jshort result;
+-	va_start(args,methodID);
+-	result = functions->CallNonvirtualShortMethodV(this,obj,clazz,
+-						       methodID,args);
+-	va_end(args);
+-	return result;
++        jshort result;
++        va_start(args,methodID);
++        result = functions->CallNonvirtualShortMethodV(this,obj,clazz,
++                                                       methodID,args);
++        va_end(args);
++        return result;
+     }
+     jshort CallNonvirtualShortMethodV(jobject obj, jclass clazz,
+-				      jmethodID methodID, va_list args) {
++                                      jmethodID methodID, va_list args) {
+         return functions->CallNonvirtualShortMethodV(this,obj,clazz,
+-						     methodID,args);
++                                                     methodID,args);
+     }
+     jshort CallNonvirtualShortMethodA(jobject obj, jclass clazz,
+-				      jmethodID methodID, const jvalue * args) {
++                                      jmethodID methodID, const jvalue * args) {
+         return functions->CallNonvirtualShortMethodA(this,obj,clazz,
+-						     methodID,args);
++                                                     methodID,args);
+     }
+ 
+     jint CallNonvirtualIntMethod(jobject obj, jclass clazz,
+-				 jmethodID methodID, ...) {
++                                 jmethodID methodID, ...) {
+         va_list args;
+-	jint result;
+-	va_start(args,methodID);
+-	result = functions->CallNonvirtualIntMethodV(this,obj,clazz,
+-						     methodID,args);
+-	va_end(args);
+-	return result;
++        jint result;
++        va_start(args,methodID);
++        result = functions->CallNonvirtualIntMethodV(this,obj,clazz,
++                                                     methodID,args);
++        va_end(args);
++        return result;
+     }
+     jint CallNonvirtualIntMethodV(jobject obj, jclass clazz,
+-				  jmethodID methodID, va_list args) {
++                                  jmethodID methodID, va_list args) {
+         return functions->CallNonvirtualIntMethodV(this,obj,clazz,
+-						   methodID,args);
++                                                   methodID,args);
+     }
+     jint CallNonvirtualIntMethodA(jobject obj, jclass clazz,
+-				  jmethodID methodID, const jvalue * args) {
++                                  jmethodID methodID, const jvalue * args) {
+         return functions->CallNonvirtualIntMethodA(this,obj,clazz,
+-						   methodID,args);
++                                                   methodID,args);
+     }
+ 
+     jlong CallNonvirtualLongMethod(jobject obj, jclass clazz,
+-				   jmethodID methodID, ...) {
++                                   jmethodID methodID, ...) {
+         va_list args;
+-	jlong result;
+-	va_start(args,methodID);
+-	result = functions->CallNonvirtualLongMethodV(this,obj,clazz,
+-						      methodID,args);
+-	va_end(args);
+-	return result;
++        jlong result;
++        va_start(args,methodID);
++        result = functions->CallNonvirtualLongMethodV(this,obj,clazz,
++                                                      methodID,args);
++        va_end(args);
++        return result;
+     }
+     jlong CallNonvirtualLongMethodV(jobject obj, jclass clazz,
+-				    jmethodID methodID, va_list args) {
++                                    jmethodID methodID, va_list args) {
+         return functions->CallNonvirtualLongMethodV(this,obj,clazz,
+-						    methodID,args);
++                                                    methodID,args);
+     }
+     jlong CallNonvirtualLongMethodA(jobject obj, jclass clazz,
+-				    jmethodID methodID, const jvalue * args) {
++                                    jmethodID methodID, const jvalue * args) {
+         return functions->CallNonvirtualLongMethodA(this,obj,clazz,
+-						    methodID,args);
++                                                    methodID,args);
+     }
+ 
+     jfloat CallNonvirtualFloatMethod(jobject obj, jclass clazz,
+-				     jmethodID methodID, ...) {
++                                     jmethodID methodID, ...) {
+         va_list args;
+-	jfloat result;
+-	va_start(args,methodID);
+-	result = functions->CallNonvirtualFloatMethodV(this,obj,clazz,
+-						       methodID,args);
+-	va_end(args);
+-	return result;
++        jfloat result;
++        va_start(args,methodID);
++        result = functions->CallNonvirtualFloatMethodV(this,obj,clazz,
++                                                       methodID,args);
++        va_end(args);
++        return result;
+     }
+     jfloat CallNonvirtualFloatMethodV(jobject obj, jclass clazz,
+-				      jmethodID methodID,
+-				      va_list args) {
++                                      jmethodID methodID,
++                                      va_list args) {
+         return functions->CallNonvirtualFloatMethodV(this,obj,clazz,
+-						     methodID,args);
++                                                     methodID,args);
+     }
+     jfloat CallNonvirtualFloatMethodA(jobject obj, jclass clazz,
+-				      jmethodID methodID,
+-				      const jvalue * args) {
++                                      jmethodID methodID,
++                                      const jvalue * args) {
+         return functions->CallNonvirtualFloatMethodA(this,obj,clazz,
+-						     methodID,args);
++                                                     methodID,args);
+     }
+ 
+     jdouble CallNonvirtualDoubleMethod(jobject obj, jclass clazz,
+-				       jmethodID methodID, ...) {
++                                       jmethodID methodID, ...) {
+         va_list args;
+-	jdouble result;
+-	va_start(args,methodID);
+-	result = functions->CallNonvirtualDoubleMethodV(this,obj,clazz,
+-							methodID,args);
+-	va_end(args);
+-	return result;
++        jdouble result;
++        va_start(args,methodID);
++        result = functions->CallNonvirtualDoubleMethodV(this,obj,clazz,
++                                                        methodID,args);
++        va_end(args);
++        return result;
+     }
+     jdouble CallNonvirtualDoubleMethodV(jobject obj, jclass clazz,
+-					jmethodID methodID,
+-					va_list args) {
++                                        jmethodID methodID,
++                                        va_list args) {
+         return functions->CallNonvirtualDoubleMethodV(this,obj,clazz,
+-						      methodID,args);
++                                                      methodID,args);
+     }
+     jdouble CallNonvirtualDoubleMethodA(jobject obj, jclass clazz,
+-					jmethodID methodID,
+-					const jvalue * args) {
++                                        jmethodID methodID,
++                                        const jvalue * args) {
+         return functions->CallNonvirtualDoubleMethodA(this,obj,clazz,
+-						      methodID,args);
++                                                      methodID,args);
+     }
+ 
+     void CallNonvirtualVoidMethod(jobject obj, jclass clazz,
+-				  jmethodID methodID, ...) {
++                                  jmethodID methodID, ...) {
+         va_list args;
+-	va_start(args,methodID);
+-	functions->CallNonvirtualVoidMethodV(this,obj,clazz,methodID,args);
+-	va_end(args);
++        va_start(args,methodID);
++        functions->CallNonvirtualVoidMethodV(this,obj,clazz,methodID,args);
++        va_end(args);
+     }
+     void CallNonvirtualVoidMethodV(jobject obj, jclass clazz,
+-				   jmethodID methodID,
+-				   va_list args) {
++                                   jmethodID methodID,
++                                   va_list args) {
+         functions->CallNonvirtualVoidMethodV(this,obj,clazz,methodID,args);
+     }
+     void CallNonvirtualVoidMethodA(jobject obj, jclass clazz,
+-				   jmethodID methodID,
+-				   const jvalue * args) {
++                                   jmethodID methodID,
++                                   const jvalue * args) {
+         functions->CallNonvirtualVoidMethodA(this,obj,clazz,methodID,args);
+     }
+ 
+     jfieldID GetFieldID(jclass clazz, const char *name,
+-			const char *sig) {
++                        const char *sig) {
+         return functions->GetFieldID(this,clazz,name,sig);
+     }
+ 
+@@ -1314,222 +1311,222 @@
+         functions->SetObjectField(this,obj,fieldID,val);
+     }
+     void SetBooleanField(jobject obj, jfieldID fieldID,
+-			 jboolean val) {
++                         jboolean val) {
+         functions->SetBooleanField(this,obj,fieldID,val);
+     }
+     void SetByteField(jobject obj, jfieldID fieldID,
+-		      jbyte val) {
++                      jbyte val) {
+         functions->SetByteField(this,obj,fieldID,val);
+     }
+     void SetCharField(jobject obj, jfieldID fieldID,
+-		      jchar val) {
++                      jchar val) {
+         functions->SetCharField(this,obj,fieldID,val);
+     }
+     void SetShortField(jobject obj, jfieldID fieldID,
+-		       jshort val) {
++                       jshort val) {
+         functions->SetShortField(this,obj,fieldID,val);
+     }
+     void SetIntField(jobject obj, jfieldID fieldID,
+-		     jint val) {
++                     jint val) {
+         functions->SetIntField(this,obj,fieldID,val);
+     }
+     void SetLongField(jobject obj, jfieldID fieldID,
+-		      jlong val) {
++                      jlong val) {
+         functions->SetLongField(this,obj,fieldID,val);
+     }
+     void SetFloatField(jobject obj, jfieldID fieldID,
+-		       jfloat val) {
++                       jfloat val) {
+         functions->SetFloatField(this,obj,fieldID,val);
+     }
+     void SetDoubleField(jobject obj, jfieldID fieldID,
+-			jdouble val) {
++                        jdouble val) {
+         functions->SetDoubleField(this,obj,fieldID,val);
+     }
+ 
+     jmethodID GetStaticMethodID(jclass clazz, const char *name,
+-				const char *sig) {
++                                const char *sig) {
+         return functions->GetStaticMethodID(this,clazz,name,sig);
+     }
+ 
+     jobject CallStaticObjectMethod(jclass clazz, jmethodID methodID,
+-			     ...) {
++                             ...) {
+         va_list args;
+-	jobject result;
+-	va_start(args,methodID);
+-	result = functions->CallStaticObjectMethodV(this,clazz,methodID,args);
+-	va_end(args);
+-	return result;
++        jobject result;
++        va_start(args,methodID);
++        result = functions->CallStaticObjectMethodV(this,clazz,methodID,args);
++        va_end(args);
++        return result;
+     }
+     jobject CallStaticObjectMethodV(jclass clazz, jmethodID methodID,
+-			      va_list args) {
++                              va_list args) {
+         return functions->CallStaticObjectMethodV(this,clazz,methodID,args);
+     }
+     jobject CallStaticObjectMethodA(jclass clazz, jmethodID methodID,
+-			      const jvalue *args) {
++                              const jvalue *args) {
+         return functions->CallStaticObjectMethodA(this,clazz,methodID,args);
+     }
+ 
+     jboolean CallStaticBooleanMethod(jclass clazz,
+-				     jmethodID methodID, ...) {
++                                     jmethodID methodID, ...) {
+         va_list args;
+-	jboolean result;
+-	va_start(args,methodID);
+-	result = functions->CallStaticBooleanMethodV(this,clazz,methodID,args);
+-	va_end(args);
+-	return result;
++        jboolean result;
++        va_start(args,methodID);
++        result = functions->CallStaticBooleanMethodV(this,clazz,methodID,args);
++        va_end(args);
++        return result;
+     }
+     jboolean CallStaticBooleanMethodV(jclass clazz,
+-				      jmethodID methodID, va_list args) {
++                                      jmethodID methodID, va_list args) {
+         return functions->CallStaticBooleanMethodV(this,clazz,methodID,args);
+     }
+     jboolean CallStaticBooleanMethodA(jclass clazz,
+-				      jmethodID methodID, const jvalue *args) {
++                                      jmethodID methodID, const jvalue *args) {
+         return functions->CallStaticBooleanMethodA(this,clazz,methodID,args);
+     }
+ 
+     jbyte CallStaticByteMethod(jclass clazz,
+-			       jmethodID methodID, ...) {
++                               jmethodID methodID, ...) {
+         va_list args;
+-	jbyte result;
+-	va_start(args,methodID);
+-	result = functions->CallStaticByteMethodV(this,clazz,methodID,args);
+-	va_end(args);
+-	return result;
++        jbyte result;
++        va_start(args,methodID);
++        result = functions->CallStaticByteMethodV(this,clazz,methodID,args);
++        va_end(args);
++        return result;
+     }
+     jbyte CallStaticByteMethodV(jclass clazz,
+-				jmethodID methodID, va_list args) {
++                                jmethodID methodID, va_list args) {
+         return functions->CallStaticByteMethodV(this,clazz,methodID,args);
+     }
+     jbyte CallStaticByteMethodA(jclass clazz,
+-				jmethodID methodID, const jvalue *args) {
++                                jmethodID methodID, const jvalue *args) {
+         return functions->CallStaticByteMethodA(this,clazz,methodID,args);
+     }
+ 
+     jchar CallStaticCharMethod(jclass clazz,
+-			       jmethodID methodID, ...) {
++                               jmethodID methodID, ...) {
+         va_list args;
+-	jchar result;
+-	va_start(args,methodID);
+-	result = functions->CallStaticCharMethodV(this,clazz,methodID,args);
+-	va_end(args);
+-	return result;
++        jchar result;
++        va_start(args,methodID);
++        result = functions->CallStaticCharMethodV(this,clazz,methodID,args);
++        va_end(args);
++        return result;
+     }
+     jchar CallStaticCharMethodV(jclass clazz,
+-				jmethodID methodID, va_list args) {
++                                jmethodID methodID, va_list args) {
+         return functions->CallStaticCharMethodV(this,clazz,methodID,args);
+     }
+     jchar CallStaticCharMethodA(jclass clazz,
+-				jmethodID methodID, const jvalue *args) {
++                                jmethodID methodID, const jvalue *args) {
+         return functions->CallStaticCharMethodA(this,clazz,methodID,args);
+     }
+ 
+     jshort CallStaticShortMethod(jclass clazz,
+-				 jmethodID methodID, ...) {
++                                 jmethodID methodID, ...) {
+         va_list args;
+-	jshort result;
+-	va_start(args,methodID);
+-	result = functions->CallStaticShortMethodV(this,clazz,methodID,args);
+-	va_end(args);
+-	return result;
++        jshort result;
++        va_start(args,methodID);
++        result = functions->CallStaticShortMethodV(this,clazz,methodID,args);
++        va_end(args);
++        return result;
+     }
+     jshort CallStaticShortMethodV(jclass clazz,
+-				  jmethodID methodID, va_list args) {
++                                  jmethodID methodID, va_list args) {
+         return functions->CallStaticShortMethodV(this,clazz,methodID,args);
+     }
+     jshort CallStaticShortMethodA(jclass clazz,
+-				  jmethodID methodID, const jvalue *args) {
++                                  jmethodID methodID, const jvalue *args) {
+         return functions->CallStaticShortMethodA(this,clazz,methodID,args);
+     }
+ 
+     jint CallStaticIntMethod(jclass clazz,
+-			     jmethodID methodID, ...) {
++                             jmethodID methodID, ...) {
+         va_list args;
+-	jint result;
+-	va_start(args,methodID);
+-	result = functions->CallStaticIntMethodV(this,clazz,methodID,args);
+-	va_end(args);
+-	return result;
++        jint result;
++        va_start(args,methodID);
++        result = functions->CallStaticIntMethodV(this,clazz,methodID,args);
++        va_end(args);
++        return result;
+     }
+     jint CallStaticIntMethodV(jclass clazz,
+-			      jmethodID methodID, va_list args) {
++                              jmethodID methodID, va_list args) {
+         return functions->CallStaticIntMethodV(this,clazz,methodID,args);
+     }
+     jint CallStaticIntMethodA(jclass clazz,
+-			      jmethodID methodID, const jvalue *args) {
++                              jmethodID methodID, const jvalue *args) {
+         return functions->CallStaticIntMethodA(this,clazz,methodID,args);
+     }
+ 
+     jlong CallStaticLongMethod(jclass clazz,
+-			       jmethodID methodID, ...) {
++                               jmethodID methodID, ...) {
+         va_list args;
+-	jlong result;
+-	va_start(args,methodID);
+-	result = functions->CallStaticLongMethodV(this,clazz,methodID,args);
+-	va_end(args);
+-	return result;
++        jlong result;
++        va_start(args,methodID);
++        result = functions->CallStaticLongMethodV(this,clazz,methodID,args);
++        va_end(args);
++        return result;
+     }
+     jlong CallStaticLongMethodV(jclass clazz,
+-				jmethodID methodID, va_list args) {
++                                jmethodID methodID, va_list args) {
+         return functions->CallStaticLongMethodV(this,clazz,methodID,args);
+     }
+     jlong CallStaticLongMethodA(jclass clazz,
+-				jmethodID methodID, const jvalue *args) {
++                                jmethodID methodID, const jvalue *args) {
+         return functions->CallStaticLongMethodA(this,clazz,methodID,args);
+     }
+ 
+     jfloat CallStaticFloatMethod(jclass clazz,
+-				 jmethodID methodID, ...) {
++                                 jmethodID methodID, ...) {
+         va_list args;
+-	jfloat result;
+-	va_start(args,methodID);
+-	result = functions->CallStaticFloatMethodV(this,clazz,methodID,args);
+-	va_end(args);
+-	return result;
++        jfloat result;
++        va_start(args,methodID);
++        result = functions->CallStaticFloatMethodV(this,clazz,methodID,args);
++        va_end(args);
++        return result;
+     }
+     jfloat CallStaticFloatMethodV(jclass clazz,
+-				  jmethodID methodID, va_list args) {
++                                  jmethodID methodID, va_list args) {
+         return functions->CallStaticFloatMethodV(this,clazz,methodID,args);
+     }
+     jfloat CallStaticFloatMethodA(jclass clazz,
+-				  jmethodID methodID, const jvalue *args) {
++                                  jmethodID methodID, const jvalue *args) {
+         return functions->CallStaticFloatMethodA(this,clazz,methodID,args);
+     }
+ 
+     jdouble CallStaticDoubleMethod(jclass clazz,
+-				   jmethodID methodID, ...) {
++                                   jmethodID methodID, ...) {
+         va_list args;
+-	jdouble result;
+-	va_start(args,methodID);
+-	result = functions->CallStaticDoubleMethodV(this,clazz,methodID,args);
+-	va_end(args);
+-	return result;
++        jdouble result;
++        va_start(args,methodID);
++        result = functions->CallStaticDoubleMethodV(this,clazz,methodID,args);
++        va_end(args);
++        return result;
+     }
+     jdouble CallStaticDoubleMethodV(jclass clazz,
+-				    jmethodID methodID, va_list args) {
++                                    jmethodID methodID, va_list args) {
+         return functions->CallStaticDoubleMethodV(this,clazz,methodID,args);
+     }
+     jdouble CallStaticDoubleMethodA(jclass clazz,
+-				    jmethodID methodID, const jvalue *args) {
++                                    jmethodID methodID, const jvalue *args) {
+         return functions->CallStaticDoubleMethodA(this,clazz,methodID,args);
+     }
+ 
+     void CallStaticVoidMethod(jclass cls, jmethodID methodID, ...) {
+         va_list args;
+-	va_start(args,methodID);
+-	functions->CallStaticVoidMethodV(this,cls,methodID,args);
+-	va_end(args);
++        va_start(args,methodID);
++        functions->CallStaticVoidMethodV(this,cls,methodID,args);
++        va_end(args);
+     }
+     void CallStaticVoidMethodV(jclass cls, jmethodID methodID,
+-			       va_list args) {
++                               va_list args) {
+         functions->CallStaticVoidMethodV(this,cls,methodID,args);
+     }
+     void CallStaticVoidMethodA(jclass cls, jmethodID methodID,
+-			       const jvalue * args) {
++                               const jvalue * args) {
+         functions->CallStaticVoidMethodA(this,cls,methodID,args);
+     }
+ 
+     jfieldID GetStaticFieldID(jclass clazz, const char *name,
+-			      const char *sig) {
++                              const char *sig) {
+         return functions->GetStaticFieldID(this,clazz,name,sig);
+     }
+     jobject GetStaticObjectField(jclass clazz, jfieldID fieldID) {
+@@ -1561,39 +1558,39 @@
+     }
+ 
+     void SetStaticObjectField(jclass clazz, jfieldID fieldID,
+-			jobject value) {
++                        jobject value) {
+       functions->SetStaticObjectField(this,clazz,fieldID,value);
+     }
+     void SetStaticBooleanField(jclass clazz, jfieldID fieldID,
+-			jboolean value) {
++                        jboolean value) {
+       functions->SetStaticBooleanField(this,clazz,fieldID,value);
+     }
+     void SetStaticByteField(jclass clazz, jfieldID fieldID,
+-			jbyte value) {
++                        jbyte value) {
+       functions->SetStaticByteField(this,clazz,fieldID,value);
+     }
+     void SetStaticCharField(jclass clazz, jfieldID fieldID,
+-			jchar value) {
++                        jchar value) {
+       functions->SetStaticCharField(this,clazz,fieldID,value);
+     }
+     void SetStaticShortField(jclass clazz, jfieldID fieldID,
+-			jshort value) {
++                        jshort value) {
+       functions->SetStaticShortField(this,clazz,fieldID,value);
+     }
+     void SetStaticIntField(jclass clazz, jfieldID fieldID,
+-			jint value) {
++                        jint value) {
+       functions->SetStaticIntField(this,clazz,fieldID,value);
+     }
+     void SetStaticLongField(jclass clazz, jfieldID fieldID,
+-			jlong value) {
++                        jlong value) {
+       functions->SetStaticLongField(this,clazz,fieldID,value);
+     }
+     void SetStaticFloatField(jclass clazz, jfieldID fieldID,
+-			jfloat value) {
++                        jfloat value) {
+       functions->SetStaticFloatField(this,clazz,fieldID,value);
+     }
+     void SetStaticDoubleField(jclass clazz, jfieldID fieldID,
+-			jdouble value) {
++                        jdouble value) {
+       functions->SetStaticDoubleField(this,clazz,fieldID,value);
+     }
+ 
+@@ -1628,14 +1625,14 @@
+     }
+ 
+     jobjectArray NewObjectArray(jsize len, jclass clazz,
+-				jobject init) {
++                                jobject init) {
+         return functions->NewObjectArray(this,len,clazz,init);
+     }
+     jobject GetObjectArrayElement(jobjectArray array, jsize index) {
+         return functions->GetObjectArrayElement(this,array,index);
+     }
+     void SetObjectArrayElement(jobjectArray array, jsize index,
+-			       jobject val) {
++                               jobject val) {
+         functions->SetObjectArrayElement(this,array,index,val);
+     }
+ 
+@@ -1690,114 +1687,114 @@
+     }
+ 
+     void ReleaseBooleanArrayElements(jbooleanArray array,
+-				     jboolean *elems,
+-				     jint mode) {
++                                     jboolean *elems,
++                                     jint mode) {
+         functions->ReleaseBooleanArrayElements(this,array,elems,mode);
+     }
+     void ReleaseByteArrayElements(jbyteArray array,
+-				  jbyte *elems,
+-				  jint mode) {
++                                  jbyte *elems,
++                                  jint mode) {
+         functions->ReleaseByteArrayElements(this,array,elems,mode);
+     }
+     void ReleaseCharArrayElements(jcharArray array,
+-				  jchar *elems,
+-				  jint mode) {
++                                  jchar *elems,
++                                  jint mode) {
+         functions->ReleaseCharArrayElements(this,array,elems,mode);
+     }
+     void ReleaseShortArrayElements(jshortArray array,
+-				   jshort *elems,
+-				   jint mode) {
++                                   jshort *elems,
++                                   jint mode) {
+         functions->ReleaseShortArrayElements(this,array,elems,mode);
+     }
+     void ReleaseIntArrayElements(jintArray array,
+-				 jint *elems,
+-				 jint mode) {
++                                 jint *elems,
++                                 jint mode) {
+         functions->ReleaseIntArrayElements(this,array,elems,mode);
+     }
+     void ReleaseLongArrayElements(jlongArray array,
+-				  jlong *elems,
+-				  jint mode) {
++                                  jlong *elems,
++                                  jint mode) {
+         functions->ReleaseLongArrayElements(this,array,elems,mode);
+     }
+     void ReleaseFloatArrayElements(jfloatArray array,
+-				   jfloat *elems,
+-				   jint mode) {
++                                   jfloat *elems,
++                                   jint mode) {
+         functions->ReleaseFloatArrayElements(this,array,elems,mode);
+     }
+     void ReleaseDoubleArrayElements(jdoubleArray array,
+-				    jdouble *elems,
+-				    jint mode) {
++                                    jdouble *elems,
++                                    jint mode) {
+         functions->ReleaseDoubleArrayElements(this,array,elems,mode);
+     }
+ 
+     void GetBooleanArrayRegion(jbooleanArray array,
+-			       jsize start, jsize len, jboolean *buf) {
++                               jsize start, jsize len, jboolean *buf) {
+         functions->GetBooleanArrayRegion(this,array,start,len,buf);
+     }
+     void GetByteArrayRegion(jbyteArray array,
+-			    jsize start, jsize len, jbyte *buf) {
++                            jsize start, jsize len, jbyte *buf) {
+         functions->GetByteArrayRegion(this,array,start,len,buf);
+     }
+     void GetCharArrayRegion(jcharArray array,
+-			    jsize start, jsize len, jchar *buf) {
++                            jsize start, jsize len, jchar *buf) {
+         functions->GetCharArrayRegion(this,array,start,len,buf);
+     }
+     void GetShortArrayRegion(jshortArray array,
+-			     jsize start, jsize len, jshort *buf) {
++                             jsize start, jsize len, jshort *buf) {
+         functions->GetShortArrayRegion(this,array,start,len,buf);
+     }
+     void GetIntArrayRegion(jintArray array,
+-			   jsize start, jsize len, jint *buf) {
++                           jsize start, jsize len, jint *buf) {
+         functions->GetIntArrayRegion(this,array,start,len,buf);
+     }
+     void GetLongArrayRegion(jlongArray array,
+-			    jsize start, jsize len, jlong *buf) {
++                            jsize start, jsize len, jlong *buf) {
+         functions->GetLongArrayRegion(this,array,start,len,buf);
+     }
+     void GetFloatArrayRegion(jfloatArray array,
+-			     jsize start, jsize len, jfloat *buf) {
++                             jsize start, jsize len, jfloat *buf) {
+         functions->GetFloatArrayRegion(this,array,start,len,buf);
+     }
+     void GetDoubleArrayRegion(jdoubleArray array,
+-			      jsize start, jsize len, jdouble *buf) {
++                              jsize start, jsize len, jdouble *buf) {
+         functions->GetDoubleArrayRegion(this,array,start,len,buf);
+     }
+ 
+     void SetBooleanArrayRegion(jbooleanArray array, jsize start, jsize len,
+-			       const jboolean *buf) {
++                               const jboolean *buf) {
+         functions->SetBooleanArrayRegion(this,array,start,len,buf);
+     }
+     void SetByteArrayRegion(jbyteArray array, jsize start, jsize len,
+-			    const jbyte *buf) {
++                            const jbyte *buf) {
+         functions->SetByteArrayRegion(this,array,start,len,buf);
+     }
+     void SetCharArrayRegion(jcharArray array, jsize start, jsize len,
+-			    const jchar *buf) {
++                            const jchar *buf) {
+         functions->SetCharArrayRegion(this,array,start,len,buf);
+     }
+     void SetShortArrayRegion(jshortArray array, jsize start, jsize len,
+-			     const jshort *buf) {
++                             const jshort *buf) {
+         functions->SetShortArrayRegion(this,array,start,len,buf);
+     }
+     void SetIntArrayRegion(jintArray array, jsize start, jsize len,
+-			   const jint *buf) {
++                           const jint *buf) {
+         functions->SetIntArrayRegion(this,array,start,len,buf);
+     }
+     void SetLongArrayRegion(jlongArray array, jsize start, jsize len,
+-			    const jlong *buf) {
++                            const jlong *buf) {
+         functions->SetLongArrayRegion(this,array,start,len,buf);
+     }
+     void SetFloatArrayRegion(jfloatArray array, jsize start, jsize len,
+-			     const jfloat *buf) {
++                             const jfloat *buf) {
+         functions->SetFloatArrayRegion(this,array,start,len,buf);
+     }
+     void SetDoubleArrayRegion(jdoubleArray array, jsize start, jsize len,
+-			      const jdouble *buf) {
++                              const jdouble *buf) {
+         functions->SetDoubleArrayRegion(this,array,start,len,buf);
+     }
+ 
+     jint RegisterNatives(jclass clazz, const JNINativeMethod *methods,
+-			 jint nMethods) {
++                         jint nMethods) {
+         return functions->RegisterNatives(this,clazz,methods,nMethods);
+     }
+     jint UnregisterNatives(jclass clazz) {
+@@ -1844,7 +1841,7 @@
+     }
+ 
+     jboolean ExceptionCheck() {
+-	return functions->ExceptionCheck(this);
++        return functions->ExceptionCheck(this);
+     }
+ 
+     jobject NewDirectByteBuffer(void* address, jlong capacity) {
+@@ -1960,6 +1957,3 @@
+ #endif /* __cplusplus */
+ 
+ #endif /* !_JAVASOFT_JNI_H_ */
+-
+-
+-
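The jni.h hunks above are whitespace-only: each removed/added pair replaces hard tabs with spaces in the JNIEnv C++ inline wrappers, with no behavioral change. Every varargs wrapper follows the same shape as the CallStatic*Method bodies shown: open a va_list, forward to the matching ...MethodV entry in the functions table, close the va_list. From the caller's side the wrappers are used like this; a minimal sketch (the class and method looked up are illustrative, not part of the patch, and error checks are omitted for brevity):

    #include <jni.h>

    // C++ callers use the JNIEnv member functions directly instead of the
    // C-style (*env)->CallStaticLongMethod(env, ...). The varargs wrapper
    // packs its arguments into a va_list and forwards to the ...MethodV
    // slot in the `functions` table, exactly as in the hunks above.
    static jlong current_time_millis(JNIEnv* env) {
        jclass cls = env->FindClass("java/lang/System");
        jmethodID mid = env->GetStaticMethodID(cls, "currentTimeMillis", "()J");
        return env->CallStaticLongMethod(cls, mid);  // -> CallStaticLongMethodV
    }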
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jni_md.h openjdk/hotspot/src/share/vm/prims/jni_md.h
+--- openjdk6/hotspot/src/share/vm/prims/jni_md.h	2008-02-28 05:02:41.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jni_md.h	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jni_md.h	1.25 07/05/05 17:06:34 JVM"
+-#endif
+ /*
+  * Copyright 1997-2000 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvm.cpp openjdk/hotspot/src/share/vm/prims/jvm.cpp
+--- openjdk6/hotspot/src/share/vm/prims/jvm.cpp	2008-02-28 05:02:41.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvm.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)jvm.cpp	1.567 07/08/20 16:28:14 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -30,10 +27,10 @@
+ #include <errno.h>
+ 
+ /*
+-  NOTE about use of any ctor or function call that can trigger a safepoint/GC: 
+-  such ctors and calls MUST NOT come between an oop declaration/init and its 
+-  usage because if objects are move this may cause various memory stomps, bus 
+-  errors and segfaults. Here is a cookbook for causing so called "naked oop 
++  NOTE about use of any ctor or function call that can trigger a safepoint/GC:
++  such ctors and calls MUST NOT come between an oop declaration/init and its
++  usage because if objects are moved this may cause various memory stomps, bus
++  errors and segfaults. Here is a cookbook for causing so-called "naked oop
+   failures":
+ 
+       JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredFields<etc> {
+@@ -58,7 +55,7 @@
+ 
+   The solution is to keep the oop declaration BELOW the ctor or function
+   call that might cause a GC, do another resolve to reassign the oop, or
+-  consider use of a Handle instead of an oop so there is immunity from object 
++  consider use of a Handle instead of an oop so there is immunity from object
+   motion. But note that the "QUICK" entries below do not have a handlemark
+   and thus can only support use of handles passed in.
+ */
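A compressed sketch of the failure mode and the fix described in the note above, using the same types that appear throughout this file (SomeCall_that_may_GC and use are hypothetical stand-ins for any call that can reach a safepoint and any later use of the object):

    // Unsafe: `obj` is a naked oop. If the call below reaches a safepoint
    // and the collector moves the object, `obj` is left dangling.
    oop obj = JNIHandles::resolve_non_null(handle);
    SomeCall_that_may_GC(CHECK);   // hypothetical safepointing call
    use(obj);                      // may now point at stale memory

    // Safe: a Handle is updated by the GC across object motion, so the
    // raw oop is re-derived only after the safepointing call.
    Handle h(THREAD, JNIHandles::resolve_non_null(handle));
    SomeCall_that_may_GC(CHECK);
    use(h());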
+@@ -82,8 +79,8 @@
+ 
+     while (!vfst.at_end()) {
+       methodOop m = vfst.method();
+-      if (!vfst.method()->method_holder()->klass_part()->is_subclass_of(SystemDictionary::classloader_klass())&& 
+-          !vfst.method()->method_holder()->klass_part()->is_subclass_of(access_controller_klass) && 
++      if (!vfst.method()->method_holder()->klass_part()->is_subclass_of(SystemDictionary::classloader_klass())&&
++          !vfst.method()->method_holder()->klass_part()->is_subclass_of(access_controller_klass) &&
+           !vfst.method()->method_holder()->klass_part()->is_subclass_of(privileged_action_klass)) {
+         break;
+       }
+@@ -148,7 +145,7 @@
+ #ifdef ASSERT
+   class JVMTraceWrapper : public StackObj {
+    public:
+-    JVMTraceWrapper(const char* format, ...) {      
++    JVMTraceWrapper(const char* format, ...) {
+       if (TraceJVMCalls) {
+         va_list ap;
+         va_start(ap, format);
+@@ -226,8 +223,8 @@
+ JVM_END
+ 
+ 
+-JVM_ENTRY(void, JVM_ArrayCopy(JNIEnv *env, jclass ignored, jobject src, jint src_pos, 
+-                               jobject dst, jint dst_pos, jint length))  
++JVM_ENTRY(void, JVM_ArrayCopy(JNIEnv *env, jclass ignored, jobject src, jint src_pos,
++                               jobject dst, jint dst_pos, jint length))
+   JVMWrapper("JVM_ArrayCopy");
+   // Check if we have null pointers
+   if (src == NULL || dst == NULL) {
+@@ -244,22 +241,22 @@
+ 
+ static void set_property(Handle props, const char* key, const char* value, TRAPS) {
+   JavaValue r(T_OBJECT);
+-  // public synchronized Object put(Object key, Object value);  
++  // public synchronized Object put(Object key, Object value);
+   HandleMark hm(THREAD);
+   Handle key_str    = java_lang_String::create_from_platform_dependent_str(key, CHECK);
+-  Handle value_str  = java_lang_String::create_from_platform_dependent_str((value != NULL ? value : ""), CHECK);      
+-  JavaCalls::call_virtual(&r,                           
+-                          props, 
++  Handle value_str  = java_lang_String::create_from_platform_dependent_str((value != NULL ? value : ""), CHECK);
++  JavaCalls::call_virtual(&r,
++                          props,
+                           KlassHandle(THREAD, SystemDictionary::properties_klass()),
+-                          vmSymbolHandles::put_name(), 
+-                          vmSymbolHandles::object_object_object_signature(), 
+-                          key_str, 
+-                          value_str, 
+-                          THREAD);  
++                          vmSymbolHandles::put_name(),
++                          vmSymbolHandles::object_object_object_signature(),
++                          key_str,
++                          value_str,
++                          THREAD);
+ }
+ 
+ 
+-#define PUTPROP(props, name, value) set_property((props), (name), (value), CHECK_(properties)); 
++#define PUTPROP(props, name, value) set_property((props), (name), (value), CHECK_(properties));
+ 
+ 
+ JVM_ENTRY(jobject, JVM_InitProperties(JNIEnv *env, jobject properties))
+@@ -274,9 +271,9 @@
+     PUTPROP(props, p->key(), p->value());
+   }
+ 
+-  // Convert the -XX:MaxDirectMemorySize= command line flag 
++  // Convert the -XX:MaxDirectMemorySize= command line flag
+   // to the sun.nio.MaxDirectMemorySize property.
+-  // Do this after setting user properties to prevent people 
++  // Do this after setting user properties to prevent people
+   // from setting the value with a -D option, as requested.
+   {
+     char as_chars[256];
+@@ -291,7 +288,7 @@
+ #if defined(_LP64) || defined(_WIN64)
+   #define CSIZE "64-Bit "
+ #else
+-  #define CSIZE 
++  #define CSIZE
+ #endif // 64bit
+ 
+ #ifdef TIERED
+@@ -330,7 +327,7 @@
+ JVM_END
+ 
+ 
+-JVM_ENTRY_NO_ENV(void, JVM_Halt(jint code))  
++JVM_ENTRY_NO_ENV(void, JVM_Halt(jint code))
+   before_exit(thread);
+   vm_exit(code);
+ JVM_END
+@@ -341,7 +338,7 @@
+ JVM_END
+ 
+ 
+-JVM_ENTRY_NO_ENV(void, JVM_GC(void))  
++JVM_ENTRY_NO_ENV(void, JVM_GC(void))
+   JVMWrapper("JVM_GC");
+   if (!DisableExplicitGC) {
+     Universe::heap()->collect(GCCause::_java_lang_system_gc);
+@@ -403,13 +400,13 @@
+ 
+ 
+ JVM_ENTRY(void, JVM_FillInStackTrace(JNIEnv *env, jobject receiver))
+-  JVMWrapper("JVM_FillInStackTrace");    
++  JVMWrapper("JVM_FillInStackTrace");
+   Handle exception(thread, JNIHandles::resolve_non_null(receiver));
+   java_lang_Throwable::fill_in_stack_trace(exception);
+ JVM_END
+ 
+ 
+-JVM_ENTRY(void, JVM_PrintStackTrace(JNIEnv *env, jobject receiver, jobject printable))  
++JVM_ENTRY(void, JVM_PrintStackTrace(JNIEnv *env, jobject receiver, jobject printable))
+   JVMWrapper("JVM_PrintStackTrace");
+   // Note: This is no longer used in Merlin, but we still support it for compatibility.
+   oop exception = JNIHandles::resolve_non_null(receiver);
+@@ -418,14 +415,14 @@
+ JVM_END
+ 
+ 
+-JVM_ENTRY(jint, JVM_GetStackTraceDepth(JNIEnv *env, jobject throwable))  
++JVM_ENTRY(jint, JVM_GetStackTraceDepth(JNIEnv *env, jobject throwable))
+   JVMWrapper("JVM_GetStackTraceDepth");
+   oop exception = JNIHandles::resolve(throwable);
+   return java_lang_Throwable::get_stack_trace_depth(exception, THREAD);
+ JVM_END
+ 
+ 
+-JVM_ENTRY(jobject, JVM_GetStackTraceElement(JNIEnv *env, jobject throwable, jint index))  
++JVM_ENTRY(jobject, JVM_GetStackTraceElement(JNIEnv *env, jobject throwable, jint index))
+   JVMWrapper("JVM_GetStackTraceElement");
+   JvmtiVMObjectAllocEventCollector oam; // This ctor (throughout this module) may trigger a safepoint/GC
+   oop exception = JNIHandles::resolve(throwable);
+@@ -440,7 +437,7 @@
+ JVM_ENTRY(jint, JVM_IHashCode(JNIEnv* env, jobject handle))
+   JVMWrapper("JVM_IHashCode");
+   // as implemented in the classic virtual machine; return 0 if object is NULL
+-  return handle == NULL ? 0 : ObjectSynchronizer::FastHashCode (THREAD, JNIHandles::resolve_non_null(handle)) ; 
++  return handle == NULL ? 0 : ObjectSynchronizer::FastHashCode (THREAD, JNIHandles::resolve_non_null(handle)) ;
+ JVM_END
+ 
+ 
+@@ -458,9 +455,9 @@
+ 
+ JVM_ENTRY(void, JVM_MonitorNotify(JNIEnv* env, jobject handle))
+   JVMWrapper("JVM_MonitorNotify");
+-  Handle obj(THREAD, JNIHandles::resolve_non_null(handle));  
++  Handle obj(THREAD, JNIHandles::resolve_non_null(handle));
+   assert(obj->is_instance() || obj->is_array(), "JVM_MonitorNotify must apply to an object");
+-  ObjectSynchronizer::notify(obj, CHECK); 
++  ObjectSynchronizer::notify(obj, CHECK);
+ JVM_END
+ 
+ 
+@@ -602,17 +599,17 @@
+ 
+ JVM_ENTRY(jclass, JVM_GetCallerClass(JNIEnv* env, int depth))
+   JVMWrapper("JVM_GetCallerClass");
+-  klassOop k = thread->security_get_caller_class(depth);    
++  klassOop k = thread->security_get_caller_class(depth);
+   return (k == NULL) ? NULL : (jclass) JNIHandles::make_local(env, Klass::cast(k)->java_mirror());
+ JVM_END
+ 
+ 
+-JVM_ENTRY(jclass, JVM_FindPrimitiveClass(JNIEnv* env, const char* utf))  
++JVM_ENTRY(jclass, JVM_FindPrimitiveClass(JNIEnv* env, const char* utf))
+   JVMWrapper("JVM_FindPrimitiveClass");
+   oop mirror = NULL;
+   BasicType t = name2type(utf);
+   if (t != T_ILLEGAL && t != T_OBJECT && t != T_ARRAY) {
+-    mirror = SystemDictionary::java_mirror(t);
++    mirror = Universe::java_mirror(t);
+   }
+   if (mirror == NULL) {
+     THROW_MSG_0(vmSymbols::java_lang_ClassNotFoundException(), (char*) utf);
+@@ -628,10 +625,10 @@
+ JVM_END
+ 
+ 
+-JVM_ENTRY(jclass, JVM_FindClassFromClassLoader(JNIEnv* env, const char* name, 
+-                                               jboolean init, jobject loader, 
++JVM_ENTRY(jclass, JVM_FindClassFromClassLoader(JNIEnv* env, const char* name,
++                                               jboolean init, jobject loader,
+                                                jboolean throwError))
+-  JVMWrapper3("JVM_FindClassFromClassLoader %s throw %s", name, 
++  JVMWrapper3("JVM_FindClassFromClassLoader %s throw %s", name,
+                throwError ? "error" : "exception");
+   // Java libraries should ensure that name is never null...
+   if (name == NULL || (int)strlen(name) > symbolOopDesc::max_length()) {
+@@ -645,7 +642,7 @@
+   }
+   symbolHandle h_name = oopFactory::new_symbol_handle(name, CHECK_NULL);
+   Handle h_loader(THREAD, JNIHandles::resolve(loader));
+-  jclass result = find_class_from_class_loader(env, h_name, init, h_loader, 
++  jclass result = find_class_from_class_loader(env, h_name, init, h_loader,
+                                                Handle(), throwError, thread);
+ 
+   if (TraceClassResolution && result != NULL) {
+@@ -656,7 +653,7 @@
+ JVM_END
+ 
+ 
+-JVM_ENTRY(jclass, JVM_FindClassFromClass(JNIEnv *env, const char *name, 
++JVM_ENTRY(jclass, JVM_FindClassFromClass(JNIEnv *env, const char *name,
+                                          jboolean init, jclass from))
+   JVMWrapper2("JVM_FindClassFromClass %s", name);
+   if (name == NULL || (int)strlen(name) > symbolOopDesc::max_length()) {
+@@ -666,7 +663,7 @@
+   }
+   symbolHandle h_name = oopFactory::new_symbol_handle(name, CHECK_NULL);
+   oop from_class_oop = JNIHandles::resolve(from);
+-  klassOop from_class = (from_class_oop == NULL) 
++  klassOop from_class = (from_class_oop == NULL)
+                            ? (klassOop)NULL
+                            : java_lang_Class::as_klassOop(from_class_oop);
+   oop class_loader = NULL;
+@@ -677,7 +674,7 @@
+   }
+   Handle h_loader(THREAD, class_loader);
+   Handle h_prot  (THREAD, protection_domain);
+-  jclass result = find_class_from_class_loader(env, h_name, init, h_loader, 
++  jclass result = find_class_from_class_loader(env, h_name, init, h_loader,
+                                                h_prot, true, thread);
+ 
+   if (TraceClassResolution && result != NULL) {
+@@ -712,7 +709,7 @@
+ // common code for JVM_DefineClass() and JVM_DefineClassWithSource()
+ static jclass jvm_define_class_common(JNIEnv *env, const char *name, jobject loader, const jbyte *buf, jsize len, jobject pd, const char *source, TRAPS) {
+ 
+-  // Since exceptions can be thrown, class initialization can take place  
++  // Since exceptions can be thrown, class initialization can take place
+   // if name is NULL no check for class name in .class stream has to be made.
+   symbolHandle class_name;
+   if (name != NULL) {
+@@ -729,20 +726,20 @@
+   ClassFileStream st((u1*) buf, len, (char *)source);
+   Handle class_loader (THREAD, JNIHandles::resolve(loader));
+   if (UsePerfData) {
+-    is_lock_held_by_thread(class_loader, 
++    is_lock_held_by_thread(class_loader,
+                            ClassLoader::sync_JVMDefineClassLockFreeCounter(),
+                            THREAD);
+   }
+   Handle protection_domain (THREAD, JNIHandles::resolve(pd));
+-  klassOop k = SystemDictionary::resolve_from_stream(class_name, class_loader, 
+-                                                     protection_domain, &st, 
++  klassOop k = SystemDictionary::resolve_from_stream(class_name, class_loader,
++                                                     protection_domain, &st,
+                                                      CHECK_NULL);
+ 
+   if (TraceClassResolution && k != NULL) {
+     trace_class_resolution(k);
+   }
+ 
+-  return (jclass) JNIHandles::make_local(env, Klass::cast(k)->java_mirror());  
++  return (jclass) JNIHandles::make_local(env, Klass::cast(k)->java_mirror());
+ }
+ 
+ 
+@@ -781,16 +778,16 @@
+ 
+   // Security Note:
+   //   The Java level wrapper will perform the necessary security check allowing
+-  //   us to pass the NULL as the initiating class loader. 
++  //   us to pass the NULL as the initiating class loader.
+   Handle h_loader(THREAD, JNIHandles::resolve(loader));
+   if (UsePerfData) {
+-    is_lock_held_by_thread(h_loader, 
++    is_lock_held_by_thread(h_loader,
+                            ClassLoader::sync_JVMFindLoadedClassLockFreeCounter(),
+                            THREAD);
+   }
+- 
+-  klassOop k = SystemDictionary::find_instance_or_array_klass(klass_name, 
+-                                                              h_loader, 
++
++  klassOop k = SystemDictionary::find_instance_or_array_klass(klass_name,
++                                                              h_loader,
+                                                               Handle(),
+                                                               CHECK_NULL);
+ 
+@@ -807,14 +804,14 @@
+   JvmtiVMObjectAllocEventCollector oam;
+   ResourceMark rm(THREAD);
+   const char* name;
+-  if (java_lang_Class::is_primitive(JNIHandles::resolve(cls))) {    
++  if (java_lang_Class::is_primitive(JNIHandles::resolve(cls))) {
+     name = type2name(java_lang_Class::primitive_type(JNIHandles::resolve(cls)));
+   } else {
+     // Consider caching interned string in Klass
+     klassOop k = java_lang_Class::as_klassOop(JNIHandles::resolve(cls));
+     assert(k->is_klass(), "just checking");
+     name = Klass::cast(k)->external_name();
+-  } 
++  }
+   oop result = StringTable::intern((char*) name, CHECK_NULL);
+   return (jstring) JNIHandles::make_local(env, result);
+ JVM_END
+@@ -822,18 +819,18 @@
+ 
+ JVM_ENTRY(jobjectArray, JVM_GetClassInterfaces(JNIEnv *env, jclass cls))
+   JVMWrapper("JVM_GetClassInterfaces");
+-  JvmtiVMObjectAllocEventCollector oam;  
++  JvmtiVMObjectAllocEventCollector oam;
+   oop mirror = JNIHandles::resolve_non_null(cls);
+ 
+   // Special handling for primitive objects
+-  if (java_lang_Class::is_primitive(mirror)) {    
++  if (java_lang_Class::is_primitive(mirror)) {
+     // Primitive objects does not have any interfaces
+     objArrayOop r = oopFactory::new_objArray(SystemDictionary::class_klass(), 0, CHECK_NULL);
+     return (jobjectArray) JNIHandles::make_local(env, r);
+   }
+ 
+   KlassHandle klass(thread, java_lang_Class::as_klassOop(mirror));
+-  // Figure size of result array    
++  // Figure size of result array
+   int size;
+   if (klass->oop_is_instance()) {
+     size = instanceKlass::cast(klass())->local_interfaces()->length();
+@@ -866,7 +863,7 @@
+   if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(cls))) {
+     return NULL;
+   }
+-  klassOop k = java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(cls));  
++  klassOop k = java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(cls));
+   oop loader = Klass::cast(k)->class_loader();
+   return JNIHandles::make_local(env, loader);
+ JVM_END
+@@ -892,8 +889,8 @@
+   JVMWrapper("JVM_GetClassSigners");
+   JvmtiVMObjectAllocEventCollector oam;
+   if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(cls))) {
+-    // There are no signers for primitive types 
+-    return NULL;    
++    // There are no signers for primitive types
++    return NULL;
+   }
+ 
+   klassOop k = java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(cls));
+@@ -920,7 +917,7 @@
+ 
+ JVM_ENTRY(void, JVM_SetClassSigners(JNIEnv *env, jclass cls, jobjectArray signers))
+   JVMWrapper("JVM_SetClassSigners");
+-  if (!java_lang_Class::is_primitive(JNIHandles::resolve_non_null(cls))) {          
++  if (!java_lang_Class::is_primitive(JNIHandles::resolve_non_null(cls))) {
+     // This call is ignored for primitive types and arrays.
+     // Signers are only set once, ClassLoader.java, and thus shouldn't
+     // be called with an array.  Only the bootstrap loader creates arrays.
+@@ -943,7 +940,7 @@
+     return NULL;
+   }
+ 
+-  klassOop k = java_lang_Class::as_klassOop(JNIHandles::resolve(cls));  
++  klassOop k = java_lang_Class::as_klassOop(JNIHandles::resolve(cls));
+   return (jobject) JNIHandles::make_local(env, Klass::cast(k)->protection_domain());
+ JVM_END
+ 
+@@ -957,13 +954,13 @@
+   }
+   if (!java_lang_Class::is_primitive(JNIHandles::resolve(cls))) {
+     // Call is ignored for primitive types
+-    klassOop k = java_lang_Class::as_klassOop(JNIHandles::resolve(cls));    
++    klassOop k = java_lang_Class::as_klassOop(JNIHandles::resolve(cls));
+ 
+     // cls won't be an array, as this called only from ClassLoader.defineClass
+     if (Klass::cast(k)->oop_is_instance()) {
+       oop pd = JNIHandles::resolve(protection_domain);
+       assert(pd == NULL || pd->is_oop(), "just checking");
+-      instanceKlass::cast(k)->set_protection_domain(pd);    
++      instanceKlass::cast(k)->set_protection_domain(pd);
+     }
+   }
+ JVM_END
+@@ -984,8 +981,8 @@
+ 
+   // get run() method
+   methodOop m_oop = Klass::cast(object->klass())->uncached_lookup_method(
+-                                           vmSymbols::run_method_name(), 
+-                                           vmSymbols::void_object_signature());  
++                                           vmSymbols::run_method_name(),
++                                           vmSymbols::void_object_signature());
+   methodHandle m (THREAD, m_oop);
+   if (m.is_null() || !m->is_method() || !methodOop(m())->is_public() || methodOop(m())->is_static()) {
+     THROW_MSG_0(vmSymbols::java_lang_InternalError(), "No run method");
+@@ -995,12 +992,12 @@
+   vframeStream vfst(thread);
+   vfst.security_get_caller_frame(1);
+ 
+-  if (!vfst.at_end()) {    
++  if (!vfst.at_end()) {
+     pi.initialize(&vfst, JNIHandles::resolve(context), thread->privileged_stack_top(), CHECK_NULL);
+-    thread->set_privileged_stack_top(&pi);      
++    thread->set_privileged_stack_top(&pi);
+   }
+ 
+-    
++
+   // invoke the Object run() in the action object. We cannot use call_interface here, since the static type
+   // is not really known - it is either java.security.PrivilegedAction or java.security.PrivilegedExceptionAction
+   Handle pending_exception;
+@@ -1011,15 +1008,15 @@
+   // done with action, remove ourselves from the list
+   if (!vfst.at_end()) {
+     assert(thread->privileged_stack_top() != NULL && thread->privileged_stack_top() == &pi, "wrong top element");
+-    thread->set_privileged_stack_top(thread->privileged_stack_top()->next());	
+-  }  
++    thread->set_privileged_stack_top(thread->privileged_stack_top()->next());
++  }
+ 
+-  if (HAS_PENDING_EXCEPTION) {      
++  if (HAS_PENDING_EXCEPTION) {
+     pending_exception = Handle(THREAD, PENDING_EXCEPTION);
+     CLEAR_PENDING_EXCEPTION;
+-            
+-    if ( pending_exception->is_a(SystemDictionary::exception_klass()) && 
+-        !pending_exception->is_a(SystemDictionary::runtime_exception_klass())) {      
++
++    if ( pending_exception->is_a(SystemDictionary::exception_klass()) &&
++        !pending_exception->is_a(SystemDictionary::runtime_exception_klass())) {
+       // Throw a java.security.PrivilegedActionException(Exception e) exception
+       JavaCallArguments args(pending_exception);
+       THROW_ARG_0(vmSymbolHandles::java_security_PrivilegedActionException(),
+@@ -1049,14 +1046,14 @@
+     _thread->register_array_for_gc(array);
+   }
+ 
+-  ~RegisterArrayForGC() { 
++  ~RegisterArrayForGC() {
+     _thread->register_array_for_gc(NULL);
+-  }  
++  }
+ };
+ 
+ 
+ JVM_ENTRY(jobject, JVM_GetStackAccessControlContext(JNIEnv *env, jclass cls))
+-  JVMWrapper("JVM_GetStackAccessControlContext");  
++  JVMWrapper("JVM_GetStackAccessControlContext");
+   if (!UsePrivilegedStack) return NULL;
+ 
+   ResourceMark rm(THREAD);
+@@ -1069,55 +1066,55 @@
+ 
+   // Use vframeStream to iterate through Java frames
+   vframeStream vfst(thread);
+-    
++
+   oop previous_protection_domain = NULL;
+-  Handle privileged_context(thread, NULL);  
++  Handle privileged_context(thread, NULL);
+   bool is_privileged = false;
+-  oop protection_domain = NULL;  
+-  
++  oop protection_domain = NULL;
++
+   for(; !vfst.at_end(); vfst.next()) {
+     // get method of frame
+     methodOop method = vfst.method();
+     intptr_t* frame_id   = vfst.frame_id();
+-    
++
+     // check the privileged frames to see if we have a match
+     if (thread->privileged_stack_top() && thread->privileged_stack_top()->frame_id() == frame_id) {
+-      // this frame is privileged 
++      // this frame is privileged
+       is_privileged = true;
+       privileged_context = Handle(thread, thread->privileged_stack_top()->privileged_context());
+       protection_domain  = thread->privileged_stack_top()->protection_domain();
+     } else {
+       protection_domain = instanceKlass::cast(method->method_holder())->protection_domain();
+-    }		
+-     
++    }
++
+     if ((previous_protection_domain != protection_domain) && (protection_domain != NULL)) {
+       local_array->push(protection_domain);
+       previous_protection_domain = protection_domain;
+     }
+ 
+     if (is_privileged) break;
+-  } 
++  }
+ 
+ 
+   // either all the domains on the stack were system domains, or
+   // we had a privileged system domain
+   if (local_array->is_empty()) {
+     if (is_privileged && privileged_context.is_null()) return NULL;
+-        
++
+     oop result = java_security_AccessControlContext::create(objArrayHandle(), is_privileged, privileged_context, CHECK_NULL);
+-    return JNIHandles::make_local(env, result);      
++    return JNIHandles::make_local(env, result);
+   }
+-  
++
+   // the resource area must be registered in case of a gc
+-  RegisterArrayForGC ragc(thread, local_array);      
+-  objArrayOop context = oopFactory::new_objArray(SystemDictionary::protectionDomain_klass(), 
++  RegisterArrayForGC ragc(thread, local_array);
++  objArrayOop context = oopFactory::new_objArray(SystemDictionary::protectionDomain_klass(),
+                                                  local_array->length(), CHECK_NULL);
+   objArrayHandle h_context(thread, context);
+   for (int index = 0; index < local_array->length(); index++) {
+     h_context->obj_at_put(index, local_array->at(index));
+   }
+-  
+-  oop result = java_security_AccessControlContext::create(h_context, is_privileged, privileged_context, CHECK_NULL);    
++
++  oop result = java_security_AccessControlContext::create(h_context, is_privileged, privileged_context, CHECK_NULL);
+ 
+   return JNIHandles::make_local(env, result);
+ JVM_END
+@@ -1137,7 +1134,7 @@
+ JVM_END
+ 
+ 
+-JVM_ENTRY(jclass, JVM_GetComponentType(JNIEnv *env, jclass cls))  
++JVM_ENTRY(jclass, JVM_GetComponentType(JNIEnv *env, jclass cls))
+   JVMWrapper("JVM_GetComponentType");
+   oop mirror = JNIHandles::resolve_non_null(cls);
+   oop result = Reflection::array_component_type(mirror, CHECK_NULL);
+@@ -1169,7 +1166,7 @@
+   // ofClass is a reference to a java_lang_Class object. The mirror object
+   // of an instanceKlass
+ 
+-  if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass)) || 
++  if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass)) ||
+       ! Klass::cast(java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(ofClass)))->oop_is_instance()) {
+     oop result = oopFactory::new_objArray(SystemDictionary::class_klass(), 0, CHECK_NULL);
+     return (jobjectArray)JNIHandles::make_local(env, result);
+@@ -1192,21 +1189,21 @@
+   objArrayOop r = oopFactory::new_objArray(SystemDictionary::class_klass(), length/4, CHECK_NULL);
+   objArrayHandle result (THREAD, r);
+   int members = 0;
+-  
++
+   for(int i = 0; i < length; i += 4) {
+     int ioff = icls->ushort_at(i + inner_class_info_index);
+-    int ooff = icls->ushort_at(i + outer_class_info_index);         
++    int ooff = icls->ushort_at(i + outer_class_info_index);
+ 
+     if (ioff != 0 && ooff != 0) {
+       // Check to see if the name matches the class we're looking for
+       // before attempting to find the class.
+-      if (cp->klass_name_at_matches(k, ooff)) { 
++      if (cp->klass_name_at_matches(k, ooff)) {
+         klassOop outer_klass = cp->klass_at(ooff, CHECK_NULL);
+         if (outer_klass == k()) {
+            klassOop ik = cp->klass_at(ioff, CHECK_NULL);
+            instanceKlassHandle inner_klass (THREAD, ik);
+ 
+-           // Throws an exception if outer klass has not declared k as 
++           // Throws an exception if outer klass has not declared k as
+            // an inner klass
+            Reflection::check_for_inner_class(k, inner_klass, CHECK_NULL);
+ 
+@@ -1224,7 +1221,7 @@
+       res->obj_at_put(i, result->obj_at(i));
+     }
+     return (jobjectArray)JNIHandles::make_local(env, res);
+-  } 
++  }
+ 
+   return (jobjectArray)JNIHandles::make_local(env, result());
+ JVM_END
+@@ -1233,9 +1230,9 @@
+ JVM_ENTRY(jclass, JVM_GetDeclaringClass(JNIEnv *env, jclass ofClass))
+   const int inner_class_info_index = 0;
+   const int outer_class_info_index = 1;
+-  
++
+   // ofClass is a reference to a java_lang_Class object.
+-  if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass)) || 
++  if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass)) ||
+       ! Klass::cast(java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(ofClass)))->oop_is_instance()) {
+     return NULL;
+   }
+@@ -1250,31 +1247,31 @@
+   typeArrayHandle i_icls(thread, k->inner_classes());
+   constantPoolHandle i_cp(thread, k->constants());
+   int i_length = i_icls->length();
+-  
++
+   bool found = false;
+   klassOop ok;
+   instanceKlassHandle outer_klass;
+-  
+-  // Find inner_klass attribute 
++
++  // Find inner_klass attribute
+   for(int i = 0; i < i_length && !found; i+= 4) {
+     int ioff = i_icls->ushort_at(i + inner_class_info_index);
+-    int ooff = i_icls->ushort_at(i + outer_class_info_index);         
++    int ooff = i_icls->ushort_at(i + outer_class_info_index);
+ 
+     if (ioff != 0 && ooff != 0) {
+       // Check to see if the name matches the class we're looking for
+       // before attempting to find the class.
+-      if (i_cp->klass_name_at_matches(k, ioff)) { 
++      if (i_cp->klass_name_at_matches(k, ioff)) {
+         klassOop inner_klass = i_cp->klass_at(ioff, CHECK_NULL);
+         if (k() == inner_klass) {
+           found = true;
+           ok = i_cp->klass_at(ooff, CHECK_NULL);
+           outer_klass = instanceKlassHandle(thread, ok);
+         }
+-      }        
++      }
+     }
+   }
+ 
+-  // If no inner class attribute found for this class. 
++  // If no inner class attribute found for this class.
+   if (!found) return NULL;
+ 
+   // Throws an exception if outer klass has not declared k as an inner klass
+@@ -1290,7 +1287,7 @@
+   JvmtiVMObjectAllocEventCollector oam;
+   ResourceMark rm(THREAD);
+   // Return null for arrays and primatives
+-  if (!java_lang_Class::is_primitive(JNIHandles::resolve(cls))) {    
++  if (!java_lang_Class::is_primitive(JNIHandles::resolve(cls))) {
+     klassOop k = java_lang_Class::as_klassOop(JNIHandles::resolve(cls));
+     if (Klass::cast(k)->oop_is_instance()) {
+       symbolHandle sym = symbolHandle(THREAD, instanceKlass::cast(k)->generic_signature());
+@@ -1308,10 +1305,10 @@
+   JVMWrapper("JVM_GetClassAnnotations");
+   ResourceMark rm(THREAD);
+   // Return null for arrays and primitives
+-  if (!java_lang_Class::is_primitive(JNIHandles::resolve(cls))) {    
++  if (!java_lang_Class::is_primitive(JNIHandles::resolve(cls))) {
+     klassOop k = java_lang_Class::as_klassOop(JNIHandles::resolve(cls));
+     if (Klass::cast(k)->oop_is_instance()) {
+-      return (jbyteArray) JNIHandles::make_local(env, 
++      return (jbyteArray) JNIHandles::make_local(env,
+                                   instanceKlass::cast(k)->class_annotations());
+     }
+   }
+@@ -1430,7 +1427,7 @@
+ 
+   instanceKlassHandle k(THREAD, java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(ofClass)));
+   constantPoolHandle cp(THREAD, k->constants());
+-  
++
+   // Ensure class is linked
+   k->link_class(CHECK_NULL);
+ 
+@@ -1457,7 +1454,7 @@
+       skip_backtrace = true;
+     }
+   }
+- 
++
+   objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_field_klass(), num_fields, CHECK_NULL);
+   objArrayHandle result (THREAD, r);
+ 
+@@ -1469,7 +1466,7 @@
+       int offset = k->offset_from_fields(i);
+       if (offset == java_lang_Throwable::get_backtrace_offset()) continue;
+     }
+- 
++
+     int mods = fields->ushort_at(i + instanceKlass::access_flags_offset) & JVM_RECOGNIZED_FIELD_MODIFIERS;
+     if (!publicOnly || (mods & JVM_ACC_PUBLIC)) {
+       fd.initialize(k(), i);
+@@ -1497,7 +1494,7 @@
+   }
+ 
+   instanceKlassHandle k(THREAD, java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(ofClass)));
+-  
++
+   // Ensure class is linked
+   k->link_class(CHECK_NULL);
+ 
+@@ -1549,7 +1546,7 @@
+   }
+ 
+   instanceKlassHandle k(THREAD, java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(ofClass)));
+-  
++
+   // Ensure class is linked
+   k->link_class(CHECK_NULL);
+ 
+@@ -2049,7 +2046,7 @@
+   klassOop k = java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(cls));
+   k = JvmtiThreadState::class_to_verify_considering_redefinition(k, thread);
+   oop method = instanceKlass::cast(k)->methods()->obj_at(method_index);
+-  return methodOop(method)->access_flags().as_int() & JVM_RECOGNIZED_METHOD_MODIFIERS; 
++  return methodOop(method)->access_flags().as_int() & JVM_RECOGNIZED_METHOD_MODIFIERS;
+ JVM_END
+ 
+ 
+@@ -2117,11 +2114,11 @@
+ JVM_END
+ 
+ /**
+- * All of these JVM_GetCP-xxx methods are used by the old verifier to 
+- * read entries in the constant pool.  Since the old verifier always 
+- * works on a copy of the code, it will not see any rewriting that 
++ * All of these JVM_GetCP-xxx methods are used by the old verifier to
++ * read entries in the constant pool.  Since the old verifier always
++ * works on a copy of the code, it will not see any rewriting that
+  * may possibly occur in the middle of verification.  So it is important
+- * that nothing it calls tries to use the cpCache instead of the raw 
++ * that nothing it calls tries to use the cpCache instead of the raw
+  * constant pool, so we must use cp->uncached_x methods when appropriate.
+  */
+ JVM_ENTRY(const char*, JVM_GetCPFieldNameUTF(JNIEnv *env, jclass cls, jint cp_index))
+@@ -2180,7 +2177,7 @@
+   k = JvmtiThreadState::class_to_verify_considering_redefinition(k, thread);
+   constantPoolOop cp = instanceKlass::cast(k)->constants();
+   switch (cp->tag_at(cp_index).value()) {
+-    case JVM_CONSTANT_Fieldref: 
++    case JVM_CONSTANT_Fieldref:
+       return cp->uncached_signature_ref_at(cp_index)->as_utf8();
+     default:
+       fatal("JVM_GetCPFieldSignatureUTF: illegal constant");
+@@ -2225,7 +2222,7 @@
+   k = JvmtiThreadState::class_to_verify_considering_redefinition(k, thread);
+   constantPoolOop cp = instanceKlass::cast(k)->constants();
+   switch (cp->tag_at(cp_index).value()) {
+-    case JVM_CONSTANT_Methodref: 
++    case JVM_CONSTANT_Methodref:
+     case JVM_CONSTANT_InterfaceMethodref: {
+       int class_index = cp->uncached_klass_ref_index_at(cp_index);
+       symbolOop classname = cp->klass_name_at(class_index);
+@@ -2319,10 +2316,10 @@
+ // IO functions ////////////////////////////////////////////////////////////////////////////////////////
+ 
+ JVM_LEAF(jint, JVM_Open(const char *fname, jint flags, jint mode))
+-  JVMWrapper2("JVM_Open (%s)", fname);  
++  JVMWrapper2("JVM_Open (%s)", fname);
+ 
+   //%note jvm_r6
+-  int result = hpi::open(fname, flags, mode);    
++  int result = hpi::open(fname, flags, mode);
+   if (result >= 0) {
+     return result;
+   } else {
+@@ -2332,7 +2329,7 @@
+       default:
+         return -1;
+     }
+-  }  
++  }
+ JVM_END
+ 
+ 
+@@ -2369,13 +2366,13 @@
+ JVM_LEAF(jlong, JVM_Lseek(jint fd, jlong offset, jint whence))
+   JVMWrapper4("JVM_Lseek (0x%x, %Ld, %d)", fd, offset, whence);
+   //%note jvm_r6
+-  return hpi::lseek(fd, offset, whence);  
++  return hpi::lseek(fd, offset, whence);
+ JVM_END
+ 
+ 
+ JVM_LEAF(jint, JVM_SetLength(jint fd, jlong length))
+   JVMWrapper3("JVM_SetLength (0x%x, %Ld)", fd, length);
+-  return hpi::ftruncate(fd, length);  
++  return hpi::ftruncate(fd, length);
+ JVM_END
+ 
+ 
+@@ -2416,10 +2413,10 @@
+ }
+ 
+ 
+-int jio_vfprintf(FILE* f, const char *fmt, va_list args) {  
++int jio_vfprintf(FILE* f, const char *fmt, va_list args) {
+   if (Arguments::vfprintf_hook() != NULL) {
+      return Arguments::vfprintf_hook()(f, fmt, args);
+-  } else {	
++  } else {
+     return vfprintf(f, fmt, args);
+   }
+ }
+@@ -2439,10 +2436,10 @@
+ void jio_print(const char* s) {
+   // Try to make this function as atomic as possible.
+   if (Arguments::vfprintf_hook() != NULL) {
+-    jio_fprintf(defaultStream::output_stream(), "%s", s);    
++    jio_fprintf(defaultStream::output_stream(), "%s", s);
+   } else {
+     ::write(defaultStream::output_fd(), s, (int)strlen(s));
+-  }  
++  }
+ }
+ 
+ } // Extern C
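The jio_* wrappers above funnel all VM output through a single vfprintf-style hook (Arguments::vfprintf_hook) so an embedder can redirect it. The forwarding itself is ordinary C varargs plumbing; a self-contained sketch of the same shape (the demo_* names and the hook typedef are illustrative, not HotSpot's):

    #include <cstdarg>
    #include <cstdio>

    // Same shape as jio_fprintf/jio_vfprintf: the varargs entry point only
    // packages its arguments; the single va_list-taking function decides
    // between the installed hook and plain vfprintf.
    typedef int (*demo_hook_t)(FILE*, const char*, va_list);
    static demo_hook_t demo_hook = nullptr;  // stand-in for Arguments::vfprintf_hook()

    static int demo_vfprintf(FILE* f, const char* fmt, va_list args) {
        return demo_hook != nullptr ? demo_hook(f, fmt, args)
                                    : vfprintf(f, fmt, args);
    }

    static int demo_fprintf(FILE* f, const char* fmt, ...) {
        va_list args;
        va_start(args, fmt);
        int n = demo_vfprintf(f, fmt, args);
        va_end(args);
        return n;
    }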
+@@ -2461,14 +2458,14 @@
+ // implementation is local to this file, we always lock Threads_lock for that one.
+ 
+ static void thread_entry(JavaThread* thread, TRAPS) {
+-  HandleMark hm(THREAD);  
+-  Handle obj(THREAD, thread->threadObj());    
++  HandleMark hm(THREAD);
++  Handle obj(THREAD, thread->threadObj());
+   JavaValue result(T_VOID);
+-  JavaCalls::call_virtual(&result, 
+-                          obj, 
++  JavaCalls::call_virtual(&result,
++                          obj,
+                           KlassHandle(THREAD, SystemDictionary::thread_klass()),
+-                          vmSymbolHandles::run_method_name(), 
+-                          vmSymbolHandles::void_method_signature(), 
++                          vmSymbolHandles::run_method_name(),
++                          vmSymbolHandles::void_method_signature(),
+                           THREAD);
+ }
+ 
+@@ -2484,10 +2481,10 @@
+ 
+   // We must release the Threads_lock before we can post a jvmti event
+   // in Thread::start.
+-  { 
+-    // Ensure that the C++ Thread and OSThread structures aren't freed before 
++  {
++    // Ensure that the C++ Thread and OSThread structures aren't freed before
+     // we operate.
+-    MutexLocker mu(Threads_lock);  
++    MutexLocker mu(Threads_lock);
+ 
+     // Check to see if we're running a thread that's already exited or was
+     // stopped (is_stillborn) or is still active (thread is not NULL).
+@@ -2495,7 +2492,7 @@
+         java_lang_Thread::thread(JNIHandles::resolve_non_null(jthread)) != NULL) {
+         throw_illegal_thread_state = true;
+     } else {
+-      jlong size = 
++      jlong size =
+              java_lang_Thread::stackSize(JNIHandles::resolve_non_null(jthread));
+       // Allocate the C++ Thread structure and create the native thread.  The
+       // stack size retrieved from java is signed, but the constructor takes
+@@ -2508,7 +2505,7 @@
+       // JavaThread due to lack of memory. Check for this situation and throw
+       // an exception if necessary. Eventually we may want to change this so
+       // that we only grab the lock if the thread was created successfully -
+-      // then we can also do this check and throw the exception in the 
++      // then we can also do this check and throw the exception in the
+       // JavaThread constructor.
+       if (native_thread->osthread() != NULL) {
+         // Note: the current thread is not being used within "prepare".
+@@ -2528,11 +2525,11 @@
+     delete native_thread;
+     if (JvmtiExport::should_post_resource_exhausted()) {
+       JvmtiExport::post_resource_exhausted(
+-        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_THREADS, 
++        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_THREADS,
+         "unable to create new native thread");
+     }
+-    THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(), 
+-              "unable to create new native thread");      
++    THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(),
++              "unable to create new native thread");
+   }
+ 
+   Thread::start(native_thread);
+@@ -2553,12 +2550,12 @@
+   oop java_thread = JNIHandles::resolve_non_null(jthread);
+   JavaThread* receiver = java_lang_Thread::thread(java_thread);
+   Events::log("JVM_StopThread thread JavaThread " INTPTR_FORMAT " as oop " INTPTR_FORMAT " [exception " INTPTR_FORMAT "]", receiver, (address)java_thread, throwable);
+-  // First check if thread already exited  
++  // First check if thread already exited
+   if (receiver != NULL) {
+     // Check if exception is getting thrown at self (use oop equality, since the
+     // target object might exit)
+     if (java_thread == thread->threadObj()) {
+-      // This is a change from JDK 1.1, but JDK 1.2 will also do it:    
++      // This is a change from JDK 1.1, but JDK 1.2 will also do it:
+       // NOTE (from JDK 1.2): this is done solely to prevent stopped
+       // threads from being restarted.
+       // Fix for 4314342, 4145910, perhaps others: it now doesn't have
+@@ -2568,9 +2565,9 @@
+         java_lang_Thread::set_stillborn(java_thread);
+       }
+       THROW_OOP(java_throwable);
+-    } else {  
++    } else {
+       // Enqueues a VM_Operation to stop all threads and then deliver the exception...
+-      Thread::send_async_exception(java_thread, JNIHandles::resolve(throwable));      
++      Thread::send_async_exception(java_thread, JNIHandles::resolve(throwable));
+     }
+   }
+ JVM_END
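One detail in JVM_StopThread above is worth drawing out: a thread stopping itself can raise the throwable synchronously, while stopping another thread must be deferred to a VM operation. An annotated sketch of that branch (names from the hunk; not standalone):

if (java_thread == thread->threadObj()) {
  // Stopping ourselves: the exception can simply be raised here and now.
  THROW_OOP(java_throwable);
} else {
  // Stopping another thread: enqueue a VM operation, so the target sees the
  // async exception at a well-defined poll point rather than at an arbitrary
  // instruction.
  Thread::send_async_exception(java_thread, JNIHandles::resolve(throwable));
}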
+@@ -2585,9 +2582,9 @@
+ 
+ 
+ JVM_ENTRY(void, JVM_SuspendThread(JNIEnv* env, jobject jthread))
+-  JVMWrapper("JVM_SuspendThread");  
++  JVMWrapper("JVM_SuspendThread");
+   oop java_thread = JNIHandles::resolve_non_null(jthread);
+-  JavaThread* receiver = java_lang_Thread::thread(java_thread); 
++  JavaThread* receiver = java_lang_Thread::thread(java_thread);
+ 
+   if (receiver != NULL) {
+     // thread has run and has not exited (still on threads list)
+@@ -2617,22 +2614,22 @@
+     //
+     // assert(java_lang_Thread::thread(receiver->threadObj()) == NULL ||
+     //   receiver->is_being_ext_suspended(), "thread is not suspended");
+-  }  
++  }
+ JVM_END
+ 
+ 
+ JVM_ENTRY(void, JVM_ResumeThread(JNIEnv* env, jobject jthread))
+-  JVMWrapper("JVM_ResumeThread");  
++  JVMWrapper("JVM_ResumeThread");
+   // Ensure that the C++ Thread and OSThread structures aren't freed before we operate.
+   // We need to *always* get the threads lock here, since this operation cannot be allowed during
+-  // a safepoint. The safepoint code relies on suspending a thread to examine its state. If other 
++  // a safepoint. The safepoint code relies on suspending a thread to examine its state. If other
+   // threads randomly resumes threads, then a thread might not be suspended when the safepoint code
+   // looks at it.
+-  MutexLocker ml(Threads_lock);  
++  MutexLocker ml(Threads_lock);
+   JavaThread* thr = java_lang_Thread::thread(JNIHandles::resolve_non_null(jthread));
+   if (thr != NULL) {
+     // the thread has run and is not in the process of exiting
+-    thr->java_resume();  
++    thr->java_resume();
+   }
+ JVM_END
+ 
+@@ -2641,7 +2638,7 @@
+   JVMWrapper("JVM_SetThreadPriority");
+   // Ensure that the C++ Thread and OSThread structures aren't freed before we operate
+   MutexLocker ml(Threads_lock);
+-  oop java_thread = JNIHandles::resolve_non_null(jthread);  
++  oop java_thread = JNIHandles::resolve_non_null(jthread);
+   java_lang_Thread::set_priority(java_thread, (ThreadPriority)prio);
+   JavaThread* thr = java_lang_Thread::thread(java_thread);
+   if (thr != NULL) {                  // Thread not yet started; priority pushed down when it is
+@@ -2653,7 +2650,7 @@
+ JVM_ENTRY(void, JVM_Yield(JNIEnv *env, jclass threadClass))
+   JVMWrapper("JVM_Yield");
+   if (os::dont_yield()) return;
+-  // When ConvertYieldToSleep is off (default), this matches the classic VM use of yield. 
++  // When ConvertYieldToSleep is off (default), this matches the classic VM use of yield.
+   // Critical for similar threading behaviour
+   if (ConvertYieldToSleep) {
+     os::sleep(thread, MinSleepInterval, false);
+@@ -2664,10 +2661,10 @@
+ 
+ 
+ JVM_ENTRY(void, JVM_Sleep(JNIEnv* env, jclass threadClass, jlong millis))
+-  JVMWrapper("JVM_Sleep");  
++  JVMWrapper("JVM_Sleep");
+ 
+   if (millis < 0) {
+-    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");    
++    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
+   }
+ 
+   if (Thread::is_interrupted (THREAD, true) && !HAS_PENDING_EXCEPTION) {
+@@ -2686,11 +2683,11 @@
+     if (ConvertSleepToYield) {
+       os::yield();
+     } else {
+-      ThreadState old_state = thread->osthread()->get_state(); 
+-      thread->osthread()->set_state(SLEEPING); 
++      ThreadState old_state = thread->osthread()->get_state();
++      thread->osthread()->set_state(SLEEPING);
+       os::sleep(thread, MinSleepInterval, false);
+       thread->osthread()->set_state(old_state);
+-    } 
++    }
+   } else {
+     ThreadState old_state = thread->osthread()->get_state();
+     thread->osthread()->set_state(SLEEPING);
+@@ -2699,10 +2696,10 @@
+       // us while we were sleeping. We do not overwrite those.
+       if (!HAS_PENDING_EXCEPTION) {
+         // TODO-FIXME: THROW_MSG returns which means we will not call set_state()
+-        // to properly restore the thread state.  That's likely wrong.  
++        // to properly restore the thread state.  That's likely wrong.
+         THROW_MSG(vmSymbols::java_lang_InterruptedException(), "sleep interrupted");
+-      }  
+-    }  
++      }
++    }
+     thread->osthread()->set_state(old_state);
+   }
+ JVM_END
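The TODO-FIXME above observes that THROW_MSG returns from the enclosing function without restoring the saved thread state. A hypothetical RAII guard, not present in the source but built from the OSThread and ThreadState types used above, would make the restore unconditional:

// Hypothetical guard (sketch only): the destructor runs even when THROW_MSG
// returns early from the enclosing scope, so SLEEPING can never leak out.
class OSThreadStateSaver {
  OSThread*   _ost;
  ThreadState _saved;
 public:
  OSThreadStateSaver(OSThread* ost, ThreadState new_state)
      : _ost(ost), _saved(ost->get_state()) { _ost->set_state(new_state); }
  ~OSThreadStateSaver() { _ost->set_state(_saved); }
};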
+@@ -2717,44 +2714,44 @@
+ 
+ JVM_ENTRY(jint, JVM_CountStackFrames(JNIEnv* env, jobject jthread))
+   JVMWrapper("JVM_CountStackFrames");
+-  
++
+   // Ensure that the C++ Thread and OSThread structures aren't freed before we operate
+-  oop java_thread = JNIHandles::resolve_non_null(jthread); 
++  oop java_thread = JNIHandles::resolve_non_null(jthread);
+   bool throw_illegal_thread_state = false;
+   int count = 0;
+- 
++
+   {
+     MutexLockerEx ml(thread->threadObj() == java_thread ? NULL : Threads_lock);
+     // We need to re-resolve the java_thread, since a GC might have happened during the
+     // acquire of the lock
+     JavaThread* thr = java_lang_Thread::thread(JNIHandles::resolve_non_null(jthread));
+-   
+-    if (thr == NULL) { 
++
++    if (thr == NULL) {
+       // do nothing
+-    } else if(! thr->is_external_suspend() || ! thr->frame_anchor()->walkable()) { 
+-      // Check whether this java thread has been suspended already. If not, throws  
+-      // IllegalThreadStateException. We defer to throw that exception until 
++    } else if(! thr->is_external_suspend() || ! thr->frame_anchor()->walkable()) {
++      // Check whether this java thread has been suspended already. If not, throws
++      // IllegalThreadStateException. We defer throwing that exception until
+       // Threads_lock is released since loading exception class has to leave VM.
+-      // The correct way to test a thread is actually suspended is 
+-      // wait_for_ext_suspend_completion(), but we can't call that while holding 
+-      // the Threads_lock. The above tests are sufficient for our purposes 
+-      // provided the walkability of the stack is stable - which it isn't 
++      // The correct way to test whether a thread is actually suspended is
++      // wait_for_ext_suspend_completion(), but we can't call that while holding
++      // the Threads_lock. The above tests are sufficient for our purposes
++      // provided the walkability of the stack is stable - which it isn't
+       // 100% but close enough for most practical purposes.
+-      throw_illegal_thread_state = true; 
+-    } else { 
++      throw_illegal_thread_state = true;
++    } else {
+       // Count all Java activations, i.e., the number of vframes
+-      for(vframeStream vfst(thr); !vfst.at_end(); vfst.next()) {    
++      for(vframeStream vfst(thr); !vfst.at_end(); vfst.next()) {
+         // Native frames are not counted
+-        if (!vfst.method()->is_native()) count++;      
++        if (!vfst.method()->is_native()) count++;
+        }
+     }
+   }
+- 
++
+   if (throw_illegal_thread_state) {
+     THROW_MSG_0(vmSymbols::java_lang_IllegalThreadStateException(),
+                 "this thread is not suspended");
+   }
+-  return count; 
++  return count;
+ JVM_END
+ 
+ // Consider: A better way to implement JVM_Interrupt() is to acquire
+@@ -2764,20 +2761,20 @@
+ // outside the critical section.  Threads_lock is hot so we want to minimize
+ // the hold-time.  A cleaner interface would be to decompose interrupt into
+ // two steps.  The 1st phase, performed under Threads_lock, would return
+-// a closure that'd be invoked after Threads_lock was dropped.   
++// a closure that'd be invoked after Threads_lock was dropped.
+ // This tactic is safe as PlatformEvent and Parkers are type-stable (TSM) and
+-// admit spurious wakeups.  
++// admit spurious wakeups.
+ 
+ JVM_ENTRY(void, JVM_Interrupt(JNIEnv* env, jobject jthread))
+   JVMWrapper("JVM_Interrupt");
+ 
+   // Ensure that the C++ Thread and OSThread structures aren't freed before we operate
+-  oop java_thread = JNIHandles::resolve_non_null(jthread);  
++  oop java_thread = JNIHandles::resolve_non_null(jthread);
+   MutexLockerEx ml(thread->threadObj() == java_thread ? NULL : Threads_lock);
+   // We need to re-resolve the java_thread, since a GC might have happened during the
+   // acquire of the lock
+   JavaThread* thr = java_lang_Thread::thread(JNIHandles::resolve_non_null(jthread));
+-  if (thr != NULL) {    
++  if (thr != NULL) {
+     Thread::interrupt(thr);
+   }
+ JVM_END
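JVM_Interrupt above shows a pattern repeated throughout this file: resolve a JNI handle, block on Threads_lock, then resolve the handle again, because a GC while blocked may have moved the object the first raw oop pointed at. An annotated sketch (names from the hunk; not standalone):

oop java_thread = JNIHandles::resolve_non_null(jthread);  // raw oop; GC may move it
// Skip the lock when interrupting ourselves; otherwise block on Threads_lock.
MutexLockerEx ml(thread->threadObj() == java_thread ? NULL : Threads_lock);
// Blocking above may have spanned a GC. JNI handles are GC-tracked roots but
// raw oops are not, so resolve the handle again before using it.
JavaThread* thr = java_lang_Thread::thread(JNIHandles::resolve_non_null(jthread));
if (thr != NULL) {
  Thread::interrupt(thr);
}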
+@@ -2787,8 +2784,8 @@
+   JVMWrapper("JVM_IsInterrupted");
+ 
+   // Ensure that the C++ Thread and OSThread structures aren't freed before we operate
+-  oop java_thread = JNIHandles::resolve_non_null(jthread);  
+-  MutexLockerEx ml(thread->threadObj() == java_thread ? NULL : Threads_lock);  
++  oop java_thread = JNIHandles::resolve_non_null(jthread);
++  MutexLockerEx ml(thread->threadObj() == java_thread ? NULL : Threads_lock);
+   // We need to re-resolve the java_thread, since a GC might have happened during the
+   // acquire of the lock
+   JavaThread* thr = java_lang_Thread::thread(JNIHandles::resolve_non_null(jthread));
+@@ -2825,32 +2822,32 @@
+ 
+ // java.lang.SecurityManager ///////////////////////////////////////////////////////////////////////
+ 
+-static bool is_trusted_frame(JavaThread* jthread, vframeStream* vfst) {  
++static bool is_trusted_frame(JavaThread* jthread, vframeStream* vfst) {
+   assert(jthread->is_Java_thread(), "must be a Java thread");
+-  if (jthread->privileged_stack_top() == NULL) return false;   
++  if (jthread->privileged_stack_top() == NULL) return false;
+   if (jthread->privileged_stack_top()->frame_id() == vfst->frame_id()) {
+     oop loader = jthread->privileged_stack_top()->class_loader();
+     if (loader == NULL) return true;
+     bool trusted = java_lang_ClassLoader::is_trusted_loader(loader);
+-    if (trusted) return true;      
+-  }  
++    if (trusted) return true;
++  }
+   return false;
+ }
+ 
+ JVM_ENTRY(jclass, JVM_CurrentLoadedClass(JNIEnv *env))
+   JVMWrapper("JVM_CurrentLoadedClass");
+   ResourceMark rm(THREAD);
+-  
++
+   for (vframeStream vfst(thread); !vfst.at_end(); vfst.next()) {
+-    // if a method in a class in a trusted loader is in a doPrivileged, return NULL    
++    // if a method in a class in a trusted loader is in a doPrivileged, return NULL
+     bool trusted = is_trusted_frame(thread, &vfst);
+-    if (trusted) return NULL;    
+-    
++    if (trusted) return NULL;
++
+     methodOop m = vfst.method();
+-    if (!m->is_native()) {      
+-      klassOop holder = m->method_holder();    
++    if (!m->is_native()) {
++      klassOop holder = m->method_holder();
+       oop      loader = instanceKlass::cast(holder)->class_loader();
+-      if (loader != NULL && !java_lang_ClassLoader::is_trusted_loader(loader)) { 
++      if (loader != NULL && !java_lang_ClassLoader::is_trusted_loader(loader)) {
+         return (jclass) JNIHandles::make_local(env, Klass::cast(holder)->java_mirror());
+       }
+     }
+@@ -2862,12 +2859,12 @@
+ JVM_ENTRY(jobject, JVM_CurrentClassLoader(JNIEnv *env))
+   JVMWrapper("JVM_CurrentClassLoader");
+   ResourceMark rm(THREAD);
+-  
++
+   for (vframeStream vfst(thread); !vfst.at_end(); vfst.next()) {
+-  
+-    // if a method in a class in a trusted loader is in a doPrivileged, return NULL    
++
++    // if a method in a class in a trusted loader is in a doPrivileged, return NULL
+     bool trusted = is_trusted_frame(thread, &vfst);
+-    if (trusted) return NULL;    
++    if (trusted) return NULL;
+ 
+     methodOop m = vfst.method();
+     if (!m->is_native()) {
+@@ -2901,14 +2898,14 @@
+   KlassLink* first = NULL;
+   KlassLink* last  = NULL;
+   int depth = 0;
+-    
+-  for(vframeStream vfst(thread); !vfst.at_end(); vfst.security_get_caller_frame(1)) {  
++
++  for(vframeStream vfst(thread); !vfst.at_end(); vfst.security_get_caller_frame(1)) {
+     // Native frames are not returned
+     if (!vfst.method()->is_native()) {
+       klassOop holder = vfst.method()->method_holder();
+-      assert(holder->is_klass(), "just checking");        
++      assert(holder->is_klass(), "just checking");
+       depth++;
+-      KlassLink* l = new KlassLink(KlassHandle(thread, holder));    
++      KlassLink* l = new KlassLink(KlassHandle(thread, holder));
+       if (first == NULL) {
+         first = last = l;
+       } else {
+@@ -2939,15 +2936,15 @@
+   Handle class_name_str = java_lang_String::internalize_classname(h_name, CHECK_0);
+ 
+   const char* str = java_lang_String::as_utf8_string(class_name_str());
+-  symbolHandle class_name_sym = 
++  symbolHandle class_name_sym =
+                 symbolHandle(THREAD, SymbolTable::probe(str, (int)strlen(str)));
+   if (class_name_sym.is_null()) {
+     return -1;
+   }
+ 
+   int depth = 0;
+-  
+-  for(vframeStream vfst(thread); !vfst.at_end(); vfst.next()) {    
++
++  for(vframeStream vfst(thread); !vfst.at_end(); vfst.next()) {
+     if (!vfst.method()->is_native()) {
+       klassOop holder = vfst.method()->method_holder();
+       assert(holder->is_klass(), "just checking");
+@@ -2964,13 +2961,13 @@
+ JVM_ENTRY(jint, JVM_ClassLoaderDepth(JNIEnv *env))
+   JVMWrapper("JVM_ClassLoaderDepth");
+   ResourceMark rm(THREAD);
+-  int depth = 0;  
++  int depth = 0;
+   for (vframeStream vfst(thread); !vfst.at_end(); vfst.next()) {
+-    // if a method in a class in a trusted loader is in a doPrivileged, return -1    
++    // if a method in a class in a trusted loader is in a doPrivileged, return -1
+     bool trusted = is_trusted_frame(thread, &vfst);
+-    if (trusted) return -1;    
+-    
+-    methodOop m = vfst.method();    
++    if (trusted) return -1;
++
++    methodOop m = vfst.method();
+     if (!m->is_native()) {
+       klassOop holder = m->method_holder();
+       assert(holder->is_klass(), "just checking");
+@@ -3008,7 +3005,7 @@
+ 
+ // ObjectInputStream ///////////////////////////////////////////////////////////////
+ 
+-bool force_verify_field_access(klassOop current_class, klassOop field_class, AccessFlags access, bool classloader_only) {  
++bool force_verify_field_access(klassOop current_class, klassOop field_class, AccessFlags access, bool classloader_only) {
+   if (current_class == NULL) {
+     return true;
+   }
+@@ -3017,7 +3014,7 @@
+   }
+ 
+   if (access.is_protected()) {
+-    // See if current_class is a subclass of field_class 
++    // See if current_class is a subclass of field_class
+     if (Klass::cast(current_class)->is_subclass_of(field_class)) {
+       return true;
+     }
+@@ -3037,14 +3034,14 @@
+ 
+   // Cannot instantiate primitive types
+   if (java_lang_Class::is_primitive(curr_mirror) || java_lang_Class::is_primitive(init_mirror)) {
+-    ResourceMark rm(THREAD);    
++    ResourceMark rm(THREAD);
+     THROW_0(vmSymbols::java_lang_InvalidClassException());
+   }
+-     
+-  // Arrays not allowed here, must use JVM_AllocateNewArray 
++
++  // Arrays not allowed here, must use JVM_AllocateNewArray
+   if (Klass::cast(java_lang_Class::as_klassOop(curr_mirror))->oop_is_javaArray() ||
+       Klass::cast(java_lang_Class::as_klassOop(init_mirror))->oop_is_javaArray()) {
+-    ResourceMark rm(THREAD);    
++    ResourceMark rm(THREAD);
+     THROW_0(vmSymbols::java_lang_InvalidClassException());
+   }
+ 
+@@ -3059,43 +3056,43 @@
+   // Make sure klass is initialized, since we are about to instantiate one of them.
+   curr_klass->initialize(CHECK_NULL);
+ 
+- methodHandle m (THREAD, 
+-                 init_klass->find_method(vmSymbols::object_initializer_name(), 
++ methodHandle m (THREAD,
++                 init_klass->find_method(vmSymbols::object_initializer_name(),
+                                          vmSymbols::void_method_signature()));
+   if (m.is_null()) {
+-    ResourceMark rm(THREAD);    
++    ResourceMark rm(THREAD);
+     THROW_MSG_0(vmSymbols::java_lang_NoSuchMethodError(),
+                 methodOopDesc::name_and_sig_as_C_string(Klass::cast(init_klass()),
+                                           vmSymbols::object_initializer_name(),
+                                           vmSymbols::void_method_signature()));
+   }
+-  
++
+   if (curr_klass ==  init_klass && !m->is_public()) {
+-    // Calling the constructor for class 'curr_klass'. 
++    // Calling the constructor for class 'curr_klass'.
+     // Only allow calls to a public no-arg constructor.
+-    // This path corresponds to creating an Externalizable object.    
+-    THROW_0(vmSymbols::java_lang_IllegalAccessException());  
+-  } 
+-  
++    // This path corresponds to creating an Externalizable object.
++    THROW_0(vmSymbols::java_lang_IllegalAccessException());
++  }
++
+   if (!force_verify_field_access(curr_klass(), init_klass(), m->access_flags(), false)) {
+     // subclass 'curr_klass' does not have access to no-arg constructor of 'initcb'
+-    THROW_0(vmSymbols::java_lang_IllegalAccessException());      
++    THROW_0(vmSymbols::java_lang_IllegalAccessException());
+   }
+ 
+   Handle obj = curr_klass->allocate_instance_handle(CHECK_NULL);
+   // Call constructor m. This might call a constructor higher up in the hierarchy
+   JavaCalls::call_default_constructor(thread, m, obj, CHECK_NULL);
+-  
++
+   return JNIHandles::make_local(obj());
+ JVM_END
+ 
+ 
+ JVM_ENTRY(jobject, JVM_AllocateNewArray(JNIEnv *env, jobject obj, jclass currClass, jint length))
+-  JVMWrapper("JVM_AllocateNewArray");  
++  JVMWrapper("JVM_AllocateNewArray");
+   JvmtiVMObjectAllocEventCollector oam;
+   oop mirror = JNIHandles::resolve_non_null(currClass);
+ 
+-  if (java_lang_Class::is_primitive(mirror)) {    
++  if (java_lang_Class::is_primitive(mirror)) {
+     THROW_0(vmSymbols::java_lang_InvalidClassException());
+   }
+   klassOop k = java_lang_Class::as_klassOop(mirror);
+@@ -3116,7 +3113,7 @@
+ JVM_END
+ 
+ 
+-// Return the first non-null class loader up the execution stack, or null 
++// Return the first non-null class loader up the execution stack, or null
+ // if only code from the null class loader is on the stack.
+ 
+ JVM_ENTRY(jobject, JVM_LatestUserDefinedLoader(JNIEnv *env))
+@@ -3133,21 +3130,21 @@
+ JVM_END
+ 
+ 
+-// Load a class relative to the most recent class on the stack  with a non-null 
++// Load a class relative to the most recent class on the stack with a non-null
+ // classloader.
+-// This function has been deprecated and should not be considered part of the 
++// This function has been deprecated and should not be considered part of the
+ // specified JVM interface.
+ 
+-JVM_ENTRY(jclass, JVM_LoadClass0(JNIEnv *env, jobject receiver, 
++JVM_ENTRY(jclass, JVM_LoadClass0(JNIEnv *env, jobject receiver,
+                                  jclass currClass, jstring currClassName))
+-  JVMWrapper("JVM_LoadClass0");  
++  JVMWrapper("JVM_LoadClass0");
+   // Receiver is not used
+   ResourceMark rm(THREAD);
+ 
+   // Class name argument is not guaranteed to be in internal format
+   Handle classname (THREAD, JNIHandles::resolve_non_null(currClassName));
+   Handle string = java_lang_String::internalize_classname(classname, CHECK_NULL);
+-  
++
+   const char* str = java_lang_String::as_utf8_string(string());
+ 
+   if (str == NULL || (int)strlen(str) > symbolOopDesc::max_length()) {
+@@ -3158,12 +3155,12 @@
+ 
+   symbolHandle name = oopFactory::new_symbol_handle(str, CHECK_NULL);
+   Handle curr_klass (THREAD, JNIHandles::resolve(currClass));
+-  // Find the most recent class on the stack with a non-null classloader  
++  // Find the most recent class on the stack with a non-null classloader
+   oop loader = NULL;
+   oop protection_domain = NULL;
+   if (curr_klass.is_null()) {
+-    for (vframeStream vfst(thread); 
+-         !vfst.at_end() && loader == NULL; 
++    for (vframeStream vfst(thread);
++         !vfst.at_end() && loader == NULL;
+          vfst.next()) {
+       if (!vfst.method()->is_native()) {
+         klassOop holder = vfst.method()->method_holder();
+@@ -3178,13 +3175,13 @@
+   }
+   Handle h_loader(THREAD, loader);
+   Handle h_prot  (THREAD, protection_domain);
+-  return find_class_from_class_loader(env, name, true, h_loader, h_prot, 
++  return find_class_from_class_loader(env, name, true, h_loader, h_prot,
+                                       false, thread);
+ JVM_END
+ 
+ 
+ // Array ///////////////////////////////////////////////////////////////////////////////////////////
+- 
++
+ 
+ // resolve array handle and check arguments
+ static inline arrayOop check_array(JNIEnv *env, jobject arr, bool type_array_only, TRAPS) {
+@@ -3195,7 +3192,7 @@
+   if (!a->is_javaArray() || (type_array_only && !a->is_typeArray())) {
+     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "Argument is not an array");
+   }
+-  return arrayOop(a); 
++  return arrayOop(a);
+ }
+ 
+ 
+@@ -3229,7 +3226,7 @@
+     Reflection::widen(&value, type, wide_type, CHECK_(value));
+   }
+   return value;
+-JVM_END 
++JVM_END
+ 
+ 
+ JVM_ENTRY(void, JVM_SetArrayElement(JNIEnv *env, jobject arr, jint index, jobject val))
+@@ -3268,7 +3265,7 @@
+ 
+ 
+ JVM_ENTRY(jobject, JVM_NewMultiArray(JNIEnv *env, jclass eltClass, jintArray dim))
+-  JVMWrapper("JVM_NewMultiArray");  
++  JVMWrapper("JVM_NewMultiArray");
+   JvmtiVMObjectAllocEventCollector oam;
+   arrayOop dim_array = check_array(env, dim, true, CHECK_NULL);
+   oop element_mirror = JNIHandles::resolve(eltClass);
+@@ -3358,7 +3355,7 @@
+ JVM_LEAF(jint, JVM_RecvFrom(jint fd, char *buf, int nBytes, int flags, struct sockaddr *from, int *fromlen))
+   JVMWrapper2("JVM_RecvFrom (0x%x)", fd);
+   //%note jvm_r6
+-  return hpi::recvfrom(fd, buf, nBytes, flags, from, fromlen); 
++  return hpi::recvfrom(fd, buf, nBytes, flags, from, fromlen);
+ JVM_END
+ 
+ 
+@@ -3425,8 +3422,8 @@
+ // Library support ///////////////////////////////////////////////////////////////////////////
+ 
+ JVM_ENTRY_NO_ENV(void*, JVM_LoadLibrary(const char* name))
+-  //%note jvm_ct  
+-  JVMWrapper2("JVM_LoadLibrary (%s)", name);  
++  //%note jvm_ct
++  JVMWrapper2("JVM_LoadLibrary (%s)", name);
+   char ebuf[1024];
+   void *load_result;
+   {
+@@ -3437,10 +3434,10 @@
+     char msg[1024];
+     jio_snprintf(msg, sizeof msg, "%s: %s", name, ebuf);
+     // Since 'ebuf' may contain a string encoded using
+-    // platform encoding scheme, we need to pass 
+-    // Exceptions::unsafe_to_utf8 to the new_exception method 
++    // platform encoding scheme, we need to pass
++    // Exceptions::unsafe_to_utf8 to the new_exception method
+     // as the last argument. See bug 6367357.
+-    Handle h_exception = 
++    Handle h_exception =
+       Exceptions::new_exception(thread,
+                                 vmSymbols::java_lang_UnsatisfiedLinkError(),
+                                 msg, Exceptions::unsafe_to_utf8);
+@@ -3451,14 +3448,14 @@
+ JVM_END
+ 
+ 
+-JVM_LEAF(void, JVM_UnloadLibrary(void* handle))  
+-  JVMWrapper("JVM_UnloadLibrary");  
++JVM_LEAF(void, JVM_UnloadLibrary(void* handle))
++  JVMWrapper("JVM_UnloadLibrary");
+   hpi::dll_unload(handle);
+ JVM_END
+ 
+ 
+-JVM_LEAF(void*, JVM_FindLibraryEntry(void* handle, const char* name))  
+-  JVMWrapper2("JVM_FindLibraryEntry (%s)", name);  
++JVM_LEAF(void*, JVM_FindLibraryEntry(void* handle, const char* name))
++  JVMWrapper2("JVM_FindLibraryEntry (%s)", name);
+   return hpi::dll_lookup(handle, name);
+ JVM_END
+ 
+@@ -3515,15 +3512,15 @@
+ 
+ JNIEXPORT jint JNICALL JVM_RawMonitorEnter(void *mon) {
+   VM_Exit::block_if_vm_exited();
+-  JVMWrapper("JVM_RawMonitorEnter");  
++  JVMWrapper("JVM_RawMonitorEnter");
+   ((Mutex*) mon)->jvm_raw_lock();
+   return 0;
+ }
+ 
+ 
+-JNIEXPORT void JNICALL JVM_RawMonitorExit(void *mon) { 
++JNIEXPORT void JNICALL JVM_RawMonitorExit(void *mon) {
+   VM_Exit::block_if_vm_exited();
+-  JVMWrapper("JVM_RawMonitorExit");  
++  JVMWrapper("JVM_RawMonitorExit");
+   ((Mutex*) mon)->jvm_raw_unlock();
+ }
+ 
+@@ -3572,15 +3569,15 @@
+ 
+ 
+ // Serialization
+-JVM_ENTRY(void, JVM_SetPrimitiveFieldValues(JNIEnv *env, jclass cb, jobject obj, 
++JVM_ENTRY(void, JVM_SetPrimitiveFieldValues(JNIEnv *env, jclass cb, jobject obj,
+                                             jlongArray fieldIDs, jcharArray typecodes, jbyteArray data))
+   assert(!JDK_Version::is_gte_jdk14x_version(), "should only be used in 1.3.1 and earlier");
+-                                   
++
+   typeArrayOop tcodes = typeArrayOop(JNIHandles::resolve(typecodes));
+   typeArrayOop dbuf   = typeArrayOop(JNIHandles::resolve(data));
+   typeArrayOop fids   = typeArrayOop(JNIHandles::resolve(fieldIDs));
+   oop          o      = JNIHandles::resolve(obj);
+-                                          
++
+   if (o == NULL || fids == NULL  || dbuf == NULL  || tcodes == NULL) {
+     THROW(vmSymbols::java_lang_NullPointerException());
+   }
+@@ -3591,14 +3588,14 @@
+   if (tcodes->length() < nfids) {
+     THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException());
+   }
+-     
++
+   jsize off = 0;
+   /* loop through fields, setting values */
+   for (jsize i = 0; i < nfids; i++) {
+     jfieldID fid = (jfieldID)(intptr_t) fids->long_at(i);
+     int field_offset;
+     if (fid != NULL) {
+-      // NULL is a legal value for fid, but retrieving the field offset 
++      // NULL is a legal value for fid, but retrieving the field offset
+       // would trigger an assertion in that case
+       field_offset = jfieldIDWorkaround::from_instance_jfieldID(o->klass(), fid);
+     }
+@@ -3633,7 +3630,7 @@
+           jshort val = ((dbuf->byte_at(off + 0) & 0xFF) << 8)
+                      + ((dbuf->byte_at(off + 1) & 0xFF) << 0);
+           o->short_field_put(field_offset, val);
+-	}
++        }
+         off += 2;
+         break;
+ 
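The 'S' case above reconstructs a big-endian jshort from two buffer bytes with shifts and masks. The same scheme in standalone, runnable form (plain C++, with byte_at replaced by array indexing):

#include <cassert>
#include <cstdint>

// Decode a 16-bit big-endian value exactly as the 'S' case does:
// high byte shifted into bits 15..8, low byte into bits 7..0.
static int16_t decode_be16(const unsigned char* p) {
  return (int16_t)(((p[0] & 0xFF) << 8) | ((p[1] & 0xFF) << 0));
}

int main() {
  const unsigned char buf[2] = { 0x12, 0x34 };
  assert(decode_be16(buf) == 0x1234);
  return 0;
}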
+@@ -3647,7 +3644,7 @@
+         }
+         off += 4;
+         break;
+-        
++
+       case 'F':
+         if (fid != NULL) {
+           jint ival = ((dbuf->byte_at(off + 0) & 0xFF) << 24)
+@@ -3659,7 +3656,7 @@
+         }
+         off += 4;
+         break;
+-       
++
+       case 'J':
+         if (fid != NULL) {
+           jlong lval = (((jlong) dbuf->byte_at(off + 0) & 0xFF) << 56)
+@@ -3674,7 +3671,7 @@
+         }
+         off += 8;
+         break;
+-	
++
+       case 'D':
+         if (fid != NULL) {
+           jlong lval = (((jlong) dbuf->byte_at(off + 0) & 0xFF) << 56)
+@@ -3690,7 +3687,7 @@
+         }
+         off += 8;
+         break;
+-	
++
+       default:
+         // Illegal typecode
+         THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "illegal typecode");
+@@ -3718,7 +3715,7 @@
+   if (tcodes->length() < nfids) {
+     THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException());
+   }
+-  
++
+   /* loop through fields, fetching values */
+   jsize off = 0;
+   for (jsize i = 0; i < nfids; i++) {
+@@ -3739,7 +3736,7 @@
+        case 'B':
+          dbuf->byte_at_put(off++, o->byte_field(field_offset));
+          break;
+-	
++
+        case 'C':
+          {
+            jchar val = o->char_field(field_offset);
+@@ -3747,7 +3744,7 @@
+            dbuf->byte_at_put(off++, (val >> 0) & 0xFF);
+          }
+          break;
+-         
++
+        case 'S':
+          {
+            jshort val = o->short_field(field_offset);
+@@ -3755,7 +3752,7 @@
+            dbuf->byte_at_put(off++, (val >> 0) & 0xFF);
+          }
+          break;
+-         
++
+        case 'I':
+          {
+            jint val = o->int_field(field_offset);
+@@ -3765,7 +3762,7 @@
+            dbuf->byte_at_put(off++, (val >> 0)  & 0xFF);
+          }
+          break;
+-         
++
+        case 'F':
+          {
+            jfloat fval = o->float_field(field_offset);
+@@ -3776,7 +3773,7 @@
+            dbuf->byte_at_put(off++, (ival >> 0)  & 0xFF);
+          }
+          break;
+-         
++
+        case 'J':
+          {
+            jlong val = o->long_field(field_offset);
+@@ -3790,7 +3787,7 @@
+            dbuf->byte_at_put(off++, (val >> 0)  & 0xFF);
+          }
+          break;
+-         
++
+        case 'D':
+          {
+            jdouble dval = o->double_field(field_offset);
+@@ -3805,7 +3802,7 @@
+            dbuf->byte_at_put(off++, (lval >> 0)  & 0xFF);
+          }
+          break;
+-         
++
+        default:
+          // Illegal typecode
+          THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "illegal typecode");
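The 'D' case above serializes a jdouble by reading its raw bit pattern into a jlong and emitting eight big-endian bytes. A standalone equivalent, assuming 64-bit IEEE doubles, with memcpy standing in for the VM's double-to-bits cast:

#include <cstdint>
#include <cstring>

// Emit a double as 8 big-endian bytes via its raw bit pattern,
// mirroring the 'D' case: bits 63..56 first, bits 7..0 last.
static void encode_be_double(double d, unsigned char out[8]) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof bits);  // portable type pun
  for (int i = 0; i < 8; i++) {
    out[i] = (unsigned char)((bits >> (56 - 8 * i)) & 0xFF);
  }
}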
+@@ -3818,7 +3815,7 @@
+ 
+ jclass find_class_from_class_loader(JNIEnv* env, symbolHandle name, jboolean init, Handle loader, Handle protection_domain, jboolean throwError, TRAPS) {
+   // Security Note:
+-  //   The Java level wrapper will perform the necessary security check allowing 
++  //   The Java level wrapper will perform the necessary security check allowing
+   //   us to pass the NULL as the initiating class loader.
+   klassOop klass = SystemDictionary::resolve_or_fail(name, loader, protection_domain, throwError != 0, CHECK_NULL);
+   KlassHandle klass_handle(THREAD, klass);
+@@ -3875,7 +3872,7 @@
+ 
+ #ifdef SUPPORT_OLD_REFLECTION
+ 
+-JVM_ENTRY(jobjectArray, JVM_GetClassFields(JNIEnv *env, jclass cls, jint which))  
++JVM_ENTRY(jobjectArray, JVM_GetClassFields(JNIEnv *env, jclass cls, jint which))
+   JVMWrapper("JVM_GetClassFields");
+   JvmtiVMObjectAllocEventCollector oam;
+   oop mirror = JNIHandles::resolve_non_null(cls);
+@@ -3884,7 +3881,7 @@
+ JVM_END
+ 
+ 
+-JVM_ENTRY(jobjectArray, JVM_GetClassMethods(JNIEnv *env, jclass cls, jint which))  
++JVM_ENTRY(jobjectArray, JVM_GetClassMethods(JNIEnv *env, jclass cls, jint which))
+   JVMWrapper("JVM_GetClassMethods");
+   JvmtiVMObjectAllocEventCollector oam;
+   oop mirror = JNIHandles::resolve_non_null(cls);
+@@ -3894,7 +3891,7 @@
+ JVM_END
+ 
+ 
+-JVM_ENTRY(jobjectArray, JVM_GetClassConstructors(JNIEnv *env, jclass cls, jint which))  
++JVM_ENTRY(jobjectArray, JVM_GetClassConstructors(JNIEnv *env, jclass cls, jint which))
+   JVMWrapper("JVM_GetClassConstructors");
+   JvmtiVMObjectAllocEventCollector oam;
+   oop mirror = JNIHandles::resolve_non_null(cls);
+@@ -3904,29 +3901,29 @@
+ JVM_END
+ 
+ 
+-JVM_ENTRY(jobject, JVM_GetClassField(JNIEnv *env, jclass cls, jstring name, jint which))  
++JVM_ENTRY(jobject, JVM_GetClassField(JNIEnv *env, jclass cls, jstring name, jint which))
+   JVMWrapper("JVM_GetClassField");
+   JvmtiVMObjectAllocEventCollector oam;
+   if (name == NULL) return NULL;
+   Handle str (THREAD, JNIHandles::resolve_non_null(name));
+ 
+   const char* cstr = java_lang_String::as_utf8_string(str());
+-  symbolHandle field_name = 
++  symbolHandle field_name =
+            symbolHandle(THREAD, SymbolTable::probe(cstr, (int)strlen(cstr)));
+   if (field_name.is_null()) {
+-    THROW_0(vmSymbols::java_lang_NoSuchFieldException());    
++    THROW_0(vmSymbols::java_lang_NoSuchFieldException());
+   }
+ 
+   oop mirror = JNIHandles::resolve_non_null(cls);
+   oop result = Reflection::reflect_field(mirror, field_name(), which, CHECK_NULL);
+-  if (result == NULL) {    
+-    THROW_0(vmSymbols::java_lang_NoSuchFieldException());    
++  if (result == NULL) {
++    THROW_0(vmSymbols::java_lang_NoSuchFieldException());
+   }
+   return JNIHandles::make_local(env, result);
+ JVM_END
+ 
+ 
+-JVM_ENTRY(jobject, JVM_GetClassMethod(JNIEnv *env, jclass cls, jstring name, jobjectArray types, jint which))  
++JVM_ENTRY(jobject, JVM_GetClassMethod(JNIEnv *env, jclass cls, jstring name, jobjectArray types, jint which))
+   JVMWrapper("JVM_GetClassMethod");
+   JvmtiVMObjectAllocEventCollector oam;
+   if (name == NULL) {
+@@ -3935,7 +3932,7 @@
+   Handle str (THREAD, JNIHandles::resolve_non_null(name));
+ 
+   const char* cstr = java_lang_String::as_utf8_string(str());
+-  symbolHandle method_name = 
++  symbolHandle method_name =
+           symbolHandle(THREAD, SymbolTable::probe(cstr, (int)strlen(cstr)));
+   if (method_name.is_null()) {
+     THROW_0(vmSymbols::java_lang_NoSuchMethodException());
+@@ -3943,7 +3940,7 @@
+ 
+   oop mirror = JNIHandles::resolve_non_null(cls);
+   objArrayHandle tarray (THREAD, objArrayOop(JNIHandles::resolve(types)));
+-  oop result = Reflection::reflect_method(mirror, method_name, tarray, 
++  oop result = Reflection::reflect_method(mirror, method_name, tarray,
+                                           which, CHECK_NULL);
+   if (result == NULL) {
+     THROW_0(vmSymbols::java_lang_NoSuchMethodException());
+@@ -3952,7 +3949,7 @@
+ JVM_END
+ 
+ 
+-JVM_ENTRY(jobject, JVM_GetClassConstructor(JNIEnv *env, jclass cls, jobjectArray types, jint which))  
++JVM_ENTRY(jobject, JVM_GetClassConstructor(JNIEnv *env, jclass cls, jobjectArray types, jint which))
+   JVMWrapper("JVM_GetClassConstructor");
+   JvmtiVMObjectAllocEventCollector oam;
+   oop mirror = JNIHandles::resolve_non_null(cls);
+@@ -3975,19 +3972,19 @@
+   if (resolved_constructor == NULL) {
+     klassOop k = java_lang_Class::as_klassOop(mirror());
+     // The java.lang.Class object caches a resolved constructor if all the checks
+-    // below were done successfully and a constructor was found. 
+-  
++    // below were done successfully and a constructor was found.
++
+     // Do class based checks
+     if (java_lang_Class::is_primitive(mirror())) {
+       const char* msg = "";
+-      if      (mirror == SystemDictionary::bool_mirror())   msg = "java/lang/Boolean";
+-      else if (mirror == SystemDictionary::char_mirror())   msg = "java/lang/Character";
+-      else if (mirror == SystemDictionary::float_mirror())  msg = "java/lang/Float";
+-      else if (mirror == SystemDictionary::double_mirror()) msg = "java/lang/Double";
+-      else if (mirror == SystemDictionary::byte_mirror())   msg = "java/lang/Byte";
+-      else if (mirror == SystemDictionary::short_mirror())  msg = "java/lang/Short";
+-      else if (mirror == SystemDictionary::int_mirror())    msg = "java/lang/Integer";
+-      else if (mirror == SystemDictionary::long_mirror())   msg = "java/lang/Long";
++      if      (mirror == Universe::bool_mirror())   msg = "java/lang/Boolean";
++      else if (mirror == Universe::char_mirror())   msg = "java/lang/Character";
++      else if (mirror == Universe::float_mirror())  msg = "java/lang/Float";
++      else if (mirror == Universe::double_mirror()) msg = "java/lang/Double";
++      else if (mirror == Universe::byte_mirror())   msg = "java/lang/Byte";
++      else if (mirror == Universe::short_mirror())  msg = "java/lang/Short";
++      else if (mirror == Universe::int_mirror())    msg = "java/lang/Integer";
++      else if (mirror == Universe::long_mirror())   msg = "java/lang/Long";
+       THROW_MSG_0(vmSymbols::java_lang_NullPointerException(), msg);
+     }
+ 
+@@ -4006,7 +4003,7 @@
+ 
+     // Cache result in java.lang.Class object. Does not have to be MT safe.
+     java_lang_Class::set_resolved_constructor(mirror(), resolved_constructor);
+-  } 
++  }
+ 
+   assert(resolved_constructor != NULL, "sanity check");
+   methodHandle constructor = methodHandle(THREAD, resolved_constructor);
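The cache comment above ("does not have to be MT safe") works because every racing thread resolves the same constructor and stores an identical value, so the unsynchronized write is a benign race. Reduced to its shape, with invented names (a sketch only):

// Benign-race cache: all writers store the same deterministic value, so the
// unsynchronized publish is harmless (Resolved and the accessors are invented).
Resolved* cached = lookup_cache(mirror);   // hypothetical accessor
if (cached == NULL) {
  cached = resolve_constructor(mirror);    // deterministic for a given class
  store_cache(mirror, cached);             // racing stores write identical data
}
use(cached);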
+@@ -4019,23 +4016,23 @@
+   klassOop caller_klass = NULL;
+   if (UsePrivilegedStack) {
+     caller_klass = thread->security_get_caller_class(2);
+-     
++
+     if (!Reflection::verify_class_access(caller_klass, klass(), false) ||
+-        !Reflection::verify_field_access(caller_klass, 
++        !Reflection::verify_field_access(caller_klass,
++                                         klass(),
+                                          klass(),
+-                                         klass(), 
+-                                         constructor->access_flags(), 
+-                                         false, 
++                                         constructor->access_flags(),
++                                         false,
+                                          true)) {
+       ResourceMark rm(THREAD);
+       THROW_MSG_0(vmSymbols::java_lang_IllegalAccessException(), klass->external_name());
+-    }    
++    }
+   }
+ 
+   // Allocate object and call constructor
+   Handle receiver = klass->allocate_instance_handle(CHECK_NULL);
+   JavaCalls::call_default_constructor(thread, constructor, receiver, CHECK_NULL);
+- 
++
+   jobject res = JNIHandles::make_local(env, receiver());
+   if (JvmtiExport::should_post_vm_object_alloc()) {
+     JvmtiExport::post_vm_object_alloc(JavaThread::current(), receiver());
+@@ -4052,7 +4049,7 @@
+   Handle field_mirror(thread, JNIHandles::resolve(field));
+   Handle receiver    (thread, JNIHandles::resolve(obj));
+   fieldDescriptor fd;
+-  Reflection::resolve_field(field_mirror, receiver, &fd, false, CHECK_NULL);  
++  Reflection::resolve_field(field_mirror, receiver, &fd, false, CHECK_NULL);
+   jvalue value;
+   BasicType type = Reflection::field_get(&value, &fd, receiver);
+   oop box = Reflection::box(&value, type, CHECK_NULL);
+@@ -4060,14 +4057,14 @@
+ JVM_END
+ 
+ 
+-JVM_ENTRY(jvalue, JVM_GetPrimitiveField(JNIEnv *env, jobject field, jobject obj, unsigned char wCode)) 
++JVM_ENTRY(jvalue, JVM_GetPrimitiveField(JNIEnv *env, jobject field, jobject obj, unsigned char wCode))
+   JVMWrapper("JVM_GetPrimitiveField");
+   Handle field_mirror(thread, JNIHandles::resolve(field));
+   Handle receiver    (thread, JNIHandles::resolve(obj));
+   fieldDescriptor fd;
+   jvalue value;
+   value.j = 0;
+-  Reflection::resolve_field(field_mirror, receiver, &fd, false, CHECK_(value));  
++  Reflection::resolve_field(field_mirror, receiver, &fd, false, CHECK_(value));
+   BasicType type = Reflection::field_get(&value, &fd, receiver);
+   BasicType wide_type = (BasicType) wCode;
+   if (type != wide_type) {
+@@ -4083,7 +4080,7 @@
+   Handle receiver    (thread, JNIHandles::resolve(obj));
+   oop box = JNIHandles::resolve(val);
+   fieldDescriptor fd;
+-  Reflection::resolve_field(field_mirror, receiver, &fd, true, CHECK);  
++  Reflection::resolve_field(field_mirror, receiver, &fd, true, CHECK);
+   BasicType field_type = fd.field_type();
+   jvalue value;
+   BasicType value_type;
+@@ -4111,7 +4108,7 @@
+ 
+ // Method ///////////////////////////////////////////////////////////////////////////////////////////
+ 
+-JVM_ENTRY(jobject, JVM_InvokeMethod(JNIEnv *env, jobject method, jobject obj, jobjectArray args0))  
++JVM_ENTRY(jobject, JVM_InvokeMethod(JNIEnv *env, jobject method, jobject obj, jobjectArray args0))
+   JVMWrapper("JVM_InvokeMethod");
+   Handle method_handle;
+   if (thread->stack_available((address) &method_handle) >= JVMInvokeMethodSlack) {
+@@ -4129,14 +4126,14 @@
+         JvmtiExport::post_vm_object_alloc(JavaThread::current(), result);
+       }
+     }
+-    return res; 
++    return res;
+   } else {
+     THROW_0(vmSymbols::java_lang_StackOverflowError());
+   }
+ JVM_END
+ 
+ 
+-JVM_ENTRY(jobject, JVM_NewInstanceFromConstructor(JNIEnv *env, jobject c, jobjectArray args0))  
++JVM_ENTRY(jobject, JVM_NewInstanceFromConstructor(JNIEnv *env, jobject c, jobjectArray args0))
+   JVMWrapper("JVM_NewInstanceFromConstructor");
+   oop constructor_mirror = JNIHandles::resolve(c);
+   objArrayHandle args(THREAD, objArrayOop(JNIHandles::resolve(args0)));
+@@ -4238,7 +4235,7 @@
+ JVM_END
+ 
+ // com.sun.tools.attach.VirtualMachine agent properties support
+-// 
++//
+ // Initialize the agent properties with the properties maintained in the VM
+ JVM_ENTRY(jobject, JVM_InitAgentProperties(JNIEnv *env, jobject properties))
+   JVMWrapper("JVM_InitAgentProperties");
+@@ -4303,51 +4300,51 @@
+   // If new thread states are added in future JDK and VM versions,
+   // this should check if the JDK version is compatible with thread
+   // states supported by the VM.  Return NULL if not compatible.
+-  // 
++  //
+   // This function must map the VM java_lang_Thread::ThreadStatus
+   // to the Java thread state that the JDK supports.
+   //
+- 
++
+   typeArrayHandle values_h;
+   switch (javaThreadState) {
+     case JAVA_THREAD_STATE_NEW : {
+       typeArrayOop r = oopFactory::new_typeArray(T_INT, 1, CHECK_NULL);
+-      values_h = typeArrayHandle(THREAD, r); 
+-      values_h->int_at_put(0, java_lang_Thread::NEW); 
+-      break; 
++      values_h = typeArrayHandle(THREAD, r);
++      values_h->int_at_put(0, java_lang_Thread::NEW);
++      break;
+     }
+     case JAVA_THREAD_STATE_RUNNABLE : {
+       typeArrayOop r = oopFactory::new_typeArray(T_INT, 1, CHECK_NULL);
+-      values_h = typeArrayHandle(THREAD, r); 
+-      values_h->int_at_put(0, java_lang_Thread::RUNNABLE); 
+-      break; 
++      values_h = typeArrayHandle(THREAD, r);
++      values_h->int_at_put(0, java_lang_Thread::RUNNABLE);
++      break;
+     }
+     case JAVA_THREAD_STATE_BLOCKED : {
+       typeArrayOop r = oopFactory::new_typeArray(T_INT, 1, CHECK_NULL);
+-      values_h = typeArrayHandle(THREAD, r); 
+-      values_h->int_at_put(0, java_lang_Thread::BLOCKED_ON_MONITOR_ENTER); 
+-      break; 
++      values_h = typeArrayHandle(THREAD, r);
++      values_h->int_at_put(0, java_lang_Thread::BLOCKED_ON_MONITOR_ENTER);
++      break;
+     }
+     case JAVA_THREAD_STATE_WAITING : {
+       typeArrayOop r = oopFactory::new_typeArray(T_INT, 2, CHECK_NULL);
+-      values_h = typeArrayHandle(THREAD, r); 
+-      values_h->int_at_put(0, java_lang_Thread::IN_OBJECT_WAIT); 
+-      values_h->int_at_put(1, java_lang_Thread::PARKED); 
+-      break; 
++      values_h = typeArrayHandle(THREAD, r);
++      values_h->int_at_put(0, java_lang_Thread::IN_OBJECT_WAIT);
++      values_h->int_at_put(1, java_lang_Thread::PARKED);
++      break;
+     }
+     case JAVA_THREAD_STATE_TIMED_WAITING : {
+       typeArrayOop r = oopFactory::new_typeArray(T_INT, 3, CHECK_NULL);
+-      values_h = typeArrayHandle(THREAD, r); 
+-      values_h->int_at_put(0, java_lang_Thread::SLEEPING); 
+-      values_h->int_at_put(1, java_lang_Thread::IN_OBJECT_WAIT_TIMED); 
+-      values_h->int_at_put(2, java_lang_Thread::PARKED_TIMED); 
+-      break; 
++      values_h = typeArrayHandle(THREAD, r);
++      values_h->int_at_put(0, java_lang_Thread::SLEEPING);
++      values_h->int_at_put(1, java_lang_Thread::IN_OBJECT_WAIT_TIMED);
++      values_h->int_at_put(2, java_lang_Thread::PARKED_TIMED);
++      break;
+     }
+     case JAVA_THREAD_STATE_TERMINATED : {
+       typeArrayOop r = oopFactory::new_typeArray(T_INT, 1, CHECK_NULL);
+-      values_h = typeArrayHandle(THREAD, r); 
+-      values_h->int_at_put(0, java_lang_Thread::TERMINATED); 
+-      break; 
++      values_h = typeArrayHandle(THREAD, r);
++      values_h->int_at_put(0, java_lang_Thread::TERMINATED);
++      break;
+     }
+     default:
+       // Unknown state - probably incompatible JDK version
+@@ -4366,7 +4363,7 @@
+   // If new thread states are added in future JDK and VM versions,
+   // this should check if the JDK version is compatible with thread
+   // states supported by the VM.  Return NULL if not compatible.
+-  // 
++  //
+   // This function must map the VM java_lang_Thread::ThreadStatus
+   // to the Java thread state that the JDK supports.
+   //
+@@ -4384,63 +4381,63 @@
+   objArrayHandle names_h;
+   switch (javaThreadState) {
+     case JAVA_THREAD_STATE_NEW : {
+-      assert(values_h->length() == 1 && 
++      assert(values_h->length() == 1 &&
+                values_h->int_at(0) == java_lang_Thread::NEW,
+              "Invalid threadStatus value");
+ 
+       objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+                                                1, /* only 1 substate */
+                                                CHECK_NULL);
+-      names_h = objArrayHandle(THREAD, r); 
++      names_h = objArrayHandle(THREAD, r);
+       Handle name = java_lang_String::create_from_str("NEW", CHECK_NULL);
+       names_h->obj_at_put(0, name());
+-      break; 
++      break;
+     }
+     case JAVA_THREAD_STATE_RUNNABLE : {
+-      assert(values_h->length() == 1 && 
++      assert(values_h->length() == 1 &&
+                values_h->int_at(0) == java_lang_Thread::RUNNABLE,
+              "Invalid threadStatus value");
+ 
+       objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+                                                1, /* only 1 substate */
+                                                CHECK_NULL);
+-      names_h = objArrayHandle(THREAD, r); 
++      names_h = objArrayHandle(THREAD, r);
+       Handle name = java_lang_String::create_from_str("RUNNABLE", CHECK_NULL);
+-      names_h->obj_at_put(0, name()); 
+-      break; 
++      names_h->obj_at_put(0, name());
++      break;
+     }
+     case JAVA_THREAD_STATE_BLOCKED : {
+-      assert(values_h->length() == 1 && 
++      assert(values_h->length() == 1 &&
+                values_h->int_at(0) == java_lang_Thread::BLOCKED_ON_MONITOR_ENTER,
+              "Invalid threadStatus value");
+ 
+       objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+                                                1, /* only 1 substate */
+                                                CHECK_NULL);
+-      names_h = objArrayHandle(THREAD, r); 
++      names_h = objArrayHandle(THREAD, r);
+       Handle name = java_lang_String::create_from_str("BLOCKED", CHECK_NULL);
+-      names_h->obj_at_put(0, name()); 
+-      break; 
++      names_h->obj_at_put(0, name());
++      break;
+     }
+     case JAVA_THREAD_STATE_WAITING : {
+-      assert(values_h->length() == 2 && 
++      assert(values_h->length() == 2 &&
+                values_h->int_at(0) == java_lang_Thread::IN_OBJECT_WAIT &&
+                values_h->int_at(1) == java_lang_Thread::PARKED,
+              "Invalid threadStatus value");
+       objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+                                                2, /* number of substates */
+                                                CHECK_NULL);
+-      names_h = objArrayHandle(THREAD, r); 
++      names_h = objArrayHandle(THREAD, r);
+       Handle name0 = java_lang_String::create_from_str("WAITING.OBJECT_WAIT",
+                                                        CHECK_NULL);
+       Handle name1 = java_lang_String::create_from_str("WAITING.PARKED",
+                                                        CHECK_NULL);
+-      names_h->obj_at_put(0, name0()); 
+-      names_h->obj_at_put(1, name1()); 
+-      break; 
++      names_h->obj_at_put(0, name0());
++      names_h->obj_at_put(1, name1());
++      break;
+     }
+     case JAVA_THREAD_STATE_TIMED_WAITING : {
+-      assert(values_h->length() == 3 && 
++      assert(values_h->length() == 3 &&
+                values_h->int_at(0) == java_lang_Thread::SLEEPING &&
+                values_h->int_at(1) == java_lang_Thread::IN_OBJECT_WAIT_TIMED &&
+                values_h->int_at(2) == java_lang_Thread::PARKED_TIMED,
+@@ -4448,29 +4445,29 @@
+       objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+                                                3, /* number of substates */
+                                                CHECK_NULL);
+-      names_h = objArrayHandle(THREAD, r); 
++      names_h = objArrayHandle(THREAD, r);
+       Handle name0 = java_lang_String::create_from_str("TIMED_WAITING.SLEEPING",
+                                                        CHECK_NULL);
+       Handle name1 = java_lang_String::create_from_str("TIMED_WAITING.OBJECT_WAIT",
+                                                        CHECK_NULL);
+       Handle name2 = java_lang_String::create_from_str("TIMED_WAITING.PARKED",
+                                                        CHECK_NULL);
+-      names_h->obj_at_put(0, name0()); 
+-      names_h->obj_at_put(1, name1()); 
+-      names_h->obj_at_put(2, name2()); 
+-      break; 
++      names_h->obj_at_put(0, name0());
++      names_h->obj_at_put(1, name1());
++      names_h->obj_at_put(2, name2());
++      break;
+     }
+     case JAVA_THREAD_STATE_TERMINATED : {
+-      assert(values_h->length() == 1 && 
++      assert(values_h->length() == 1 &&
+                values_h->int_at(0) == java_lang_Thread::TERMINATED,
+              "Invalid threadStatus value");
+       objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+                                                1, /* only 1 substate */
+                                                CHECK_NULL);
+-      names_h = objArrayHandle(THREAD, r); 
++      names_h = objArrayHandle(THREAD, r);
+       Handle name = java_lang_String::create_from_str("TERMINATED", CHECK_NULL);
+-      names_h->obj_at_put(0, name()); 
+-      break; 
++      names_h->obj_at_put(0, name());
++      break;
+     }
+     default:
+       // Unknown state - probably incompatible JDK version
+@@ -4483,7 +4480,7 @@
+ JVM_ENTRY(void, JVM_GetVersionInfo(JNIEnv* env, jvm_version_info* info, size_t info_size))
+ {
+   memset(info, 0, info_size);
+-                                                                                
++
+   info->jvm_version = Abstract_VM_Version::jvm_version();
+   info->update_version = 0;          /* 0 in HotSpot Express VM */
+   info->special_update_version = 0;  /* 0 in HotSpot Express VM */
+@@ -4492,6 +4489,10 @@
+   // consider to expose this new capability in the sun.rt.jvmCapabilities jvmstat
+   // counter defined in runtimeService.cpp.
+   info->is_attachable = AttachListener::is_attach_supported();
++#ifdef KERNEL
++  info->is_kernel_jvm = 1; // true;
++#else  // KERNEL
++  info->is_kernel_jvm = 0; // false;
++#endif // KERNEL
+ }
+ JVM_END
+-
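The is_kernel_jvm flag added above is filled from a compile-time #ifdef and surfaced through JVM_GetVersionInfo. A hedged caller-side sketch, assuming only the jvm_version_info declarations that jvm.h provides (layout not reproduced here):

// Usage sketch: env is assumed to be a valid JNIEnv*.
jvm_version_info info;
JVM_GetVersionInfo(env, &info, sizeof(info));
if (info.is_kernel_jvm) {
  // Kernel VM build: parts of the JDK may be fetched on demand.
}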
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvm.h openjdk/hotspot/src/share/vm/prims/jvm.h
+--- openjdk6/hotspot/src/share/vm/prims/jvm.h	2008-02-28 05:02:41.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvm.h	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jvm.h	1.87 07/08/20 16:26:30 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #ifndef _JAVASOFT_JVM_H_
+@@ -43,20 +40,20 @@
+ extern "C" {
+ #endif
+ 
+-/* 
++/*
+  * This file contains additional functions exported from the VM.
+  * These functions are complementary to the standard JNI support.
+  * There are three parts to this file:
+- * 
++ *
+  * First, this file contains the VM-related functions needed by native
+  * libraries in the standard Java API. For example, the java.lang.Object
+  * class needs VM-level functions that wait for and notify monitors.
+- * 
++ *
+  * Second, this file contains the functions and constant definitions
+  * needed by the byte code verifier and class file format checker.
+  * These functions allow the verifier and format checker to be written
+  * in a VM-independent way.
+- * 
++ *
+  * Third, this file contains various I/O and network operations needed
+  * by the standard Java I/O and network APIs.
+  */
+@@ -114,8 +111,8 @@
+ JVM_NanoTime(JNIEnv *env, jclass ignored);
+ 
+ JNIEXPORT void JNICALL
+-JVM_ArrayCopy(JNIEnv *env, jclass ignored, jobject src, jint src_pos, 
+-	      jobject dst, jint dst_pos, jint length);
++JVM_ArrayCopy(JNIEnv *env, jclass ignored, jobject src, jint src_pos,
++              jobject dst, jint dst_pos, jint length);
+ 
+ JNIEXPORT jobject JNICALL
+ JVM_InitProperties(JNIEnv *env, jobject p);
+@@ -173,13 +170,13 @@
+ JNIEXPORT jint JNICALL
+ JVM_ActiveProcessorCount(void);
+ 
+-JNIEXPORT void * JNICALL 
++JNIEXPORT void * JNICALL
+ JVM_LoadLibrary(const char *name);
+ 
+-JNIEXPORT void JNICALL 
++JNIEXPORT void JNICALL
+ JVM_UnloadLibrary(void * handle);
+ 
+-JNIEXPORT void * JNICALL 
++JNIEXPORT void * JNICALL
+ JVM_FindLibraryEntry(void *handle, const char *name);
+ 
+ JNIEXPORT jboolean JNICALL
+@@ -348,7 +345,7 @@
+ 
+ JNIEXPORT void JNICALL
+ JVM_SetPrimitiveArrayElement(JNIEnv *env, jobject arr, jint index, jvalue v,
+-			     unsigned char vCode);
++                             unsigned char vCode);
+ 
+ JNIEXPORT jobject JNICALL
+ JVM_NewArray(JNIEnv *env, jclass eltClass, jint length);
+@@ -390,14 +387,14 @@
+  */
+ JNIEXPORT jclass JNICALL
+ JVM_FindClassFromClassLoader(JNIEnv *env, const char *name, jboolean init,
+-			     jobject loader, jboolean throwError);
++                             jobject loader, jboolean throwError);
+ 
+ /*
+  * Find a class from a given class.
+  */
+ JNIEXPORT jclass JNICALL
+ JVM_FindClassFromClass(JNIEnv *env, const char *name, jboolean init,
+-			     jclass from);
++                             jclass from);
+ 
+ /* Find a loaded class cached by the VM */
+ JNIEXPORT jclass JNICALL
+@@ -563,8 +560,8 @@
+  */
+ 
+ JNIEXPORT jobject JNICALL
+-JVM_DoPrivileged(JNIEnv *env, jclass cls, 
+-		 jobject action, jobject context, jboolean wrapException);
++JVM_DoPrivileged(JNIEnv *env, jclass cls,
++                 jobject action, jobject context, jboolean wrapException);
+ 
+ JNIEXPORT jobject JNICALL
+ JVM_GetInheritedAccessControlContext(JNIEnv *env, jclass cls);
+@@ -624,7 +621,7 @@
+ 
+ /*
+  * Returns the constant pool types in the buffer provided by "types."
+- */  
++ */
+ JNIEXPORT void JNICALL
+ JVM_GetClassCPTypes(JNIEnv *env, jclass cb, unsigned char *types);
+ 
+@@ -651,7 +648,7 @@
+  */
+ JNIEXPORT void JNICALL
+ JVM_GetMethodIxExceptionIndexes(JNIEnv *env, jclass cb, jint method_index,
+-				unsigned short *exceptions); 
++                                unsigned short *exceptions);
+ /*
+  * Returns the number of exceptions raised by a given method.
+  * The method is identified by method_index.
+@@ -666,8 +663,8 @@
+  * The method is identified by method_index.
+  */
+ JNIEXPORT void JNICALL
+-JVM_GetMethodIxByteCode(JNIEnv *env, jclass cb, jint method_index, 
+-			unsigned char *code);
++JVM_GetMethodIxByteCode(JNIEnv *env, jclass cb, jint method_index,
++                        unsigned char *code);
+ 
+ /*
+  * Returns the length of the byte code sequence of a given method.
+@@ -694,8 +691,8 @@
+  */
+ JNIEXPORT void JNICALL
+ JVM_GetMethodIxExceptionTableEntry(JNIEnv *env, jclass cb, jint method_index,
+-				   jint entry_index,
+-				   JVM_ExceptionTableEntryType *entry);
++                                   jint entry_index,
++                                   JVM_ExceptionTableEntryType *entry);
+ 
+ /*
+  * Returns the length of the exception table of a given method.
+@@ -721,28 +718,28 @@
+ /*
+  * Returns the number of local variables of a given method.
+  * The method is identified by method_index.
+- */ 
++ */
+ JNIEXPORT jint JNICALL
+ JVM_GetMethodIxLocalsCount(JNIEnv *env, jclass cb, int index);
+ 
+ /*
+  * Returns the number of arguments (including this pointer) of a given method.
+  * The method is identified by method_index.
+- */ 
++ */
+ JNIEXPORT jint JNICALL
+ JVM_GetMethodIxArgsSize(JNIEnv *env, jclass cb, int index);
+ 
+-/* 
++/*
+  * Returns the maximum amount of stack (in words) used by a given method.
+  * The method is identified by method_index.
+- */ 
++ */
+ JNIEXPORT jint JNICALL
+ JVM_GetMethodIxMaxStack(JNIEnv *env, jclass cb, int index);
+ 
+ /*
+  * Is a given method a constructor.
+  * The method is identified by method_index.
+- */ 
++ */
+ JNIEXPORT jboolean JNICALL
+ JVM_IsConstructorIx(JNIEnv *env, jclass cb, int index);
+ 
+@@ -881,13 +878,13 @@
+ JVM_GetCPMethodModifiers(JNIEnv *env, jclass cb, int index, jclass calledClass);
+ 
+ /*
+- * Releases the UTF string obtained from the VM. 
++ * Releases the UTF string obtained from the VM.
+  */
+ JNIEXPORT void JNICALL
+ JVM_ReleaseUTF(const char *utf);
+ 
+ /*
+- * Compare if two classes are in the same package. 
++ * Compare if two classes are in the same package.
+  */
+ JNIEXPORT jboolean JNICALL
+ JVM_IsSameClassPackage(JNIEnv *env, jclass class1, jclass class2);
+@@ -908,7 +905,7 @@
+ #define JVM_ACC_NATIVE        0x0100  /* implemented in C */
+ #define JVM_ACC_INTERFACE     0x0200  /* class is an interface */
+ #define JVM_ACC_ABSTRACT      0x0400  /* no definition provided */
+-#define JVM_ACC_STRICT	      0x0800  /* strict floating point */
++#define JVM_ACC_STRICT        0x0800  /* strict floating point */
+ #define JVM_ACC_SYNTHETIC     0x1000  /* compiler-generated class, method or field */
+ #define JVM_ACC_ANNOTATION    0x2000  /* annotation type */
+ #define JVM_ACC_ENUM          0x4000  /* field is declared as element of enum */
+@@ -927,18 +924,18 @@
+ #define JVM_ACC_NATIVE_BIT        8
+ #define JVM_ACC_INTERFACE_BIT     9
+ #define JVM_ACC_ABSTRACT_BIT      10
+-#define JVM_ACC_STRICT_BIT	  11
++#define JVM_ACC_STRICT_BIT        11
+ #define JVM_ACC_SYNTHETIC_BIT     12
+-#define JVM_ACC_ANNOTATION_BIT	  13
++#define JVM_ACC_ANNOTATION_BIT    13
+ #define JVM_ACC_ENUM_BIT          14
+ 
+ // NOTE: replicated in SA in vm/agent/sun/jvm/hotspot/utilities/ConstantTag.java
+ enum {
+     JVM_CONSTANT_Utf8 = 1,
+-    JVM_CONSTANT_Unicode,		/* unused */
++    JVM_CONSTANT_Unicode,               /* unused */
+     JVM_CONSTANT_Integer,
+     JVM_CONSTANT_Float,
+-    JVM_CONSTANT_Long,      
++    JVM_CONSTANT_Long,
+     JVM_CONSTANT_Double,
+     JVM_CONSTANT_Class,
+     JVM_CONSTANT_String,
+@@ -961,38 +958,38 @@
+ 
+ /* JVM method signatures */
+ 
+-#define JVM_SIGNATURE_ARRAY		'['
+-#define JVM_SIGNATURE_BYTE		'B'
+-#define JVM_SIGNATURE_CHAR		'C'
+-#define JVM_SIGNATURE_CLASS		'L'
+-#define JVM_SIGNATURE_ENDCLASS	        ';'
+-#define JVM_SIGNATURE_ENUM		'E'
+-#define JVM_SIGNATURE_FLOAT		'F'
++#define JVM_SIGNATURE_ARRAY             '['
++#define JVM_SIGNATURE_BYTE              'B'
++#define JVM_SIGNATURE_CHAR              'C'
++#define JVM_SIGNATURE_CLASS             'L'
++#define JVM_SIGNATURE_ENDCLASS          ';'
++#define JVM_SIGNATURE_ENUM              'E'
++#define JVM_SIGNATURE_FLOAT             'F'
+ #define JVM_SIGNATURE_DOUBLE            'D'
+-#define JVM_SIGNATURE_FUNC		'('
+-#define JVM_SIGNATURE_ENDFUNC	        ')'
+-#define JVM_SIGNATURE_INT		'I'
+-#define JVM_SIGNATURE_LONG		'J'
+-#define JVM_SIGNATURE_SHORT		'S'
+-#define JVM_SIGNATURE_VOID		'V'
+-#define JVM_SIGNATURE_BOOLEAN	        'Z'
++#define JVM_SIGNATURE_FUNC              '('
++#define JVM_SIGNATURE_ENDFUNC           ')'
++#define JVM_SIGNATURE_INT               'I'
++#define JVM_SIGNATURE_LONG              'J'
++#define JVM_SIGNATURE_SHORT             'S'
++#define JVM_SIGNATURE_VOID              'V'
++#define JVM_SIGNATURE_BOOLEAN           'Z'
+ 
+-/* 
++/*
+  * A function defined by the byte-code verifier and called by the VM.
+  * This is not a function implemented in the VM.
+  *
+  * Returns JNI_FALSE if verification fails. A detailed error message
+  * will be placed in msg_buf, whose length is specified by buf_len.
+- */ 
+-typedef jboolean (*verifier_fn_t)(JNIEnv *env, 
+-				  jclass cb,
+-				  char * msg_buf, 
+-				  jint buf_len);
++ */
++typedef jboolean (*verifier_fn_t)(JNIEnv *env,
++                                  jclass cb,
++                                  char * msg_buf,
++                                  jint buf_len);
+ 
+ 
+ /*
+  * Support for a VM-independent class format checker.
+- */ 
++ */
+ typedef struct {
+     unsigned long code;    /* byte code */
+     unsigned long excs;    /* exceptions */
+@@ -1013,9 +1010,9 @@
+     method_size_info main;     /* used everywhere else */
+ } class_size_info;
+ 
+-/* 
++/*
+  * Functions defined in libjava.so to perform string conversions.
+- * 
++ *
+  */
+ 
+ typedef jstring (*to_java_string_fn_t)(JNIEnv *env, char *str);
+@@ -1025,7 +1022,7 @@
+ /* This is the function defined in libjava.so that performs class
+  * format checks. This functions fills in size information about
+  * the class file and returns:
+- *  
++ *
+  *   0: good
+  *  -1: out of memory
+  *  -2: bad format
+@@ -1034,48 +1031,48 @@
+  */
+ 
+ typedef jint (*check_format_fn_t)(char *class_name,
+-				  unsigned char *data,
+-				  unsigned int data_size,
+-				  class_size_info *class_size,
+-				  char *message_buffer,
+-				  jint buffer_length,
+-				  jboolean measure_only,
+-				  jboolean check_relaxed);
++                                  unsigned char *data,
++                                  unsigned int data_size,
++                                  class_size_info *class_size,
++                                  char *message_buffer,
++                                  jint buffer_length,
++                                  jboolean measure_only,
++                                  jboolean check_relaxed);
+ 
+ #define JVM_RECOGNIZED_CLASS_MODIFIERS (JVM_ACC_PUBLIC | \
+-					JVM_ACC_FINAL | \
+-					JVM_ACC_SUPER | \
+-					JVM_ACC_INTERFACE | \
+-					JVM_ACC_ABSTRACT | \
+-					JVM_ACC_ANNOTATION | \
+-					JVM_ACC_ENUM | \
+-					JVM_ACC_SYNTHETIC)
+-       
++                                        JVM_ACC_FINAL | \
++                                        JVM_ACC_SUPER | \
++                                        JVM_ACC_INTERFACE | \
++                                        JVM_ACC_ABSTRACT | \
++                                        JVM_ACC_ANNOTATION | \
++                                        JVM_ACC_ENUM | \
++                                        JVM_ACC_SYNTHETIC)
++
+ #define JVM_RECOGNIZED_FIELD_MODIFIERS (JVM_ACC_PUBLIC | \
+-					JVM_ACC_PRIVATE | \
+-					JVM_ACC_PROTECTED | \
+-					JVM_ACC_STATIC | \
+-					JVM_ACC_FINAL | \
+-					JVM_ACC_VOLATILE | \
+-					JVM_ACC_TRANSIENT | \
+-					JVM_ACC_ENUM | \
+-					JVM_ACC_SYNTHETIC)
++                                        JVM_ACC_PRIVATE | \
++                                        JVM_ACC_PROTECTED | \
++                                        JVM_ACC_STATIC | \
++                                        JVM_ACC_FINAL | \
++                                        JVM_ACC_VOLATILE | \
++                                        JVM_ACC_TRANSIENT | \
++                                        JVM_ACC_ENUM | \
++                                        JVM_ACC_SYNTHETIC)
+ 
+ #define JVM_RECOGNIZED_METHOD_MODIFIERS (JVM_ACC_PUBLIC | \
+-					 JVM_ACC_PRIVATE | \
+-					 JVM_ACC_PROTECTED | \
+-					 JVM_ACC_STATIC | \
+-					 JVM_ACC_FINAL | \
+-					 JVM_ACC_SYNCHRONIZED | \
+-					 JVM_ACC_BRIDGE | \
+-					 JVM_ACC_VARARGS | \
+-					 JVM_ACC_NATIVE | \
+-					 JVM_ACC_ABSTRACT | \
+-					 JVM_ACC_STRICT | \
+-					 JVM_ACC_SYNTHETIC)
++                                         JVM_ACC_PRIVATE | \
++                                         JVM_ACC_PROTECTED | \
++                                         JVM_ACC_STATIC | \
++                                         JVM_ACC_FINAL | \
++                                         JVM_ACC_SYNCHRONIZED | \
++                                         JVM_ACC_BRIDGE | \
++                                         JVM_ACC_VARARGS | \
++                                         JVM_ACC_NATIVE | \
++                                         JVM_ACC_ABSTRACT | \
++                                         JVM_ACC_STRICT | \
++                                         JVM_ACC_SYNTHETIC)
+ 
+-/* 
+- * This is the function defined in libjava.so to perform path 
++/*
++ * This is the function defined in libjava.so to perform path
+  * canonicalization. VM call this function before opening jar files
+  * to load system classes.
+  *
+@@ -1089,17 +1086,17 @@
+ 
+ /* Note that the JVM IO functions are expected to return JVM_IO_ERR
+  * when there is any kind of error. The caller can then use the
+- * platform specific support (e.g., errno) to get the detailed 
++ * platform specific support (e.g., errno) to get the detailed
+  * error info.  The JVM_GetLastErrorString procedure may also be used
+  * to obtain a descriptive error string.
+  */
+ #define JVM_IO_ERR  (-1)
+ 
+ /* For interruptible IO. Returning JVM_IO_INTR indicates that an IO
+- * operation has been disrupted by Thread.interrupt. There are a 
++ * operation has been disrupted by Thread.interrupt. There are a
+  * number of technical difficulties related to interruptible IO that
+  * need to be solved. For example, most existing programs do not handle
+- * InterruptedIOExceptions specially, they simply treat those as any 
++ * InterruptedIOExceptions specially; they simply treat those as any
+  * IOExceptions, which typically indicate fatal errors.
+  *
+  * There are also two modes of operation for interruptible IO. In the
+@@ -1112,7 +1109,7 @@
+  * easier, but it's not clear that's the right semantics.
+  *
+  * Interruptible IO is not supported on Win32. It can be enabled/disabled
+- * using a compile-time flag on Solaris. Third-party JVM ports do not 
++ * using a compile-time flag on Solaris. Third-party JVM ports do not
+  * need to implement interruptible IO.
+  */
+ #define JVM_IO_INTR (-2)
+@@ -1140,7 +1137,7 @@
+ /*
+  * Open a file descriptor. This function returns a negative error code
+  * on error, and a non-negative integer that is the file descriptor on
+- * success.  
++ * success.
+  */
+ JNIEXPORT jint JNICALL
+ JVM_Open(const char *fname, jint flags, jint mode);
+@@ -1293,12 +1290,12 @@
+ JNIEXPORT int JNICALL
+ JVM_GetHostName(char* name, int namelen);
+ 
+-/* 
++/*
+  * The standard printing functions supported by the Java VM. (Should they
+- * be renamed to JVM_* in the future?  
++ * be renamed to JVM_* in the future?)
+  */
+ 
+-/* 
++/*
+  * BE CAREFUL! The following functions do not implement the
+  * full feature set of standard C printf formats.
+  */
+@@ -1358,10 +1355,10 @@
+ 
+ JNIEXPORT jobject JNICALL
+ JVM_GetClassMethod(JNIEnv *env, jclass cls, jstring name, jobjectArray types,
+-		   jint which);
++                   jint which);
+ JNIEXPORT jobject JNICALL
+ JVM_GetClassConstructor(JNIEnv *env, jclass cls, jobjectArray types,
+-			jint which);
++                        jint which);
+ 
+ /*
+  * Implements Class.newInstance
+@@ -1438,13 +1435,13 @@
+     JAVA_THREAD_STATE_WAITING       = 3,
+     JAVA_THREAD_STATE_TIMED_WAITING = 4,
+     JAVA_THREAD_STATE_TERMINATED    = 5,
+-    JAVA_THREAD_STATE_COUNT         = 6 
++    JAVA_THREAD_STATE_COUNT         = 6
+ };
+ 
+ /*
+  * Returns an array of the threadStatus values representing the
+  * given Java thread state.  Returns NULL if the VM version is
+- * incompatible with the JDK or doesn't support the given 
++ * incompatible with the JDK or doesn't support the given
+  * Java thread state.
+  */
+ JNIEXPORT jintArray JNICALL
+@@ -1453,17 +1450,17 @@
+ /*
+  * Returns an array of the substate names representing the
+  * given Java thread state.  Returns NULL if the VM version is
+- * incompatible with the JDK or the VM doesn't support 
++ * incompatible with the JDK or the VM doesn't support
+  * the given Java thread state.
+  * values must be the jintArray returned from JVM_GetThreadStateValues
+- * and javaThreadState.  
++ * and javaThreadState.
+  */
+ JNIEXPORT jobjectArray JNICALL
+ JVM_GetThreadStateNames(JNIEnv* env, jint javaThreadState, jintArray values);
+ 
+ /* =========================================================================
+  * The following defines a private JVM interface that the JDK can query
+- * for the JVM version and capabilities.  sun.misc.Version defines 
++ * for the JVM version and capabilities.  sun.misc.Version defines
+  * the methods for getting the VM version and its capabilities.
+  *
+  * When a new bit is added, the following should be updated to provide
+@@ -1471,7 +1468,7 @@
+  *    HS:   JVM_GetVersionInfo and Abstract_VM_Version class
+  *    SDK:  Version class
+  *
+- * Similary, a private JDK interface JDK_GetVersionInfo0 is defined for 
++ * Similarly, a private JDK interface JDK_GetVersionInfo0 is defined for
+  * JVM to query for the JDK version and capabilities.
+  *
+  * When a new bit is added, the following should be updated to provide
+@@ -1482,14 +1479,14 @@
+  * ==========================================================================
+  */
+ typedef struct {
+-    /* HotSpot Express VM version string: 
++    /* HotSpot Express VM version string:
+      * <major>.<minor>-bxx[-<identifier>][-<debug_flavor>]
+      */
+     unsigned int jvm_version; /* Consists of major.minor.0.build */
+     unsigned int update_version : 8;         /* 0 in HotSpot Express VM */
+     unsigned int special_update_version : 8; /* 0 in HotSpot Express VM */
+-    unsigned int reserved1 : 16; 
+-    unsigned int reserved2; 
++    unsigned int reserved1 : 16;
++    unsigned int reserved2;
+ 
+     /* The following bits represent JVM capabilities that the JDK depends on.
+      * JDK can use these bits to determine which JVM version
+@@ -1499,7 +1496,8 @@
+      * the new bit is also added in the main/baseline.
+      */
+     unsigned int is_attachable : 1;
+-    unsigned int : 31;
++    unsigned int is_kernel_jvm : 1;
++    unsigned int : 30;
+     unsigned int : 32;
+     unsigned int : 32;
+ } jvm_version_info;
+@@ -1507,7 +1505,7 @@
+ #define JVM_VERSION_MAJOR(version) ((version & 0xFF000000) >> 24)
+ #define JVM_VERSION_MINOR(version) ((version & 0x00FF0000) >> 16)
+ // Micro version is 0 in HotSpot Express VM (set in jvm.cpp).
+-#define JVM_VERSION_MICRO(version) ((version & 0x0000FF00) >> 8) 
++#define JVM_VERSION_MICRO(version) ((version & 0x0000FF00) >> 8)
+ /* Build number is available in all HotSpot Express VM builds.
+  * It is defined in make/hotspot_version file.
+  */
+@@ -1522,8 +1520,8 @@
+                                 /* and build number (xx) */
+     unsigned int update_version : 8;         /* Update release version (uu) */
+     unsigned int special_update_version : 8; /* Special update release version (c)*/
+-    unsigned int reserved1 : 16; 
+-    unsigned int reserved2; 
++    unsigned int reserved1 : 16;
++    unsigned int reserved2;
+ 
+     /* The following bits represent new JDK capabilities that the VM depends on.
+      * VM implementation can use these bits to determine which JDK version
+@@ -1547,15 +1545,15 @@
+  */
+ #define JDK_VERSION_BUILD(version) ((version & 0x000000FF))
+ 
+-/* 
+- * This is the function JDK_GetVersionInfo0 defined in libjava.so 
+- * that is dynamically looked up by JVM.  
++/*
++ * This is the function JDK_GetVersionInfo0 defined in libjava.so
++ * that is dynamically looked up by JVM.
+  */
+ typedef void (*jdk_version_info_fn_t)(jdk_version_info* info, size_t info_size);
+ 
+ /*
+  * This structure is used by the launcher to get the default thread
+- * stack size from the VM using JNI_GetDefaultJavaVMInitArgs() with a 
++ * stack size from the VM using JNI_GetDefaultJavaVMInitArgs() with a
+  * version of 1.1.  As it is not supported otherwise, it has been removed
+  * from jni.h
+  */
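
The jvm.h hunks above are almost entirely whitespace normalization; the one
semantic change is to jvm_version_info, where a new is_kernel_jvm capability
bit is carved out of the anonymous reserved field (31 -> 30 unused bits). As a
rough sketch of how a consumer decodes the packed version word with the macros
quoted above (the sample value and main() scaffolding are invented, not part
of the patch):

    #include <cstdio>

    #define JVM_VERSION_MAJOR(version) ((version & 0xFF000000) >> 24)
    #define JVM_VERSION_MINOR(version) ((version & 0x00FF0000) >> 16)
    // Micro version is 0 in HotSpot Express VM (set in jvm.cpp).
    #define JVM_VERSION_MICRO(version) ((version & 0x0000FF00) >> 8)

    int main() {
      unsigned int jvm_version = 0x0E00000BU;     // hypothetical 14.0-b11
      std::printf("%u.%u.%u\n",
                  JVM_VERSION_MAJOR(jvm_version),  // 14
                  JVM_VERSION_MINOR(jvm_version),  // 0
                  JVM_VERSION_MICRO(jvm_version)); // 0
      return 0;
    }
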
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvm_misc.hpp openjdk/hotspot/src/share/vm/prims/jvm_misc.hpp
+--- openjdk6/hotspot/src/share/vm/prims/jvm_misc.hpp	2008-02-28 05:02:41.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvm_misc.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jvm_misc.hpp	1.24 07/05/05 17:06:34 JVM"
+-#endif
+ /*
+  * Copyright 1998-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,10 +19,10 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+-// Useful entry points shared by JNI and JVM interface. 
++// Useful entry points shared by JNI and JVM interface.
+ // We do not allow real JNI or JVM entry point to call each other.
+ 
+ jclass find_class_from_class_loader(JNIEnv* env, symbolHandle name, jboolean init, Handle loader, Handle protection_domain, jboolean throwError, TRAPS);
+@@ -37,11 +34,11 @@
+ 
+ extern "C" {
+ 
+-void JNICALL 
+-JVM_SetPrimitiveFieldValues(JNIEnv *env, jclass cb, jobject obj, 
++void JNICALL
++JVM_SetPrimitiveFieldValues(JNIEnv *env, jclass cb, jobject obj,
+                             jlongArray fieldIDs, jcharArray typecodes, jbyteArray data);
+ 
+-void JNICALL 
++void JNICALL
+ JVM_GetPrimitiveFieldValues(JNIEnv *env, jclass cb, jobject obj,
+                             jlongArray fieldIDs, jcharArray typecodes, jbyteArray data);
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiAgentThread.hpp openjdk/hotspot/src/share/vm/prims/jvmtiAgentThread.hpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiAgentThread.hpp	2008-02-28 05:02:41.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiAgentThread.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jvmtiAgentThread.hpp	1.13 07/05/05 17:06:36 JVM"
+-#endif
+ /*
+  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+@@ -40,7 +37,7 @@
+ public:
+   JvmtiAgentThread(JvmtiEnv* env, jvmtiStartFunction start_fn, const void *start_arg);
+ 
+-  bool is_jvmti_agent_thread() const	{ return true; }
++  bool is_jvmti_agent_thread() const    { return true; }
+ 
+   static void start_function_wrapper(JavaThread *thread, TRAPS);
+   void call_start_function();
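
For orientation, the class patched above is the VM-side thread type behind the
JVMTI RunAgentThread call: an agent supplies a jvmtiStartFunction, and HotSpot
runs it through start_function_wrapper. A hedged sketch of the agent-side
usage (error handling elided; spawn_agent_thread and worker are invented
names, RunAgentThread is the real JVMTI API):

    #include <jvmti.h>

    static void JNICALL worker(jvmtiEnv* jvmti, JNIEnv* jni, void* arg) {
      // body runs in the newly started agent thread
    }

    void spawn_agent_thread(jvmtiEnv* jvmti, jthread thread) {
      // 'thread' must be a new, not-yet-started java.lang.Thread object
      jvmti->RunAgentThread(thread, worker, NULL, JVMTI_THREAD_NORM_PRIORITY);
    }
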
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp openjdk/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp	2008-02-28 05:02:41.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)jvmtiClassFileReconstituter.cpp	1.21 07/05/05 17:06:36 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ # include "incls/_precompiled.incl"
+ # include "incls/_jvmtiClassFileReconstituter.cpp.incl"
+@@ -33,16 +30,16 @@
+ 
+ 
+ // Write the field information portion of ClassFile structure
+-// JVMSpec|   	u2 fields_count;
+-// JVMSpec|   	field_info fields[fields_count];
+-void JvmtiClassFileReconstituter::write_field_infos() {  
++// JVMSpec|     u2 fields_count;
++// JVMSpec|     field_info fields[fields_count];
++void JvmtiClassFileReconstituter::write_field_infos() {
+   HandleMark hm(thread());
+   typeArrayHandle fields(thread(), ikh()->fields());
+   int fields_length = fields->length();
+   int num_fields = fields_length / instanceKlass::next_offset;
+   objArrayHandle fields_anno(thread(), ikh()->fields_annotations());
+ 
+-  write_u2(num_fields);  
++  write_u2(num_fields);
+   for (int index = 0; index < fields_length; index += instanceKlass::next_offset) {
+     AccessFlags access_flags;
+     int flags = fields->ushort_at(index + instanceKlass::access_flags_offset);
+@@ -52,23 +49,23 @@
+     int initial_value_index = fields->ushort_at(index + instanceKlass::initval_index_offset);
+     guarantee(name_index != 0 && signature_index != 0, "bad constant pool index for field");
+     int offset = ikh()->offset_from_fields( index );
+-    int generic_signature_index = 
++    int generic_signature_index =
+                         fields->ushort_at(index + instanceKlass::generic_signature_offset);
+-    typeArrayHandle anno(thread(), fields_anno.not_null() ? 
++    typeArrayHandle anno(thread(), fields_anno.not_null() ?
+                                  (typeArrayOop)(fields_anno->obj_at(index / instanceKlass::next_offset)) :
+                                  (typeArrayOop)NULL);
+ 
+     // JVMSpec|   field_info {
+-    // JVMSpec|   	u2 access_flags;
+-    // JVMSpec|   	u2 name_index;
+-    // JVMSpec|   	u2 descriptor_index;
+-    // JVMSpec|   	u2 attributes_count;
+-    // JVMSpec|   	attribute_info attributes[attributes_count];
++    // JVMSpec|         u2 access_flags;
++    // JVMSpec|         u2 name_index;
++    // JVMSpec|         u2 descriptor_index;
++    // JVMSpec|         u2 attributes_count;
++    // JVMSpec|         attribute_info attributes[attributes_count];
+     // JVMSpec|   }
+ 
+-    write_u2(flags & JVM_RECOGNIZED_FIELD_MODIFIERS); 
+-    write_u2(name_index); 
+-    write_u2(signature_index); 
++    write_u2(flags & JVM_RECOGNIZED_FIELD_MODIFIERS);
++    write_u2(name_index);
++    write_u2(signature_index);
+     int attr_count = 0;
+     if (initial_value_index != 0) {
+       ++attr_count;
+@@ -83,7 +80,7 @@
+       ++attr_count;     // has RuntimeVisibleAnnotations attribute
+     }
+ 
+-    write_u2(attr_count); 
++    write_u2(attr_count);
+ 
+     if (initial_value_index != 0) {
+       write_attribute_name_index("ConstantValue");
+@@ -104,20 +101,20 @@
+ 
+ // Write Code attribute
+ // JVMSpec|   Code_attribute {
+-// JVMSpec|   	u2 attribute_name_index;
+-// JVMSpec|   	u4 attribute_length;
+-// JVMSpec|   	u2 max_stack;
+-// JVMSpec|   	u2 max_locals;
+-// JVMSpec|   	u4 code_length;
+-// JVMSpec|   	u1 code[code_length];
+-// JVMSpec|   	u2 exception_table_length;
+-// JVMSpec|   	{    	u2 start_pc;
+-// JVMSpec|   	      	u2 end_pc;
+-// JVMSpec|   	      	u2  handler_pc;
+-// JVMSpec|   	      	u2  catch_type;
+-// JVMSpec|   	}	exception_table[exception_table_length];
+-// JVMSpec|   	u2 attributes_count;
+-// JVMSpec|   	attribute_info attributes[attributes_count];
++// JVMSpec|     u2 attribute_name_index;
++// JVMSpec|     u4 attribute_length;
++// JVMSpec|     u2 max_stack;
++// JVMSpec|     u2 max_locals;
++// JVMSpec|     u4 code_length;
++// JVMSpec|     u1 code[code_length];
++// JVMSpec|     u2 exception_table_length;
++// JVMSpec|     {       u2 start_pc;
++// JVMSpec|             u2 end_pc;
++// JVMSpec|             u2  handler_pc;
++// JVMSpec|             u2  catch_type;
++// JVMSpec|     }       exception_table[exception_table_length];
++// JVMSpec|     u2 attributes_count;
++// JVMSpec|     attribute_info attributes[attributes_count];
+ // JVMSpec|   }
+ void JvmtiClassFileReconstituter::write_code_attribute(methodHandle method) {
+   constMethodHandle const_method(thread(), method->constMethod());
+@@ -128,17 +125,17 @@
+   int attr_count = 0;
+   int attr_size = 0;
+   if (const_method->has_linenumber_table()) {
+-    line_num_cnt = line_number_table_entries(method);  
++    line_num_cnt = line_number_table_entries(method);
+     if (line_num_cnt != 0) {
+       ++attr_count;
+       // Compute the complete size of the line number table attribute:
+       //      LineNumberTable_attribute {
+-      //      	u2 attribute_name_index;
+-      //      	u4 attribute_length;
+-      //      	u2 line_number_table_length;
+-      //      	{  u2 start_pc;	     
+-      //      	   u2 line_number;	     
+-      //      	} line_number_table[line_number_table_length];
++      //        u2 attribute_name_index;
++      //        u4 attribute_length;
++      //        u2 line_number_table_length;
++      //        {  u2 start_pc;
++      //           u2 line_number;
++      //        } line_number_table[line_number_table_length];
+       //      }
+       attr_size += 2 + 4 + 2 + line_num_cnt * (2 + 2);
+     }
+@@ -149,10 +146,10 @@
+       ++attr_count;
+       // Compute the  size of the stack map table attribute (VM stores raw):
+       //      StackMapTable_attribute {
+-      //      	u2 attribute_name_index;
+-      //      	u4 attribute_length;
+-      //      	u2 number_of_entries;
+-      //      	stack_map_frame_entries[number_of_entries];
++      //        u2 attribute_name_index;
++      //        u4 attribute_length;
++      //        u2 number_of_entries;
++      //        stack_map_frame_entries[number_of_entries];
+       //      }
+       attr_size += 2 + 4 + stackmap_len;
+     }
+@@ -162,7 +159,7 @@
+   int exception_table_length = exception_table->length();
+   int exception_table_entries = exception_table_length / 4;
+   int code_size = const_method->code_size();
+-  int size = 
++  int size =
+     2+2+4 +                                // max_stack, max_locals, code_length
+     code_size +                            // code
+     2 +                                    // exception_table_length
+@@ -196,15 +193,15 @@
+ 
+ // Write Exceptions attribute
+ // JVMSpec|   Exceptions_attribute {
+-// JVMSpec|   	u2 attribute_name_index;
+-// JVMSpec|   	u4 attribute_length;
+-// JVMSpec|   	u2 number_of_exceptions;
+-// JVMSpec|   	u2 exception_index_table[number_of_exceptions];
++// JVMSpec|     u2 attribute_name_index;
++// JVMSpec|     u4 attribute_length;
++// JVMSpec|     u2 number_of_exceptions;
++// JVMSpec|     u2 exception_index_table[number_of_exceptions];
+ // JVMSpec|   }
+ void JvmtiClassFileReconstituter::write_exceptions_attribute(constMethodHandle const_method) {
+   CheckedExceptionElement* checked_exceptions = const_method->checked_exceptions_start();
+   int checked_exceptions_length = const_method->checked_exceptions_length();
+-  int size = 
++  int size =
+     2 +                                    // number_of_exceptions
+     2 * checked_exceptions_length;         // exception_index_table
+ 
+@@ -218,9 +215,9 @@
+ 
+ // Write SourceFile attribute
+ // JVMSpec|   SourceFile_attribute {
+-// JVMSpec|   	u2 attribute_name_index;
+-// JVMSpec|   	u4 attribute_length;
+-// JVMSpec|   	u2 sourcefile_index;
++// JVMSpec|     u2 attribute_name_index;
++// JVMSpec|     u4 attribute_length;
++// JVMSpec|     u2 sourcefile_index;
+ // JVMSpec|   }
+ void JvmtiClassFileReconstituter::write_source_file_attribute() {
+   assert(ikh()->source_file_name() != NULL, "caller must check");
+@@ -232,9 +229,9 @@
+ 
+ // Write SourceDebugExtension attribute
+ // JSR45|   SourceDebugExtension_attribute {
+-// JSR45|   	u2 attribute_name_index;
+-// JSR45|   	u4 attribute_length;
+-// JSR45|   	u2 sourcefile_index;
++// JSR45|       u2 attribute_name_index;
++// JSR45|       u4 attribute_length;
++// JSR45|       u2 sourcefile_index;
+ // JSR45|   }
+ void JvmtiClassFileReconstituter::write_source_debug_extension_attribute() {
+   assert(ikh()->source_debug_extension() != NULL, "caller must check");
+@@ -246,9 +243,9 @@
+ 
+ // Write (generic) Signature attribute
+ // JVMSpec|   Signature_attribute {
+-// JVMSpec|   	u2 attribute_name_index;
+-// JVMSpec|   	u4 attribute_length;
+-// JVMSpec|   	u2 signature_index;
++// JVMSpec|     u2 attribute_name_index;
++// JVMSpec|     u4 attribute_length;
++// JVMSpec|     u2 signature_index;
+ // JVMSpec|   }
+ void JvmtiClassFileReconstituter::write_signature_attribute(u2 generic_signature_index) {
+   write_attribute_name_index("Signature");
+@@ -268,10 +265,10 @@
+ // JSR202|     u2 attribute_name_index;
+ // JSR202|     u4 attribute_length;
+ // JSR202|     ...
+-// JSR202|   } 
++// JSR202|   }
+ void JvmtiClassFileReconstituter::write_annotations_attribute(const char* attr_name,
+                                                               typeArrayHandle annos) {
+-  u4 length = annos->length(); 
++  u4 length = annos->length();
+   write_attribute_name_index(attr_name);
+   write_u4(length);
+   memcpy(writeable_address(length), annos->byte_at_addr(0), length);
+@@ -280,18 +277,18 @@
+ 
+ // Write InnerClasses attribute
+ // JVMSpec|   InnerClasses_attribute {
+-// JVMSpec|   	u2 attribute_name_index;
+-// JVMSpec|   	u4 attribute_length;
+-// JVMSpec|   	u2 number_of_classes;
+-// JVMSpec|   	{  u2 inner_class_info_index;	     
+-// JVMSpec|   	   u2 outer_class_info_index;	     
+-// JVMSpec|   	   u2 inner_name_index;	     
+-// JVMSpec|   	   u2 inner_class_access_flags;	     
+-// JVMSpec|   	} classes[number_of_classes];
++// JVMSpec|     u2 attribute_name_index;
++// JVMSpec|     u4 attribute_length;
++// JVMSpec|     u2 number_of_classes;
++// JVMSpec|     {  u2 inner_class_info_index;
++// JVMSpec|        u2 outer_class_info_index;
++// JVMSpec|        u2 inner_name_index;
++// JVMSpec|        u2 inner_class_access_flags;
++// JVMSpec|     } classes[number_of_classes];
+ // JVMSpec|   }
+ void JvmtiClassFileReconstituter::write_inner_classes_attribute(int length) {
+   typeArrayOop inner_class_list = ikh()->inner_classes();
+-  guarantee(inner_class_list != NULL && inner_class_list->length() == length, 
++  guarantee(inner_class_list != NULL && inner_class_list->length() == length,
+             "caller must check");
+   typeArrayHandle inner_class_list_h(thread(), inner_class_list);
+   assert (length % instanceKlass::inner_class_next_offset == 0, "just checking");
+@@ -299,8 +296,8 @@
+   u4 size = 2 + entry_count * (2+2+2+2);
+ 
+   write_attribute_name_index("InnerClasses");
+-  write_u4(size); 
+-  write_u2(entry_count); 
++  write_u4(size);
++  write_u2(entry_count);
+   for (int i = 0; i < length; i += instanceKlass::inner_class_next_offset) {
+     write_u2(inner_class_list_h->ushort_at(
+                       i + instanceKlass::inner_class_inner_class_info_offset));
+@@ -315,8 +312,8 @@
+ 
+ // Write Synthetic attribute
+ // JVMSpec|   Synthetic_attribute {
+-// JVMSpec|   	u2 attribute_name_index;
+-// JVMSpec|   	u4 attribute_length;
++// JVMSpec|     u2 attribute_name_index;
++// JVMSpec|     u4 attribute_length;
+ // JVMSpec|   }
+ void JvmtiClassFileReconstituter::write_synthetic_attribute() {
+   write_attribute_name_index("Synthetic");
+@@ -334,15 +331,15 @@
+   }
+   return num_entries;
+ }
+- 
++
+ // Write LineNumberTable attribute
+ // JVMSpec|   LineNumberTable_attribute {
+-// JVMSpec|   	u2 attribute_name_index;
+-// JVMSpec|   	u4 attribute_length;
+-// JVMSpec|   	u2 line_number_table_length;
+-// JVMSpec|   	{  u2 start_pc;	     
+-// JVMSpec|   	   u2 line_number;	     
+-// JVMSpec|   	} line_number_table[line_number_table_length];
++// JVMSpec|     u2 attribute_name_index;
++// JVMSpec|     u4 attribute_length;
++// JVMSpec|     u2 line_number_table_length;
++// JVMSpec|     {  u2 start_pc;
++// JVMSpec|        u2 line_number;
++// JVMSpec|     } line_number_table[line_number_table_length];
+ // JVMSpec|   }
+ void JvmtiClassFileReconstituter::write_line_number_table_attribute(methodHandle method,
+                                                                     u2 num_entries) {
+@@ -360,10 +357,10 @@
+ 
+ // Write stack map table attribute
+ // JSR-202|   StackMapTable_attribute {
+-// JSR-202|   	u2 attribute_name_index;
+-// JSR-202|   	u4 attribute_length;
+-// JSR-202|   	u2 number_of_entries;
+-// JSR-202|   	stack_map_frame_entries[number_of_entries];
++// JSR-202|     u2 attribute_name_index;
++// JSR-202|     u4 attribute_length;
++// JSR-202|     u2 number_of_entries;
++// JSR-202|     stack_map_frame_entries[number_of_entries];
+ // JSR-202|   }
+ void JvmtiClassFileReconstituter::write_stackmap_table_attribute(methodHandle method,
+                                                                  int stackmap_len) {
+@@ -371,18 +368,18 @@
+   write_attribute_name_index("StackMapTable");
+   write_u4(stackmap_len);
+   memcpy(
+-    writeable_address(stackmap_len), 
+-    (void*)(method->stackmap_data()->byte_at_addr(0)), 
++    writeable_address(stackmap_len),
++    (void*)(method->stackmap_data()->byte_at_addr(0)),
+     stackmap_len);
+ }
+ 
+ // Write one method_info structure
+ // JVMSpec|   method_info {
+-// JVMSpec|   	u2 access_flags;
+-// JVMSpec|   	u2 name_index;
+-// JVMSpec|   	u2 descriptor_index;
+-// JVMSpec|   	u2 attributes_count;
+-// JVMSpec|   	attribute_info attributes[attributes_count];
++// JVMSpec|     u2 access_flags;
++// JVMSpec|     u2 name_index;
++// JVMSpec|     u2 descriptor_index;
++// JVMSpec|     u2 attributes_count;
++// JVMSpec|     attribute_info attributes[attributes_count];
+ // JVMSpec|   }
+ void JvmtiClassFileReconstituter::write_method_info(methodHandle method) {
+   AccessFlags access_flags = method->access_flags();
+@@ -391,11 +388,11 @@
+   typeArrayHandle anno(thread(), method->annotations());
+   typeArrayHandle param_anno(thread(), method->parameter_annotations());
+   typeArrayHandle default_anno(thread(), method->annotation_default());
+-  
+-  write_u2(access_flags.get_flags() & JVM_RECOGNIZED_METHOD_MODIFIERS); 
+-  write_u2(const_method->name_index()); 
+-  write_u2(const_method->signature_index()); 
+-  
++
++  write_u2(access_flags.get_flags() & JVM_RECOGNIZED_METHOD_MODIFIERS);
++  write_u2(const_method->name_index());
++  write_u2(const_method->signature_index());
++
+   // write attributes in the same order javac does, so we can test with byte for
+   // byte comparison
+   int attr_count = 0;
+@@ -421,8 +418,8 @@
+   if (param_anno.not_null()) {
+     ++attr_count;     // has RuntimeVisibleParameterAnnotations attribute
+   }
+-  
+-  write_u2(attr_count); 
++
++  write_u2(attr_count);
+   if (const_method->code_size() > 0) {
+     write_code_attribute(method);
+   }
+@@ -448,8 +445,8 @@
+ }
+ 
+ // Write the class attributes portion of ClassFile structure
+-// JVMSpec|   	u2 attributes_count;
+-// JVMSpec|   	attribute_info attributes[attributes_count];
++// JVMSpec|     u2 attributes_count;
++// JVMSpec|     attribute_info attributes[attributes_count];
+ void JvmtiClassFileReconstituter::write_class_attributes() {
+   u2 inner_classes_length = inner_classes_attribute_length();
+   symbolHandle generic_signature(thread(), ikh()->generic_signature());
+@@ -471,8 +468,8 @@
+   if (anno.not_null()) {
+     ++attr_count;     // has RuntimeVisibleAnnotations attribute
+   }
+-  
+-  write_u2(attr_count); 
++
++  write_u2(attr_count);
+ 
+   if (generic_signature() != NULL) {
+     write_signature_attribute(symbol_to_cpool_index(generic_signature()));
+@@ -492,14 +489,14 @@
+ }
+ 
+ // Write the method information portion of ClassFile structure
+-// JVMSpec|   	u2 methods_count;
+-// JVMSpec|   	method_info methods[methods_count];
++// JVMSpec|     u2 methods_count;
++// JVMSpec|     method_info methods[methods_count];
+ void JvmtiClassFileReconstituter::write_method_infos() {
+   HandleMark hm(thread());
+   objArrayHandle methods(thread(), ikh()->methods());
+   int num_methods = methods->length();
+ 
+-  write_u2(num_methods);  
++  write_u2(num_methods);
+   if (JvmtiExport::can_maintain_original_method_order()) {
+     int index;
+     int original_index;
+@@ -508,7 +505,7 @@
+     // invert the method order mapping
+     for (index = 0; index < num_methods; index++) {
+       original_index = ikh()->method_ordering()->int_at(index);
+-      assert(original_index >= 0 && original_index < num_methods, 
++      assert(original_index >= 0 && original_index < num_methods,
+              "invalid original method index");
+       method_order[original_index] = index;
+     }
+@@ -532,51 +529,51 @@
+   ReallocMark();
+ 
+   // JVMSpec|   ClassFile {
+-  // JVMSpec|   	u4 magic;
++  // JVMSpec|           u4 magic;
+   write_u4(0xCAFEBABE);
+ 
+-  // JVMSpec|   	u2 minor_version;
+-  // JVMSpec|   	u2 major_version;
++  // JVMSpec|           u2 minor_version;
++  // JVMSpec|           u2 major_version;
+   write_u2(ikh()->minor_version());
+   u2 major = ikh()->major_version();
+   write_u2(major);
+ 
+-  // JVMSpec|   	u2 constant_pool_count;
+-  // JVMSpec|   	cp_info constant_pool[constant_pool_count-1];
+-  write_u2(cpool()->length()); 
++  // JVMSpec|           u2 constant_pool_count;
++  // JVMSpec|           cp_info constant_pool[constant_pool_count-1];
++  write_u2(cpool()->length());
+   copy_cpool_bytes(writeable_address(cpool_size()));
+ 
+-  // JVMSpec|   	u2 access_flags;
++  // JVMSpec|           u2 access_flags;
+   write_u2(ikh()->access_flags().get_flags() & JVM_RECOGNIZED_CLASS_MODIFIERS);
+ 
+-  // JVMSpec|   	u2 this_class;
+-  // JVMSpec|   	u2 super_class;
+-  write_u2(class_symbol_to_cpool_index(ikh()->name())); 
++  // JVMSpec|           u2 this_class;
++  // JVMSpec|           u2 super_class;
++  write_u2(class_symbol_to_cpool_index(ikh()->name()));
+   klassOop super_class = ikh()->super();
+   write_u2(super_class == NULL? 0 :  // zero for java.lang.Object
+-                class_symbol_to_cpool_index(super_class->klass_part()->name())); 
++                class_symbol_to_cpool_index(super_class->klass_part()->name()));
+ 
+-  // JVMSpec|   	u2 interfaces_count;
+-  // JVMSpec|   	u2 interfaces[interfaces_count];
++  // JVMSpec|           u2 interfaces_count;
++  // JVMSpec|           u2 interfaces[interfaces_count];
+   objArrayHandle interfaces(thread(), ikh()->local_interfaces());
+   int num_interfaces = interfaces->length();
+   write_u2(num_interfaces);
+   for (int index = 0; index < num_interfaces; index++) {
+     HandleMark hm(thread());
+     instanceKlassHandle iikh(thread(), klassOop(interfaces->obj_at(index)));
+-    write_u2(class_symbol_to_cpool_index(iikh->name())); 
++    write_u2(class_symbol_to_cpool_index(iikh->name()));
+   }
+ 
+-  // JVMSpec|   	u2 fields_count;
+-  // JVMSpec|   	field_info fields[fields_count];
++  // JVMSpec|           u2 fields_count;
++  // JVMSpec|           field_info fields[fields_count];
+   write_field_infos();
+ 
+-  // JVMSpec|   	u2 methods_count;
+-  // JVMSpec|   	method_info methods[methods_count];
++  // JVMSpec|           u2 methods_count;
++  // JVMSpec|           method_info methods[methods_count];
+   write_method_infos();
+ 
+-  // JVMSpec|   	u2 attributes_count;
+-  // JVMSpec|   	attribute_info attributes[attributes_count];
++  // JVMSpec|           u2 attributes_count;
++  // JVMSpec|           attribute_info attributes[attributes_count];
+   // JVMSpec|   } /* end ClassFile */
+   write_class_attributes();
+ }
+@@ -624,7 +621,7 @@
+   Bytes::put_Java_u8(writeable_address(8), x);
+ }
+ 
+-void JvmtiClassFileReconstituter::copy_bytecodes(methodHandle mh, 
++void JvmtiClassFileReconstituter::copy_bytecodes(methodHandle mh,
+                                                  unsigned char* bytecodes) {
+   // use a BytecodeStream to iterate over the bytecodes. JVM/fast bytecodes
+   // and the breakpoint bytecode are converted to their original bytecodes.
+@@ -640,7 +637,7 @@
+     assert(code != Bytecodes::_breakpoint, "sanity check");
+ 
+     // length of bytecode (mnemonic + operands)
+-    address bcp = bs.bcp();    
++    address bcp = bs.bcp();
+     int len = bs.next_bcp() - bcp;
+     assert(len > 0, "length must be > 0");
+ 
+@@ -650,8 +647,8 @@
+       memcpy(p+1, bcp+1, len-1);
+     }
+ 
+-    // During linking the get/put and invoke instructions are rewritten 
+-    // with an index into the constant pool cache. The original constant 
++    // During linking the get/put and invoke instructions are rewritten
++    // with an index into the constant pool cache. The original constant
+     // pool index must be returned to caller.  Rewrite the index.
+     if (is_rewritten && len >= 3) {
+       switch (code) {
+@@ -663,10 +660,10 @@
+       case Bytecodes::_invokespecial   :  // fall through
+       case Bytecodes::_invokestatic    :  // fall through
+       case Bytecodes::_invokeinterface :
+-        assert(len == 3 || (code == Bytecodes::_invokeinterface && len ==5), 
++        assert(len == 3 || (code == Bytecodes::_invokeinterface && len ==5),
+                "sanity check");
+         // cache cannot be pre-fetched since some classes won't have it yet
+-        ConstantPoolCacheEntry* entry = 
++        ConstantPoolCacheEntry* entry =
+           mh->constants()->cache()->entry_at(Bytes::get_native_u2(bcp+1));
+         int i = entry->constant_pool_index();
+         assert(i < mh->constants()->length(), "sanity check");
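
The final hunk above reindents the interesting part of copy_bytecodes: once a
class is linked, get/put and invoke bytecodes hold a native-endian index into
the constant pool *cache*, and the reconstituter maps it back to the original
constant pool index, stored big-endian as the class file format requires. A
simplified standalone sketch of that un-rewriting step (cache_to_cp stands in
for the ConstantPoolCacheEntry lookup; all names here are illustrative):

    #include <cstdint>
    #include <cstring>

    static uint16_t get_native_u2(const uint8_t* p) {   // host byte order
      uint16_t v;
      std::memcpy(&v, p, 2);
      return v;
    }

    static void put_Java_u2(uint8_t* p, uint16_t v) {   // big-endian
      p[0] = uint8_t(v >> 8);
      p[1] = uint8_t(v & 0xFF);
    }

    // bcp points at the opcode; its two operand bytes follow immediately.
    void unrewrite_operand(uint8_t* bcp, const uint16_t* cache_to_cp) {
      uint16_t cache_index = get_native_u2(bcp + 1);
      put_Java_u2(bcp + 1, cache_to_cp[cache_index]);   // restore cp index
    }
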
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.hpp openjdk/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.hpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.hpp	2008-02-28 05:02:41.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jvmtiClassFileReconstituter.hpp	1.15 07/05/05 17:06:36 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ 
+@@ -75,7 +72,7 @@
+     }
+   }
+ 
+-  
++
+   void       set_error(jvmtiError err)    { _err = err; }
+   jvmtiError get_error()                  { return _err; }
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiCodeBlobEvents.cpp openjdk/hotspot/src/share/vm/prims/jvmtiCodeBlobEvents.cpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	2008-02-28 05:02:41.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)jvmtiCodeBlobEvents.cpp	1.20 07/05/05 17:06:36 JVM"
+-#endif
+ /*
+  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,14 +19,14 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_jvmtiCodeBlobEvents.cpp.incl"
+ 
+ // Support class to collect a list of the non-nmethod CodeBlobs in
+-// the CodeCache. 
++// the CodeCache.
+ //
+ // This class actually creates a list of JvmtiCodeBlobDesc - each JvmtiCodeBlobDesc
+ // describes a single CodeBlob in the CodeCache. Note that collection is
+@@ -56,16 +53,16 @@
+ 
+   // used during a collection
+   static GrowableArray<JvmtiCodeBlobDesc*>* _global_code_blobs;
+-  static void do_blob(CodeBlob* cb);  
+- public:     
+-  CodeBlobCollector() {    
+-    _code_blobs = NULL;    
++  static void do_blob(CodeBlob* cb);
++ public:
++  CodeBlobCollector() {
++    _code_blobs = NULL;
+     _pos = -1;
+   }
+   ~CodeBlobCollector() {
+     if (_code_blobs != NULL) {
+       for (int i=0; i<_code_blobs->length(); i++) {
+-	FreeHeap(_code_blobs->at(i));
++        FreeHeap(_code_blobs->at(i));
+       }
+       delete _code_blobs;
+     }
+@@ -92,7 +89,7 @@
+     }
+     return _code_blobs->at(++_pos);
+   }
+-     
++
+ };
+ 
+ // used during collection
+@@ -105,7 +102,7 @@
+ // other CodeBlobs. This function also filters out CodeBlobs that have
+ // a duplicate starting address as previous blobs. This is needed to
+ // handle the case where multiple stubs are generated into a single
+-// BufferBlob. 
++// BufferBlob.
+ 
+ void CodeBlobCollector::do_blob(CodeBlob* cb) {
+ 
+@@ -114,7 +111,7 @@
+     return;
+   }
+ 
+-  // check if this starting address has been seen already - the 
++  // check if this starting address has been seen already - the
+   // assumption is that stubs are inserted into the list before the
+   // enclosing BufferBlobs.
+   address addr = cb->instructions_begin();
+@@ -125,18 +122,18 @@
+     }
+   }
+ 
+-  // we must name the CodeBlob - some CodeBlobs already have names :- 
++  // we must name the CodeBlob - some CodeBlobs already have names :-
+   // - stubs used by compiled code to call a (static) C++ runtime routine
+   // - non-relocatable machine code such as the interpreter, stubroutines, etc.
+   // - various singleton blobs
+   //
+   // others are unnamed so we create a name :-
+-  // - OSR adapter (interpreter frame that has been on-stack replaced) 
++  // - OSR adapter (interpreter frame that has been on-stack replaced)
+   // - I2C and C2I adapters
+   const char* name = NULL;
+-  if (cb->is_runtime_stub()) {    
+-    name = ((RuntimeStub*)cb)->name();   
+-  }  
++  if (cb->is_runtime_stub()) {
++    name = ((RuntimeStub*)cb)->name();
++  }
+   if (cb->is_buffer_blob()) {
+     name = ((BufferBlob*)cb)->name();
+   }
+@@ -149,7 +146,7 @@
+ 
+   // record the CodeBlob details as a JvmtiCodeBlobDesc
+   JvmtiCodeBlobDesc* scb = new JvmtiCodeBlobDesc(name, cb->instructions_begin(),
+-						 cb->instructions_end());
++                                                 cb->instructions_end());
+   _global_code_blobs->append(scb);
+ }
+ 
+@@ -167,9 +164,9 @@
+ // the enclosing container we first iterate over the stub code descriptors so
+ // that the stubs go into the list first. do_blob will then filter out the
+ // enclosing blobs if the starting address of the enclosing blobs matches the
+-// starting address of first stub generated in the enclosing blob. 
++// starting address of first stub generated in the enclosing blob.
+ 
+-void CodeBlobCollector::collect() {   
++void CodeBlobCollector::collect() {
+   assert_locked_or_safepoint(CodeCache_lock);
+   assert(_global_code_blobs == NULL, "checking");
+ 
+@@ -179,7 +176,7 @@
+   // iterate over the stub code descriptors and put them in the list first.
+   int index = 0;
+   StubCodeDesc* desc;
+-  while ((desc = StubCodeDesc::desc_for_index(++index)) != NULL) {   
++  while ((desc = StubCodeDesc::desc_for_index(++index)) != NULL) {
+     _global_code_blobs->append(new JvmtiCodeBlobDesc(desc->name(), desc->begin(), desc->end()));
+   }
+ 
+@@ -192,7 +189,7 @@
+   // for other iterations.
+   _code_blobs = _global_code_blobs;
+   _global_code_blobs = NULL;
+-}  
++}
+ 
+ 
+ // Generate a DYNAMIC_CODE_GENERATED event for each non-nmethod code blob.
+@@ -210,7 +207,7 @@
+   JvmtiCodeBlobDesc* blob = collector.first();
+   while (blob != NULL) {
+     JvmtiExport::post_dynamic_code_generated(env, blob->name(), blob->code_begin(), blob->code_end());
+-    blob = collector.next();					   
++    blob = collector.next();
+   }
+   return JVMTI_ERROR_NONE;
+ }
+@@ -226,24 +223,24 @@
+   jvmtiAddrLocationMap* _map;
+   jint _map_length;
+  public:
+-  nmethodDesc(methodHandle method, address code_begin, address code_end, 
+-	      jvmtiAddrLocationMap* map, jint map_length) {
++  nmethodDesc(methodHandle method, address code_begin, address code_end,
++              jvmtiAddrLocationMap* map, jint map_length) {
+     _method = method;
+     _code_begin = code_begin;
+     _code_end = code_end;
+     _map = map;
+     _map_length = map_length;
+   }
+-  methodHandle method() const		{ return _method; }
+-  address code_begin() const		{ return _code_begin; }
+-  address code_end() const		{ return _code_end; }
+-  jvmtiAddrLocationMap*	map() const	{ return _map; }
+-  jint map_length() const		{ return _map_length; }
++  methodHandle method() const           { return _method; }
++  address code_begin() const            { return _code_begin; }
++  address code_end() const              { return _code_end; }
++  jvmtiAddrLocationMap* map() const     { return _map; }
++  jint map_length() const               { return _map_length; }
+ };
+ 
+ 
+ // Support class to collect a list of the nmethod CodeBlobs in
+-// the CodeCache. 
++// the CodeCache.
+ //
+ // Usage :-
+ //
+@@ -258,24 +255,24 @@
+ //
+ class nmethodCollector : StackObj {
+  private:
+-  GrowableArray<nmethodDesc*>* _nmethods;	    // collect nmethods
+-  int _pos;					    // iteration support
++  GrowableArray<nmethodDesc*>* _nmethods;           // collect nmethods
++  int _pos;                                         // iteration support
+ 
+   // used during a collection
+   static GrowableArray<nmethodDesc*>* _global_nmethods;
+-  static void do_nmethod(nmethod* nm);  
+- public:     
+-  nmethodCollector() {    
++  static void do_nmethod(nmethod* nm);
++ public:
++  nmethodCollector() {
+     _nmethods = NULL;
+     _pos = -1;
+   }
+   ~nmethodCollector() {
+     if (_nmethods != NULL) {
+       for (int i=0; i<_nmethods->length(); i++) {
+-	nmethodDesc* blob = _nmethods->at(i);
+-	if (blob->map()!= NULL) {
+-	  FREE_C_HEAP_ARRAY(jvmtiAddrLocationMap, blob->map());
+-	}
++        nmethodDesc* blob = _nmethods->at(i);
++        if (blob->map()!= NULL) {
++          FREE_C_HEAP_ARRAY(jvmtiAddrLocationMap, blob->map());
++        }
+       }
+       delete _nmethods;
+     }
+@@ -301,7 +298,7 @@
+       return NULL;
+     }
+     return _nmethods->at(++_pos);
+-  }  
++  }
+ };
+ 
+ // used during collection
+@@ -325,13 +322,13 @@
+   jint map_length;
+   JvmtiCodeBlobEvents::build_jvmti_addr_location_map(nm, &map, &map_length);
+ 
+-  // record the nmethod details 
++  // record the nmethod details
+   methodHandle mh(nm->method());
+   nmethodDesc* snm = new nmethodDesc(mh,
+-				     nm->code_begin(),
+-				     nm->code_end(),
+-				     map,
+-				     map_length);
++                                     nm->code_begin(),
++                                     nm->code_end(),
++                                     map,
++                                     map_length);
+   _global_nmethods->append(snm);
+ }
+ 
+@@ -340,7 +337,7 @@
+ // The created list is a growable array of nmethodDesc - each one describes
+ // a nmethod and includes its JVMTI address location map.
+ 
+-void nmethodCollector::collect() {   
++void nmethodCollector::collect() {
+   assert_locked_or_safepoint(CodeCache_lock);
+   assert(_global_nmethods == NULL, "checking");
+ 
+@@ -350,12 +347,12 @@
+   // add a descriptor for each nmethod to the list.
+   CodeCache::nmethods_do(do_nmethod);
+ 
+-  // make the list the instance list 
++  // make the list the instance list
+   _nmethods = _global_nmethods;
+   _global_nmethods = NULL;
+-}  
++}
+ 
+-// Generate a COMPILED_METHOD_LOAD event for each nnmethod 
++// Generate a COMPILED_METHOD_LOAD event for each nmethod
+ 
+ jvmtiError JvmtiCodeBlobEvents::generate_compiled_method_load_events(JvmtiEnv* env) {
+   HandleMark hm;
+@@ -370,23 +367,23 @@
+   // iterate over the list and post an event for each nmethod
+   nmethodDesc* nm_desc = collector.first();
+   while (nm_desc != NULL) {
+-    methodOop method = nm_desc->method()();    
++    methodOop method = nm_desc->method()();
+     jmethodID mid = method->jmethod_id();
+     assert(mid != NULL, "checking");
+     JvmtiExport::post_compiled_method_load(env, mid,
+-					   (jint)(nm_desc->code_end() - nm_desc->code_begin()),
+-					   nm_desc->code_begin(), nm_desc->map_length(),
+-					   nm_desc->map());	
++                                           (jint)(nm_desc->code_end() - nm_desc->code_begin()),
++                                           nm_desc->code_begin(), nm_desc->map_length(),
++                                           nm_desc->map());
+     nm_desc = collector.next();
+-  }  
++  }
+   return JVMTI_ERROR_NONE;
+ }
+ 
+ 
+ // create a C-heap allocated address location map for an nmethod
+-void JvmtiCodeBlobEvents::build_jvmti_addr_location_map(nmethod *nm, 
+-							jvmtiAddrLocationMap** map_ptr, 
+-							jint *map_length_ptr)
++void JvmtiCodeBlobEvents::build_jvmti_addr_location_map(nmethod *nm,
++                                                        jvmtiAddrLocationMap** map_ptr,
++                                                        jint *map_length_ptr)
+ {
+   ResourceMark rm;
+   jvmtiAddrLocationMap* map = NULL;
+@@ -410,13 +407,13 @@
+       while( !sd->is_top() ) { sd = sd->sender(); }
+       int bci = sd->bci();
+       if (bci != InvocationEntryBci) {
+-	assert(map_length < pcds_in_method, "checking");
++        assert(map_length < pcds_in_method, "checking");
+         map[map_length].start_address = (const void*)pcd->real_pc(nm);
+         map[map_length].location = bci;
+         ++map_length;
+       }
+     }
+-  } 
++  }
+ 
+   *map_ptr = map;
+   *map_length_ptr = map_length;
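
build_jvmti_addr_location_map, touched above only for indentation, walks the
nmethod's PcDesc entries and emits (start_address, bci) pairs for
COMPILED_METHOD_LOAD events. A rough sketch of the contract a consumer can
assume (the struct mirrors JVMTI's jvmtiAddrLocationMap; bci_for_pc is an
invented helper, and entries are assumed sorted by start_address):

    #include <cstddef>
    #include <cstdint>

    struct AddrLocationEntry {
      const void* start_address;  // first native pc covered by this entry
      long        location;       // bytecode index (bci) for that pc range
    };

    // Return the bci of the last entry starting at or before pc, or -1.
    long bci_for_pc(const AddrLocationEntry* map, size_t len, const void* pc) {
      long bci = -1;
      for (size_t i = 0; i < len; i++) {
        if ((uintptr_t)map[i].start_address > (uintptr_t)pc) break;
        bci = map[i].location;
      }
      return bci;
    }
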
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiCodeBlobEvents.hpp openjdk/hotspot/src/share/vm/prims/jvmtiCodeBlobEvents.hpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiCodeBlobEvents.hpp	2008-02-28 05:02:41.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiCodeBlobEvents.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jvmtiCodeBlobEvents.hpp	1.11 07/05/05 17:06:35 JVM"
+-#endif
+ /*
+  * Copyright 2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,9 +19,9 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+- 
++
+ #ifndef _JVMTI_CODE_BLOB_EVENTS_H_
+ #define _JVMTI_CODE_BLOB_EVENTS_H_
+ 
+@@ -41,17 +38,17 @@
+ class JvmtiCodeBlobEvents : public AllStatic {
+  public:
+ 
+-  // generate a DYNAMIC_CODE_GENERATED_EVENT event for each non-nmethod 
++  // generate a DYNAMIC_CODE_GENERATED_EVENT event for each non-nmethod
+   // code blob in the code cache.
+   static jvmtiError generate_dynamic_code_events(JvmtiEnv* env);
+ 
+-  // generate a COMPILED_METHOD_LOAD event for each nmethod 
++  // generate a COMPILED_METHOD_LOAD event for each nmethod
+   // code blob in the code cache.
+   static jvmtiError generate_compiled_method_load_events(JvmtiEnv* env);
+ 
+   // create a C-heap allocated address location map for an nmethod
+-  static void build_jvmti_addr_location_map(nmethod *nm, jvmtiAddrLocationMap** map, 
+-					    jint *map_length);
+-}; 
++  static void build_jvmti_addr_location_map(nmethod *nm, jvmtiAddrLocationMap** map,
++                                            jint *map_length);
++};
+ 
+ #endif
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiEnter.hpp openjdk/hotspot/src/share/vm/prims/jvmtiEnter.hpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiEnter.hpp	2008-02-28 05:02:41.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiEnter.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jvmtiEnter.hpp	1.8 07/05/05 17:06:35 JVM"
+-#endif
+ /*
+  * Copyright 2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,5 +19,5 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiEnter.xsl openjdk/hotspot/src/share/vm/prims/jvmtiEnter.xsl
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiEnter.xsl	2008-02-28 05:02:41.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiEnter.xsl	2008-01-31 09:19:01.000000000 -0500
+@@ -465,6 +465,13 @@
+   <xsl:apply-templates select="parameters" mode="signature"/>
+   <xsl:text>) {
+ </xsl:text>
++
++  <xsl:if test="not(contains(@jkernel,'yes'))">
++  <xsl:text>&#xA;#ifdef JVMTI_KERNEL &#xA;</xsl:text>
++  <xsl:text>  return JVMTI_ERROR_NOT_AVAILABLE; &#xA;</xsl:text>
++  <xsl:text>#else &#xA;</xsl:text>
++  </xsl:if>
++
+   <xsl:apply-templates select="." mode="traceSetUp"/>
+   <xsl:choose>
+     <xsl:when test="count(@phase)=0 or contains(@phase,'live')">
+@@ -584,9 +591,13 @@
+     </xsl:otherwise>
+   </xsl:choose>
+   <xsl:text>  return err;
+-}
+-
+ </xsl:text>
++
++  <xsl:if test="not(contains(@jkernel,'yes'))">
++  <xsl:text>#endif // JVMTI_KERNEL&#xA;</xsl:text>
++  </xsl:if>
++
++  <xsl:text>}&#xA;</xsl:text>
+ </xsl:template>
+ 
+ <xsl:template match="function" mode="doCall">
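
The jvmtiEnter.xsl hunks above change generated code, not hand-written code:
every JVMTI entry point whose spec entry lacks jkernel='yes' is now wrapped so
that a kernel-VM build stubs it out. Roughly the shape of the emitted function
after this change (the name jvmti_GetExample, the local error constants, and
the elided body are illustrative; only the #ifdef skeleton comes from the
stylesheet):

    typedef int jvmtiError;
    #define JVMTI_ERROR_NONE           0
    #define JVMTI_ERROR_NOT_AVAILABLE  98

    jvmtiError jvmti_GetExample(void* env) {

    #ifdef JVMTI_KERNEL
      return JVMTI_ERROR_NOT_AVAILABLE;  // kernel VM: whole body stubbed out
    #else
      jvmtiError err = JVMTI_ERROR_NONE; // normal VM: full body runs
      // ... trace set-up, phase checks and the real call are emitted here ...
      return err;
    #endif // JVMTI_KERNEL
    }
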
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiEnvBase.cpp openjdk/hotspot/src/share/vm/prims/jvmtiEnvBase.cpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiEnvBase.cpp	2008-02-28 05:02:41.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiEnvBase.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)jvmtiEnvBase.cpp	1.89 07/05/17 16:04:59 JVM"
+-#endif
+ /*
+  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ # include "incls/_precompiled.incl"
+ # include "incls/_jvmtiEnvBase.cpp.incl"
+@@ -56,8 +53,10 @@
+ 
+   JvmtiManageCapabilities::initialize();
+ 
++#ifndef JVMTI_KERNEL
+   // register extension functions and events
+   JvmtiExtensions::register_extensions();
++#endif // !JVMTI_KERNEL
+ 
+ #ifdef JVMTI_TRACE
+   JvmtiTrace::initialize();
+@@ -77,7 +76,7 @@
+     // (which occurs at a safepoint) cannot occur simultaneously with this list
+     // addition.  Note: No_Safepoint_Verifier cannot, currently, be used before
+     // threads exist.
+-    JvmtiEnvIterator it; 
++    JvmtiEnvIterator it;
+     JvmtiEnvBase *previous_env = NULL;
+     for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) {
+       previous_env = env;
+@@ -136,7 +135,7 @@
+   JvmtiTrace::shutdown();
+ #endif
+ 
+-  // Dispose of event info and let the event controller call us back 
++  // Dispose of event info and let the event controller call us back
+   // in a locked state (env_dispose, below)
+   JvmtiEventController::env_dispose(this);
+ }
+@@ -150,9 +149,6 @@
+   // checking for a valid environment when setting callbacks (while
+   // holding the JvmtiThreadState_lock).
+ 
+-  JvmtiTagMap* tag_map_to_deallocate = _tag_map;
+-  set_tag_map(NULL);
+-
+   // Mark as invalid.
+   _magic = DISPOSED_MAGIC;
+ 
+@@ -163,10 +159,14 @@
+   // Same situation as with events (see above)
+   set_native_method_prefixes(0, NULL);
+ 
++#ifndef JVMTI_KERNEL
++  JvmtiTagMap* tag_map_to_deallocate = _tag_map;
++  set_tag_map(NULL);
+   // A tag map can be big, deallocate it now
+   if (tag_map_to_deallocate != NULL) {
+     delete tag_map_to_deallocate;
+   }
++#endif // !JVMTI_KERNEL
+ 
+   _needs_clean_up = true;
+ }
+@@ -175,14 +175,17 @@
+ JvmtiEnvBase::~JvmtiEnvBase() {
+   assert(SafepointSynchronize::is_at_safepoint(), "sanity check");
+ 
+-  // There is a small window of time during which the tag map of a 
++  // There is a small window of time during which the tag map of a
+   // disposed environment could have been reallocated.
+   // Make sure it is gone.
++#ifndef JVMTI_KERNEL
+   JvmtiTagMap* tag_map_to_deallocate = _tag_map;
+   set_tag_map(NULL);
++  // A tag map can be big, deallocate it now
+   if (tag_map_to_deallocate != NULL) {
+     delete tag_map_to_deallocate;
+   }
++#endif // !JVMTI_KERNEL
+ 
+   _magic = BAD_MAGIC;
+ }
+@@ -192,13 +195,13 @@
+ JvmtiEnvBase::periodic_clean_up() {
+   assert(SafepointSynchronize::is_at_safepoint(), "sanity check");
+ 
+-  // JvmtiEnvBase reference is saved in JvmtiEnvThreadState. So 
++  // JvmtiEnvBase reference is saved in JvmtiEnvThreadState. So
+   // clean up JvmtiThreadState before deleting JvmtiEnv pointer.
+   JvmtiThreadState::periodic_clean_up();
+ 
+   // Unlink all invalid environments from the list of environments
+   // and deallocate them
+-  JvmtiEnvIterator it; 
++  JvmtiEnvIterator it;
+   JvmtiEnvBase* previous_env = NULL;
+   JvmtiEnvBase* env = it.first();
+   while (env != NULL) {
+@@ -235,7 +238,7 @@
+       _inside |= thread->is_inside_jvmti_env_iteration();
+     }
+ 
+-    bool is_inside_jvmti_env_iteration() { 
++    bool is_inside_jvmti_env_iteration() {
+       return _inside;
+     }
+   };
+@@ -245,7 +248,7 @@
+     // deallocation should not occur if we are
+     ThreadInsideIterationClosure tiic;
+     Threads::threads_do(&tiic);
+-    if (!tiic.is_inside_jvmti_env_iteration() && 
++    if (!tiic.is_inside_jvmti_env_iteration() &&
+              !is_inside_dying_thread_env_iteration()) {
+       _needs_clean_up = false;
+       JvmtiEnvBase::periodic_clean_up();
+@@ -328,7 +331,7 @@
+     _native_method_prefixes = new_prefixes;
+   }
+ 
+-  // now that we know the new prefixes have been successfully installed we can 
++  // now that we know the new prefixes have been successfully installed we can
+   // safely remove the old ones
+   if (old_prefix_count != 0) {
+     for (int i = 0; i < old_prefix_count; i++) {
+@@ -347,15 +350,15 @@
+ // Return in a resource allocated array.
+ char**
+ JvmtiEnvBase::get_all_native_method_prefixes(int* count_ptr) {
+-  assert(Threads::number_of_threads() == 0 || 
+-         SafepointSynchronize::is_at_safepoint() || 
++  assert(Threads::number_of_threads() == 0 ||
++         SafepointSynchronize::is_at_safepoint() ||
+          JvmtiThreadState_lock->is_locked(),
+          "sanity check");
+ 
+   int total_count = 0;
+   GrowableArray<char*>* prefix_array =new GrowableArray<char*>(5);
+ 
+-  JvmtiEnvIterator it; 
++  JvmtiEnvIterator it;
+   for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) {
+     int prefix_count = env->get_native_method_prefix_count();
+     char** prefixes = env->get_native_method_prefixes();
+@@ -399,7 +402,6 @@
+   }
+ }
+ 
+-
+ // Called from JVMTI entry points which perform stack walking. If the
+ // associated JavaThread is the current thread, then wait_for_suspend
+ // is not used. Otherwise, it determines if we should wait for the
+@@ -407,7 +409,7 @@
+ // releases the suspension mechanism should be reimplemented so this
+ // is not necessary.)
+ //
+-bool 
++bool
+ JvmtiEnvBase::is_thread_fully_suspended(JavaThread* thr, bool wait_for_suspend, uint32_t *bits) {
+   // "other" threads require special handling
+   if (thr != JavaThread::current()) {
+@@ -443,7 +445,7 @@
+   return mem;
+ }
+ 
+-  
++
+ //
+ // Threads
+ //
+@@ -456,7 +458,7 @@
+ 
+   jobject *objArray = (jobject *) jvmtiMalloc(sizeof(jobject) * length);
+   NULL_CHECK(objArray, NULL);
+-  
++
+   for (int i=0; i<length; i++) {
+     objArray[i] = jni_reference(handles[i]);
+   }
+@@ -523,28 +525,30 @@
+   return (jclass)jni_reference(Klass::cast(k)->java_mirror());
+ }
+ 
+-// 
+-// Field Information 
+-// 
+-
+-bool 
+-JvmtiEnvBase::get_field_descriptor(klassOop k, jfieldID field, fieldDescriptor* fd) { 
+-  if (!jfieldIDWorkaround::is_valid_jfieldID(k, field)) { 
+-    return false; 
+-  } 
+-  bool found = false; 
+-  if (jfieldIDWorkaround::is_static_jfieldID(field)) { 
+-    JNIid* id = jfieldIDWorkaround::from_static_jfieldID(field); 
+-    int offset = id->offset(); 
+-    klassOop holder = id->holder(); 
+-    found = instanceKlass::cast(holder)->find_local_field_from_offset(offset, true, fd); 
+-  } else { 
+-    // Non-static field. The fieldID is really the offset of the field within the object. 
+-    int offset = jfieldIDWorkaround::from_instance_jfieldID(k, field); 
+-    found = instanceKlass::cast(k)->find_field_from_offset(offset, false, fd); 
+-  } 
+-  return found; 
+-} 
++#ifndef JVMTI_KERNEL
++
++//
++// Field Information
++//
++
++bool
++JvmtiEnvBase::get_field_descriptor(klassOop k, jfieldID field, fieldDescriptor* fd) {
++  if (!jfieldIDWorkaround::is_valid_jfieldID(k, field)) {
++    return false;
++  }
++  bool found = false;
++  if (jfieldIDWorkaround::is_static_jfieldID(field)) {
++    JNIid* id = jfieldIDWorkaround::from_static_jfieldID(field);
++    int offset = id->offset();
++    klassOop holder = id->holder();
++    found = instanceKlass::cast(holder)->find_local_field_from_offset(offset, true, fd);
++  } else {
++    // Non-static field. The fieldID is really the offset of the field within the object.
++    int offset = jfieldIDWorkaround::from_instance_jfieldID(k, field);
++    found = instanceKlass::cast(k)->find_field_from_offset(offset, false, fd);
++  }
++  return found;
++}
+ 
+ //
+ // Object Monitor Information
+@@ -593,13 +597,13 @@
+ #endif
+   assert((SafepointSynchronize::is_at_safepoint() ||
+           is_thread_fully_suspended(java_thread, false, &debug_bits)),
+-         "at safepoint or target thread is suspended");    
++         "at safepoint or target thread is suspended");
+   oop obj = NULL;
+   ObjectMonitor *mon = java_thread->current_waiting_monitor();
+   if (mon == NULL) {
+     // thread is not doing an Object.wait() call
+     mon = java_thread->current_pending_monitor();
+-    if (mon != NULL) { 
++    if (mon != NULL) {
+       // The thread is trying to enter() or raw_enter() an ObjectMonitor.
+       obj = (oop)mon->object();
+       // If obj == NULL, then ObjectMonitor is raw which doesn't count
+@@ -644,10 +648,10 @@
+          jvf = jvf->java_sender()) {
+       if (depth++ < MaxJavaStackTraceDepth) {  // check for stack too deep
+         // add locked objects for this frame into list
+-        err = get_locked_objects_in_frame(calling_thread, java_thread, jvf, owned_monitors_list, depth-1);  
+-	if (err != JVMTI_ERROR_NONE) {
+-	  return err;
+-	}
++        err = get_locked_objects_in_frame(calling_thread, java_thread, jvf, owned_monitors_list, depth-1);
++        if (err != JVMTI_ERROR_NONE) {
++          return err;
++        }
+       }
+     }
+   }
+@@ -661,8 +665,8 @@
+ }
+ 
+ // Save JNI local handles for any objects that this frame owns.
+-jvmtiError  
+-JvmtiEnvBase::get_locked_objects_in_frame(JavaThread* calling_thread, JavaThread* java_thread, 
++jvmtiError
++JvmtiEnvBase::get_locked_objects_in_frame(JavaThread* calling_thread, JavaThread* java_thread,
+                                  javaVFrame *jvf, GrowableArray<jvmtiMonitorStackDepthInfo*>* owned_monitors_list, int stack_depth) {
+   jvmtiError err = JVMTI_ERROR_NONE;
+   ResourceMark rm;
+@@ -727,12 +731,12 @@
+         continue;
+       }
+     }
+-    
++
+     // add the owning object to our list
+     jvmtiMonitorStackDepthInfo *jmsdi;
+     err = allocate(sizeof(jvmtiMonitorStackDepthInfo), (unsigned char **)&jmsdi);
+     if (err != JVMTI_ERROR_NONE) {
+-	return err;
++        return err;
+     }
+     Handle hobj(obj);
+     jmsdi->monitor = jni_reference(calling_thread, hobj);
+@@ -744,7 +748,7 @@
+ }
+ 
+ jvmtiError
+-JvmtiEnvBase::get_stack_trace(JavaThread *java_thread, 
++JvmtiEnvBase::get_stack_trace(JavaThread *java_thread,
+                               jint start_depth, jint max_count,
+                               jvmtiFrameInfo* frame_buffer, jint* count_ptr) {
+ #ifdef ASSERT
+@@ -756,52 +760,52 @@
+   int count = 0;
+   if (java_thread->has_last_Java_frame()) {
+     RegisterMap reg_map(java_thread);
+-    Thread* current_thread = Thread::current(); 
++    Thread* current_thread = Thread::current();
+     ResourceMark rm(current_thread);
+     javaVFrame *jvf = java_thread->last_java_vframe(&reg_map);
+     HandleMark hm(current_thread);
+     if (start_depth != 0) {
+       if (start_depth > 0) {
+-	for (int j = 0; j < start_depth && jvf != NULL; j++) {
+-	  jvf = jvf->java_sender();
+-	}
+-	if (jvf == NULL) {
+-	  // start_depth is deeper than the stack depth
+-	  return JVMTI_ERROR_ILLEGAL_ARGUMENT;
+-	}
++        for (int j = 0; j < start_depth && jvf != NULL; j++) {
++          jvf = jvf->java_sender();
++        }
++        if (jvf == NULL) {
++          // start_depth is deeper than the stack depth
++          return JVMTI_ERROR_ILLEGAL_ARGUMENT;
++        }
+       } else { // start_depth < 0
+-	// we are referencing the starting depth based on the oldest
+-	// part of the stack.
+-	// optimize to limit the number of times that java_sender() is called
+-	javaVFrame *jvf_cursor = jvf;
+-	javaVFrame *jvf_prev = NULL;
+-	javaVFrame *jvf_prev_prev;
+-	int j = 0;
+-	while (jvf_cursor != NULL) {
+-	  jvf_prev_prev = jvf_prev;
+-	  jvf_prev = jvf_cursor;
+-	  for (j = 0; j > start_depth && jvf_cursor != NULL; j--) {
+-	    jvf_cursor = jvf_cursor->java_sender();
+-	  }
+-	}
+-	if (j == start_depth) {
+-	  // previous pointer is exactly where we want to start
+-	  jvf = jvf_prev;
+-	} else {
+-	  // we need to back up further to get to the right place
+-	  if (jvf_prev_prev == NULL) {
+-	    // the -start_depth is greater than the stack depth
+-	    return JVMTI_ERROR_ILLEGAL_ARGUMENT;
+-	  }
+-	  // j now is the number of frames on the stack starting with
+-	  // jvf_prev, we start from jvf_prev_prev and move older on
+-	  // the stack that many, the result is -start_depth frames
+-	  // remaining.
+-	  jvf = jvf_prev_prev;
+-	  for (; j < 0; j++) {
+-	    jvf = jvf->java_sender();
+-	  }
+-	}	  
++        // we are referencing the starting depth based on the oldest
++        // part of the stack.
++        // optimize to limit the number of times that java_sender() is called
++        javaVFrame *jvf_cursor = jvf;
++        javaVFrame *jvf_prev = NULL;
++        javaVFrame *jvf_prev_prev;
++        int j = 0;
++        while (jvf_cursor != NULL) {
++          jvf_prev_prev = jvf_prev;
++          jvf_prev = jvf_cursor;
++          for (j = 0; j > start_depth && jvf_cursor != NULL; j--) {
++            jvf_cursor = jvf_cursor->java_sender();
++          }
++        }
++        if (j == start_depth) {
++          // previous pointer is exactly where we want to start
++          jvf = jvf_prev;
++        } else {
++          // we need to back up further to get to the right place
++          if (jvf_prev_prev == NULL) {
++            // the -start_depth is greater than the stack depth
++            return JVMTI_ERROR_ILLEGAL_ARGUMENT;
++          }
++          // j now is the number of frames on the stack starting with
++          // jvf_prev, we start from jvf_prev_prev and move older on
++          // the stack that many, the result is -start_depth frames
++          // remaining.
++          jvf = jvf_prev_prev;
++          for (; j < 0; j++) {
++            jvf = jvf->java_sender();
++          }
++        }
+       }
+     }
+     for (; count < max_count && jvf != NULL; count++) {
+@@ -828,7 +832,7 @@
+ }
+ 
+ jvmtiError
+-JvmtiEnvBase::get_frame_location(JavaThread *java_thread, jint depth, 
++JvmtiEnvBase::get_frame_location(JavaThread *java_thread, jint depth,
+                                  jmethodID* method_ptr, jlocation* location_ptr) {
+ #ifdef ASSERT
+   uint32_t debug_bits = 0;
+@@ -836,7 +840,7 @@
+   assert((SafepointSynchronize::is_at_safepoint() ||
+           is_thread_fully_suspended(java_thread, false, &debug_bits)),
+          "at safepoint or target thread is suspended");
+-  Thread* current_thread = Thread::current(); 
++  Thread* current_thread = Thread::current();
+   ResourceMark rm(current_thread);
+ 
+   vframe *vf = vframeFor(java_thread, depth);
+@@ -852,7 +856,7 @@
+ #ifdef PRODUCT
+   if (!vf->is_java_frame()) {
+     return JVMTI_ERROR_INTERNAL;
+-  } 
++  }
+ #endif
+ 
+   HandleMark hm(current_thread);
+@@ -890,7 +894,7 @@
+   jvmtiMonitorUsage ret = {
+       NULL, 0, 0, NULL, 0, NULL
+   };
+-  
++
+   uint32_t debug_bits = 0;
+   // first derive the object's owner and entry_count (if any)
+   {
+@@ -934,7 +938,7 @@
+       owning_thread = Threads::owning_thread_from_monitor_owner(owner, !at_safepoint);
+       assert(owning_thread != NULL, "sanity check");
+       if (owning_thread != NULL) {  // robustness
+-        // The monitor's owner either has to be the current thread, at safepoint 
++        // The monitor's owner either has to be the current thread, at safepoint
+         // or it has to be suspended. Any of these conditions will prevent both
+         // contending and waiting threads from modifying the state of
+         // the monitor.
+@@ -985,13 +989,13 @@
+   if (err != JVMTI_ERROR_NONE) {
+     return err;
+   }
+-  err = allocate(ret.notify_waiter_count * sizeof(jthread *), 
++  err = allocate(ret.notify_waiter_count * sizeof(jthread *),
+                  (unsigned char**)&ret.notify_waiters);
+   if (err != JVMTI_ERROR_NONE) {
+     deallocate((unsigned char*)ret.waiters);
+     return err;
+   }
+- 
++
+   // now derive the rest of the fields
+   if (mon != NULL) {
+     // this object has a heavyweight monitor
+@@ -1008,7 +1012,7 @@
+         // we have contending threads
+         ResourceMark rm;
+         // get_pending_threads returns only java thread so we do not need to
+-        // check for  non java threads.          
++        // check for  non java threads.
+         GrowableArray<JavaThread*>* wantList = Threads::get_pending_threads(
+           nWant, (address)mon, !at_safepoint);
+         if (wantList->length() < nWant) {
+@@ -1083,7 +1087,7 @@
+ ResourceTracker::~ResourceTracker() {
+   if (_failed) {
+     for (int i=0; i<_allocations->length(); i++) {
+-      _env->deallocate(_allocations->at(i));	
++      _env->deallocate(_allocations->at(i));
+     }
+   }
+   delete _allocations;
+@@ -1091,7 +1095,7 @@
+ 
+ jvmtiError ResourceTracker::allocate(jlong size, unsigned char** mem_ptr) {
+   unsigned char *ptr;
+-  jvmtiError err = _env->allocate(size, &ptr);    
++  jvmtiError err = _env->allocate(size, &ptr);
+   if (err == JVMTI_ERROR_NONE) {
+     _allocations->append(ptr);
+     *mem_ptr = ptr;
+@@ -1102,7 +1106,7 @@
+   return err;
+  }
+ 
+-unsigned char* ResourceTracker::allocate(jlong size) {    
++unsigned char* ResourceTracker::allocate(jlong size) {
+   unsigned char* ptr;
+   allocate(size, &ptr);
+   return ptr;
+@@ -1128,8 +1132,8 @@
+ // may be null if the thread is new or has exited.
+ void
+ VM_GetMultipleStackTraces::fill_frames(jthread jt, JavaThread *thr, oop thread_oop) {
+-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");   
+- 
++  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
++
+   jint state = 0;
+   struct StackInfoNode *node = NEW_RESOURCE_OBJ(struct StackInfoNode);
+   jvmtiStackInfo *infop = &(node->info);
+@@ -1137,12 +1141,12 @@
+   set_head(node);
+   infop->frame_count = 0;
+   infop->thread = jt;
+-  
++
+   if (thread_oop != NULL) {
+     // get most state bits
+     state = (jint)java_lang_Thread::get_thread_status(thread_oop);
+   }
+-  
++
+   if (thr != NULL) {    // add more state bits if there is a JavaThead to query
+     // same as is_being_ext_suspended() but without locking
+     if (thr->is_ext_suspended() || thr->is_external_suspend()) {
+@@ -1152,13 +1156,13 @@
+     if (jts == _thread_in_native) {
+       state |= JVMTI_THREAD_STATE_IN_NATIVE;
+     }
+-    OSThread* osThread = thr->osthread(); 
++    OSThread* osThread = thr->osthread();
+     if (osThread != NULL && osThread->interrupted()) {
+       state |= JVMTI_THREAD_STATE_INTERRUPTED;
+     }
+   }
+   infop->state = state;
+-  
++
+   if (thr != NULL || (state & JVMTI_THREAD_STATE_ALIVE) != 0) {
+     infop->frame_buffer = NEW_RESOURCE_ARRAY(jvmtiFrameInfo, max_frame_count());
+     env()->get_stack_trace(thr, 0, max_frame_count(),
+@@ -1175,7 +1179,7 @@
+ void
+ VM_GetMultipleStackTraces::allocate_and_fill_stacks(jint thread_count) {
+   // do I need to worry about alignment issues?
+-  jlong alloc_size =  thread_count       * sizeof(jvmtiStackInfo) 
++  jlong alloc_size =  thread_count       * sizeof(jvmtiStackInfo)
+                     + _frame_count_total * sizeof(jvmtiFrameInfo);
+   env()->allocate(alloc_size, (unsigned char **)&_stack_info);
+ 
+@@ -1201,14 +1205,14 @@
+     }
+   }
+   assert(si == _stack_info, "the last copied stack info must be the first record");
+-  assert((unsigned char *)fi == ((unsigned char *)_stack_info) + alloc_size, 
++  assert((unsigned char *)fi == ((unsigned char *)_stack_info) + alloc_size,
+          "the last copied frame info must be the last record");
+ }
+ 
+ 
+ void
+ VM_GetThreadListStackTraces::doit() {
+-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");   
++  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+ 
+   ResourceMark rm;
+   for (int i = 0; i < _thread_count; ++i) {
+@@ -1225,13 +1229,13 @@
+ 
+ void
+ VM_GetAllStackTraces::doit() {
+-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");   
++  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+ 
+   ResourceMark rm;
+   _final_thread_count = 0;
+   for (JavaThread *jt = Threads::first(); jt != NULL; jt = jt->next()) {
+     oop thread_oop = jt->threadObj();
+-    if (thread_oop != NULL && 
++    if (thread_oop != NULL &&
+         !jt->is_exiting() &&
+         java_lang_Thread::is_alive(thread_oop) &&
+         !jt->is_hidden_from_external_view()) {
+@@ -1336,7 +1340,7 @@
+   }
+   {
+     // The same as for PopFrame. Workaround bug:
+-    //  4812902: popFrame hangs if the method is waiting at a synchronize 
++    //  4812902: popFrame hangs if the method is waiting at a synchronize
+     // Catch this condition and return an error to avoid hanging.
+     // Now JVMTI spec allows an implementation to bail out with an opaque
+     // frame error.
+@@ -1397,7 +1401,7 @@
+       err = _env->allocate(sizeof(jvmtiMonitorStackDepthInfo), (unsigned char **)&jmsdi);
+       if (err != JVMTI_ERROR_NONE) {
+         _error = err;
+-	return;
++        return;
+       }
+       Handle hobj(obj);
+       jmsdi->monitor = _env->jni_reference(_calling_thread, hobj);
+@@ -1408,3 +1412,4 @@
+   }
+ }
+ 
++#endif // !JVMTI_KERNEL
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiEnvBase.hpp openjdk/hotspot/src/share/vm/prims/jvmtiEnvBase.hpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiEnvBase.hpp	2008-02-28 05:02:41.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiEnvBase.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jvmtiEnvBase.hpp	1.68 07/05/23 10:53:43 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #ifndef _JAVA_JVMTIENVBASE_H_
+@@ -45,7 +42,7 @@
+ // allowed in jvmti.
+ 
+ class JvmtiEnvBase : public CHeapObj {
+-    
++
+  private:
+ 
+   static JvmtiEnvBase*     _head_environment;  // head of environment list
+@@ -54,18 +51,23 @@
+   static jvmtiPhase        _phase;
+   static volatile int      _dying_thread_env_iteration_count;
+ 
+- public:  
++ public:
++
++  enum {
++    JDK15_JVMTI_VERSION = JVMTI_VERSION_1_0 +  33,  /* version: 1.0.33  */
++    JDK16_JVMTI_VERSION = JVMTI_VERSION_1_1 + 102   /* version: 1.1.102 */
++  };
+ 
+   static jvmtiPhase  get_phase()                    { return _phase; }
+   static void  set_phase(jvmtiPhase phase)          { _phase = phase; }
+-  static bool is_vm_live()                          { return _phase == JVMTI_PHASE_LIVE; } 
++  static bool is_vm_live()                          { return _phase == JVMTI_PHASE_LIVE; }
+ 
+   static void entering_dying_thread_env_iteration() { ++_dying_thread_env_iteration_count; }
+   static void leaving_dying_thread_env_iteration()  { --_dying_thread_env_iteration_count; }
+   static bool is_inside_dying_thread_env_iteration(){ return _dying_thread_env_iteration_count > 0; }
+ 
+  private:
+-    
++
+   enum {
+       JVMTI_MAGIC    = 0x71EE,
+       DISPOSED_MAGIC = 0xDEFC,
+@@ -87,7 +89,7 @@
+   static volatile bool _needs_clean_up;
+   char** _native_method_prefixes;
+   int    _native_method_prefix_count;
+-        
++
+  protected:
+   JvmtiEnvBase();
+   ~JvmtiEnvBase();
+@@ -115,7 +117,7 @@
+   JvmtiEnv* next_environment()                     { return (JvmtiEnv*)_next; }
+   void set_next_environment(JvmtiEnvBase* env)     { _next = env; }
+   static JvmtiEnv* head_environment()              { return (JvmtiEnv*)_head_environment; }
+- 
++
+  public:
+ 
+   bool is_valid()                                  { return _magic == JVMTI_MAGIC; }
+@@ -123,7 +125,7 @@
+   bool is_retransformable()                        { return _is_retransformable; }
+ 
+   static ByteSize jvmti_external_offset() {
+-    return byte_offset_of(JvmtiEnvBase, _jvmti_external); 
++    return byte_offset_of(JvmtiEnvBase, _jvmti_external);
+   };
+ 
+   static JvmtiEnv* JvmtiEnv_from_jvmti_env(jvmtiEnv *env) {
+@@ -145,8 +147,8 @@
+ 
+   static void check_for_periodic_clean_up();
+ 
+-  JvmtiEnvEventEnable *env_event_enable() { 
+-    return &_env_event_enable; 
++  JvmtiEnvEventEnable *env_event_enable() {
++    return &_env_event_enable;
+   }
+ 
+   jvmtiError allocate(jlong size, unsigned char** mem_ptr) {
+@@ -156,7 +158,7 @@
+     if (size == 0) {
+       *mem_ptr = NULL;
+     } else {
+-      *mem_ptr = (unsigned char *)os::malloc((size_t)size); 
++      *mem_ptr = (unsigned char *)os::malloc((size_t)size);
+       if (*mem_ptr == NULL) {
+         return JVMTI_ERROR_OUT_OF_MEMORY;
+       }
+@@ -172,26 +174,26 @@
+   }
+ 
+ 
+-  // Memory functions 
++  // Memory functions
+   unsigned char* jvmtiMalloc(jlong size);  // don't use this - call allocate
+ 
+   // method to create a local handle
+   jobject jni_reference(Handle hndl) {
+-    return JNIHandles::make_local(hndl());  
++    return JNIHandles::make_local(hndl());
+   }
+ 
+   // method to create a local handle.
+   // This function allows caller to specify which
+   // threads local handle table to use.
+   jobject jni_reference(JavaThread *thread, Handle hndl) {
+-    return JNIHandles::make_local(thread, hndl());  
++    return JNIHandles::make_local(thread, hndl());
+   }
+ 
+   // method to destroy a local handle
+   void destroy_jni_reference(jobject jobj) {
+-    JNIHandles::destroy_local(jobj);  
++    JNIHandles::destroy_local(jobj);
+   }
+-    
++
+   // method to destroy a local handle.
+   // This function allows caller to specify which
+   // threads local handle table to use although currently it is
+@@ -199,14 +201,14 @@
+   void destroy_jni_reference(JavaThread *thread, jobject jobj) {
+     destroy_jni_reference(jobj);
+   }
+-    
++
+   jvmtiEnv* jvmti_external() { return &_jvmti_external; };
+ 
+ // Event Dispatch
+ 
+   bool has_callback(jvmtiEvent event_type) {
+-    assert(event_type >= JVMTI_MIN_EVENT_TYPE_VAL && 
+-	   event_type <= JVMTI_MAX_EVENT_TYPE_VAL, "checking");
++    assert(event_type >= JVMTI_MIN_EVENT_TYPE_VAL &&
++           event_type <= JVMTI_MAX_EVENT_TYPE_VAL, "checking");
+     return ((void**)&_event_callbacks)[event_type-JVMTI_MIN_EVENT_TYPE_VAL] != NULL;
+   }
+ 
+@@ -229,8 +231,8 @@
+ 
+   // return true if event is enabled globally or for any thread
+   // True only if there is a callback for it.
+-  bool is_enabled(jvmtiEvent event_type) { 
+-    return _env_event_enable.is_enabled(event_type); 
++  bool is_enabled(jvmtiEvent event_type) {
++    return _env_event_enable.is_enabled(event_type);
+   }
+ 
+ // Random Utilities
+@@ -247,12 +249,12 @@
+ 
+   // convert to a jni jclass from a non-null klassOop
+   jclass get_jni_class_non_null(klassOop k);
+-    
++
+   void update_klass_field_access_flag(fieldDescriptor *fd);
+ 
+   jint count_locked_objects(JavaThread *java_thread, Handle hobj);
+-  jvmtiError get_locked_objects_in_frame(JavaThread *calling_thread, 
+-				   JavaThread* java_thread,
++  jvmtiError get_locked_objects_in_frame(JavaThread *calling_thread,
++                                   JavaThread* java_thread,
+                                    javaVFrame *jvf,
+                                    GrowableArray<jvmtiMonitorStackDepthInfo*>* owned_monitors_list,
+                                    jint depth);
+@@ -269,14 +271,14 @@
+ 
+   // JVMTI API helper functions which are called at safepoint or thread is suspended.
+   jvmtiError get_frame_count(JvmtiThreadState *state, jint *count_ptr);
+-  jvmtiError get_frame_location(JavaThread* java_thread, jint depth, 
++  jvmtiError get_frame_location(JavaThread* java_thread, jint depth,
+                                               jmethodID* method_ptr, jlocation* location_ptr);
+   jvmtiError get_object_monitor_usage(JavaThread *calling_thread,
+                                                     jobject object, jvmtiMonitorUsage* info_ptr);
+-  jvmtiError get_stack_trace(JavaThread *java_thread, 
++  jvmtiError get_stack_trace(JavaThread *java_thread,
+                                            jint stack_depth, jint max_count,
+                                            jvmtiFrameInfo* frame_buffer, jint* count_ptr);
+-  jvmtiError get_current_contended_monitor(JavaThread *calling_thread, 
++  jvmtiError get_current_contended_monitor(JavaThread *calling_thread,
+                                                          JavaThread *java_thread,
+                                                          jobject *monitor_ptr);
+   jvmtiError get_owned_monitors(JavaThread *calling_thread, JavaThread* java_thread,
+@@ -294,7 +296,7 @@
+  private:
+   bool _entry_was_marked;
+  public:
+-  JvmtiEnvIterator() { 
++  JvmtiEnvIterator() {
+     if (Threads::number_of_threads() == 0) {
+       _entry_was_marked = false; // we are single-threaded, no need
+     } else {
+@@ -302,16 +304,16 @@
+       _entry_was_marked = true;
+     }
+   }
+-  ~JvmtiEnvIterator() { 
++  ~JvmtiEnvIterator() {
+     if (_entry_was_marked) {
+-      Thread::current()->leaving_jvmti_env_iteration(); 
++      Thread::current()->leaving_jvmti_env_iteration();
+     }
+   }
+   JvmtiEnv* first()                 { return JvmtiEnvBase::head_environment(); }
+   JvmtiEnv* next(JvmtiEnvBase* env) { return env->next_environment(); }
+ };
+ 
+-    
++
+ // VM operation to get monitor information with stack depth.
+ class VM_GetOwnedMonitorInfo : public VM_Operation {
+ private:
+@@ -399,7 +401,7 @@
+   jvmtiError _result;
+ 
+ public:
+-  VM_GetStackTrace(JvmtiEnv *env, JavaThread *java_thread, 
++  VM_GetStackTrace(JvmtiEnv *env, JavaThread *java_thread,
+                    jint start_depth, jint max_count,
+                    jvmtiFrameInfo* frame_buffer, jint* count_ptr) {
+     _env = env;
+@@ -463,7 +465,7 @@
+ 
+ public:
+   VM_GetAllStackTraces(JvmtiEnv *env, JavaThread *calling_thread,
+-                       jint max_frame_count) 
++                       jint max_frame_count)
+       : VM_GetMultipleStackTraces(env, max_frame_count) {
+     _calling_thread = calling_thread;
+   }
+@@ -479,7 +481,7 @@
+   const jthread* _thread_list;
+ 
+ public:
+-  VM_GetThreadListStackTraces(JvmtiEnv *env, jint thread_count, const jthread* thread_list, jint max_frame_count) 
++  VM_GetThreadListStackTraces(JvmtiEnv *env, jint thread_count, const jthread* thread_list, jint max_frame_count)
+       : VM_GetMultipleStackTraces(env, max_frame_count) {
+     _thread_count = thread_count;
+     _thread_list = thread_list;
+@@ -521,7 +523,7 @@
+   jvmtiError _result;
+ 
+ public:
+-  VM_GetFrameLocation(JvmtiEnv *env, JavaThread* java_thread, jint depth, 
++  VM_GetFrameLocation(JvmtiEnv *env, JavaThread* java_thread, jint depth,
+                       jmethodID* method_ptr, jlocation* location_ptr) {
+     _env = env;
+     _java_thread = java_thread;
+@@ -543,7 +545,7 @@
+ // ResourceTracker works a little like a ResourceMark. All allocates
+ // using the resource tracker are recorded. If an allocate using the
+ // resource tracker fails the destructor will free any resources
+-// that were allocated using the tracker. 
++// that were allocated using the tracker.
+ // The motive for this class is to avoid messy error recovery code
+ // in situations where multiple allocations are done in sequence. If
+ // the second or subsequent allocation fails it avoids any code to
+@@ -557,7 +559,7 @@
+ class ResourceTracker : public StackObj {
+  private:
+   JvmtiEnv* _env;
+-  GrowableArray<unsigned char*> *_allocations;	
++  GrowableArray<unsigned char*> *_allocations;
+   bool _failed;
+  public:
+   ResourceTracker(JvmtiEnv* env);
+@@ -587,7 +589,7 @@
+     _error = JVMTI_ERROR_NONE;
+     _env = env;
+   }
+-  void do_monitor(ObjectMonitor* mon); 
++  void do_monitor(ObjectMonitor* mon);
+   jvmtiError error() { return _error;}
+ };
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiEnv.cpp openjdk/hotspot/src/share/vm/prims/jvmtiEnv.cpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiEnv.cpp	2008-02-28 05:02:41.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiEnv.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)jvmtiEnv.cpp	1.162 07/05/23 10:53:40 JVM"
+-#endif
+ /*
+  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,79 +19,588 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ 
+-# include "incls/_precompiled.incl"
+-# include "incls/_jvmtiEnv.cpp.incl"
++# include "incls/_precompiled.incl"
++# include "incls/_jvmtiEnv.cpp.incl"
++
++
++#define FIXLATER 0 // REMOVE this when completed.
++
++ // FIXLATER: hook into JvmtiTrace
++#define TraceJVMTICalls false
++
++JvmtiEnv::JvmtiEnv() : JvmtiEnvBase() {
++}
++
++JvmtiEnv::~JvmtiEnv() {
++}
++
++JvmtiEnv*
++JvmtiEnv::create_a_jvmti() {
++  return new JvmtiEnv();
++}
++
++// VM operation class to copy jni function table at safepoint.
++// More than one java threads or jvmti agents may be reading/
++// modifying jni function tables. To reduce the risk of bad
++// interaction b/w these threads it is copied at safepoint.
++class VM_JNIFunctionTableCopier : public VM_Operation {
++ private:
++  const struct JNINativeInterface_ *_function_table;
++ public:
++  VM_JNIFunctionTableCopier(const struct JNINativeInterface_ *func_tbl) {
++    _function_table = func_tbl;
++  };
++
++  VMOp_Type type() const { return VMOp_JNIFunctionTableCopier; }
++  void doit() {
++    copy_jni_function_table(_function_table);
++  };
++};
++
++//
++// Do not change the "prefix" marker below, everything above it is copied
++// unchanged into the filled stub, everything below is controlled by the
++// stub filler (only method bodies are carried forward, and then only for
++// functionality still in the spec).
++//
++// end file prefix
++
++  //
++  // Memory Management functions
++  //
++
++// mem_ptr - pre-checked for NULL
++jvmtiError
++JvmtiEnv::Allocate(jlong size, unsigned char** mem_ptr) {
++  return allocate(size, mem_ptr);
++} /* end Allocate */
++
++
++// mem - NULL is a valid value, must be checked
++jvmtiError
++JvmtiEnv::Deallocate(unsigned char* mem) {
++  return deallocate(mem);
++} /* end Deallocate */
++
++// Threads_lock NOT held, java_thread not protected by lock
++// java_thread - pre-checked
++// data - NULL is a valid value, must be checked
++jvmtiError
++JvmtiEnv::SetThreadLocalStorage(JavaThread* java_thread, const void* data) {
++  JvmtiThreadState* state = java_thread->jvmti_thread_state();
++  if (state == NULL) {
++    if (data == NULL) {
++      // leaving state unset same as data set to NULL
++      return JVMTI_ERROR_NONE;
++    }
++    // otherwise, create the state
++    state = JvmtiThreadState::state_for(java_thread);
++  }
++  state->env_thread_state(this)->set_agent_thread_local_storage_data((void*)data);
++  return JVMTI_ERROR_NONE;
++} /* end SetThreadLocalStorage */
++
++
++// Threads_lock NOT held
++// thread - NOT pre-checked
++// data_ptr - pre-checked for NULL
++jvmtiError
++JvmtiEnv::GetThreadLocalStorage(jthread thread, void** data_ptr) {
++  JavaThread* current_thread = JavaThread::current();
++  if (thread == NULL) {
++    JvmtiThreadState* state = current_thread->jvmti_thread_state();
++    *data_ptr = (state == NULL) ? NULL :
++      state->env_thread_state(this)->get_agent_thread_local_storage_data();
++  } else {
++
++    // jvmti_GetThreadLocalStorage is "in native" and doesn't transition
++    // the thread to _thread_in_vm. However, when the TLS for a thread
++    // other than the current thread is required we need to transition
++    // from native so as to resolve the jthread.
++
++    ThreadInVMfromNative __tiv(current_thread);
++    __ENTRY(jvmtiError, JvmtiEnv::GetThreadLocalStorage , current_thread)
++    debug_only(VMNativeEntryWrapper __vew;)
++
++    oop thread_oop = JNIHandles::resolve_external_guard(thread);
++    if (thread_oop == NULL) {
++      return JVMTI_ERROR_INVALID_THREAD;
++    }
++    if (!thread_oop->is_a(SystemDictionary::thread_klass())) {
++      return JVMTI_ERROR_INVALID_THREAD;
++    }
++    JavaThread* java_thread = java_lang_Thread::thread(thread_oop);
++    if (java_thread == NULL) {
++      return JVMTI_ERROR_THREAD_NOT_ALIVE;
++    }
++    JvmtiThreadState* state = java_thread->jvmti_thread_state();
++    *data_ptr = (state == NULL) ? NULL :
++      state->env_thread_state(this)->get_agent_thread_local_storage_data();
++  }
++  return JVMTI_ERROR_NONE;
++} /* end GetThreadLocalStorage */
++
++  //
++  // Class functions
++  //
++
++// class_count_ptr - pre-checked for NULL
++// classes_ptr - pre-checked for NULL
++jvmtiError
++JvmtiEnv::GetLoadedClasses(jint* class_count_ptr, jclass** classes_ptr) {
++  return JvmtiGetLoadedClasses::getLoadedClasses(this, class_count_ptr, classes_ptr);
++} /* end GetLoadedClasses */
++
++
++// initiating_loader - NULL is a valid value, must be checked
++// class_count_ptr - pre-checked for NULL
++// classes_ptr - pre-checked for NULL
++jvmtiError
++JvmtiEnv::GetClassLoaderClasses(jobject initiating_loader, jint* class_count_ptr, jclass** classes_ptr) {
++  return JvmtiGetLoadedClasses::getClassLoaderClasses(this, initiating_loader,
++                                                  class_count_ptr, classes_ptr);
++} /* end GetClassLoaderClasses */
++
++// k_mirror - may be primitive, this must be checked
++// is_modifiable_class_ptr - pre-checked for NULL
++jvmtiError
++JvmtiEnv::IsModifiableClass(oop k_mirror, jboolean* is_modifiable_class_ptr) {
++  *is_modifiable_class_ptr = VM_RedefineClasses::is_modifiable_class(k_mirror)?
++                                                       JNI_TRUE : JNI_FALSE;
++  return JVMTI_ERROR_NONE;
++} /* end IsModifiableClass */
++
++// class_count - pre-checked to be greater than or equal to 0
++// classes - pre-checked for NULL
++jvmtiError
++JvmtiEnv::RetransformClasses(jint class_count, const jclass* classes) {
++//TODO: add locking
++
++  int index;
++  JavaThread* current_thread = JavaThread::current();
++  ResourceMark rm(current_thread);
++
++  jvmtiClassDefinition* class_definitions =
++                            NEW_RESOURCE_ARRAY(jvmtiClassDefinition, class_count);
++  NULL_CHECK(class_definitions, JVMTI_ERROR_OUT_OF_MEMORY);
++
++  for (index = 0; index < class_count; index++) {
++    HandleMark hm(current_thread);
++
++    jclass jcls = classes[index];
++    oop k_mirror = JNIHandles::resolve_external_guard(jcls);
++    if (k_mirror == NULL) {
++      return JVMTI_ERROR_INVALID_CLASS;
++    }
++    if (!k_mirror->is_a(SystemDictionary::class_klass())) {
++      return JVMTI_ERROR_INVALID_CLASS;
++    }
++
++    if (java_lang_Class::is_primitive(k_mirror)) {
++      return JVMTI_ERROR_UNMODIFIABLE_CLASS;
++    }
++
++    klassOop k_oop = java_lang_Class::as_klassOop(k_mirror);
++    KlassHandle klass(current_thread, k_oop);
++
++    jint status = klass->jvmti_class_status();
++    if (status & (JVMTI_CLASS_STATUS_ERROR)) {
++      return JVMTI_ERROR_INVALID_CLASS;
++    }
++    if (status & (JVMTI_CLASS_STATUS_ARRAY)) {
++      return JVMTI_ERROR_UNMODIFIABLE_CLASS;
++    }
++
++    instanceKlassHandle ikh(current_thread, k_oop);
++    if (ikh->get_cached_class_file_bytes() == NULL) {
++      // not cached, we need to reconstitute the class file from VM representation
++      constantPoolHandle  constants(current_thread, ikh->constants());
++      ObjectLocker ol(constants, current_thread);    // lock constant pool while we query it
++
++      JvmtiClassFileReconstituter reconstituter(ikh);
++      if (reconstituter.get_error() != JVMTI_ERROR_NONE) {
++        return reconstituter.get_error();
++      }
++
++      class_definitions[index].class_byte_count = (jint)reconstituter.class_file_size();
++      class_definitions[index].class_bytes      = (unsigned char*)
++                                                       reconstituter.class_file_bytes();
++    } else {
++      // it is cached, get it from the cache
++      class_definitions[index].class_byte_count = ikh->get_cached_class_file_len();
++      class_definitions[index].class_bytes      = ikh->get_cached_class_file_bytes();
++    }
++    class_definitions[index].klass              = jcls;
++  }
++  VM_RedefineClasses op(class_count, class_definitions, jvmti_class_load_kind_retransform);
++  VMThread::execute(&op);
++  return (op.check_error());
++} /* end RetransformClasses */
++
++
++// class_count - pre-checked to be greater than or equal to 0
++// class_definitions - pre-checked for NULL
++jvmtiError
++JvmtiEnv::RedefineClasses(jint class_count, const jvmtiClassDefinition* class_definitions) {
++//TODO: add locking
++  VM_RedefineClasses op(class_count, class_definitions, jvmti_class_load_kind_redefine);
++  VMThread::execute(&op);
++  return (op.check_error());
++} /* end RedefineClasses */
++
++
++  //
++  // Object functions
++  //
++
++// size_ptr - pre-checked for NULL
++jvmtiError
++JvmtiEnv::GetObjectSize(jobject object, jlong* size_ptr) {
++  oop mirror = JNIHandles::resolve_external_guard(object);
++  NULL_CHECK(mirror, JVMTI_ERROR_INVALID_OBJECT);
++
++  if (mirror->klass() == SystemDictionary::class_klass()) {
++    if (!java_lang_Class::is_primitive(mirror)) {
++        mirror = java_lang_Class::as_klassOop(mirror);
++        assert(mirror != NULL, "class for non-primitive mirror must exist");
++    }
++  }
++
++  *size_ptr = mirror->size() * wordSize;
++  return JVMTI_ERROR_NONE;
++} /* end GetObjectSize */
++
++  //
++  // Method functions
++  //
++
++// prefix - NULL is a valid value, must be checked
++jvmtiError
++JvmtiEnv::SetNativeMethodPrefix(const char* prefix) {
++  return prefix == NULL?
++              SetNativeMethodPrefixes(0, NULL) :
++              SetNativeMethodPrefixes(1, (char**)&prefix);
++} /* end SetNativeMethodPrefix */
++
++
++// prefix_count - pre-checked to be greater than or equal to 0
++// prefixes - pre-checked for NULL
++jvmtiError
++JvmtiEnv::SetNativeMethodPrefixes(jint prefix_count, char** prefixes) {
++  // Have to grab JVMTI thread state lock to be sure that some thread
++  // isn't accessing the prefixes at the same time we are setting them.
++  // No locks during VM bring-up.
++  if (Threads::number_of_threads() == 0) {
++    return set_native_method_prefixes(prefix_count, prefixes);
++  } else {
++    MutexLocker mu(JvmtiThreadState_lock);
++    return set_native_method_prefixes(prefix_count, prefixes);
++  }
++} /* end SetNativeMethodPrefixes */
++
++  //
++  // Event Management functions
++  //
++
++// callbacks - NULL is a valid value, must be checked
++// size_of_callbacks - pre-checked to be greater than or equal to 0
++jvmtiError
++JvmtiEnv::SetEventCallbacks(const jvmtiEventCallbacks* callbacks, jint size_of_callbacks) {
++  JvmtiEventController::set_event_callbacks(this, callbacks, size_of_callbacks);
++  return JVMTI_ERROR_NONE;
++} /* end SetEventCallbacks */
++
++
++// event_thread - NULL is a valid value, must be checked
++jvmtiError
++JvmtiEnv::SetEventNotificationMode(jvmtiEventMode mode, jvmtiEvent event_type, jthread event_thread,   ...) {
++  JavaThread* java_thread = NULL;
++  if (event_thread != NULL) {
++    oop thread_oop = JNIHandles::resolve_external_guard(event_thread);
++    if (thread_oop == NULL) {
++      return JVMTI_ERROR_INVALID_THREAD;
++    }
++    if (!thread_oop->is_a(SystemDictionary::thread_klass())) {
++      return JVMTI_ERROR_INVALID_THREAD;
++    }
++    java_thread = java_lang_Thread::thread(thread_oop);
++    if (java_thread == NULL) {
++      return JVMTI_ERROR_THREAD_NOT_ALIVE;
++    }
++  }
++
++  // event_type must be valid
++  if (!JvmtiEventController::is_valid_event_type(event_type)) {
++    return JVMTI_ERROR_INVALID_EVENT_TYPE;
++  }
++
++  // global events cannot be controlled at thread level.
++  if (java_thread != NULL && JvmtiEventController::is_global_event(event_type)) {
++    return JVMTI_ERROR_ILLEGAL_ARGUMENT;
++  }
++
++  bool enabled = (mode == JVMTI_ENABLE);
++
++  // assure that needed capabilities are present
++  if (enabled && !JvmtiUtil::has_event_capability(event_type, get_capabilities())) {
++    return JVMTI_ERROR_MUST_POSSESS_CAPABILITY;
++  }
++
++  if (event_type == JVMTI_EVENT_CLASS_FILE_LOAD_HOOK && enabled) {
++    record_class_file_load_hook_enabled();
++  }
++  JvmtiEventController::set_user_enabled(this, java_thread, event_type, enabled);
++
++  return JVMTI_ERROR_NONE;
++} /* end SetEventNotificationMode */
++
++  //
++  // Capability functions
++  //
++
++// capabilities_ptr - pre-checked for NULL
++jvmtiError
++JvmtiEnv::GetPotentialCapabilities(jvmtiCapabilities* capabilities_ptr) {
++  JvmtiManageCapabilities::get_potential_capabilities(get_capabilities(),
++                                                      get_prohibited_capabilities(),
++                                                      capabilities_ptr);
++  return JVMTI_ERROR_NONE;
++} /* end GetPotentialCapabilities */
++
++
++// capabilities_ptr - pre-checked for NULL
++jvmtiError
++JvmtiEnv::AddCapabilities(const jvmtiCapabilities* capabilities_ptr) {
++  return JvmtiManageCapabilities::add_capabilities(get_capabilities(),
++                                                   get_prohibited_capabilities(),
++                                                   capabilities_ptr,
++                                                   get_capabilities());
++} /* end AddCapabilities */
++
++
++// capabilities_ptr - pre-checked for NULL
++jvmtiError
++JvmtiEnv::RelinquishCapabilities(const jvmtiCapabilities* capabilities_ptr) {
++  JvmtiManageCapabilities::relinquish_capabilities(get_capabilities(), capabilities_ptr, get_capabilities());
++  return JVMTI_ERROR_NONE;
++} /* end RelinquishCapabilities */
++
++
++// capabilities_ptr - pre-checked for NULL
++jvmtiError
++JvmtiEnv::GetCapabilities(jvmtiCapabilities* capabilities_ptr) {
++  JvmtiManageCapabilities::copy_capabilities(get_capabilities(), capabilities_ptr);
++  return JVMTI_ERROR_NONE;
++} /* end GetCapabilities */
++
++  //
++  // Class Loader Search functions
++  //
++
++// segment - pre-checked for NULL
++jvmtiError
++JvmtiEnv::AddToBootstrapClassLoaderSearch(const char* segment) {
++  jvmtiPhase phase = get_phase();
++  if (phase == JVMTI_PHASE_ONLOAD) {
++    Arguments::append_sysclasspath(segment);
++    return JVMTI_ERROR_NONE;
++  } else {
++    assert(phase == JVMTI_PHASE_LIVE, "sanity check");
++
++    // create the zip entry
++    ClassPathZipEntry* zip_entry = ClassLoader::create_class_path_zip_entry(segment);
++    if (zip_entry == NULL) {
++      return JVMTI_ERROR_ILLEGAL_ARGUMENT;
++    }
++
++    // lock the loader
++    Thread* thread = Thread::current();
++    HandleMark hm;
++    Handle loader_lock = Handle(thread, SystemDictionary::system_loader_lock());
++
++    ObjectLocker ol(loader_lock, thread);
++
++    // add the jar file to the bootclasspath
++    if (TraceClassLoading) {
++      tty->print_cr("[Opened %s]", zip_entry->name());
++    }
++    ClassLoader::add_to_list(zip_entry);
++    return JVMTI_ERROR_NONE;
++  }
++
++} /* end AddToBootstrapClassLoaderSearch */
++
++
++// segment - pre-checked for NULL
++jvmtiError
++JvmtiEnv::AddToSystemClassLoaderSearch(const char* segment) {
++  jvmtiPhase phase = get_phase();
++
++  if (phase == JVMTI_PHASE_ONLOAD) {
++    for (SystemProperty* p = Arguments::system_properties(); p != NULL; p = p->next()) {
++      if (strcmp("java.class.path", p->key()) == 0) {
++        p->append_value(segment);
++        break;
++      }
++    }
++    return JVMTI_ERROR_NONE;
++  } else {
++    HandleMark hm;
+ 
++    assert(phase == JVMTI_PHASE_LIVE, "sanity check");
+ 
+-#define FIXLATER 0 // REMOVE this when completed.
++    // create the zip entry (which will open the zip file and hence
++    // check that the segment is indeed a zip file).
++    ClassPathZipEntry* zip_entry = ClassLoader::create_class_path_zip_entry(segment);
++    if (zip_entry == NULL) {
++      return JVMTI_ERROR_ILLEGAL_ARGUMENT;
++    }
++    delete zip_entry;   // no longer needed
+ 
+- // FIXLATER: hook into JvmtiTrace
+-#define TraceJVMTICalls false
++    // lock the loader
++    Thread* THREAD = Thread::current();
++    Handle loader = Handle(THREAD, SystemDictionary::java_system_loader());
+ 
+-JvmtiEnv::JvmtiEnv() : JvmtiEnvBase()
+-{
+-}
++    ObjectLocker ol(loader, THREAD);
+ 
+-JvmtiEnv::~JvmtiEnv() 
+-{
+-}
++    // need the path as java.lang.String
++    Handle path = java_lang_String::create_from_str(segment, THREAD);
++    if (HAS_PENDING_EXCEPTION) {
++      CLEAR_PENDING_EXCEPTION;
++      return JVMTI_ERROR_INTERNAL;
++    }
+ 
+-JvmtiEnv*
+-JvmtiEnv::create_a_jvmti() {
+-  return new JvmtiEnv();
+-}
++    instanceKlassHandle loader_ik(THREAD, loader->klass());
+ 
+-// VM operation class to copy jni function table at safepoint.
+-// More than one java threads or jvmti agents may be reading/
+-// modifying jni function tables. To reduce the risk of bad
+-// interaction b/w these threads it is copied at safepoint.
+-class VM_JNIFunctionTableCopier : public VM_Operation {
+- private:
+-  const struct JNINativeInterface_ *_function_table;
+- public:
+-  VM_JNIFunctionTableCopier(const struct JNINativeInterface_ *func_tbl) {
+-    _function_table = func_tbl;
+-  };
++    // Invoke the appendToClassPathForInstrumentation method - if the method
++    // is not found it means the loader doesn't support adding to the class path
++    // in the live phase.
++    {
++      JavaValue res(T_VOID);
++      JavaCalls::call_special(&res,
++                              loader,
++                              loader_ik,
++                              vmSymbolHandles::appendToClassPathForInstrumentation_name(),
++                              vmSymbolHandles::appendToClassPathForInstrumentation_signature(),
++                              path,
++                              THREAD);
++      if (HAS_PENDING_EXCEPTION) {
++        symbolOop ex_name = PENDING_EXCEPTION->klass()->klass_part()->name();
++        CLEAR_PENDING_EXCEPTION;
+ 
+-  VMOp_Type type() const { return VMOp_JNIFunctionTableCopier; }
+-  void doit() {
+-    copy_jni_function_table(_function_table);
+-  };
+-};
++        if (ex_name == vmSymbols::java_lang_NoSuchMethodError()) {
++          return JVMTI_ERROR_CLASS_LOADER_UNSUPPORTED;
++        } else {
++          return JVMTI_ERROR_INTERNAL;
++        }
++      }
++    }
+ 
+-//
+-// Do not change the "prefix" marker below, everything above it is copied
+-// unchanged into the filled stub, everything below is controlled by the
+-// stub filler (only method bodies are carried forward, and then only for
+-// functionality still in the spec).
+-//
+-// end file prefix
++    return JVMTI_ERROR_NONE;
++  }
++} /* end AddToSystemClassLoaderSearch */
+ 
+   //
+-  // Memory Management functions
+-  // 
++  // General functions
++  //
+ 
+-// mem_ptr - pre-checked for NULL
++// phase_ptr - pre-checked for NULL
+ jvmtiError
+-JvmtiEnv::Allocate(jlong size, unsigned char** mem_ptr) {
+-  return allocate(size, mem_ptr);
+-} /* end Allocate */
++JvmtiEnv::GetPhase(jvmtiPhase* phase_ptr) {
++  *phase_ptr = get_phase();
++  return JVMTI_ERROR_NONE;
++} /* end GetPhase */
+ 
+ 
+-// mem - NULL is a valid value, must be checked
+ jvmtiError
+-JvmtiEnv::Deallocate(unsigned char* mem) {
+-  return deallocate(mem);
+-} /* end Deallocate */
++JvmtiEnv::DisposeEnvironment() {
++  dispose();
++  return JVMTI_ERROR_NONE;
++} /* end DisposeEnvironment */
++
++
++// data - NULL is a valid value, must be checked
++jvmtiError
++JvmtiEnv::SetEnvironmentLocalStorage(const void* data) {
++  set_env_local_storage(data);
++  return JVMTI_ERROR_NONE;
++} /* end SetEnvironmentLocalStorage */
++
++
++// data_ptr - pre-checked for NULL
++jvmtiError
++JvmtiEnv::GetEnvironmentLocalStorage(void** data_ptr) {
++  *data_ptr = (void*)get_env_local_storage();
++  return JVMTI_ERROR_NONE;
++} /* end GetEnvironmentLocalStorage */
++
++// version_ptr - pre-checked for NULL
++jvmtiError
++JvmtiEnv::GetVersionNumber(jint* version_ptr) {
++  *version_ptr = JVMTI_VERSION;
++  return JVMTI_ERROR_NONE;
++} /* end GetVersionNumber */
++
++
++// name_ptr - pre-checked for NULL
++jvmtiError
++JvmtiEnv::GetErrorName(jvmtiError error, char** name_ptr) {
++  if (error < JVMTI_ERROR_NONE || error > JVMTI_ERROR_MAX) {
++    return JVMTI_ERROR_ILLEGAL_ARGUMENT;
++  }
++  const char *name = JvmtiUtil::error_name(error);
++  if (name == NULL) {
++    return JVMTI_ERROR_ILLEGAL_ARGUMENT;
++  }
++  size_t len = strlen(name) + 1;
++  jvmtiError err = allocate(len, (unsigned char**)name_ptr);
++  if (err == JVMTI_ERROR_NONE) {
++    memcpy(*name_ptr, name, len);
++  }
++  return err;
++} /* end GetErrorName */
++
++
++jvmtiError
++JvmtiEnv::SetVerboseFlag(jvmtiVerboseFlag flag, jboolean value) {
++  switch (flag) {
++  case JVMTI_VERBOSE_OTHER:
++    // ignore
++    break;
++  case JVMTI_VERBOSE_CLASS:
++    TraceClassLoading = value != 0;
++    TraceClassUnloading = value != 0;
++    break;
++  case JVMTI_VERBOSE_GC:
++    PrintGC = value != 0;
++    TraceClassUnloading = value != 0;
++    break;
++  case JVMTI_VERBOSE_JNI:
++    PrintJNIResolving = value != 0;
++    break;
++  default:
++    return JVMTI_ERROR_ILLEGAL_ARGUMENT;
++  };
++  return JVMTI_ERROR_NONE;
++} /* end SetVerboseFlag */
++
++
++// format_ptr - pre-checked for NULL
++jvmtiError
++JvmtiEnv::GetJLocationFormat(jvmtiJlocationFormat* format_ptr) {
++  *format_ptr = JVMTI_JLOCATION_JVMBCI;
++  return JVMTI_ERROR_NONE;
++} /* end GetJLocationFormat */
+ 
++#ifndef JVMTI_KERNEL
+ 
+   //
+   // Thread functions
+-  // 
++  //
+ 
+ // Threads_lock NOT held
+ // thread - NOT pre-checked
+@@ -104,7 +610,7 @@
+   jint state;
+   oop thread_oop;
+   JavaThread* thr;
+-  
++
+   if (thread == NULL) {
+     thread_oop = JavaThread::current()->threadObj();
+   } else {
+@@ -122,14 +628,14 @@
+   thr = java_lang_Thread::thread(thread_oop);
+   if (thr != NULL) {
+     JavaThreadState jts = thr->thread_state();
+-    
++
+     if (thr->is_being_ext_suspended()) {
+       state |= JVMTI_THREAD_STATE_SUSPENDED;
+     }
+     if (jts == _thread_in_native) {
+       state |= JVMTI_THREAD_STATE_IN_NATIVE;
+     }
+-    OSThread* osThread = thr->osthread(); 
++    OSThread* osThread = thr->osthread();
+     if (osThread != NULL && osThread->interrupted()) {
+       state |= JVMTI_THREAD_STATE_INTERRUPTED;
+     }
+@@ -162,12 +668,12 @@
+   ThreadsListEnumerator tle(Thread::current(), true);
+   nthreads = tle.num_threads();
+   *threads_count_ptr = nthreads;
+-  
++
+   if (nthreads == 0) {
+     *threads_ptr = NULL;
+     return JVMTI_ERROR_NONE;
+   }
+-  
++
+   thread_objs = NEW_RESOURCE_ARRAY(Handle, nthreads);
+   NULL_CHECK(thread_objs, JVMTI_ERROR_OUT_OF_MEMORY);
+ 
+@@ -340,7 +846,7 @@
+   JavaThread::send_async_exception(java_thread->threadObj(), e);
+ 
+   return JVMTI_ERROR_NONE;
+-  
++
+ } /* end StopThread */
+ 
+ 
+@@ -353,14 +859,14 @@
+     return JVMTI_ERROR_INVALID_THREAD;
+ 
+   JavaThread* current_thread  = JavaThread::current();
+-  
++
+   // Todo: this is a duplicate of JVM_Interrupt; share code in future
+   // Ensure that the C++ Thread and OSThread structures aren't freed before we operate
+   MutexLockerEx ml(current_thread->threadObj() == thread_oop ? NULL : Threads_lock);
+   // We need to re-resolve the java_thread, since a GC might have happened during the
+   // acquire of the lock
+ 
+-  JavaThread* java_thread = java_lang_Thread::thread(JNIHandles::resolve_external_guard(thread));    
++  JavaThread* java_thread = java_lang_Thread::thread(JNIHandles::resolve_external_guard(thread));
+   NULL_CHECK(java_thread, JVMTI_ERROR_THREAD_NOT_ALIVE);
+ 
+   Thread::interrupt(java_thread);
+@@ -407,25 +913,25 @@
+     context_class_loader = Handle(current_thread, loader);
+   }
+   { const char *n;
+-  
++
+     if (name() != NULL) {
+-      n = UNICODE::as_utf8((jchar*) name->base(T_CHAR), name->length()); 
++      n = UNICODE::as_utf8((jchar*) name->base(T_CHAR), name->length());
+     } else {
+-      n = UNICODE::as_utf8(NULL, 0); 
++      n = UNICODE::as_utf8(NULL, 0);
+     }
+-    
++
+     info_ptr->name = (char *) jvmtiMalloc(strlen(n)+1);
+     if (info_ptr->name == NULL)
+       return JVMTI_ERROR_OUT_OF_MEMORY;
+-    
++
+     strcpy(info_ptr->name, n);
+   }
+   info_ptr->is_daemon = is_daemon;
+   info_ptr->priority  = priority;
+ 
+   info_ptr->context_class_loader = (context_class_loader.is_null()) ? NULL :
+-				     jni_reference(context_class_loader);
+-  info_ptr->thread_group = jni_reference(thread_group);  
++                                     jni_reference(context_class_loader);
++  info_ptr->thread_group = jni_reference(thread_group);
+ 
+   return JVMTI_ERROR_NONE;
+ } /* end GetThreadInfo */
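
A usage sketch for the function above, assuming a live-phase jvmtiEnv named jvmti: passing NULL selects the current thread, matching the thread == NULL branch, and the returned name string was jvmtiMalloc'ed so the caller must release it.

#include <jvmti.h>
#include <stdio.h>

static void print_current_thread(jvmtiEnv *jvmti) {
  jvmtiThreadInfo info;
  // NULL selects the current thread, as in the implementation above.
  if (jvmti->GetThreadInfo(NULL, &info) == JVMTI_ERROR_NONE) {
    printf("thread %s prio=%d daemon=%d\n",
           info.name, (int)info.priority, (int)info.is_daemon);
    // The VM allocated info.name; the agent owns and must free it.
    jvmti->Deallocate(reinterpret_cast<unsigned char *>(info.name));
  }
}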
+@@ -443,7 +949,7 @@
+   // growable array of jvmti monitors info on the C-heap
+   GrowableArray<jvmtiMonitorStackDepthInfo*> *owned_monitors_list =
+       new (ResourceObj::C_HEAP) GrowableArray<jvmtiMonitorStackDepthInfo*>(1, true);
+-  
++
+   uint32_t debug_bits = 0;
+   if (is_thread_fully_suspended(java_thread, true, &debug_bits)) {
+     err = get_owned_monitors(calling_thread, java_thread, owned_monitors_list);
+@@ -452,20 +958,20 @@
+     // be suspended.
+     VM_GetOwnedMonitorInfo op(this, calling_thread, java_thread, owned_monitors_list);
+     VMThread::execute(&op);
+-    err = op.result();        
++    err = op.result();
+   }
+   jint owned_monitor_count = owned_monitors_list->length();
+   if (err == JVMTI_ERROR_NONE) {
+-    if ((err = allocate(owned_monitor_count * sizeof(jobject *), 
++    if ((err = allocate(owned_monitor_count * sizeof(jobject *),
+                       (unsigned char**)owned_monitors_ptr)) == JVMTI_ERROR_NONE) {
+       // copy into the returned array
+       for (int i = 0; i < owned_monitor_count; i++) {
+-        (*owned_monitors_ptr)[i] = 
++        (*owned_monitors_ptr)[i] =
+           ((jvmtiMonitorStackDepthInfo*)owned_monitors_list->at(i))->monitor;
+       }
+       *owned_monitor_count_ptr = owned_monitor_count;
+     }
+-  } 
++  }
+   // clean up.
+   for (int i = 0; i < owned_monitor_count; i++) {
+     deallocate((unsigned char*)owned_monitors_list->at(i));
+@@ -484,11 +990,11 @@
+ JvmtiEnv::GetOwnedMonitorStackDepthInfo(JavaThread* java_thread, jint* monitor_info_count_ptr, jvmtiMonitorStackDepthInfo** monitor_info_ptr) {
+   jvmtiError err = JVMTI_ERROR_NONE;
+   JavaThread* calling_thread  = JavaThread::current();
+-  
++
+   // growable array of jvmti monitors info on the C-heap
+   GrowableArray<jvmtiMonitorStackDepthInfo*> *owned_monitors_list =
+          new (ResourceObj::C_HEAP) GrowableArray<jvmtiMonitorStackDepthInfo*>(1, true);
+-  
++
+   uint32_t debug_bits = 0;
+   if (is_thread_fully_suspended(java_thread, true, &debug_bits)) {
+     err = get_owned_monitors(calling_thread, java_thread, owned_monitors_list);
+@@ -497,14 +1003,14 @@
+     // be suspended.
+     VM_GetOwnedMonitorInfo op(this, calling_thread, java_thread, owned_monitors_list);
+     VMThread::execute(&op);
+-    err = op.result();        
++    err = op.result();
+   }
+-  
++
+   jint owned_monitor_count = owned_monitors_list->length();
+   if (err == JVMTI_ERROR_NONE) {
+-    if ((err = allocate(owned_monitor_count * sizeof(jvmtiMonitorStackDepthInfo), 
++    if ((err = allocate(owned_monitor_count * sizeof(jvmtiMonitorStackDepthInfo),
+                       (unsigned char**)monitor_info_ptr)) == JVMTI_ERROR_NONE) {
+-      // copy to output array. 
++      // copy to output array.
+       for (int i = 0; i < owned_monitor_count; i++) {
+         (*monitor_info_ptr)[i].monitor =
+           ((jvmtiMonitorStackDepthInfo*)owned_monitors_list->at(i))->monitor;
+@@ -515,12 +1021,12 @@
+     *monitor_info_count_ptr = owned_monitor_count;
+   }
+ 
+-  // clean up. 
++  // clean up.
+   for (int i = 0; i < owned_monitor_count; i++) {
+     deallocate((unsigned char*)owned_monitors_list->at(i));
+   }
+   delete owned_monitors_list;
+-  
++
+   return err;
+ } /* end GetOwnedMonitorStackDepthInfo */
+ 
+@@ -534,12 +1040,12 @@
+   uint32_t debug_bits = 0;
+   JavaThread* calling_thread  = JavaThread::current();
+   if (is_thread_fully_suspended(java_thread, true, &debug_bits)) {
+-    err = get_current_contended_monitor(calling_thread, java_thread, monitor_ptr); 
++    err = get_current_contended_monitor(calling_thread, java_thread, monitor_ptr);
+   } else {
+     // get contended monitor information at safepoint.
+     VM_GetCurrentContendedMonitor op(this, calling_thread, java_thread, monitor_ptr);
+     VMThread::execute(&op);
+-    err = op.result();        
++    err = op.result();
+   }
+   return err;
+ } /* end GetCurrentContendedMonitor */
+@@ -560,7 +1066,7 @@
+   }
+ 
+   //Thread-self
+-  JavaThread* current_thread = JavaThread::current(); 
++  JavaThread* current_thread = JavaThread::current();
+ 
+   Handle thread_hndl(current_thread, thread_oop);
+   {
+@@ -581,95 +1087,35 @@
+ 
+     new_thread->set_threadObj(thread_hndl());
+     Threads::add(new_thread);
+-    Thread::start(new_thread);
+-  } // unlock Threads_lock
+-
+-  return JVMTI_ERROR_NONE;
+-} /* end RunAgentThread */
+-
+-
+-// Threads_lock NOT held, java_thread not protected by lock
+-// java_thread - pre-checked
+-// data - NULL is a valid value, must be checked
+-jvmtiError
+-JvmtiEnv::SetThreadLocalStorage(JavaThread* java_thread, const void* data) {
+-  JvmtiThreadState* state = java_thread->jvmti_thread_state();
+-  if (state == NULL) {
+-    if (data == NULL) {
+-      // leaving state unset same as data set to NULL
+-      return JVMTI_ERROR_NONE;
+-    }
+-    // otherwise, create the state
+-    state = JvmtiThreadState::state_for(java_thread);
+-  }
+-  state->env_thread_state(this)->set_agent_thread_local_storage_data((void*)data);
+-  return JVMTI_ERROR_NONE;
+-} /* end SetThreadLocalStorage */
+-
+-
+-// Threads_lock NOT held
+-// thread - NOT pre-checked
+-// data_ptr - pre-checked for NULL
+-jvmtiError
+-JvmtiEnv::GetThreadLocalStorage(jthread thread, void** data_ptr) {
+-  JavaThread* current_thread = JavaThread::current();
+-  if (thread == NULL) {
+-    JvmtiThreadState* state = current_thread->jvmti_thread_state();
+-    *data_ptr = (state == NULL) ? NULL :
+-      state->env_thread_state(this)->get_agent_thread_local_storage_data();
+-  } else {
+-
+-    // jvmti_GetThreadLocalStorage is "in native" and doesn't transition
+-    // the thread to _thread_in_vm. However, when the TLS for a thread
+-    // other than the current thread is required we need to transition
+-    // from native so as to resolve the jthread.
+-
+-    ThreadInVMfromNative __tiv(current_thread);
+-    __ENTRY(jvmtiError, JvmtiEnv::GetThreadLocalStorage , current_thread) 
+-    debug_only(VMNativeEntryWrapper __vew;) 
+-
+-    oop thread_oop = JNIHandles::resolve_external_guard(thread);
+-    if (thread_oop == NULL) {
+-      return JVMTI_ERROR_INVALID_THREAD;
+-    }
+-    if (!thread_oop->is_a(SystemDictionary::thread_klass())) {
+-      return JVMTI_ERROR_INVALID_THREAD;
+-    }
+-    JavaThread* java_thread = java_lang_Thread::thread(thread_oop); 
+-    if (java_thread == NULL) {
+-      return JVMTI_ERROR_THREAD_NOT_ALIVE;
+-    }
+-    JvmtiThreadState* state = java_thread->jvmti_thread_state();
+-    *data_ptr = (state == NULL) ? NULL :
+-      state->env_thread_state(this)->get_agent_thread_local_storage_data();
+-  }
+-  return JVMTI_ERROR_NONE;
+-} /* end GetThreadLocalStorage */
++    Thread::start(new_thread);
++  } // unlock Threads_lock
+ 
++  return JVMTI_ERROR_NONE;
++} /* end RunAgentThread */
+ 
+   //
+   // Thread Group functions
+-  // 
++  //
+ 
+ // group_count_ptr - pre-checked for NULL
+ // groups_ptr - pre-checked for NULL
+ jvmtiError
+ JvmtiEnv::GetTopThreadGroups(jint* group_count_ptr, jthreadGroup** groups_ptr) {
+-  JavaThread* current_thread = JavaThread::current(); 
++  JavaThread* current_thread = JavaThread::current();
+ 
+   // Only one top level thread group now.
+   *group_count_ptr = 1;
+-  
++
+   // Allocate memory to store global-refs to the thread groups.
+   // Assume this area is freed by caller.
+   *groups_ptr = (jthreadGroup *) jvmtiMalloc((sizeof(jthreadGroup)) * (*group_count_ptr));
+-  
++
+   NULL_CHECK(*groups_ptr, JVMTI_ERROR_OUT_OF_MEMORY);
+ 
+   // Convert oop to Handle, then convert Handle to global-ref.
+   {
+     HandleMark hm(current_thread);
+-    Handle system_thread_group(current_thread, Universe::system_thread_group()); 
++    Handle system_thread_group(current_thread, Universe::system_thread_group());
+     *groups_ptr[0] = jni_reference(system_thread_group);
+   }
+ 
+@@ -683,7 +1129,7 @@
+   ResourceMark rm;
+   HandleMark hm;
+ 
+-  JavaThread* current_thread = JavaThread::current(); 
++  JavaThread* current_thread = JavaThread::current();
+ 
+   Handle group_obj (current_thread, JNIHandles::resolve_external_guard(group));
+   NULL_CHECK(group_obj(), JVMTI_ERROR_INVALID_THREAD_GROUP);
+@@ -725,7 +1171,7 @@
+ // groups_ptr - pre-checked for NULL
+ jvmtiError
+ JvmtiEnv::GetThreadGroupChildren(jthreadGroup group, jint* thread_count_ptr, jthread** threads_ptr, jint* group_count_ptr, jthreadGroup** groups_ptr) {
+-  JavaThread* current_thread = JavaThread::current(); 
++  JavaThread* current_thread = JavaThread::current();
+   oop group_obj = (oop) JNIHandles::resolve_external_guard(group);
+   NULL_CHECK(group_obj, JVMTI_ERROR_INVALID_THREAD_GROUP);
+ 
+@@ -755,8 +1201,8 @@
+         JavaThread *javathread = java_lang_Thread::thread(thread_obj);
+         // Filter out hidden java threads.
+         if (javathread != NULL && javathread->is_hidden_from_external_view()) {
+-	  hidden_threads++;
+-	  continue;
++          hidden_threads++;
++          continue;
+         }
+         thread_objs[j++] = Handle(current_thread, thread_obj);
+       }
+@@ -792,7 +1238,7 @@
+ 
+   //
+   // Stack Frame functions
+-  // 
++  //
+ 
+ // Threads_lock NOT held, java_thread not protected by lock
+ // java_thread - pre-checked
+@@ -859,7 +1305,7 @@
+ jvmtiError
+ JvmtiEnv::GetFrameCount(JavaThread* java_thread, jint* count_ptr) {
+   jvmtiError err = JVMTI_ERROR_NONE;
+-  
++
+   // retrieve or create JvmtiThreadState.
+   JvmtiThreadState* state = JvmtiThreadState::state_for(java_thread);
+   uint32_t debug_bits = 0;
+@@ -895,8 +1341,8 @@
+   }
+ 
+   {
+-    // Was workaround bug 
+-    //    4812902: popFrame hangs if the method is waiting at a synchronize 
++    // Was workaround bug
++    //    4812902: popFrame hangs if the method is waiting at a synchronize
+     // Catch this condition and return an error to avoid hanging.
+     // Now JVMTI spec allows an implementation to bail out with an opaque frame error.
+     OSThread* osThread = java_thread->osthread();
+@@ -934,7 +1380,7 @@
+         return JVMTI_ERROR_OPAQUE_FRAME;
+       }
+     }
+-    
++
+     // If any of the top 2 frames is a compiled one, need to deoptimize it
+     for (int i = 0; i < 2; i++) {
+       if (!is_interpreted[i]) {
+@@ -942,17 +1388,17 @@
+         VMThread::execute(&op);
+       }
+     }
+-    
++
+     // Update the thread state to reflect that the top frame is popped
+     // so that cur_stack_depth is maintained properly and all frameIDs
+     // are invalidated.
+     // The current frame will be popped later when the suspended thread
+     // is resumed and right before returning from VM to Java.
+     // (see call_VM_base() in assembler_<cpu>.cpp).
+-    
+-    // It's fine to update the thread state here because no JVMTI events 
++
++    // It's fine to update the thread state here because no JVMTI events
+     // shall be posted for this PopFrame.
+-    
++
++    // retrieve or create the state
+     JvmtiThreadState* state = JvmtiThreadState::state_for(java_thread);
+ 
+@@ -961,7 +1407,7 @@
+     // Set pending step flag for this popframe and it is cleared when next
+     // step event is posted.
+     state->set_pending_step_for_popframe();
+-  } 
++  }
+ 
+   return JVMTI_ERROR_NONE;
+ } /* end PopFrame */
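
A hedged sketch of the calling convention PopFrame expects: the can_pop_frame capability must be added first, and the target thread must already be suspended and must not be the calling thread. pop_top_frame is a hypothetical helper name.

#include <jvmti.h>
#include <string.h>

static jvmtiError pop_top_frame(jvmtiEnv *jvmti, jthread thread) {
  jvmtiCapabilities caps;
  memset(&caps, 0, sizeof(caps));
  caps.can_pop_frame = 1;
  jvmtiError err = jvmti->AddCapabilities(&caps);
  if (err != JVMTI_ERROR_NONE) return err;

  // The target must already be suspended (e.g. via SuspendThread);
  // otherwise PopFrame returns JVMTI_ERROR_THREAD_NOT_SUSPENDED.
  return jvmti->PopFrame(thread);
}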
+@@ -969,7 +1415,7 @@
+ 
+ // Threads_lock NOT held, java_thread not protected by lock
+ // java_thread - pre-checked
+-// java_thread - unchecked 
++// java_thread - unchecked
+ // depth - pre-checked as non-negative
+ // method_ptr - pre-checked for NULL
+ // location_ptr - pre-checked for NULL
+@@ -992,7 +1438,7 @@
+ 
+ // Threads_lock NOT held, java_thread not protected by lock
+ // java_thread - pre-checked
+-// java_thread - unchecked 
++// java_thread - unchecked
+ // depth - pre-checked as non-negative
+ jvmtiError
+ JvmtiEnv::NotifyFramePop(JavaThread* java_thread, jint depth) {
+@@ -1028,7 +1474,7 @@
+ 
+   //
+   // Force Early Return functions
+-  // 
++  //
+ 
+ // Threads_lock NOT held, java_thread not protected by lock
+ // java_thread - pre-checked
+@@ -1092,7 +1538,7 @@
+ 
+   //
+   // Heap functions
+-  // 
++  //
+ 
+ // klass - NULL is a valid value, must be checked
+ // initial_object - NULL is a valid value, must be checked
+@@ -1197,7 +1643,7 @@
+ 
+   //
+   // Heap (1.0) functions
+-  // 
++  //
+ 
+ // object_reference_callback - pre-checked for NULL
+ // user_data - NULL is a valid value, must be checked
+@@ -1258,11 +1704,11 @@
+ 
+   //
+   // Local Variable functions
+-  // 
++  //
+ 
+ // Threads_lock NOT held, java_thread not protected by lock
+ // java_thread - pre-checked
+-// java_thread - unchecked 
++// java_thread - unchecked
+ // depth - pre-checked as non-negative
+ // value_ptr - pre-checked for NULL
+ jvmtiError
+@@ -1286,7 +1732,7 @@
+ 
+ // Threads_lock NOT held, java_thread not protected by lock
+ // java_thread - pre-checked
+-// java_thread - unchecked 
++// java_thread - unchecked
+ // depth - pre-checked as non-negative
+ // value_ptr - pre-checked for NULL
+ jvmtiError
+@@ -1304,7 +1750,7 @@
+ 
+ // Threads_lock NOT held, java_thread not protected by lock
+ // java_thread - pre-checked
+-// java_thread - unchecked 
++// java_thread - unchecked
+ // depth - pre-checked as non-negative
+ // value_ptr - pre-checked for NULL
+ jvmtiError
+@@ -1322,7 +1768,7 @@
+ 
+ // Threads_lock NOT held, java_thread not protected by lock
+ // java_thread - pre-checked
+-// java_thread - unchecked 
++// java_thread - unchecked
+ // depth - pre-checked as non-negative
+ // value_ptr - pre-checked for NULL
+ jvmtiError
+@@ -1340,7 +1786,7 @@
+ 
+ // Threads_lock NOT held, java_thread not protected by lock
+ // java_thread - pre-checked
+-// java_thread - unchecked 
++// java_thread - unchecked
+ // depth - pre-checked as non-negative
+ // value_ptr - pre-checked for NULL
+ jvmtiError
+@@ -1358,7 +1804,7 @@
+ 
+ // Threads_lock NOT held, java_thread not protected by lock
+ // java_thread - pre-checked
+-// java_thread - unchecked 
++// java_thread - unchecked
+ // depth - pre-checked as non-negative
+ jvmtiError
+ JvmtiEnv::SetLocalObject(JavaThread* java_thread, jint depth, jint slot, jobject value) {
+@@ -1375,7 +1821,7 @@
+ 
+ // Threads_lock NOT held, java_thread not protected by lock
+ // java_thread - pre-checked
+-// java_thread - unchecked 
++// java_thread - unchecked
+ // depth - pre-checked as non-negative
+ jvmtiError
+ JvmtiEnv::SetLocalInt(JavaThread* java_thread, jint depth, jint slot, jint value) {
+@@ -1392,7 +1838,7 @@
+ 
+ // Threads_lock NOT held, java_thread not protected by lock
+ // java_thread - pre-checked
+-// java_thread - unchecked 
++// java_thread - unchecked
+ // depth - pre-checked as non-negative
+ jvmtiError
+ JvmtiEnv::SetLocalLong(JavaThread* java_thread, jint depth, jint slot, jlong value) {
+@@ -1409,7 +1855,7 @@
+ 
+ // Threads_lock NOT held, java_thread not protected by lock
+ // java_thread - pre-checked
+-// java_thread - unchecked 
++// java_thread - unchecked
+ // depth - pre-checked as non-negative
+ jvmtiError
+ JvmtiEnv::SetLocalFloat(JavaThread* java_thread, jint depth, jint slot, jfloat value) {
+@@ -1426,7 +1872,7 @@
+ 
+ // Threads_lock NOT held, java_thread not protected by lock
+ // java_thread - pre-checked
+-// java_thread - unchecked 
++// java_thread - unchecked
+ // depth - pre-checked as non-negative
+ jvmtiError
+ JvmtiEnv::SetLocalDouble(JavaThread* java_thread, jint depth, jint slot, jdouble value) {
+@@ -1443,13 +1889,13 @@
+ 
+   //
+   // Breakpoint functions
+-  // 
++  //
+ 
+ // method_oop - pre-checked for validity, but may be NULL meaning obsolete method
+ jvmtiError
+ JvmtiEnv::SetBreakpoint(methodOop method_oop, jlocation location) {
+   NULL_CHECK(method_oop, JVMTI_ERROR_INVALID_METHODID);
+-  if (location < 0) {	// simple invalid location check first
++  if (location < 0) {   // simple invalid location check first
+     return JVMTI_ERROR_INVALID_LOCATION;
+   }
+   // verify that the breakpoint is not past the end of the method
+@@ -1475,8 +1921,8 @@
+ jvmtiError
+ JvmtiEnv::ClearBreakpoint(methodOop method_oop, jlocation location) {
+   NULL_CHECK(method_oop, JVMTI_ERROR_INVALID_METHODID);
+-    
+-  if (location < 0) {	// simple invalid location check first
++
++  if (location < 0) {   // simple invalid location check first
+     return JVMTI_ERROR_INVALID_LOCATION;
+   }
+ 
+@@ -1501,7 +1947,7 @@
+ 
+   //
+   // Watched Field functions
+-  // 
++  //
+ 
+ jvmtiError
+ JvmtiEnv::SetFieldAccessWatch(fieldDescriptor* fdesc_ptr) {
+@@ -1511,7 +1957,7 @@
+   update_klass_field_access_flag(fdesc_ptr);
+ 
+   JvmtiEventController::change_field_watch(JVMTI_EVENT_FIELD_ACCESS, true);
+-  
++
+   return JVMTI_ERROR_NONE;
+ } /* end SetFieldAccessWatch */
+ 
+@@ -1524,7 +1970,7 @@
+   update_klass_field_access_flag(fdesc_ptr);
+ 
+   JvmtiEventController::change_field_watch(JVMTI_EVENT_FIELD_ACCESS, false);
+-  
++
+   return JVMTI_ERROR_NONE;
+ } /* end ClearFieldAccessWatch */
+ 
+@@ -1537,7 +1983,7 @@
+   update_klass_field_access_flag(fdesc_ptr);
+ 
+   JvmtiEventController::change_field_watch(JVMTI_EVENT_FIELD_MODIFICATION, true);
+-  
++
+   return JVMTI_ERROR_NONE;
+ } /* end SetFieldModificationWatch */
+ 
+@@ -1550,31 +1996,13 @@
+   update_klass_field_access_flag(fdesc_ptr);
+ 
+   JvmtiEventController::change_field_watch(JVMTI_EVENT_FIELD_MODIFICATION, false);
+-  
++
+   return JVMTI_ERROR_NONE;
+ } /* end ClearFieldModificationWatch */
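
A sketch of arming one of these watches from an agent. The class and field names are placeholders, and can_generate_field_modification_events must be granted before the watch can be set.

#include <jvmti.h>
#include <string.h>

static jvmtiError watch_field(jvmtiEnv *jvmti, JNIEnv *jni) {
  jvmtiCapabilities caps;
  memset(&caps, 0, sizeof(caps));
  caps.can_generate_field_modification_events = 1;
  jvmtiError err = jvmti->AddCapabilities(&caps);
  if (err != JVMTI_ERROR_NONE) return err;

  jclass klass = jni->FindClass("com/example/Config");  // placeholder class
  if (klass == NULL) { jni->ExceptionClear(); return JVMTI_ERROR_INVALID_CLASS; }
  jfieldID fid = jni->GetFieldID(klass, "limit", "I");  // placeholder field
  if (fid == NULL) { jni->ExceptionClear(); return JVMTI_ERROR_INVALID_FIELDID; }

  // Drives JvmtiEnv::SetFieldModificationWatch above, which updates the
  // klass access flag and enables the field-watch event path.
  err = jvmti->SetFieldModificationWatch(klass, fid);
  if (err != JVMTI_ERROR_NONE) return err;
  return jvmti->SetEventNotificationMode(JVMTI_ENABLE,
                                         JVMTI_EVENT_FIELD_MODIFICATION, NULL);
}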
+ 
+-
+   //
+   // Class functions
+-  // 
+-
+-// class_count_ptr - pre-checked for NULL
+-// classes_ptr - pre-checked for NULL
+-jvmtiError
+-JvmtiEnv::GetLoadedClasses(jint* class_count_ptr, jclass** classes_ptr) {
+-  return JvmtiGetLoadedClasses::getLoadedClasses(this, class_count_ptr, classes_ptr);
+-} /* end GetLoadedClasses */
+-
+-
+-// initiating_loader - NULL is a valid value, must be checked
+-// class_count_ptr - pre-checked for NULL
+-// classes_ptr - pre-checked for NULL
+-jvmtiError
+-JvmtiEnv::GetClassLoaderClasses(jobject initiating_loader, jint* class_count_ptr, jclass** classes_ptr) {
+-  return JvmtiGetLoadedClasses::getClassLoaderClasses(this, initiating_loader, 
+-                                                  class_count_ptr, classes_ptr);
+-} /* end GetClassLoaderClasses */
++  //
+ 
+ 
+ // k_mirror - may be primitive, this must be checked
+@@ -1657,7 +2085,7 @@
+   if (!Klass::cast(k_klass)->oop_is_instance()) {
+     return JVMTI_ERROR_ABSENT_INFORMATION;
+   }
+- 
++
+   symbolOop sfnOop = instanceKlass::cast(k_klass)->source_file_name();
+   NULL_CHECK(sfnOop, JVMTI_ERROR_ABSENT_INFORMATION);
+   {
+@@ -1684,8 +2112,8 @@
+     assert((Klass::cast(k)->oop_is_instance() || Klass::cast(k)->oop_is_array()), "should be an instance or an array klass");
+     result = Klass::cast(k)->compute_modifier_flags(current_thread);
+     JavaThread* THREAD = current_thread; // pass to macros
+-    if (HAS_PENDING_EXCEPTION) { 
+-      CLEAR_PENDING_EXCEPTION; 
++    if (HAS_PENDING_EXCEPTION) {
++      CLEAR_PENDING_EXCEPTION;
+       return JVMTI_ERROR_INTERNAL;
+     };
+ 
+@@ -1697,7 +2125,7 @@
+     result = (JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
+   }
+   *modifiers_ptr = result;
+- 
++
+   return JVMTI_ERROR_NONE;
+ } /* end GetClassModifiers */
+ 
+@@ -1769,12 +2197,12 @@
+     *fields_ptr = (jfieldID*) jvmtiMalloc(0 * sizeof(jfieldID));
+     return JVMTI_ERROR_NONE;
+   }
+-  JavaThread* current_thread = JavaThread::current(); 
++  JavaThread* current_thread = JavaThread::current();
+   HandleMark hm(current_thread);
+   klassOop k = java_lang_Class::as_klassOop(k_mirror);
+   NULL_CHECK(k, JVMTI_ERROR_INVALID_CLASS);
+-  
+-  // Return CLASS_NOT_PREPARED error as per JVMTI spec. 
++
++  // Return CLASS_NOT_PREPARED error as per JVMTI spec.
+   if (!(Klass::cast(k)->jvmti_class_status() & (JVMTI_CLASS_STATUS_PREPARED|JVMTI_CLASS_STATUS_ARRAY) )) {
+     return JVMTI_ERROR_CLASS_NOT_PREPARED;
+   }
+@@ -1789,10 +2217,10 @@
+   instanceKlassHandle instanceK_h(current_thread, k);
+ 
+   int result_count = 0;
+-  // First, count the fields. 
++  // First, count the fields.
+   FilteredFieldStream flds(instanceK_h, true, true);
+   result_count = flds.field_count();
+-  
++
+   // Allocate the result and fill it in
+   jfieldID* result_list = (jfieldID*) jvmtiMalloc(result_count * sizeof(jfieldID));
+   // The JVMTI spec requires fields in the order they occur in the class file,
+@@ -1800,8 +2228,8 @@
+   int id_index = (result_count - 1);
+ 
+   for (FilteredFieldStream src_st(instanceK_h, true, true); !src_st.eos(); src_st.next()) {
+-    result_list[id_index--] = jfieldIDWorkaround::to_jfieldID( 
+-                                            instanceK_h, src_st.offset(), 
++    result_list[id_index--] = jfieldIDWorkaround::to_jfieldID(
++                                            instanceK_h, src_st.offset(),
+                                             src_st.access_flags().is_static());
+   }
+   assert(id_index == -1, "just checking");
+@@ -1824,12 +2252,12 @@
+       *interfaces_ptr = (jclass*) jvmtiMalloc(0 * sizeof(jclass));
+       return JVMTI_ERROR_NONE;
+     }
+-    JavaThread* current_thread = JavaThread::current(); 
++    JavaThread* current_thread = JavaThread::current();
+     HandleMark hm(current_thread);
+     klassOop k = java_lang_Class::as_klassOop(k_mirror);
+     NULL_CHECK(k, JVMTI_ERROR_INVALID_CLASS);
+ 
+-    // Return CLASS_NOT_PREPARED error as per JVMTI spec. 
++    // Return CLASS_NOT_PREPARED error as per JVMTI spec.
+     if (!(Klass::cast(k)->jvmti_class_status() & (JVMTI_CLASS_STATUS_PREPARED|JVMTI_CLASS_STATUS_ARRAY) ))
+       return JVMTI_ERROR_CLASS_NOT_PREPARED;
+ 
+@@ -1901,7 +2329,7 @@
+   klassOop k_oop = java_lang_Class::as_klassOop(k_mirror);
+   Thread *thread = Thread::current();
+   HandleMark hm(thread);
+-  ResourceMark rm(thread);     
++  ResourceMark rm(thread);
+   KlassHandle klass(thread, k_oop);
+ 
+   jint status = klass->jvmti_class_status();
+@@ -1955,7 +2383,7 @@
+         result = true;
+       }
+     }
+-    *is_interface_ptr = result; 
++    *is_interface_ptr = result;
+   }
+ 
+   return JVMTI_ERROR_NONE;
+@@ -1982,16 +2410,6 @@
+ 
+ 
+ // k_mirror - may be primitive, this must be checked
+-// is_modifiable_class_ptr - pre-checked for NULL
+-jvmtiError
+-JvmtiEnv::IsModifiableClass(oop k_mirror, jboolean* is_modifiable_class_ptr) {
+-  *is_modifiable_class_ptr = VM_RedefineClasses::is_modifiable_class(k_mirror)?
+-                                                       JNI_TRUE : JNI_FALSE;
+-  return JVMTI_ERROR_NONE;
+-} /* end IsModifiableClass */
+-
+-
+-// k_mirror - may be primitive, this must be checked
+ // classloader_ptr - pre-checked for NULL
+ jvmtiError
+ JvmtiEnv::GetClassLoader(oop k_mirror, jobject* classloader_ptr) {
+@@ -2000,7 +2418,7 @@
+       *classloader_ptr = (jclass) jni_reference(Handle());
+       return JVMTI_ERROR_NONE;
+     }
+-    JavaThread* current_thread = JavaThread::current(); 
++    JavaThread* current_thread = JavaThread::current();
+     HandleMark hm(current_thread);
+     klassOop k = java_lang_Class::as_klassOop(k_mirror);
+     NULL_CHECK(k, JVMTI_ERROR_INVALID_CLASS);
+@@ -2046,107 +2464,9 @@
+   return JVMTI_ERROR_NONE;
+ } /* end GetSourceDebugExtension */
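
For reference, a sketch exercising the class query functions from a live-phase agent: GetLoadedClasses (which this hunk relocates within the file) paired with GetClassSignature. Error handling is kept minimal; the class array holds JNI local references that a long-running agent would also release.

#include <jvmti.h>
#include <stdio.h>

static void dump_loaded_classes(jvmtiEnv *jvmti) {
  jint count = 0;
  jclass *classes = NULL;
  if (jvmti->GetLoadedClasses(&count, &classes) != JVMTI_ERROR_NONE) return;
  for (jint i = 0; i < count; i++) {
    char *sig = NULL;
    // Second out-parameter (generic signature) may be NULL if unwanted.
    if (jvmti->GetClassSignature(classes[i], &sig, NULL) == JVMTI_ERROR_NONE) {
      printf("%s\n", sig);
      jvmti->Deallocate(reinterpret_cast<unsigned char *>(sig));
    }
  }
  jvmti->Deallocate(reinterpret_cast<unsigned char *>(classes));
}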
+ 
+-
+-// class_count - pre-checked to be greater than or equal to 0
+-// classes - pre-checked for NULL
+-jvmtiError
+-JvmtiEnv::RetransformClasses(jint class_count, const jclass* classes) {
+-//TODO: add locking
+-
+-  int index;
+-  JavaThread* current_thread = JavaThread::current(); 
+-  ResourceMark rm(current_thread);  
+-
+-  jvmtiClassDefinition* class_definitions = 
+-                            NEW_RESOURCE_ARRAY(jvmtiClassDefinition, class_count);
+-  NULL_CHECK(class_definitions, JVMTI_ERROR_OUT_OF_MEMORY);
+-
+-  for (index = 0; index < class_count; index++) {
+-    HandleMark hm(current_thread);
+-
+-    jclass jcls = classes[index];
+-    oop k_mirror = JNIHandles::resolve_external_guard(jcls);
+-    if (k_mirror == NULL) {
+-      return JVMTI_ERROR_INVALID_CLASS;
+-    }
+-    if (!k_mirror->is_a(SystemDictionary::class_klass())) {
+-      return JVMTI_ERROR_INVALID_CLASS;
+-    }
+-
+-    if (java_lang_Class::is_primitive(k_mirror)) {
+-      return JVMTI_ERROR_UNMODIFIABLE_CLASS;   
+-    }
+-    
+-    klassOop k_oop = java_lang_Class::as_klassOop(k_mirror);
+-    KlassHandle klass(current_thread, k_oop);
+-    
+-    jint status = klass->jvmti_class_status();
+-    if (status & (JVMTI_CLASS_STATUS_ERROR)) {
+-      return JVMTI_ERROR_INVALID_CLASS;
+-    }
+-    if (status & (JVMTI_CLASS_STATUS_ARRAY)) {
+-      return JVMTI_ERROR_UNMODIFIABLE_CLASS;   
+-    }
+-
+-    instanceKlassHandle ikh(current_thread, k_oop);
+-    if (ikh->get_cached_class_file_bytes() == NULL) {
+-      // not cached, we need to reconstitute the class file from VM representation
+-      constantPoolHandle  constants(current_thread, ikh->constants());
+-      ObjectLocker ol(constants, current_thread);    // lock constant pool while we query it
+-      
+-      JvmtiClassFileReconstituter reconstituter(ikh);
+-      if (reconstituter.get_error() != JVMTI_ERROR_NONE) {
+-        return reconstituter.get_error();
+-      }
+-
+-      class_definitions[index].class_byte_count = (jint)reconstituter.class_file_size();
+-      class_definitions[index].class_bytes      = (unsigned char*)
+-                                                       reconstituter.class_file_bytes();
+-    } else {
+-      // it is cached, get it from the cache
+-      class_definitions[index].class_byte_count = ikh->get_cached_class_file_len();
+-      class_definitions[index].class_bytes      = ikh->get_cached_class_file_bytes();
+-    }
+-    class_definitions[index].klass              = jcls;
+-  }
+-  VM_RedefineClasses op(class_count, class_definitions, jvmti_class_load_kind_retransform);
+-  VMThread::execute(&op);
+-  return (op.check_error());
+-} /* end RetransformClasses */
+-
+-
+-// class_count - pre-checked to be greater than or equal to 0
+-// class_definitions - pre-checked for NULL
+-jvmtiError
+-JvmtiEnv::RedefineClasses(jint class_count, const jvmtiClassDefinition* class_definitions) {
+-//TODO: add locking
+-  VM_RedefineClasses op(class_count, class_definitions, jvmti_class_load_kind_redefine);
+-  VMThread::execute(&op);
+-  return (op.check_error());
+-} /* end RedefineClasses */
+-
+-
+   //
+   // Object functions
+-  // 
+-
+-// size_ptr - pre-checked for NULL
+-jvmtiError
+-JvmtiEnv::GetObjectSize(jobject object, jlong* size_ptr) {
+-  oop mirror = JNIHandles::resolve_external_guard(object);
+-  NULL_CHECK(mirror, JVMTI_ERROR_INVALID_OBJECT);
+-
+-  if (mirror->klass() == SystemDictionary::class_klass()) {
+-    if (!java_lang_Class::is_primitive(mirror)) {
+-	mirror = java_lang_Class::as_klassOop(mirror);
+-	assert(mirror != NULL, "class for non-primitive mirror must exist");
+-    }
+-  }
+-
+-  *size_ptr = mirror->size() * wordSize;
+-  return JVMTI_ERROR_NONE;
+-} /* end GetObjectSize */
+-
++  //
+ 
+ // hash_code_ptr - pre-checked for NULL
+ jvmtiError
+@@ -2180,7 +2500,7 @@
+ 
+   //
+   // Field functions
+-  // 
++  //
+ 
+ // name_ptr - NULL is a valid value, must be checked
+ // signature_ptr - NULL is a valid value, must be checked
+@@ -2256,7 +2576,7 @@
+ 
+   //
+   // Method functions
+-  // 
++  //
+ 
+ // method_oop - pre-checked for validity, but may be NULL meaning obsolete method
+ // name_ptr - NULL is a valid value, must be checked
+@@ -2336,9 +2656,9 @@
+ // size_ptr - pre-checked for NULL
+ jvmtiError
+ JvmtiEnv::GetArgumentsSize(methodOop method_oop, jint* size_ptr) {
+-  NULL_CHECK(method_oop, JVMTI_ERROR_INVALID_METHODID);    
++  NULL_CHECK(method_oop, JVMTI_ERROR_INVALID_METHODID);
+   // get size of arguments
+-    
++
+   (*size_ptr) = method_oop->size_of_parameters();
+   return JVMTI_ERROR_NONE;
+ } /* end GetArgumentsSize */
+@@ -2353,7 +2673,7 @@
+   if (!method_oop->has_linenumber_table()) {
+     return (JVMTI_ERROR_ABSENT_INFORMATION);
+   }
+-  
++
+   // The line number table is compressed so we don't know how big it is until decompressed.
+   // Decompression is really fast so we just do it twice.
+ 
+@@ -2363,9 +2683,9 @@
+   while (stream.read_pair()) {
+     num_entries++;
+   }
+-  jvmtiLineNumberEntry *jvmti_table = 
+-	    (jvmtiLineNumberEntry *)jvmtiMalloc(num_entries * (sizeof(jvmtiLineNumberEntry)));
+- 
++  jvmtiLineNumberEntry *jvmti_table =
++            (jvmtiLineNumberEntry *)jvmtiMalloc(num_entries * (sizeof(jvmtiLineNumberEntry)));
++
+   // Fill jvmti table
+   if (num_entries > 0) {
+     int index = 0;
+@@ -2377,8 +2697,8 @@
+     }
+     assert(index == num_entries, "sanity check");
+   }
+-  
+-  // Set up results  
++
++  // Set up results
+   (*entry_count_ptr) = num_entries;
+   (*table_ptr) = jvmti_table;
+ 
+@@ -2392,7 +2712,7 @@
+ jvmtiError
+ JvmtiEnv::GetMethodLocation(methodOop method_oop, jlocation* start_location_ptr, jlocation* end_location_ptr) {
+ 
+-  NULL_CHECK(method_oop, JVMTI_ERROR_INVALID_METHODID);    
++  NULL_CHECK(method_oop, JVMTI_ERROR_INVALID_METHODID);
+   // get start and end location
+   (*end_location_ptr) = (jlocation) (method_oop->code_size() - 1);
+   if (method_oop->code_size() == 0) {
+@@ -2412,7 +2732,7 @@
+ jvmtiError
+ JvmtiEnv::GetLocalVariableTable(methodOop method_oop, jint* entry_count_ptr, jvmtiLocalVariableEntry** table_ptr) {
+ 
+-  NULL_CHECK(method_oop, JVMTI_ERROR_INVALID_METHODID);    
++  NULL_CHECK(method_oop, JVMTI_ERROR_INVALID_METHODID);
+   JavaThread* current_thread  = JavaThread::current();
+ 
+   // does the klass have any local variable information?
+@@ -2423,14 +2743,14 @@
+ 
+   constantPoolOop constants = method_oop->constants();
+   NULL_CHECK(constants, JVMTI_ERROR_ABSENT_INFORMATION);
+-  
++
+   // in the vm localvariable table representation, 6 consecutive elements in the table
+   // represent a 6-tuple of shorts
+   // [start_pc, length, name_index, descriptor_index, signature_index, index]
+   jint num_entries = method_oop->localvariable_table_length();
+   jvmtiLocalVariableEntry *jvmti_table = (jvmtiLocalVariableEntry *)
+-     		jvmtiMalloc(num_entries * (sizeof(jvmtiLocalVariableEntry)));
+-  
++                jvmtiMalloc(num_entries * (sizeof(jvmtiLocalVariableEntry)));
++
+   if (num_entries > 0) {
+     LocalVariableTableElement* table = method_oop->localvariable_table_start();
+     for (int i = 0; i < num_entries; i++) {
+@@ -2446,13 +2766,13 @@
+       char *name_buf = NULL;
+       char *sig_buf = NULL;
+       char *gen_sig_buf = NULL;
+-      { 
++      {
+         ResourceMark rm(current_thread);
+-    
++
+         const char *utf8_name = (const char *) constants->symbol_at(name_index)->as_utf8();
+         name_buf = (char *) jvmtiMalloc(strlen(utf8_name)+1);
+         strcpy(name_buf, utf8_name);
+-      
++
+         const char *utf8_signature = (const char *) constants->symbol_at(signature_index)->as_utf8();
+         sig_buf = (char *) jvmtiMalloc(strlen(utf8_signature)+1);
+         strcpy(sig_buf, utf8_signature);
+@@ -2474,11 +2794,11 @@
+       jvmti_table[i].slot = slot;
+     }
+   }
+-  
++
+   // set results
+   (*entry_count_ptr) = num_entries;
+   (*table_ptr) = jvmti_table;
+-  
++
+   return JVMTI_ERROR_NONE;
+ } /* end GetLocalVariableTable */
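
A sketch of consuming the table built above. It assumes the can_access_local_variables capability and a method compiled with debug info (otherwise JVMTI_ERROR_ABSENT_INFORMATION, as in the code). Each string and the table itself were jvmtiMalloc'ed and must be deallocated by the agent.

#include <jvmti.h>
#include <stdio.h>

static void dump_locals(jvmtiEnv *jvmti, jmethodID method) {
  jint n = 0;
  jvmtiLocalVariableEntry *table = NULL;
  if (jvmti->GetLocalVariableTable(method, &n, &table) != JVMTI_ERROR_NONE)
    return;
  for (jint i = 0; i < n; i++) {
    printf("slot %d: %s %s [%ld,+%d)\n", (int)table[i].slot, table[i].name,
           table[i].signature, (long)table[i].start_location,
           (int)table[i].length);
    jvmti->Deallocate(reinterpret_cast<unsigned char *>(table[i].name));
    jvmti->Deallocate(reinterpret_cast<unsigned char *>(table[i].signature));
    if (table[i].generic_signature != NULL)
      jvmti->Deallocate(reinterpret_cast<unsigned char *>(table[i].generic_signature));
  }
  jvmti->Deallocate(reinterpret_cast<unsigned char *>(table));
}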
+ 
+@@ -2489,7 +2809,7 @@
+ jvmtiError
+ JvmtiEnv::GetBytecodes(methodOop method_oop, jint* bytecode_count_ptr, unsigned char** bytecodes_ptr) {
+   NULL_CHECK(method_oop, JVMTI_ERROR_INVALID_METHODID);
+-    
++
+   HandleMark hm;
+   methodHandle method(method_oop);
+   jint size = (jint)method->code_size();
+@@ -2501,7 +2821,7 @@
+   (*bytecode_count_ptr) = size;
+   // get byte codes
+   JvmtiClassFileReconstituter::copy_bytecodes(method, *bytecodes_ptr);
+- 
++
+   return JVMTI_ERROR_NONE;
+ } /* end GetBytecodes */
+ 
+@@ -2510,7 +2830,7 @@
+ // is_native_ptr - pre-checked for NULL
+ jvmtiError
+ JvmtiEnv::IsMethodNative(methodOop method_oop, jboolean* is_native_ptr) {
+-  NULL_CHECK(method_oop, JVMTI_ERROR_INVALID_METHODID);    
++  NULL_CHECK(method_oop, JVMTI_ERROR_INVALID_METHODID);
+   (*is_native_ptr) = method_oop->is_native();
+   return JVMTI_ERROR_NONE;
+ } /* end IsMethodNative */
+@@ -2538,35 +2858,9 @@
+   return JVMTI_ERROR_NONE;
+ } /* end IsMethodObsolete */
+ 
+-
+-// prefix - NULL is a valid value, must be checked
+-jvmtiError
+-JvmtiEnv::SetNativeMethodPrefix(const char* prefix) {
+-  return prefix == NULL? 
+-              SetNativeMethodPrefixes(0, NULL) : 
+-              SetNativeMethodPrefixes(1, (char**)&prefix);
+-} /* end SetNativeMethodPrefix */
+-
+-
+-// prefix_count - pre-checked to be greater than or equal to 0
+-// prefixes - pre-checked for NULL
+-jvmtiError
+-JvmtiEnv::SetNativeMethodPrefixes(jint prefix_count, char** prefixes) {
+-  // Have to grab JVMTI thread state lock to be sure that some thread
+-  // isn't accessing the prefixes at the same time we are setting them. 
+-  // No locks during VM bring-up.
+-  if (Threads::number_of_threads() == 0) {
+-    return set_native_method_prefixes(prefix_count, prefixes);
+-  } else {
+-    MutexLocker mu(JvmtiThreadState_lock);
+-    return set_native_method_prefixes(prefix_count, prefixes);
+-  }
+-} /* end SetNativeMethodPrefixes */
+-
+-
+   //
+   // Raw Monitor functions
+-  // 
++  //
+ 
+ // name - pre-checked for NULL
+ // monitor_ptr - pre-checked for NULL
+@@ -2574,7 +2868,7 @@
+ JvmtiEnv::CreateRawMonitor(const char* name, jrawMonitorID* monitor_ptr) {
+   JvmtiRawMonitor* rmonitor = new JvmtiRawMonitor(name);
+   NULL_CHECK(rmonitor, JVMTI_ERROR_OUT_OF_MEMORY);
+- 
++
+   *monitor_ptr = (jrawMonitorID)rmonitor;
+ 
+   return JVMTI_ERROR_NONE;
+@@ -2585,11 +2879,11 @@
+ jvmtiError
+ JvmtiEnv::DestroyRawMonitor(JvmtiRawMonitor * rmonitor) {
+   if (Threads::number_of_threads() == 0) {
+-    // Remove this  monitor from pending raw monitors list 
++    // Remove this  monitor from pending raw monitors list
+     // if it has entered in onload or start phase.
+     JvmtiPendingMonitors::destroy(rmonitor);
+   } else {
+-    Thread* thread  = Thread::current();  
++    Thread* thread  = Thread::current();
+     if (rmonitor->is_entered(thread)) {
+       // The caller owns this monitor which we are about to destroy.
+       // We exit the underlying synchronization object so that the
+@@ -2616,7 +2910,7 @@
+       return JVMTI_ERROR_NOT_MONITOR_OWNER;
+     }
+   }
+-  
++
+   delete rmonitor;
+ 
+   return JVMTI_ERROR_NONE;
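
The raw monitor functions here follow a create/enter/exit lifecycle; a minimal sketch follows (names are arbitrary). As the thread-type branches above allow, raw monitors work even from non-Java threads such as agent threads.

#include <jvmti.h>

static jrawMonitorID g_agent_lock;  // arbitrary agent-global monitor

static void init_lock(jvmtiEnv *jvmti) {
  jvmti->CreateRawMonitor("agent lock", &g_agent_lock);
}

static void with_lock(jvmtiEnv *jvmti) {
  jvmti->RawMonitorEnter(g_agent_lock);
  // ... touch shared agent state; RawMonitorWait/RawMonitorNotifyAll are
  // also legal here because the caller owns the monitor ...
  jvmti->RawMonitorExit(g_agent_lock);
}

static void shutdown_lock(jvmtiEnv *jvmti) {
  // Per DestroyRawMonitor above: if the caller owns the monitor it is
  // exited first; a monitor owned by another thread yields
  // JVMTI_ERROR_NOT_MONITOR_OWNER.
  jvmti->DestroyRawMonitor(g_agent_lock);
}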
+@@ -2631,10 +2925,10 @@
+     // used, add this raw monitor to the pending list.
+     // The pending monitors will be actually entered when
+     // the VM is setup.
+-    // See transition_pending_raw_monitors in create_vm() 
++    // See transition_pending_raw_monitors in create_vm()
+     // in thread.cpp.
+     JvmtiPendingMonitors::enter(rmonitor);
+-  } else {    
++  } else {
+     int r;
+     Thread* thread = Thread::current();
+ 
+@@ -2660,8 +2954,8 @@
+       JavaThreadState state = current_thread->thread_state();
+       assert(state == _thread_in_native, "Must be _thread_in_native");
+       // frame should already be walkable since we are in native
+-      assert(!current_thread->has_last_Java_frame() || 
+-	     current_thread->frame_anchor()->walkable(), "Must be walkable");
++      assert(!current_thread->has_last_Java_frame() ||
++             current_thread->frame_anchor()->walkable(), "Must be walkable");
+       current_thread->set_thread_state(_thread_blocked);
+ 
+       r = rmonitor->raw_enter(current_thread);
+@@ -2672,9 +2966,9 @@
+       assert(r == ObjectMonitor::OM_OK, "raw_enter should have worked");
+     } else {
+       if (thread->is_VM_thread() || thread->is_ConcurrentGC_thread()) {
+-	r = rmonitor->raw_enter(thread);
++        r = rmonitor->raw_enter(thread);
+       } else {
+-	ShouldNotReachHere();
++        ShouldNotReachHere();
+       }
+     }
+ 
+@@ -2690,7 +2984,7 @@
+ jvmtiError
+ JvmtiEnv::RawMonitorExit(JvmtiRawMonitor * rmonitor) {
+   jvmtiError err = JVMTI_ERROR_NONE;
+-    
++
+   if (Threads::number_of_threads() == 0) {
+     // No JavaThreads exist so just remove this monitor from the pending list.
+     // Bool value from exit is false if rmonitor is not in the list.
+@@ -2702,7 +2996,7 @@
+     Thread* thread = Thread::current();
+ 
+     if (thread->is_Java_thread()) {
+-      JavaThread* current_thread = (JavaThread*)thread;      
++      JavaThread* current_thread = (JavaThread*)thread;
+ #ifdef PROPER_TRANSITIONS
+       // Not really unknown but ThreadInVMfromNative does more than we want
+       ThreadInVMfromUnknown __tiv;
+@@ -2710,9 +3004,9 @@
+       r = rmonitor->raw_exit(current_thread);
+     } else {
+       if (thread->is_VM_thread() || thread->is_ConcurrentGC_thread()) {
+-	r = rmonitor->raw_exit(thread);
++        r = rmonitor->raw_exit(thread);
+       } else {
+-	ShouldNotReachHere();
++        ShouldNotReachHere();
+       }
+     }
+ 
+@@ -2736,7 +3030,7 @@
+   Thread* thread = Thread::current();
+ 
+   if (thread->is_Java_thread()) {
+-    JavaThread* current_thread = (JavaThread*)thread; 
++    JavaThread* current_thread = (JavaThread*)thread;
+ #ifdef PROPER_TRANSITIONS
+     // Not really unknown but ThreadInVMfromNative does more than we want
+     ThreadInVMfromUnknown __tiv;
+@@ -2756,8 +3050,8 @@
+     JavaThreadState state = current_thread->thread_state();
+     assert(state == _thread_in_native, "Must be _thread_in_native");
+     // frame should already be walkable since we are in native
+-    assert(!current_thread->has_last_Java_frame() || 
+-	   current_thread->frame_anchor()->walkable(), "Must be walkable");
++    assert(!current_thread->has_last_Java_frame() ||
++           current_thread->frame_anchor()->walkable(), "Must be walkable");
+     current_thread->set_thread_state(_thread_blocked);
+ 
+     r = rmonitor->raw_wait(millis, true, current_thread);
+@@ -2795,7 +3089,7 @@
+   Thread* thread = Thread::current();
+ 
+   if (thread->is_Java_thread()) {
+-    JavaThread* current_thread = (JavaThread*)thread;  
++    JavaThread* current_thread = (JavaThread*)thread;
+     // Not really unknown but ThreadInVMfromNative does more than we want
+     ThreadInVMfromUnknown __tiv;
+     r = rmonitor->raw_notify(current_thread);
+@@ -2851,86 +3145,34 @@
+ 
+   //
+   // JNI Function Interception functions
+-  // 
+-
+-// function_table - pre-checked for NULL
+-jvmtiError
+-JvmtiEnv::SetJNIFunctionTable(const jniNativeInterface* function_table) {
+-  // Copy jni function table at safepoint.  
+-  VM_JNIFunctionTableCopier copier(function_table);
+-  VMThread::execute(&copier);
+-  
+-  return JVMTI_ERROR_NONE;
+-} /* end SetJNIFunctionTable */
+-
+-
+-// function_table - pre-checked for NULL
+-jvmtiError
+-JvmtiEnv::GetJNIFunctionTable(jniNativeInterface** function_table) {
+-  *function_table=(jniNativeInterface*)jvmtiMalloc(sizeof(jniNativeInterface));
+-  if (*function_table == NULL)
+-    return JVMTI_ERROR_OUT_OF_MEMORY;
+-  memcpy(*function_table,(JavaThread::current())->get_jni_functions(),sizeof(jniNativeInterface));
+-  return JVMTI_ERROR_NONE;
+-} /* end GetJNIFunctionTable */
+-
+-
+   //
+-  // Event Management functions
+-  // 
+-
+-// callbacks - NULL is a valid value, must be checked
+-// size_of_callbacks - pre-checked to be greater than or equal to 0
+-jvmtiError
+-JvmtiEnv::SetEventCallbacks(const jvmtiEventCallbacks* callbacks, jint size_of_callbacks) {
+-  JvmtiEventController::set_event_callbacks(this, callbacks, size_of_callbacks);
+-  return JVMTI_ERROR_NONE;
+-} /* end SetEventCallbacks */
+ 
+ 
+-// event_thread - NULL is a valid value, must be checked
++// function_table - pre-checked for NULL
+ jvmtiError
+-JvmtiEnv::SetEventNotificationMode(jvmtiEventMode mode, jvmtiEvent event_type, jthread event_thread,   ...) {
+-  JavaThread* java_thread = NULL;
+-  if (event_thread != NULL) {
+-    oop thread_oop = JNIHandles::resolve_external_guard(event_thread);
+-    if (thread_oop == NULL) {
+-      return JVMTI_ERROR_INVALID_THREAD;
+-    }
+-    if (!thread_oop->is_a(SystemDictionary::thread_klass())) {
+-      return JVMTI_ERROR_INVALID_THREAD;
+-    }
+-    java_thread = java_lang_Thread::thread(thread_oop); 
+-    if (java_thread == NULL) {
+-      return JVMTI_ERROR_THREAD_NOT_ALIVE;
+-    }
+-  }
+-
+-  // event_type must be valid
+-  if (!JvmtiEventController::is_valid_event_type(event_type)) {
+-    return JVMTI_ERROR_INVALID_EVENT_TYPE;
+-  }
++JvmtiEnv::SetJNIFunctionTable(const jniNativeInterface* function_table) {
++  // Copy jni function table at safepoint.
++  VM_JNIFunctionTableCopier copier(function_table);
++  VMThread::execute(&copier);
+ 
+-  // global events cannot be controlled at thread level.
+-  if (java_thread != NULL && JvmtiEventController::is_global_event(event_type)) {
+-    return JVMTI_ERROR_ILLEGAL_ARGUMENT;
+-  }
+-       
+-  bool enabled = (mode == JVMTI_ENABLE);
++  return JVMTI_ERROR_NONE;
++} /* end SetJNIFunctionTable */
+ 
+-  // assure that needed capabilities are present
+-  if (enabled && !JvmtiUtil::has_event_capability(event_type, get_capabilities())) {
+-    return JVMTI_ERROR_MUST_POSSESS_CAPABILITY;
+-  }
+-       
+-  if (event_type == JVMTI_EVENT_CLASS_FILE_LOAD_HOOK && enabled) {
+-    record_class_file_load_hook_enabled();
+-  }
+-  JvmtiEventController::set_user_enabled(this, java_thread, event_type, enabled);
+-  
++
++// function_table - pre-checked for NULL
++jvmtiError
++JvmtiEnv::GetJNIFunctionTable(jniNativeInterface** function_table) {
++  *function_table=(jniNativeInterface*)jvmtiMalloc(sizeof(jniNativeInterface));
++  if (*function_table == NULL)
++    return JVMTI_ERROR_OUT_OF_MEMORY;
++  memcpy(*function_table,(JavaThread::current())->get_jni_functions(),sizeof(jniNativeInterface));
+   return JVMTI_ERROR_NONE;
+-} /* end SetEventNotificationMode */
++} /* end GetJNIFunctionTable */
++
+ 
++  //
++  // Event Management functions
++  //
+ 
+ jvmtiError
+ JvmtiEnv::GenerateEvents(jvmtiEvent event_type) {
+@@ -2943,20 +3185,20 @@
+   // for compiled_method_load events we must check that the environment
+   // has the can_generate_compiled_method_load_events capability.
+   if (event_type == JVMTI_EVENT_COMPILED_METHOD_LOAD) {
+-    if (get_capabilities()->can_generate_compiled_method_load_events == 0) { 
++    if (get_capabilities()->can_generate_compiled_method_load_events == 0) {
+       return JVMTI_ERROR_MUST_POSSESS_CAPABILITY;
+     }
+     return JvmtiCodeBlobEvents::generate_compiled_method_load_events(this);
+-  } else {     
++  } else {
+     return JvmtiCodeBlobEvents::generate_dynamic_code_events(this);
+-  }  
++  }
+ 
+ } /* end GenerateEvents */
+ 
+ 
+   //
+   // Extension Mechanism functions
+-  // 
++  //
+ 
+ // extension_count_ptr - pre-checked for NULL
+ // extensions - pre-checked for NULL
+@@ -2980,50 +3222,9 @@
+   return JvmtiExtensions::set_event_callback(this, extension_event_index, callback);
+ } /* end SetExtensionEventCallback */
+ 
+-
+-  //
+-  // Capability functions
+-  // 
+-
+-// capabilities_ptr - pre-checked for NULL
+-jvmtiError
+-JvmtiEnv::GetPotentialCapabilities(jvmtiCapabilities* capabilities_ptr) {
+-  JvmtiManageCapabilities::get_potential_capabilities(get_capabilities(), 
+-                                                      get_prohibited_capabilities(),
+-                                                      capabilities_ptr);
+-  return JVMTI_ERROR_NONE;
+-} /* end GetPotentialCapabilities */
+-
+-
+-// capabilities_ptr - pre-checked for NULL
+-jvmtiError
+-JvmtiEnv::AddCapabilities(const jvmtiCapabilities* capabilities_ptr) {
+-  return JvmtiManageCapabilities::add_capabilities(get_capabilities(), 
+-                                                   get_prohibited_capabilities(),
+-                                                   capabilities_ptr, 
+-                                                   get_capabilities());
+-} /* end AddCapabilities */
+-
+-
+-// capabilities_ptr - pre-checked for NULL
+-jvmtiError
+-JvmtiEnv::RelinquishCapabilities(const jvmtiCapabilities* capabilities_ptr) {
+-  JvmtiManageCapabilities::relinquish_capabilities(get_capabilities(), capabilities_ptr, get_capabilities());
+-  return JVMTI_ERROR_NONE;  
+-} /* end RelinquishCapabilities */
+-
+-
+-// capabilities_ptr - pre-checked for NULL
+-jvmtiError
+-JvmtiEnv::GetCapabilities(jvmtiCapabilities* capabilities_ptr) {
+-  JvmtiManageCapabilities::copy_capabilities(get_capabilities(), capabilities_ptr);
+-  return JVMTI_ERROR_NONE;  
+-} /* end GetCapabilities */
+-
+-
+   //
+   // Timers functions
+-  // 
++  //
+ 
+ // info_ptr - pre-checked for NULL
+ jvmtiError
+@@ -3082,118 +3283,9 @@
+   return JVMTI_ERROR_NONE;
+ } /* end GetAvailableProcessors */
+ 
+-
+-  //
+-  // Class Loader Search functions
+-  // 
+-
+-// segment - pre-checked for NULL
+-jvmtiError
+-JvmtiEnv::AddToBootstrapClassLoaderSearch(const char* segment) {
+-  jvmtiPhase phase = get_phase();
+-  if (phase == JVMTI_PHASE_ONLOAD) {
+-    Arguments::append_sysclasspath(segment);
+-    return JVMTI_ERROR_NONE;
+-  } else {
+-    assert(phase == JVMTI_PHASE_LIVE, "sanity check");
+-
+-    // create the zip entry
+-    ClassPathZipEntry* zip_entry = ClassLoader::create_class_path_zip_entry(segment);
+-    if (zip_entry == NULL) {
+-      return JVMTI_ERROR_ILLEGAL_ARGUMENT;
+-    }
+-
+-    // lock the loader 
+-    Thread* thread = Thread::current();
+-    HandleMark hm;
+-    Handle loader_lock = Handle(thread, SystemDictionary::system_loader_lock());
+-
+-    ObjectLocker ol(loader_lock, thread);
+-
+-    // add the jar file to the bootclasspath
+-    if (TraceClassLoading) {
+-      tty->print_cr("[Opened %s]", zip_entry->name());
+-    }
+-    ClassLoader::add_to_list(zip_entry);
+-    return JVMTI_ERROR_NONE;
+-  }
+-
+-} /* end AddToBootstrapClassLoaderSearch */
+-
+-
+-// segment - pre-checked for NULL
+-jvmtiError
+-JvmtiEnv::AddToSystemClassLoaderSearch(const char* segment) {
+-  jvmtiPhase phase = get_phase();
+-
+-  if (phase == JVMTI_PHASE_ONLOAD) {
+-    for (SystemProperty* p = Arguments::system_properties(); p != NULL; p = p->next()) {
+-      if (strcmp("java.class.path", p->key()) == 0) {
+-	p->append_value(segment);
+- 	break;	
+-      }
+-    }
+-    return JVMTI_ERROR_NONE;
+-  } else {
+-    HandleMark hm;
+-
+-    assert(phase == JVMTI_PHASE_LIVE, "sanity check");
+-
+-    // create the zip entry (which will open the zip file and hence
+-    // check that the segment is indeed a zip file).
+-    ClassPathZipEntry* zip_entry = ClassLoader::create_class_path_zip_entry(segment);
+-    if (zip_entry == NULL) {
+-      return JVMTI_ERROR_ILLEGAL_ARGUMENT;
+-    }
+-    delete zip_entry;	// no longer needed
+-
+-    // lock the loader
+-    Thread* THREAD = Thread::current();
+-    Handle loader = Handle(THREAD, SystemDictionary::java_system_loader());
+-
+-    ObjectLocker ol(loader, THREAD);
+-
+-    // need the path as java.lang.String
+-    Handle path = java_lang_String::create_from_str(segment, THREAD);
+-    if (HAS_PENDING_EXCEPTION) {
+-      CLEAR_PENDING_EXCEPTION;
+-      return JVMTI_ERROR_INTERNAL;
+-    }
+-
+-    instanceKlassHandle loader_ik(THREAD, loader->klass());
+-
+-    // Invoke the appendToClassPathForInstrumentation method - if the method
+-    // is not found it means the loader doesn't support adding to the class path
+-    // in the live phase.
+-    {
+-      JavaValue res(T_VOID);
+-      JavaCalls::call_special(&res,
+-			      loader,
+-			      loader_ik,
+-                              vmSymbolHandles::appendToClassPathForInstrumentation_name(),
+-                              vmSymbolHandles::appendToClassPathForInstrumentation_signature(),
+-			      path,
+-                              THREAD);
+-      if (HAS_PENDING_EXCEPTION) {
+-	symbolOop ex_name = PENDING_EXCEPTION->klass()->klass_part()->name();
+-	CLEAR_PENDING_EXCEPTION;
+-
+-	if (ex_name == vmSymbols::java_lang_NoSuchMethodError()) {
+-	  return JVMTI_ERROR_CLASS_LOADER_UNSUPPORTED;
+-	} else {
+-	  return JVMTI_ERROR_INTERNAL;
+-	}
+-      }
+-    }
+-
+-    return JVMTI_ERROR_NONE;
+-  }
+-} /* end AddToSystemClassLoaderSearch */
+-
+-
+   //
+   // System Properties functions
+-  // 
++  //
+ 
+ // count_ptr - pre-checked for NULL
+ // property_ptr - pre-checked for NULL
+@@ -3263,96 +3355,4 @@
+   return err;
+ } /* end SetSystemProperty */
+ 
+-
+-  //
+-  // General functions
+-  // 
+-
+-// phase_ptr - pre-checked for NULL
+-jvmtiError
+-JvmtiEnv::GetPhase(jvmtiPhase* phase_ptr) {
+-  *phase_ptr = get_phase();
+-  return JVMTI_ERROR_NONE;
+-} /* end GetPhase */
+-
+-
+-jvmtiError
+-JvmtiEnv::DisposeEnvironment() {
+-  dispose();
+-  return JVMTI_ERROR_NONE;
+-} /* end DisposeEnvironment */
+-
+-
+-// data - NULL is a valid value, must be checked
+-jvmtiError
+-JvmtiEnv::SetEnvironmentLocalStorage(const void* data) {
+-  set_env_local_storage(data);
+-  return JVMTI_ERROR_NONE;
+-} /* end SetEnvironmentLocalStorage */
+-
+-
+-// data_ptr - pre-checked for NULL
+-jvmtiError
+-JvmtiEnv::GetEnvironmentLocalStorage(void** data_ptr) {
+-  *data_ptr = (void*)get_env_local_storage();
+-  return JVMTI_ERROR_NONE;
+-} /* end GetEnvironmentLocalStorage */
+-
+-
+-// version_ptr - pre-checked for NULL
+-jvmtiError
+-JvmtiEnv::GetVersionNumber(jint* version_ptr) {
+-  *version_ptr = JVMTI_VERSION;
+-  return JVMTI_ERROR_NONE;
+-} /* end GetVersionNumber */
+-
+-
+-// name_ptr - pre-checked for NULL
+-jvmtiError
+-JvmtiEnv::GetErrorName(jvmtiError error, char** name_ptr) {
+-  if (error < JVMTI_ERROR_NONE || error > JVMTI_ERROR_MAX) {
+-    return JVMTI_ERROR_ILLEGAL_ARGUMENT;
+-  }
+-  const char *name = JvmtiUtil::error_name(error);
+-  if (name == NULL) {
+-    return JVMTI_ERROR_ILLEGAL_ARGUMENT;
+-  }
+-  size_t len = strlen(name) + 1;
+-  jvmtiError err = allocate(len, (unsigned char**)name_ptr);
+-  if (err == JVMTI_ERROR_NONE) {
+-    memcpy(*name_ptr, name, len);
+-  }
+-  return err;
+-} /* end GetErrorName */
+-
+-
+-jvmtiError
+-JvmtiEnv::SetVerboseFlag(jvmtiVerboseFlag flag, jboolean value) {
+-  switch (flag) {
+-  case JVMTI_VERBOSE_OTHER:
+-    // ignore
+-    break;
+-  case JVMTI_VERBOSE_CLASS:
+-    TraceClassLoading = value != 0;
+-    TraceClassUnloading = value != 0;
+-    break;
+-  case JVMTI_VERBOSE_GC:
+-    PrintGC = value != 0;
+-    TraceClassUnloading = value != 0;
+-    break;
+-  case JVMTI_VERBOSE_JNI:
+-    PrintJNIResolving = value != 0;
+-    break;
+-  default:
+-    return JVMTI_ERROR_ILLEGAL_ARGUMENT;
+-  };    
+-  return JVMTI_ERROR_NONE;
+-} /* end SetVerboseFlag */
+-
+-
+-// format_ptr - pre-checked for NULL
+-jvmtiError
+-JvmtiEnv::GetJLocationFormat(jvmtiJlocationFormat* format_ptr) {
+-  *format_ptr = JVMTI_JLOCATION_JVMBCI;
+-  return JVMTI_ERROR_NONE;
+-} /* end GetJLocationFormat */
++#endif // !JVMTI_KERNEL
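
The general functions deleted by the hunk above (GetPhase through GetJLocationFormat) are the same ones re-added near the top of the file, before the new JVMTI_KERNEL guard. As a small usage sketch, here is how an agent typically combines them for error reporting; the helper name is hypothetical.

#include <jvmti.h>
#include <stdio.h>

static void report(jvmtiEnv *jvmti, jvmtiError err) {
  jint version = 0;
  jvmti->GetVersionNumber(&version);
  char *name = NULL;
  if (jvmti->GetErrorName(err, &name) == JVMTI_ERROR_NONE) {
    fprintf(stderr, "JVMTI %x: error %d (%s)\n", version, (int)err, name);
    // GetErrorName allocates the string; the agent must free it.
    jvmti->Deallocate(reinterpret_cast<unsigned char *>(name));
  }
}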
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiEnvFill.java openjdk/hotspot/src/share/vm/prims/jvmtiEnvFill.java
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiEnvFill.java	2008-02-28 05:02:41.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiEnvFill.java	2008-01-31 09:19:01.000000000 -0500
+@@ -19,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ import java.io.*;
+@@ -28,23 +28,23 @@
+ class jvmtiEnvFill {
+ 
+     public static void main(String[] args) throws IOException {
+-	if (args.length != 3) {
+-	    System.err.println("usage: <filledFile> <stubFile> <resultFile>");
+-	    System.exit(1);
+-	}
+-	String filledFN = args[0];
+-	String stubFN = args[1];
+-	String resultFN = args[2];
+-
+-	SourceFile filledSF = new SourceFile(filledFN);
+-	SourceFile stubSF = new SourceFile(stubFN);
+-
+-
+-	stubSF.fill(filledSF);
+-
+-	PrintWriter out = new PrintWriter(new FileWriter(resultFN));
+-	stubSF.output(out);
+-	out.close();
++        if (args.length != 3) {
++            System.err.println("usage: <filledFile> <stubFile> <resultFile>");
++            System.exit(1);
++        }
++        String filledFN = args[0];
++        String stubFN = args[1];
++        String resultFN = args[2];
++
++        SourceFile filledSF = new SourceFile(filledFN);
++        SourceFile stubSF = new SourceFile(stubFN);
++
++
++        stubSF.fill(filledSF);
++
++        PrintWriter out = new PrintWriter(new FileWriter(resultFN));
++        stubSF.output(out);
++        out.close();
+     }
+ }
+ 
+@@ -70,68 +70,68 @@
+       List<String> body = new ArrayList<String>();
+ 
+       Function() throws IOException {
+-	line = in.readLine();
+-	String trimmed = line.trim();
+-	if (!trimmed.startsWith(functionPrefix)) {
+-	    error("expected '" + functionPrefix + "'");
+-	}
+-	int index = trimmed.indexOf('(', functionPrefix.length());
+-	if (index == -1) {
+-	    error("missing open paren");
+-	}
+-	name = trimmed.substring(functionPrefix.length(), index);
+-	int index2 = trimmed.indexOf(')', index);
+-	if (index2 == -1) {
+-	    error("missing close paren - must be on same line");
+-	}
+-	args = trimmed.substring(index+1, index2);
++        line = in.readLine();
++        String trimmed = line.trim();
++        if (!trimmed.startsWith(functionPrefix)) {
++            error("expected '" + functionPrefix + "'");
++        }
++        int index = trimmed.indexOf('(', functionPrefix.length());
++        if (index == -1) {
++            error("missing open paren");
++        }
++        name = trimmed.substring(functionPrefix.length(), index);
++        int index2 = trimmed.indexOf(')', index);
++        if (index2 == -1) {
++            error("missing close paren - must be on same line");
++        }
++        args = trimmed.substring(index+1, index2);
+         compareArgs = args.replaceAll("\\s", "");
+-	String tail = trimmed.substring(index2+1).trim();	
+-	if (!tail.equals("{")) {
+-	    error("function declaration first line must end with open bracket '{', instead got '" +
+-	           tail + "'");
+-	}
+-	while(true) {
+-	    line = in.readLine();
+-	    if (line == null) {
+-		line = ""; // so error does not look wierd
+-		error("unexpected end of file");
+-	    }
+-	    if (line.startsWith("}")) {
+-		break;
+-	    }
+-	    body.add(line);
+-	}
+-	String expected = "} /* end " + name + " */";
+-	trimmed = line.replaceAll("\\s","");
+-	if (!trimmed.equals(expected.replaceAll("\\s",""))) {
+-	    error("function end is malformed - should be: " + expected);
+-	}
+-	// copy over the comment prefix
+-	comment = before;
+-	before = new ArrayList<String>();
++        String tail = trimmed.substring(index2+1).trim();
++        if (!tail.equals("{")) {
++            error("function declaration first line must end with open bracket '{', instead got '" +
++                   tail + "'");
++        }
++        while(true) {
++            line = in.readLine();
++            if (line == null) {
++                line = ""; // so error does not look wierd
++                error("unexpected end of file");
++            }
++            if (line.startsWith("}")) {
++                break;
++            }
++            body.add(line);
++        }
++        String expected = "} /* end " + name + " */";
++        trimmed = line.replaceAll("\\s","");
++        if (!trimmed.equals(expected.replaceAll("\\s",""))) {
++            error("function end is malformed - should be: " + expected);
++        }
++        // copy over the comment prefix
++        comment = before;
++        before = new ArrayList<String>();
+       }
+ 
+       void remove() {
+-	functionMap.remove(name);
++        functionMap.remove(name);
+       }
+ 
+       String fileName() {
+-	return fn;
++        return fn;
+       }
+ 
+       void fill(Function filledFunc) {
+-	if (filledFunc == null) {
+-	    System.err.println("Warning: function " + name + " missing from filled file");
+-	    body.add(0, "    /*** warning: function added and not filled in ***/");
+-	} else {
++        if (filledFunc == null) {
++            System.err.println("Warning: function " + name + " missing from filled file");
++            body.add(0, "    /*** warning: function added and not filled in ***/");
++        } else {
+             int fbsize = filledFunc.body.size();
+             int bsize = body.size();
+             if (fbsize > bsize  || !body.subList(bsize-fbsize,bsize).equals(filledFunc.body)) {
+                 // it has actually been filled in
+                 body = filledFunc.body;
+                 if (!compareArgs.equals(filledFunc.compareArgs)) {
+-                    System.err.println("Warning: function " + name + 
++                    System.err.println("Warning: function " + name +
+                                        ": filled and stub arguments differ");
+                     System.err.println("  old (filled): " + filledFunc.args);
+                     System.err.println("  new (stub): " + args);
+@@ -140,121 +140,121 @@
+                 }
+             }
+             filledFunc.remove();  // mark used
+-	}
++        }
+       }
+ 
+       void output(PrintWriter out) {
+-	    Iterator it = comment.iterator();
+- 	    while (it.hasNext()) {
+-	    	out.println(it.next());
+-	    }
+-	    out.println("jvmtiError");
+-	    out.print(functionPrefix);
+-	    out.print(name);
+-	    out.print('(');
+-	    out.print(args);
+-	    out.println(") {");
+-	    it = body.iterator();
+- 	    while (it.hasNext()) {
+-	    	out.println(it.next());
+-	    }
+-	    out.print("} /* end ");
+-	    out.print(name);
+-	    out.println(" */");
++            Iterator it = comment.iterator();
++            while (it.hasNext()) {
++                out.println(it.next());
++            }
++            out.println("jvmtiError");
++            out.print(functionPrefix);
++            out.print(name);
++            out.print('(');
++            out.print(args);
++            out.println(") {");
++            it = body.iterator();
++            while (it.hasNext()) {
++                out.println(it.next());
++            }
++            out.print("} /* end ");
++            out.print(name);
++            out.println(" */");
+       }
+     }
+ 
+     SourceFile(String fn) throws IOException {
+-	this.fn = fn;
+-	Reader reader = new FileReader(fn);
+-	in = new LineNumberReader(reader);
+-
+-	while (readGaps()) {
+-	    Function func = new Function();
+-	    functionMap.put(func.name, func);
+-	    functions.add(func);
+-	}
+-	
+-	in.close();
++        this.fn = fn;
++        Reader reader = new FileReader(fn);
++        in = new LineNumberReader(reader);
++
++        while (readGaps()) {
++            Function func = new Function();
++            functionMap.put(func.name, func);
++            functions.add(func);
++        }
++
++        in.close();
+     }
+ 
+     void error(String msg) {
+-	System.err.println("Fatal error parsing file: " + fn);
+-	System.err.println("Line number: " + in.getLineNumber());
+-	System.err.println("Error message: " + msg);
+-	System.err.println("Source line: " + line);
+-	System.exit(1);
++        System.err.println("Fatal error parsing file: " + fn);
++        System.err.println("Line number: " + in.getLineNumber());
++        System.err.println("Error message: " + msg);
++        System.err.println("Source line: " + line);
++        System.exit(1);
+     }
+ 
+     boolean readGaps() throws IOException {
+-	while(true) {
+-	    line = in.readLine();
+-	    if (line == null) {
+-		return false; // end of file
+-	    }
+-	    if (!inFilePrefix && line.startsWith("}")) {
+-		error("unexpected close bracket in first column, outside of function.\n");
+-	    }
+-	    String trimmed = line.trim();
+-	    if (line.startsWith("jvmtiError")) {
+-		if (trimmed.equals("jvmtiError")) {
+-		    if (inFilePrefix) {
+-			error("unexpected 'jvmtiError' line in file prefix.\n" +
+-			      "is '" + endFilePrefix + "'... line missing?");
+-		    }
+-		    return true; // beginning of a function
+-		} else {
+-		    error("extra characters at end of 'jvmtiError'");
+-		}
+-	    }
+-	    if (inFilePrefix) {
+-		top.add(line);
+-	    } else {
+-		trimmed = line.trim();
+-		if (!trimmed.equals("") && !trimmed.startsWith("//")) {
+-		    error("only comments and blank lines allowed between functions");
+-		}
+-		before.add(line);
+-	    }
+-	    if (line.replaceAll("\\s","").toLowerCase().startsWith(endFilePrefix.replaceAll("\\s",""))) {
+-		if (!inFilePrefix) {
+-		    error("excess '" + endFilePrefix + "'");
+-		}
+-		inFilePrefix = false;
+-	    }
+-	}
++        while(true) {
++            line = in.readLine();
++            if (line == null) {
++                return false; // end of file
++            }
++            if (!inFilePrefix && line.startsWith("}")) {
++                error("unexpected close bracket in first column, outside of function.\n");
++            }
++            String trimmed = line.trim();
++            if (line.startsWith("jvmtiError")) {
++                if (trimmed.equals("jvmtiError")) {
++                    if (inFilePrefix) {
++                        error("unexpected 'jvmtiError' line in file prefix.\n" +
++                              "is '" + endFilePrefix + "'... line missing?");
++                    }
++                    return true; // beginning of a function
++                } else {
++                    error("extra characters at end of 'jvmtiError'");
++                }
++            }
++            if (inFilePrefix) {
++                top.add(line);
++            } else {
++                trimmed = line.trim();
++                if (!trimmed.equals("") && !trimmed.startsWith("//") && !trimmed.startsWith("#")) {
++                    error("only comments and blank lines allowed between functions");
++                }
++                before.add(line);
++            }
++            if (line.replaceAll("\\s","").toLowerCase().startsWith(endFilePrefix.replaceAll("\\s",""))) {
++                if (!inFilePrefix) {
++                    error("excess '" + endFilePrefix + "'");
++                }
++                inFilePrefix = false;
++            }
++        }
+     }
+ 
+     void fill(SourceFile filledSF) {
+-	// copy beginning of file straight from filled file
+-	top = filledSF.top;
++        // copy beginning of file straight from filled file
++        top = filledSF.top;
+ 
+-	// file in functions
+-	Iterator it = functions.iterator();
+-	while (it.hasNext()) {
+-	    Function stubFunc = (Function)(it.next());
+-	    Function filledFunc = (Function)filledSF.functionMap.get(stubFunc.name);
+-	    stubFunc.fill(filledFunc);
+-	}
+-	if (filledSF.functionMap.size() > 0) {
+-	    System.err.println("Warning: the following functions were present in the " +
+-				"filled file but missing in the stub file and thus not copied:");
+-	    it  = filledSF.functionMap.values().iterator();
+-	    while (it.hasNext()) {
+-	    	System.err.println("        " + ((Function)(it.next())).name);
+-	    }
+-	}
+-    }	
++        // fill in functions
++        Iterator it = functions.iterator();
++        while (it.hasNext()) {
++            Function stubFunc = (Function)(it.next());
++            Function filledFunc = (Function)filledSF.functionMap.get(stubFunc.name);
++            stubFunc.fill(filledFunc);
++        }
++        if (filledSF.functionMap.size() > 0) {
++            System.err.println("Warning: the following functions were present in the " +
++                                "filled file but missing in the stub file and thus not copied:");
++            it = filledSF.functionMap.values().iterator();
++            while (it.hasNext()) {
++                System.err.println("        " + ((Function)(it.next())).name);
++            }
++        }
++    }
+ 
+     void output(PrintWriter out) {
+-	Iterator it = top.iterator();
+-	while (it.hasNext()) {
+-	    out.println(it.next());
+-	}
+-	it = functions.iterator();
+-	while (it.hasNext()) {
+-	    Function stubFunc = (Function)(it.next());
+-	    stubFunc.output(out);
+-	}
++        Iterator it = top.iterator();
++        while (it.hasNext()) {
++            out.println(it.next());
++        }
++        it = functions.iterator();
++        while (it.hasNext()) {
++            Function stubFunc = (Function)(it.next());
++            stubFunc.output(out);
++        }
+     }
+ }
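A minimal, self-contained sketch of the suffix test fill() applies above: a stub only counts as untouched when the filled body is exactly the tail of the stub body. Class and variable names here are invented; this is not part of the patch.

    import java.util.Arrays;
    import java.util.List;

    class FilledCheck {
        // mirrors: fbsize > bsize || !body.subList(bsize-fbsize, bsize).equals(filledFunc.body)
        static boolean actuallyFilled(List<String> stubBody, List<String> filledBody) {
            int fbsize = filledBody.size();
            int bsize = stubBody.size();
            return fbsize > bsize
                || !stubBody.subList(bsize - fbsize, bsize).equals(filledBody);
        }

        public static void main(String[] args) {
            List<String> stub = Arrays.asList("    /* stub comment */", "    return JVMTI_ERROR_NONE;");
            List<String> filled = Arrays.asList("    return JVMTI_ERROR_NONE;");
            // prints false: the filled body is just the stub's tail, so nothing was filled in
            System.out.println(actuallyFilled(stub, filled));
        }
    }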
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiEnvThreadState.cpp openjdk/hotspot/src/share/vm/prims/jvmtiEnvThreadState.cpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiEnvThreadState.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiEnvThreadState.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)jvmtiEnvThreadState.cpp	1.23 07/05/23 10:53:46 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -31,14 +28,14 @@
+ 
+ ///////////////////////////////////////////////////////////////
+ //
+-// class JvmtiFramePop 
++// class JvmtiFramePop
+ //
+ 
+ #ifndef PRODUCT
+ void JvmtiFramePop::print() {
+   tty->print_cr("_frame_number=%d", _frame_number);
+ }
+-#endif  
++#endif
+ 
+ 
+ ///////////////////////////////////////////////////////////////
+@@ -46,7 +43,7 @@
+ // class JvmtiFramePops - private methods
+ //
+ 
+-void 
++void
+ JvmtiFramePops::set(JvmtiFramePop& fp) {
+   if (_pops->find(fp.frame_number()) < 0) {
+     _pops->append(fp.frame_number());
+@@ -54,15 +51,15 @@
+ }
+ 
+ 
+-void 
++void
+ JvmtiFramePops::clear(JvmtiFramePop& fp) {
+   assert(_pops->length() > 0, "No more frame pops");
+ 
+   _pops->remove(fp.frame_number());
+ }
+ 
+-  
+-int 
++
++int
+ JvmtiFramePops::clear_to(JvmtiFramePop& fp) {
+   int cleared = 0;
+   int index = 0;
+@@ -129,7 +126,7 @@
+   _agent_thread_local_storage_data = NULL;
+ }
+ 
+-JvmtiEnvThreadState::~JvmtiEnvThreadState()   { 
++JvmtiEnvThreadState::~JvmtiEnvThreadState()   {
+   delete _frame_pops;
+   _frame_pops = NULL;
+ }
+@@ -140,7 +137,7 @@
+ // - instruction rewrites
+ // - breakpoint followed by single step
+ // - single step at a breakpoint
+-void JvmtiEnvThreadState::compare_and_set_current_location(methodOop new_method, 
++void JvmtiEnvThreadState::compare_and_set_current_location(methodOop new_method,
+                                                            address new_location, jvmtiEvent event) {
+ 
+   int new_bci = new_location - new_method->code_base();
+@@ -163,7 +160,7 @@
+       // If step is pending for popframe then it may not be
+       // a repeat step. The new_bci and method_id is same as current_bci
+       // and current method_id after pop and step for recursive calls.
+-      // This has been handled by clearing the location 
++      // This has been handled by clearing the location
+       _single_stepping_posted = true;
+       break;
+     default:
+@@ -198,7 +195,7 @@
+   return _frame_pops == NULL? false : (_frame_pops->length() > 0);
+ }
+ 
+-void JvmtiEnvThreadState::set_frame_pop(int frame_number) { 
++void JvmtiEnvThreadState::set_frame_pop(int frame_number) {
+ #ifdef ASSERT
+   uint32_t debug_bits = 0;
+ #endif
+@@ -209,7 +206,7 @@
+ }
+ 
+ 
+-void JvmtiEnvThreadState::clear_frame_pop(int frame_number) { 
++void JvmtiEnvThreadState::clear_frame_pop(int frame_number) {
+ #ifdef ASSERT
+   uint32_t debug_bits = 0;
+ #endif
+@@ -220,7 +217,7 @@
+ }
+ 
+ 
+-void JvmtiEnvThreadState::clear_to_frame_pop(int frame_number)  { 
++void JvmtiEnvThreadState::clear_to_frame_pop(int frame_number)  {
+ #ifdef ASSERT
+   uint32_t debug_bits = 0;
+ #endif
+@@ -280,7 +277,7 @@
+   // 2) single-step to a bytecode that will be transformed to a fast version
+   // We skip to avoid posting the duplicate single-stepping event.
+ 
+-  // If single-stepping is disabled, clear current location so that 
++  // If single-stepping is disabled, clear current location so that
+   // single-stepping to the same method and bcp at a later time will be
+   // detected if single-stepping is enabled at that time (see 4388912).
+ 
+@@ -303,7 +300,7 @@
+       jmethodID method_id;
+       int bci;
+       // The java thread stack may not be walkable for a running thread
+-      // so get current location at safepoint. 
++      // so get current location at safepoint.
+       VM_GetCurrentLocation op(_thread);
+       VMThread::execute(&op);
+       op.get_current_location(&method_id, &bci);
+@@ -314,4 +311,3 @@
+     clear_current_location();
+   }
+ }
+-
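The compare_and_set_current_location()/reset_current_location() pair above amounts to a small dedup protocol. A simplified Java analogue (a sketch under invented names; a long stands in for jmethodID, and only the single-step half is modelled):

    class StepDedup {
        private long currentMethodId = -1; // stand-in for jmethodID
        private int currentBci = -1;
        private boolean singleStepPosted;

        // returns true when a single-step event should be posted
        boolean onSingleStep(long methodId, int bci) {
            if (methodId == currentMethodId && bci == currentBci && singleStepPosted) {
                return false; // same location already reported, e.g. breakpoint then step
            }
            currentMethodId = methodId;
            currentBci = bci;
            singleStepPosted = true;
            return true;
        }

        // when single-stepping is disabled, clear the location so stepping to the
        // same method/bcp later is detected again (cf. 4388912 in the hunk above)
        void clearCurrentLocation() {
            currentMethodId = -1;
            currentBci = -1;
            singleStepPosted = false;
        }
    }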
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiEnvThreadState.hpp openjdk/hotspot/src/share/vm/prims/jvmtiEnvThreadState.hpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiEnvThreadState.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiEnvThreadState.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jvmtiEnvThreadState.hpp	1.17 07/05/05 17:06:37 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ #ifndef _JAVA_JVMTIENVTHREADSTATE_H_
+ #define _JAVA_JVMTIENVTHREADSTATE_H_
+@@ -41,7 +38,7 @@
+ // operator.  I'm trying to rewrite everything.
+ 
+ class JvmtiFramePop VALUE_OBJ_CLASS_SPEC {
+- private:  
++ private:
+   // Frame number counting from BOTTOM (oldest) frame;
+   // bottom frame == #0
+   int _frame_number;
+@@ -69,7 +66,7 @@
+ //
+ 
+ class JvmtiFramePops : public CHeapObj {
+- private: 
++ private:
+   GrowableArray<int>* _pops;
+ 
+   // should only be used by JvmtiEventControllerPrivate
+@@ -79,7 +76,7 @@
+   void set(JvmtiFramePop& fp);
+   void clear(JvmtiFramePop& fp);
+   int clear_to(JvmtiFramePop& fp);
+-  
++
+  public:
+   JvmtiFramePops();
+   ~JvmtiFramePops();
+@@ -107,17 +104,17 @@
+   JvmtiEnvThreadState *_next;
+   jmethodID         _current_method_id;
+   int               _current_bci;
+-  bool		    _breakpoint_posted;
+-  bool		    _single_stepping_posted;
++  bool              _breakpoint_posted;
++  bool              _single_stepping_posted;
+   JvmtiEnvThreadEventEnable _event_enable;
+   void              *_agent_thread_local_storage_data; // per env and per thread agent allocated data.
+-    
++
+   // Class used to store pending framepops.
+   // lazily initialized by get_frame_pops();
+   JvmtiFramePops *_frame_pops;
+ 
+-  inline void set_current_location(jmethodID method_id, int bci) { 
+-    _current_method_id = method_id; 
++  inline void set_current_location(jmethodID method_id, int bci) {
++    _current_method_id = method_id;
+     _current_bci  = bci;
+   }
+ 
+@@ -136,9 +133,9 @@
+   JvmtiEnvThreadEventEnable *event_enable() { return &_event_enable; }
+   void *get_agent_thread_local_storage_data() { return _agent_thread_local_storage_data; }
+   void set_agent_thread_local_storage_data (void *data) { _agent_thread_local_storage_data = data; }
+-            
+ 
+-  // If the thread is in the given method at the given 
++
++  // If the thread is in the given method at the given
+   // location just return.  Otherwise, reset the current location
+   // and reset _breakpoint_posted and _single_stepping_posted.
+   // _breakpoint_posted and _single_stepping_posted are only cleared
+@@ -154,7 +151,7 @@
+     _single_stepping_posted = true;
+   }
+   inline bool breakpoint_posted() { return _breakpoint_posted; }
+-  inline bool single_stepping_posted() { 
++  inline bool single_stepping_posted() {
+     return _single_stepping_posted;
+   }
+ 
+@@ -162,7 +159,7 @@
+   inline JvmtiEnv *get_env() { return _env; }
+ 
+   // lazily initialize _frame_pops
+-  JvmtiFramePops* get_frame_pops();  
++  JvmtiFramePops* get_frame_pops();
+ 
+   bool has_frame_pops();
+ 
+@@ -176,4 +173,3 @@
+ };
+ 
+ #endif   /* _JAVA_JVMTIENVTHREADSTATE_H_ */
+-
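A compact model of the bookkeeping JvmtiFramePops wraps around its GrowableArray<int>: pending FRAME_POP requests keyed by frame number, counted from the bottom (oldest) frame, which is #0. This is a sketch with invented names; in particular, the comparison clear_to() uses is not visible in the hunks above and is assumed here to drop the given frame and everything above it.

    import java.util.NavigableSet;
    import java.util.TreeSet;

    class FramePops {
        private final NavigableSet<Integer> pops = new TreeSet<>();

        void set(int frameNumber)   { pops.add(frameNumber); }    // cf. JvmtiFramePops::set
        void clear(int frameNumber) { pops.remove(frameNumber); } // cf. JvmtiFramePops::clear

        int clearTo(int frameNumber) {                            // cf. JvmtiFramePops::clear_to
            NavigableSet<Integer> doomed = pops.tailSet(frameNumber, true);
            int cleared = doomed.size();
            doomed.clear();
            return cleared;
        }
    }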
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiEventController.cpp openjdk/hotspot/src/share/vm/prims/jvmtiEventController.cpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiEventController.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiEventController.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)jvmtiEventController.cpp	1.56 07/05/23 10:53:48 JVM"
+-#endif
+ /*
+  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -73,17 +70,17 @@
+ 
+ 
+ static const jlong  MONITOR_BITS = MONITOR_CONTENDED_ENTER_BIT | MONITOR_CONTENDED_ENTERED_BIT |
+-	   		  MONITOR_WAIT_BIT | MONITOR_WAITED_BIT;
++                          MONITOR_WAIT_BIT | MONITOR_WAITED_BIT;
+ static const jlong  EXCEPTION_BITS = EXCEPTION_THROW_BIT | EXCEPTION_CATCH_BIT;
+ static const jlong  INTERP_EVENT_BITS =  SINGLE_STEP_BIT | METHOD_ENTRY_BIT | METHOD_EXIT_BIT |
+-	                        FRAME_POP_BIT | FIELD_ACCESS_BIT | FIELD_MODIFICATION_BIT;
++                                FRAME_POP_BIT | FIELD_ACCESS_BIT | FIELD_MODIFICATION_BIT;
+ static const jlong  THREAD_FILTERED_EVENT_BITS = INTERP_EVENT_BITS | EXCEPTION_BITS | MONITOR_BITS |
+-       					BREAKPOINT_BIT | CLASS_LOAD_BIT | CLASS_PREPARE_BIT | THREAD_END_BIT;
++                                        BREAKPOINT_BIT | CLASS_LOAD_BIT | CLASS_PREPARE_BIT | THREAD_END_BIT;
+ static const jlong  NEED_THREAD_LIFE_EVENTS = THREAD_FILTERED_EVENT_BITS | THREAD_START_BIT;
+-static const jlong  EARLY_EVENT_BITS = CLASS_FILE_LOAD_HOOK_BIT | 
+-			       VM_START_BIT | VM_INIT_BIT | VM_DEATH_BIT | NATIVE_METHOD_BIND_BIT | 
+-                               THREAD_START_BIT | THREAD_END_BIT | 
+-			       DYNAMIC_CODE_GENERATED_BIT;
++static const jlong  EARLY_EVENT_BITS = CLASS_FILE_LOAD_HOOK_BIT |
++                               VM_START_BIT | VM_INIT_BIT | VM_DEATH_BIT | NATIVE_METHOD_BIND_BIT |
++                               THREAD_START_BIT | THREAD_END_BIT |
++                               DYNAMIC_CODE_GENERATED_BIT;
+ static const jlong  GLOBAL_EVENT_BITS = ~THREAD_FILTERED_EVENT_BITS;
+ 
+ 
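The mask constants above are plain jlong bit sets, so membership tests reduce to a single AND. An illustrative Java rendering (the bit positions here are invented; the real ones come from JvmtiEventEnabled::bit_for()):

    class EventBits {
        static final long SINGLE_STEP_BIT  = 1L << 0; // hypothetical positions
        static final long BREAKPOINT_BIT   = 1L << 1;
        static final long METHOD_ENTRY_BIT = 1L << 2;
        static final long THREAD_START_BIT = 1L << 3;

        static final long INTERP_EVENT_BITS = SINGLE_STEP_BIT | METHOD_ENTRY_BIT;
        static final long THREAD_FILTERED_EVENT_BITS = INTERP_EVENT_BITS | BREAKPOINT_BIT;
        // everything not thread-filtered is global, exactly as in the constant above
        static final long GLOBAL_EVENT_BITS = ~THREAD_FILTERED_EVENT_BITS;

        public static void main(String[] args) {
            System.out.println((THREAD_START_BIT & GLOBAL_EVENT_BITS) != 0); // true
            System.out.println((SINGLE_STEP_BIT  & GLOBAL_EVENT_BITS) != 0); // false
        }
    }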
+@@ -104,7 +101,7 @@
+ #endif
+ }
+ 
+-void JvmtiEventEnabled::set_enabled(jvmtiEvent event_type, bool enabled) {  
++void JvmtiEventEnabled::set_enabled(jvmtiEvent event_type, bool enabled) {
+   jlong bits = get_bits();
+   jlong mask = bit_for(event_type);
+   if (enabled) {
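The hunk cuts off inside set_enabled(), but the visible lines fix its shape: fetch the current bits, compute the event's mask, then (presumably, in the elided remainder) OR the mask in or AND its complement out. The idiom as a sketch:

    class SetEnabledSketch {
        static long setEnabled(long bits, long mask, boolean enabled) {
            return enabled ? (bits | mask) : (bits & ~mask);
        }
    }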
+@@ -178,7 +175,7 @@
+ 
+ public:
+   VM_EnterInterpOnlyMode(JvmtiThreadState *state);
+-  
++
+   bool allow_nested_vm_operations() const        { return true; }
+   VMOp_Type type() const { return VMOp_EnterInterpOnlyMode; }
+   void doit();
+@@ -193,12 +190,12 @@
+   : _state(state)
+ {
+ }
+-  
++
+ 
+ void VM_EnterInterpOnlyMode::doit() {
+   // Set up the current stack depth for later tracking
+   _state->invalidate_cur_stack_depth();
+-  
++
+   _state->enter_interp_only_mode();
+ 
+   JavaThread *thread = _state->get_thread();
+@@ -247,7 +244,7 @@
+   : _on(on != 0)
+ {
+ }
+-  
++
+ 
+ 
+ 
+@@ -274,15 +271,15 @@
+   static jlong recompute_thread_enabled(JvmtiThreadState *state);
+   static void event_init();
+ 
+-  static void set_user_enabled(JvmtiEnvBase *env, JavaThread *thread, 
++  static void set_user_enabled(JvmtiEnvBase *env, JavaThread *thread,
+                         jvmtiEvent event_type, bool enabled);
+-  static void set_event_callbacks(JvmtiEnvBase *env, 
+-                                  const jvmtiEventCallbacks* callbacks, 
++  static void set_event_callbacks(JvmtiEnvBase *env,
++                                  const jvmtiEventCallbacks* callbacks,
+                                   jint size_of_callbacks);
+ 
+   static void set_extension_event_callback(JvmtiEnvBase *env,
+-					   jint extension_event_index, 
+-					   jvmtiExtensionEvent callback);
++                                           jint extension_event_index,
++                                           jvmtiExtensionEvent callback);
+ 
+   static void set_frame_pop(JvmtiEnvThreadState *env_thread, JvmtiFramePop fpop);
+   static void clear_frame_pop(JvmtiEnvThreadState *env_thread, JvmtiFramePop fpop);
+@@ -322,13 +319,13 @@
+ void VM_ChangeSingleStep::doit() {
+   JvmtiEventControllerPrivate::set_should_post_single_step(_on);
+   if (_on) {
+-    AbstractInterpreter::notice_safepoints();
++    Interpreter::notice_safepoints();
+   }
+ }
+ 
+ 
+ void JvmtiEventControllerPrivate::enter_interp_only_mode(JvmtiThreadState *state) {
+-  EC_TRACE(("JVMTI [%s] # Entering interpreter only mode",  
++  EC_TRACE(("JVMTI [%s] # Entering interpreter only mode",
+             JvmtiTrace::safe_get_thread_name(state->get_thread())));
+ 
+   VM_EnterInterpOnlyMode op(state);
+@@ -338,7 +335,7 @@
+ 
+ void
+ JvmtiEventControllerPrivate::leave_interp_only_mode(JvmtiThreadState *state) {
+-  EC_TRACE(("JVMTI [%s] # Leaving interpreter only mode",  
++  EC_TRACE(("JVMTI [%s] # Leaving interpreter only mode",
+             JvmtiTrace::safe_get_thread_name(state->get_thread())));
+   state->leave_interp_only_mode();
+ }
+@@ -354,11 +351,11 @@
+       jlong bit = JvmtiEventEnabled::bit_for((jvmtiEvent)ei);
+       if (changed & bit) {
+         // it changed, print it
+-        tty->print_cr("JVMTI [%s] # %s event %s",  
++        tty->print_cr("JVMTI [%s] # %s event %s",
+                       JvmtiTrace::safe_get_thread_name(state->get_thread()),
+                       (now_enabled & bit)? "Enabling" : "Disabling", JvmtiTrace::event_name((jvmtiEvent)ei));
+       }
+-    }    
++    }
+   }
+ #endif /*JVMTI_TRACE */
+ }
+@@ -374,7 +371,7 @@
+       jlong bit = JvmtiEventEnabled::bit_for((jvmtiEvent)ei);
+       if (changed & bit) {
+         // it changed, print it
+-        tty->print_cr("JVMTI [-] # %s event %s",  
++        tty->print_cr("JVMTI [-] # %s event %s",
+                       (now_enabled & bit)? "Enabling" : "Disabling", JvmtiTrace::event_name((jvmtiEvent)ei));
+       }
+     }
+@@ -383,14 +380,14 @@
+ }
+ 
+ 
+-// For the specified env: compute the currently truly enabled events 
+-// set external state accordingly.  
++// For the specified env: compute the currently truly enabled events
++// set external state accordingly.
+ // Return value and set value must include all events.
+ // But outside this class, only non-thread-filtered events can be queried.
+-jlong 
++jlong
+ JvmtiEventControllerPrivate::recompute_env_enabled(JvmtiEnvBase* env) {
+   jlong was_enabled = env->env_event_enable()->_event_enabled.get_bits();
+-  jlong now_enabled =  
++  jlong now_enabled =
+     env->env_event_enable()->_event_callback_enabled.get_bits() &
+     env->env_event_enable()->_event_user_enabled.get_bits();
+ 
+@@ -418,23 +415,23 @@
+ 
+   // will we really send these events to this env
+   env->env_event_enable()->_event_enabled.set_bits(now_enabled);
+-  
++
+   trace_changed(now_enabled, (now_enabled ^ was_enabled)  & ~THREAD_FILTERED_EVENT_BITS);
+ 
+   return now_enabled;
+ }
+ 
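Stripped of tracing and capability plumbing, the recompute functions here reduce to bit algebra. A sketch (simplified: as the comment further down notes, the real code also folds frame-pop and field-watch state into the thread-level result):

    class RecomputeSketch {
        // per environment: an event is truly on only when a callback is set AND
        // the user enabled it for this environment
        static long envEnabled(long callbackBits, long envUserBits) {
            return callbackBits & envUserBits;
        }

        // per (environment x thread): thread-filtered events only, on when a
        // callback is set and the event is enabled globally for the environment
        // or specifically for this thread
        static long envThreadEnabled(long threadFilteredMask, long callbackBits,
                                     long envUserBits, long threadUserBits) {
            return threadFilteredMask & callbackBits & (envUserBits | threadUserBits);
        }
    }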
+ 
+-// For the specified env and thread: compute the currently truly enabled events 
++// For the specified env and thread: compute the currently truly enabled events
+ // set external state accordingly.  Only thread-filtered events are included.
+-jlong 
++jlong
+ JvmtiEventControllerPrivate::recompute_env_thread_enabled(JvmtiEnvThreadState* ets, JvmtiThreadState* state) {
+   JvmtiEnv *env = ets->get_env();
+ 
+   jlong was_enabled = ets->event_enable()->_event_enabled.get_bits();
+   jlong now_enabled =  THREAD_FILTERED_EVENT_BITS &
+     env->env_event_enable()->_event_callback_enabled.get_bits() &
+-    (env->env_event_enable()->_event_user_enabled.get_bits() | 
++    (env->env_event_enable()->_event_user_enabled.get_bits() |
+      ets->event_enable()->_event_user_enabled.get_bits());
+ 
+   // for frame pops and field watches, computed enabled state
+@@ -457,12 +454,12 @@
+   }
+ 
+   // if anything changed do update
+-  if (now_enabled != was_enabled) { 
++  if (now_enabled != was_enabled) {
+ 
+     // will we really send these events to this thread x env
+     ets->event_enable()->_event_enabled.set_bits(now_enabled);
+-      
+-    // If the enabled status of the single step or breakpoint events changed, 
++
++    // If the enabled status of the single step or breakpoint events changed,
+     // the location status may need to change as well.
+     jlong changed = now_enabled ^ was_enabled;
+     if (changed & SINGLE_STEP_BIT) {
+@@ -472,26 +469,26 @@
+       ets->reset_current_location(JVMTI_EVENT_BREAKPOINT,  (now_enabled & BREAKPOINT_BIT) != 0);
+     }
+     trace_changed(state, now_enabled, changed);
+-  } 
++  }
+   return now_enabled;
+ }
+ 
+ 
+-// For the specified thread: compute the currently truly enabled events 
++// For the specified thread: compute the currently truly enabled events
+ // set external state accordingly.  Only thread-filtered events are included.
+-jlong 
++jlong
+ JvmtiEventControllerPrivate::recompute_thread_enabled(JvmtiThreadState *state) {
+   jlong was_any_env_enabled = state->thread_event_enable()->_event_enabled.get_bits();
+   jlong any_env_enabled = 0;
+ 
+   {
+-    // This iteration will include JvmtiEnvThreadStates whoses environments 
++    // This iteration will include JvmtiEnvThreadStates whose environments
+     // have been disposed.  These JvmtiEnvThreadStates must not be filtered
+     // as recompute must be called on them to disable their events.
+-    JvmtiEnvThreadStateIterator it(state); 
++    JvmtiEnvThreadStateIterator it(state);
+     for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
+       any_env_enabled |= recompute_env_thread_enabled(ets, state);
+-    } 
++    }
+   }
+ 
+   if (any_env_enabled != was_any_env_enabled) {
+@@ -501,7 +498,7 @@
+     // compute interp_only mode
+     bool should_be_interp = (any_env_enabled & INTERP_EVENT_BITS) != 0;
+     bool is_now_interp = state->is_interp_only_mode();
+-    
++
+     if (should_be_interp != is_now_interp) {
+       if (should_be_interp) {
+         enter_interp_only_mode(state);
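The hunk above ends on the decision that drives interpreter-only mode: a thread must leave compiled code as soon as any environment enables an interpreter event for it. As a one-line sketch:

    class InterpOnlySketch {
        static boolean shouldBeInterpOnly(long anyEnvEnabled, long interpEventBits) {
            return (anyEnvEnabled & interpEventBits) != 0;
        }
    }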
+@@ -520,16 +517,16 @@
+ // for it and, for field watch and frame pop, one has been set.
+ // Compute if truly enabled, per thread, per environment, per combination
+ // (thread x environment), and overall.  These merges are true if any is true.
+-// True per thread if some environment has callback set and the event is globally 
++// True per thread if some environment has callback set and the event is globally
+ // enabled or enabled for this thread.
+-// True per environment if the callback is set and the event is globally 
++// True per environment if the callback is set and the event is globally
+ // enabled in this environment or enabled for any thread in this environment.
+ // True per combination if the environment has the callback set and the
+ // event is globally enabled in this environment or the event is enabled
+ // for this thread and environment.
+ //
+ // All state transitions dependent on these transitions are also handled here.
+-void 
++void
+ JvmtiEventControllerPrivate::recompute_enabled() {
+   assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");
+ 
+@@ -542,24 +539,24 @@
+   // compute non-thread-filters events.
+   // This must be done separately from thread-filtered events, since some
+   // events can occur before any threads exist.
+-  JvmtiEnvIterator it; 
++  JvmtiEnvIterator it;
+   for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) {
+     any_env_thread_enabled |= recompute_env_enabled(env);
+   }
+ 
+-  // We need to create any missing jvmti_thread_state if there are globally set thread 
++  // We need to create any missing jvmti_thread_state if there are globally set thread
+   // filtered events and there weren't last time
+   if (    (any_env_thread_enabled & THREAD_FILTERED_EVENT_BITS) != 0 &&
+       (was_any_env_thread_enabled & THREAD_FILTERED_EVENT_BITS) == 0) {
+-    assert(JvmtiEnv::is_vm_live() || (JvmtiEnv::get_phase()==JVMTI_PHASE_START), 
++    assert(JvmtiEnv::is_vm_live() || (JvmtiEnv::get_phase()==JVMTI_PHASE_START),
+       "thread filtered events should not be enabled when VM not in start or live phase");
+-    { 
++    {
+       MutexLocker mu(Threads_lock);   //hold the Threads_lock for the iteration
+       for (JavaThread *tp = Threads::first(); tp != NULL; tp = tp->next()) {
+         JvmtiThreadState::state_for_while_locked(tp);  // create the thread state if missing
+-      } 
++      }
+     }// release Threads_lock
+-  } 
++  }
+ 
+   // compute and set thread-filtered events
+   for (JvmtiThreadState *state = JvmtiThreadState::first(); state != NULL; state = state->next()) {
+@@ -575,7 +572,7 @@
+     JvmtiExport::set_should_post_class_file_load_hook((any_env_thread_enabled & CLASS_FILE_LOAD_HOOK_BIT) != 0);
+     JvmtiExport::set_should_post_native_method_bind((any_env_thread_enabled & NATIVE_METHOD_BIND_BIT) != 0);
+     JvmtiExport::set_should_post_dynamic_code_generated((any_env_thread_enabled & DYNAMIC_CODE_GENERATED_BIT) != 0);
+-    JvmtiExport::set_should_post_data_dump((any_env_thread_enabled & DATA_DUMP_BIT) != 0);    
++    JvmtiExport::set_should_post_data_dump((any_env_thread_enabled & DATA_DUMP_BIT) != 0);
+     JvmtiExport::set_should_post_class_prepare((any_env_thread_enabled & CLASS_PREPARE_BIT) != 0);
+     JvmtiExport::set_should_post_class_unload((any_env_thread_enabled & CLASS_UNLOAD_BIT) != 0);
+     JvmtiExport::set_should_post_monitor_contended_enter((any_env_thread_enabled & MONITOR_CONTENDED_ENTER_BIT) != 0);
+@@ -591,7 +588,7 @@
+     JvmtiExport::set_should_post_vm_object_alloc((any_env_thread_enabled & VM_OBJECT_ALLOC_BIT) != 0);
+ 
+     // need this if we want thread events or we need them to init data
+-    JvmtiExport::set_should_post_thread_life((any_env_thread_enabled & NEED_THREAD_LIFE_EVENTS) != 0); 
++    JvmtiExport::set_should_post_thread_life((any_env_thread_enabled & NEED_THREAD_LIFE_EVENTS) != 0);
+ 
+     // If single stepping is turned on or off, execute the VM op to change it.
+     if (delta & SINGLE_STEP_BIT) {
+@@ -616,21 +613,21 @@
+ 
+   EC_TRACE(("JVMTI [-] # recompute enabled - after %llx", any_env_thread_enabled));
+ }
+-    
+ 
+-void 
++
++void
+ JvmtiEventControllerPrivate::thread_started(JavaThread *thread) {
+   assert(thread->is_Java_thread(), "Must be JavaThread");
+   assert(thread == Thread::current(), "must be current thread");
+   assert(JvmtiEnvBase::environments_might_exist(), "to enter event controller, JVM TI environments must exist");
+-  
++
+   EC_TRACE(("JVMTI [%s] # thread started", JvmtiTrace::safe_get_thread_name(thread)));
+ 
+   // if we have any thread filtered events globally enabled, create/update the thread state
+   if ((JvmtiEventController::_universal_global_event_enabled.get_bits() & THREAD_FILTERED_EVENT_BITS) != 0) {
+     MutexLocker mu(JvmtiThreadState_lock);
+     // create the thread state if missing
+-    JvmtiThreadState *state = JvmtiThreadState::state_for_while_locked(thread);  
++    JvmtiThreadState *state = JvmtiThreadState::state_for_while_locked(thread);
+     if (state != NULL) {    // skip threads with no JVMTI thread state
+       recompute_thread_enabled(state);
+     }
+@@ -638,7 +635,7 @@
+ }
+ 
+ 
+-void 
++void
+ JvmtiEventControllerPrivate::thread_ended(JavaThread *thread) {
+   // Removes the JvmtiThreadState associated with the specified thread.
+   // May be called after all environments have been disposed.
+@@ -652,8 +649,8 @@
+   }
+ }
+ 
+-void JvmtiEventControllerPrivate::set_event_callbacks(JvmtiEnvBase *env, 
+-                                                      const jvmtiEventCallbacks* callbacks, 
++void JvmtiEventControllerPrivate::set_event_callbacks(JvmtiEnvBase *env,
++                                                      const jvmtiEventCallbacks* callbacks,
+                                                       jint size_of_callbacks) {
+   assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");
+   EC_TRACE(("JVMTI [*] # set event callbacks"));
+@@ -665,22 +662,22 @@
+     if (env->has_callback(evt_t)) {
+       enabled_bits |= JvmtiEventEnabled::bit_for(evt_t);
+     }
+-  }  
++  }
+   env->env_event_enable()->_event_callback_enabled.set_bits(enabled_bits);
+   recompute_enabled();
+ }
+ 
+ void
+ JvmtiEventControllerPrivate::set_extension_event_callback(JvmtiEnvBase *env,
+-							  jint extension_event_index, 
+-							  jvmtiExtensionEvent callback)
++                                                          jint extension_event_index,
++                                                          jvmtiExtensionEvent callback)
+ {
+   assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");
+   EC_TRACE(("JVMTI [*] # set extension event callback"));
+ 
+   // extension events are allocated below JVMTI_MIN_EVENT_TYPE_VAL
+   assert(extension_event_index >= (jint)EXT_MIN_EVENT_TYPE_VAL &&
+-	 extension_event_index <= (jint)EXT_MAX_EVENT_TYPE_VAL, "sanity check");
++         extension_event_index <= (jint)EXT_MAX_EVENT_TYPE_VAL, "sanity check");
+ 
+ 
+   // As the bits for both standard (jvmtiEvent) and extension
+@@ -688,7 +685,7 @@
+   // jvmtiEvent to set/clear the bit for this extension event.
+   jvmtiEvent event_type = (jvmtiEvent)extension_event_index;
+ 
+-  // Prevent a possible race condition where events are re-enabled by a call to 
++  // Prevent a possible race condition where events are re-enabled by a call to
+   // set event callbacks, where the DisposeEnvironment occurs after the boiler-plate
+   // environment check and before the lock is acquired.
+   // We can safely do the is_valid check now, as JvmtiThreadState_lock is held.
+@@ -696,9 +693,9 @@
+   env->env_event_enable()->set_user_enabled(event_type, enabling);
+ 
+   // update the callback
+-  jvmtiExtEventCallbacks* ext_callbacks = env->ext_callbacks();  
++  jvmtiExtEventCallbacks* ext_callbacks = env->ext_callbacks();
+   switch (extension_event_index) {
+-    case EXT_EVENT_CLASS_UNLOAD : 
++    case EXT_EVENT_CLASS_UNLOAD :
+       ext_callbacks->ClassUnload = callback;
+       break;
+     default:
+@@ -719,7 +716,7 @@
+ }
+ 
+ 
+-void 
++void
+ JvmtiEventControllerPrivate::env_initialize(JvmtiEnvBase *env) {
+   assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");
+   EC_TRACE(("JVMTI [*] # env initialize"));
+@@ -741,17 +738,17 @@
+ }
+ 
+ 
+-void 
++void
+ JvmtiEventControllerPrivate::env_dispose(JvmtiEnvBase *env) {
+   assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");
+   EC_TRACE(("JVMTI [*] # env dispose"));
+ 
+-  // Before the environment is marked disposed, disable all events on this 
+-  // environment (by zapping the callbacks).  As a result, the disposed 
++  // Before the environment is marked disposed, disable all events on this
++  // environment (by zapping the callbacks).  As a result, the disposed
+   // environment will not call event handlers.
+   set_event_callbacks(env, NULL, 0);
+-  for (jint extension_event_index = EXT_MIN_EVENT_TYPE_VAL; 
+-       extension_event_index <= EXT_MAX_EVENT_TYPE_VAL; 
++  for (jint extension_event_index = EXT_MIN_EVENT_TYPE_VAL;
++       extension_event_index <= EXT_MAX_EVENT_TYPE_VAL;
+        ++extension_event_index) {
+     set_extension_event_callback(env, extension_event_index, NULL);
+   }
+@@ -762,14 +759,14 @@
+ 
+ 
+ void
+-JvmtiEventControllerPrivate::set_user_enabled(JvmtiEnvBase *env, JavaThread *thread, 
++JvmtiEventControllerPrivate::set_user_enabled(JvmtiEnvBase *env, JavaThread *thread,
+                                           jvmtiEvent event_type, bool enabled) {
+   assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");
+ 
+-  EC_TRACE(("JVMTI [%s] # user %s event %s",  
++  EC_TRACE(("JVMTI [%s] # user %s event %s",
+             thread==NULL? "ALL": JvmtiTrace::safe_get_thread_name(thread),
+             enabled? "enabled" : "disabled", JvmtiTrace::event_name(event_type)));
+-  
++
+   if (thread == NULL) {
+     env->env_event_enable()->set_user_enabled(event_type, enabled);
+   } else {
+@@ -785,7 +782,7 @@
+ 
+ void
+ JvmtiEventControllerPrivate::set_frame_pop(JvmtiEnvThreadState *ets, JvmtiFramePop fpop) {
+-  EC_TRACE(("JVMTI [%s] # set frame pop - frame=%d", 
++  EC_TRACE(("JVMTI [%s] # set frame pop - frame=%d",
+             JvmtiTrace::safe_get_thread_name(ets->get_thread()),
+             fpop.frame_number() ));
+ 
+@@ -796,7 +793,7 @@
+ 
+ void
+ JvmtiEventControllerPrivate::clear_frame_pop(JvmtiEnvThreadState *ets, JvmtiFramePop fpop) {
+-  EC_TRACE(("JVMTI [%s] # clear frame pop - frame=%d", 
++  EC_TRACE(("JVMTI [%s] # clear frame pop - frame=%d",
+             JvmtiTrace::safe_get_thread_name(ets->get_thread()),
+             fpop.frame_number() ));
+ 
+@@ -809,7 +806,7 @@
+ JvmtiEventControllerPrivate::clear_to_frame_pop(JvmtiEnvThreadState *ets, JvmtiFramePop fpop) {
+   int cleared_cnt = ets->get_frame_pops()->clear_to(fpop);
+ 
+-  EC_TRACE(("JVMTI [%s] # clear to frame pop - frame=%d, count=%d", 
++  EC_TRACE(("JVMTI [%s] # clear to frame pop - frame=%d, count=%d",
+             JvmtiTrace::safe_get_thread_name(ets->get_thread()),
+             fpop.frame_number(),
+             cleared_cnt ));
+@@ -835,7 +832,7 @@
+     return;
+   }
+ 
+-  EC_TRACE(("JVMTI [-] # change field watch - %s %s count=%d", 
++  EC_TRACE(("JVMTI [-] # change field watch - %s %s count=%d",
+             event_type==JVMTI_EVENT_FIELD_MODIFICATION? "modification" : "access",
+             added? "add" : "remove",
+             *count_addr));
+@@ -871,7 +868,7 @@
+   // check that our idea and the spec's idea of threaded events match
+   for (int ei = JVMTI_MIN_EVENT_TYPE_VAL; ei <= JVMTI_MAX_EVENT_TYPE_VAL; ++ei) {
+     jlong bit = JvmtiEventEnabled::bit_for((jvmtiEvent)ei);
+-    assert(((THREAD_FILTERED_EVENT_BITS & bit) != 0) == JvmtiUtil::event_threaded(ei), 
++    assert(((THREAD_FILTERED_EVENT_BITS & bit) != 0) == JvmtiUtil::event_threaded(ei),
+            "thread filtered event list does not match");
+   }
+ #endif
+@@ -912,15 +909,15 @@
+ bool
+ JvmtiEventController::is_global_event(jvmtiEvent event_type) {
+   assert(is_valid_event_type(event_type), "invalid event type");
+-  jlong bit_for = ((jlong)1) << (event_type - TOTAL_MIN_EVENT_TYPE_VAL); 
++  jlong bit_for = ((jlong)1) << (event_type - TOTAL_MIN_EVENT_TYPE_VAL);
+   return((bit_for & GLOBAL_EVENT_BITS)!=0);
+ }
+ 
+-void 
++void
+ JvmtiEventController::set_user_enabled(JvmtiEnvBase *env, JavaThread *thread, jvmtiEvent event_type, bool enabled) {
+   if (Threads::number_of_threads() == 0) {
+     // during early VM start-up locks don't exist, but we are safely single threaded,
+-    // call the functionality without holding the JvmtiThreadState_lock.  
++    // call the functionality without holding the JvmtiThreadState_lock.
+     JvmtiEventControllerPrivate::set_user_enabled(env, thread, event_type, enabled);
+   } else {
+     MutexLocker mu(JvmtiThreadState_lock);
+@@ -929,13 +926,13 @@
+ }
+ 
+ 
+-void 
+-JvmtiEventController::set_event_callbacks(JvmtiEnvBase *env, 
+-                                          const jvmtiEventCallbacks* callbacks, 
++void
++JvmtiEventController::set_event_callbacks(JvmtiEnvBase *env,
++                                          const jvmtiEventCallbacks* callbacks,
+                                           jint size_of_callbacks) {
+   if (Threads::number_of_threads() == 0) {
+     // during early VM start-up locks don't exist, but we are safely single threaded,
+-    // call the functionality without holding the JvmtiThreadState_lock.  
++    // call the functionality without holding the JvmtiThreadState_lock.
+     JvmtiEventControllerPrivate::set_event_callbacks(env, callbacks, size_of_callbacks);
+   } else {
+     MutexLocker mu(JvmtiThreadState_lock);
+@@ -943,42 +940,42 @@
+   }
+ }
+ 
+-void 
+-JvmtiEventController::set_extension_event_callback(JvmtiEnvBase *env,  
+-						   jint extension_event_index, 
+-						   jvmtiExtensionEvent callback) {
++void
++JvmtiEventController::set_extension_event_callback(JvmtiEnvBase *env,
++                                                   jint extension_event_index,
++                                                   jvmtiExtensionEvent callback) {
+   if (Threads::number_of_threads() == 0) {
+     JvmtiEventControllerPrivate::set_extension_event_callback(env, extension_event_index, callback);
+   } else {
+     MutexLocker mu(JvmtiThreadState_lock);
+     JvmtiEventControllerPrivate::set_extension_event_callback(env, extension_event_index, callback);
+-  } 
++  }
+ }
+ 
+ 
+ 
+ 
+-void 
++void
+ JvmtiEventController::set_frame_pop(JvmtiEnvThreadState *ets, JvmtiFramePop fpop) {
+   MutexLocker mu(JvmtiThreadState_lock);
+   JvmtiEventControllerPrivate::set_frame_pop(ets, fpop);
+ }
+ 
+ 
+-void 
++void
+ JvmtiEventController::clear_frame_pop(JvmtiEnvThreadState *ets, JvmtiFramePop fpop) {
+   MutexLocker mu(JvmtiThreadState_lock);
+   JvmtiEventControllerPrivate::clear_frame_pop(ets, fpop);
+ }
+ 
+ 
+-void 
++void
+ JvmtiEventController::clear_to_frame_pop(JvmtiEnvThreadState *ets, JvmtiFramePop fpop) {
+   MutexLocker mu(JvmtiThreadState_lock);
+   JvmtiEventControllerPrivate::clear_to_frame_pop(ets, fpop);
+ }
+ 
+-void 
++void
+ JvmtiEventController::change_field_watch(jvmtiEvent event_type, bool added) {
+   MutexLocker mu(JvmtiThreadState_lock);
+   JvmtiEventControllerPrivate::change_field_watch(event_type, added);
+@@ -987,14 +984,14 @@
+ void
+ JvmtiEventController::thread_started(JavaThread *thread) {
+   // operates only on the current thread
+-  // JvmtiThreadState_lock grabbed only if needed.  
++  // JvmtiThreadState_lock grabbed only if needed.
+   JvmtiEventControllerPrivate::thread_started(thread);
+ }
+ 
+ void
+ JvmtiEventController::thread_ended(JavaThread *thread) {
+   // operates only on the current thread
+-  // JvmtiThreadState_lock grabbed only if needed.  
++  // JvmtiThreadState_lock grabbed only if needed.
+   JvmtiEventControllerPrivate::thread_ended(thread);
+ }
+ 
+@@ -1002,11 +999,11 @@
+ JvmtiEventController::env_initialize(JvmtiEnvBase *env) {
+   if (Threads::number_of_threads() == 0) {
+     // during early VM start-up locks don't exist, but we are safely single threaded,
+-    // call the functionality without holding the JvmtiThreadState_lock.  
+-    JvmtiEventControllerPrivate::env_initialize(env); 
++    // call the functionality without holding the JvmtiThreadState_lock.
++    JvmtiEventControllerPrivate::env_initialize(env);
+   } else {
+     MutexLocker mu(JvmtiThreadState_lock);
+-    JvmtiEventControllerPrivate::env_initialize(env); 
++    JvmtiEventControllerPrivate::env_initialize(env);
+   }
+ }
+ 
+@@ -1014,7 +1011,7 @@
+ JvmtiEventController::env_dispose(JvmtiEnvBase *env) {
+   if (Threads::number_of_threads() == 0) {
+     // during early VM start-up locks don't exist, but we are safely single threaded,
+-    // call the functionality without holding the JvmtiThreadState_lock.  
++    // call the functionality without holding the JvmtiThreadState_lock.
+     JvmtiEventControllerPrivate::env_dispose(env);
+   } else {
+     MutexLocker mu(JvmtiThreadState_lock);
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiEventController.hpp openjdk/hotspot/src/share/vm/prims/jvmtiEventController.hpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiEventController.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiEventController.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jvmtiEventController.hpp	1.22 07/05/05 17:06:37 JVM"
+-#endif
+ /*
+  * Copyright 2003-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #ifndef _JAVA_JVMTI_EVENT_CONTROLLER_H_
+@@ -53,7 +50,7 @@
+ } jvmtiExtEventCallbacks;
+ 
+ 
+-// The complete range of events is EXT_MIN_EVENT_TYPE_VAL to 
++// The complete range of events is EXT_MIN_EVENT_TYPE_VAL to
+ // JVMTI_MAX_EVENT_TYPE_VAL (inclusive and contiguous).
+ const int TOTAL_MIN_EVENT_TYPE_VAL = EXT_MIN_EVENT_TYPE_VAL;
+ const int TOTAL_MAX_EVENT_TYPE_VAL = JVMTI_MAX_EVENT_TYPE_VAL;
+@@ -67,7 +64,7 @@
+ //
+ // A boolean array indexed by event_type, used as an internal
+ // data structure to track what JVMTI event types are enabled.
+-// Used for user set enabling and disabling (globally and on a 
++// Used for user set enabling and disabling (globally and on a
+ // per thread basis), and for computed merges across environments,
+ // threads and the VM as a whole.
+ //
+@@ -151,11 +148,11 @@
+ private:
+   friend class JvmtiEventControllerPrivate;
+ 
+-  // user set global event enablement indexed by jvmtiEvent  
++  // user set global event enablement indexed by jvmtiEvent
+   JvmtiEventEnabled _event_user_enabled;
+ 
+   // this flag indicates the presence (true) or absence (false) of event callbacks
+-  // it is indexed by jvmtiEvent  
++  // it is indexed by jvmtiEvent
+   JvmtiEventEnabled _event_callback_enabled;
+ 
+   // indexed by jvmtiEvent true if enabled globally or on any thread.
+@@ -174,7 +171,7 @@
+ //
+ // JvmtiEventController
+ //
+-// The class is the access point for all actions that change 
++// The class is the access point for all actions that change
+ // which events are active, this include:
+ //      enabling and disabling events
+ //      changing the callbacks/eventhook (they may be null)
+@@ -189,7 +186,7 @@
+ private:
+   friend class JvmtiEventControllerPrivate;
+ 
+-  // for all environments, global array indexed by jvmtiEvent 
++  // for all environments, global array indexed by jvmtiEvent
+   static JvmtiEventEnabled _universal_global_event_enabled;
+ 
+ public:
+@@ -208,20 +205,20 @@
+   // Use (thread == NULL) to enable/disable an event globally.
+   // Use (thread != NULL) to enable/disable an event for a particular thread.
+   // thread is ignored for events that can only be specified globally
+-  static void set_user_enabled(JvmtiEnvBase *env, JavaThread *thread, 
++  static void set_user_enabled(JvmtiEnvBase *env, JavaThread *thread,
+                                jvmtiEvent event_type, bool enabled);
+ 
+   // Setting callbacks changes computed enablement and must be done
+   // at a safepoint otherwise a NULL callback could be attempted
+-  static void set_event_callbacks(JvmtiEnvBase *env,  
+-                                  const jvmtiEventCallbacks* callbacks, 
++  static void set_event_callbacks(JvmtiEnvBase *env,
++                                  const jvmtiEventCallbacks* callbacks,
+                                   jint size_of_callbacks);
+ 
+-  // Sets the callback function for a single extension event and enables 
++  // Sets the callback function for a single extension event and enables
+   // (or disables it).
+   static void set_extension_event_callback(JvmtiEnvBase* env,
+-					   jint extension_event_index, 
+-					   jvmtiExtensionEvent callback);
++                                           jint extension_event_index,
++                                           jvmtiExtensionEvent callback);
+ 
+   static void set_frame_pop(JvmtiEnvThreadState *env_thread, JvmtiFramePop fpop);
+   static void clear_frame_pop(JvmtiEnvThreadState *env_thread, JvmtiFramePop fpop);
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiEventController.inline.hpp openjdk/hotspot/src/share/vm/prims/jvmtiEventController.inline.hpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiEventController.inline.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiEventController.inline.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jvmtiEventController.inline.hpp	1.14 07/05/05 17:06:37 JVM"
+-#endif
+ /*
+  * Copyright 2003-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // these inline functions are in a separate file to break include cycles
+@@ -35,7 +32,7 @@
+ 
+ inline jlong JvmtiEventEnabled::bit_for(jvmtiEvent event_type) {
+   assert(JvmtiEventController::is_valid_event_type(event_type), "invalid event type");
+-  return ((jlong)1) << (event_type - TOTAL_MIN_EVENT_TYPE_VAL);  
++  return ((jlong)1) << (event_type - TOTAL_MIN_EVENT_TYPE_VAL);
+ }
+ 
+ inline jlong JvmtiEventEnabled::get_bits() {
+@@ -58,13 +55,13 @@
+ // JvmtiEnvThreadEventEnable
+ //
+ 
+-inline bool JvmtiEnvThreadEventEnable::is_enabled(jvmtiEvent event_type) { 
++inline bool JvmtiEnvThreadEventEnable::is_enabled(jvmtiEvent event_type) {
+   assert(JvmtiUtil::event_threaded(event_type), "Only thread filtered events should be tested here");
+-  return _event_enabled.is_enabled(event_type); 
++  return _event_enabled.is_enabled(event_type);
+ }
+ 
+-inline void JvmtiEnvThreadEventEnable::set_user_enabled(jvmtiEvent event_type, bool enabled) { 
+-  _event_user_enabled.set_enabled(event_type, enabled);  
++inline void JvmtiEnvThreadEventEnable::set_user_enabled(jvmtiEvent event_type, bool enabled) {
++  _event_user_enabled.set_enabled(event_type, enabled);
+ }
+ 
+ 
+@@ -73,9 +70,9 @@
+ // JvmtiThreadEventEnable
+ //
+ 
+-inline bool JvmtiThreadEventEnable::is_enabled(jvmtiEvent event_type) { 
++inline bool JvmtiThreadEventEnable::is_enabled(jvmtiEvent event_type) {
+   assert(JvmtiUtil::event_threaded(event_type), "Only thread filtered events should be tested here");
+-  return _event_enabled.is_enabled(event_type); 
++  return _event_enabled.is_enabled(event_type);
+ }
+ 
+ 
+@@ -84,16 +81,16 @@
+ // JvmtiEnvEventEnable
+ //
+ 
+-inline bool JvmtiEnvEventEnable::is_enabled(jvmtiEvent event_type) { 
++inline bool JvmtiEnvEventEnable::is_enabled(jvmtiEvent event_type) {
+   assert(!JvmtiUtil::event_threaded(event_type), "Only non thread filtered events should be tested here");
+   return _event_enabled.is_enabled(event_type);
+ }
+ 
+-inline void JvmtiEnvEventEnable::set_user_enabled(jvmtiEvent event_type, bool enabled) { 
+-  _event_user_enabled.set_enabled(event_type, enabled);  
++inline void JvmtiEnvEventEnable::set_user_enabled(jvmtiEvent event_type, bool enabled) {
++  _event_user_enabled.set_enabled(event_type, enabled);
+ }
+ 
+- 
++
+ ///////////////////////////////////////////////////////////////
+ //
+ // JvmtiEventController
+@@ -102,4 +99,3 @@
+ inline bool JvmtiEventController::is_enabled(jvmtiEvent event_type) {
+   return _universal_global_event_enabled.is_enabled(event_type);
+ }
+-
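The inline accessors above all bottom out in a single shift. A sketch of bit_for() together with the is_global_event() test built on it earlier in this patch (parameter names invented):

    class BitForSketch {
        // standard and extension events share one contiguous index range starting
        // at TOTAL_MIN_EVENT_TYPE_VAL, so one shift yields the event's mask
        static long bitFor(int eventType, int totalMinEventTypeVal) {
            return 1L << (eventType - totalMinEventTypeVal);
        }

        // cf. JvmtiEventController::is_global_event()
        static boolean isGlobalEvent(int eventType, int totalMinEventTypeVal,
                                     long globalEventBits) {
            return (bitFor(eventType, totalMinEventTypeVal) & globalEventBits) != 0;
        }
    }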
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiExport.cpp openjdk/hotspot/src/share/vm/prims/jvmtiExport.cpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiExport.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiExport.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)jvmtiExport.cpp	1.125 07/05/29 09:44:25 JVM"
+-#endif
+ /*
+  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -36,7 +33,6 @@
+ #define EVT_TRACE(evt,out)
+ #endif
+ 
+-
+ ///////////////////////////////////////////////////////////////
+ //
+ // JvmtiEventTransition
+@@ -49,10 +45,10 @@
+ private:
+   ResourceMark _rm;
+   ThreadToNativeFromVM _transition;
+-  HandleMark _hm;  
++  HandleMark _hm;
+ 
+ public:
+-  JvmtiJavaThreadEventTransition(JavaThread *thread) : 
++  JvmtiJavaThreadEventTransition(JavaThread *thread) :
+     _rm(),
+     _transition(thread),
+     _hm(thread)  {};
+@@ -66,7 +62,7 @@
+   HandleMark _hm;
+   JavaThreadState _saved_state;
+   JavaThread *_jthread;
+-    
++
+ public:
+   JvmtiThreadEventTransition(Thread *thread) : _rm(), _hm() {
+     if (thread->is_Java_thread()) {
+@@ -81,9 +77,9 @@
+       _jthread = NULL;
+     }
+   }
+-    
++
+   ~JvmtiThreadEventTransition() {
+-    if (_jthread != NULL)  
++    if (_jthread != NULL)
+       ThreadStateTransition::transition_from_native(_jthread, _saved_state);
+   }
+ };
+@@ -98,7 +94,7 @@
+ private:
+   JavaThread *_thread;
+   JNIEnv* _jni_env;
+-  bool _exception_detected;   
++  bool _exception_detected;
+   bool _exception_caught;
+ #if 0
+   JNIHandleBlock* _hblock;
+@@ -115,7 +111,7 @@
+     // for now, steal JNI push local frame code
+     JvmtiThreadState *state = thread->jvmti_thread_state();
+     // we are before an event.
+-    // Save current jvmti thread exception state. 
++    // Save current jvmti thread exception state.
+     if (state != NULL) {
+       _exception_detected = state->is_exception_detected();
+       _exception_caught = state->is_exception_caught();
+@@ -123,7 +119,7 @@
+       _exception_detected = false;
+       _exception_caught = false;
+     }
+-    
++
+     JNIHandleBlock* old_handles = thread->active_handles();
+     JNIHandleBlock* new_handles = JNIHandleBlock::allocate_block(thread);
+     assert(new_handles != NULL, "should not be NULL");
+@@ -146,7 +142,7 @@
+     _thread->set_active_handles(new_handles);
+     // Note that we set the pop_frame_link to NULL explicitly, otherwise
+     // the release_block call will release the blocks.
+-    old_handles->set_pop_frame_link(NULL); 
++    old_handles->set_pop_frame_link(NULL);
+     JNIHandleBlock::release_block(old_handles, _thread); // may block
+ #endif
+ 
+@@ -159,10 +155,10 @@
+       }
+       if (_exception_caught) {
+         state->set_exception_caught();
+-      } 
++      }
+     }
+-  }      
+-  
++  }
++
+ #if 0
+   jobject to_jobject(oop obj) { return obj == NULL? NULL : _hblock->allocate_handle_fast(obj); }
+ #else
+@@ -183,10 +179,10 @@
+   jthread _jt;
+ 
+ public:
+-  JvmtiThreadEventMark(JavaThread *thread) : 
++  JvmtiThreadEventMark(JavaThread *thread) :
+     JvmtiEventMark(thread) {
+     _jt = (jthread)(to_jobject(thread->threadObj()));
+-  }; 
++  };
+  jthread jni_thread() { return _jt; }
+ };
+ 
+@@ -195,7 +191,7 @@
+   jclass _jc;
+ 
+ public:
+-  JvmtiClassEventMark(JavaThread *thread, klassOop klass) : 
++  JvmtiClassEventMark(JavaThread *thread, klassOop klass) :
+     JvmtiThreadEventMark(thread) {
+     _jc = to_jclass(klass);
+   };
+@@ -207,7 +203,7 @@
+   jmethodID _mid;
+ 
+ public:
+-  JvmtiMethodEventMark(JavaThread *thread, methodHandle method) : 
++  JvmtiMethodEventMark(JavaThread *thread, methodHandle method) :
+     JvmtiThreadEventMark(thread),
+     _mid(to_jmethodID(method)) {};
+   jmethodID jni_methodID() { return _mid; }
+@@ -218,7 +214,7 @@
+   jlocation _loc;
+ 
+ public:
+-  JvmtiLocationEventMark(JavaThread *thread, methodHandle method, address location) : 
++  JvmtiLocationEventMark(JavaThread *thread, methodHandle method, address location) :
+     JvmtiMethodEventMark(thread, method),
+     _loc(location - method->code_base()) {};
+   jlocation location() { return _loc; }
+@@ -229,7 +225,7 @@
+   jobject _exc;
+ 
+ public:
+-  JvmtiExceptionEventMark(JavaThread *thread, methodHandle method, address location, Handle exception) : 
++  JvmtiExceptionEventMark(JavaThread *thread, methodHandle method, address location, Handle exception) :
+     JvmtiLocationEventMark(thread, method, location),
+     _exc(to_jobject(exception())) {};
+   jobject exception() { return _exc; }
+@@ -249,9 +245,9 @@
+       _jloader = (jobject)to_jobject(class_loader());
+       _protection_domain = (jobject)to_jobject(prot_domain());
+       if (class_being_redefined == NULL) {
+-	_class_being_redefined = NULL;
++        _class_being_redefined = NULL;
+       } else {
+-	_class_being_redefined = (jclass)to_jclass((*class_being_redefined)());
++        _class_being_redefined = (jclass)to_jclass((*class_being_redefined)());
+       }
+   };
+   const char *class_name() {
+@@ -268,6 +264,365 @@
+   }
+ };
+ 
++//////////////////////////////////////////////////////////////////////////////
++
++int               JvmtiExport::_field_access_count                        = 0;
++int               JvmtiExport::_field_modification_count                  = 0;
++
++bool              JvmtiExport::_can_access_local_variables                = false;
++bool              JvmtiExport::_can_examine_or_deopt_anywhere             = false;
++bool              JvmtiExport::_can_hotswap_or_post_breakpoint            = false;
++bool              JvmtiExport::_can_modify_any_class                      = false;
++bool              JvmtiExport::_can_walk_any_space                        = false;
++
++bool              JvmtiExport::_has_redefined_a_class                     = false;
++bool              JvmtiExport::_all_dependencies_are_recorded             = false;
++
++//
++// field access management
++//
++
++// interpreter generator needs the address of the counter
++address JvmtiExport::get_field_access_count_addr() {
++  // We don't grab a lock because we don't want to
++  // serialize field access between all threads. This means that a
++  // thread on another processor can see the wrong count value and
++  // may either miss making a needed call into post_field_access()
++  // or will make an unneeded call into post_field_access(). We pay
++  // this price to avoid slowing down the VM when we aren't watching
++  // field accesses.
++  // Other access/mutation safe by virtue of being in VM state.
++  return (address)(&_field_access_count);
++}
++
++//
++// field modification management
++//
++
++// interpreter generator needs the address of the counter
++address JvmtiExport::get_field_modification_count_addr() {
++  // We don't grab a lock because we don't
++  // want to serialize field modification between all threads. This
++  // means that a thread on another processor can see the wrong
++  // count value and may either miss making a needed call into
++  // post_field_modification() or will make an unneeded call into
++  // post_field_modification(). We pay this price to avoid slowing
++  // down the VM when we aren't watching field modifications.
++  // Other access/mutation safe by virtue of being in VM state.
++  return (address)(&_field_modification_count);
++}
++
++
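Both counter helpers above expose a raw address precisely so the interpreter can poll it without locking. A minimal sketch of the fast-path test this enables (hypothetical helper name; the real interpreter emits an equivalent check in generated code):

// Sketch only: illustrates the unsynchronized watch-counter pattern the
// comments above describe. A stale read is tolerated by design -- at worst
// we make one unneeded call, or briefly miss one, into the slow path.
static inline void maybe_post_field_access(int* count_addr) {
  if (*count_addr > 0) {  // lock-free read of JvmtiExport::_field_access_count
    // slow path: transition to VM state and post the JVMTI event
  }
}
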
++///////////////////////////////////////////////////////////////
++// Functions needed by java.lang.instrument for starting up javaagent.
++///////////////////////////////////////////////////////////////
++
++jint
++JvmtiExport::get_jvmti_interface(JavaVM *jvm, void **penv, jint version) {
++  /* To Do: add version checks */
++
++  if (JvmtiEnv::get_phase() == JVMTI_PHASE_LIVE) {
++    JavaThread* current_thread = (JavaThread*) ThreadLocalStorage::thread();
++    // transition code: native to VM
++    ThreadInVMfromNative __tiv(current_thread);
++    __ENTRY(jvmtiEnv*, JvmtiExport::get_jvmti_interface, current_thread)
++    debug_only(VMNativeEntryWrapper __vew;)
++
++    JvmtiEnv *jvmti_env = JvmtiEnv::create_a_jvmti();
++    *penv = jvmti_env->jvmti_external();  // actual type is jvmtiEnv* -- not to be confused with JvmtiEnv*
++    return JNI_OK;
++
++  } else if (JvmtiEnv::get_phase() == JVMTI_PHASE_ONLOAD) {
++    // not live, no thread to transition
++    JvmtiEnv *jvmti_env = JvmtiEnv::create_a_jvmti();
++    *penv = jvmti_env->jvmti_external();  // actual type is jvmtiEnv* -- not to be confused with JvmtiEnv*
++    return JNI_OK;
++
++  } else {
++    // Called at the wrong time
++    *penv = NULL;
++    return JNI_EDETACHED;
++  }
++}
++
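get_jvmti_interface is what an agent reaches through JavaVM::GetEnv, and it only hands out an environment in the ONLOAD and LIVE phases. A minimal agent-side sketch of the call it services (standard JVMTI API; error handling trimmed):

#include <jvmti.h>

JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM *vm, char *options, void *reserved) {
  jvmtiEnv *jvmti = NULL;
  // Dispatches to JvmtiExport::get_jvmti_interface; during Agent_OnLoad we
  // are in the ONLOAD phase, so the "no thread to transition" branch runs.
  if (vm->GetEnv((void **)&jvmti, JVMTI_VERSION_1_0) != JNI_OK)
    return JNI_ERR;                 // wrong phase or unsupported version
  return JNI_OK;
}
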
++void JvmtiExport::enter_primordial_phase() {
++  JvmtiEnvBase::set_phase(JVMTI_PHASE_PRIMORDIAL);
++}
++
++void JvmtiExport::enter_start_phase() {
++  JvmtiManageCapabilities::recompute_always_capabilities();
++  JvmtiEnvBase::set_phase(JVMTI_PHASE_START);
++}
++
++void JvmtiExport::enter_onload_phase() {
++  JvmtiEnvBase::set_phase(JVMTI_PHASE_ONLOAD);
++}
++
++void JvmtiExport::enter_live_phase() {
++  JvmtiEnvBase::set_phase(JVMTI_PHASE_LIVE);
++}
++
++//
++// JVMTI events that the VM posts to the debugger, and also startup-agent
++// support that calls the agent's premain() for java.lang.instrument.
++//
++
++void JvmtiExport::post_vm_start() {
++  EVT_TRIG_TRACE(JVMTI_EVENT_VM_START, ("JVMTI Trg VM start event triggered" ));
++
++  // can now enable some events
++  JvmtiEventController::vm_start();
++
++  JvmtiEnvIterator it;
++  for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
++    if (env->is_enabled(JVMTI_EVENT_VM_START)) {
++      EVT_TRACE(JVMTI_EVENT_VM_START, ("JVMTI Evt VM start event sent" ));
++
++      JavaThread *thread  = JavaThread::current();
++      JvmtiThreadEventMark jem(thread);
++      JvmtiJavaThreadEventTransition jet(thread);
++      jvmtiEventVMStart callback = env->callbacks()->VMStart;
++      if (callback != NULL) {
++        (*callback)(env->jvmti_external(), jem.jni_env());
++      }
++    }
++  }
++}
++
++
++void JvmtiExport::post_vm_initialized() {
++  EVT_TRIG_TRACE(JVMTI_EVENT_VM_INIT, ("JVMTI Trg VM init event triggered" ));
++
++  // can now enable events
++  JvmtiEventController::vm_init();
++
++  JvmtiEnvIterator it;
++  for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
++    if (env->is_enabled(JVMTI_EVENT_VM_INIT)) {
++      EVT_TRACE(JVMTI_EVENT_VM_INIT, ("JVMTI Evt VM init event sent" ));
++
++      JavaThread *thread  = JavaThread::current();
++      JvmtiThreadEventMark jem(thread);
++      JvmtiJavaThreadEventTransition jet(thread);
++      jvmtiEventVMInit callback = env->callbacks()->VMInit;
++      if (callback != NULL) {
++        (*callback)(env->jvmti_external(), jem.jni_env(), jem.jni_thread());
++      }
++    }
++  }
++}
++
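The loops in post_vm_start and post_vm_initialized only reach environments that registered a callback and enabled the corresponding event. On the agent side that setup looks roughly like this (standard JVMTI calls; sketch only):

#include <jvmti.h>

static void JNICALL on_vm_init(jvmtiEnv *jvmti, JNIEnv *jni, jthread thread) {
  // Invoked from the loop in JvmtiExport::post_vm_initialized.
}

static jvmtiError enable_vm_init(jvmtiEnv *jvmti) {
  jvmtiEventCallbacks cb = {};               // leave all other callbacks NULL
  cb.VMInit = &on_vm_init;
  jvmtiError err = jvmti->SetEventCallbacks(&cb, (jint)sizeof(cb));
  if (err != JVMTI_ERROR_NONE) return err;
  return jvmti->SetEventNotificationMode(JVMTI_ENABLE,
                                         JVMTI_EVENT_VM_INIT, NULL);
}
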
++
++void JvmtiExport::post_vm_death() {
++  EVT_TRIG_TRACE(JVMTI_EVENT_VM_DEATH, ("JVMTI Trg VM death event triggered" ));
++
++  JvmtiEnvIterator it;
++  for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
++    if (env->is_enabled(JVMTI_EVENT_VM_DEATH)) {
++      EVT_TRACE(JVMTI_EVENT_VM_DEATH, ("JVMTI Evt VM death event sent" ));
++
++      JavaThread *thread  = JavaThread::current();
++      JvmtiEventMark jem(thread);
++      JvmtiJavaThreadEventTransition jet(thread);
++      jvmtiEventVMDeath callback = env->callbacks()->VMDeath;
++      if (callback != NULL) {
++        (*callback)(env->jvmti_external(), jem.jni_env());
++      }
++    }
++  }
++
++  JvmtiEnvBase::set_phase(JVMTI_PHASE_DEAD);
++  JvmtiEventController::vm_death();
++}
++
++char**
++JvmtiExport::get_all_native_method_prefixes(int* count_ptr) {
++  // Have to grab the JVMTI thread state lock to be sure environments don't
++  // go away while we iterate over them.  No locks during VM bring-up.
++  if (Threads::number_of_threads() == 0 || SafepointSynchronize::is_at_safepoint()) {
++    return JvmtiEnvBase::get_all_native_method_prefixes(count_ptr);
++  } else {
++    MutexLocker mu(JvmtiThreadState_lock);
++    return JvmtiEnvBase::get_all_native_method_prefixes(count_ptr);
++  }
++}
++
++class JvmtiClassFileLoadHookPoster : public StackObj {
++ private:
++  symbolHandle         _h_name;
++  Handle               _class_loader;
++  Handle               _h_protection_domain;
++  unsigned char **     _data_ptr;
++  unsigned char **     _end_ptr;
++  JavaThread *         _thread;
++  jint                 _curr_len;
++  unsigned char *      _curr_data;
++  JvmtiEnv *           _curr_env;
++  jint *               _cached_length_ptr;
++  unsigned char **     _cached_data_ptr;
++  JvmtiThreadState *   _state;
++  KlassHandle *        _h_class_being_redefined;
++  JvmtiClassLoadKind   _load_kind;
++
++ public:
++  inline JvmtiClassFileLoadHookPoster(symbolHandle h_name, Handle class_loader,
++                                      Handle h_protection_domain,
++                                      unsigned char **data_ptr, unsigned char **end_ptr,
++                                      unsigned char **cached_data_ptr,
++                                      jint *cached_length_ptr) {
++    _h_name = h_name;
++    _class_loader = class_loader;
++    _h_protection_domain = h_protection_domain;
++    _data_ptr = data_ptr;
++    _end_ptr = end_ptr;
++    _thread = JavaThread::current();
++    _curr_len = *end_ptr - *data_ptr;
++    _curr_data = *data_ptr;
++    _curr_env = NULL;
++    _cached_length_ptr = cached_length_ptr;
++    _cached_data_ptr = cached_data_ptr;
++    *_cached_length_ptr = 0;
++    *_cached_data_ptr = NULL;
++
++    _state = _thread->jvmti_thread_state();
++    if (_state != NULL) {
++      _h_class_being_redefined = _state->get_class_being_redefined();
++      _load_kind = _state->get_class_load_kind();
++      // Clear the class_being_redefined flag here. The action
++      // taken by the agent's handler could generate a new class
++      // file load hook event and, if the flag were not cleared,
++      // an event generated by a regular class file load could
++      // carry this stale redefined-class handle info.
++      _state->clear_class_being_redefined();
++    } else {
++      // redefine and retransform will always set the thread state
++      _h_class_being_redefined = (KlassHandle *) NULL;
++      _load_kind = jvmti_class_load_kind_load;
++    }
++  }
++
++  void post() {
++//    EVT_TRIG_TRACE(JVMTI_EVENT_CLASS_FILE_LOAD_HOOK,
++//                   ("JVMTI [%s] class file load hook event triggered",
++//                    JvmtiTrace::safe_get_thread_name(_thread)));
++    post_all_envs();
++    copy_modified_data();
++  }
++
++ private:
++  void post_all_envs() {
++    if (_load_kind != jvmti_class_load_kind_retransform) {
++      // for class load and redefine,
++      // call the non-retransformable agents
++      JvmtiEnvIterator it;
++      for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
++        if (!env->is_retransformable() && env->is_enabled(JVMTI_EVENT_CLASS_FILE_LOAD_HOOK)) {
++          // non-retransformable agents cannot retransform back,
++          // so no need to cache the original class file bytes
++          post_to_env(env, false);
++        }
++      }
++    }
++    JvmtiEnvIterator it;
++    for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
++      // retransformable agents get all events
++      if (env->is_retransformable() && env->is_enabled(JVMTI_EVENT_CLASS_FILE_LOAD_HOOK)) {
++        // retransformable agents need to cache the original class file
++        // bytes if changes are made via the ClassFileLoadHook
++        post_to_env(env, true);
++      }
++    }
++  }
++
++  void post_to_env(JvmtiEnv* env, bool caching_needed) {
++    unsigned char *new_data = NULL;
++    jint new_len = 0;
++//    EVT_TRACE(JVMTI_EVENT_CLASS_FILE_LOAD_HOOK,
++//     ("JVMTI [%s] class file load hook event sent %s  data_ptr = %d, data_len = %d",
++//               JvmtiTrace::safe_get_thread_name(_thread),
++//               _h_name.is_null() ? "NULL" : _h_name->as_utf8(),
++//               _curr_data, _curr_len ));
++    JvmtiClassFileLoadEventMark jem(_thread, _h_name, _class_loader,
++                                    _h_protection_domain,
++                                    _h_class_being_redefined);
++    JvmtiJavaThreadEventTransition jet(_thread);
++    JNIEnv* jni_env =  (JvmtiEnv::get_phase() == JVMTI_PHASE_PRIMORDIAL)?
++                                                        NULL : jem.jni_env();
++    jvmtiEventClassFileLoadHook callback = env->callbacks()->ClassFileLoadHook;
++    if (callback != NULL) {
++      (*callback)(env->jvmti_external(), jni_env,
++                  jem.class_being_redefined(),
++                  jem.jloader(), jem.class_name(),
++                  jem.protection_domain(),
++                  _curr_len, _curr_data,
++                  &new_len, &new_data);
++    }
++    if (new_data != NULL) {
++      // this agent has modified class data.
++      if (caching_needed && *_cached_data_ptr == NULL) {
++        // the data has been changed by the new retransformable agent
++        // and hasn't been cached yet, so cache it
++        *_cached_data_ptr = (unsigned char *)os::malloc(_curr_len);
++        memcpy(*_cached_data_ptr, _curr_data, _curr_len);
++        *_cached_length_ptr = _curr_len;
++      }
++
++      if (_curr_data != *_data_ptr) {
++        // curr_data is the previous agent's modified class data,
++        // which the new agent has now superseded, so we can
++        // delete it.
++        _curr_env->Deallocate(_curr_data);
++      }
++
++      // The class file data has been changed by the current agent.
++      _curr_data = new_data;
++      _curr_len = new_len;
++      // Save the current agent env; we need it to deallocate the
++      // memory allocated by this agent.
++      _curr_env = env;
++    }
++  }
++
++  void copy_modified_data() {
++    // If one of the agents has modified the class file data,
++    // copy the modified class data to a new resource array.
++    if (_curr_data != *_data_ptr) {
++      *_data_ptr = NEW_RESOURCE_ARRAY(u1, _curr_len);
++      memcpy(*_data_ptr, _curr_data, _curr_len);
++      *_end_ptr = *_data_ptr + _curr_len;
++      _curr_env->Deallocate(_curr_data);
++    }
++  }
++};
++
++bool JvmtiExport::_should_post_class_file_load_hook = false;
++
++// This entry point posts the class file load hook on class load, redefine and retransform.
++void JvmtiExport::post_class_file_load_hook(symbolHandle h_name,
++                                            Handle class_loader,
++                                            Handle h_protection_domain,
++                                            unsigned char **data_ptr,
++                                            unsigned char **end_ptr,
++                                            unsigned char **cached_data_ptr,
++                                            jint *cached_length_ptr) {
++  JvmtiClassFileLoadHookPoster poster(h_name, class_loader,
++                                      h_protection_domain,
++                                      data_ptr, end_ptr,
++                                      cached_data_ptr,
++                                      cached_length_ptr);
++  poster.post();
++}
++
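Note the ownership contract the poster enforces: replacement bytes handed back by an agent are later freed through that environment's Deallocate (see _curr_env->Deallocate above), so a conforming callback must obtain them via Allocate from the same jvmtiEnv. A hedged agent-side sketch:

#include <jvmti.h>
#include <string.h>

static void JNICALL on_class_file_load_hook(
    jvmtiEnv *jvmti, JNIEnv *jni, jclass class_being_redefined,
    jobject loader, const char *name, jobject protection_domain,
    jint class_data_len, const unsigned char *class_data,
    jint *new_class_data_len, unsigned char **new_class_data) {
  unsigned char *buf = NULL;
  if (jvmti->Allocate(class_data_len, &buf) != JVMTI_ERROR_NONE)
    return;                                  // leave *new_class_data NULL
  memcpy(buf, class_data, class_data_len);   // rewrite bytes here as needed
  *new_class_data = buf;                     // VM frees this via Deallocate
  *new_class_data_len = class_data_len;
}
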
++void JvmtiExport::report_unsupported(bool on) {
++  // If any JVMTI service is turned on, we need to exit before native code
++  // tries to access nonexistent services.
++  if (on) {
++    vm_exit_during_initialization("Java Kernel does not support JVMTI.");
++  }
++}
++
++
++#ifndef JVMTI_KERNEL
+ static inline klassOop oop_to_klassOop(oop obj) {
+   klassOop k = obj->klass();
+ 
+@@ -275,9 +630,9 @@
+   if (k == SystemDictionary::class_klass()) {
+     if (!java_lang_Class::is_primitive(obj)) {
+       k = java_lang_Class::as_klassOop(obj);
+-      assert(k != NULL, "class for non-primitive mirror must exist");      
++      assert(k != NULL, "class for non-primitive mirror must exist");
+     }
+-  }  
++  }
+   return k;
+ }
+ 
+@@ -300,7 +655,7 @@
+   const void *_code_data;
+   jint _map_length;
+   jvmtiAddrLocationMap *_map;
+-  const void *_compile_info;    
++  const void *_compile_info;
+  public:
+   JvmtiCompiledMethodLoadEventMark(JavaThread *thread, nmethod *nm)
+           : JvmtiMethodEventMark(thread,methodHandle(thread, nm->method())) {
+@@ -312,7 +667,7 @@
+   ~JvmtiCompiledMethodLoadEventMark() {
+      FREE_C_HEAP_ARRAY(jvmtiAddrLocationMap, _map);
+   }
+-    
++
+   jint code_size() { return _code_size; }
+   const void *code_data() { return _code_data; }
+   jint map_length() { return _map_length; }
+@@ -326,7 +681,7 @@
+ private:
+   jobject _jobj;
+ public:
+-  JvmtiMonitorEventMark(JavaThread *thread, oop object) 
++  JvmtiMonitorEventMark(JavaThread *thread, oop object)
+           : JvmtiThreadEventMark(thread){
+      _jobj = to_jobject(object);
+   }
+@@ -338,9 +693,9 @@
+ // pending CompiledMethodUnload support
+ //
+ 
+-bool JvmtiExport::_have_pending_compiled_method_unload_events;		
+-GrowableArray<jmethodID>* JvmtiExport::_pending_compiled_method_unload_method_ids;	
+-GrowableArray<const void *>* JvmtiExport::_pending_compiled_method_unload_code_begins;	
++bool JvmtiExport::_have_pending_compiled_method_unload_events;
++GrowableArray<jmethodID>* JvmtiExport::_pending_compiled_method_unload_method_ids;
++GrowableArray<const void *>* JvmtiExport::_pending_compiled_method_unload_code_begins;
+ JavaThread* JvmtiExport::_current_poster;
+ 
+ // post any pending CompiledMethodUnload events
+@@ -370,11 +725,11 @@
+   //
+   // If another thread isn't posting we examine the list of pending jmethodIDs.
+   // If the list is empty then we are done. If it's not empty then this thread
+-  // (self) becomes the pending event poster and we remove the top (last) 
++  // (self) becomes the pending event poster and we remove the top (last)
+   // event from the list. Note that this means we remove the newest event first
+   // but as they are all CompiledMethodUnload events the order doesn't matter.
+   // Once we have removed a jmethodID then we exit the monitor. Any other thread
+-  // wanting to post a CompiledMethodLoad or DynamicCodeGenerated event will 
++  // wanting to post a CompiledMethodLoad or DynamicCodeGenerated event will
+   // be forced to wait on the monitor.
+   {
+     MutexLocker mu(JvmtiPendingEvent_lock);
+@@ -382,8 +737,8 @@
+       while (_current_poster != NULL) {
+         JvmtiPendingEvent_lock->wait();
+       }
+-    }     
+-    if ((_pending_compiled_method_unload_method_ids == NULL) || 
++    }
++    if ((_pending_compiled_method_unload_method_ids == NULL) ||
+         (_pending_compiled_method_unload_method_ids->length() == 0)) {
+       return;
+     }
+@@ -394,8 +749,8 @@
+       // re-entrant
+       guarantee(_current_poster == self, "checking");
+     }
+-    method = _pending_compiled_method_unload_method_ids->pop();     
+-    code_begin = _pending_compiled_method_unload_code_begins->pop();     
++    method = _pending_compiled_method_unload_method_ids->pop();
++    code_begin = _pending_compiled_method_unload_code_begins->pop();
+   }
+ 
+   // This thread is the pending event poster so it first posts the CompiledMethodUnload
+@@ -406,17 +761,17 @@
+   // pending events list, and finally notify any thread that might be waiting.
+   for (;;) {
+     EVT_TRIG_TRACE(JVMTI_EVENT_COMPILED_METHOD_UNLOAD,
+-                   ("JVMTI [%s] method compile unload event triggered",  
++                   ("JVMTI [%s] method compile unload event triggered",
+                    JvmtiTrace::safe_get_thread_name(self)));
+ 
+     // post the event for each environment that has this event enabled.
+-    JvmtiEnvIterator it; 
++    JvmtiEnvIterator it;
+     for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
+       if (env->is_enabled(JVMTI_EVENT_COMPILED_METHOD_UNLOAD)) {
+         EVT_TRACE(JVMTI_EVENT_COMPILED_METHOD_UNLOAD,
+-                  ("JVMTI [%s] class compile method unload event sent jmethodID " PTR_FORMAT, 
++                  ("JVMTI [%s] class compile method unload event sent jmethodID " PTR_FORMAT,
+                   JvmtiTrace::safe_get_thread_name(self), method));
+- 
++
+         JvmtiEventMark jem(self);
+         JvmtiJavaThreadEventTransition jet(self);
+         jvmtiEventCompiledMethodUnload callback = env->callbacks()->CompiledMethodUnload;
+@@ -431,16 +786,16 @@
+     // activiation of this function by this thread notify any waiters
+     // so that they can post.
+     {
+-      MutexLocker ml(JvmtiPendingEvent_lock);      
++      MutexLocker ml(JvmtiPendingEvent_lock);
+       if (_pending_compiled_method_unload_method_ids->length() == 0) {
+-	if (first_activation) {
+-	  _have_pending_compiled_method_unload_events = false;
+-	  _current_poster = NULL;
+-	  JvmtiPendingEvent_lock->notify_all();
+-	}
+-	return;
++        if (first_activation) {
++          _have_pending_compiled_method_unload_events = false;
++          _current_poster = NULL;
++          JvmtiPendingEvent_lock->notify_all();
++        }
++        return;
+       }
+-      method = _pending_compiled_method_unload_method_ids->pop();    
++      method = _pending_compiled_method_unload_method_ids->pop();
+       code_begin = _pending_compiled_method_unload_code_begins->pop();
+     }
+   }
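The surrounding hunks reindent a classic single-poster queue drain: the first thread to find pending events claims the poster role under the monitor, drains the list while posting outside the lock, and on the last event clears the flag and wakes any waiters. A generic sketch of the pattern (std::mutex stands in for the VM's monitor; all names hypothetical):

#include <condition_variable>
#include <deque>
#include <mutex>

static std::mutex mu;
static std::condition_variable cv;
static std::deque<int> pending;        // stands in for the jmethodID list
static bool poster_active = false;

static void deliver(int /*event*/) {}  // placeholder for posting the event

void post_pending_events() {
  int event;
  {
    std::unique_lock<std::mutex> lk(mu);
    cv.wait(lk, [] { return !poster_active; });  // one poster at a time
    if (pending.empty()) return;
    poster_active = true;                        // claim the poster role
    event = pending.back(); pending.pop_back();  // newest first; order is
  }                                              // irrelevant for unloads
  for (;;) {
    deliver(event);                              // post with the lock dropped
    std::lock_guard<std::mutex> lk(mu);
    if (pending.empty()) {                       // drained: hand off, wake waiters
      poster_active = false;
      cv.notify_all();
      return;
    }
    event = pending.back(); pending.pop_back();
  }
}
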
+@@ -459,15 +814,15 @@
+   if (state == NULL) {
+     return;
+   }
+-  EVT_TRIG_TRACE(JVMTI_EVENT_BREAKPOINT, ("JVMTI [%s] Trg Breakpoint triggered",  
++  EVT_TRIG_TRACE(JVMTI_EVENT_BREAKPOINT, ("JVMTI [%s] Trg Breakpoint triggered",
+                       JvmtiTrace::safe_get_thread_name(thread)));
+-  JvmtiEnvThreadStateIterator it(state); 
++  JvmtiEnvThreadStateIterator it(state);
+   for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
+     ets->compare_and_set_current_location(mh(), location, JVMTI_EVENT_BREAKPOINT);
+     if (!ets->breakpoint_posted() && ets->is_enabled(JVMTI_EVENT_BREAKPOINT)) {
+       ThreadState old_os_state = thread->osthread()->get_state();
+       thread->osthread()->set_state(BREAKPOINTED);
+-      EVT_TRACE(JVMTI_EVENT_BREAKPOINT, ("JVMTI [%s] Evt Breakpoint sent %s.%s @ %d",  
++      EVT_TRACE(JVMTI_EVENT_BREAKPOINT, ("JVMTI [%s] Evt Breakpoint sent %s.%s @ %d",
+                      JvmtiTrace::safe_get_thread_name(thread),
+                      (mh() == NULL) ? "NULL" : mh()->klass_name()->as_C_string(),
+                      (mh() == NULL) ? "NULL" : mh()->name()->as_C_string(),
+@@ -478,7 +833,7 @@
+       JvmtiJavaThreadEventTransition jet(thread);
+       jvmtiEventBreakpoint callback = env->callbacks()->Breakpoint;
+       if (callback != NULL) {
+-        (*callback)(env->jvmti_external(), jem.jni_env(), jem.jni_thread(), 
++        (*callback)(env->jvmti_external(), jem.jni_env(), jem.jni_thread(),
+                     jem.jni_methodID(), jem.location());
+       }
+ 
+@@ -488,48 +843,11 @@
+   }
+ }
+ 
+-jint  
+-JvmtiExport::get_jvmti_interface(JavaVM *jvm, void **penv, jint version) {
+-  /* To Do: add version checks */
+-
+-  if (JvmtiEnv::get_phase() == JVMTI_PHASE_LIVE) {
+-    JavaThread* current_thread = (JavaThread*) ThreadLocalStorage::thread();
+-    // transition code: native to VM
+-    ThreadInVMfromNative __tiv(current_thread);
+-    __ENTRY(jvmtiEnv*, JvmtiExport::get_jvmti_interface, current_thread)
+-    debug_only(VMNativeEntryWrapper __vew;) 
+-
+-    JvmtiEnv *jvmti_env = JvmtiEnv::create_a_jvmti();
+-    *penv = jvmti_env->jvmti_external();  // actual type is jvmtiEnv* -- not to be confused with JvmtiEnv*
+-    return JNI_OK; 
+-
+-  } else if (JvmtiEnv::get_phase() == JVMTI_PHASE_ONLOAD) {
+-    // not live, no thread to transition
+-    JvmtiEnv *jvmti_env = JvmtiEnv::create_a_jvmti();
+-    *penv = jvmti_env->jvmti_external();  // actual type is jvmtiEnv* -- not to be confused with JvmtiEnv*
+-    return JNI_OK; 
+-
+-  } else {
+-    // Called at the wrong time
+-    *penv = NULL;
+-    return JNI_EDETACHED;
+-  }
+-}
+-
+-
+ //////////////////////////////////////////////////////////////////////////////
+ 
+-int               JvmtiExport::_field_access_count                        = 0;
+-int               JvmtiExport::_field_modification_count                  = 0;
+-
+ bool              JvmtiExport::_can_get_source_debug_extension            = false;
+-bool              JvmtiExport::_can_examine_or_deopt_anywhere             = false;
+ bool              JvmtiExport::_can_maintain_original_method_order        = false;
+ bool              JvmtiExport::_can_post_interpreter_events               = false;
+-bool              JvmtiExport::_can_hotswap_or_post_breakpoint            = false;
+-bool              JvmtiExport::_can_modify_any_class                      = false;
+-bool              JvmtiExport::_can_walk_any_space                        = false;
+-bool              JvmtiExport::_can_access_local_variables                = false;
+ bool              JvmtiExport::_can_post_exceptions                       = false;
+ bool              JvmtiExport::_can_post_breakpoint                       = false;
+ bool              JvmtiExport::_can_post_field_access                     = false;
+@@ -547,7 +865,6 @@
+ bool              JvmtiExport::_should_post_class_unload                  = false;
+ bool              JvmtiExport::_should_post_thread_life                   = false;
+ bool              JvmtiExport::_should_clean_up_heap_objects              = false;
+-bool              JvmtiExport::_should_post_class_file_load_hook          = false;
+ bool              JvmtiExport::_should_post_native_method_bind            = false;
+ bool              JvmtiExport::_should_post_dynamic_code_generated        = false;
+ bool              JvmtiExport::_should_post_data_dump                     = false;
+@@ -559,230 +876,73 @@
+ bool              JvmtiExport::_should_post_monitor_waited                = false;
+ bool              JvmtiExport::_should_post_garbage_collection_start      = false;
+ bool              JvmtiExport::_should_post_garbage_collection_finish     = false;
+-bool		  JvmtiExport::_should_post_object_free			  = false;
+-bool		  JvmtiExport::_should_post_resource_exhausted        	  = false;
+-bool              JvmtiExport::_should_post_vm_object_alloc               = false;
+-bool              JvmtiExport::_has_redefined_a_class                     = false;
+-bool		  JvmtiExport::_all_dependencies_are_recorded		  = false;
+-
+-
+-
+-void JvmtiExport::enter_primordial_phase() { 
+-  JvmtiEnvBase::set_phase(JVMTI_PHASE_PRIMORDIAL);
+-}
+-
+-void JvmtiExport::enter_start_phase() { 
+-  JvmtiManageCapabilities::recompute_always_capabilities();
+-  JvmtiEnvBase::set_phase(JVMTI_PHASE_START);
+-}
+-
+-void JvmtiExport::enter_onload_phase() { 
+-  JvmtiEnvBase::set_phase(JVMTI_PHASE_ONLOAD);
+-}
+-
+-void JvmtiExport::enter_live_phase() { 
+-  JvmtiEnvBase::set_phase(JVMTI_PHASE_LIVE);
+-}
+-
+-
+-
+-////////////////////////////////////////////////////////////////////////////////////////////////
+-
+-
+-//
+-// field access management
+-//
+-
+-// interpreter generator needs the address of the counter
+-address JvmtiExport::get_field_access_count_addr() {
+-  // We don't grab a lock because we don't want to
+-  // serialize field access between all threads. This means that a
+-  // thread on another processor can see the wrong count value and
+-  // may either miss making a needed call into post_field_access()
+-  // or will make an unneeded call into post_field_access(). We pay
+-  // this price to avoid slowing down the VM when we aren't watching
+-  // field accesses.
+-  // Other access/mutation safe by virtue of being in VM state.
+-  return (address)(&_field_access_count);
+-}
+-
+-
+-//
+-// field modification management
+-//
+-
+-// interpreter generator needs the address of the counter
+-address JvmtiExport::get_field_modification_count_addr() {
+-  // We don't grab a lock because we don't
+-  // want to serialize field modification between all threads. This
+-  // means that a thread on another processor can see the wrong
+-  // count value and may either miss making a needed call into
+-  // post_field_modification() or will make an unneeded call into
+-  // post_field_modification(). We pay this price to avoid slowing
+-  // down the VM when we aren't watching field modifications.
+-  // Other access/mutation safe by virtue of being in VM state.
+-  return (address)(&_field_modification_count);
+-}
+-
+-
+-//
+-// JVMTI single step management
+-//
+-void JvmtiExport::at_single_stepping_point(JavaThread *thread, methodOop method, address location) {
+-  assert(JvmtiExport::should_post_single_step(), "must be single stepping");
+-
+-  HandleMark hm(thread);
+-  methodHandle mh(thread, method);
+-
+-  // update information about current location and post a step event
+-  JvmtiThreadState *state = thread->jvmti_thread_state();
+-  if (state == NULL) {
+-    return;
+-  }
+-  EVT_TRIG_TRACE(JVMTI_EVENT_SINGLE_STEP, ("JVMTI [%s] Trg Single Step triggered",  
+-                      JvmtiTrace::safe_get_thread_name(thread)));
+-  if (!state->hide_single_stepping()) {
+-    if (state->is_pending_step_for_popframe()) {
+-      state->process_pending_step_for_popframe();
+-    }
+-    if (state->is_pending_step_for_earlyret()) {
+-      state->process_pending_step_for_earlyret();
+-    }
+-    JvmtiExport::post_single_step(thread, mh(), location);
+-  }
+-}
+-
+-
+-void JvmtiExport::expose_single_stepping(JavaThread *thread) {
+-  JvmtiThreadState *state = thread->jvmti_thread_state();
+-  if (state != NULL) {
+-    state->clear_hide_single_stepping();
+-  }
+-}
+-
+-
+-bool JvmtiExport::hide_single_stepping(JavaThread *thread) {
+-  JvmtiThreadState *state = thread->jvmti_thread_state();
+-  if (state != NULL && state->is_enabled(JVMTI_EVENT_SINGLE_STEP)) { 
+-    state->set_hide_single_stepping();
+-    return true; 
+-  } else {
+-    return false; 
+-  }
+-}
+-
+-//
+-// JVMTI events that the VM posts to the debugger
+-//
+-
+-void JvmtiExport::post_vm_start() {
+-  EVT_TRIG_TRACE(JVMTI_EVENT_VM_START, ("JVMTI Trg VM start event triggered" ));
+-
+-  // can now enable some events 
+-  JvmtiEventController::vm_start();
+-
+-  JvmtiEnvIterator it; 
+-  for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
+-    if (env->is_enabled(JVMTI_EVENT_VM_START)) {
+-      EVT_TRACE(JVMTI_EVENT_VM_START, ("JVMTI Evt VM start event sent" ));
+-
+-      JavaThread *thread  = JavaThread::current();
+-      JvmtiThreadEventMark jem(thread);
+-      JvmtiJavaThreadEventTransition jet(thread);
+-      jvmtiEventVMStart callback = env->callbacks()->VMStart;
+-      if (callback != NULL) {
+-        (*callback)(env->jvmti_external(), jem.jni_env());
+-      }
+-    }
+-  }  
+-}
+-
+-
+-void JvmtiExport::post_vm_initialized() {
+-  EVT_TRIG_TRACE(JVMTI_EVENT_VM_INIT, ("JVMTI Trg VM init event triggered" ));
++bool              JvmtiExport::_should_post_object_free                   = false;
++bool              JvmtiExport::_should_post_resource_exhausted            = false;
++bool              JvmtiExport::_should_post_vm_object_alloc               = false;
+ 
+-  // can now enable events 
+-  JvmtiEventController::vm_init();
++////////////////////////////////////////////////////////////////////////////////////////////////
+ 
+-  JvmtiEnvIterator it; 
+-  for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
+-    if (env->is_enabled(JVMTI_EVENT_VM_INIT)) {
+-      EVT_TRACE(JVMTI_EVENT_VM_INIT, ("JVMTI Evt VM init event sent" ));
+ 
+-      JavaThread *thread  = JavaThread::current();
+-      JvmtiThreadEventMark jem(thread);
+-      JvmtiJavaThreadEventTransition jet(thread);
+-      jvmtiEventVMInit callback = env->callbacks()->VMInit;
+-      if (callback != NULL) {
+-        (*callback)(env->jvmti_external(), jem.jni_env(), jem.jni_thread());
+-      }
+-    }
+-  }  
+-}
++//
++// JVMTI single step management
++//
++void JvmtiExport::at_single_stepping_point(JavaThread *thread, methodOop method, address location) {
++  assert(JvmtiExport::should_post_single_step(), "must be single stepping");
+ 
++  HandleMark hm(thread);
++  methodHandle mh(thread, method);
+ 
+-extern "C" {
+-  typedef void (JNICALL *Agent_OnUnload_t)(JavaVM *);
++  // update information about current location and post a step event
++  JvmtiThreadState *state = thread->jvmti_thread_state();
++  if (state == NULL) {
++    return;
++  }
++  EVT_TRIG_TRACE(JVMTI_EVENT_SINGLE_STEP, ("JVMTI [%s] Trg Single Step triggered",
++                      JvmtiTrace::safe_get_thread_name(thread)));
++  if (!state->hide_single_stepping()) {
++    if (state->is_pending_step_for_popframe()) {
++      state->process_pending_step_for_popframe();
++    }
++    if (state->is_pending_step_for_earlyret()) {
++      state->process_pending_step_for_earlyret();
++    }
++    JvmtiExport::post_single_step(thread, mh(), location);
++  }
+ }
+ 
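at_single_stepping_point only fires when an agent has asked for stepping; on the agent side that takes the can_generate_single_step_events capability plus an event-mode switch (standard JVMTI calls; sketch only):

#include <jvmti.h>

static jvmtiError enable_single_step(jvmtiEnv *jvmti, jthread thread) {
  jvmtiCapabilities caps = {};                 // request only what we need
  caps.can_generate_single_step_events = 1;
  jvmtiError err = jvmti->AddCapabilities(&caps);
  if (err != JVMTI_ERROR_NONE) return err;
  // A NULL thread enables stepping globally; a specific thread limits cost.
  return jvmti->SetEventNotificationMode(JVMTI_ENABLE,
                                         JVMTI_EVENT_SINGLE_STEP, thread);
}
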
+-void JvmtiExport::post_vm_death() {
+-  EVT_TRIG_TRACE(JVMTI_EVENT_VM_DEATH, ("JVMTI Trg VM death event triggered" ));
+ 
+-  JvmtiEnvIterator it; 
+-  for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
+-    if (env->is_enabled(JVMTI_EVENT_VM_DEATH)) {
+-      EVT_TRACE(JVMTI_EVENT_VM_DEATH, ("JVMTI Evt VM death event sent" ));
+-      
+-      JavaThread *thread  = JavaThread::current();
+-      JvmtiEventMark jem(thread);
+-      JvmtiJavaThreadEventTransition jet(thread);
+-      jvmtiEventVMDeath callback = env->callbacks()->VMDeath;
+-      if (callback != NULL) {
+-        (*callback)(env->jvmti_external(), jem.jni_env());
+-      }
+-    }
++void JvmtiExport::expose_single_stepping(JavaThread *thread) {
++  JvmtiThreadState *state = thread->jvmti_thread_state();
++  if (state != NULL) {
++    state->clear_hide_single_stepping();
+   }
++}
+ 
+-  JvmtiEnvBase::set_phase(JVMTI_PHASE_DEAD);
+-  JvmtiEventController::vm_death();
+ 
+-  // Send any Agent_OnUnload notifications
+-  const char *on_unload_symbols[] = AGENT_ONUNLOAD_SYMBOLS;
+-  extern struct JavaVM_ main_vm;
+-  for (AgentLibrary* agent = Arguments::agents(); agent != NULL; agent = agent->next()) {
+-
+-    // Find the Agent_OnUnload function.
+-    for (uint symbol_index = 0; symbol_index < ARRAY_SIZE(on_unload_symbols); symbol_index++) {
+-      Agent_OnUnload_t unload_entry = CAST_TO_FN_PTR(Agent_OnUnload_t,
+-               hpi::dll_lookup(agent->os_lib(), on_unload_symbols[symbol_index]));
+-
+-      // Invoke the Agent_OnUnload function
+-      if (unload_entry != NULL) {
+-        JavaThread* thread = JavaThread::current();
+-        ThreadToNativeFromVM ttn(thread);
+-        HandleMark hm(thread);
+-        (*unload_entry)(&main_vm);
+-        break;
+-      }
+-    }
++bool JvmtiExport::hide_single_stepping(JavaThread *thread) {
++  JvmtiThreadState *state = thread->jvmti_thread_state();
++  if (state != NULL && state->is_enabled(JVMTI_EVENT_SINGLE_STEP)) {
++    state->set_hide_single_stepping();
++    return true;
++  } else {
++    return false;
+   }
+ }
+ 
+-
+ void JvmtiExport::post_class_load(JavaThread *thread, klassOop klass) {
+-  HandleMark hm(thread);  
++  HandleMark hm(thread);
+   KlassHandle kh(thread, klass);
+ 
+-  EVT_TRIG_TRACE(JVMTI_EVENT_CLASS_LOAD, ("JVMTI [%s] Trg Class Load triggered",  
++  EVT_TRIG_TRACE(JVMTI_EVENT_CLASS_LOAD, ("JVMTI [%s] Trg Class Load triggered",
+                       JvmtiTrace::safe_get_thread_name(thread)));
+   JvmtiThreadState* state = thread->jvmti_thread_state();
+   if (state == NULL) {
+     return;
+   }
+-  JvmtiEnvThreadStateIterator it(state); 
++  JvmtiEnvThreadStateIterator it(state);
+   for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
+     if (ets->is_enabled(JVMTI_EVENT_CLASS_LOAD)) {
+-      EVT_TRACE(JVMTI_EVENT_CLASS_LOAD, ("JVMTI [%s] Evt Class Load sent %s",  
++      EVT_TRACE(JVMTI_EVENT_CLASS_LOAD, ("JVMTI [%s] Evt Class Load sent %s",
+                                          JvmtiTrace::safe_get_thread_name(thread),
+                                          kh()==NULL? "NULL" : Klass::cast(kh())->external_name() ));
+ 
+@@ -799,19 +959,19 @@
+ 
+ 
+ void JvmtiExport::post_class_prepare(JavaThread *thread, klassOop klass) {
+-  HandleMark hm(thread);  
++  HandleMark hm(thread);
+   KlassHandle kh(thread, klass);
+ 
+-  EVT_TRIG_TRACE(JVMTI_EVENT_CLASS_PREPARE, ("JVMTI [%s] Trg Class Prepare triggered",  
++  EVT_TRIG_TRACE(JVMTI_EVENT_CLASS_PREPARE, ("JVMTI [%s] Trg Class Prepare triggered",
+                       JvmtiTrace::safe_get_thread_name(thread)));
+   JvmtiThreadState* state = thread->jvmti_thread_state();
+   if (state == NULL) {
+     return;
+   }
+-  JvmtiEnvThreadStateIterator it(state); 
++  JvmtiEnvThreadStateIterator it(state);
+   for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
+-    if (ets->is_enabled(JVMTI_EVENT_CLASS_PREPARE)) { 
+-      EVT_TRACE(JVMTI_EVENT_CLASS_PREPARE, ("JVMTI [%s] Evt Class Prepare sent %s",  
++    if (ets->is_enabled(JVMTI_EVENT_CLASS_PREPARE)) {
++      EVT_TRACE(JVMTI_EVENT_CLASS_PREPARE, ("JVMTI [%s] Evt Class Prepare sent %s",
+                                             JvmtiTrace::safe_get_thread_name(thread),
+                                             kh()==NULL? "NULL" : Klass::cast(kh())->external_name() ));
+ 
+@@ -832,16 +992,16 @@
+   KlassHandle kh(thread, klass);
+ 
+   EVT_TRIG_TRACE(EXT_EVENT_CLASS_UNLOAD, ("JVMTI [?] Trg Class Unload triggered" ));
+-  if (JvmtiEventController::is_enabled((jvmtiEvent)EXT_EVENT_CLASS_UNLOAD)) { 
++  if (JvmtiEventController::is_enabled((jvmtiEvent)EXT_EVENT_CLASS_UNLOAD)) {
+     assert(thread->is_VM_thread(), "wrong thread");
+ 
+     // get JavaThread for whom we are proxy
+     JavaThread *real_thread =
+         (JavaThread *)((VMThread *)thread)->vm_operation()->calling_thread();
+ 
+-    JvmtiEnvIterator it; 
++    JvmtiEnvIterator it;
+     for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
+-      if (env->is_enabled((jvmtiEvent)EXT_EVENT_CLASS_UNLOAD)) { 
++      if (env->is_enabled((jvmtiEvent)EXT_EVENT_CLASS_UNLOAD)) {
+         EVT_TRACE(EXT_EVENT_CLASS_UNLOAD, ("JVMTI [?] Evt Class Unload sent %s",
+                   kh()==NULL? "NULL" : Klass::cast(kh())->external_name() ));
+ 
+@@ -861,7 +1021,7 @@
+           (*callback)(env->jvmti_external(), jni_env, jt, jk);
+         }
+ 
+-        assert(real_thread->thread_state() == _thread_in_native, 
++        assert(real_thread->thread_state() == _thread_in_native,
+                "JavaThread should be in native");
+         real_thread->set_thread_state(prev_state);
+ 
+@@ -876,19 +1036,19 @@
+ void JvmtiExport::post_thread_start(JavaThread *thread) {
+   assert(thread->thread_state() == _thread_in_vm, "must be in vm state");
+ 
+-  EVT_TRIG_TRACE(JVMTI_EVENT_THREAD_START, ("JVMTI [%s] Trg Thread Start event triggered",  
++  EVT_TRIG_TRACE(JVMTI_EVENT_THREAD_START, ("JVMTI [%s] Trg Thread Start event triggered",
+                       JvmtiTrace::safe_get_thread_name(thread)));
+ 
+-  // do JVMTI thread initialization (if needed) 
++  // do JVMTI thread initialization (if needed)
+   JvmtiEventController::thread_started(thread);
+ 
+   // Do not post thread start event for hidden java thread.
+   if (JvmtiEventController::is_enabled(JVMTI_EVENT_THREAD_START) &&
+       !thread->is_hidden_from_external_view()) {
+-    JvmtiEnvIterator it; 
++    JvmtiEnvIterator it;
+     for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
+-      if (env->is_enabled(JVMTI_EVENT_THREAD_START)) { 
+-        EVT_TRACE(JVMTI_EVENT_THREAD_START, ("JVMTI [%s] Evt Thread Start event sent",  
++      if (env->is_enabled(JVMTI_EVENT_THREAD_START)) {
++        EVT_TRACE(JVMTI_EVENT_THREAD_START, ("JVMTI [%s] Evt Thread Start event sent",
+                      JvmtiTrace::safe_get_thread_name(thread) ));
+ 
+         JvmtiThreadEventMark jem(thread);
+@@ -904,7 +1064,7 @@
+ 
+ 
+ void JvmtiExport::post_thread_end(JavaThread *thread) {
+-  EVT_TRIG_TRACE(JVMTI_EVENT_THREAD_END, ("JVMTI [%s] Trg Thread End event triggered",  
++  EVT_TRIG_TRACE(JVMTI_EVENT_THREAD_END, ("JVMTI [%s] Trg Thread End event triggered",
+                       JvmtiTrace::safe_get_thread_name(thread)));
+ 
+   JvmtiThreadState *state = thread->jvmti_thread_state();
+@@ -916,10 +1076,10 @@
+   if (state->is_enabled(JVMTI_EVENT_THREAD_END) &&
+       !thread->is_hidden_from_external_view()) {
+ 
+-    JvmtiEnvThreadStateIterator it(state); 
++    JvmtiEnvThreadStateIterator it(state);
+     for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
+       if (ets->is_enabled(JVMTI_EVENT_THREAD_END)) {
+-        EVT_TRACE(JVMTI_EVENT_THREAD_END, ("JVMTI [%s] Evt Thread End event sent",  
++        EVT_TRACE(JVMTI_EVENT_THREAD_END, ("JVMTI [%s] Evt Thread End event sent",
+                      JvmtiTrace::safe_get_thread_name(thread) ));
+ 
+         JvmtiEnv *env = ets->get_env();
+@@ -950,7 +1110,7 @@
+ void JvmtiExport::post_resource_exhausted(jint resource_exhausted_flags, const char* description) {
+   EVT_TRIG_TRACE(JVMTI_EVENT_RESOURCE_EXHAUSTED, ("JVMTI Trg resource exhausted event triggered" ));
+ 
+-  JvmtiEnvIterator it; 
++  JvmtiEnvIterator it;
+   for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
+     if (env->is_enabled(JVMTI_EVENT_RESOURCE_EXHAUSTED)) {
+       EVT_TRACE(JVMTI_EVENT_RESOURCE_EXHAUSTED, ("JVMTI Evt resource exhausted event sent" ));
+@@ -960,18 +1120,18 @@
+       JvmtiJavaThreadEventTransition jet(thread);
+       jvmtiEventResourceExhausted callback = env->callbacks()->ResourceExhausted;
+       if (callback != NULL) {
+-        (*callback)(env->jvmti_external(), jem.jni_env(), 
++        (*callback)(env->jvmti_external(), jem.jni_env(),
+                     resource_exhausted_flags, NULL, description);
+       }
+     }
+-  }  
++  }
+ }
+ 
+ void JvmtiExport::post_method_entry(JavaThread *thread, methodOop method, frame current_frame) {
+-  HandleMark hm(thread);  
++  HandleMark hm(thread);
+   methodHandle mh(thread, method);
+ 
+-  EVT_TRIG_TRACE(JVMTI_EVENT_METHOD_ENTRY, ("JVMTI [%s] Trg Method Entry triggered %s.%s",  
++  EVT_TRIG_TRACE(JVMTI_EVENT_METHOD_ENTRY, ("JVMTI [%s] Trg Method Entry triggered %s.%s",
+                      JvmtiTrace::safe_get_thread_name(thread),
+                      (mh() == NULL) ? "NULL" : mh()->klass_name()->as_C_string(),
+                      (mh() == NULL) ? "NULL" : mh()->name()->as_C_string() ));
+@@ -985,14 +1145,14 @@
+   state->incr_cur_stack_depth();
+ 
+   if (state->is_enabled(JVMTI_EVENT_METHOD_ENTRY)) {
+-    JvmtiEnvThreadStateIterator it(state); 
++    JvmtiEnvThreadStateIterator it(state);
+     for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
+       if (ets->is_enabled(JVMTI_EVENT_METHOD_ENTRY)) {
+-        EVT_TRACE(JVMTI_EVENT_METHOD_ENTRY, ("JVMTI [%s] Evt Method Entry sent %s.%s",  
++        EVT_TRACE(JVMTI_EVENT_METHOD_ENTRY, ("JVMTI [%s] Evt Method Entry sent %s.%s",
+                                              JvmtiTrace::safe_get_thread_name(thread),
+                                              (mh() == NULL) ? "NULL" : mh()->klass_name()->as_C_string(),
+                                              (mh() == NULL) ? "NULL" : mh()->name()->as_C_string() ));
+-    
++
+         JvmtiEnv *env = ets->get_env();
+         JvmtiMethodEventMark jem(thread, mh);
+         JvmtiJavaThreadEventTransition jet(thread);
+@@ -1009,7 +1169,7 @@
+   HandleMark hm(thread);
+   methodHandle mh(thread, method);
+ 
+-  EVT_TRIG_TRACE(JVMTI_EVENT_METHOD_EXIT, ("JVMTI [%s] Trg Method Exit triggered %s.%s",  
++  EVT_TRIG_TRACE(JVMTI_EVENT_METHOD_EXIT, ("JVMTI [%s] Trg Method Exit triggered %s.%s",
+                      JvmtiTrace::safe_get_thread_name(thread),
+                      (mh() == NULL) ? "NULL" : mh()->klass_name()->as_C_string(),
+                      (mh() == NULL) ? "NULL" : mh()->name()->as_C_string() ));
+@@ -1027,12 +1187,12 @@
+ 
+   if (state->is_enabled(JVMTI_EVENT_METHOD_EXIT)) {
+     Handle result;
+-    jvalue value;	   
++    jvalue value;
+     value.j = 0L;
+ 
+     // if the method hasn't been popped because of an exception then we populate
+     // the return_value parameter for the callback. At this point we only have
+-    // the address of a "raw result" and we just call into the interpreter to 
++    // the address of a "raw result" and we just call into the interpreter to
+     // convert this into a jvalue.
+     if (!exception_exit) {
+       oop oop_result;
+@@ -1042,23 +1202,23 @@
+       }
+     }
+ 
+-    JvmtiEnvThreadStateIterator it(state); 
++    JvmtiEnvThreadStateIterator it(state);
+     for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
+       if (ets->is_enabled(JVMTI_EVENT_METHOD_EXIT)) {
+-        EVT_TRACE(JVMTI_EVENT_METHOD_EXIT, ("JVMTI [%s] Evt Method Exit sent %s.%s",  
++        EVT_TRACE(JVMTI_EVENT_METHOD_EXIT, ("JVMTI [%s] Evt Method Exit sent %s.%s",
+                                             JvmtiTrace::safe_get_thread_name(thread),
+                                             (mh() == NULL) ? "NULL" : mh()->klass_name()->as_C_string(),
+                                             (mh() == NULL) ? "NULL" : mh()->name()->as_C_string() ));
+ 
+         JvmtiEnv *env = ets->get_env();
+         JvmtiMethodEventMark jem(thread, mh);
+-	if (result.not_null()) {
+-	  value.l = JNIHandles::make_local(thread, result());
+-	}
+-   	JvmtiJavaThreadEventTransition jet(thread);
++        if (result.not_null()) {
++          value.l = JNIHandles::make_local(thread, result());
++        }
++        JvmtiJavaThreadEventTransition jet(thread);
+         jvmtiEventMethodExit callback = env->callbacks()->MethodExit;
+         if (callback != NULL) {
+-          (*callback)(env->jvmti_external(), jem.jni_env(), jem.jni_thread(), 
++          (*callback)(env->jvmti_external(), jem.jni_env(), jem.jni_thread(),
+                       jem.jni_methodID(), exception_exit,  value);
+         }
+       }
+@@ -1066,7 +1226,7 @@
+   }
+ 
+   if (state->is_enabled(JVMTI_EVENT_FRAME_POP)) {
+-    JvmtiEnvThreadStateIterator it(state); 
++    JvmtiEnvThreadStateIterator it(state);
+     for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
+       int cur_frame_number = state->cur_stack_depth();
+ 
+@@ -1074,18 +1234,18 @@
+         // we have a NotifyFramePop entry for this frame.
+         // now check that this env/thread wants this event
+         if (ets->is_enabled(JVMTI_EVENT_FRAME_POP)) {
+-          EVT_TRACE(JVMTI_EVENT_FRAME_POP, ("JVMTI [%s] Evt Frame Pop sent %s.%s",  
++          EVT_TRACE(JVMTI_EVENT_FRAME_POP, ("JVMTI [%s] Evt Frame Pop sent %s.%s",
+                                             JvmtiTrace::safe_get_thread_name(thread),
+                                             (mh() == NULL) ? "NULL" : mh()->klass_name()->as_C_string(),
+                                             (mh() == NULL) ? "NULL" : mh()->name()->as_C_string() ));
+ 
+           // we also need to issue a frame pop event for this frame
+           JvmtiEnv *env = ets->get_env();
+-	  JvmtiMethodEventMark jem(thread, mh);
++          JvmtiMethodEventMark jem(thread, mh);
+           JvmtiJavaThreadEventTransition jet(thread);
+           jvmtiEventFramePop callback = env->callbacks()->FramePop;
+           if (callback != NULL) {
+-            (*callback)(env->jvmti_external(), jem.jni_env(), jem.jni_thread(), 
++            (*callback)(env->jvmti_external(), jem.jni_env(), jem.jni_thread(),
+                         jem.jni_methodID(), exception_exit);
+           }
+         }
+@@ -1101,18 +1261,18 @@
+ 
+ // Todo: inline this for optimization
+ void JvmtiExport::post_single_step(JavaThread *thread, methodOop method, address location) {
+-  HandleMark hm(thread);  
++  HandleMark hm(thread);
+   methodHandle mh(thread, method);
+ 
+   JvmtiThreadState *state = thread->jvmti_thread_state();
+   if (state == NULL) {
+     return;
+   }
+-  JvmtiEnvThreadStateIterator it(state); 
++  JvmtiEnvThreadStateIterator it(state);
+   for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
+     ets->compare_and_set_current_location(mh(), location, JVMTI_EVENT_SINGLE_STEP);
+     if (!ets->single_stepping_posted() && ets->is_enabled(JVMTI_EVENT_SINGLE_STEP)) {
+-      EVT_TRACE(JVMTI_EVENT_SINGLE_STEP, ("JVMTI [%s] Evt Single Step sent %s.%s @ %d",  
++      EVT_TRACE(JVMTI_EVENT_SINGLE_STEP, ("JVMTI [%s] Evt Single Step sent %s.%s @ %d",
+                     JvmtiTrace::safe_get_thread_name(thread),
+                     (mh() == NULL) ? "NULL" : mh()->klass_name()->as_C_string(),
+                     (mh() == NULL) ? "NULL" : mh()->name()->as_C_string(),
+@@ -1123,7 +1283,7 @@
+       JvmtiJavaThreadEventTransition jet(thread);
+       jvmtiEventSingleStep callback = env->callbacks()->SingleStep;
+       if (callback != NULL) {
+-        (*callback)(env->jvmti_external(), jem.jni_env(), jem.jni_thread(), 
++        (*callback)(env->jvmti_external(), jem.jni_env(), jem.jni_thread(),
+                     jem.jni_methodID(), jem.location());
+       }
+ 
+@@ -1134,7 +1294,7 @@
+ 
+ 
+ void JvmtiExport::post_exception_throw(JavaThread *thread, methodOop method, address location, oop exception) {
+-  HandleMark hm(thread);  
++  HandleMark hm(thread);
+   methodHandle mh(thread, method);
+   Handle exception_handle(thread, exception);
+ 
+@@ -1142,25 +1302,25 @@
+   if (state == NULL) {
+     return;
+   }
+-  
+-  EVT_TRIG_TRACE(JVMTI_EVENT_EXCEPTION, ("JVMTI [%s] Trg Exception thrown triggered",  
++
++  EVT_TRIG_TRACE(JVMTI_EVENT_EXCEPTION, ("JVMTI [%s] Trg Exception thrown triggered",
+                       JvmtiTrace::safe_get_thread_name(thread)));
+   if (!state->is_exception_detected()) {
+     state->set_exception_detected();
+-    JvmtiEnvThreadStateIterator it(state); 
++    JvmtiEnvThreadStateIterator it(state);
+     for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
+       if (ets->is_enabled(JVMTI_EVENT_EXCEPTION) && (exception != NULL)) {
+ 
+         EVT_TRACE(JVMTI_EVENT_EXCEPTION,
+-                     ("JVMTI [%s] Evt Exception thrown sent %s.%s @ %d",  
++                     ("JVMTI [%s] Evt Exception thrown sent %s.%s @ %d",
+                       JvmtiTrace::safe_get_thread_name(thread),
+                       (mh() == NULL) ? "NULL" : mh()->klass_name()->as_C_string(),
+                       (mh() == NULL) ? "NULL" : mh()->name()->as_C_string(),
+                       location - mh()->code_base() ));
+-      
++
+         JvmtiEnv *env = ets->get_env();
+         JvmtiExceptionEventMark jem(thread, mh, location, exception_handle);
+-    
++
+         // It's okay to clear these exceptions here because we duplicate
+         // this lookup in InterpreterRuntime::exception_handler_for_exception.
+         EXCEPTION_MARK;
+@@ -1199,9 +1359,9 @@
+         JvmtiJavaThreadEventTransition jet(thread);
+         jvmtiEventException callback = env->callbacks()->Exception;
+         if (callback != NULL) {
+-          (*callback)(env->jvmti_external(), jem.jni_env(), jem.jni_thread(), 
++          (*callback)(env->jvmti_external(), jem.jni_env(), jem.jni_thread(),
+                       jem.jni_methodID(), jem.location(),
+-                      jem.exception(), 
++                      jem.exception(),
+                       catch_jmethodID, current_bci);
+         }
+       }
+@@ -1214,7 +1374,7 @@
+ 
+ 
+ void JvmtiExport::notice_unwind_due_to_exception(JavaThread *thread, methodOop method, address location, oop exception, bool in_handler_frame) {
+-  HandleMark hm(thread);  
++  HandleMark hm(thread);
+   methodHandle mh(thread, method);
+   Handle exception_handle(thread, exception);
+ 
+@@ -1222,20 +1382,20 @@
+   if (state == NULL) {
+     return;
+   }
+-  EVT_TRIG_TRACE(JVMTI_EVENT_EXCEPTION_CATCH, 
+-                    ("JVMTI [%s] Trg unwind_due_to_exception triggered %s.%s @ %s%d - %s",  
++  EVT_TRIG_TRACE(JVMTI_EVENT_EXCEPTION_CATCH,
++                    ("JVMTI [%s] Trg unwind_due_to_exception triggered %s.%s @ %s%d - %s",
+                      JvmtiTrace::safe_get_thread_name(thread),
+                      (mh() == NULL) ? "NULL" : mh()->klass_name()->as_C_string(),
+                      (mh() == NULL) ? "NULL" : mh()->name()->as_C_string(),
+-                     location==0? "no location:" : "", 
++                     location==0? "no location:" : "",
+                      location==0? 0 : location - mh()->code_base(),
+-                     in_handler_frame? "in handler frame" : "not handler frame" )); 
+-  
++                     in_handler_frame? "in handler frame" : "not handler frame" ));
++
+   if (state->is_exception_detected()) {
+-      
++
+     state->invalidate_cur_stack_depth();
+     if (!in_handler_frame) {
+-      // Not in exception handler. 
++      // Not in exception handler.
+       if(state->is_interp_only_mode()) {
+         // method exit and frame pop events are posted only in interp mode.
+         // When these events are enabled code should be in running in interp mode.
+@@ -1243,7 +1403,7 @@
+         // The cached cur_stack_depth might have changed from the
+         // operations of frame pop or method exit. We are not 100% sure
+         // the cached cur_stack_depth is still valid depth so invalidate
+-        // it. 
++        // it.
+         state->invalidate_cur_stack_depth();
+       }
+     } else {
+@@ -1254,22 +1414,22 @@
+       assert(!state->is_exception_caught(), "exception must not be caught yet.");
+       state->set_exception_caught();
+ 
+-      JvmtiEnvThreadStateIterator it(state); 
++      JvmtiEnvThreadStateIterator it(state);
+       for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
+         if (ets->is_enabled(JVMTI_EVENT_EXCEPTION_CATCH) && (exception_handle() != NULL)) {
+-          EVT_TRACE(JVMTI_EVENT_EXCEPTION_CATCH, 
+-                     ("JVMTI [%s] Evt ExceptionCatch sent %s.%s @ %d",  
++          EVT_TRACE(JVMTI_EVENT_EXCEPTION_CATCH,
++                     ("JVMTI [%s] Evt ExceptionCatch sent %s.%s @ %d",
+                       JvmtiTrace::safe_get_thread_name(thread),
+                       (mh() == NULL) ? "NULL" : mh()->klass_name()->as_C_string(),
+                       (mh() == NULL) ? "NULL" : mh()->name()->as_C_string(),
+-                      location - mh()->code_base() )); 
++                      location - mh()->code_base() ));
+ 
+           JvmtiEnv *env = ets->get_env();
+           JvmtiExceptionEventMark jem(thread, mh, location, exception_handle);
+           JvmtiJavaThreadEventTransition jet(thread);
+           jvmtiEventExceptionCatch callback = env->callbacks()->ExceptionCatch;
+           if (callback != NULL) {
+-            (*callback)(env->jvmti_external(), jem.jni_env(), jem.jni_thread(), 
++            (*callback)(env->jvmti_external(), jem.jni_env(), jem.jni_thread(),
+                       jem.jni_methodID(), jem.location(),
+                       jem.exception());
+           }
+@@ -1347,12 +1507,12 @@
+   if (state == NULL) {
+     return;
+   }
+-  EVT_TRIG_TRACE(JVMTI_EVENT_FIELD_ACCESS, ("JVMTI [%s] Trg Field Access event triggered",  
++  EVT_TRIG_TRACE(JVMTI_EVENT_FIELD_ACCESS, ("JVMTI [%s] Trg Field Access event triggered",
+                       JvmtiTrace::safe_get_thread_name(thread)));
+-  JvmtiEnvThreadStateIterator it(state); 
++  JvmtiEnvThreadStateIterator it(state);
+   for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
+     if (ets->is_enabled(JVMTI_EVENT_FIELD_ACCESS)) {
+-      EVT_TRACE(JVMTI_EVENT_FIELD_ACCESS, ("JVMTI [%s] Evt Field Access event sent %s.%s @ %d",  
++      EVT_TRACE(JVMTI_EVENT_FIELD_ACCESS, ("JVMTI [%s] Evt Field Access event sent %s.%s @ %d",
+                      JvmtiTrace::safe_get_thread_name(thread),
+                      (mh() == NULL) ? "NULL" : mh()->klass_name()->as_C_string(),
+                      (mh() == NULL) ? "NULL" : mh()->name()->as_C_string(),
+@@ -1365,7 +1525,7 @@
+       JvmtiJavaThreadEventTransition jet(thread);
+       jvmtiEventFieldAccess callback = env->callbacks()->FieldAccess;
+       if (callback != NULL) {
+-        (*callback)(env->jvmti_external(), jem.jni_env(), jem.jni_thread(), 
++        (*callback)(env->jvmti_external(), jem.jni_env(), jem.jni_thread(),
+                     jem.jni_methodID(), jem.location(),
+                     field_jclass, field_jobject, field);
+       }
+@@ -1429,7 +1589,7 @@
+     h_obj = Handle(thread, obj);
+   }
+   KlassHandle h_klass(thread, klass);
+-  post_field_modification(thread, 
++  post_field_modification(thread,
+                           thread->last_frame().interpreter_frame_method(),
+                           thread->last_frame().interpreter_frame_bcp(),
+                           h_klass, h_obj, fieldID, sig_type, value);
+@@ -1448,7 +1608,7 @@
+     if (found) {
+       jint ival = value->i;
+       // convert value from int to appropriate type
+-      switch (fd.field_type()) {   
++      switch (fd.field_type()) {
+       case T_BOOLEAN:
+         sig_type = 'Z';
+         value->i = 0; // clear it
+@@ -1504,15 +1664,15 @@
+   if (state == NULL) {
+     return;
+   }
+-  EVT_TRIG_TRACE(JVMTI_EVENT_FIELD_MODIFICATION, 
+-                     ("JVMTI [%s] Trg Field Modification event triggered",  
++  EVT_TRIG_TRACE(JVMTI_EVENT_FIELD_MODIFICATION,
++                     ("JVMTI [%s] Trg Field Modification event triggered",
+                       JvmtiTrace::safe_get_thread_name(thread)));
+ 
+-  JvmtiEnvThreadStateIterator it(state); 
++  JvmtiEnvThreadStateIterator it(state);
+   for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
+     if (ets->is_enabled(JVMTI_EVENT_FIELD_MODIFICATION)) {
+       EVT_TRACE(JVMTI_EVENT_FIELD_MODIFICATION,
+-                   ("JVMTI [%s] Evt Field Modification event sent %s.%s @ %d",  
++                   ("JVMTI [%s] Evt Field Modification event sent %s.%s @ %d",
+                     JvmtiTrace::safe_get_thread_name(thread),
+                     (mh() == NULL) ? "NULL" : mh()->klass_name()->as_C_string(),
+                     (mh() == NULL) ? "NULL" : mh()->name()->as_C_string(),
+@@ -1525,7 +1685,7 @@
+       JvmtiJavaThreadEventTransition jet(thread);
+       jvmtiEventFieldModification callback = env->callbacks()->FieldModification;
+       if (callback != NULL) {
+-        (*callback)(env->jvmti_external(), jem.jni_env(), jem.jni_thread(), 
++        (*callback)(env->jvmti_external(), jem.jni_env(), jem.jni_thread(),
+                     jem.jni_methodID(), jem.location(),
+                     field_jclass, field_jobject, field, sig_type, *value_ptr);
+       }
+@@ -1533,185 +1693,21 @@
+   }
+ }
+ 
+-class JvmtiClassFileLoadHookPoster : public StackObj {
+- private:
+-  symbolHandle         _h_name;
+-  Handle               _class_loader;
+-  Handle               _h_protection_domain;
+-  unsigned char **     _data_ptr;
+-  unsigned char **     _end_ptr;
+-  JavaThread *         _thread;
+-  jint                 _curr_len;
+-  unsigned char *      _curr_data;
+-  JvmtiEnv *           _curr_env; 
+-  jint *               _cached_length_ptr;
+-  unsigned char **     _cached_data_ptr;
+-  JvmtiThreadState *   _state;
+-  KlassHandle *        _h_class_being_redefined;
+-  JvmtiClassLoadKind   _load_kind;
+-
+- public:
+-  inline JvmtiClassFileLoadHookPoster(symbolHandle h_name, Handle class_loader,
+-                                      Handle h_protection_domain, 
+-                                      unsigned char **data_ptr, unsigned char **end_ptr, 
+-                                      unsigned char **cached_data_ptr, 
+-                                      jint *cached_length_ptr) {
+-    _h_name = h_name;
+-    _class_loader = class_loader;
+-    _h_protection_domain = h_protection_domain;
+-    _data_ptr = data_ptr;
+-    _end_ptr = end_ptr;
+-    _thread = JavaThread::current();
+-    _curr_len = *end_ptr - *data_ptr;
+-    _curr_data = *data_ptr;
+-    _curr_env = NULL; 
+-    _cached_length_ptr = cached_length_ptr;
+-    _cached_data_ptr = cached_data_ptr;
+-    *_cached_length_ptr = 0;
+-    *_cached_data_ptr = NULL;
+-
+-    _state = _thread->jvmti_thread_state();
+-    if (_state != NULL) {
+-      _h_class_being_redefined = _state->get_class_being_redefined();
+-      _load_kind = _state->get_class_load_kind();
+-      // Clear class_being_redefined flag here. The action 
+-      // from agent handler could generate a new class file load
+-      // hook event and if it is not cleared the new event generated
+-      // from regular class file load could have this stale redefined
+-      // class handle info. 
+-      _state->clear_class_being_redefined();
+-    } else {
+-      // redefine and retransform will always set the thread state
+-      _h_class_being_redefined = (KlassHandle *) NULL;
+-      _load_kind = jvmti_class_load_kind_load;
+-    }
+-  }
+-
+-  void post() {
+-//    EVT_TRIG_TRACE(JVMTI_EVENT_CLASS_FILE_LOAD_HOOK,
+-//                   ("JVMTI [%s] class file load hook event triggered",  
+-//                    JvmtiTrace::safe_get_thread_name(_thread)));
+-    post_all_envs();
+-    copy_modified_data();
+-  }
+-
+- private:
+-  void post_all_envs() {
+-    if (_load_kind != jvmti_class_load_kind_retransform) {
+-      // for class load and redefine, 
+-      // call the non-retransformable agents
+-      JvmtiEnvIterator it; 
+-      for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
+-        if (!env->is_retransformable() && env->is_enabled(JVMTI_EVENT_CLASS_FILE_LOAD_HOOK)) {
+-          // non-retransformable agents cannot retransform back,
+-          // so no need to cache the original class file bytes
+-          post_to_env(env, false);
+-        }
+-      }
+-    }
+-    JvmtiEnvIterator it; 
+-    for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
+-      // retransformable agents get all events
+-      if (env->is_retransformable() && env->is_enabled(JVMTI_EVENT_CLASS_FILE_LOAD_HOOK)) {
+-        // retransformable agents need to cache the original class file 
+-        // bytes if changes are made via the ClassFileLoadHook
+-        post_to_env(env, true);
+-      }
+-    }
+-  }
+-
+-  void post_to_env(JvmtiEnv* env, bool caching_needed) {
+-    unsigned char *new_data = NULL;
+-    jint new_len = 0;
+-//    EVT_TRACE(JVMTI_EVENT_CLASS_FILE_LOAD_HOOK,
+-//     ("JVMTI [%s] class file load hook event sent %s  data_ptr = %d, data_len = %d",  
+-//               JvmtiTrace::safe_get_thread_name(_thread),
+-//               _h_name.is_null() ? "NULL" : _h_name->as_utf8(),
+-//               _curr_data, _curr_len ));
+-    JvmtiClassFileLoadEventMark jem(_thread, _h_name, _class_loader, 
+-                                    _h_protection_domain, 
+-                                    _h_class_being_redefined);
+-    JvmtiJavaThreadEventTransition jet(_thread);
+-    JNIEnv* jni_env =  (JvmtiEnv::get_phase() == JVMTI_PHASE_PRIMORDIAL)? 
+-                                                        NULL : jem.jni_env();
+-    jvmtiEventClassFileLoadHook callback = env->callbacks()->ClassFileLoadHook;
+-    if (callback != NULL) {
+-      (*callback)(env->jvmti_external(), jni_env,
+-                  jem.class_being_redefined(),
+-                  jem.jloader(), jem.class_name(), 
+-                  jem.protection_domain(),
+-                  _curr_len, _curr_data,
+-                  &new_len, &new_data);
+-    }
+-    if (new_data != NULL) {
+-      // this agent has modified class data. 
+-      if (caching_needed && *_cached_data_ptr == NULL) {
+-        // data has been changed by the new retransformable agent
+-        // and it hasn't already been cached, cache it
+-        *_cached_data_ptr = (unsigned char *)os::malloc(_curr_len);
+-        memcpy(*_cached_data_ptr, _curr_data, _curr_len);
+-        *_cached_length_ptr = _curr_len;
+-      }
+-
+-      if (_curr_data != *_data_ptr) {
+-        // curr_data is previous agent modified class data.
+-        // And this has been changed by the new agent so
+-        // we can delete it now.           
+-        _curr_env->Deallocate(_curr_data);
+-      }
+-
+-      // Class file data has changed by the current agent.
+-      _curr_data = new_data;
+-      _curr_len = new_len;
+-      // Save the current agent env we need this to deallocate the
+-      // memory allocated by this agent.
+-      _curr_env = env;
+-    }
+-  }
+-
+-  void copy_modified_data() {
+-    // if one of the agent has modified class file data.
+-    // Copy modified class data to new resources array.
+-    if (_curr_data != *_data_ptr) {
+-      *_data_ptr = NEW_RESOURCE_ARRAY(u1, _curr_len);
+-      memcpy(*_data_ptr, _curr_data, _curr_len);
+-      *_end_ptr = *_data_ptr + _curr_len;
+-      _curr_env->Deallocate(_curr_data);
+-    }
+-  }
+-};
+-
+-// this entry is for class file load hook on class load, redefine and retransform
+-void JvmtiExport::post_class_file_load_hook(symbolHandle h_name, 
+-                                            Handle class_loader,
+-                                            Handle h_protection_domain, 
+-                                            unsigned char **data_ptr, 
+-                                            unsigned char **end_ptr, 
+-                                            unsigned char **cached_data_ptr, 
+-                                            jint *cached_length_ptr) {
+-  JvmtiClassFileLoadHookPoster poster(h_name, class_loader, 
+-                                      h_protection_domain, 
+-                                      data_ptr, end_ptr,
+-                                      cached_data_ptr, 
+-                                      cached_length_ptr);
+-  poster.post();
+-}
+-
+ void JvmtiExport::post_native_method_bind(methodOop method, address* function_ptr) {
+-  JavaThread* thread = JavaThread::current();    
++  JavaThread* thread = JavaThread::current();
+   assert(thread->thread_state() == _thread_in_vm, "must be in vm state");
+ 
+   HandleMark hm(thread);
+   methodHandle mh(thread, method);
+ 
+-  EVT_TRIG_TRACE(JVMTI_EVENT_NATIVE_METHOD_BIND, ("JVMTI [%s] Trg Native Method Bind event triggered",  
++  EVT_TRIG_TRACE(JVMTI_EVENT_NATIVE_METHOD_BIND, ("JVMTI [%s] Trg Native Method Bind event triggered",
+                       JvmtiTrace::safe_get_thread_name(thread)));
+ 
+   if (JvmtiEventController::is_enabled(JVMTI_EVENT_NATIVE_METHOD_BIND)) {
+-    JvmtiEnvIterator it; 
++    JvmtiEnvIterator it;
+     for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
+-      if (env->is_enabled(JVMTI_EVENT_NATIVE_METHOD_BIND)) { 
+-        EVT_TRACE(JVMTI_EVENT_NATIVE_METHOD_BIND, ("JVMTI [%s] Evt Native Method Bind event sent",  
++      if (env->is_enabled(JVMTI_EVENT_NATIVE_METHOD_BIND)) {
++        EVT_TRACE(JVMTI_EVENT_NATIVE_METHOD_BIND, ("JVMTI [%s] Evt Native Method Bind event sent",
+                      JvmtiTrace::safe_get_thread_name(thread) ));
+ 
+         JvmtiMethodEventMark jem(thread, mh);
+@@ -1719,7 +1715,7 @@
+         JNIEnv* jni_env =  JvmtiEnv::get_phase() == JVMTI_PHASE_PRIMORDIAL? NULL : jem.jni_env();
+         jvmtiEventNativeMethodBind callback = env->callbacks()->NativeMethodBind;
+         if (callback != NULL) {
+-          (*callback)(env->jvmti_external(), jni_env, jem.jni_thread(), 
++          (*callback)(env->jvmti_external(), jni_env, jem.jni_thread(),
+                       jem.jni_methodID(), (void*)(*function_ptr), (void**)function_ptr);
+         }
+       }
+@@ -1729,22 +1725,22 @@
+ 
+ 
+ void JvmtiExport::post_compiled_method_load(nmethod *nm) {
+-  // If there are pending CompiledMethodUnload events then these are 
++  // If there are pending CompiledMethodUnload events then these are
+   // posted before this CompiledMethodLoad event. We "lock" the nmethod and
+   // maintain a handle to the methodOop to ensure that the nmethod isn't
+   // flushed or unloaded while posting the events.
+-  JavaThread* thread = JavaThread::current();    
++  JavaThread* thread = JavaThread::current();
+   if (have_pending_compiled_method_unload_events()) {
+     methodHandle mh(thread, nm->method());
+-    nmethodLocker nml(nm);    
++    nmethodLocker nml(nm);
+     post_pending_compiled_method_unload_events();
+   }
+ 
+   EVT_TRIG_TRACE(JVMTI_EVENT_COMPILED_METHOD_LOAD,
+-                 ("JVMTI [%s] method compile load event triggered",  
++                 ("JVMTI [%s] method compile load event triggered",
+                  JvmtiTrace::safe_get_thread_name(thread)));
+ 
+-  JvmtiEnvIterator it; 
++  JvmtiEnvIterator it;
+   for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
+     if (env->is_enabled(JVMTI_EVENT_COMPILED_METHOD_LOAD)) {
+ 
+@@ -1754,7 +1750,7 @@
+                 (nm->method() == NULL) ? "NULL" : nm->method()->klass_name()->as_C_string(),
+                 (nm->method() == NULL) ? "NULL" : nm->method()->name()->as_C_string()));
+ 
+-      ResourceMark rm(thread);  
++      ResourceMark rm(thread);
+       JvmtiCompiledMethodLoadEventMark jem(thread, nm);
+       JvmtiJavaThreadEventTransition jet(thread);
+       jvmtiEventCompiledMethodLoad callback = env->callbacks()->CompiledMethodLoad;
+@@ -1769,14 +1765,14 @@
+ 
+ 
+ // post a COMPILED_METHOD_LOAD event for a given environment
+-void JvmtiExport::post_compiled_method_load(JvmtiEnv* env, const jmethodID method, const jint length, 
+-					    const void *code_begin, const jint map_length, 
+-					    const jvmtiAddrLocationMap* map)
++void JvmtiExport::post_compiled_method_load(JvmtiEnv* env, const jmethodID method, const jint length,
++                                            const void *code_begin, const jint map_length,
++                                            const jvmtiAddrLocationMap* map)
+ {
+-  JavaThread* thread = JavaThread::current();    
++  JavaThread* thread = JavaThread::current();
+   EVT_TRIG_TRACE(JVMTI_EVENT_COMPILED_METHOD_LOAD,
+-                 ("JVMTI [%s] method compile load event triggered (by GenerateEvents)",  
+-                 JvmtiTrace::safe_get_thread_name(thread))); 
++                 ("JVMTI [%s] method compile load event triggered (by GenerateEvents)",
++                 JvmtiTrace::safe_get_thread_name(thread)));
+   if (env->is_enabled(JVMTI_EVENT_COMPILED_METHOD_LOAD)) {
+ 
+     EVT_TRACE(JVMTI_EVENT_COMPILED_METHOD_LOAD,
+@@ -1813,12 +1809,12 @@
+   EVT_TRIG_TRACE(JVMTI_EVENT_DYNAMIC_CODE_GENERATED,
+                  ("JVMTI [%s] method dynamic code generated event triggered",
+                  JvmtiTrace::safe_get_thread_name(thread)));
+-  JvmtiEnvIterator it; 
++  JvmtiEnvIterator it;
+   for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
+     if (env->is_enabled(JVMTI_EVENT_DYNAMIC_CODE_GENERATED)) {
+       EVT_TRACE(JVMTI_EVENT_DYNAMIC_CODE_GENERATED,
+-		("JVMTI [%s] dynamic code generated event sent for %s",
+-		JvmtiTrace::safe_get_thread_name(thread), name));
++                ("JVMTI [%s] dynamic code generated event sent for %s",
++                JvmtiTrace::safe_get_thread_name(thread), name));
+       JvmtiEventMark jem(thread);
+       JvmtiJavaThreadEventTransition jet(thread);
+       jint length = (jint)pointer_delta(code_end, code_begin, sizeof(char));
+@@ -1828,12 +1824,12 @@
+       }
+     }
+   }
+-} 
++}
+ 
+ void JvmtiExport::post_dynamic_code_generated(const char *name, const void *code_begin, const void *code_end) {
+   // In theory everyone coming thru here is in_vm but we need to be certain
+   // because a callee will do a vm->native transition
+-  ThreadInVMfromUnknown __tiv;   
++  ThreadInVMfromUnknown __tiv;
+   jvmtiPhase phase = JvmtiEnv::get_phase();
+   if (phase == JVMTI_PHASE_PRIMORDIAL || phase == JVMTI_PHASE_START) {
+     post_dynamic_code_generated_internal(name, code_begin, code_end);
+@@ -1849,17 +1845,17 @@
+ 
+ // post a DYNAMIC_CODE_GENERATED event for a given environment
+ // used by GenerateEvents
+-void JvmtiExport::post_dynamic_code_generated(JvmtiEnv* env, const char *name, 
+-					      const void *code_begin, const void *code_end)
++void JvmtiExport::post_dynamic_code_generated(JvmtiEnv* env, const char *name,
++                                              const void *code_begin, const void *code_end)
+ {
+   JavaThread* thread = JavaThread::current();
+   EVT_TRIG_TRACE(JVMTI_EVENT_DYNAMIC_CODE_GENERATED,
+                  ("JVMTI [%s] dynamic code generated event triggered (by GenerateEvents)",
+-		  JvmtiTrace::safe_get_thread_name(thread)));
++                  JvmtiTrace::safe_get_thread_name(thread)));
+   if (env->is_enabled(JVMTI_EVENT_DYNAMIC_CODE_GENERATED)) {
+     EVT_TRACE(JVMTI_EVENT_DYNAMIC_CODE_GENERATED,
+               ("JVMTI [%s] dynamic code generated event sent for %s",
+-	       JvmtiTrace::safe_get_thread_name(thread), name));
++               JvmtiTrace::safe_get_thread_name(thread), name));
+     JvmtiEventMark jem(thread);
+     JvmtiJavaThreadEventTransition jet(thread);
+     jint length = (jint)pointer_delta(code_end, code_begin, sizeof(char));
+@@ -1868,11 +1864,11 @@
+       (*callback)(env->jvmti_external(), name, (void*)code_begin, length);
+     }
+   }
+-} 
++}
+ 
+ // post a DynamicCodeGenerated event while holding locks in the VM.
+-void JvmtiExport::post_dynamic_code_generated_while_holding_locks(const char* name, 
+-								  address code_begin, address code_end)
++void JvmtiExport::post_dynamic_code_generated_while_holding_locks(const char* name,
++                                                                  address code_begin, address code_end)
+ {
+   // register the stub with the current dynamic code event collector
+   JvmtiThreadState* state = JvmtiThreadState::state_for(JavaThread::current());
+@@ -1883,7 +1879,7 @@
+ 
+ // Collect all the vm internally allocated objects which are visible to java world
+ void JvmtiExport::record_vm_internal_object_allocation(oop obj) {
+-  Thread* thread = ThreadLocalStorage::thread(); 
++  Thread* thread = ThreadLocalStorage::thread();
+   if (thread != NULL && thread->is_Java_thread())  {
+     // Can not take safepoint here.
+     No_Safepoint_Verifier no_sfpt;
+@@ -1897,10 +1893,10 @@
+       if (collector != NULL && collector->is_enabled()) {
+         // Don't record classes as these will be notified via the ClassLoad
+         // event.
+-        if (obj->klass() != SystemDictionary::class_klass()) {	
++        if (obj->klass() != SystemDictionary::class_klass()) {
+           collector->record_allocation(obj);
+         }
+-      } 
++      }
+     }
+   }
+ }
+@@ -1910,7 +1906,7 @@
+   EVT_TRIG_TRACE(JVMTI_EVENT_GARBAGE_COLLECTION_FINISH,
+                  ("JVMTI [%s] garbage collection finish event triggered",
+                   JvmtiTrace::safe_get_thread_name(thread)));
+-  JvmtiEnvIterator it; 
++  JvmtiEnvIterator it;
+   for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
+     if (env->is_enabled(JVMTI_EVENT_GARBAGE_COLLECTION_FINISH)) {
+       EVT_TRACE(JVMTI_EVENT_GARBAGE_COLLECTION_FINISH,
+@@ -1924,14 +1920,14 @@
+       }
+     }
+   }
+-} 
++}
+ 
+ void JvmtiExport::post_garbage_collection_start() {
+   Thread* thread = Thread::current(); // this event is posted from vm-thread.
+   EVT_TRIG_TRACE(JVMTI_EVENT_GARBAGE_COLLECTION_START,
+                  ("JVMTI [%s] garbage collection start event triggered",
+                   JvmtiTrace::safe_get_thread_name(thread)));
+-  JvmtiEnvIterator it; 
++  JvmtiEnvIterator it;
+   for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
+     if (env->is_enabled(JVMTI_EVENT_GARBAGE_COLLECTION_START)) {
+       EVT_TRACE(JVMTI_EVENT_GARBAGE_COLLECTION_START,
+@@ -1945,14 +1941,14 @@
+       }
+     }
+   }
+-} 
++}
+ 
+ void JvmtiExport::post_data_dump() {
+   Thread *thread = Thread::current();
+-  EVT_TRIG_TRACE(JVMTI_EVENT_DATA_DUMP_REQUEST, 
++  EVT_TRIG_TRACE(JVMTI_EVENT_DATA_DUMP_REQUEST,
+                  ("JVMTI [%s] data dump request event triggered",
+                   JvmtiTrace::safe_get_thread_name(thread)));
+-  JvmtiEnvIterator it; 
++  JvmtiEnvIterator it;
+   for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
+     if (env->is_enabled(JVMTI_EVENT_DATA_DUMP_REQUEST)) {
+       EVT_TRACE(JVMTI_EVENT_DATA_DUMP_REQUEST,
+@@ -1971,7 +1967,7 @@
+ void JvmtiExport::post_monitor_contended_enter(JavaThread *thread, ObjectMonitor *obj_mntr) {
+   oop object = (oop)obj_mntr->object();
+   if (!ServiceUtil::visible_oop(object)) {
+-    // Ignore monitor contended enter for vm internal object. 
++    // Ignore monitor contended enter for vm internal object.
+     return;
+   }
+   JvmtiThreadState *state = thread->jvmti_thread_state();
+@@ -1982,15 +1978,15 @@
+   HandleMark hm(thread);
+   Handle h(thread, object);
+ 
+-  EVT_TRIG_TRACE(JVMTI_EVENT_MONITOR_CONTENDED_ENTER, 
+-                     ("JVMTI [%s] montior contended enter event triggered",  
++  EVT_TRIG_TRACE(JVMTI_EVENT_MONITOR_CONTENDED_ENTER,
++                     ("JVMTI [%s] monitor contended enter event triggered",
+                       JvmtiTrace::safe_get_thread_name(thread)));
+ 
+-  JvmtiEnvThreadStateIterator it(state); 
++  JvmtiEnvThreadStateIterator it(state);
+   for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
+     if (ets->is_enabled(JVMTI_EVENT_MONITOR_CONTENDED_ENTER)) {
+       EVT_TRACE(JVMTI_EVENT_MONITOR_CONTENDED_ENTER,
+-                   ("JVMTI [%s] monitor contended enter event sent",  
++                   ("JVMTI [%s] monitor contended enter event sent",
+                     JvmtiTrace::safe_get_thread_name(thread)));
+       JvmtiMonitorEventMark  jem(thread, h());
+       JvmtiEnv *env = ets->get_env();
+@@ -2006,7 +2002,7 @@
+ void JvmtiExport::post_monitor_contended_entered(JavaThread *thread, ObjectMonitor *obj_mntr) {
+   oop object = (oop)obj_mntr->object();
+   if (!ServiceUtil::visible_oop(object)) {
+-    // Ignore monitor contended entered for vm internal object. 
++    // Ignore monitor contended entered for vm internal object.
+     return;
+   }
+   JvmtiThreadState *state = thread->jvmti_thread_state();
+@@ -2017,17 +2013,17 @@
+   HandleMark hm(thread);
+   Handle h(thread, object);
+ 
+-  EVT_TRIG_TRACE(JVMTI_EVENT_MONITOR_CONTENDED_ENTERED, 
+-                     ("JVMTI [%s] montior contended entered event triggered",  
++  EVT_TRIG_TRACE(JVMTI_EVENT_MONITOR_CONTENDED_ENTERED,
++                     ("JVMTI [%s] monitor contended entered event triggered",
+                       JvmtiTrace::safe_get_thread_name(thread)));
+ 
+-  JvmtiEnvThreadStateIterator it(state); 
++  JvmtiEnvThreadStateIterator it(state);
+   for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
+     if (ets->is_enabled(JVMTI_EVENT_MONITOR_CONTENDED_ENTERED)) {
+       EVT_TRACE(JVMTI_EVENT_MONITOR_CONTENDED_ENTERED,
+-                   ("JVMTI [%s] monitor contended enter event sent",  
++                   ("JVMTI [%s] monitor contended entered event sent",
+                     JvmtiTrace::safe_get_thread_name(thread)));
+-      JvmtiMonitorEventMark  jem(thread, h());        
++      JvmtiMonitorEventMark  jem(thread, h());
+       JvmtiEnv *env = ets->get_env();
+       JvmtiThreadEventTransition jet(thread);
+       jvmtiEventMonitorContendedEntered callback = env->callbacks()->MonitorContendedEntered;
+@@ -2047,16 +2043,16 @@
+ 
+   HandleMark hm(thread);
+   Handle h(thread, object);
+-  
+-  EVT_TRIG_TRACE(JVMTI_EVENT_MONITOR_WAIT, 
+-                     ("JVMTI [%s] montior wait event triggered",  
++
++  EVT_TRIG_TRACE(JVMTI_EVENT_MONITOR_WAIT,
++                     ("JVMTI [%s] monitor wait event triggered",
+                       JvmtiTrace::safe_get_thread_name(thread)));
+ 
+-  JvmtiEnvThreadStateIterator it(state); 
++  JvmtiEnvThreadStateIterator it(state);
+   for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
+     if (ets->is_enabled(JVMTI_EVENT_MONITOR_WAIT)) {
+       EVT_TRACE(JVMTI_EVENT_MONITOR_WAIT,
+-                   ("JVMTI [%s] monitor wait event sent ",  
++                   ("JVMTI [%s] monitor wait event sent ",
+                     JvmtiTrace::safe_get_thread_name(thread)));
+       JvmtiMonitorEventMark  jem(thread, h());
+       JvmtiEnv *env = ets->get_env();
+@@ -2073,7 +2069,7 @@
+ void JvmtiExport::post_monitor_waited(JavaThread *thread, ObjectMonitor *obj_mntr, jboolean timed_out) {
+   oop object = (oop)obj_mntr->object();
+   if (!ServiceUtil::visible_oop(object)) {
+-    // Ignore monitor waited for vm internal object. 
++    // Ignore monitor waited for vm internal object.
+     return;
+   }
+   JvmtiThreadState *state = thread->jvmti_thread_state();
+@@ -2084,15 +2080,15 @@
+   HandleMark hm(thread);
+   Handle h(thread, object);
+ 
+-  EVT_TRIG_TRACE(JVMTI_EVENT_MONITOR_WAITED, 
+-                     ("JVMTI [%s] montior waited event triggered",  
++  EVT_TRIG_TRACE(JVMTI_EVENT_MONITOR_WAITED,
++                     ("JVMTI [%s] monitor waited event triggered",
+                       JvmtiTrace::safe_get_thread_name(thread)));
+ 
+-  JvmtiEnvThreadStateIterator it(state); 
++  JvmtiEnvThreadStateIterator it(state);
+   for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
+     if (ets->is_enabled(JVMTI_EVENT_MONITOR_WAITED)) {
+       EVT_TRACE(JVMTI_EVENT_MONITOR_WAITED,
+-                   ("JVMTI [%s] monitor waited event sent ",  
++                   ("JVMTI [%s] monitor waited event sent ",
+                     JvmtiTrace::safe_get_thread_name(thread)));
+       JvmtiMonitorEventMark  jem(thread, h());
+       JvmtiEnv *env = ets->get_env();
+@@ -2108,44 +2104,31 @@
+ 
+ 
+ void JvmtiExport::post_vm_object_alloc(JavaThread *thread,  oop object) {
+-  EVT_TRIG_TRACE(JVMTI_EVENT_VM_OBJECT_ALLOC, ("JVMTI [%s] Trg vm object alloc triggered",  
++  EVT_TRIG_TRACE(JVMTI_EVENT_VM_OBJECT_ALLOC, ("JVMTI [%s] Trg vm object alloc triggered",
+                       JvmtiTrace::safe_get_thread_name(thread)));
+   if (object == NULL) {
+     return;
+   }
+   HandleMark hm(thread);
+   Handle h(thread, object);
+-  JvmtiEnvIterator it; 
++  JvmtiEnvIterator it;
+   for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
+     if (env->is_enabled(JVMTI_EVENT_VM_OBJECT_ALLOC)) {
+-      EVT_TRACE(JVMTI_EVENT_VM_OBJECT_ALLOC, ("JVMTI [%s] Evt vmobject alloc sent %s",  
++      EVT_TRACE(JVMTI_EVENT_VM_OBJECT_ALLOC, ("JVMTI [%s] Evt vmobject alloc sent %s",
+                                          JvmtiTrace::safe_get_thread_name(thread),
+                                          object==NULL? "NULL" : Klass::cast(java_lang_Class::as_klassOop(object))->external_name()));
+- 
++
+       JvmtiVMObjectAllocEventMark jem(thread, h());
+       JvmtiJavaThreadEventTransition jet(thread);
+       jvmtiEventVMObjectAlloc callback = env->callbacks()->VMObjectAlloc;
+       if (callback != NULL) {
+-        (*callback)(env->jvmti_external(), jem.jni_env(), jem.jni_thread(), 
++        (*callback)(env->jvmti_external(), jem.jni_env(), jem.jni_thread(),
+                     jem.jni_jobject(), jem.jni_class(), jem.size());
+       }
+     }
+   }
+ }
+ 
+-
+-char**
+-JvmtiExport::get_all_native_method_prefixes(int* count_ptr) {
+-  // Have to grab JVMTI thread state lock to be sure environment doesn't
+-  // go away while we iterate them.  No locks during VM bring-up.
+-  if (Threads::number_of_threads() == 0 || SafepointSynchronize::is_at_safepoint()) {
+-    return JvmtiEnvBase::get_all_native_method_prefixes(count_ptr);
+-  } else {
+-    MutexLocker mu(JvmtiThreadState_lock);
+-    return JvmtiEnvBase::get_all_native_method_prefixes(count_ptr);
+-  }
+-}
+-
+ ////////////////////////////////////////////////////////////////////////////////////////////////
+ 
+ void JvmtiExport::cleanup_thread(JavaThread* thread) {
+@@ -2175,6 +2158,7 @@
+   typedef jint (JNICALL *OnAttachEntry_t)(JavaVM*, char *, void *);
+ }
+ 
++#ifndef SERVICES_KERNEL
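++// Attach-on-demand agent loading; presumably compiled out of the services
++// kernel build, which does not carry the dynamic attach machinery.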
+ jint JvmtiExport::load_agent_library(AttachOperation* op, outputStream* st) {
+   char ebuf[1024];
+   char buffer[JVM_MAXPATHLEN];
+@@ -2190,7 +2174,7 @@
+   bool is_absolute_path = (absParam != NULL) && (strcmp(absParam,"true")==0);
+ 
+ 
+-  // If the path is absolute we attempt to load the library. Otherwise we try to 
++  // If the path is absolute we attempt to load the library. Otherwise we try to
+   // load it from the standard dll directory.
+ 
+   if (is_absolute_path) {
+@@ -2215,16 +2199,16 @@
+     OnAttachEntry_t on_attach_entry = NULL;
+     const char *on_attach_symbols[] = AGENT_ONATTACH_SYMBOLS;
+     for (uint symbol_index = 0; symbol_index < ARRAY_SIZE(on_attach_symbols); symbol_index++) {
+-      on_attach_entry = 
+-	CAST_TO_FN_PTR(OnAttachEntry_t, hpi::dll_lookup(library, on_attach_symbols[symbol_index]));
++      on_attach_entry =
++        CAST_TO_FN_PTR(OnAttachEntry_t, hpi::dll_lookup(library, on_attach_symbols[symbol_index]));
+       if (on_attach_entry != NULL) break;
+     }
+-                                                                                                      
++
+     if (on_attach_entry == NULL) {
+       // Agent_OnAttach missing - unload library
+       hpi::dll_unload(library);
+     } else {
+-      // Invoke the Agent_OnAttach function 
++      // Invoke the Agent_OnAttach function
+       JavaThread* THREAD = JavaThread::current();
+       {
+         extern struct JavaVM_ main_vm;
+@@ -2252,9 +2236,10 @@
+   }
+   return result;
+ }
++#endif // SERVICES_KERNEL
+ 
+ // CMS has completed reference processing so may need to update
+-// tag maps. 
++// tag maps.
+ void JvmtiExport::cms_ref_processing_epilogue() {
+   if (JvmtiEnv::environments_might_exist()) {
+     JvmtiTagMap::cms_ref_processing_epilogue();
+@@ -2268,11 +2253,11 @@
+ void JvmtiEventCollector::setup_jvmti_thread_state() {
+   // set this event collector to be the current one.
+   JvmtiThreadState* state = JvmtiThreadState::state_for(JavaThread::current());
+-  if (is_vm_object_alloc_event()) { 
+-    _prev = state->get_vm_object_alloc_event_collector(); 
++  if (is_vm_object_alloc_event()) {
++    _prev = state->get_vm_object_alloc_event_collector();
+     state->set_vm_object_alloc_event_collector((JvmtiVMObjectAllocEventCollector *)this);
+   } else if (is_dynamic_code_event()) {
+-    _prev = state->get_dynamic_code_event_collector(); 
++    _prev = state->get_dynamic_code_event_collector();
+     state->set_dynamic_code_event_collector((JvmtiDynamicCodeEventCollector *)this);
+   }
+ }
+@@ -2282,22 +2267,22 @@
+ void JvmtiEventCollector::unset_jvmti_thread_state() {
+   JvmtiThreadState* state = JavaThread::current()->jvmti_thread_state();
+   if (state != NULL) {
+-    // restore the previous event collector (if any)    
+-    if (is_vm_object_alloc_event()) { 
++    // restore the previous event collector (if any)
++    if (is_vm_object_alloc_event()) {
+       if (state->get_vm_object_alloc_event_collector() == this) {
+         state->set_vm_object_alloc_event_collector((JvmtiVMObjectAllocEventCollector *)_prev);
+       } else {
+-	// this thread's jvmti state was created during the scope of
+-	// the event collector.
++        // this thread's jvmti state was created during the scope of
++        // the event collector.
+       }
+     } else {
+       if (is_dynamic_code_event()) {
+-	if (state->get_dynamic_code_event_collector() == this) {
++        if (state->get_dynamic_code_event_collector() == this) {
+           state->set_dynamic_code_event_collector((JvmtiDynamicCodeEventCollector *)_prev);
+-	} else {
+-	  // this thread's jvmti state was created during the scope of
+-	  // the event collector.
+-	}
++        } else {
++          // this thread's jvmti state was created during the scope of
++          // the event collector.
++        }
+       }
+     }
+   }
+@@ -2310,10 +2295,10 @@
+   }
+ }
+ 
+-// iterate over any code blob descriptors collected and post a 
++// iterate over any code blob descriptors collected and post a
+ // DYNAMIC_CODE_GENERATED event to the profiler.
+ JvmtiDynamicCodeEventCollector::~JvmtiDynamicCodeEventCollector() {
+-  assert(!JavaThread::current()->owns_locks(), "all locks must be released to post deferred events");    
++  assert(!JavaThread::current()->owns_locks(), "all locks must be released to post deferred events");
+  // iterate over any code blob descriptors that we collected
+  if (_code_blobs != NULL) {
+    for (int i=0; i<_code_blobs->length(); i++) {
+@@ -2326,7 +2311,7 @@
+  unset_jvmti_thread_state();
+ }
+ 
+-// register a stub 
++// register a stub
+ void JvmtiDynamicCodeEventCollector::register_stub(const char* name, address start, address end) {
+  if (_code_blobs == NULL) {
+    _code_blobs = new (ResourceObj::C_HEAP) GrowableArray<JvmtiCodeBlobDesc*>(1,true);
+@@ -2337,7 +2322,7 @@
+ // Setup current thread to record vm allocated objects.
+ JvmtiVMObjectAllocEventCollector::JvmtiVMObjectAllocEventCollector() : _allocated(NULL) {
+   if (JvmtiExport::should_post_vm_object_alloc()) {
+-    _enable = true; 
++    _enable = true;
+     setup_jvmti_thread_state();
+   } else {
+     _enable = false;
+@@ -2352,7 +2337,7 @@
+     for (int i = 0; i < _allocated->length(); i++) {
+       oop obj = _allocated->at(i);
+       if (ServiceUtil::visible_oop(obj)) {
+-        JvmtiExport::post_vm_object_alloc(JavaThread::current(), obj);	
++        JvmtiExport::post_vm_object_alloc(JavaThread::current(), obj);
+       }
+     }
+     delete _allocated;
+@@ -2400,13 +2385,13 @@
+ }
+ 
+ 
+-// Disable collection of VMObjectAlloc events 
++// Disable collection of VMObjectAlloc events
+ NoJvmtiVMObjectAllocMark::NoJvmtiVMObjectAllocMark() : _collector(NULL) {
+   // a no-op if VMObjectAlloc event is not enabled
+   if (!JvmtiExport::should_post_vm_object_alloc()) {
+     return;
+   }
+-  Thread* thread = ThreadLocalStorage::thread(); 
++  Thread* thread = ThreadLocalStorage::thread();
+   if (thread != NULL && thread->is_Java_thread())  {
+     JavaThread* current_thread = (JavaThread*)thread;
+     JvmtiThreadState *state = current_thread->jvmti_thread_state();
+@@ -2429,7 +2414,7 @@
+ };
+ 
+ JvmtiGCMarker::JvmtiGCMarker(bool full) : _full(full), _invocation_count(0) {
+-  assert(Thread::current()->is_VM_thread(), "wrong thread");  
++  assert(Thread::current()->is_VM_thread(), "wrong thread");
+ 
+   // if there aren't any JVMTI environments then nothing to do
+   if (!JvmtiEnv::environments_might_exist()) {
+@@ -2465,7 +2450,7 @@
+   // Do clean up tasks that need to be done at a safepoint
+   JvmtiEnvBase::check_for_periodic_clean_up();
+ }
+-  
++
+ JvmtiGCMarker::~JvmtiGCMarker() {
+   // if there aren't any JVMTI environments then nothing to do
+   if (!JvmtiEnv::environments_might_exist()) {
+@@ -2484,7 +2469,7 @@
+     if (Universe::heap()->kind() == CollectedHeap::GenCollectedHeap) {
+       GenCollectedHeap* gch = GenCollectedHeap::heap();
+       if (_invocation_count != (unsigned int)gch->get_gen(1)->stat_record()->invocations) {
+-        _full = true;      
++        _full = true;
+       }
+     } else {
+ #ifndef SERIALGC
+@@ -2506,5 +2491,4 @@
+   // Notify heap/object tagging support
+   JvmtiTagMap::gc_epilogue(_full);
+ }
+-
+-
++#endif // JVMTI_KERNEL
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiExport.hpp openjdk/hotspot/src/share/vm/prims/jvmtiExport.hpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiExport.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiExport.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jvmtiExport.hpp	1.95 07/05/05 17:06:37 JVM"
+-#endif
+ /*
+  * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #ifndef _JAVA_JVMTIEXPORT_H_
+@@ -36,6 +33,23 @@
+ class JvmtiThreadState;
+ class AttachOperation;
+ 
++#ifndef JVMTI_KERNEL
++#define JVMTI_SUPPORT_FLAG(key)                                         \
++  private:                                                              \
++  static bool  _##key;                                                  \
++  public:                                                               \
++  inline static void set_##key(bool on)       { _##key = (on != 0); }   \
++  inline static bool key()                    { return _##key; }
++#else  // JVMTI_KERNEL
++#define JVMTI_SUPPORT_FLAG(key)                                           \
++  private:                                                                \
++  const static bool _##key = false;                                       \
++  public:                                                                 \
++  inline static void set_##key(bool on)       { report_unsupported(on); } \
++  inline static bool key()                    { return _##key; }
++#endif // JVMTI_KERNEL
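++//
++// For example, JVMTI_SUPPORT_FLAG(can_pop_frame) expands in the non-kernel
++// case to:
++//   private:  static bool _can_pop_frame;
++//   public:   inline static void set_can_pop_frame(bool on) { _can_pop_frame = (on != 0); }
++//             inline static bool can_pop_frame()            { return _can_pop_frame; }
++// and in the kernel case to a const-false flag whose setter calls
++// report_unsupported(on).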
++
++
+ // This class contains the JVMTI interface for the rest of hotspot.
+ //
+ class JvmtiExport : public AllStatic {
+@@ -43,93 +57,65 @@
+   static int         _field_access_count;
+   static int         _field_modification_count;
+ 
+-  static bool        _can_get_source_debug_extension;
++  static bool        _can_access_local_variables;
+   static bool        _can_examine_or_deopt_anywhere;
+-  static bool        _can_maintain_original_method_order;
+-  static bool        _can_post_interpreter_events;
+   static bool        _can_hotswap_or_post_breakpoint;
+   static bool        _can_modify_any_class;
+-  static bool	     _can_walk_any_space;
+-  static bool        _can_access_local_variables;
+-  static bool        _can_post_exceptions;
+-  static bool        _can_post_breakpoint;
+-  static bool        _can_post_field_access;
+-  static bool        _can_post_field_modification;
+-  static bool        _can_post_method_entry;
+-  static bool        _can_post_method_exit;
+-  static bool        _can_pop_frame;
+-  static bool        _can_force_early_return;
+-
+-  static bool        _should_post_single_step;
+-  static bool        _should_post_field_access;
+-  static bool        _should_post_field_modification;
+-  static bool        _should_post_class_load;
+-  static bool        _should_post_class_prepare;
+-  static bool        _should_post_class_unload;
+-  static bool        _should_post_class_file_load_hook;
+-  static bool        _should_post_native_method_bind;
+-  static bool        _should_post_compiled_method_load;
+-  static bool        _should_post_compiled_method_unload;
+-  static bool        _should_post_dynamic_code_generated;
+-  static bool        _should_post_monitor_contended_enter;
+-  static bool        _should_post_monitor_contended_entered;
+-  static bool        _should_post_monitor_wait;
+-  static bool        _should_post_monitor_waited;
+-  static bool        _should_post_data_dump;
+-  static bool        _should_post_garbage_collection_start;
+-  static bool        _should_post_garbage_collection_finish;
+-  static bool        _should_post_thread_life;
+-  static bool	     _should_post_object_free;
+-  static bool	     _should_post_resource_exhausted;
+-  static bool        _should_clean_up_heap_objects;
+-  static bool        _should_post_vm_object_alloc;    
++  static bool        _can_walk_any_space;
++
++  JVMTI_SUPPORT_FLAG(can_get_source_debug_extension)
++  JVMTI_SUPPORT_FLAG(can_maintain_original_method_order)
++  JVMTI_SUPPORT_FLAG(can_post_interpreter_events)
++  JVMTI_SUPPORT_FLAG(can_post_exceptions)
++  JVMTI_SUPPORT_FLAG(can_post_breakpoint)
++  JVMTI_SUPPORT_FLAG(can_post_field_access)
++  JVMTI_SUPPORT_FLAG(can_post_field_modification)
++  JVMTI_SUPPORT_FLAG(can_post_method_entry)
++  JVMTI_SUPPORT_FLAG(can_post_method_exit)
++  JVMTI_SUPPORT_FLAG(can_pop_frame)
++  JVMTI_SUPPORT_FLAG(can_force_early_return)
++
++  friend class JvmtiEventControllerPrivate;  // should only modify these flags
++  JVMTI_SUPPORT_FLAG(should_post_single_step)
++  JVMTI_SUPPORT_FLAG(should_post_field_access)
++  JVMTI_SUPPORT_FLAG(should_post_field_modification)
++  JVMTI_SUPPORT_FLAG(should_post_class_load)
++  JVMTI_SUPPORT_FLAG(should_post_class_prepare)
++  JVMTI_SUPPORT_FLAG(should_post_class_unload)
++  JVMTI_SUPPORT_FLAG(should_post_native_method_bind)
++  JVMTI_SUPPORT_FLAG(should_post_compiled_method_load)
++  JVMTI_SUPPORT_FLAG(should_post_compiled_method_unload)
++  JVMTI_SUPPORT_FLAG(should_post_dynamic_code_generated)
++  JVMTI_SUPPORT_FLAG(should_post_monitor_contended_enter)
++  JVMTI_SUPPORT_FLAG(should_post_monitor_contended_entered)
++  JVMTI_SUPPORT_FLAG(should_post_monitor_wait)
++  JVMTI_SUPPORT_FLAG(should_post_monitor_waited)
++  JVMTI_SUPPORT_FLAG(should_post_data_dump)
++  JVMTI_SUPPORT_FLAG(should_post_garbage_collection_start)
++  JVMTI_SUPPORT_FLAG(should_post_garbage_collection_finish)
+ 
++  // ------ the below may not have to be (but are for now)
++  // fixed conditions here ------------
++  // any events can be enabled
++  JVMTI_SUPPORT_FLAG(should_post_thread_life)
++  JVMTI_SUPPORT_FLAG(should_post_object_free)
++  JVMTI_SUPPORT_FLAG(should_post_resource_exhausted)
++
++  // we are holding objects on the heap - need to talk to GC - e.g.
++  // breakpoint info
++  JVMTI_SUPPORT_FLAG(should_clean_up_heap_objects)
++  JVMTI_SUPPORT_FLAG(should_post_vm_object_alloc)
++
++  // If flag cannot be implemented, give an error if on=true
++  static void report_unsupported(bool on);
+ 
+   // these should only be called by the friend class
+   friend class JvmtiManageCapabilities;
+-  inline static void set_can_get_source_debug_extension(bool on)       { _can_get_source_debug_extension = (on != 0); }
+   inline static void set_can_examine_or_deopt_anywhere(bool on)        { _can_examine_or_deopt_anywhere = (on != 0); }
+-  inline static void set_can_maintain_original_method_order(bool on)   { _can_maintain_original_method_order = (on != 0); }
+-  inline static void set_can_post_interpreter_events(bool on)          { _can_post_interpreter_events = (on != 0); }
+-  inline static void set_can_hotswap_or_post_breakpoint(bool on)       { _can_hotswap_or_post_breakpoint = (on != 0); }
+   inline static void set_can_modify_any_class(bool on)                 { _can_modify_any_class = (on != 0); }
+-  inline static void set_can_walk_any_space(bool on)		       { _can_walk_any_space = (on != 0); }
+   inline static void set_can_access_local_variables(bool on)           { _can_access_local_variables = (on != 0); }
+-  inline static void set_can_post_exceptions(bool on)                  { _can_post_exceptions = (on != 0); }
+-  inline static void set_can_post_breakpoint(bool on)                  { _can_post_breakpoint = (on != 0); }
+-  inline static void set_can_post_field_access(bool on)                { _can_post_field_access = (on != 0); }
+-  inline static void set_can_post_field_modification(bool on)          { _can_post_field_modification = (on != 0); }
+-  inline static void set_can_post_method_entry(bool on)                { _can_post_method_entry = (on != 0); }
+-  inline static void set_can_post_method_exit(bool on)                 { _can_post_method_exit = (on != 0); }
+-  inline static void set_can_pop_frame(bool on)                        { _can_pop_frame = (on != 0); }
+-  inline static void set_can_force_early_return(bool on)               { _can_force_early_return = (on != 0); }
+-
+-  // these should only be called by the friend class
+-  friend class JvmtiEventControllerPrivate;
+-  inline static void set_should_post_single_step(bool on)              { _should_post_single_step = on; }
+-  inline static void set_should_post_field_access(bool on)             { _should_post_field_access = on; }
+-  inline static void set_should_post_field_modification(bool on)       { _should_post_field_modification = on; }
+-  inline static void set_should_post_class_load(bool on)               { _should_post_class_load = on; }
+-  inline static void set_should_post_class_prepare(bool on)            { _should_post_class_prepare = on; }
+-  inline static void set_should_post_class_unload(bool on)             { _should_post_class_unload = on; }
+-  inline static void set_should_post_class_file_load_hook(bool on)     { _should_post_class_file_load_hook = on;  }   
+-  inline static void set_should_post_native_method_bind(bool on)       { _should_post_native_method_bind = on; }
+-  inline static void set_should_post_compiled_method_load(bool on)     { _should_post_compiled_method_load = on; }
+-  inline static void set_should_post_compiled_method_unload(bool on)   { _should_post_compiled_method_unload = on; }
+-  inline static void set_should_post_dynamic_code_generated(bool on)   { _should_post_dynamic_code_generated = on;  }   
+-  inline static void set_should_post_monitor_contended_enter(bool on)  { _should_post_monitor_contended_enter = on; }
+-  inline static void set_should_post_monitor_contended_entered(bool on){ _should_post_monitor_contended_entered = on; }
+-  inline static void set_should_post_monitor_wait(bool on)             { _should_post_monitor_wait = on; }
+-  inline static void set_should_post_monitor_waited(bool on)           { _should_post_monitor_waited = on; }
+-  inline static void set_should_post_garbage_collection_start(bool on) { _should_post_garbage_collection_start = on; }
+-  inline static void set_should_post_garbage_collection_finish(bool on){ _should_post_garbage_collection_finish = on; }
+-  inline static void set_should_post_data_dump(bool on)                { _should_post_data_dump = on;  }   
+-  inline static void set_should_post_object_free(bool on)	       { _should_post_object_free = on; }
+-  inline static void set_should_post_resource_exhausted(bool on)       { _should_post_resource_exhausted = on; }
+-  inline static void set_should_post_vm_object_alloc(bool on)	       { _should_post_vm_object_alloc = on; }    
+-
+-  inline static void set_should_post_thread_life(bool on)              { _should_post_thread_life = on; }
+-  inline static void set_should_clean_up_heap_objects(bool on)         { _should_clean_up_heap_objects = on; }
++  inline static void set_can_hotswap_or_post_breakpoint(bool on)       { _can_hotswap_or_post_breakpoint = (on != 0); }
++  inline static void set_can_walk_any_space(bool on)                   { _can_walk_any_space = (on != 0); }
+ 
+   enum {
+     JVMTI_VERSION_MASK   = 0x70000000,
+@@ -137,7 +123,7 @@
+     JVMDI_VERSION_VALUE  = 0x20000000
+   };
+ 
+-  static void post_field_modification(JavaThread *thread, methodOop method, address location, 
++  static void post_field_modification(JavaThread *thread, methodOop method, address location,
+                                       KlassHandle field_klass, Handle object, jfieldID field,
+                                       char sig_type, jvalue *value);
+ 
+@@ -146,34 +132,34 @@
+   // CompiledMethodUnload events are reported from the VM thread so they
+   // are collected in lists (of jmethodID/addresses) and the events are posted later
+   // from threads posting CompiledMethodLoad or DynamicCodeGenerated events.
+-  static bool _have_pending_compiled_method_unload_events;		
+-  static GrowableArray<jmethodID>* _pending_compiled_method_unload_method_ids;	
+-  static GrowableArray<const void *>* _pending_compiled_method_unload_code_begins;	
++  static bool _have_pending_compiled_method_unload_events;
++  static GrowableArray<jmethodID>* _pending_compiled_method_unload_method_ids;
++  static GrowableArray<const void *>* _pending_compiled_method_unload_code_begins;
+   static JavaThread* _current_poster;
+ 
+   // tests if there are CompiledMethodUnload events pending
+-  inline static bool have_pending_compiled_method_unload_events() { 
+-    return _have_pending_compiled_method_unload_events; 
++  inline static bool have_pending_compiled_method_unload_events() {
++    return _have_pending_compiled_method_unload_events;
+   }
+ 
+-  // posts any pending CompiledMethodUnload events. 
++  // posts any pending CompiledMethodUnload events.
+   static void post_pending_compiled_method_unload_events();
+ 
+-  // posts a DynamicCodeGenerated event (internal/private implementation). 
++  // posts a DynamicCodeGenerated event (internal/private implementation).
+   // The public post_dynamic_code_generated* functions make use of the
+   // internal implementation.
+-  static void post_dynamic_code_generated_internal(const char *name, const void *code_begin, const void *code_end);
++  static void post_dynamic_code_generated_internal(const char *name, const void *code_begin, const void *code_end) KERNEL_RETURN;
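++  // (KERNEL_RETURN / KERNEL_RETURN_(code) are assumed to expand to an empty
++  // or code-supplying inline body in kernel builds, making these declarations
++  // no-op stubs there; in full builds they expand to nothing and the
++  // out-of-line definitions apply.)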
+ 
+ 
+   // GenerateEvents support to allow posting of CompiledMethodLoad and
+   // DynamicCodeGenerated events for a given environment.
+   friend class JvmtiCodeBlobEvents;
+ 
+-  static void post_compiled_method_load(JvmtiEnv* env, const jmethodID method, const jint length, 
+-				        const void *code_begin, const jint map_length, 
+-					const jvmtiAddrLocationMap* map);
+-  static void post_dynamic_code_generated(JvmtiEnv* env, const char *name, const void *code_begin, 
+-					  const void *code_end);
++  static void post_compiled_method_load(JvmtiEnv* env, const jmethodID method, const jint length,
++                                        const void *code_begin, const jint map_length,
++                                        const jvmtiAddrLocationMap* map) KERNEL_RETURN;
++  static void post_dynamic_code_generated(JvmtiEnv* env, const char *name, const void *code_begin,
++                                          const void *code_end) KERNEL_RETURN;
+ 
+   // The RedefineClasses() API breaks some invariants in the "regular"
+   // system. For example, there are sanity checks when GC'ing nmethods
+@@ -202,11 +188,11 @@
+   // can_redefine_classes capability is enabled in the OnLoad phase then the compiler
+   // records all dependencies from startup. However if the capability is first
+   // enabled some time later then the dependencies recorded by the compiler
+-  // are incomplete. This flag is used by RedefineClasses to know if the 
++  // are incomplete. This flag is used by RedefineClasses to know if the
+   // dependency information is complete or not.
+   static bool _all_dependencies_are_recorded;
+ 
+- public:  
++ public:
+   inline static bool has_redefined_a_class() {
+     return _has_redefined_a_class;
+   }
+@@ -233,70 +219,11 @@
+   static void enter_live_phase();
+ 
+   // ------ can_* conditions (below) are set at OnLoad and never changed ------------
+-
+-  inline static bool can_get_source_debug_extension()             { return _can_get_source_debug_extension; }
+-
+-  // BP, expression stack, hotswap, interp_only, local_var, monitor info
+   inline static bool can_examine_or_deopt_anywhere()              { return _can_examine_or_deopt_anywhere; }
+-
+-  // JVMDI spec requires this, does this matter for JVMTI?
+-  inline static bool can_maintain_original_method_order()         { return _can_maintain_original_method_order; }
+-
+-  // any of single-step, method-entry/exit, frame-pop, and field-access/modification
+-  inline static bool can_post_interpreter_events()                { return _can_post_interpreter_events; }
+-
+-  inline static bool can_hotswap_or_post_breakpoint()             { return _can_hotswap_or_post_breakpoint; }
+-
+   inline static bool can_modify_any_class()                       { return _can_modify_any_class; }
+-
+-  inline static bool can_walk_any_space()			  { return _can_walk_any_space; }
+-
+-  // can retrieve frames, set/get local variables or hotswap
+   inline static bool can_access_local_variables()                 { return _can_access_local_variables; }
+-
+-  // throw or catch
+-  inline static bool can_post_exceptions()                        { return _can_post_exceptions; }
+-
+-  inline static bool can_post_breakpoint()                        { return _can_post_breakpoint; }
+-  inline static bool can_post_field_access()                      { return _can_post_field_access; }
+-  inline static bool can_post_field_modification()                { return _can_post_field_modification; }
+-  inline static bool can_post_method_entry()                      { return _can_post_method_entry; }
+-  inline static bool can_post_method_exit()                       { return _can_post_method_exit; }
+-  inline static bool can_pop_frame()                              { return _can_pop_frame; }
+-  inline static bool can_force_early_return()                     { return _can_force_early_return; }
+-
+-
+-  // ------ the below maybe don't have to be (but are for now) fixed conditions here ------------
+-  // any events can be enabled
+-  inline static bool should_post_thread_life()                   { return _should_post_thread_life; }
+-
+-
+-  // ------ DYNAMIC conditions here ------------
+-
+-  inline static bool should_post_single_step()                    { return _should_post_single_step; }
+-  inline static bool should_post_field_access()                   { return _should_post_field_access; }
+-  inline static bool should_post_field_modification()             { return _should_post_field_modification; }
+-  inline static bool should_post_class_load()                     { return _should_post_class_load; }
+-  inline static bool should_post_class_prepare()                  { return _should_post_class_prepare; }
+-  inline static bool should_post_class_unload()                   { return _should_post_class_unload; }
+-  inline static bool should_post_class_file_load_hook()           { return _should_post_class_file_load_hook; }
+-  inline static bool should_post_native_method_bind()             { return _should_post_native_method_bind; }
+-  inline static bool should_post_compiled_method_load()           { return _should_post_compiled_method_load; }
+-  inline static bool should_post_compiled_method_unload()         { return _should_post_compiled_method_unload; }
+-  inline static bool should_post_dynamic_code_generated()         { return _should_post_dynamic_code_generated; }
+-  inline static bool should_post_monitor_contended_enter()        { return _should_post_monitor_contended_enter; }
+-  inline static bool should_post_monitor_contended_entered()      { return _should_post_monitor_contended_entered; }
+-  inline static bool should_post_monitor_wait()                   { return _should_post_monitor_wait; }
+-  inline static bool should_post_monitor_waited()                 { return _should_post_monitor_waited; }
+-  inline static bool should_post_data_dump()                      { return _should_post_data_dump; }
+-  inline static bool should_post_garbage_collection_start()       { return _should_post_garbage_collection_start; }
+-  inline static bool should_post_garbage_collection_finish()      { return _should_post_garbage_collection_finish; }
+-  inline static bool should_post_object_free()			  { return _should_post_object_free; }
+-  inline static bool should_post_resource_exhausted()		  { return _should_post_resource_exhausted; }
+-  inline static bool should_post_vm_object_alloc()		  { return _should_post_vm_object_alloc; }
+-
+-  // we are holding objects on the heap - need to talk to GC - e.g. breakpoint info
+-  inline static bool should_clean_up_heap_objects()               { return _should_clean_up_heap_objects; }
++  inline static bool can_hotswap_or_post_breakpoint()             { return _can_hotswap_or_post_breakpoint; }
++  inline static bool can_walk_any_space()                         { return _can_walk_any_space; }
+ 
+   // field access management
+   static address  get_field_access_count_addr();
+@@ -309,106 +236,113 @@
+   static bool is_jvmti_version(jint version)                      { return (version & JVMTI_VERSION_MASK) == JVMTI_VERSION_VALUE; }
+   static bool is_jvmdi_version(jint version)                      { return (version & JVMTI_VERSION_MASK) == JVMDI_VERSION_VALUE; }
+   static jint get_jvmti_interface(JavaVM *jvm, void **penv, jint version);
+-  
+ 
+   // single stepping management methods
+-  static void at_single_stepping_point(JavaThread *thread, methodOop method, address location);
+-  static void expose_single_stepping(JavaThread *thread);
+-  static bool hide_single_stepping(JavaThread *thread);
++  static void at_single_stepping_point(JavaThread *thread, methodOop method, address location) KERNEL_RETURN;
++  static void expose_single_stepping(JavaThread *thread) KERNEL_RETURN;
++  static bool hide_single_stepping(JavaThread *thread) KERNEL_RETURN_(return false;);
+ 
+   // Methods that notify the debugger that something interesting has happened in the VM.
+-  static void post_vm_start              (); 
+-  static void post_vm_initialized        (); 
++  static void post_vm_start              ();
++  static void post_vm_initialized        ();
+   static void post_vm_death              ();
+-  
+-  static void post_single_step           (JavaThread *thread, methodOop method, address location);
+-  static void post_raw_breakpoint        (JavaThread *thread, methodOop method, address location);
+-  
+-  static void post_exception_throw       (JavaThread *thread, methodOop method, address location, oop exception);
+-  static void notice_unwind_due_to_exception (JavaThread *thread, methodOop method, address location, oop exception, bool in_handler_frame);
++
++  static void post_single_step           (JavaThread *thread, methodOop method, address location) KERNEL_RETURN;
++  static void post_raw_breakpoint        (JavaThread *thread, methodOop method, address location) KERNEL_RETURN;
++
++  static void post_exception_throw       (JavaThread *thread, methodOop method, address location, oop exception) KERNEL_RETURN;
++  static void notice_unwind_due_to_exception (JavaThread *thread, methodOop method, address location, oop exception, bool in_handler_frame) KERNEL_RETURN;
+ 
+   static oop jni_GetField_probe          (JavaThread *thread, jobject jobj,
+-    oop obj, klassOop klass, jfieldID fieldID, bool is_static);
++    oop obj, klassOop klass, jfieldID fieldID, bool is_static)
++    KERNEL_RETURN_(return NULL;);
+   static oop jni_GetField_probe_nh       (JavaThread *thread, jobject jobj,
+-    oop obj, klassOop klass, jfieldID fieldID, bool is_static);
++    oop obj, klassOop klass, jfieldID fieldID, bool is_static)
++    KERNEL_RETURN_(return NULL;);
+   static void post_field_access_by_jni   (JavaThread *thread, oop obj,
+-    klassOop klass, jfieldID fieldID, bool is_static);
++    klassOop klass, jfieldID fieldID, bool is_static) KERNEL_RETURN;
+   static void post_field_access          (JavaThread *thread, methodOop method,
+-    address location, KlassHandle field_klass, Handle object, jfieldID field);
++    address location, KlassHandle field_klass, Handle object, jfieldID field) KERNEL_RETURN;
+   static oop jni_SetField_probe          (JavaThread *thread, jobject jobj,
+     oop obj, klassOop klass, jfieldID fieldID, bool is_static, char sig_type,
+-    jvalue *value);
++    jvalue *value) KERNEL_RETURN_(return NULL;);
+   static oop jni_SetField_probe_nh       (JavaThread *thread, jobject jobj,
+     oop obj, klassOop klass, jfieldID fieldID, bool is_static, char sig_type,
+-    jvalue *value);
++    jvalue *value) KERNEL_RETURN_(return NULL;);
+   static void post_field_modification_by_jni(JavaThread *thread, oop obj,
+     klassOop klass, jfieldID fieldID, bool is_static, char sig_type,
+     jvalue *value);
+   static void post_raw_field_modification(JavaThread *thread, methodOop method,
+     address location, KlassHandle field_klass, Handle object, jfieldID field,
+-    char sig_type, jvalue *value);
++    char sig_type, jvalue *value) KERNEL_RETURN;
+ 
+-  static void post_method_entry          (JavaThread *thread, methodOop method, frame current_frame);
+-  static void post_method_exit           (JavaThread *thread, methodOop method, frame current_frame);
++  static void post_method_entry          (JavaThread *thread, methodOop method, frame current_frame) KERNEL_RETURN;
++  static void post_method_exit           (JavaThread *thread, methodOop method, frame current_frame) KERNEL_RETURN;
+ 
+-  static void post_class_load            (JavaThread *thread, klassOop klass);
+-  static void post_class_unload          (klassOop klass);
+-  static void post_class_prepare         (JavaThread *thread, klassOop klass);
+-  
+-  static void post_thread_start          (JavaThread *thread);
+-  static void post_thread_end            (JavaThread *thread);
+-
+-  static void post_class_file_load_hook(symbolHandle h_name, Handle class_loader, 
+-                                        Handle h_protection_domain, 
+-                                        unsigned char **data_ptr, unsigned char **end_ptr, 
+-                                        unsigned char **cached_data_ptr, 
++  static void post_class_load            (JavaThread *thread, klassOop klass) KERNEL_RETURN;
++  static void post_class_unload          (klassOop klass) KERNEL_RETURN;
++  static void post_class_prepare         (JavaThread *thread, klassOop klass) KERNEL_RETURN;
++
++  static void post_thread_start          (JavaThread *thread) KERNEL_RETURN;
++  static void post_thread_end            (JavaThread *thread) KERNEL_RETURN;
++
++  // Support for java.lang.instrument agent loading.
++  static bool _should_post_class_file_load_hook;
++  inline static void set_should_post_class_file_load_hook(bool on)     { _should_post_class_file_load_hook = on;  }
++  inline static bool should_post_class_file_load_hook()           { return _should_post_class_file_load_hook; }
++  static void post_class_file_load_hook(symbolHandle h_name, Handle class_loader,
++                                        Handle h_protection_domain,
++                                        unsigned char **data_ptr, unsigned char **end_ptr,
++                                        unsigned char **cached_data_ptr,
+                                         jint *cached_length_ptr);
+-  static void post_native_method_bind(methodOop method, address* function_ptr);
+-  static void post_compiled_method_load(nmethod *nm);
+-  static void post_dynamic_code_generated(const char *name, const void *code_begin, const void *code_end);
++  static void post_native_method_bind(methodOop method, address* function_ptr) KERNEL_RETURN;
++  static void post_compiled_method_load(nmethod *nm) KERNEL_RETURN;
++  static void post_dynamic_code_generated(const char *name, const void *code_begin, const void *code_end) KERNEL_RETURN;
+ 
+   // used at a safepoint to post a CompiledMethodUnload event
+-  static void post_compiled_method_unload_at_safepoint(jmethodID mid, const void *code_begin);
++  static void post_compiled_method_unload_at_safepoint(jmethodID mid, const void *code_begin) KERNEL_RETURN;
+ 
+   // similar to post_dynamic_code_generated except that it can be used to
+   // post a DynamicCodeGenerated event while holding locks in the VM. Any event
+   // posted using this function is recorded by the enclosing event collector
+   // -- JvmtiDynamicCodeEventCollector.
+-  static void post_dynamic_code_generated_while_holding_locks(const char* name, address code_begin, address code_end); 
++  static void post_dynamic_code_generated_while_holding_locks(const char* name, address code_begin, address code_end) KERNEL_RETURN;
+ 
+-  static void post_garbage_collection_finish();
+-  static void post_garbage_collection_start();
+-  static void post_data_dump();
+-  static void post_monitor_contended_enter(JavaThread *thread, ObjectMonitor *obj_mntr);
+-  static void post_monitor_contended_entered(JavaThread *thread, ObjectMonitor *obj_mntr);
+-  static void post_monitor_wait(JavaThread *thread, oop obj, jlong timeout);
+-  static void post_monitor_waited(JavaThread *thread, ObjectMonitor *obj_mntr, jboolean timed_out);
+-  static void post_object_free(JvmtiEnv* env, jlong tag);
+-  static void post_resource_exhausted(jint resource_exhausted_flags, const char* detail);
+-  static void record_vm_internal_object_allocation(oop object);
++  static void post_garbage_collection_finish() KERNEL_RETURN;
++  static void post_garbage_collection_start() KERNEL_RETURN;
++  static void post_data_dump() KERNEL_RETURN;
++  static void post_monitor_contended_enter(JavaThread *thread, ObjectMonitor *obj_mntr) KERNEL_RETURN;
++  static void post_monitor_contended_entered(JavaThread *thread, ObjectMonitor *obj_mntr) KERNEL_RETURN;
++  static void post_monitor_wait(JavaThread *thread, oop obj, jlong timeout) KERNEL_RETURN;
++  static void post_monitor_waited(JavaThread *thread, ObjectMonitor *obj_mntr, jboolean timed_out) KERNEL_RETURN;
++  static void post_object_free(JvmtiEnv* env, jlong tag) KERNEL_RETURN;
++  static void post_resource_exhausted(jint resource_exhausted_flags, const char* detail) KERNEL_RETURN;
++  static void record_vm_internal_object_allocation(oop object) KERNEL_RETURN;
+   // Post objects collected by vm_object_alloc_event_collector.
+-  static void post_vm_object_alloc(JavaThread *thread, oop object);  
++  static void post_vm_object_alloc(JavaThread *thread, oop object) KERNEL_RETURN;
+   // Collects vm internal objects for later event posting.
+   inline static void vm_object_alloc_event_collector(oop object) {
+     if (should_post_vm_object_alloc()) {
+       record_vm_internal_object_allocation(object);
+-    }      
++    }
+   }
+ 
+-  static void cleanup_thread             (JavaThread* thread);  
++  static void cleanup_thread             (JavaThread* thread) KERNEL_RETURN;
+ 
+-  static void oops_do(OopClosure* f);
++  static void oops_do(OopClosure* f) KERNEL_RETURN;
+ 
+-  static void transition_pending_onload_raw_monitors();
++  static void transition_pending_onload_raw_monitors() KERNEL_RETURN;
+ 
++#ifndef SERVICES_KERNEL
+   // attach support
+   static jint load_agent_library(AttachOperation* op, outputStream* out);
++#endif // SERVICES_KERNEL
+ 
+   // SetNativeMethodPrefix support
+   static char** get_all_native_method_prefixes(int* count_ptr);
+ 
+   // call after CMS has completed referencing processing
+-  static void cms_ref_processing_epilogue();
++  static void cms_ref_processing_epilogue() KERNEL_RETURN;
+ };
+ 
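The KERNEL_RETURN annotations added throughout this class are what let the "kernel" (minimal services) build drop these JVMTI entry points entirely: on a kernel build the macro expands to an empty inline body (or a stub return), while on a full build it expands to nothing and the declaration takes its definition from the .cpp file. A minimal sketch of the usual definitions, assuming the same SERVICES_KERNEL guard used above (the real macros live in a shared HotSpot header):

    #ifdef SERVICES_KERNEL
    #define KERNEL_RETURN        {}          /* declaration gains an empty inline body */
    #define KERNEL_RETURN_(code) { code }    /* same, but with a stub statement */
    #else
    #define KERNEL_RETURN        /* next token must be ';' */
    #define KERNEL_RETURN_(code) /* next token must be ';' */
    #endif

With these definitions, hide_single_stepping(...) KERNEL_RETURN_(return false;) compiles to an inline stub that always returns false on a kernel build, so the full implementation never needs to be linked in.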
+ // Support class used by JvmtiDynamicCodeEventCollector and others. It
+@@ -427,9 +361,9 @@
+     _code_begin = code_begin;
+     _code_end = code_end;
+   }
+-  char* name()			{ return _name; }
+-  address code_begin()		{ return _code_begin; }
+-  address code_end()		{ return _code_end; }
++  char* name()                  { return _name; }
++  address code_begin()          { return _code_begin; }
++  address code_end()            { return _code_end; }
+ };
+ 
+ // JvmtiEventCollector is a helper class to setup thread for
+@@ -437,7 +371,7 @@
+ class JvmtiEventCollector : public StackObj {
+  private:
+   JvmtiEventCollector* _prev;  // Save previous one to support nested event collector.
+-    
++
+  public:
+   void setup_jvmti_thread_state(); // Set this collector in current thread.
+   void unset_jvmti_thread_state(); // Reset previous collector in current thread.
+@@ -464,20 +398,20 @@
+ // }
+ 
+ class JvmtiDynamicCodeEventCollector : public JvmtiEventCollector {
+- private: 
+-  GrowableArray<JvmtiCodeBlobDesc*>* _code_blobs;	    // collected code blob events
++ private:
++  GrowableArray<JvmtiCodeBlobDesc*>* _code_blobs;           // collected code blob events
+ 
+   friend class JvmtiExport;
+   void register_stub(const char* name, address start, address end);
+ 
+  public:
+-  JvmtiDynamicCodeEventCollector();
+-  ~JvmtiDynamicCodeEventCollector();
++  JvmtiDynamicCodeEventCollector()  KERNEL_RETURN;
++  ~JvmtiDynamicCodeEventCollector() KERNEL_RETURN;
+   bool is_dynamic_code_event()   { return true; }
+-    
++
+ };
+ 
+-// Used to record vm internally allocated object oops and post 
++// Used to record vm internally allocated object oops and post
+ // vm object alloc event for objects visible to java world.
+ // Constructor enables JvmtiThreadState flag and all vm allocated
+ // objects are recorded in a growable array. When destructor is
+@@ -495,21 +429,21 @@
+ 
+   //GC support
+   void oops_do(OopClosure* f);
+-    
++
+   friend class JvmtiExport;
+-  // Record vm allocated object oop. 
++  // Record vm allocated object oop.
+   inline void record_allocation(oop obj);
+ 
+   //GC support
+   static void oops_do_for_all_threads(OopClosure* f);
+-    
++
+  public:
+-  JvmtiVMObjectAllocEventCollector(); 
+-  ~JvmtiVMObjectAllocEventCollector();
++  JvmtiVMObjectAllocEventCollector()  KERNEL_RETURN;
++  ~JvmtiVMObjectAllocEventCollector() KERNEL_RETURN;
+   bool is_vm_object_alloc_event()   { return true; }
+ 
+-  bool is_enabled()		    { return _enable; }
+-  void set_enabled(bool on)	    { _enable = on; }
++  bool is_enabled()                 { return _enable; }
++  void set_enabled(bool on)         { _enable = on; }
+ };
+ 
+ 
+@@ -530,24 +464,24 @@
+ class NoJvmtiVMObjectAllocMark : public StackObj {
+  private:
+   // enclosing collector if enabled, NULL otherwise
+-  JvmtiVMObjectAllocEventCollector *_collector;	    
+-  
+-  bool was_enabled()	{ return _collector != NULL; }  
++  JvmtiVMObjectAllocEventCollector *_collector;
++
++  bool was_enabled()    { return _collector != NULL; }
+ 
+  public:
+-  NoJvmtiVMObjectAllocMark();
+-  ~NoJvmtiVMObjectAllocMark();
++  NoJvmtiVMObjectAllocMark() KERNEL_RETURN;
++  ~NoJvmtiVMObjectAllocMark() KERNEL_RETURN;
+ };
+ 
+ 
+-// Base class for reporting GC events to JVMTI. 
++// Base class for reporting GC events to JVMTI.
+ class JvmtiGCMarker : public StackObj {
+  private:
+-  bool _full;				// marks a "full" GC
+-  unsigned int _invocation_count;	// GC invocation count
++  bool _full;                           // marks a "full" GC
++  unsigned int _invocation_count;       // GC invocation count
+  protected:
+-  JvmtiGCMarker(bool full);		// protected 
+-  ~JvmtiGCMarker();			// protected
++  JvmtiGCMarker(bool full) KERNEL_RETURN;       // protected
++  ~JvmtiGCMarker() KERNEL_RETURN;               // protected
+ };
+ 
+ 
+@@ -561,13 +495,13 @@
+ //   JvmtiGCForAllocationMarker jgcm;
+ //   :
+ // }
+-// 
++//
+ // If jvmti is not enabled the constructor and destructor is essentially
+-// a no-op (no overhead). 
++// a no-op (no overhead).
+ //
+ class JvmtiGCForAllocationMarker : public JvmtiGCMarker {
+  public:
+-  JvmtiGCForAllocationMarker() : JvmtiGCMarker(false) { 
++  JvmtiGCForAllocationMarker() : JvmtiGCMarker(false) {
+   }
+ };
+ 
+@@ -575,7 +509,7 @@
+ // allocated and should be placed in the doit() implementation of all
+ // vm operations that do a "full" stop-the-world GC. This class differs
+ // from JvmtiGCForAllocationMarker in that this class assumes that a
+-// "full" GC will happen. 
++// "full" GC will happen.
+ //
+ // Usage :-
+ //
+@@ -586,7 +520,7 @@
+ //
+ class JvmtiGCFullMarker : public JvmtiGCMarker {
+  public:
+-  JvmtiGCFullMarker() : JvmtiGCMarker(true) { 
++  JvmtiGCFullMarker() : JvmtiGCMarker(true) {
+   }
+ };
+ 
+@@ -612,7 +546,7 @@
+   ~JvmtiHideSingleStepping() {
+     if (_single_step_hidden) {
+       JvmtiExport::expose_single_stepping(_thread);
+-    }  
++    }
+   }
+ };
+ 
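All the collector classes in this header share one RAII pattern: a stack-allocated collector hooks itself into the current thread's JVMTI state in its constructor, batches events while it is in scope, and posts them from its destructor. A minimal usage sketch following the comment blocks above (surrounding code hypothetical):

    {
      JvmtiDynamicCodeEventCollector collector;  // setup_jvmti_thread_state()

      // ... generate stubs; each blob is recorded via register_stub() ...

    } // destructor posts the batched DynamicCodeGenerated events and
      // restores any enclosing collector, even on early exits

Making the constructors and destructors KERNEL_RETURN turns the whole mechanism into a no-op object on kernel builds without touching any call sites.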
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiExtensions.cpp openjdk/hotspot/src/share/vm/prims/jvmtiExtensions.cpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiExtensions.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiExtensions.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)jvmtiExtensions.cpp	1.11 07/05/05 17:06:38 JVM"
+-#endif
+ /*
+  * Copyright 2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -56,7 +53,7 @@
+ 
+   // register our extension function
+   static jvmtiParamInfo func_params[] = {
+-    { (char*)"IsClassUnloadingEnabled", JVMTI_KIND_OUT,  JVMTI_TYPE_JBOOLEAN, JNI_FALSE } 
++    { (char*)"IsClassUnloadingEnabled", JVMTI_KIND_OUT,  JVMTI_TYPE_JBOOLEAN, JNI_FALSE }
+   };
+   static jvmtiExtensionFunctionInfo ext_func = {
+     (jvmtiExtensionFunction)IsClassUnloadingEnabled,
+@@ -64,7 +61,7 @@
+     (char*)"Tell if class unloading is enabled (-noclassgc)",
+     sizeof(func_params)/sizeof(func_params[0]),
+     func_params,
+-    0,		    // no non-universal errors
++    0,              // no non-universal errors
+     NULL
+   };
+   _ext_functions->append(&ext_func);
+@@ -90,21 +87,21 @@
+ // return the list of extension functions
+ 
+ jvmtiError JvmtiExtensions::get_functions(JvmtiEnv* env,
+-					  jint* extension_count_ptr, 
+-					  jvmtiExtensionFunctionInfo** extensions)
++                                          jint* extension_count_ptr,
++                                          jvmtiExtensionFunctionInfo** extensions)
+ {
+   guarantee(_ext_functions != NULL, "registration not done");
+ 
+   ResourceTracker rt(env);
+-  
++
+   jvmtiExtensionFunctionInfo* ext_funcs;
+-  jvmtiError err = rt.allocate(_ext_functions->length() * 
+-			       sizeof(jvmtiExtensionFunctionInfo),
+-			       (unsigned char**)&ext_funcs);
++  jvmtiError err = rt.allocate(_ext_functions->length() *
++                               sizeof(jvmtiExtensionFunctionInfo),
++                               (unsigned char**)&ext_funcs);
+   if (err != JVMTI_ERROR_NONE) {
+     return err;
+   }
+-  
++
+   for (int i=0; i<_ext_functions->length(); i++ ) {
+     ext_funcs[i].func = _ext_functions->at(i)->func;
+ 
+@@ -116,8 +113,8 @@
+     strcpy(ext_funcs[i].id, id);
+ 
+     char *desc = _ext_functions->at(i)->short_description;
+-    err = rt.allocate(strlen(desc)+1, 
+-		      (unsigned char**)&(ext_funcs[i].short_description));
++    err = rt.allocate(strlen(desc)+1,
++                      (unsigned char**)&(ext_funcs[i].short_description));
+     if (err != JVMTI_ERROR_NONE) {
+       return err;
+     }
+@@ -130,30 +127,30 @@
+     ext_funcs[i].param_count = param_count;
+     if (param_count == 0) {
+       ext_funcs[i].params = NULL;
+-    } else {    
++    } else {
+       err = rt.allocate(param_count*sizeof(jvmtiParamInfo),
+-		        (unsigned char**)&(ext_funcs[i].params));
++                        (unsigned char**)&(ext_funcs[i].params));
+       if (err != JVMTI_ERROR_NONE) {
+-	return err;
++        return err;
+       }
+       jvmtiParamInfo* src_params = _ext_functions->at(i)->params;
+       jvmtiParamInfo* dst_params = ext_funcs[i].params;
+ 
+       for (int j=0; j<param_count; j++) {
+-	err = rt.allocate(strlen(src_params[j].name)+1, 	   
+-			  (unsigned char**)&(dst_params[j].name));
+-	if (err != JVMTI_ERROR_NONE) {
+-	  return err;
+-	}
+-	strcpy(dst_params[j].name, src_params[j].name);
+-
+-	dst_params[j].kind = src_params[j].kind;
+-	dst_params[j].base_type = src_params[j].base_type;
+-	dst_params[j].null_ok = src_params[j].null_ok;
++        err = rt.allocate(strlen(src_params[j].name)+1,
++                          (unsigned char**)&(dst_params[j].name));
++        if (err != JVMTI_ERROR_NONE) {
++          return err;
++        }
++        strcpy(dst_params[j].name, src_params[j].name);
++
++        dst_params[j].kind = src_params[j].kind;
++        dst_params[j].base_type = src_params[j].base_type;
++        dst_params[j].null_ok = src_params[j].null_ok;
+       }
+     }
+ 
+-    // errors    
++    // errors
+ 
+     jint error_count = _ext_functions->at(i)->error_count;
+     ext_funcs[i].error_count = error_count;
+@@ -161,12 +158,12 @@
+       ext_funcs[i].errors = NULL;
+     } else {
+       err = rt.allocate(error_count*sizeof(jvmtiError),
+-		        (unsigned char**)&(ext_funcs[i].errors));
++                        (unsigned char**)&(ext_funcs[i].errors));
+       if (err != JVMTI_ERROR_NONE) {
+-	return err;
++        return err;
+       }
+-      memcpy(ext_funcs[i].errors, _ext_functions->at(i)->errors, 
+-	     error_count*sizeof(jvmtiError));
++      memcpy(ext_funcs[i].errors, _ext_functions->at(i)->errors,
++             error_count*sizeof(jvmtiError));
+     }
+   }
+ 
+@@ -179,8 +176,8 @@
+ // return the list of extension events
+ 
+ jvmtiError JvmtiExtensions::get_events(JvmtiEnv* env,
+-				       jint* extension_count_ptr, 
+-				       jvmtiExtensionEventInfo** extensions)
++                                       jint* extension_count_ptr,
++                                       jvmtiExtensionEventInfo** extensions)
+ {
+   guarantee(_ext_events != NULL, "registration not done");
+ 
+@@ -188,11 +185,11 @@
+ 
+   jvmtiExtensionEventInfo* ext_events;
+   jvmtiError err = rt.allocate(_ext_events->length() * sizeof(jvmtiExtensionEventInfo),
+-			       (unsigned char**)&ext_events);
++                               (unsigned char**)&ext_events);
+   if (err != JVMTI_ERROR_NONE) {
+     return err;
+   }
+-  
++
+   for (int i=0; i<_ext_events->length(); i++ ) {
+     ext_events[i].extension_event_index = _ext_events->at(i)->extension_event_index;
+ 
+@@ -204,8 +201,8 @@
+     strcpy(ext_events[i].id, id);
+ 
+     char *desc = _ext_events->at(i)->short_description;
+-    err = rt.allocate(strlen(desc)+1, 
+-		      (unsigned char**)&(ext_events[i].short_description));
++    err = rt.allocate(strlen(desc)+1,
++                      (unsigned char**)&(ext_events[i].short_description));
+     if (err != JVMTI_ERROR_NONE) {
+       return err;
+     }
+@@ -218,27 +215,27 @@
+     ext_events[i].param_count = param_count;
+     if (param_count == 0) {
+       ext_events[i].params = NULL;
+-    } else {    
++    } else {
+       err = rt.allocate(param_count*sizeof(jvmtiParamInfo),
+-		        (unsigned char**)&(ext_events[i].params));
++                        (unsigned char**)&(ext_events[i].params));
+       if (err != JVMTI_ERROR_NONE) {
+-	return err;
++        return err;
+       }
+       jvmtiParamInfo* src_params = _ext_events->at(i)->params;
+       jvmtiParamInfo* dst_params = ext_events[i].params;
+ 
+       for (int j=0; j<param_count; j++) {
+-	err = rt.allocate(strlen(src_params[j].name)+1, 	   
+-			  (unsigned char**)&(dst_params[j].name));
+-	if (err != JVMTI_ERROR_NONE) {
+-	  return err;
+-	}
+-	strcpy(dst_params[j].name, src_params[j].name);
+-
+-	dst_params[j].kind = src_params[j].kind;
+-	dst_params[j].base_type = src_params[j].base_type;
+-	dst_params[j].null_ok = src_params[j].null_ok;
+-      }    
++        err = rt.allocate(strlen(src_params[j].name)+1,
++                          (unsigned char**)&(dst_params[j].name));
++        if (err != JVMTI_ERROR_NONE) {
++          return err;
++        }
++        strcpy(dst_params[j].name, src_params[j].name);
++
++        dst_params[j].kind = src_params[j].kind;
++        dst_params[j].base_type = src_params[j].base_type;
++        dst_params[j].null_ok = src_params[j].null_ok;
++      }
+     }
+   }
+ 
+@@ -250,12 +247,12 @@
+ // set callback for an extension event and enable/disable it.
+ 
+ jvmtiError JvmtiExtensions::set_event_callback(JvmtiEnv* env,
+-					       jint extension_event_index, 
+-					       jvmtiExtensionEvent callback)
+-{  
++                                               jint extension_event_index,
++                                               jvmtiExtensionEvent callback)
++{
+   guarantee(_ext_events != NULL, "registration not done");
+-  
+-  jvmtiExtensionEventInfo* event = NULL;  
++
++  jvmtiExtensionEventInfo* event = NULL;
+ 
+   // if there are extension events registered then validate that the
+   // extension_event_index matches one of the registered events.
+@@ -273,8 +270,8 @@
+     return JVMTI_ERROR_ILLEGAL_ARGUMENT;
+   }
+ 
+-  JvmtiEventController::set_extension_event_callback(env, extension_event_index, 
+-						     callback);
++  JvmtiEventController::set_extension_event_callback(env, extension_event_index,
++                                                     callback);
+ 
+   return JVMTI_ERROR_NONE;
+ }
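For context, get_functions() services the standard JVMTI GetExtensionFunctions call. An agent consumes the list it builds roughly as follows (a hedged sketch: real code must also Deallocate every id, description, params and errors buffer, which is abbreviated here):

    #include <jvmti.h>
    #include <stdio.h>

    static void dump_extension_functions(jvmtiEnv *jvmti) {
      jint count = 0;
      jvmtiExtensionFunctionInfo *funcs = NULL;
      if (jvmti->GetExtensionFunctions(&count, &funcs) != JVMTI_ERROR_NONE)
        return;
      for (jint i = 0; i < count; i++)
        printf("%s : %s\n", funcs[i].id, funcs[i].short_description);
      // every buffer above was allocated by the VM on the agent's behalf
      // (via the ResourceTracker shown above), so the agent owns it;
      // only the outer array is freed here for brevity
      jvmti->Deallocate((unsigned char *)funcs);
    }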
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiExtensions.hpp openjdk/hotspot/src/share/vm/prims/jvmtiExtensions.hpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiExtensions.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiExtensions.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jvmtiExtensions.hpp	1.8 07/05/05 17:06:38 JVM"
+-#endif
+ /*
+  * Copyright 2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,9 +19,9 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+- 
++
+ #ifndef _JVMTI_EXTENSIONS_H_
+ #define _JVMTI_EXTENSIONS_H_
+ 
+@@ -32,9 +29,9 @@
+ // JvmtiExtensions
+ //
+ // Maintains the list of extension functions and events in this JVMTI
+-// implementation. The list of functions and events can be obtained by 
+-// the profiler using the JVMTI GetExtensionFunctions and 
+-// GetExtensionEvents functions. 
++// implementation. The list of functions and events can be obtained by
++// the profiler using the JVMTI GetExtensionFunctions and
++// GetExtensionEvents functions.
+ 
+ class JvmtiExtensions : public AllStatic {
+  private:
+@@ -46,16 +43,16 @@
+   static void register_extensions();
+ 
+   // returns the list of extension functions
+-  static jvmtiError get_functions(JvmtiEnv* env, jint* extension_count_ptr, 
+-				  jvmtiExtensionFunctionInfo** extensions);
++  static jvmtiError get_functions(JvmtiEnv* env, jint* extension_count_ptr,
++                                  jvmtiExtensionFunctionInfo** extensions);
+ 
+   // returns the list of extension events
+-  static jvmtiError get_events(JvmtiEnv* env, jint* extension_count_ptr, 
+-			       jvmtiExtensionEventInfo** extensions);
++  static jvmtiError get_events(JvmtiEnv* env, jint* extension_count_ptr,
++                               jvmtiExtensionEventInfo** extensions);
+ 
+   // sets the callback function for an extension event and enables the event
+-  static jvmtiError set_event_callback(JvmtiEnv* env, jint extension_event_index, 
+-				       jvmtiExtensionEvent callback);
++  static jvmtiError set_event_callback(JvmtiEnv* env, jint extension_event_index,
++                                       jvmtiExtensionEvent callback);
+ };
+ 
+ #endif  /* _JVMTI_EXTENSIONS_H_ */
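The event side mirrors this: get_events() backs GetExtensionEvents and set_event_callback() backs SetExtensionEventCallback. A hedged agent-side sketch (the callback body and the choice of event index are hypothetical):

    static void JNICALL on_ext_event(jvmtiEnv *jvmti, ...) {
      // extension events are variadic; real code decodes the params list
    }

    static void enable_first_extension_event(jvmtiEnv *jvmti) {
      jint count = 0;
      jvmtiExtensionEventInfo *events = NULL;
      if (jvmti->GetExtensionEvents(&count, &events) != JVMTI_ERROR_NONE || count == 0)
        return;
      jvmti->SetExtensionEventCallback(events[0].extension_event_index,
                                       on_ext_event);
      jvmti->Deallocate((unsigned char *)events);
    }

Passing NULL as the callback disables the event again.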
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiGen.java openjdk/hotspot/src/share/vm/prims/jvmtiGen.java
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiGen.java	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiGen.java	2008-01-31 09:19:01.000000000 -0500
+@@ -19,14 +19,14 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ import javax.xml.parsers.DocumentBuilder;
+ import javax.xml.parsers.DocumentBuilderFactory;
+ import javax.xml.parsers.FactoryConfigurationError;
+ import javax.xml.parsers.ParserConfigurationException;
+- 
++
+ import org.xml.sax.SAXException;
+ import org.xml.sax.SAXParseException;
+ import org.w3c.dom.Document;
+@@ -49,13 +49,13 @@
+      * Write out usage and exit.
+      */
+     private static void showUsage() {
+-	System.err.println("usage:");
+-        System.err.println("  java jvmtiGen " + 
++        System.err.println("usage:");
++        System.err.println("  java jvmtiGen " +
+                            "-IN <input XML file name> " +
+                            "-XSL <XSL file> " +
+                            "-OUT <output file name> " +
+                            "[-PARAM <name> <expression> ...]");
+-	System.exit(0);		// There is no returning from showUsage()
++        System.exit(0);         // There is no returning from showUsage()
+     }
+ 
+     // Global value so it can be ref'd by the tree-adapter
+@@ -87,7 +87,7 @@
+                 showUsage();
+             }
+         }
+-	if (inFileName==null || xslFileName==null || outFileName==null){
++        if (inFileName==null || xslFileName==null || outFileName==null){
+             showUsage();
+         }
+ 
+@@ -104,7 +104,7 @@
+          * above was fixed, so if the class is not found we can proceed
+          * and use the default parser.
+          */
+-        final String parserProperty = 
++        final String parserProperty =
+             "javax.xml.transform.TransformerFactory";
+         final String workaroundParser =
+             "org.apache.xalan.processor.TransformerFactoryImpl";
+@@ -123,20 +123,20 @@
+              * exception and proceed with default settings.
+              */
+         }
+-                                        
++
+         DocumentBuilderFactory factory =
+             DocumentBuilderFactory.newInstance();
+ 
+         factory.setNamespaceAware(true);
+         factory.setValidating(true);
+- 
++
+         try {
+             File datafile   = new File(inFileName);
+             File stylesheet = new File(xslFileName);
+- 
++
+             DocumentBuilder builder = factory.newDocumentBuilder();
+             document = builder.parse(datafile);
+- 
++
+             // Use a Transformer for output
+             TransformerFactory tFactory =
+                 TransformerFactory.newInstance();
+@@ -147,11 +147,11 @@
+                                          (String) params.elementAt(ii + 1));
+             }
+             DOMSource source = new DOMSource(document);
+-            
++
+             PrintStream ps = new PrintStream( new FileOutputStream(outFileName));
+             StreamResult result = new StreamResult(ps);
+             transformer.transform(source, result);
+-           
++
+         } catch (TransformerConfigurationException tce) {
+            // Error generated by the parser
+            System.out.println ("\n** Transformer Factory error");
+@@ -162,7 +162,7 @@
+            if (tce.getException() != null)
+                x = tce.getException();
+            x.printStackTrace();
+-      
++
+         } catch (TransformerException te) {
+            // Error generated by the parser
+            System.out.println ("\n** Transformation error");
+@@ -173,7 +173,7 @@
+            if (te.getException() != null)
+                x = te.getException();
+            x.printStackTrace();
+-           
++
+          } catch (SAXException sxe) {
+            // Error generated by this application
+            // (or a parser-initialization error)
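jvmtiGen is the build-time XSLT driver that combines the JVMTI specification XML with a stylesheet to produce generated sources; all hunks in this file are whitespace normalization. Per its own showUsage(), a typical invocation looks like (file names illustrative):

    java jvmtiGen -IN jvmti.xml -XSL jvmtiEnv.xsl -OUT jvmtiEnv.cpp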
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiGetLoadedClasses.cpp openjdk/hotspot/src/share/vm/prims/jvmtiGetLoadedClasses.cpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiGetLoadedClasses.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiGetLoadedClasses.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)jvmtiGetLoadedClasses.cpp	1.13 07/05/05 17:06:38 JVM"
+-#endif
+ /*
+  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ 
+@@ -32,9 +29,9 @@
+ 
+ // The closure for GetLoadedClasses and GetClassLoaderClasses
+ class JvmtiGetLoadedClassesClosure : public StackObj {
+-  // Since the SystemDictionary::classes_do callback 
+-  // doesn't pass a closureData pointer, 
+-  // we use a thread-local slot to hold a pointer to 
++  // Since the SystemDictionary::classes_do callback
++  // doesn't pass a closureData pointer,
++  // we use a thread-local slot to hold a pointer to
+   // a stack allocated instance of this structure.
+  private:
+   jobject _initiatingLoader;
+@@ -208,7 +205,7 @@
+         oop mirror = Klass::cast(k)->java_mirror();
+         that->set_element(that->get_index(), mirror);
+         that->set_index(that->get_index() + 1);
+-      }        
++      }
+     }
+   }
+ 
+@@ -252,8 +249,8 @@
+ 
+ jvmtiError
+ JvmtiGetLoadedClasses::getLoadedClasses(JvmtiEnv *env, jint* classCountPtr, jclass** classesPtr) {
+-  // Since SystemDictionary::classes_do only takes a function pointer 
+-  // and doesn't call back with a closure data pointer, 
++  // Since SystemDictionary::classes_do only takes a function pointer
++  // and doesn't call back with a closure data pointer,
+   // we can only pass static methods.
+ 
+   JvmtiGetLoadedClassesClosure closure;
+@@ -262,7 +259,7 @@
+     // array classes aren't created, and SystemDictionary_lock to ensure that
+     // classes aren't added to the system dictionary,
+     MutexLocker ma(MultiArray_lock);
+-    MutexLocker sd(SystemDictionary_lock);    
++    MutexLocker sd(SystemDictionary_lock);
+ 
+     // First, count the classes
+     SystemDictionary::classes_do(&JvmtiGetLoadedClassesClosure::increment);
+@@ -276,7 +273,7 @@
+   }
+   // Post results
+   jclass* result_list;
+-  jvmtiError err = env->Allocate(closure.get_count() * sizeof(jclass), 
++  jvmtiError err = env->Allocate(closure.get_count() * sizeof(jclass),
+                                  (unsigned char**)&result_list);
+   if (err != JVMTI_ERROR_NONE) {
+     return err;
+@@ -284,14 +281,14 @@
+   closure.extract(env, result_list);
+   *classCountPtr = closure.get_count();
+   *classesPtr = result_list;
+-  return JVMTI_ERROR_NONE;    
+-} 
++  return JVMTI_ERROR_NONE;
++}
+ 
+ jvmtiError
+-JvmtiGetLoadedClasses::getClassLoaderClasses(JvmtiEnv *env, jobject initiatingLoader, 
++JvmtiGetLoadedClasses::getClassLoaderClasses(JvmtiEnv *env, jobject initiatingLoader,
+                                              jint* classCountPtr, jclass** classesPtr) {
+-  // Since SystemDictionary::classes_do only takes a function pointer 
+-  // and doesn't call back with a closure data pointer, 
++  // Since SystemDictionary::classes_do only takes a function pointer
++  // and doesn't call back with a closure data pointer,
+   // we can only pass static methods.
+   JvmtiGetLoadedClassesClosure closure(initiatingLoader);
+   {
+@@ -316,7 +313,7 @@
+   }
+   // Post results
+   jclass* result_list;
+-  jvmtiError err = env->Allocate(closure.get_count() * sizeof(jclass), 
++  jvmtiError err = env->Allocate(closure.get_count() * sizeof(jclass),
+                                  (unsigned char**)&result_list);
+   if (err != JVMTI_ERROR_NONE) {
+     return err;
+@@ -324,5 +321,5 @@
+   closure.extract(env, result_list);
+   *classCountPtr = closure.get_count();
+   *classesPtr = result_list;
+-  return JVMTI_ERROR_NONE;    
++  return JVMTI_ERROR_NONE;
+ }
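The closure comments here describe a classic workaround: SystemDictionary::classes_do() accepts only a bare function pointer, with no user-data argument, so the stack-allocated closure parks a pointer to itself in a per-thread slot that its static trampolines read back. A standalone sketch of the idiom with hypothetical names (HotSpot stores the slot on the thread object rather than using C++11 thread_local):

    class LoadedClassClosure;
    static thread_local LoadedClassClosure *g_current;  // the per-thread slot

    class LoadedClassClosure {
     public:
      LoadedClassClosure()  { g_current = this; }
      ~LoadedClassClosure() { g_current = NULL; }
      void handle(void *klass) { /* count or collect the class */ }
      static void trampoline(void *klass) { g_current->handle(klass); }
    };

    // classes_do(&LoadedClassClosure::trampoline) now reaches the
    // stack-allocated instance despite the pointer-only interface.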
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiGetLoadedClasses.hpp openjdk/hotspot/src/share/vm/prims/jvmtiGetLoadedClasses.hpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiGetLoadedClasses.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiGetLoadedClasses.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jvmtiGetLoadedClasses.hpp	1.8 07/05/05 17:06:38 JVM"
+-#endif
+ /*
+  * Copyright 2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,12 +19,12 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class JvmtiGetLoadedClasses : AllStatic {
+ public:
+   static jvmtiError getLoadedClasses(JvmtiEnv *env, jint* classCountPtr, jclass** classesPtr);
+-  static jvmtiError getClassLoaderClasses(JvmtiEnv *env, jobject initiatingLoader, 
++  static jvmtiError getClassLoaderClasses(JvmtiEnv *env, jobject initiatingLoader,
+                                           jint* classCountPtr, jclass** classesPtr);
+ };
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiImpl.cpp openjdk/hotspot/src/share/vm/prims/jvmtiImpl.cpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiImpl.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiImpl.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)jvmtiImpl.cpp	1.63 07/05/17 16:05:04 JVM"
+-#endif
+ /*
+  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -69,7 +66,7 @@
+     // of the thread is given in JavaThread::thread_main().
+     assert(thread->is_Java_thread(), "debugger thread should be a Java Thread");
+     assert(thread == JavaThread::current(), "sanity check");
+-  
++
+     JvmtiAgentThread *dthread = (JvmtiAgentThread *)thread;
+     dthread->call_start_function();
+ }
+@@ -80,291 +77,6 @@
+     _start_fn(_env->jvmti_external(), jni_environment(), (void*)_start_arg);
+ }
+ 
+-//
+-// class JvmtiUtil
+-//
+-
+-ResourceArea* JvmtiUtil::_single_threaded_resource_area = NULL;
+-
+-ResourceArea* JvmtiUtil::single_threaded_resource_area() {
+-  if (_single_threaded_resource_area == NULL) {
+-    // lazily create the single threaded resource area
+-    // pick a size which is not a standard since the pools don't exist yet
+-    _single_threaded_resource_area = new ResourceArea(Chunk::non_pool_size);
+-  }
+-  return _single_threaded_resource_area;
+-}
+-
+-//
+-// class JvmtiTrace
+-//
+-// Support for JVMTI tracing code
+-//
+-// ------------
+-// Usage:
+-//    -XX:TraceJVMTI=DESC,DESC,DESC
+-//
+-//    DESC is   DOMAIN ACTION KIND
+-//
+-//    DOMAIN is function name
+-//              event name
+-//              "all" (all functions and events)
+-//              "func" (all functions except boring)
+-//              "allfunc" (all functions)
+-//              "event" (all events)
+-//              "ec" (event controller)
+-//
+-//    ACTION is "+" (add)
+-//              "-" (remove)
+-//
+-//    KIND is
+-//     for func
+-//              "i" (input params)
+-//              "e" (error returns)
+-//              "o" (output)
+-//     for event
+-//              "t" (event triggered aka posted)
+-//              "s" (event sent)
+-//
+-// Example:
+-//            -XX:TraceJVMTI=ec+,GetCallerFrame+ie,Breakpoint+s
+-
+-#ifdef JVMTI_TRACE
+-
+-bool JvmtiTrace::_initialized = false;
+-bool JvmtiTrace::_on = false;
+-bool JvmtiTrace::_trace_event_controller = false;
+-
+-void JvmtiTrace::initialize() {
+-  if (_initialized) {
+-    return;
+-  }
+-  SafeResourceMark rm;
+-  
+-  const char *very_end;
+-  const char *curr;
+-  if (strlen(TraceJVMTI)) {
+-    curr = TraceJVMTI;
+-  } else {
+-    curr = "";  // hack in fixed tracing here
+-  }
+-  very_end = curr + strlen(curr);
+-  while (curr < very_end) {
+-    const char *curr_end = strchr(curr, ',');
+-    if (curr_end == NULL) {
+-      curr_end = very_end;
+-    }
+-    const char *op_pos = strchr(curr, '+');
+-    const char *minus_pos = strchr(curr, '-');
+-    if (minus_pos != NULL && (minus_pos < op_pos || op_pos == NULL)) {
+-      op_pos = minus_pos;
+-    }
+-    char op;
+-    const char *flags = op_pos + 1;
+-    const char *flags_end = curr_end;
+-    if (op_pos == NULL || op_pos > curr_end) {
+-      flags = "ies";
+-      flags_end = flags + strlen(flags);
+-      op_pos = curr_end;
+-      op = '+';
+-    } else {
+-      op = *op_pos;
+-    }
+-    jbyte bits = 0;
+-    for (; flags < flags_end; ++flags) {
+-      switch (*flags) {
+-      case 'i': 
+-        bits |= SHOW_IN;
+-        break;
+-      case 'I': 
+-        bits |= SHOW_IN_DETAIL;
+-        break;
+-      case 'e': 
+-        bits |= SHOW_ERROR;
+-        break;
+-      case 'o': 
+-        bits |= SHOW_OUT;
+-        break;
+-      case 'O': 
+-        bits |= SHOW_OUT_DETAIL;
+-        break;
+-      case 't': 
+-        bits |= SHOW_EVENT_TRIGGER;
+-        break;
+-      case 's': 
+-        bits |= SHOW_EVENT_SENT;
+-        break;
+-      default:
+-        tty->print_cr("Invalid trace flag '%c'", *flags);
+-        break;
+-      }
+-    }
+-    const int FUNC = 1;
+-    const int EXCLUDE  = 2;
+-    const int ALL_FUNC = 4;
+-    const int EVENT = 8;
+-    const int ALL_EVENT = 16;
+-    int domain = 0;
+-    size_t len = op_pos - curr;
+-    if (op_pos == curr) {
+-      domain = ALL_FUNC | FUNC | ALL_EVENT | EVENT | EXCLUDE;
+-    } else if (len==3 && strncmp(curr, "all", 3)==0) {
+-      domain = ALL_FUNC | FUNC | ALL_EVENT | EVENT;
+-    } else if (len==7 && strncmp(curr, "allfunc", 7)==0) {
+-      domain = ALL_FUNC | FUNC;
+-    } else if (len==4 && strncmp(curr, "func", 4)==0) {
+-      domain = ALL_FUNC | FUNC | EXCLUDE;
+-    } else if (len==8 && strncmp(curr, "allevent", 8)==0) {
+-      domain = ALL_EVENT | EVENT;
+-    } else if (len==5 && strncmp(curr, "event", 5)==0) {
+-      domain = ALL_EVENT | EVENT;
+-    } else if (len==2 && strncmp(curr, "ec", 2)==0) {
+-      _trace_event_controller = true;
+-      tty->print_cr("JVMTI Tracing the event controller");
+-    } else {
+-      domain = FUNC | EVENT;  // go searching
+-    }
+-
+-    int exclude_index = 0;
+-    if (domain & FUNC) {
+-      if (domain & ALL_FUNC) {
+-        if (domain & EXCLUDE) {
+-          tty->print("JVMTI Tracing all significant functions");
+-        } else {
+-          tty->print_cr("JVMTI Tracing all functions");
+-        }
+-      }
+-      for (int i = 0; i <= _max_function_index; ++i) {
+-        if (domain & EXCLUDE && i == _exclude_functions[exclude_index]) {
+-          ++exclude_index;
+-        } else {
+-          bool do_op = false;
+-          if (domain & ALL_FUNC) {
+-            do_op = true;
+-          } else {
+-            const char *fname = function_name(i);
+-            if (fname != NULL) {
+-              size_t fnlen = strlen(fname);
+-              if (len==fnlen && strncmp(curr, fname, fnlen)==0) {
+-                tty->print_cr("JVMTI Tracing the function: %s", fname);
+-                do_op = true;
+-              }
+-            }
+-          }
+-          if (do_op) {
+-            if (op == '+') {
+-              _trace_flags[i] |= bits;
+-            } else {
+-              _trace_flags[i] &= ~bits;
+-            }
+-            _on = true;
+-          }
+-        }
+-      }
+-    }
+-    if (domain & EVENT) {
+-      if (domain & ALL_EVENT) {
+-        tty->print_cr("JVMTI Tracing all events");
+-      }
+-      for (int i = 0; i <= _max_event_index; ++i) {
+-        bool do_op = false;
+-        if (domain & ALL_EVENT) {
+-          do_op = true;
+-        } else {
+-          const char *ename = event_name(i);
+-          if (ename != NULL) {
+-            size_t evtlen = strlen(ename);
+-            if (len==evtlen && strncmp(curr, ename, evtlen)==0) {
+-              tty->print_cr("JVMTI Tracing the event: %s", ename);
+-              do_op = true;
+-            }
+-          }
+-        }
+-        if (do_op) {
+-          if (op == '+') {
+-            _event_trace_flags[i] |= bits;
+-          } else {
+-            _event_trace_flags[i] &= ~bits;
+-          }
+-          _on = true;
+-        }
+-      }
+-    }
+-    if (!_on && (domain & (FUNC|EVENT))) {
+-      tty->print_cr("JVMTI Trace domain not found");
+-    }
+-    curr = curr_end + 1;
+-  }
+-  _initialized = true;
+-}
+-
+-
+-void JvmtiTrace::shutdown() {
+-  int i;
+-  _on = false;
+-  _trace_event_controller = false;
+-  for (i = 0; i <= _max_function_index; ++i) {
+-    _trace_flags[i] = 0;
+-  }
+-  for (i = 0; i <= _max_event_index; ++i) {
+-    _event_trace_flags[i] = 0;
+-  }
+-}
+-
+-
+-const char* JvmtiTrace::enum_name(const char** names, const jint* values, jint value) {
+-  for (int index = 0; names[index] != 0; ++index) {
+-    if (values[index] == value) {
+-      return names[index];
+-    }
+-  }
+-  return "*INVALID-ENUM-VALUE*";
+-}
+-
+-
+-// return a valid string no matter what state the thread is in
+-const char *JvmtiTrace::safe_get_thread_name(Thread *thread) {
+-  if (thread == NULL) {
+-    return "NULL";
+-  }
+-  if (!thread->is_Java_thread()) {
+-    return thread->name();
+-  }
+-  JavaThread *java_thread = (JavaThread *)thread;
+-  oop threadObj = java_thread->threadObj();
+-  if (threadObj == NULL) {
+-    return "NULL";
+-  }
+-  typeArrayOop name = java_lang_Thread::name(threadObj);
+-  if (name == NULL) {
+-    return "<NOT FILLED IN>";
+-  }
+-  return UNICODE::as_utf8((jchar*) name->base(T_CHAR), name->length());
+-}
+-    
+-
+-// return the name of the current thread
+-const char *JvmtiTrace::safe_get_current_thread_name() {
+-  if (JvmtiEnv::is_vm_live()) {
+-    return JvmtiTrace::safe_get_thread_name(Thread::current());
+-  } else {
+-    return "VM not live";
+-  }
+-}
+-
+-// return a valid string no matter what the state of k_mirror
+-const char * JvmtiTrace::get_class_name(oop k_mirror) {
+-  if (java_lang_Class::is_primitive(k_mirror)) {
+-    return "primitive";
+-  }
+-  klassOop k_oop = java_lang_Class::as_klassOop(k_mirror);
+-  if (k_oop == NULL) {
+-    return "INVALID";
+-  }
+-  return Klass::cast(k_oop)->external_name();
+-}
+-
+-#endif /*JVMTI_TRACE */
+ 
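This hunk deletes nearly three hundred lines, JvmtiUtil's single-threaded ResourceArea plus the whole -XX:TraceJVMTI flag parser, from jvmtiImpl.cpp; in the OpenJDK 7 HotSpot this patch moves toward, that support code appears to live in dedicated files rather than being dropped outright. Decoding the removed usage comment with its own example flag:

    -XX:TraceJVMTI=ec+,GetCallerFrame+ie,Breakpoint+s

turns on event-controller tracing (ec+), adds input and error-return tracing for the GetCallerFrame function (+ie), and adds sent-event tracing for the Breakpoint event (+s).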
+ //
+ // class GrowableCache - private methods
+@@ -409,7 +121,7 @@
+ 
+ GrowableCache::GrowableCache() {
+   _this_obj       = NULL;
+-  _listener_fun   = NULL;    
++  _listener_fun   = NULL;
+   _elements       = NULL;
+   _cache          = NULL;
+ }
+@@ -422,14 +134,14 @@
+ 
+ void GrowableCache::initialize(void *this_obj, void listener_fun(void *, address*) ) {
+   _this_obj       = this_obj;
+-  _listener_fun   = listener_fun;    
++  _listener_fun   = listener_fun;
+   _elements       = new (ResourceObj::C_HEAP) GrowableArray<GrowableElement*>(5,true);
+   recache();
+ }
+ 
+ // number of elements in the collection
+-int GrowableCache::length() { 
+-  return _elements->length(); 
++int GrowableCache::length() {
++  return _elements->length();
+ }
+ 
+ // get the value of the index element in the collection
+@@ -438,7 +150,7 @@
+   assert(e != NULL, "e != NULL");
+   return e;
+ }
+- 
++
+ int GrowableCache::find(GrowableElement* e) {
+   return _elements->find(e, GrowableCache::equals);
+ }
+@@ -545,7 +257,7 @@
+ JvmtiBreakpoint::JvmtiBreakpoint(methodOop m_method, jlocation location) {
+   _method        = m_method;
+   assert(_method != NULL, "_method != NULL");
+-  _bci           = (int) location;  
++  _bci           = (int) location;
+ #ifdef CHECK_UNHANDLED_OOPS
+   // Could be allocated with new and wouldn't be on the unhandled oop list.
+   Thread *thread = Thread::current();
+@@ -554,7 +266,7 @@
+   }
+ #endif // CHECK_UNHANDLED_OOPS
+ 
+-  assert(_bci >= 0, "_bci >= 0"); 
++  assert(_bci >= 0, "_bci >= 0");
+ }
+ 
+ void JvmtiBreakpoint::copy(JvmtiBreakpoint& bp) {
+@@ -592,7 +304,7 @@
+   symbolOop m_name = _method->name();
+   symbolOop m_signature = _method->signature();
+ 
+-  { 
++  {
+     ResourceMark rm(thread);
+     // PreviousVersionInfo objects returned via PreviousVersionWalker
+     // contain a GrowableArray of handles. We have to clean up the
+@@ -689,7 +401,7 @@
+ }
+ 
+ //
+-// class JvmtiBreakpoints 
++// class JvmtiBreakpoints
+ //
+ // a JVMTI internal collection of JvmtiBreakpoint
+ //
+@@ -700,13 +412,13 @@
+ 
+ JvmtiBreakpoints:: ~JvmtiBreakpoints() {}
+ 
+-void  JvmtiBreakpoints::oops_do(OopClosure* f) {  
++void  JvmtiBreakpoints::oops_do(OopClosure* f) {
+   _bps.oops_do(f);
+-} 
++}
+ 
+-void  JvmtiBreakpoints::gc_epilogue() {  
++void  JvmtiBreakpoints::gc_epilogue() {
+   _bps.gc_epilogue();
+-} 
++}
+ 
+ void  JvmtiBreakpoints::print() {
+ #ifndef PRODUCT
+@@ -727,7 +439,7 @@
+   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+ 
+   int i = _bps.find(bp);
+-  if (i == -1) { 
++  if (i == -1) {
+     _bps.append(bp);
+     bp.set();
+   }
+@@ -752,7 +464,7 @@
+   }
+   _bps.clear();
+ }
+- 
++
+ int JvmtiBreakpoints::length() { return _bps.length(); }
+ 
+ int JvmtiBreakpoints::set(JvmtiBreakpoint& bp) {
+@@ -807,7 +519,7 @@
+ 
+ //
+ // class JvmtiCurrentBreakpoints
+-// 
++//
+ 
+ JvmtiBreakpoints *JvmtiCurrentBreakpoints::_jvmti_breakpoints  = NULL;
+ address *         JvmtiCurrentBreakpoints::_breakpoint_list    = NULL;
+@@ -820,7 +532,7 @@
+   return (*_jvmti_breakpoints);
+ }
+ 
+-void  JvmtiCurrentBreakpoints::listener_fun(void *this_obj, address *cache) { 
++void  JvmtiCurrentBreakpoints::listener_fun(void *this_obj, address *cache) {
+   JvmtiBreakpoints *this_jvmti = (JvmtiBreakpoints *) this_obj;
+   assert(this_jvmti != NULL, "this_jvmti != NULL");
+ 
+@@ -831,13 +543,13 @@
+ }
+ 
+ 
+-void JvmtiCurrentBreakpoints::oops_do(OopClosure* f) { 
++void JvmtiCurrentBreakpoints::oops_do(OopClosure* f) {
+   if (_jvmti_breakpoints != NULL) {
+     _jvmti_breakpoints->oops_do(f);
+   }
+ }
+ 
+-void JvmtiCurrentBreakpoints::gc_epilogue() { 
++void JvmtiCurrentBreakpoints::gc_epilogue() {
+   if (_jvmti_breakpoints != NULL) {
+     _jvmti_breakpoints->gc_epilogue();
+   }
+@@ -859,7 +571,7 @@
+   , _set(false)
+   , _jvf(NULL)
+   , _result(JVMTI_ERROR_NONE)
+-{  
++{
+ }
+ 
+ // Constructor for object or non-object setter
+@@ -947,7 +659,7 @@
+     }
+   }
+   // Compare secondary supers
+-  objArrayOop sec_supers = klass->secondary_supers(); 
++  objArrayOop sec_supers = klass->secondary_supers();
+   for (idx = 0; idx < sec_supers->length(); idx++) {
+     if (Klass::cast((klassOop) sec_supers->obj_at(idx))->name() == ty_sym()) {
+       return true;
+@@ -960,7 +672,7 @@
+ //   JVMTI_ERROR_INVALID_SLOT
+ //   JVMTI_ERROR_TYPE_MISMATCH
+ // Returns: 'true' - everything is Ok, 'false' - error code
+-  
++
+ bool VM_GetOrSetLocal::check_slot_type(javaVFrame* jvf) {
+   methodOop method_oop = jvf->method();
+   if (!method_oop->has_localvariable_table()) {
+@@ -976,7 +688,7 @@
+   jint num_entries = method_oop->localvariable_table_length();
+   if (num_entries == 0) {
+     _result = JVMTI_ERROR_INVALID_SLOT;
+-    return false;	// There are no slots
++    return false;       // There are no slots
+   }
+   int signature_idx = -1;
+   int vf_bci = jvf->bci();
+@@ -994,7 +706,7 @@
+   }
+   if (signature_idx == -1) {
+     _result = JVMTI_ERROR_INVALID_SLOT;
+-    return false;	// Incorrect slot index
++    return false;       // Incorrect slot index
+   }
+   symbolOop   sign_sym  = method_oop->constants()->symbol_at(signature_idx);
+   const char* signature = (const char *) sign_sym->as_utf8();
+@@ -1010,7 +722,7 @@
+   case T_ARRAY:
+     slot_type = T_OBJECT;
+     break;
+-  };    
++  };
+   if (_type != slot_type) {
+     _result = JVMTI_ERROR_TYPE_MISMATCH;
+     return false;
+@@ -1039,7 +751,7 @@
+   return (vf->is_compiled_frame() && vf->fr().can_be_deoptimized());
+ }
+ 
+-bool VM_GetOrSetLocal::doit_prologue() { 
++bool VM_GetOrSetLocal::doit_prologue() {
+   _jvf = get_java_vframe();
+   NULL_CHECK(_jvf, false);
+ 
+@@ -1135,7 +847,7 @@
+ // class JvmtiSuspendControl - see comments in jvmtiImpl.hpp
+ //
+ 
+-bool JvmtiSuspendControl::suspend(JavaThread *java_thread) {  
++bool JvmtiSuspendControl::suspend(JavaThread *java_thread) {
+   // external suspend should have caught suspending a thread twice
+ 
+   // Immediate suspension required for JPDA back-end so JVMTI agent threads do
+@@ -1162,17 +874,17 @@
+   return true;
+ }
+ 
+-bool JvmtiSuspendControl::resume(JavaThread *java_thread) {  
++bool JvmtiSuspendControl::resume(JavaThread *java_thread) {
+   // external suspend should have caught resuming a thread twice
+   assert(java_thread->is_being_ext_suspended(), "thread should be suspended");
+ 
+   // resume thread
+   {
+     // must always grab Threads_lock, see JVM_SuspendThread
+-    MutexLocker ml(Threads_lock);  
+-    java_thread->java_resume(); 
++    MutexLocker ml(Threads_lock);
++    java_thread->java_resume();
+   }
+- 
++
+   return true;
+ }
+ 
+@@ -1196,5 +908,5 @@
+     tty->print(") ");
+   }
+   tty->print_cr("]");
+-#endif  
++#endif
+ }
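JvmtiSuspendControl::resume() above shows the standard HotSpot locking idiom: MutexLocker acquires Threads_lock in its constructor and releases it when the enclosing block exits. A hedged standalone sketch of the shape (the real class also carries safepoint-check logic omitted here):

    class Mutex { public: void lock(); void unlock(); };

    class MutexLocker {
      Mutex *_mutex;
     public:
      explicit MutexLocker(Mutex *mutex) : _mutex(mutex) { _mutex->lock(); }
      ~MutexLocker() { _mutex->unlock(); }   // runs on every exit path
    };

    // usage, as in resume():
    //   { MutexLocker ml(Threads_lock); java_thread->java_resume(); }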
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiImpl.hpp openjdk/hotspot/src/share/vm/prims/jvmtiImpl.hpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiImpl.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiImpl.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jvmtiImpl.hpp	1.102 07/05/23 10:53:50 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+@@ -35,7 +32,7 @@
+ 
+ 
+ ///////////////////////////////////////////////////////////////
+-// 
++//
+ // class GrowableCache, GrowableElement
+ // Used by              : JvmtiBreakpointCache
+ // Used by JVMTI methods: none directly.
+@@ -43,12 +40,12 @@
+ // GrowableCache is a permanent CHeap growable array of <GrowableElement *>
+ //
+ // In addition, the GrowableCache maintains a NULL terminated cache array of type address
+-// that's created from the element array using the function: 
+-//     address GrowableElement::getCacheValue(). 
++// that's created from the element array using the function:
++//     address GrowableElement::getCacheValue().
+ //
+ // Whenever the GrowableArray changes size, the cache array gets recomputed into a new C_HEAP allocated
+ // block of memory. Additionally, every time the cache changes its position in memory, the
+-//    void (*_listener_fun)(void *this_obj, address* cache) 
++//    void (*_listener_fun)(void *this_obj, address* cache)
+ // gets called with the cache's new address. This gives the user of the GrowableCache a callback
+ // to update its pointer to the address cache.
+ //
+@@ -75,7 +72,7 @@
+   address *_cache;
+ 
+   // Listener for changes to the _cache field.
+-  // Called whenever the _cache field has it's value changed  
++  // Called whenever the _cache field has its value changed
+   // (but NOT when cached elements are recomputed).
+   void (*_listener_fun)(void *, address*);
+ 
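This listener hook is how JvmtiCurrentBreakpoints::listener_fun() in jvmtiImpl.cpp above keeps the raw _breakpoint_list pointer valid: whenever the cache array is reallocated to a new address, the owner is called back with that address. A minimal sketch of the contract with hypothetical consumer names:

    static address *s_cache_view;                 // consumer's raw view of the cache

    static void cache_moved(void *owner, address *new_cache) {
      s_cache_view = new_cache;                   // re-point at the relocated block
    }

    // registered once via initialize(this, &cache_moved);
    // recache() fires it whenever the cache block moves.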
+@@ -83,8 +80,8 @@
+ 
+   // recache all elements after size change, notify listener
+   void recache();
+-  
+-public:  
++
++public:
+    GrowableCache();
+    ~GrowableCache();
+ 
+@@ -97,9 +94,9 @@
+   // find the index of the element, -1 if it doesn't exist
+   int find(GrowableElement* e);
+   // append a copy of the element to the end of the collection, notify listener
+-  void append(GrowableElement* e);  
++  void append(GrowableElement* e);
+   // insert a copy of the element using lessthan(), notify listener
+-  void insert(GrowableElement* e);  
++  void insert(GrowableElement* e);
+   // remove the element at index, notify listener
+   void remove (int index);
+   // clear out all elements and release all heap space, notify listener
+@@ -122,8 +119,8 @@
+ 
+ private:
+   GrowableCache _cache;
+-  
+-public:  
++
++public:
+   JvmtiBreakpointCache()  {}
+   ~JvmtiBreakpointCache() {}
+ 
+@@ -155,12 +152,12 @@
+ typedef void (methodOopDesc::*method_action)(int _bci);
+ 
+ class JvmtiBreakpoint : public GrowableElement {
+-private:   
+-  methodOop 		_method;
+-  int       		_bci;
+-  Bytecodes::Code 	_orig_bytecode;
++private:
++  methodOop             _method;
++  int                   _bci;
++  Bytecodes::Code       _orig_bytecode;
+ 
+-public: 
++public:
+   JvmtiBreakpoint();
+   JvmtiBreakpoint(methodOop m_method, jlocation location);
+   bool equals(JvmtiBreakpoint& bp);
+@@ -203,7 +200,7 @@
+   JvmtiBreakpoints* _breakpoints;
+   int               _operation;
+   JvmtiBreakpoint*  _bp;
+-  
++
+ public:
+   enum { SET_BREAKPOINT=0, CLEAR_BREAKPOINT=1, CLEAR_ALL_BREAKPOINT=2 };
+ 
+@@ -221,17 +218,17 @@
+     assert(breakpoints != NULL, "breakpoints != NULL");
+     assert(bp != NULL, "bp != NULL");
+     assert(operation == SET_BREAKPOINT || operation == CLEAR_BREAKPOINT , "unknown breakpoint operation");
+-  }    
++  }
+ 
+   VMOp_Type type() const { return VMOp_ChangeBreakpoints; }
+   void doit();
+-  void oops_do(OopClosure* f);   
++  void oops_do(OopClosure* f);
+ };
+- 
++
+ 
+ ///////////////////////////////////////////////////////////////
+ //
+-// class JvmtiBreakpoints 
++// class JvmtiBreakpoints
+ // Used by              : JvmtiCurrentBreakpoints
+ // Used by JVMTI methods: none directly
+ // Note: A Helper class
+@@ -243,14 +240,14 @@
+ // cached byte code pointers from _bps without doing any synchronization (see JvmtiCurrentBreakpoints).
+ //
+ // It would be possible to make JvmtiBreakpoints a static class, but I've made it
+-// CHeap allocated to emphasize its similarity to JvmtiFramePops. 
++// CHeap allocated to emphasize its similarity to JvmtiFramePops.
+ //
+ 
+ class JvmtiBreakpoints : public CHeapObj {
+ private:
+ 
+   JvmtiBreakpointCache _bps;
+-  
++
+   // These should only be used by VM_ChangeBreakpoints
+   // to ensure they only occur at safepoints.
+   // Todo: add checks for safepoint
+@@ -261,9 +258,9 @@
+ 
+   static void do_element(GrowableElement *e);
+ 
+-public:  
++public:
+   JvmtiBreakpoints(void listener_fun(void *, address *));
+-  ~JvmtiBreakpoints(); 
++  ~JvmtiBreakpoints();
+ 
+   int length();
+   void oops_do(OopClosure* f);
+@@ -273,7 +270,7 @@
+   int  set(JvmtiBreakpoint& bp);
+   int  clear(JvmtiBreakpoint& bp);
+   void clearall_in_class_at_safepoint(klassOop klass);
+-  void clearall(); 
++  void clearall();
+ };
+ 
+ 
+@@ -284,7 +281,7 @@
+ // A static wrapper class for the JvmtiBreakpoints that provides:
+ // 1. a fast inlined function to check if a byte code pointer is a breakpoint (is_breakpoint).
+ // 2. a function for lazily creating the JvmtiBreakpoints class (this is not strictly necessary,
+-//    but I'm copying the code from JvmtiThreadState which needs to lazily initialize 
++//    but I'm copying the code from JvmtiThreadState which needs to lazily initialize
+ //    JvmtiFramePops).
+ // 3. An oops_do entry point for GC'ing the breakpoint array.
+ //
+@@ -297,7 +294,7 @@
+   static JvmtiBreakpoints *_jvmti_breakpoints;
+ 
+   // NULL terminated cache of byte-code pointers corresponding to current breakpoints.
+-  // Updated only at safepoints (with listener_fun) when the cache is moved. 
++  // Updated only at safepoints (with listener_fun) when the cache is moved.
+   // It exists only to make is_breakpoint fast.
+   static address          *_breakpoint_list;
+   static inline void set_breakpoint_list(address *breakpoint_list) { _breakpoint_list = breakpoint_list; }
+@@ -332,7 +329,7 @@
+ 
+ 
+ ///////////////////////////////////////////////////////////////
+-// 
++//
+ // class JvmtiRawMonitor
+ //
+ // Used by JVMTI methods: All RawMonitor methods (CreateRawMonitor, EnterRawMonitor, etc.)
+@@ -346,13 +343,13 @@
+   char *        _name;
+   // JVMTI_RM_MAGIC is set in constructor and unset in destructor.
+   enum { JVMTI_RM_MAGIC = (int)(('T' << 24) | ('I' << 16) | ('R' << 8) | 'M') };
+-    
++
+ public:
+   JvmtiRawMonitor(const char *name);
+   ~JvmtiRawMonitor();
+   int            magic()   { return _magic;  }
+   const char *get_name()   { return _name; }
+-  bool        is_valid()   { return _magic == JVMTI_RM_MAGIC;  } 
++  bool        is_valid()   { return _magic == JVMTI_RM_MAGIC;  }
+ };
+ 
+ // Onload pending raw monitors
+@@ -374,9 +371,9 @@
+   static void enter(JvmtiRawMonitor *monitor) {
+     monitors()->append(monitor);
+   }
+-    
++
+   static int count() {
+-    return monitors()->length();            
++    return monitors()->length();
+   }
+ 
+   static void destroy(JvmtiRawMonitor *monitor) {
+@@ -384,7 +381,7 @@
+       monitors()->remove(monitor);
+     }
+   }
+-    
++
+   // Return false if monitor is not found in the list.
+   static bool exit(JvmtiRawMonitor *monitor) {
+     if (monitors()->contains(monitor)) {
+@@ -394,7 +391,7 @@
+       return false;
+     }
+   }
+-    
++
+   static void transition_raw_monitors();
+ };
+ 
+@@ -460,8 +457,8 @@
+ // class JvmtiSuspendControl
+ //
+ // Convenience routines for suspending and resuming threads.
+-// 
+-// All attempts by JVMTI to suspend and resume threads must go through the 
++//
++// All attempts by JVMTI to suspend and resume threads must go through the
+ // JvmtiSuspendControl interface.
+ //
+ // methods return true if successful
+@@ -476,147 +473,5 @@
+   static void print();
+ };
+ 
+-
+-///////////////////////////////////////////////////////////////
+-//
+-// class JvmtiUtil
+-//
+-// class for miscellaneous jvmti utility static methods
+-//
+-
+-class JvmtiUtil : AllStatic {
+-
+-  static ResourceArea* _single_threaded_resource_area;
+-
+-  static const char* _error_names[];
+-  static const bool  _event_threaded[];
+-
+-public:
+-
+-  static ResourceArea* single_threaded_resource_area();
+-
+-  static const char* error_name(int num)    { return _error_names[num]; }    // To Do: add range checking
+-
+-  static const bool has_event_capability(jvmtiEvent event_type, const jvmtiCapabilities* capabilities_ptr);
+-
+-  static const bool  event_threaded(int num) {
+-    if (num >= JVMTI_MIN_EVENT_TYPE_VAL && num <= JVMTI_MAX_EVENT_TYPE_VAL) {
+-      return _event_threaded[num];
+-    }
+-    if (num >= EXT_MIN_EVENT_TYPE_VAL && num <= EXT_MAX_EVENT_TYPE_VAL) {
+-      return false;
+-    }
+-    ShouldNotReachHere();
+-    return false;
+-  }
+-};
+-
+-
+-///////////////////////////////////////////////////////////////
+-//
+-// class SafeResourceMark
+-//
+-// ResourceMarks that work before threads exist
+-//
+-
+-class SafeResourceMark : public ResourceMark {
+-
+-  ResourceArea* safe_resource_area() {
+-    Thread* thread;
+-
+-    if (Threads::number_of_threads() == 0) {
+-      return JvmtiUtil::single_threaded_resource_area();
+-    }
+-    thread = ThreadLocalStorage::thread();
+-    if (thread == NULL) {
+-      return JvmtiUtil::single_threaded_resource_area();
+-    }
+-    return thread->resource_area();
+-  }
+-
+- public:
+-
+-  SafeResourceMark() : ResourceMark(safe_resource_area()) {}
+-
+-};
+-
+-
+-///////////////////////////////////////////////////////////////
+-//
+-// class JvmtiTrace
+-//
+-// Support for JVMTI tracing code
+-//
+-
+-// Support tracing except in product build on the client compiler
+-#ifndef PRODUCT
+-#define JVMTI_TRACE 1
+-#else
+-#ifdef COMPILER2
+-#define JVMTI_TRACE 1
+-#endif
+-#endif
+-
+-#ifdef JVMTI_TRACE
+-
+-class JvmtiTrace : AllStatic {
+-
+-  static bool        _initialized;
+-  static bool        _on;
+-  static bool        _trace_event_controller;
+-  static jbyte       _trace_flags[];
+-  static jbyte       _event_trace_flags[];
+-  static const char* _event_names[];
+-  static jint        _max_function_index;
+-  static jint        _max_event_index;
+-  static short       _exclude_functions[];
+-  static const char* _function_names[];
+-
+-public:
+-
+-  enum {
+-    SHOW_IN =              01,
+-    SHOW_OUT =             02,
+-    SHOW_ERROR =           04,
+-    SHOW_IN_DETAIL =      010,
+-    SHOW_OUT_DETAIL =     020,
+-    SHOW_EVENT_TRIGGER =  040,
+-    SHOW_EVENT_SENT =    0100
+-  };
+-
+-  static bool tracing()                     { return _on; }
+-  static bool trace_event_controller()      { return _trace_event_controller; }
+-  static jbyte trace_flags(int num)         { return _trace_flags[num]; }
+-  static jbyte event_trace_flags(int num)   { return _event_trace_flags[num]; }
+-  static const char* function_name(int num) { return _function_names[num]; } // To Do: add range checking
+-
+-  static const char* event_name(int num) {
+-    static char* ext_event_name = (char*)"(extension event)";
+-    if (num >= JVMTI_MIN_EVENT_TYPE_VAL && num <= JVMTI_MAX_EVENT_TYPE_VAL) {
+-      return _event_names[num];
+-    } else {
+-      return ext_event_name;
+-    }
+-  }
+-
+-  static const char* enum_name(const char** names, const jint* values, jint value);
+-
+-  static void initialize();
+-  static void shutdown();
+-
+-  // return a valid string no matter what state the thread is in
+-  static const char *safe_get_thread_name(Thread *thread);
+-    
+-  // return the name of the current thread
+-  static const char *safe_get_current_thread_name();
+-       
+-  // return a valid string no matter what the state of k_mirror
+-  static const char *get_class_name(oop k_mirror);
+-};
+-
+-#endif /*JVMTI_TRACE */
+-
+-
+ // Utility macro that checks for NULL pointers:
+-#define NULL_CHECK(X, Y) if ((X) == NULL) { return (Y); } 
+-
++#define NULL_CHECK(X, Y) if ((X) == NULL) { return (Y); }
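The NULL_CHECK macro retained above gives JVMTI code a one-line guard: if a pointer is NULL, return the given error code immediately. A minimal sketch of how such a guard reads in practice; the function below is hypothetical, with only the macro itself and the standard JVMTI error constants taken as given:

    // Hypothetical JVMTI-style helper guarded by NULL_CHECK: each check
    // returns early with the supplied error code if the pointer is NULL.
    jvmtiError get_monitor_magic(JvmtiRawMonitor* monitor, jint* magic_ptr) {
      NULL_CHECK(monitor, JVMTI_ERROR_INVALID_MONITOR);
      NULL_CHECK(magic_ptr, JVMTI_ERROR_NULL_POINTER);
      *magic_ptr = monitor->magic();
      return JVMTI_ERROR_NONE;
    }

Because the macro expands to a bare if statement rather than the usual do { } while (0) wrapper, it is safest used as a full statement inside a braced block, as above.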
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiLib.xsl openjdk/hotspot/src/share/vm/prims/jvmtiLib.xsl
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiLib.xsl	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiLib.xsl	2008-01-31 09:19:01.000000000 -0500
+@@ -27,18 +27,8 @@
+ <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+                 version="1.0">
+ 
+-  <xsl:variable name="microversionbase" select="100"/>
+-
+   <xsl:template name="microversion">
+-    <xsl:variable name="micro" select="substring-after(//specification/@onedotmicroversion, '1.')"/>
+-    <xsl:choose>
+-      <xsl:when test="string($micro)=''">
+-        <xsl:text>dev</xsl:text>
+-      </xsl:when>
+-      <xsl:otherwise>
+-        <xsl:value-of select="$micro - $microversionbase"/>
+-      </xsl:otherwise>
+-    </xsl:choose>
++    <xsl:value-of select="//specification/@microversion"/>
+   </xsl:template>
+ 
+   <xsl:template name="showbasicversion">
+@@ -53,32 +43,21 @@
+     <xsl:call-template name="microversion"/>
+   </xsl:template>
+ 
+-  <xsl:template name="includeHeader">
+-    <xsl:text>#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jvmtiLib.xsl	1.43 07/05/05 17:06:39 JVM"
+-#endif
+-/*
+- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+- * SUN PROPRIETARY/CONFIDENTIAL.  Use is subject to license terms.
+- */
+-
+-    /* AUTOMATICALLY GENERATED FILE - DO NOT EDIT */
++  <xsl:template name="copyrightComment">
++    <xsl:text>/* </xsl:text>
++    <!-- Copy the Copyright comment from jvmti.xml -->
++    <xsl:value-of select="/comment()[position()=1]"/>
++    <xsl:text> */ &#xA;&#xA;</xsl:text>
++  </xsl:template>
+ 
+-</xsl:text>    
++  <xsl:template name="includeHeader">
++    <xsl:call-template name="copyrightComment"/>
++    <xsl:text> /* AUTOMATICALLY GENERATED FILE - DO NOT EDIT */ &#xA;</xsl:text>    
+   </xsl:template>
+ 
+   <xsl:template name="sourceHeader">
+-    <xsl:text>#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)jvmtiLib.xsl	1.43 07/05/05 17:06:39 JVM"
+-#endif
+-/*
+- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+- * SUN PROPRIETARY/CONFIDENTIAL.  Use is subject to license terms.
+- */
+-
+-  // AUTOMATICALLY GENERATED FILE - DO NOT EDIT
+-
+-</xsl:text>    
++    <xsl:call-template name="copyrightComment"/>
++    <xsl:text> // AUTOMATICALLY GENERATED FILE - DO NOT EDIT &#xA;</xsl:text>    
+   </xsl:template>
+ 
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiManageCapabilities.cpp openjdk/hotspot/src/share/vm/prims/jvmtiManageCapabilities.cpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiManageCapabilities.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiManageCapabilities.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)jvmtiManageCapabilities.cpp	1.44 07/05/05 17:06:40 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,20 +19,20 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ # include "incls/_precompiled.incl"
+ # include "incls/_jvmtiManageCapabilities.cpp.incl"
+ 
+-static const jint CAPA_SIZE = (JVMTI_INTERNAL_CAPABILITY_COUNT + 7) / 8; 
++static const jint CAPA_SIZE = (JVMTI_INTERNAL_CAPABILITY_COUNT + 7) / 8;
+ 
+-  // capabilities which are always potentially available 
++  // capabilities which are always potentially available
+ jvmtiCapabilities JvmtiManageCapabilities::always_capabilities;
+ 
+   // capabilities which are potentially available during OnLoad
+ jvmtiCapabilities JvmtiManageCapabilities::onload_capabilities;
+ 
+-  // capabilities which are always potentially available 
++  // capabilities which are always potentially available
+   // but to only one environment
+ jvmtiCapabilities JvmtiManageCapabilities::always_solo_capabilities;
+ 
+@@ -43,7 +40,7 @@
+   // but to only one environment
+ jvmtiCapabilities JvmtiManageCapabilities::onload_solo_capabilities;
+ 
+-  // remaining capabilities which are always potentially available 
++  // remaining capabilities which are always potentially available
+   // but to only one environment
+ jvmtiCapabilities JvmtiManageCapabilities::always_solo_remaining_capabilities;
+ 
+@@ -82,9 +79,9 @@
+ 
+ 
+ // corresponding init functions
+-jvmtiCapabilities JvmtiManageCapabilities::init_always_capabilities() { 
++jvmtiCapabilities JvmtiManageCapabilities::init_always_capabilities() {
+   jvmtiCapabilities jc;
+-  
++
+   memset(&jc, 0, sizeof(jc));
+   jc.can_get_bytecodes = 1;
+   jc.can_signal_thread = 1;
+@@ -112,11 +109,11 @@
+   jc.can_generate_resource_exhaustion_heap_events = 1;
+   jc.can_generate_resource_exhaustion_threads_events = 1;
+   return jc;
+-} 
++}
+ 
+ jvmtiCapabilities JvmtiManageCapabilities::init_onload_capabilities() {
+   jvmtiCapabilities jc;
+-  
++
+   memset(&jc, 0, sizeof(jc));
+   jc.can_pop_frame = 1;
+   jc.can_force_early_return = 1;
+@@ -141,7 +138,7 @@
+ 
+ jvmtiCapabilities JvmtiManageCapabilities::init_always_solo_capabilities() {
+   jvmtiCapabilities jc;
+-  
++
+   memset(&jc, 0, sizeof(jc));
+   jc.can_suspend = 1;
+   return jc;
+@@ -150,7 +147,7 @@
+ 
+ jvmtiCapabilities JvmtiManageCapabilities::init_onload_solo_capabilities() {
+   jvmtiCapabilities jc;
+-  
++
+   memset(&jc, 0, sizeof(jc));
+   jc.can_generate_field_modification_events = 1;
+   jc.can_generate_field_access_events = 1;
+@@ -173,7 +170,7 @@
+ }
+ 
+ 
+-jvmtiCapabilities *JvmtiManageCapabilities::both(const jvmtiCapabilities *a, const jvmtiCapabilities *b, 
++jvmtiCapabilities *JvmtiManageCapabilities::both(const jvmtiCapabilities *a, const jvmtiCapabilities *b,
+                                                     jvmtiCapabilities *result) {
+   char *ap = (char *)a;
+   char *bp = (char *)b;
+@@ -187,7 +184,7 @@
+ }
+ 
+ 
+-jvmtiCapabilities *JvmtiManageCapabilities::exclude(const jvmtiCapabilities *a, const jvmtiCapabilities *b, 
++jvmtiCapabilities *JvmtiManageCapabilities::exclude(const jvmtiCapabilities *a, const jvmtiCapabilities *b,
+                                                     jvmtiCapabilities *result) {
+   char *ap = (char *)a;
+   char *bp = (char *)b;
+@@ -224,8 +221,8 @@
+ }
+ 
+ 
+-void JvmtiManageCapabilities::get_potential_capabilities(const jvmtiCapabilities *current, 
+-                                                         const jvmtiCapabilities *prohibited, 
++void JvmtiManageCapabilities::get_potential_capabilities(const jvmtiCapabilities *current,
++                                                         const jvmtiCapabilities *prohibited,
+                                                          jvmtiCapabilities *result) {
+   // exclude prohibited capabilities, must be before adding current
+   exclude(&always_capabilities, prohibited, result);
+@@ -233,7 +230,7 @@
+   // must include current since it may possess solo capabilities and now prohibited
+   either(result, current, result);
+ 
+-  // add other remaining 
++  // add other remaining
+   either(result, &always_solo_remaining_capabilities, result);
+ 
+   // if this is during OnLoad more capabilities are available
+@@ -244,8 +241,8 @@
+ }
+ 
+ jvmtiError JvmtiManageCapabilities::add_capabilities(const jvmtiCapabilities *current,
+-                                                     const jvmtiCapabilities *prohibited, 
+-                                                     const jvmtiCapabilities *desired, 
++                                                     const jvmtiCapabilities *prohibited,
++                                                     const jvmtiCapabilities *desired,
+                                                      jvmtiCapabilities *result) {
+   // check that the capabilities being added are potential capabilities
+   jvmtiCapabilities temp;
+@@ -281,7 +278,7 @@
+ 
+ 
+ void JvmtiManageCapabilities::relinquish_capabilities(const jvmtiCapabilities *current,
+-                                                      const jvmtiCapabilities *unwanted, 
++                                                      const jvmtiCapabilities *unwanted,
+                                                       jvmtiCapabilities *result) {
+   jvmtiCapabilities to_trash;
+   jvmtiCapabilities temp;
+@@ -290,7 +287,7 @@
+   both(current, unwanted, &to_trash);
+ 
+   // restore solo capabilities but only those that belong
+-  either(&always_solo_remaining_capabilities, both(&always_solo_capabilities, &to_trash, &temp), 
++  either(&always_solo_remaining_capabilities, both(&always_solo_capabilities, &to_trash, &temp),
+          &always_solo_remaining_capabilities);
+   either(&onload_solo_remaining_capabilities, both(&onload_solo_capabilities, &to_trash, &temp),
+          &onload_solo_remaining_capabilities);
+@@ -308,14 +305,14 @@
+   // all capabilities
+   either(&always_capabilities, &always_solo_capabilities, &avail);
+ 
+-  bool interp_events = 
++  bool interp_events =
+     avail.can_generate_field_access_events ||
+     avail.can_generate_field_modification_events ||
+     avail.can_generate_single_step_events ||
+     avail.can_generate_frame_pop_events ||
+     avail.can_generate_method_entry_events ||
+     avail.can_generate_method_exit_events;
+-  bool enter_all_methods = 
++  bool enter_all_methods =
+     interp_events ||
+     avail.can_generate_breakpoint_events;
+   UseFastEmptyMethods = !enter_all_methods;
+@@ -327,7 +324,7 @@
+ 
+   // If can_redefine_classes is enabled in the onload phase then we know that the
+   // dependency information recorded by the compiler is complete.
+-  if ((avail.can_redefine_classes || avail.can_retransform_classes) && 
++  if ((avail.can_redefine_classes || avail.can_retransform_classes) &&
+       JvmtiEnv::get_phase() == JVMTI_PHASE_ONLOAD) {
+     JvmtiExport::set_all_dependencies_are_recorded(true);
+   }
+@@ -335,7 +332,7 @@
+   JvmtiExport::set_can_get_source_debug_extension(avail.can_get_source_debug_extension);
+   JvmtiExport::set_can_examine_or_deopt_anywhere(
+     avail.can_generate_breakpoint_events ||
+-    interp_events || 
++    interp_events ||
+     avail.can_redefine_classes ||
+     avail.can_retransform_classes ||
+     avail.can_access_local_variables ||
+@@ -377,86 +374,84 @@
+ 
+ void JvmtiManageCapabilities:: print(const jvmtiCapabilities* cap) {
+   tty->print_cr("----- capabilities -----");
+-  if (cap->can_tag_objects)				    
++  if (cap->can_tag_objects)
+     tty->print_cr("can_tag_objects");
+-  if (cap->can_generate_field_modification_events)	    
++  if (cap->can_generate_field_modification_events)
+     tty->print_cr("can_generate_field_modification_events");
+-  if (cap->can_generate_field_access_events)		    
++  if (cap->can_generate_field_access_events)
+     tty->print_cr("can_generate_field_access_events");
+-  if (cap->can_get_bytecodes)				    
++  if (cap->can_get_bytecodes)
+     tty->print_cr("can_get_bytecodes");
+-  if (cap->can_get_synthetic_attribute)			    
++  if (cap->can_get_synthetic_attribute)
+     tty->print_cr("can_get_synthetic_attribute");
+-  if (cap->can_get_owned_monitor_info)			    
++  if (cap->can_get_owned_monitor_info)
+     tty->print_cr("can_get_owned_monitor_info");
+-  if (cap->can_get_current_contended_monitor)		    
++  if (cap->can_get_current_contended_monitor)
+     tty->print_cr("can_get_current_contended_monitor");
+-  if (cap->can_get_monitor_info)			    
++  if (cap->can_get_monitor_info)
+     tty->print_cr("can_get_monitor_info");
+-  if (cap->can_get_constant_pool)	    
++  if (cap->can_get_constant_pool)
+     tty->print_cr("can_get_constant_pool");
+-  if (cap->can_pop_frame)				    
++  if (cap->can_pop_frame)
+     tty->print_cr("can_pop_frame");
+-  if (cap->can_force_early_return)				    
++  if (cap->can_force_early_return)
+     tty->print_cr("can_force_early_return");
+-  if (cap->can_redefine_classes)			    
++  if (cap->can_redefine_classes)
+     tty->print_cr("can_redefine_classes");
+-  if (cap->can_retransform_classes)			    
++  if (cap->can_retransform_classes)
+     tty->print_cr("can_retransform_classes");
+-  if (cap->can_signal_thread)				    
++  if (cap->can_signal_thread)
+     tty->print_cr("can_signal_thread");
+-  if (cap->can_get_source_file_name)			    
++  if (cap->can_get_source_file_name)
+     tty->print_cr("can_get_source_file_name");
+-  if (cap->can_get_line_numbers)			    
++  if (cap->can_get_line_numbers)
+     tty->print_cr("can_get_line_numbers");
+-  if (cap->can_get_source_debug_extension)		    
++  if (cap->can_get_source_debug_extension)
+     tty->print_cr("can_get_source_debug_extension");
+-  if (cap->can_access_local_variables)			    
++  if (cap->can_access_local_variables)
+     tty->print_cr("can_access_local_variables");
+-  if (cap->can_maintain_original_method_order)		    
++  if (cap->can_maintain_original_method_order)
+     tty->print_cr("can_maintain_original_method_order");
+-  if (cap->can_generate_single_step_events)		    
++  if (cap->can_generate_single_step_events)
+     tty->print_cr("can_generate_single_step_events");
+-  if (cap->can_generate_exception_events)		    
++  if (cap->can_generate_exception_events)
+     tty->print_cr("can_generate_exception_events");
+-  if (cap->can_generate_frame_pop_events)		    
++  if (cap->can_generate_frame_pop_events)
+     tty->print_cr("can_generate_frame_pop_events");
+-  if (cap->can_generate_breakpoint_events)		    
++  if (cap->can_generate_breakpoint_events)
+     tty->print_cr("can_generate_breakpoint_events");
+-  if (cap->can_suspend)					    
++  if (cap->can_suspend)
+     tty->print_cr("can_suspend");
+-  if (cap->can_redefine_any_class )			    
++  if (cap->can_redefine_any_class )
+     tty->print_cr("can_redefine_any_class");
+-  if (cap->can_retransform_any_class )			    
++  if (cap->can_retransform_any_class )
+     tty->print_cr("can_retransform_any_class");
+-  if (cap->can_get_current_thread_cpu_time)		    
++  if (cap->can_get_current_thread_cpu_time)
+     tty->print_cr("can_get_current_thread_cpu_time");
+-  if (cap->can_get_thread_cpu_time)			    
++  if (cap->can_get_thread_cpu_time)
+     tty->print_cr("can_get_thread_cpu_time");
+-  if (cap->can_generate_method_entry_events)		    
++  if (cap->can_generate_method_entry_events)
+     tty->print_cr("can_generate_method_entry_events");
+-  if (cap->can_generate_method_exit_events)		    
++  if (cap->can_generate_method_exit_events)
+     tty->print_cr("can_generate_method_exit_events");
+-  if (cap->can_generate_all_class_hook_events)		    
++  if (cap->can_generate_all_class_hook_events)
+     tty->print_cr("can_generate_all_class_hook_events");
+-  if (cap->can_generate_compiled_method_load_events)	    
++  if (cap->can_generate_compiled_method_load_events)
+     tty->print_cr("can_generate_compiled_method_load_events");
+-  if (cap->can_generate_monitor_events)			    
++  if (cap->can_generate_monitor_events)
+     tty->print_cr("can_generate_monitor_events");
+-  if (cap->can_generate_vm_object_alloc_events)		    
++  if (cap->can_generate_vm_object_alloc_events)
+     tty->print_cr("can_generate_vm_object_alloc_events");
+-  if (cap->can_generate_native_method_bind_events)	    
++  if (cap->can_generate_native_method_bind_events)
+     tty->print_cr("can_generate_native_method_bind_events");
+-  if (cap->can_generate_garbage_collection_events)	    
++  if (cap->can_generate_garbage_collection_events)
+     tty->print_cr("can_generate_garbage_collection_events");
+-  if (cap->can_generate_object_free_events)		    
++  if (cap->can_generate_object_free_events)
+     tty->print_cr("can_generate_object_free_events");
+-  if (cap->can_generate_resource_exhaustion_heap_events)		    
++  if (cap->can_generate_resource_exhaustion_heap_events)
+     tty->print_cr("can_generate_resource_exhaustion_heap_events");
+-  if (cap->can_generate_resource_exhaustion_threads_events)		    
++  if (cap->can_generate_resource_exhaustion_threads_events)
+     tty->print_cr("can_generate_resource_exhaustion_threads_events");
+ }
+ 
+ #endif
+-
+-
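The both() and exclude() helpers touched above treat jvmtiCapabilities as a flat byte array and combine two capability sets byte by byte; CAPA_SIZE = (JVMTI_INTERNAL_CAPABILITY_COUNT + 7) / 8 is the usual round-up of a bit count to whole bytes. A self-contained sketch of the same pattern, under stated assumptions: the struct and the bit count (41) are illustrative stand-ins, not the real HotSpot definitions.

    enum { CAPA_BITS = 41 };                     // illustrative count only
    enum { CAPA_SIZE = (CAPA_BITS + 7) / 8 };    // 41 bits -> 6 bytes
    struct Caps { unsigned char b[CAPA_SIZE]; }; // stand-in for jvmtiCapabilities

    // Intersection, as in JvmtiManageCapabilities::both(): result = a AND b.
    Caps* caps_both(const Caps* a, const Caps* b, Caps* result) {
      for (int i = 0; i < CAPA_SIZE; i++)
        result->b[i] = a->b[i] & b->b[i];
      return result;
    }

    // Difference, as in exclude(): result = a AND NOT b. This is how
    // prohibited capabilities are stripped in get_potential_capabilities()
    // before the remaining solo sets are merged back in with either().
    Caps* caps_exclude(const Caps* a, const Caps* b, Caps* result) {
      for (int i = 0; i < CAPA_SIZE; i++)
        result->b[i] = a->b[i] & (unsigned char)~b->b[i];
      return result;
    }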
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiManageCapabilities.hpp openjdk/hotspot/src/share/vm/prims/jvmtiManageCapabilities.hpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiManageCapabilities.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiManageCapabilities.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jvmtiManageCapabilities.hpp	1.11 07/05/05 17:06:39 JVM"
+-#endif
+ /*
+  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #ifndef _JAVA_JVMTI_MANAGE_CAPABILITIES_H_
+@@ -36,7 +33,7 @@
+ 
+   // these four capabilities sets represent all potentially
+   // available capabilities.  They are disjoint, covering
+-  // the four cases: (OnLoad vs OnLoad+live phase) X 
++  // the four cases: (OnLoad vs OnLoad+live phase) X
+   // (one environment vs any environment).
+   static jvmtiCapabilities always_capabilities;
+   static jvmtiCapabilities onload_capabilities;
+@@ -70,15 +67,15 @@
+   static void recompute_always_capabilities();
+ 
+   // queries and actions
+-  static void get_potential_capabilities(const jvmtiCapabilities *current,  
++  static void get_potential_capabilities(const jvmtiCapabilities *current,
+                                          const jvmtiCapabilities *prohibited,
+                                          jvmtiCapabilities *result);
+-  static jvmtiError add_capabilities(const jvmtiCapabilities *current,  
++  static jvmtiError add_capabilities(const jvmtiCapabilities *current,
+                                      const jvmtiCapabilities *prohibited,
+-                                     const jvmtiCapabilities *desired, 
++                                     const jvmtiCapabilities *desired,
+                                      jvmtiCapabilities *result);
+-  static void relinquish_capabilities(const jvmtiCapabilities *current, 
+-                                      const jvmtiCapabilities *unwanted, 
++  static void relinquish_capabilities(const jvmtiCapabilities *current,
++                                      const jvmtiCapabilities *unwanted,
+                                       jvmtiCapabilities *result);
+   static void copy_capabilities(const jvmtiCapabilities *from, jvmtiCapabilities *to);
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp openjdk/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)jvmtiRedefineClasses.cpp	1.78 07/05/05 17:06:41 JVM"
+-#endif
+ /*
+  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -30,18 +27,18 @@
+ 
+ 
+ objArrayOop VM_RedefineClasses::_old_methods = NULL;
+-objArrayOop VM_RedefineClasses::_new_methods = NULL;  
++objArrayOop VM_RedefineClasses::_new_methods = NULL;
+ methodOop*  VM_RedefineClasses::_matching_old_methods = NULL;
+-methodOop*  VM_RedefineClasses::_matching_new_methods = NULL;  
++methodOop*  VM_RedefineClasses::_matching_new_methods = NULL;
+ methodOop*  VM_RedefineClasses::_deleted_methods      = NULL;
+-methodOop*  VM_RedefineClasses::_added_methods        = NULL;  
+-int         VM_RedefineClasses::_matching_methods_length = 0;  
++methodOop*  VM_RedefineClasses::_added_methods        = NULL;
++int         VM_RedefineClasses::_matching_methods_length = 0;
+ int         VM_RedefineClasses::_deleted_methods_length  = 0;
+-int         VM_RedefineClasses::_added_methods_length    = 0;  
++int         VM_RedefineClasses::_added_methods_length    = 0;
+ klassOop    VM_RedefineClasses::_the_class_oop = NULL;
+ 
+ 
+-VM_RedefineClasses::VM_RedefineClasses(jint class_count, 
++VM_RedefineClasses::VM_RedefineClasses(jint class_count,
+                                        const jvmtiClassDefinition *class_defs,
+                                        JvmtiClassLoadKind class_load_kind) {
+   _class_count = class_count;
+@@ -328,7 +325,7 @@
+       }
+       (*merge_cp_length_p)++;
+     } break;
+-  
++
+     // this is a double-indirect CP entry so it needs special handling
+     case JVM_CONSTANT_Fieldref:           // fall through
+     case JVM_CONSTANT_InterfaceMethodref: // fall through
+@@ -446,7 +443,7 @@
+       }
+       (*merge_cp_length_p)++;
+     } break;
+-  
++
+     // At this stage, Class or UnresolvedClass could be here, but not
+     // ClassIndex
+     case JVM_CONSTANT_ClassIndex: // fall through
+@@ -460,7 +457,7 @@
+     // StringIndex
+     case JVM_CONSTANT_StringIndex: // fall through
+ 
+-    // At this stage JVM_CONSTANT_UnresolvedClassInError should not be 
++    // At this stage JVM_CONSTANT_UnresolvedClassInError should not be
+     // here
+     case JVM_CONSTANT_UnresolvedClassInError: // fall through
+ 
+@@ -497,9 +494,9 @@
+   int i;
+ 
+   // Check superclasses, or rather their names, since superclasses themselves can be
+-  // requested to replace. 
++  // requested to replace.
+   // Check for NULL superclass first since this might be java.lang.Object
+-  if (the_class->super() != scratch_class->super() && 
++  if (the_class->super() != scratch_class->super() &&
+       (the_class->super() == NULL || scratch_class->super() == NULL ||
+        Klass::cast(the_class->super())->name() !=
+        Klass::cast(scratch_class->super())->name())) {
+@@ -528,7 +525,7 @@
+ 
+   // Check whether class is in the error init state.
+   if (the_class->is_in_error_state()) {
+-    // TBD #5057930: special error code is needed in 1.6 
++    // TBD #5057930: special error code is needed in 1.6
+     return JVMTI_ERROR_INVALID_CLASS;
+   }
+ 
+@@ -556,9 +553,9 @@
+       return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED;
+     }
+     // offset
+-    if (k_old_fields->short_at(i + instanceKlass::low_offset) != 
++    if (k_old_fields->short_at(i + instanceKlass::low_offset) !=
+         k_new_fields->short_at(i + instanceKlass::low_offset) ||
+-        k_old_fields->short_at(i + instanceKlass::high_offset) != 
++        k_old_fields->short_at(i + instanceKlass::high_offset) !=
+         k_new_fields->short_at(i + instanceKlass::high_offset)) {
+       return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED;
+     }
+@@ -577,11 +574,11 @@
+   }
+ 
+   // Do a parallel walk through the old and new methods. Detect
+-  // cases where they match (exist in both), have been added in 
+-  // the new methods, or have been deleted (exist only in the 
++  // cases where they match (exist in both), have been added in
++  // the new methods, or have been deleted (exist only in the
+   // old methods).  The class file parser places methods in order
+-  // by method name, but does not order overloaded methods by 
+-  // signature.  In order to determine what fate befell the methods, 
++  // by method name, but does not order overloaded methods by
++  // signature.  In order to determine what fate befell the methods,
+   // this code places the overloaded new methods that have matching
+   // old methods in the same order as the old methods and places
+   // new overloaded methods at the end of overloaded methods of
+@@ -590,8 +587,8 @@
+   // Since we are swapping out of order entries as we find them,
+   // we only have to search forward through the overloaded methods.
+   // Methods which are added and have the same name as an existing
+-  // method (but different signature) will be put at the end of 
+-  // the methods with that name, and the name mismatch code will 
++  // method (but different signature) will be put at the end of
++  // the methods with that name, and the name mismatch code will
+   // handle them.
+   objArrayHandle k_old_methods(the_class->methods());
+   objArrayHandle k_new_methods(scratch_class->methods());
+@@ -633,7 +630,7 @@
+         method_was = matched;
+       } else {
+         // The name matches, but the signature doesn't, which means we have to
+-        // search forward through the new overloaded methods. 
++        // search forward through the new overloaded methods.
+         int nj;  // outside the loop for post-loop check
+         for (nj = ni + 1; nj < n_new_methods; nj++) {
+           methodOop m = (methodOop)k_new_methods->obj_at(nj);
+@@ -646,7 +643,7 @@
+             // found a match so swap the methods
+             k_new_methods->obj_at_put(ni, m);
+             k_new_methods->obj_at_put(nj, k_new_method);
+-            k_new_method = m;            
++            k_new_method = m;
+             method_was = matched;
+             break;
+           }
+@@ -860,8 +857,8 @@
+     // load hook event.
+     state->set_class_being_redefined(&the_class, _class_load_kind);
+ 
+-    klassOop k = SystemDictionary::parse_stream(the_class_sym, 
+-                                                the_class_loader, 
++    klassOop k = SystemDictionary::parse_stream(the_class_sym,
++                                                the_class_loader,
+                                                 protection_domain,
+                                                 &st,
+                                                 THREAD);
+@@ -869,7 +866,7 @@
+     state->clear_class_being_redefined();
+ 
+     // TODO: if this is retransform, and nothing changed we can skip it
+-                                     
++
+     instanceKlassHandle scratch_class (THREAD, k);
+ 
+     if (HAS_PENDING_EXCEPTION) {
+@@ -988,7 +985,7 @@
+         return JVMTI_ERROR_INTERNAL;
+       }
+     }
+-  
++
+     _scratch_classes[i] = scratch_class;
+ 
+     // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark
+@@ -996,7 +993,7 @@
+       ("loaded name=%s (avail_mem=" UINT64_FORMAT "K)",
+       the_class->external_name(), os::available_memory() >> 10));
+   }
+-  
++
+   return JVMTI_ERROR_NONE;
+ }
+ 
+@@ -1083,7 +1080,7 @@
+         old_cp->copy_entry_to(old_i, *merge_cp_p, old_i, CHECK_0);
+         old_i++;
+         break;
+-  
++
+       default:
+         // just copy the entry to *merge_cp_p
+         old_cp->copy_entry_to(old_i, *merge_cp_p, old_i, CHECK_0);
+@@ -1428,7 +1425,7 @@
+   for (int bci = 0; bci < code_length; bci += bc_length) {
+     address bcp = code_base + bci;
+     Bytecodes::Code c = (Bytecodes::Code)(*bcp);
+-    
++
+     bc_length = Bytecodes::length_for(c);
+     if (bc_length == 0) {
+       // More complicated bytecodes report a length of zero so
+@@ -1534,7 +1531,7 @@
+           Bytes::put_Java_u2(p, new_index);
+         }
+       } break;
+-    }  
++    }
+   } // end for each bytecode
+ } // end rewrite_cp_refs_in_method()
+ 
+@@ -2085,7 +2082,7 @@
+     //   append_frame;
+     //   full_frame;
+     // }
+-  
++
+     assert(stackmap_p + 1 <= stackmap_end, "no room for frame_type");
+     // The Linux compiler does not like frame_type to be u1 or u2. It
+     // issues the following warning for the first if-statement below:
+@@ -2094,14 +2091,14 @@
+     //
+     u4 frame_type = *stackmap_p;
+     stackmap_p++;
+-  
++
+     // same_frame {
+     //   u1 frame_type = SAME; /* 0-63 */
+     // }
+     if (frame_type >= 0 && frame_type <= 63) {
+       // nothing more to do for same_frame
+     }
+-  
++
+     // same_locals_1_stack_item_frame {
+     //   u1 frame_type = SAME_LOCALS_1_STACK_ITEM; /* 64-127 */
+     //   verification_type_info stack[1];
+@@ -2110,12 +2107,12 @@
+       rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end,
+         calc_number_of_entries, frame_type, THREAD);
+     }
+-  
++
+     // reserved for future use
+     else if (frame_type >= 128 && frame_type <= 246) {
+       // nothing more to do for reserved frame_types
+     }
+-  
++
+     // same_locals_1_stack_item_frame_extended {
+     //   u1 frame_type = SAME_LOCALS_1_STACK_ITEM_EXTENDED; /* 247 */
+     //   u2 offset_delta;
+@@ -2126,7 +2123,7 @@
+       rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end,
+         calc_number_of_entries, frame_type, THREAD);
+     }
+-  
++
+     // chop_frame {
+     //   u1 frame_type = CHOP; /* 248-250 */
+     //   u2 offset_delta;
+@@ -2134,7 +2131,7 @@
+     else if (frame_type >= 248 && frame_type <= 250) {
+       stackmap_p += 2;
+     }
+-  
++
+     // same_frame_extended {
+     //   u1 frame_type = SAME_FRAME_EXTENDED; /* 251*/
+     //   u2 offset_delta;
+@@ -2142,7 +2139,7 @@
+     else if (frame_type == 251) {
+       stackmap_p += 2;
+     }
+-  
++
+     // append_frame {
+     //   u1 frame_type = APPEND; /* 252-254 */
+     //   u2 offset_delta;
+@@ -2158,7 +2155,7 @@
+           calc_number_of_entries, frame_type, THREAD);
+       }
+     }
+-  
++
+     // full_frame {
+     //   u1 frame_type = FULL_FRAME; /* 255 */
+     //   u2 offset_delta;
+@@ -2573,7 +2570,7 @@
+     if (ik->vtable_length() > 0 && ik->is_subtype_of(_the_class_oop)) {
+       // ik->vtable() creates a wrapper object; rm cleans it up
+       ResourceMark rm(THREAD);
+-      ik->vtable()->adjust_method_entries(_matching_old_methods, 
++      ik->vtable()->adjust_method_entries(_matching_old_methods,
+                                           _matching_new_methods,
+                                           _matching_methods_length,
+                                           &trace_name_printed);
+@@ -2589,7 +2586,7 @@
+         || ik->is_subclass_of(_the_class_oop))) {
+       // ik->itable() creates a wrapper object; rm cleans it up
+       ResourceMark rm(THREAD);
+-      ik->itable()->adjust_method_entries(_matching_old_methods, 
++      ik->itable()->adjust_method_entries(_matching_old_methods,
+                                           _matching_new_methods,
+                                           _matching_methods_length,
+                                           &trace_name_printed);
+@@ -2616,13 +2613,13 @@
+       other_cp = constantPoolHandle(ik->constants());
+       cp_cache = other_cp->cache();
+       if (cp_cache != NULL) {
+-        cp_cache->adjust_method_entries(_matching_old_methods, 
++        cp_cache->adjust_method_entries(_matching_old_methods,
+                                         _matching_new_methods,
+                                         _matching_methods_length,
+                                         &trace_name_printed);
+       }
+     }
+-    { 
++    {
+       ResourceMark rm(THREAD);
+       // PreviousVersionInfo objects returned via PreviousVersionWalker
+       // contain a GrowableArray of handles. We have to clean up the
+@@ -2636,7 +2633,7 @@
+           other_cp = pv_info->prev_constant_pool_handle();
+           cp_cache = other_cp->cache();
+           if (cp_cache != NULL) {
+-            cp_cache->adjust_method_entries(_matching_old_methods, 
++            cp_cache->adjust_method_entries(_matching_old_methods,
+                                             _matching_new_methods,
+                                             _matching_methods_length,
+                                             &trace_name_printed);
+@@ -2816,7 +2813,7 @@
+ //
+ class TransferNativeFunctionRegistration {
+  private:
+-  instanceKlassHandle the_class;  
++  instanceKlassHandle the_class;
+   int prefix_count;
+   char** prefixes;
+ 
+@@ -2827,7 +2824,7 @@
+   //    (1) without the prefix.
+   //    (2) with the prefix.
+   // where 'prefix' is the prefix at that 'depth' (first prefix, second prefix,...)
+-  methodOop search_prefix_name_space(int depth, char* name_str, size_t name_len, 
++  methodOop search_prefix_name_space(int depth, char* name_str, size_t name_len,
+                                      symbolOop signature) {
+     symbolOop name_symbol = SymbolTable::probe(name_str, (int)name_len);
+     if (name_symbol != NULL) {
+@@ -2836,7 +2833,7 @@
+         // Even if prefixed, intermediate methods must exist.
+         if (method->is_native()) {
+           // Wahoo, we found a (possibly prefixed) version of the method, return it.
+-          return method; 
++          return method;
+         }
+         if (depth < prefix_count) {
+           // Try applying further prefixes (other than this one).
+@@ -2848,7 +2845,7 @@
+           // Try adding this prefix to the method name and see if it matches
+           // another method name.
+           char* prefix = prefixes[depth];
+-          size_t prefix_len = strlen(prefix);          
++          size_t prefix_len = strlen(prefix);
+           size_t trial_len = name_len + prefix_len;
+           char* trial_name_str = NEW_RESOURCE_ARRAY(char, trial_len + 1);
+           strcpy(trial_name_str, prefix);
+@@ -2882,7 +2879,7 @@
+     return name_str;
+   }
+ 
+-  // Strip any prefixes off the old native method, then try to find a 
++  // Strip any prefixes off the old native method, then try to find a
+   // (possibly prefixed) new native that matches it.
+   methodOop strip_and_search_for_new_native(methodOop method) {
+     ResourceMark rm;
+@@ -2905,12 +2902,12 @@
+   void transfer_registrations(methodOop* old_methods, int methods_length) {
+     for (int j = 0; j < methods_length; j++) {
+       methodOop old_method = old_methods[j];
+-      
++
+       if (old_method->is_native() && old_method->has_native_function()) {
+         methodOop new_method = strip_and_search_for_new_native(old_method);
+         if (new_method != NULL) {
+           // Actually set the native function in the new method.
+-          // Redefine does not send events (except CFLH), certainly not this 
++          // Redefine does not send events (except CFLH), certainly not this
+           // behind the scenes re-registration.
+           new_method->set_native_function(old_method->native_function(),
+                               !methodOopDesc::native_bind_event_is_interesting);
+@@ -2960,8 +2957,8 @@
+ 
+     // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
+     CodeCache::make_marked_nmethods_not_entrant();
+- 
+-    // From now on we know that the dependency information is complete 
++
++    // From now on we know that the dependency information is complete
+     JvmtiExport::set_all_dependencies_are_recorded(true);
+   }
+ }
+@@ -2974,10 +2971,10 @@
+   _matching_new_methods = NEW_RESOURCE_ARRAY(methodOop, _old_methods->length());
+   _added_methods        = NEW_RESOURCE_ARRAY(methodOop, _new_methods->length());
+   _deleted_methods      = NEW_RESOURCE_ARRAY(methodOop, _old_methods->length());
+- 
+-  _matching_methods_length = 0;  
++
++  _matching_methods_length = 0;
+   _deleted_methods_length  = 0;
+-  _added_methods_length    = 0;  
++  _added_methods_length    = 0;
+ 
+   int nj = 0;
+   int oj = 0;
+@@ -3050,9 +3047,11 @@
+   klassOop the_class_oop = java_lang_Class::as_klassOop(the_class_mirror);
+   instanceKlassHandle the_class = instanceKlassHandle(THREAD, the_class_oop);
+ 
++#ifndef JVMTI_KERNEL
+   // Remove all breakpoints in methods of this class
+   JvmtiBreakpoints& jvmti_breakpoints = JvmtiCurrentBreakpoints::get_jvmti_breakpoints();
+-  jvmti_breakpoints.clearall_in_class_at_safepoint(the_class_oop); 
++  jvmti_breakpoints.clearall_in_class_at_safepoint(the_class_oop);
++#endif // !JVMTI_KERNEL
+ 
+   if (the_class_oop == Universe::reflect_invoke_cache()->klass()) {
+     // We are redefining java.lang.reflect.Method. Method.invoke() is
+@@ -3124,7 +3123,7 @@
+ 
+   // Replace methods and constantpool
+   the_class->set_methods(_new_methods);
+-  scratch_class->set_methods(_old_methods);     // To prevent potential GCing of the old methods, 
++  scratch_class->set_methods(_old_methods);     // To prevent potential GCing of the old methods,
+                                           // and to be able to undo operation easily.
+ 
+   constantPoolOop old_constants = the_class->constants();
+@@ -3150,7 +3149,7 @@
+   // methods, the method holder is "the class" itself (as gotten from
+   // the new constant pool). The check works fine in this case. The
+   // check also works fine for methods inherited from super classes.
+-  // 
++  //
+   // Miranda methods are a little more complicated. A miranda method is
+   // provided by an interface when the class implementing the interface
+   // does not provide its own method.  These interfaces are implemented
+@@ -3177,7 +3176,7 @@
+   // with them was cached on the scratch class, move to the_class.
+   // Note: we still want to do this if nothing needed caching since it
+   // should get cleared in the_class too.
+-  the_class->set_cached_class_file(scratch_class->get_cached_class_file_bytes(), 
++  the_class->set_cached_class_file(scratch_class->get_cached_class_file_bytes(),
+                                    scratch_class->get_cached_class_file_len());
+ 
+   // Replace inner_classes
+@@ -3190,7 +3189,7 @@
+   {
+     ResourceMark rm(THREAD);
+     // no exception should happen here since we explicitly
+-    // do not check loader constraints. 
++    // do not check loader constraints.
+     // compare_and_normalize_class_versions has already checked:
+     //  - classloaders unchanged, signatures unchanged
+     //  - all instanceKlasses for redefined classes reused & contents updated
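The stack-map hunks in this file step through the StackMapTable attribute by dispatching on frame_type ranges from the class-file format, advancing past each frame's fixed-size fields. A condensed sketch of that dispatch; the function name is hypothetical, and the variable-length verification_type_info entries (where the constant-pool rewriting actually happens) are elided:

    // frame_type dispatch for walking a StackMapTable attribute,
    // mirroring the ranges handled in the hunks above.
    void skip_fixed_frame_fields(const unsigned char*& p, unsigned frame_type) {
      if (frame_type <= 63) {
        // same_frame: the tag alone encodes the offset delta
      } else if (frame_type <= 127) {
        // same_locals_1_stack_item_frame: one verification_type_info follows
      } else if (frame_type <= 246) {
        // reserved for future use: nothing more to do
      } else if (frame_type == 247) {
        p += 2; // same_locals_1_stack_item_frame_extended: u2 offset_delta,
                // then one verification_type_info
      } else if (frame_type <= 250) {
        p += 2; // chop_frame: u2 offset_delta
      } else if (frame_type == 251) {
        p += 2; // same_frame_extended: u2 offset_delta
      } else if (frame_type <= 254) {
        p += 2; // append_frame: u2 offset_delta, then frame_type - 251 locals
      } else {
        p += 2; // full_frame: u2 offset_delta, then locals and stack arrays
      }
    }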
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiRedefineClasses.hpp openjdk/hotspot/src/share/vm/prims/jvmtiRedefineClasses.hpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiRedefineClasses.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiRedefineClasses.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jvmtiRedefineClasses.hpp	1.40 07/05/23 10:53:53 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Introduction:
+@@ -201,7 +198,7 @@
+ // to merge_cp. Two of the CP entry types are special in that they are
+ // lazily resolved. Before explaining the copying complication, we need
+ // to digress into CP entry resolution.
+-// 
++//
+ // JVM_CONSTANT_Class and JVM_CONSTANT_String entries are present in
+ // the class file, but are not stored in memory as such until they are
+ // resolved. The entries are not resolved unless they are used because
+@@ -334,119 +331,17 @@
+ //   coordinate a cleanup of these constants with Runtime.
+ //
+ 
+-
+-// RedefineClasses tracing support via the TraceRedefineClasses
+-// option. A bit is assigned to each group of trace messages.
+-// Groups of messages are individually selectable. We have to use
+-// decimal values on the command line since the command option
+-// parsing logic doesn't like non-decimal numerics. The HEX values
+-// are used in the actual RC_TRACE() calls for sanity. To achieve
+-// the old cumulative behavior, pick the level after the one in
+-// which you are interested and subtract one, e.g., 33554431 will
+-// print every tracing message.
+-// 
+-//    0x00000000 |          0 - default; no tracing messages
+-//    0x00000001 |          1 - name each target class before loading, after
+-//                              loading and after redefinition is completed
+-//    0x00000002 |          2 - print info if parsing, linking or
+-//                              verification throws an exception
+-//    0x00000004 |          4 - print timer info for the VM operation
+-//    0x00000008 |          8 - print subclass counter updates
+-//    0x00000010 |         16 - unused
+-//    0x00000020 |         32 - unused
+-//    0x00000040 |         64 - unused
+-//    0x00000080 |        128 - unused
+-//    0x00000100 |        256 - previous class weak reference addition
+-//    0x00000200 |        512 - previous class weak reference mgmt during
+-//                              class unloading checks (GC)
+-//    0x00000400 |       1024 - previous class weak reference mgmt during
+-//                              add previous ops (GC)
+-//    0x00000800 |       2048 - previous class breakpoint mgmt
+-//    0x00001000 |       4096 - unused
+-//    0x00002000 |       8192 - unused
+-//    0x00004000 |      16384 - unused
+-//    0x00008000 |      32768 - old/new method matching/add/delete 
+-//    0x00010000 |      65536 - impl details: CP size info
+-//    0x00020000 |     131072 - impl details: CP merge pass info
+-//    0x00040000 |     262144 - impl details: CP index maps
+-//    0x00080000 |     524288 - impl details: modified CP index values
+-//    0x00100000 |    1048576 - impl details: vtable updates
+-//    0x00200000 |    2097152 - impl details: itable updates
+-//    0x00400000 |    4194304 - impl details: constant pool cache updates
+-//    0x00800000 |    8388608 - impl details: methodComparator info
+-//    0x01000000 |   16777216 - impl details: nmethod evolution info
+-//    0x02000000 |   33554432 - impl details: annotation updates
+-//    0x04000000 |   67108864 - impl details: StackMapTable updates
+-//    0x08000000 |  134217728 - unused
+-//    0x10000000 |  268435456 - unused
+-//    0x20000000 |  536870912 - unused
+-//    0x40000000 | 1073741824 - unused
+-//    0x80000000 | 2147483648 - unused
+-//
+-// Note: The ResourceMark is to cleanup resource allocated args.
+-//   The "while (0)" is so we can use semi-colon at end of RC_TRACE().
+-#define RC_TRACE(level, args) \
+-  if ((TraceRedefineClasses & level) != 0) { \
+-    ResourceMark rm; \
+-    tty->print("RedefineClasses-0x%x: ", level); \
+-    tty->print_cr args; \
+-  } while (0)
+-
+-#define RC_TRACE_WITH_THREAD(level, thread, args) \
+-  if ((TraceRedefineClasses & level) != 0) { \
+-    ResourceMark rm(thread); \
+-    tty->print("RedefineClasses-0x%x: ", level); \
+-    tty->print_cr args; \
+-  } while (0)
+-
+-#define RC_TRACE_MESG(args) \
+-  { \
+-    ResourceMark rm; \
+-    tty->print("RedefineClasses: "); \
+-    tty->print_cr args; \
+-  } while (0)
+-
+-// Macro for checking if TraceRedefineClasses has a specific bit
+-// enabled. Returns true if the bit specified by level is set.
+-#define RC_TRACE_ENABLED(level) ((TraceRedefineClasses & level) != 0)
+-
+-// Macro for checking if TraceRedefineClasses has one or more bits
+-// set in a range of bit values. Returns true if one or more bits
+-// is set in the range from low..high inclusive. Assumes that low
+-// and high are single bit values.
+-//
+-// ((high << 1) - 1)
+-//     Yields a mask that removes bits greater than the high bit value.
+-//     This algorithm doesn't work with highest bit.
+-// ~(low - 1)
+-//     Yields a mask that removes bits lower than the low bit value.
+-#define RC_TRACE_IN_RANGE(low, high) \
+-(((TraceRedefineClasses & ((high << 1) - 1)) & ~(low - 1)) != 0)
+-
+-// Timer support macros. Only do timer operations if timer tracing
+-// is enabled. The "while (0)" is so we can use semi-colon at end of
+-// the macro.
+-#define RC_TIMER_START(t) \
+-  if (RC_TRACE_ENABLED(0x00000004)) { \
+-    t.start(); \
+-  } while (0)
+-#define RC_TIMER_STOP(t) \
+-  if (RC_TRACE_ENABLED(0x00000004)) { \
+-    t.stop(); \
+-  } while (0)
+-
+-
+ class VM_RedefineClasses: public VM_Operation {
+  private:
+   // These static fields are needed by SystemDictionary::classes_do()
+   // facility and the adjust_cpool_cache_and_vtable() helper:
+   static objArrayOop     _old_methods;
+-  static objArrayOop     _new_methods;  
++  static objArrayOop     _new_methods;
+   static methodOop*      _matching_old_methods;
+-  static methodOop*      _matching_new_methods;  
++  static methodOop*      _matching_new_methods;
+   static methodOop*      _deleted_methods;
+-  static methodOop*      _added_methods;  
+-  static int             _matching_methods_length;  
++  static methodOop*      _added_methods;
++  static int             _matching_methods_length;
+   static int             _deleted_methods_length;
+   static int             _added_methods_length;
+   static klassOop        _the_class_oop;
+@@ -517,9 +412,9 @@
+   // to fix up these pointers.
+   static void adjust_cpool_cache_and_vtable(klassOop k_oop, oop loader, TRAPS);
+ 
+-  // Install the redefinition of a class 
++  // Install the redefinition of a class
+   void redefine_single_class(jclass the_jclass,
+-    instanceKlassHandle scratch_class, TRAPS); 
++    instanceKlassHandle scratch_class, TRAPS);
+ 
+   // Increment the classRedefinedCount field in the specific instanceKlass
+   // and in all direct and indirect subclasses.
+@@ -577,7 +472,7 @@
+   static void dump_methods()   PRODUCT_RETURN;
+ 
+  public:
+-  VM_RedefineClasses(jint class_count, 
++  VM_RedefineClasses(jint class_count,
+                      const jvmtiClassDefinition *class_defs,
+                      JvmtiClassLoadKind class_load_kind);
+   VMOp_Type type() const { return VMOp_RedefineClasses; }
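The comments in this file's .cpp hunks describe how redefinition pairs up old and new methods: one parallel walk classifies every method as matched (present in both versions), deleted (old only), or added (new only). A simplified sketch of that classification as a sorted-merge diff; unlike the real code, which sorts by name only and swaps matching overloads into position, this assumes both lists are fully sorted by (name, signature):

    #include <string>
    #include <utility>
    #include <vector>

    // A method identified by (name, signature); pair comparison gives the
    // total order a sorted-merge walk needs.
    typedef std::pair<std::string, std::string> Meth;

    void classify(const std::vector<Meth>& old_m,  // sorted
                  const std::vector<Meth>& new_m,  // sorted
                  std::vector<Meth>& matched,
                  std::vector<Meth>& added,
                  std::vector<Meth>& deleted) {
      size_t oj = 0, nj = 0;
      while (oj < old_m.size() && nj < new_m.size()) {
        if (old_m[oj] == new_m[nj]) {
          matched.push_back(old_m[oj]); oj++; nj++;  // in both versions
        } else if (old_m[oj] < new_m[nj]) {
          deleted.push_back(old_m[oj++]);            // only in the old class
        } else {
          added.push_back(new_m[nj++]);              // only in the new class
        }
      }
      while (oj < old_m.size()) deleted.push_back(old_m[oj++]); // old leftovers
      while (nj < new_m.size()) added.push_back(new_m[nj++]);   // new leftovers
    }

The matched list is what later drives adjust_method_entries() over vtables, itables and constant-pool caches, as in the hunks above.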
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiRedefineClassesTrace.hpp openjdk/hotspot/src/share/vm/prims/jvmtiRedefineClassesTrace.hpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiRedefineClassesTrace.hpp	1969-12-31 19:00:00.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiRedefineClassesTrace.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -0,0 +1,123 @@
++/*
++ * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
++ * CA 95054 USA or visit www.sun.com if you need additional information or
++ * have any questions.
++ *
++ */
++
++// RedefineClasses tracing support via the TraceRedefineClasses
++// option. A bit is assigned to each group of trace messages.
++// Groups of messages are individually selectable. We have to use
++// decimal values on the command line since the command option
++// parsing logic doesn't like non-decimal numerics. The HEX values
++// are used in the actual RC_TRACE() calls for sanity. To achieve
++// the old cumulative behavior, pick the level after the one in
++// which you are interested and subtract one, e.g., 33554431 will
++// print every tracing message.
++//
++//    0x00000000 |          0 - default; no tracing messages
++//    0x00000001 |          1 - name each target class before loading, after
++//                              loading and after redefinition is completed
++//    0x00000002 |          2 - print info if parsing, linking or
++//                              verification throws an exception
++//    0x00000004 |          4 - print timer info for the VM operation
++//    0x00000008 |          8 - print subclass counter updates
++//    0x00000010 |         16 - unused
++//    0x00000020 |         32 - unused
++//    0x00000040 |         64 - unused
++//    0x00000080 |        128 - unused
++//    0x00000100 |        256 - previous class weak reference addition
++//    0x00000200 |        512 - previous class weak reference mgmt during
++//                              class unloading checks (GC)
++//    0x00000400 |       1024 - previous class weak reference mgmt during
++//                              add previous ops (GC)
++//    0x00000800 |       2048 - previous class breakpoint mgmt
++//    0x00001000 |       4096 - unused
++//    0x00002000 |       8192 - unused
++//    0x00004000 |      16384 - unused
++//    0x00008000 |      32768 - old/new method matching/add/delete
++//    0x00010000 |      65536 - impl details: CP size info
++//    0x00020000 |     131072 - impl details: CP merge pass info
++//    0x00040000 |     262144 - impl details: CP index maps
++//    0x00080000 |     524288 - impl details: modified CP index values
++//    0x00100000 |    1048576 - impl details: vtable updates
++//    0x00200000 |    2097152 - impl details: itable updates
++//    0x00400000 |    4194304 - impl details: constant pool cache updates
++//    0x00800000 |    8388608 - impl details: methodComparator info
++//    0x01000000 |   16777216 - impl details: nmethod evolution info
++//    0x02000000 |   33554432 - impl details: annotation updates
++//    0x04000000 |   67108864 - impl details: StackMapTable updates
++//    0x08000000 |  134217728 - unused
++//    0x10000000 |  268435456 - unused
++//    0x20000000 |  536870912 - unused
++//    0x40000000 | 1073741824 - unused
++//    0x80000000 | 2147483648 - unused
++//
++// Note: The ResourceMark is to cleanup resource allocated args.
++//   The "while (0)" is so we can use semi-colon at end of RC_TRACE().
++#define RC_TRACE(level, args) \
++  if ((TraceRedefineClasses & level) != 0) { \
++    ResourceMark rm; \
++    tty->print("RedefineClasses-0x%x: ", level); \
++    tty->print_cr args; \
++  } while (0)
++
++#define RC_TRACE_WITH_THREAD(level, thread, args) \
++  if ((TraceRedefineClasses & level) != 0) { \
++    ResourceMark rm(thread); \
++    tty->print("RedefineClasses-0x%x: ", level); \
++    tty->print_cr args; \
++  } while (0)
++
++#define RC_TRACE_MESG(args) \
++  { \
++    ResourceMark rm; \
++    tty->print("RedefineClasses: "); \
++    tty->print_cr args; \
++  } while (0)
++
++// Macro for checking if TraceRedefineClasses has a specific bit
++// enabled. Returns true if the bit specified by level is set.
++#define RC_TRACE_ENABLED(level) ((TraceRedefineClasses & level) != 0)
++
++// Macro for checking if TraceRedefineClasses has one or more bits
++// set in a range of bit values. Returns true if one or more bits
++// is set in the range from low..high inclusive. Assumes that low
++// and high are single bit values.
++//
++// ((high << 1) - 1)
++//     Yields a mask that removes bits greater than the high bit value.
++//     This algorithm doesn't work with the highest bit.
++// ~(low - 1)
++//     Yields a mask that removes bits lower than the low bit value.
++#define RC_TRACE_IN_RANGE(low, high) \
++(((TraceRedefineClasses & ((high << 1) - 1)) & ~(low - 1)) != 0)
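
A quick worked check of that mask arithmetic, taking the previous-class
weak-reference range low = 0x00000100, high = 0x00000400 from the table
above (editorial sketch; the static_assert is C++11 and used purely for
illustration, it is not part of the patch):

    //   ((0x00000400 << 1) - 1) == 0x000007FF  -> keeps bits at or below high
    //   ~(0x00000100 - 1)       == ~0x000000FF -> drops bits below low
    // The combined mask is 0x00000700, i.e. exactly 0x100 | 0x200 | 0x400,
    // so any flag set in the inclusive range makes RC_TRACE_IN_RANGE true.
    static_assert((((0x00000400 << 1) - 1) & ~(0x00000100 - 1)) == 0x00000700,
                  "mask covers 0x100..0x400 inclusive");
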
++
++// Timer support macros. Only do timer operations if timer tracing
++// is enabled. The "while (0)" is so we can use a semicolon at the end of
++// the macro.
++#define RC_TIMER_START(t) \
++  if (RC_TRACE_ENABLED(0x00000004)) { \
++    t.start(); \
++  } while (0)
++#define RC_TIMER_STOP(t) \
++  if (RC_TRACE_ENABLED(0x00000004)) { \
++    t.stop(); \
++  } while (0)
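
The macros above only compile inside HotSpot (ResourceMark, tty). Here is a
minimal standalone sketch of the same pattern - a bit-gated trace macro that
takes a parenthesized argument list, with a trailing "while (0)" to consume
the caller's semicolon - where the plain global flag and printf are stand-ins
for the real TraceRedefineClasses VM flag and tty:

    #include <cstdio>

    // stand-in for the real TraceRedefineClasses VM flag
    static unsigned int TraceRedefineClasses = 0x00000001 | 0x00000004;

    // args must be a parenthesized list, e.g. ("fmt %d", x), so one macro
    // parameter can forward a variable-length printf-style call
    #define RC_TRACE_SKETCH(level, args)                          \
      if ((TraceRedefineClasses & (level)) != 0) {                \
        std::printf("RedefineClasses-0x%x: ", (unsigned)(level)); \
        std::printf args;                                         \
        std::printf("\n");                                        \
      } while (0)  // swallows the semicolon at the call site

    int main() {
      const char* name = "java/lang/Object";
      RC_TRACE_SKETCH(0x00000001, ("loading name=%s", name));   // bit set: prints
      RC_TRACE_SKETCH(0x00000002, ("parse error in %s", name)); // bit clear: silent
      return 0;
    }
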
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiTagMap.cpp openjdk/hotspot/src/share/vm/prims/jvmtiTagMap.cpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiTagMap.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiTagMap.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)jvmtiTagMap.cpp	1.84 07/06/06 13:18:31 JVM"
+-#endif
+ /*
+  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,13 +19,13 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_jvmtiTagMap.cpp.incl"
+ 
+-// JvmtiTagHashmapEntry 
++// JvmtiTagHashmapEntry
+ //
+ // Each entry encapsulates a JNI weak reference to the tagged object
+ // and the tag value. In addition an entry includes a next pointer which
+@@ -38,49 +35,49 @@
+  private:
+   friend class JvmtiTagMap;
+ 
+-  jweak _object;			// JNI weak ref to tagged object  
+-  jlong _tag;				// the tag
+-  JvmtiTagHashmapEntry* _next;		// next on the list
++  jweak _object;                        // JNI weak ref to tagged object
++  jlong _tag;                           // the tag
++  JvmtiTagHashmapEntry* _next;          // next on the list
+ 
+   inline void init(jweak object, jlong tag) {
+-    _object = object;    
++    _object = object;
+     _tag = tag;
+     _next = NULL;
+   }
+ 
+   // constructor
+-  JvmtiTagHashmapEntry(jweak object, jlong tag)		{ init(object, tag); }
++  JvmtiTagHashmapEntry(jweak object, jlong tag)         { init(object, tag); }
+ 
+- public:  
++ public:
+ 
+   // accessor methods
+-  inline jweak object() const				{ return _object; }
+-  inline jlong tag() const				{ return _tag; }
++  inline jweak object() const                           { return _object; }
++  inline jlong tag() const                              { return _tag; }
+ 
+-  inline void set_tag(jlong tag) { 
++  inline void set_tag(jlong tag) {
+     assert(tag != 0, "can't be zero");
+     _tag = tag;
+   }
+ 
+-  inline JvmtiTagHashmapEntry* next() const		{ return _next; }
+-  inline void set_next(JvmtiTagHashmapEntry* next)	{ _next = next; }
++  inline JvmtiTagHashmapEntry* next() const             { return _next; }
++  inline void set_next(JvmtiTagHashmapEntry* next)      { _next = next; }
+ };
+ 
+ 
+ // JvmtiTagHashmap
+-// 
++//
+ // A hashmap is essentially a table of pointers to entries. Entries
+-// are hashed to a location, or position in the table, and then 
++// are hashed to a location, or position in the table, and then
+ // chained from that location. The "key" for hashing is address of
+ // the object, or oop. The "value" is the JNI weak reference to the
+-// object and the tag value. Keys are not stored with the entry. 
++// object and the tag value. Keys are not stored with the entry.
+ // Instead the weak reference is resolved to obtain the key.
+ //
+ // A hashmap maintains a count of the number of entries in the hashmap
+ // and resizes if the number of entries exceeds a given threshold.
+-// The threshold is specified as a percentage of the size - for 
++// The threshold is specified as a percentage of the size - for
+ // example a threshold of 0.75 will trigger the hashmap to resize
+-// if the number of entries is >75% of table size. 
++// if the number of entries is >75% of table size.
+ //
+ // A hashmap provides functions for adding, removing, and finding
+ // entries. It also provides a function to iterate over all entries
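
What the comment describes is ordinary chained hashing keyed on the object's
address, with a count-triggered resize. A self-contained sketch of just that
mechanism (editorial illustration, not HotSpot code: the real keys are oops
reached through JNI weak references, and new sizes come from the odd-prime
table defined further down):

    #include <cstddef>
    #include <vector>

    struct Entry { const void* key; long tag; Entry* next; };

    struct TagHashmapSketch {
      std::vector<Entry*> table;
      std::size_t entry_count = 0;
      float load_factor = 0.75f;            // resize above 75% of table size

      explicit TagHashmapSketch(std::size_t size) : table(size, nullptr) {}

      static std::size_t hash(const void* key, std::size_t size) {
        return (reinterpret_cast<std::size_t>(key) >> 3) % size;  // address-based
      }

      void add(const void* key, long tag) {
        std::size_t h = hash(key, table.size());
        table[h] = new Entry{key, tag, table[h]};   // chain at the bucket head
        if (++entry_count > static_cast<std::size_t>(load_factor * table.size()))
          resize(table.size() * 2 + 1);             // real code picks odd primes
      }

      void resize(std::size_t new_size) {
        std::vector<Entry*> new_table(new_size, nullptr);
        for (Entry* e : table)
          while (e != nullptr) {                    // rehash every chained entry
            Entry* next = e->next;
            std::size_t h = hash(e->key, new_size);
            e->next = new_table[h];
            new_table[h] = e;
            e = next;
          }
        table.swap(new_table);
      }
    };
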
+@@ -90,26 +87,26 @@
+  private:
+   friend class JvmtiTagMap;
+ 
+-  enum {	    
++  enum {
+     small_trace_threshold  = 10000,                  // threshold for tracing
+     medium_trace_threshold = 100000,
+     large_trace_threshold  = 1000000,
+     initial_trace_threshold = small_trace_threshold
+   };
+ 
+-  static int _sizes[];			// array of possible hashmap sizes
+-  int _size;				// actual size of the table
+-  int _size_index;			// index into size table
++  static int _sizes[];                  // array of possible hashmap sizes
++  int _size;                            // actual size of the table
++  int _size_index;                      // index into size table
+ 
+-  int _entry_count;			// number of entries in the hashmap
++  int _entry_count;                     // number of entries in the hashmap
+ 
+-  float _load_factor;			// load factor as a % of the size
+-  int _resize_threshold;		// computed threshold to trigger resizing.
++  float _load_factor;                   // load factor as a % of the size
++  int _resize_threshold;                // computed threshold to trigger resizing.
+   bool _resizing_enabled;               // indicates if hashmap can resize
+ 
+   int _trace_threshold;                 // threshold for trace messages
+ 
+-  JvmtiTagHashmapEntry** _table;	// the table of entries.
++  JvmtiTagHashmapEntry** _table;        // the table of entries.
+ 
+   // private accessors
+   int resize_threshold() const                  { return _resize_threshold; }
+@@ -185,19 +182,19 @@
+     for (i=0; i<_size; i++) {
+       JvmtiTagHashmapEntry* entry = _table[i];
+       while (entry != NULL) {
+-	JvmtiTagHashmapEntry* next = entry->next();
+-	oop key = JNIHandles::resolve(entry->object());	
+-	assert(key != NULL, "jni weak reference cleared!!");
+-	unsigned int h = hash(key, new_size);
+-	JvmtiTagHashmapEntry* anchor = new_table[h];
+-	if (anchor == NULL) {
+-	  new_table[h] = entry;
+-	  entry->set_next(NULL);
+-	} else {
+-	  entry->set_next(anchor);
+-	  new_table[h] = entry;
+-	}	
+-	entry = next;
++        JvmtiTagHashmapEntry* next = entry->next();
++        oop key = JNIHandles::resolve(entry->object());
++        assert(key != NULL, "jni weak reference cleared!!");
++        unsigned int h = hash(key, new_size);
++        JvmtiTagHashmapEntry* anchor = new_table[h];
++        if (anchor == NULL) {
++          new_table[h] = entry;
++          entry->set_next(NULL);
++        } else {
++          entry->set_next(anchor);
++          new_table[h] = entry;
++        }
++        entry = next;
+       }
+     }
+ 
+@@ -213,7 +210,7 @@
+ 
+ 
+   // internal remove function - remove an entry at a given position in the
+-  // table. 
++  // table.
+   inline void remove(JvmtiTagHashmapEntry* prev, int pos, JvmtiTagHashmapEntry* entry) {
+     assert(pos >= 0 && pos < _size, "out of range");
+     if (prev == NULL) {
+@@ -241,15 +238,15 @@
+     int i=0;
+     while (_sizes[i] < size) {
+       if (_sizes[i] < 0) {
+-	assert(i > 0, "sanity check");
+-	i--;
+-	break;
++        assert(i > 0, "sanity check");
++        i--;
++        break;
+       }
+       i++;
+     }
+ 
+     // if a load factor is specified then use it, otherwise use default
+-    if (load_factor > 0.01f) {	  
++    if (load_factor > 0.01f) {
+       init(i, load_factor);
+     } else {
+       init(i);
+@@ -270,8 +267,8 @@
+   }
+ 
+   // accessors
+-  int size() const				{ return _size; }   
+-  JvmtiTagHashmapEntry** table() const		{ return _table; }
++  int size() const                              { return _size; }
++  JvmtiTagHashmapEntry** table() const          { return _table; }
+   int entry_count() const                       { return _entry_count; }
+ 
+   // find an entry in the hashmap, returns NULL if not found.
+@@ -279,18 +276,18 @@
+     unsigned int h = hash(key);
+     JvmtiTagHashmapEntry* entry = _table[h];
+     while (entry != NULL) {
+-      oop orig_key = JNIHandles::resolve(entry->object());	
++      oop orig_key = JNIHandles::resolve(entry->object());
+       assert(orig_key != NULL, "jni weak reference cleared!!");
+       if (key == orig_key) {
+-	break;
+-      } 
++        break;
++      }
+       entry = entry->next();
+     }
+     return entry;
+   }
+ 
+ 
+-  // add a new entry to hashmap 
++  // add a new entry to hashmap
+   inline void add(oop key, JvmtiTagHashmapEntry* entry) {
+     assert(key != NULL, "checking");
+     assert(find(key) == NULL, "duplicate detected");
+@@ -321,13 +318,13 @@
+   inline JvmtiTagHashmapEntry* remove(oop key) {
+     unsigned int h = hash(key);
+     JvmtiTagHashmapEntry* entry = _table[h];
+-    JvmtiTagHashmapEntry* prev = NULL; 
++    JvmtiTagHashmapEntry* prev = NULL;
+     while (entry != NULL) {
+-      oop orig_key = JNIHandles::resolve(entry->object());	
++      oop orig_key = JNIHandles::resolve(entry->object());
+       assert(orig_key != NULL, "jni weak reference cleared!!");
+       if (key == orig_key) {
+-	break;
+-      } 
++        break;
++      }
+       prev = entry;
+       entry = entry->next();
+     }
+@@ -337,14 +334,14 @@
+     return entry;
+   }
+ 
+-  // iterate over all entries in the hashmap 
++  // iterate over all entries in the hashmap
+   void entry_iterate(JvmtiTagHashmapEntryClosure* closure);
+ };
+ 
+ // possible hashmap sizes - odd primes that roughly double in size.
+ // To avoid excessive resizing the odd primes from 4801-76831 and
+ // 76831-307261 have been removed. The list must be terminated by -1.
+-int JvmtiTagHashmap::_sizes[] =  { 4801, 76831, 307261, 614563, 1228891, 
++int JvmtiTagHashmap::_sizes[] =  { 4801, 76831, 307261, 614563, 1228891,
+     2457733, 4915219, 9830479, 19660831, 39321619, 78643219, -1 };
+ 
+ 
+@@ -355,7 +352,7 @@
+ };
+ 
+ 
+-// iterate over all entries in the hashmap 
++// iterate over all entries in the hashmap
+ void JvmtiTagHashmap::entry_iterate(JvmtiTagHashmapEntryClosure* closure) {
+   for (int i=0; i<_size; i++) {
+     JvmtiTagHashmapEntry* entry = _table[i];
+@@ -365,7 +362,7 @@
+       // necessary because do_entry may remove the entry from the
+       // hashmap.
+       JvmtiTagHashmapEntry* next = entry->next();
+-      closure->do_entry(entry);		
++      closure->do_entry(entry);
+       entry = next;
+      }
+   }
+@@ -379,7 +376,7 @@
+   // table + entries in KB
+   int hashmap_usage = (size()*sizeof(JvmtiTagHashmapEntry*) +
+     entry_count()*sizeof(JvmtiTagHashmapEntry))/K;
+-  
++
+   int weak_globals_usage = (int)(JNIHandles::weak_global_handle_memory_usage()/K);
+   tty->print_cr(", %d entries (%d KB) <JNI weak globals: %d KB>]",
+     entry_count(), hashmap_usage, weak_globals_usage);
+@@ -450,7 +447,7 @@
+   }
+ 
+   // get the memory region used by the young generation
+-  get_young_generation(); 
++  get_young_generation();
+ 
+   // finally add us to the environment
+   ((JvmtiEnvBase *)env)->set_tag_map(this);
+@@ -460,7 +457,7 @@
+ // destroy a JvmtiTagMap
+ JvmtiTagMap::~JvmtiTagMap() {
+ 
+-  // no lock acquired as we assume the enclosing environment is 
++  // no lock acquired as we assume the enclosing environment is
+   // also being destroyed.
+   ((JvmtiEnvBase *)_env)->set_tag_map(NULL);
+ 
+@@ -471,11 +468,11 @@
+     for (int j=0; j<hashmap->size(); j++) {
+       JvmtiTagHashmapEntry *entry = table[j];
+       while (entry != NULL) {
+-	JvmtiTagHashmapEntry* next = entry->next();
+-	jweak ref = entry->object();
++        JvmtiTagHashmapEntry* next = entry->next();
++        jweak ref = entry->object();
+         JNIHandles::destroy_weak_global(ref);
+-	delete entry;
+-	entry = next;
++        delete entry;
++        entry = next;
+       }
+     }
+ 
+@@ -541,7 +538,7 @@
+ 
+ // iterate over all entries in the tag map.
+ void JvmtiTagMap::entry_iterate(JvmtiTagHashmapEntryClosure* closure) {
+-  for (int i=0; i<n_hashmaps; i++) {      
++  for (int i=0; i<n_hashmaps; i++) {
+     JvmtiTagHashmap* hashmap = _hashmap[i];
+     hashmap->entry_iterate(closure);
+   }
+@@ -564,16 +561,16 @@
+     return 0;
+   } else {
+     return entry->tag();
+-  }  
++  }
+ }
+ 
+-// If the object is a java.lang.Class then return the klassOop, 
++// If the object is a java.lang.Class then return the klassOop,
+ // otherwise return the original object
+ static inline oop klassOop_if_java_lang_Class(oop o) {
+   if (o->klass() == SystemDictionary::class_klass()) {
+     if (!java_lang_Class::is_primitive(o)) {
+       o = (oop)java_lang_Class::as_klassOop(o);
+-      assert(o != NULL, "class for non-primitive mirror must exist");      
++      assert(o != NULL, "class for non-primitive mirror must exist");
+     }
+   }
+   return o;
+@@ -581,18 +578,18 @@
+ 
+ // A CallbackWrapper is a support class for querying and tagging an object
+ // around a callback to a profiler. The constructor does pre-callback
+-// work to get the tag value, klass tag value, ... and the destructor 
++// work to get the tag value, klass tag value, ... and the destructor
+ // does the post-callback work of tagging or untagging the object.
+ //
+ // {
+-//   CallbackWrapper wrapper(tag_map, o); 
++//   CallbackWrapper wrapper(tag_map, o);
+ //
+ //   (*callback)(wrapper.klass_tag(), wrapper.obj_size(), wrapper.obj_tag_p(), ...)
+ //
+-// } // wrapper goes out of scope here which results in the destructor 
++// } // wrapper goes out of scope here which results in the destructor
+ //      checking to see if the object has been tagged, untagged, or the
+-//	tag value has changed.
+-//	
++//      tag value has changed.
++//
+ class CallbackWrapper : public StackObj {
+  private:
+   JvmtiTagMap* _tag_map;
+@@ -602,21 +599,21 @@
+   jlong _obj_size;
+   jlong _obj_tag;
+   klassOop _klass;         // the object's class
+-  jlong _klass_tag;  
++  jlong _klass_tag;
+ 
+  protected:
+-  JvmtiTagMap* tag_map() const	    { return _tag_map; }
++  JvmtiTagMap* tag_map() const      { return _tag_map; }
+ 
+   // invoked post-callback to tag, untag, or update the tag of an object
+-  void inline post_callback_tag_update(oop o, JvmtiTagHashmap* hashmap, 
++  void inline post_callback_tag_update(oop o, JvmtiTagHashmap* hashmap,
+                                        JvmtiTagHashmapEntry* entry, jlong obj_tag);
+  public:
+-  CallbackWrapper(JvmtiTagMap* tag_map, oop o) {  
+-    assert(Thread::current()->is_VM_thread() || tag_map->is_locked(), 
+-	   "MT unsafe or must be VM thread");
+-   
++  CallbackWrapper(JvmtiTagMap* tag_map, oop o) {
++    assert(Thread::current()->is_VM_thread() || tag_map->is_locked(),
++           "MT unsafe or must be VM thread");
++
+     // for Classes the klassOop is tagged
+-    _o = klassOop_if_java_lang_Class(o);  
++    _o = klassOop_if_java_lang_Class(o);
+ 
+     // object size
+     _obj_size = _o->size() * wordSize;
+@@ -626,8 +623,8 @@
+     _hashmap = tag_map->hashmap_for(_o);
+     _entry = _hashmap->find(_o);
+ 
+-    // get object tag 
+-    _obj_tag = (_entry == NULL) ? 0 : _entry->tag();   
++    // get object tag
++    _obj_tag = (_entry == NULL) ? 0 : _entry->tag();
+ 
+     // get the class and the class's tag value
+     if (_o == o) {
+@@ -644,45 +641,45 @@
+     post_callback_tag_update(_o, _hashmap, _entry, _obj_tag);
+   }
+ 
+-  inline jlong* obj_tag_p()			{ return &_obj_tag; } 
+-  inline jlong obj_size() const			{ return _obj_size; }
++  inline jlong* obj_tag_p()                     { return &_obj_tag; }
++  inline jlong obj_size() const                 { return _obj_size; }
+   inline jlong obj_tag() const                  { return _obj_tag; }
+   inline klassOop klass() const                 { return _klass; }
+-  inline jlong klass_tag() const		{ return _klass_tag; } 
++  inline jlong klass_tag() const                { return _klass_tag; }
+ };
+ 
+ 
+ 
+ // invoked post-callback to tag, untag, or update the tag of an object
+-void inline CallbackWrapper::post_callback_tag_update(oop o, 
+-                                                      JvmtiTagHashmap* hashmap, 
+-                                                      JvmtiTagHashmapEntry* entry, 
++void inline CallbackWrapper::post_callback_tag_update(oop o,
++                                                      JvmtiTagHashmap* hashmap,
++                                                      JvmtiTagHashmapEntry* entry,
+                                                       jlong obj_tag) {
+-  if (entry == NULL) { 
+-    if (obj_tag != 0) {	
++  if (entry == NULL) {
++    if (obj_tag != 0) {
+       // callback has tagged the object
+       assert(Thread::current()->is_VM_thread(), "must be VMThread");
+       HandleMark hm;
+-      Handle h(o);      
+-      jweak ref = JNIHandles::make_weak_global(h);	
++      Handle h(o);
++      jweak ref = JNIHandles::make_weak_global(h);
+       entry = tag_map()->create_entry(ref, obj_tag);
+       hashmap->add(o, entry);
+     }
+   } else {
+     // object was previously tagged - the callback may have untagged
+     // the object or changed the tag value
+-    if (obj_tag == 0) {		
++    if (obj_tag == 0) {
+       jweak ref = entry->object();
+-	
++
+       JvmtiTagHashmapEntry* entry_removed = hashmap->remove(o);
+-      assert(entry_removed == entry, "checking");   
++      assert(entry_removed == entry, "checking");
+       tag_map()->destroy_entry(entry);
+-	
+-      JNIHandles::destroy_weak_global(ref);	
++
++      JNIHandles::destroy_weak_global(ref);
+     } else {
+       if (obj_tag != entry->tag()) {
+-	 entry->set_tag(obj_tag);	
+-      }     
++         entry->set_tag(obj_tag);
++      }
+     }
+   }
+ }
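
Stripped of the HotSpot types, the wrapper is plain RAII: capture the current
tag in the constructor, let the agent callback scribble on a working copy,
then reconcile in the destructor. Roughly (invented names, editorial sketch
only):

    struct TagQuerySketch {
      long* stored_tag;    // where the map keeps the tag
      long  working_tag;   // copy the callback may modify

      explicit TagQuerySketch(long* slot)
          : stored_tag(slot), working_tag(*slot) {}
      long* tag_p() { return &working_tag; }   // handed to the callback
      ~TagQuerySketch() {                      // post-callback reconciliation
        if (working_tag != *stored_tag)        // tagged, untagged, or changed
          *stored_tag = working_tag;
      }
    };

A scope like { TagQuerySketch w(&tag); (*cb)(w.tag_p()); } applies whatever
the callback did as soon as w goes out of scope, which is exactly the usage
shown in the comment block above CallbackWrapper.
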
+@@ -693,13 +690,13 @@
+ // {
+ //   TwoOopCallbackWrapper wrapper(tag_map, referrer, o);
+ //
+-//   (*callback)(wrapper.klass_tag(), 
+-//               wrapper.obj_size(), 
++//   (*callback)(wrapper.klass_tag(),
++//               wrapper.obj_size(),
+ //               wrapper.obj_tag_p()
+ //               wrapper.referrer_tag_p(), ...)
+ //
+-// } // wrapper goes out of scope here which results in the destructor 
+-//      checking to see if the referrer object has been tagged, untagged, 
++// } // wrapper goes out of scope here which results in the destructor
++//      checking to see if the referrer object has been tagged, untagged,
+ //      or the tag value has changed.
+ //
+ class TwoOopCallbackWrapper : public CallbackWrapper {
+@@ -712,30 +709,30 @@
+   jlong _referrer_klass_tag;
+   jlong* _referrer_tag_p;
+ 
+-  bool is_reference_to_self() const		{ return _is_reference_to_self; }
++  bool is_reference_to_self() const             { return _is_reference_to_self; }
+ 
+  public:
+-  TwoOopCallbackWrapper(JvmtiTagMap* tag_map, oop referrer, oop o) : 
+-    CallbackWrapper(tag_map, o) 
++  TwoOopCallbackWrapper(JvmtiTagMap* tag_map, oop referrer, oop o) :
++    CallbackWrapper(tag_map, o)
+   {
+     // self reference needs to be handled in a special way
+-    _is_reference_to_self = (referrer == o);       
++    _is_reference_to_self = (referrer == o);
+ 
+-    if (_is_reference_to_self) {  
++    if (_is_reference_to_self) {
+       _referrer_klass_tag = klass_tag();
+       _referrer_tag_p = obj_tag_p();
+     } else {
+       // for Classes the klassOop is tagged
+-      _referrer = klassOop_if_java_lang_Class(referrer);  
++      _referrer = klassOop_if_java_lang_Class(referrer);
+       // record the context
+       _referrer_hashmap = tag_map->hashmap_for(_referrer);
+       _referrer_entry = _referrer_hashmap->find(_referrer);
+ 
+-      // get object tag 
++      // get object tag
+       _referrer_obj_tag = (_referrer_entry == NULL) ? 0 : _referrer_entry->tag();
+       _referrer_tag_p = &_referrer_obj_tag;
+ 
+-      // get referrer class tag.  
++      // get referrer class tag.
+       klassOop k = (_referrer == referrer) ?  // Check if referrer is a class...
+           _referrer->klass()                  // No, just get its class
+          : SystemDictionary::class_klass();   // Yes, its class is Class
+@@ -743,33 +740,33 @@
+     }
+   }
+ 
+-  ~TwoOopCallbackWrapper() {    
++  ~TwoOopCallbackWrapper() {
+     if (!is_reference_to_self()){
+-      post_callback_tag_update(_referrer, 
+-	                       _referrer_hashmap, 
+-			       _referrer_entry, 
+-			       _referrer_obj_tag);
++      post_callback_tag_update(_referrer,
++                               _referrer_hashmap,
++                               _referrer_entry,
++                               _referrer_obj_tag);
+     }
+   }
+ 
+   // address of referrer tag
+   // (for a self reference this will return the same thing as obj_tag_p())
+-  inline jlong* referrer_tag_p()	{ return _referrer_tag_p; }
++  inline jlong* referrer_tag_p()        { return _referrer_tag_p; }
+ 
+   // referrer's class tag
+-  inline jlong referrer_klass_tag()	{ return _referrer_klass_tag; }
++  inline jlong referrer_klass_tag()     { return _referrer_klass_tag; }
+ };
+ 
+ // tag an object
+ //
+-// This function is performance critical. If many threads attempt to tag objects 
+-// around the same time then it's possible that the Mutex associated with the 
++// This function is performance critical. If many threads attempt to tag objects
++// around the same time then it's possible that the Mutex associated with the
+ // tag map will be a hot lock. Eliminating this lock will not eliminate the issue
+ // because creating a JNI weak reference requires acquiring a global lock also.
+ void JvmtiTagMap::set_tag(jobject object, jlong tag) {
+   MutexLocker ml(lock());
+ 
+-  // resolve the object 
++  // resolve the object
+   oop o = JNIHandles::resolve_non_null(object);
+ 
+   // for Classes we tag the klassOop
+@@ -783,29 +780,29 @@
+   if (entry == NULL) {
+     if (tag != 0) {
+       HandleMark hm;
+-      Handle h(o);      
+-      jweak ref = JNIHandles::make_weak_global(h);	
++      Handle h(o);
++      jweak ref = JNIHandles::make_weak_global(h);
+ 
+       // the object may have moved because make_weak_global may
+-      // have blocked - thus it is necessary resolve the handle 
++      // have blocked - thus it is necessary to resolve the handle
+       // and re-hash the object.
+       o = h();
+       entry = create_entry(ref, tag);
+-      hashmap_for(o)->add(o, entry);  
++      hashmap_for(o)->add(o, entry);
+     } else {
+       // no-op
+     }
+   } else {
+     // if the object is already tagged then we either update
+     // the tag (if a new tag value has been provided)
+-    // or remove the object if the new tag value is 0. 
++    // or remove the object if the new tag value is 0.
+     // Removing the object requires that we also delete the JNI
+     // weak ref to the object.
+     if (tag == 0) {
+       jweak ref = entry->object();
+       hashmap->remove(o);
+       destroy_entry(entry);
+-      JNIHandles::destroy_weak_global(ref);           
++      JNIHandles::destroy_weak_global(ref);
+     } else {
+       entry->set_tag(tag);
+     }
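
For orientation, set_tag and get_tag are what back the public JVMTI SetTag
and GetTag calls. From an agent the round trip looks roughly like this (C++
bindings of jvmti.h, error handling elided; assumes a valid jvmtiEnv*
obtained at agent load):

    #include <jvmti.h>

    void tag_and_query(jvmtiEnv* jvmti, jobject obj) {
      jvmti->SetTag(obj, 42);     // lands in JvmtiTagMap::set_tag
      jlong tag = 0;
      jvmti->GetTag(obj, &tag);   // JvmtiTagMap::get_tag, tag is now 42
      jvmti->SetTag(obj, 0);      // a tag of 0 removes the entry and its
                                  // JNI weak reference, as the code above shows
    }
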
+@@ -814,9 +811,9 @@
+ 
+ // get the tag for an object
+ jlong JvmtiTagMap::get_tag(jobject object) {
+-  MutexLocker ml(lock()); 
++  MutexLocker ml(lock());
+ 
+-  // resolve the object 
++  // resolve the object
+   oop o = JNIHandles::resolve_non_null(object);
+ 
+   // for Classes get the tag from the klassOop
+@@ -835,7 +832,7 @@
+   char _field_type;
+  public:
+   ClassFieldDescriptor(int index, char type, int offset) :
+-    _field_index(index), _field_type(type), _field_offset(offset) { 
++    _field_index(index), _field_type(type), _field_offset(offset) {
+   }
+   int field_index()  const  { return _field_index; }
+   char field_type()  const  { return _field_type; }
+@@ -855,14 +852,14 @@
+   ClassFieldMap();
+ 
+   // add a field
+-  void add(int index, char type, int offset);   
++  void add(int index, char type, int offset);
+ 
+   // returns the field count for the given class
+   static int compute_field_count(instanceKlassHandle ikh);
+ 
+  public:
+-  ~ClassFieldMap(); 
+- 
++  ~ClassFieldMap();
++
+   // access
+   int field_count()                     { return _fields->length(); }
+   ClassFieldDescriptor* field_at(int i) { return _fields->at(i); }
+@@ -889,7 +886,7 @@
+ }
+ 
+ // Returns a heap allocated ClassFieldMap to describe the static fields
+-// of the given class. 
++// of the given class.
+ //
+ ClassFieldMap* ClassFieldMap::create_map_of_static_fields(klassOop k) {
+   HandleMark hm;
+@@ -897,16 +894,16 @@
+ 
+   // create the field map
+   ClassFieldMap* field_map = new ClassFieldMap();
+-  
++
+   FilteredFieldStream f(ikh, false, false);
+   int max_field_index = f.field_count()-1;
+-  
++
+   int index = 0;
+   for (FilteredFieldStream fld(ikh, true, true); !fld.eos(); fld.next(), index++) {
+     // ignore instance fields
+     if (!fld.access_flags().is_static()) {
+       continue;
+-    }    
++    }
+     field_map->add(max_field_index - index, fld.signature()->byte_at(0), fld.offset());
+   }
+   return field_map;
+@@ -922,9 +919,9 @@
+ 
+   // create the field map
+   ClassFieldMap* field_map = new ClassFieldMap();
+-  
++
+   FilteredFieldStream f(ikh, false, false);
+-  
++
+   int max_field_index = f.field_count()-1;
+ 
+   int index = 0;
+@@ -932,7 +929,7 @@
+     // ignore static fields
+     if (fld.access_flags().is_static()) {
+       continue;
+-    }    
++    }
+     field_map->add(max_field_index - index, fld.signature()->byte_at(0), fld.offset());
+   }
+ 
+@@ -955,7 +952,7 @@
+ 
+   JvmtiCachedClassFieldMap(ClassFieldMap* field_map);
+   ~JvmtiCachedClassFieldMap();
+- 
++
+   static GrowableArray<instanceKlass*>* _class_list;
+   static void add_to_class_list(instanceKlass* ik);
+ 
+@@ -975,12 +972,12 @@
+ GrowableArray<instanceKlass*>* JvmtiCachedClassFieldMap::_class_list;
+ 
+ JvmtiCachedClassFieldMap::JvmtiCachedClassFieldMap(ClassFieldMap* field_map) {
+-  _field_map = field_map;  
++  _field_map = field_map;
+ }
+ 
+ JvmtiCachedClassFieldMap::~JvmtiCachedClassFieldMap() {
+   if (_field_map != NULL) {
+-    delete _field_map; 
++    delete _field_map;
+   }
+ }
+ 
+@@ -989,7 +986,7 @@
+ class ClassFieldMapCacheMark : public StackObj {
+  private:
+    static bool _is_active;
+- public:  
++ public:
+    ClassFieldMapCacheMark() {
+      assert(Thread::current()->is_VM_thread(), "must be VMThread");
+      assert(JvmtiCachedClassFieldMap::cached_field_map_count() == 0, "cache not empty");
+@@ -1014,7 +1011,7 @@
+   _class_list->push(ik);
+ }
+ 
+-// returns the instance field map for the given object 
++// returns the instance field map for the given object
+ // (returns field map cached by the instanceKlass if possible)
+ ClassFieldMap* JvmtiCachedClassFieldMap::get_map_of_instance_fields(oop obj) {
+   assert(Thread::current()->is_VM_thread(), "must be VMThread");
+@@ -1029,7 +1026,7 @@
+     assert(cached_map->field_map() != NULL, "missing field list");
+     return cached_map->field_map();
+   } else {
+-    ClassFieldMap* field_map = ClassFieldMap::create_map_of_instance_fields(obj);  
++    ClassFieldMap* field_map = ClassFieldMap::create_map_of_instance_fields(obj);
+     cached_map = new JvmtiCachedClassFieldMap(field_map);
+     ik->set_jvmti_cached_class_field_map(cached_map);
+     add_to_class_list(ik);
+@@ -1041,7 +1038,7 @@
+ void JvmtiCachedClassFieldMap::clear_cache() {
+   assert(Thread::current()->is_VM_thread(), "must be VMThread");
+   if (_class_list != NULL) {
+-    for (int i = 0; i < _class_list->length(); i++) {   
++    for (int i = 0; i < _class_list->length(); i++) {
+       instanceKlass* ik = _class_list->at(i);
+       JvmtiCachedClassFieldMap* cached_map = ik->jvmti_cached_class_field_map();
+       assert(cached_map != NULL, "should not be NULL");
+@@ -1059,9 +1056,9 @@
+ }
+ 
+ // helper function to indicate if an object is filtered by its tag or class tag
+-static inline bool is_filtered_by_heap_filter(jlong obj_tag, 
+-					      jlong klass_tag, 
+-					      int heap_filter) {
++static inline bool is_filtered_by_heap_filter(jlong obj_tag,
++                                              jlong klass_tag,
++                                              int heap_filter) {
+   // apply the heap filter
+   if (obj_tag != 0) {
+     // filter out tagged objects
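
The heap_filter bits tested here are the public JVMTI_HEAP_FILTER_* constants
the agent passed in. A sketch of the agent side of such a filtered walk
(my_heap_cb is an invented name; error handling elided):

    #include <jvmti.h>

    // visit callback: return 0 to continue, or JVMTI_VISIT_ABORT to stop
    static jint JNICALL my_heap_cb(jlong class_tag, jlong size, jlong* tag_ptr,
                                   jint length, void* user_data) {
      if (*tag_ptr == 0) *tag_ptr = 1;   // tag each object as we visit it
      return 0;
    }

    void walk_untagged_objects(jvmtiEnv* jvmti) {
      jvmtiHeapCallbacks callbacks = {};
      callbacks.heap_iteration_callback = &my_heap_cb;
      // skip already-tagged objects and objects of tagged classes
      jvmti->IterateThroughHeap(
          JVMTI_HEAP_FILTER_TAGGED | JVMTI_HEAP_FILTER_CLASS_TAGGED,
          NULL /* all classes */, &callbacks, NULL);
    }
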
+@@ -1113,13 +1110,13 @@
+ // helper function to invoke string primitive value callback
+ // returns visit control flags
+ static jint invoke_string_value_callback(jvmtiStringPrimitiveValueCallback cb,
+-					 CallbackWrapper* wrapper,
+-					 oop str,
+-					 void* user_data)
++                                         CallbackWrapper* wrapper,
++                                         oop str,
++                                         void* user_data)
+ {
+   assert(str->klass() == SystemDictionary::string_klass(), "not a string");
+-  
+-  // get the string value and length 
++
++  // get the string value and length
+   // (string value may be offset from the base)
+   int s_len = java_lang_String::length(str);
+   typeArrayOop s_value = java_lang_String::value(str);
+@@ -1132,33 +1129,33 @@
+   }
+ 
+   // invoke the callback
+-  return (*cb)(wrapper->klass_tag(), 
++  return (*cb)(wrapper->klass_tag(),
+                wrapper->obj_size(),
+-	       wrapper->obj_tag_p(),
+-	       value,
+-	       (jint)s_len,
+-	       user_data);
++               wrapper->obj_tag_p(),
++               value,
++               (jint)s_len,
++               user_data);
+ }
+ 
+ // helper function to invoke string primitive value callback
+ // returns visit control flags
+ static jint invoke_array_primitive_value_callback(jvmtiArrayPrimitiveValueCallback cb,
+-					          CallbackWrapper* wrapper,
+-					          oop obj,
+-					          void* user_data)
++                                                  CallbackWrapper* wrapper,
++                                                  oop obj,
++                                                  void* user_data)
+ {
+   assert(obj->is_typeArray(), "not a primitive array");
+ 
+   // get base address of first element
+-  typeArrayOop array = typeArrayOop(obj); 
++  typeArrayOop array = typeArrayOop(obj);
+   BasicType type = typeArrayKlass::cast(array->klass())->element_type();
+   void* elements = array->base(type);
+ 
+   // jvmtiPrimitiveType is defined so this mapping is always correct
+   jvmtiPrimitiveType elem_type = (jvmtiPrimitiveType)type2char(type);
+- 
+-  return (*cb)(wrapper->klass_tag(), 
+-               wrapper->obj_size(), 
++
++  return (*cb)(wrapper->klass_tag(),
++               wrapper->obj_size(),
+                wrapper->obj_tag_p(),
+                (jint)array->length(),
+                elem_type,
+@@ -1170,23 +1167,23 @@
+ // of a given class
+ static jint invoke_primitive_field_callback_for_static_fields
+   (CallbackWrapper* wrapper,
+-   oop obj, 
++   oop obj,
+    jvmtiPrimitiveFieldCallback cb,
+-   void* user_data) 
++   void* user_data)
+ {
+   // for static fields only the index will be set
+   static jvmtiHeapReferenceInfo reference_info = { 0 };
+ 
+   assert(obj->klass() == SystemDictionary::class_klass(), "not a class");
+   if (java_lang_Class::is_primitive(obj)) {
+-    return 0; 
++    return 0;
+   }
+   klassOop k = java_lang_Class::as_klassOop(obj);
+   Klass* klass = k->klass_part();
+ 
+   // ignore classes for object and type arrays
+-  if (!klass->oop_is_instance()) {   
+-    return 0;      
++  if (!klass->oop_is_instance()) {
++    return 0;
+   }
+ 
+   // ignore classes which aren't linked yet
+@@ -1199,7 +1196,7 @@
+   ClassFieldMap* field_map = ClassFieldMap::create_map_of_static_fields(k);
+ 
+   // invoke the callback for each static primitive field
+-  for (int i=0; i<field_map->field_count(); i++) {   
++  for (int i=0; i<field_map->field_count(); i++) {
+     ClassFieldDescriptor* field = field_map->field_at(i);
+ 
+     // ignore non-primitive fields
+@@ -1220,13 +1217,13 @@
+     reference_info.field.index = field->field_index();
+ 
+     // invoke the callback
+-    jint res = (*cb)(JVMTI_HEAP_REFERENCE_STATIC_FIELD, 
+-	             &reference_info,
+-                     wrapper->klass_tag(), 
+-		     wrapper->obj_tag_p(),
+-                     value, 
+-		     value_type, 
+-		     user_data);                                            
++    jint res = (*cb)(JVMTI_HEAP_REFERENCE_STATIC_FIELD,
++                     &reference_info,
++                     wrapper->klass_tag(),
++                     wrapper->obj_tag_p(),
++                     value,
++                     value_type,
++                     user_data);
+     if (res & JVMTI_VISIT_ABORT) {
+       delete field_map;
+       return res;
+@@ -1248,11 +1245,11 @@
+   // for instance fields only the index will be set
+   static jvmtiHeapReferenceInfo reference_info = { 0 };
+ 
+-  // get the map of the instance fields 
++  // get the map of the instance fields
+   ClassFieldMap* fields = JvmtiCachedClassFieldMap::get_map_of_instance_fields(obj);
+ 
+   // invoke the callback for each instance primitive field
+-  for (int i=0; i<fields->field_count(); i++) {   
++  for (int i=0; i<fields->field_count(); i++) {
+     ClassFieldDescriptor* field = fields->field_at(i);
+ 
+     // ignore non-primitive fields
+@@ -1273,13 +1270,13 @@
+     reference_info.field.index = field->field_index();
+ 
+     // invoke the callback
+-    jint res = (*cb)(JVMTI_HEAP_REFERENCE_FIELD, 
+-	             &reference_info,
+-                     wrapper->klass_tag(), 
+-		     wrapper->obj_tag_p(),
+-                     value, 
+-		     value_type, 
+-		     user_data);                                            
++    jint res = (*cb)(JVMTI_HEAP_REFERENCE_FIELD,
++                     &reference_info,
++                     wrapper->klass_tag(),
++                     wrapper->obj_tag_p(),
++                     value,
++                     value_type,
++                     user_data);
+     if (res & JVMTI_VISIT_ABORT) {
+       return res;
+     }
+@@ -1297,14 +1294,14 @@
+   VM_HeapIterateOperation(ObjectClosure* blk) { _blk = blk; }
+ 
+   VMOp_Type type() const { return VMOp_HeapIterateOperation; }
+-  void doit() {        
+-    // allows class files maps to be cached during iteration    
++  void doit() {
++    // allows class files maps to be cached during iteration
+     ClassFieldMapCacheMark cm;
+ 
+     // make sure that heap is parsable (fills TLABs with filler objects)
+     Universe::heap()->ensure_parsability(false);  // no need to retire TLABs
+ 
+-    // Verify heap before iteration - if the heap gets corrupted then 
++    // Verify heap before iteration - if the heap gets corrupted then
+     // JVMTI's IterateOverHeap will crash.
+     if (VerifyBeforeIteration) {
+       Universe::verify();
+@@ -1325,7 +1322,7 @@
+ };
+ 
+ 
+-// An ObjectClosure used to support the deprecated IterateOverHeap and 
++// An ObjectClosure used to support the deprecated IterateOverHeap and
+ // IterateOverInstancesOfClass functions
+ class IterateOverHeapObjectClosure: public ObjectClosure {
+  private:
+@@ -1339,7 +1336,7 @@
+   JvmtiTagMap* tag_map() const                    { return _tag_map; }
+   jvmtiHeapObjectFilter object_filter() const     { return _object_filter; }
+   jvmtiHeapObjectCallback object_callback() const { return _heap_object_callback; }
+-  KlassHandle klass() const		          { return _klass; }
++  KlassHandle klass() const                       { return _klass; }
+   const void* user_data() const                   { return _user_data; }
+ 
+   // indicates if iteration has been aborted
+@@ -1348,20 +1345,20 @@
+   void set_iteration_aborted(bool aborted)        { _iteration_aborted = aborted; }
+ 
+  public:
+-  IterateOverHeapObjectClosure(JvmtiTagMap* tag_map, 
+-                               KlassHandle klass, 
+-			       jvmtiHeapObjectFilter object_filter, 
++  IterateOverHeapObjectClosure(JvmtiTagMap* tag_map,
++                               KlassHandle klass,
++                               jvmtiHeapObjectFilter object_filter,
+                                jvmtiHeapObjectCallback heap_object_callback,
+-			       const void* user_data) :
++                               const void* user_data) :
+     _tag_map(tag_map),
+     _klass(klass),
+     _object_filter(object_filter),
+     _heap_object_callback(heap_object_callback),
+     _user_data(user_data),
+-    _iteration_aborted(false) 
+-  { 
++    _iteration_aborted(false)
++  {
+   }
+-    
++
+   void do_object(oop o);
+ };
+ 
+@@ -1369,14 +1366,14 @@
+ void IterateOverHeapObjectClosure::do_object(oop o) {
+   // check if iteration has been halted
+   if (is_iteration_aborted()) return;
+-  
++
+   // ignore any objects that aren't visible to profiler
+-  if (!ServiceUtil::visible_oop(o)) return; 
+-    
++  if (!ServiceUtil::visible_oop(o)) return;
++
+   // instanceof check when filtering by klass
+-  if (!klass().is_null() && !o->is_a(klass()())) { 
+-    return;     
+-  } 
++  if (!klass().is_null() && !o->is_a(klass()())) {
++    return;
++  }
+   // prepare for the callback
+   CallbackWrapper wrapper(tag_map(), o);
+ 
+@@ -1384,15 +1381,15 @@
+   // then don't invoke the callback. Similarly, if the object is untagged
+   // and we're only interested in tagged objects we skip the callback.
+   if (wrapper.obj_tag() != 0) {
+-    if (object_filter() == JVMTI_HEAP_OBJECT_UNTAGGED) return; 
+-  } else {   
++    if (object_filter() == JVMTI_HEAP_OBJECT_UNTAGGED) return;
++  } else {
+     if (object_filter() == JVMTI_HEAP_OBJECT_TAGGED) return;
+   }
+ 
+   // invoke the agent's callback
+-  jvmtiIterationControl control = (*object_callback())(wrapper.klass_tag(), 
+-	                                               wrapper.obj_size(), 
+-                                                       wrapper.obj_tag_p(), 
++  jvmtiIterationControl control = (*object_callback())(wrapper.klass_tag(),
++                                                       wrapper.obj_size(),
++                                                       wrapper.obj_tag_p(),
+                                                        (void*)user_data());
+   if (control == JVMTI_ITERATION_ABORT) {
+     set_iteration_aborted(true);
+@@ -1401,7 +1398,7 @@
+ 
+ // An ObjectClosure used to support the IterateThroughHeap function
+ class IterateThroughHeapObjectClosure: public ObjectClosure {
+- private:  
++ private:
+   JvmtiTagMap* _tag_map;
+   KlassHandle _klass;
+   int _heap_filter;
+@@ -1412,12 +1409,12 @@
+   JvmtiTagMap* tag_map() const                     { return _tag_map; }
+   int heap_filter() const                          { return _heap_filter; }
+   const jvmtiHeapCallbacks* callbacks() const      { return _callbacks; }
+-  KlassHandle klass() const		           { return _klass; }
++  KlassHandle klass() const                        { return _klass; }
+   const void* user_data() const                    { return _user_data; }
+ 
+   // indicates if the iteration has been aborted
+   bool _iteration_aborted;
+-  bool is_iteration_aborted() const                { return _iteration_aborted; }  
++  bool is_iteration_aborted() const                { return _iteration_aborted; }
+ 
+   // used to check the visit control flags. If the abort flag is set
+   // then we set the iteration aborted flag so that the iteration completes
+@@ -1431,17 +1428,17 @@
+   }
+ 
+  public:
+-  IterateThroughHeapObjectClosure(JvmtiTagMap* tag_map, 
+-                                  KlassHandle klass, 
+-			          int heap_filter,
++  IterateThroughHeapObjectClosure(JvmtiTagMap* tag_map,
++                                  KlassHandle klass,
++                                  int heap_filter,
+                                   const jvmtiHeapCallbacks* heap_callbacks,
+-				  const void* user_data) :
++                                  const void* user_data) :
+     _tag_map(tag_map),
+     _klass(klass),
+     _heap_filter(heap_filter),
+     _callbacks(heap_callbacks),
+     _user_data(user_data),
+-    _iteration_aborted(false) 
++    _iteration_aborted(false)
+   {
+   }
+ 
+@@ -1454,8 +1451,8 @@
+   if (is_iteration_aborted()) return;
+ 
+   // ignore any objects that aren't visible to profiler
+-  if (!ServiceUtil::visible_oop(obj)) return; 
+-    
++  if (!ServiceUtil::visible_oop(obj)) return;
++
+   // apply class filter
+   if (is_filtered_by_klass_filter(obj, klass())) return;
+ 
+@@ -1470,15 +1467,15 @@
+   // for arrays we need the length, otherwise -1
+   bool is_array = obj->is_array();
+   int len = is_array ? arrayOop(obj)->length() : -1;
+-  
++
+   // invoke the object callback (if callback is provided)
+   if (callbacks()->heap_iteration_callback != NULL) {
+     jvmtiHeapIterationCallback cb = callbacks()->heap_iteration_callback;
+-    jint res = (*cb)(wrapper.klass_tag(), 
+-	             wrapper.obj_size(), 
+-                     wrapper.obj_tag_p(), 
+-	             (jint)len, 
+-		     (void*)user_data());
++    jint res = (*cb)(wrapper.klass_tag(),
++                     wrapper.obj_size(),
++                     wrapper.obj_tag_p(),
++                     (jint)len,
++                     (void*)user_data());
+     if (check_flags_for_abort(res)) return;
+   }
+ 
+@@ -1487,76 +1484,76 @@
+     jint res;
+     jvmtiPrimitiveFieldCallback cb = callbacks()->primitive_field_callback;
+     if (obj->klass() == SystemDictionary::class_klass()) {
+-      res = invoke_primitive_field_callback_for_static_fields(&wrapper, 
+-	                                                            obj, 
+-								    cb, 
+-								    (void*)user_data());
++      res = invoke_primitive_field_callback_for_static_fields(&wrapper,
++                                                                    obj,
++                                                                    cb,
++                                                                    (void*)user_data());
+     } else {
+-      res = invoke_primitive_field_callback_for_instance_fields(&wrapper, 
+-	                                                              obj, 
+-								      cb, 
+-								      (void*)user_data());
++      res = invoke_primitive_field_callback_for_instance_fields(&wrapper,
++                                                                      obj,
++                                                                      cb,
++                                                                      (void*)user_data());
+     }
+     if (check_flags_for_abort(res)) return;
+   }
+-     
++
+   // string callback
+   if (!is_array &&
+       callbacks()->string_primitive_value_callback != NULL &&
+-      obj->klass() == SystemDictionary::string_klass()) {  
++      obj->klass() == SystemDictionary::string_klass()) {
+     jint res = invoke_string_value_callback(
+-	        callbacks()->string_primitive_value_callback,
+-	        &wrapper,
+-		obj,
+-		(void*)user_data() );	       
++                callbacks()->string_primitive_value_callback,
++                &wrapper,
++                obj,
++                (void*)user_data() );
+     if (check_flags_for_abort(res)) return;
+-  }   
++  }
+ 
+   // array callback
+-  if (is_array && 
++  if (is_array &&
+       callbacks()->array_primitive_value_callback != NULL &&
+-      obj->is_typeArray()) { 
++      obj->is_typeArray()) {
+     jint res = invoke_array_primitive_value_callback(
+-	       callbacks()->array_primitive_value_callback,
+-	       &wrapper,
+-	       obj,
+-	       (void*)user_data() );
++               callbacks()->array_primitive_value_callback,
++               &wrapper,
++               obj,
++               (void*)user_data() );
+     if (check_flags_for_abort(res)) return;
+-  }  
++  }
+ };
+ 
+ 
+ // Deprecated function to iterate over all objects in the heap
+ void JvmtiTagMap::iterate_over_heap(jvmtiHeapObjectFilter object_filter,
+-				    KlassHandle klass, 				     
+-				    jvmtiHeapObjectCallback heap_object_callback, 
+-                                    const void* user_data) 
++                                    KlassHandle klass,
++                                    jvmtiHeapObjectCallback heap_object_callback,
++                                    const void* user_data)
+ {
+   MutexLocker ml(Heap_lock);
+-  IterateOverHeapObjectClosure blk(this, 
+-                                   klass, 
+-				   object_filter, 
+-                                   heap_object_callback, 
+-				   user_data);
++  IterateOverHeapObjectClosure blk(this,
++                                   klass,
++                                   object_filter,
++                                   heap_object_callback,
++                                   user_data);
+   VM_HeapIterateOperation op(&blk);
+-  VMThread::execute(&op);  
++  VMThread::execute(&op);
+ }
+ 
+ 
+ // Iterates over all objects in the heap
+-void JvmtiTagMap::iterate_through_heap(jint heap_filter, 
+-				       KlassHandle klass, 
+-                                       const jvmtiHeapCallbacks* callbacks, 
+-                                       const void* user_data) 
+-{   
+-  MutexLocker ml(Heap_lock); 
+-  IterateThroughHeapObjectClosure blk(this, 
+-                                      klass, 
+-				      heap_filter, 
+-				      callbacks, 
+-				      user_data);
++void JvmtiTagMap::iterate_through_heap(jint heap_filter,
++                                       KlassHandle klass,
++                                       const jvmtiHeapCallbacks* callbacks,
++                                       const void* user_data)
++{
++  MutexLocker ml(Heap_lock);
++  IterateThroughHeapObjectClosure blk(this,
++                                      klass,
++                                      heap_filter,
++                                      callbacks,
++                                      user_data);
+   VM_HeapIterateOperation op(&blk);
+-  VMThread::execute(&op); 
++  VMThread::execute(&op);
+ }
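
Both entry points share one shape: the caller takes Heap_lock, wraps its
closure in a VM_HeapIterateOperation, and hands it to VMThread::execute, so
the actual walk runs in the VMThread at a safepoint where the heap cannot
move underneath the callbacks.
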
+ 
+ // support class for get_objects_with_tags
+@@ -1589,20 +1586,20 @@
+   // and record the reference and tag value.
+   //
+   void do_entry(JvmtiTagHashmapEntry* entry) {
+-    for (int i=0; i<_tag_count; i++) {      
++    for (int i=0; i<_tag_count; i++) {
+       if (_tags[i] == entry->tag()) {
+-	oop o = JNIHandles::resolve(entry->object());
+-	assert(o != NULL && o != JNIHandles::deleted_handle(), "sanity check");
++        oop o = JNIHandles::resolve(entry->object());
++        assert(o != NULL && o != JNIHandles::deleted_handle(), "sanity check");
+ 
+-	// the mirror is tagged
+-	if (o->is_klass()) {
+-	  klassOop k = (klassOop)o;
+-	  o = Klass::cast(k)->java_mirror();
+-	}
+-
+-	jobject ref = JNIHandles::make_local(JavaThread::current(), o);
+-	_object_results->append(ref);
+-	_tag_results->append((uint64_t)entry->tag());
++        // the mirror is tagged
++        if (o->is_klass()) {
++          klassOop k = (klassOop)o;
++          o = Klass::cast(k)->java_mirror();
++        }
++
++        jobject ref = JNIHandles::make_local(JavaThread::current(), o);
++        _object_results->append(ref);
++        _tag_results->append((uint64_t)entry->tag());
+       }
+     }
+   }
+@@ -1612,7 +1609,7 @@
+   jvmtiError result(jint* count_ptr, jobject** object_result_ptr, jlong** tag_result_ptr) {
+     jvmtiError error;
+     int count = _object_results->length();
+-    assert(count >= 0, "sanity check");    
++    assert(count >= 0, "sanity check");
+ 
+     // if object_result_ptr is not NULL then allocate the result and copy
+     // in the object references.
+@@ -1630,14 +1627,14 @@
+     // in the tag values.
+     if (tag_result_ptr != NULL) {
+       error = _env->Allocate(count * sizeof(jlong), (unsigned char**)tag_result_ptr);
+-      if (error != JVMTI_ERROR_NONE) {	
+-	if (object_result_ptr != NULL) {
++      if (error != JVMTI_ERROR_NONE) {
++        if (object_result_ptr != NULL) {
+           _env->Deallocate((unsigned char*)object_result_ptr);
+-	}
++        }
+         return error;
+       }
+       for (int i=0; i<count; i++) {
+-	(*tag_result_ptr)[i] = (jlong)_tag_results->at(i);
++        (*tag_result_ptr)[i] = (jlong)_tag_results->at(i);
+       }
+     }
+ 
+@@ -1647,20 +1644,20 @@
+ };
+ 
+ // return the list of objects with the specified tags
+-jvmtiError JvmtiTagMap::get_objects_with_tags(const jlong* tags, 
++jvmtiError JvmtiTagMap::get_objects_with_tags(const jlong* tags,
+   jint count, jint* count_ptr, jobject** object_result_ptr, jlong** tag_result_ptr) {
+ 
+-  TagObjectCollector collector(env(), tags, count); 
+-  {    
++  TagObjectCollector collector(env(), tags, count);
++  {
+     // iterate over all tagged objects
+     MutexLocker ml(lock());
+-    entry_iterate(&collector);    
++    entry_iterate(&collector);
+   }
+   return collector.result(count_ptr, object_result_ptr, tag_result_ptr);
+ }
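
This collector is what services the public GetObjectsWithTags entry point.
The agent-side round trip, sketched (error handling elided; both result
arrays must be released with Deallocate, matching the Allocate calls above):

    #include <jvmti.h>

    void collect_tagged(jvmtiEnv* jvmti) {
      jlong wanted = 42;
      jint count = 0;
      jobject* objects = NULL;
      jlong* tags = NULL;
      jvmti->GetObjectsWithTags(1, &wanted, &count, &objects, &tags);
      // ... use objects[0] .. objects[count - 1] ...
      jvmti->Deallocate((unsigned char*)objects);
      jvmti->Deallocate((unsigned char*)tags);
    }
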
+ 
+ 
+-// ObjectMarker is used to support the marking objects when walking the 
++// ObjectMarker is used to support marking objects when walking the
+ // heap.
+ //
+ // This implementation uses the existing mark bits in an object for
+@@ -1673,20 +1670,20 @@
+ //
+ // Future work: This implementation currently uses growable arrays to save
+ // the oop and header of interesting objects. As an optimization we could
+-// use the same technique as the GC and make use of the unused area 
++// use the same technique as the GC and make use of the unused area
+ // between top() and end().
+ //
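
A minimal sketch of that save/flag/restore idea (invented types, editorial
illustration: the real code flags objects through the mark word, saves only
headers whose must_be_preserved() test is true, and clears the flags again by
walking the whole heap in done()):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Obj { std::uintptr_t header; };
    const std::uintptr_t MARK_BIT = 0x1;

    struct ObjectMarkerSketch {
      std::vector<Obj*> saved_objs;             // objects with interesting headers
      std::vector<std::uintptr_t> saved_marks;  // their original header words

      void mark(Obj* o) {
        if (o->header != 0) {                   // stand-in for must_be_preserved()
          saved_objs.push_back(o);
          saved_marks.push_back(o->header);
        }
        o->header |= MARK_BIT;                  // cheap in-place visited flag
      }

      bool visited(const Obj* o) const { return (o->header & MARK_BIT) != 0; }

      void done() {                             // write the saved headers back
        for (std::size_t i = 0; i < saved_objs.size(); i++)
          saved_objs[i]->header = saved_marks[i];
      }
    };
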
+ 
+ // An ObjectClosure used to restore the mark bits of an object
+ class RestoreMarksClosure : public ObjectClosure {
+- public:  
+-  void do_object(oop o) {   
++ public:
++  void do_object(oop o) {
+     if (o != NULL) {
+       markOop mark = o->mark();
+       if (mark->is_marked()) {
+         o->init_mark();
+       }
+-    }    
++    }
+   }
+ };
+ 
+@@ -1694,15 +1691,15 @@
+ class ObjectMarker : AllStatic {
+  private:
+   // saved headers
+-  static GrowableArray<oop>* _saved_oop_stack;	    
++  static GrowableArray<oop>* _saved_oop_stack;
+   static GrowableArray<markOop>* _saved_mark_stack;
+ 
+  public:
+-  static void init();			    // initialize
+-  static void done();	                    // clean-up
++  static void init();                       // initialize
++  static void done();                       // clean-up
+ 
+-  static inline void mark(oop o);	    // mark an object
+-  static inline bool visited(oop o);	    // check if object has been visited
++  static inline void mark(oop o);           // mark an object
++  static inline bool visited(oop o);        // check if object has been visited
+ };
+ 
+ GrowableArray<oop>* ObjectMarker::_saved_oop_stack = NULL;
+@@ -1712,7 +1709,7 @@
+ void ObjectMarker::init() {
+   assert(Thread::current()->is_VM_thread(), "must be VMThread");
+ 
+-  // prepare heap for iteration 
++  // prepare heap for iteration
+   Universe::heap()->ensure_parsability(false);  // no need to retire TLABs
+ 
+   // create stacks for interesting headers
+@@ -1729,7 +1726,7 @@
+   // iterate over all objects and restore the mark bits to
+   // their initial value
+   RestoreMarksClosure blk;
+-  Universe::heap()->object_iterate(&blk);  
++  Universe::heap()->object_iterate(&blk);
+ 
+   // When sharing is enabled we need to restore the headers of the objects
+   // in the readwrite space too.
+@@ -1737,13 +1734,13 @@
+     GenCollectedHeap* gch = GenCollectedHeap::heap();
+     CompactingPermGenGen* gen = (CompactingPermGenGen*)gch->perm_gen();
+     gen->rw_space()->object_iterate(&blk);
+-  } 
++  }
+ 
+   // now restore the interesting headers
+-  for (int i = 0; i < _saved_oop_stack->length(); i++) {   
++  for (int i = 0; i < _saved_oop_stack->length(); i++) {
+     oop o = _saved_oop_stack->at(i);
+     markOop mark = _saved_mark_stack->at(i);
+-    o->set_mark(mark);      
++    o->set_mark(mark);
+   }
+ 
+   if (UseBiasedLocking) {
+@@ -1755,7 +1752,7 @@
+   delete _saved_mark_stack;
+ }
+ 
+-// mark an object 
++// mark an object
+ inline void ObjectMarker::mark(oop o) {
+   assert(Universe::heap()->is_in(o), "sanity check");
+   assert(!o->mark()->is_marked(), "should only mark an object once");
+@@ -1763,9 +1760,9 @@
+   // object's mark word
+   markOop mark = o->mark();
+ 
+-  if (mark->must_be_preserved(o)) {                
++  if (mark->must_be_preserved(o)) {
+     _saved_mark_stack->push(mark);
+-    _saved_oop_stack->push(o);      
++    _saved_oop_stack->push(o);
+   }
+ 
+   // mark the object
+@@ -1774,7 +1771,7 @@
+ 
+ // return true if object is marked
+ inline bool ObjectMarker::visited(oop o) {
+-  return o->mark()->is_marked();  
++  return o->mark()->is_marked();
+ }
+ 
+ // Stack allocated class to help ensure that ObjectMarker is used
+@@ -1793,7 +1790,7 @@
+ 
+ // helper to map a jvmtiHeapReferenceKind to an old style jvmtiHeapRootKind
+ // (not performance critical as only used for roots)
+-static jvmtiHeapRootKind toJvmtiHeapRootKind(jvmtiHeapReferenceKind kind) { 
++static jvmtiHeapRootKind toJvmtiHeapRootKind(jvmtiHeapReferenceKind kind) {
+   switch (kind) {
+     case JVMTI_HEAP_REFERENCE_JNI_GLOBAL:   return JVMTI_HEAP_ROOT_JNI_GLOBAL;
+     case JVMTI_HEAP_REFERENCE_SYSTEM_CLASS: return JVMTI_HEAP_ROOT_SYSTEM_CLASS;
+@@ -1807,14 +1804,14 @@
+ }
+ 
+ // Base class for all heap walk contexts. The base class maintains a flag
+-// to indicate if the context is valid or not. 
++// to indicate if the context is valid or not.
+ class HeapWalkContext VALUE_OBJ_CLASS_SPEC {
+  private:
+   bool _valid;
+  public:
+   HeapWalkContext(bool valid)                   { _valid = valid; }
+   void invalidate()                             { _valid = false; }
+-  bool is_valid() const				{ return _valid; }
++  bool is_valid() const                         { return _valid; }
+ };
+ 
+ // A basic heap walk context for the deprecated heap walking functions.
+@@ -1826,16 +1823,16 @@
+   jvmtiStackReferenceCallback _stack_ref_callback;
+   jvmtiObjectReferenceCallback _object_ref_callback;
+ 
+-  // used for caching 
++  // used for caching
+   oop _last_referrer;
+   jlong _last_referrer_tag;
+ 
+  public:
+-  BasicHeapWalkContext() : HeapWalkContext(false) { } 
++  BasicHeapWalkContext() : HeapWalkContext(false) { }
+ 
+   BasicHeapWalkContext(jvmtiHeapRootCallback heap_root_callback,
+                        jvmtiStackReferenceCallback stack_ref_callback,
+-		       jvmtiObjectReferenceCallback object_ref_callback) :
++                       jvmtiObjectReferenceCallback object_ref_callback) :
+     HeapWalkContext(true),
+     _heap_root_callback(heap_root_callback),
+     _stack_ref_callback(stack_ref_callback),
+@@ -1843,9 +1840,9 @@
+     _last_referrer(NULL),
+     _last_referrer_tag(0) {
+   }
+- 
++
+   // accessors
+-  jvmtiHeapRootCallback heap_root_callback() const         { return _heap_root_callback; }  
++  jvmtiHeapRootCallback heap_root_callback() const         { return _heap_root_callback; }
+   jvmtiStackReferenceCallback stack_ref_callback() const   { return _stack_ref_callback; }
+   jvmtiObjectReferenceCallback object_ref_callback() const { return _object_ref_callback;  }
+ 
+@@ -1866,9 +1863,9 @@
+  public:
+   AdvancedHeapWalkContext() : HeapWalkContext(false) { }
+ 
+-  AdvancedHeapWalkContext(jint heap_filter, 
+-                           KlassHandle klass_filter, 
+-			   const jvmtiHeapCallbacks* heap_callbacks) :
++  AdvancedHeapWalkContext(jint heap_filter,
++                           KlassHandle klass_filter,
++                           const jvmtiHeapCallbacks* heap_callbacks) :
+     HeapWalkContext(true),
+     _heap_filter(heap_filter),
+     _klass_filter(klass_filter),
+@@ -1910,14 +1907,14 @@
+   static BasicHeapWalkContext _basic_context;
+   static BasicHeapWalkContext* basic_context() {
+     assert(_basic_context.is_valid(), "invalid");
+-    return &_basic_context; 
++    return &_basic_context;
+   }
+ 
+   // context for advanced style heap walk
+   static AdvancedHeapWalkContext _advanced_context;
+-  static AdvancedHeapWalkContext* advanced_context() { 
++  static AdvancedHeapWalkContext* advanced_context() {
+     assert(_advanced_context.is_valid(), "invalid");
+-    return &_advanced_context; 
++    return &_advanced_context;
+   }
+ 
+   // context needed for all heap walks
+@@ -1941,42 +1938,42 @@
+   static inline bool invoke_basic_heap_root_callback
+     (jvmtiHeapRootKind root_kind, oop obj);
+   static inline bool invoke_basic_stack_ref_callback
+-    (jvmtiHeapRootKind root_kind, jlong thread_tag, jint depth, jmethodID method, 
++    (jvmtiHeapRootKind root_kind, jlong thread_tag, jint depth, jmethodID method,
+      int slot, oop obj);
+   static inline bool invoke_basic_object_reference_callback
+-    (jvmtiObjectReferenceKind ref_kind, oop referrer, oop referree, jint index); 
++    (jvmtiObjectReferenceKind ref_kind, oop referrer, oop referree, jint index);
+ 
+   // invoke advanced style callbacks
+   static inline bool invoke_advanced_heap_root_callback
+     (jvmtiHeapReferenceKind ref_kind, oop obj);
+   static inline bool invoke_advanced_stack_ref_callback
+-    (jvmtiHeapReferenceKind ref_kind, jlong thread_tag, jlong tid, int depth, 
++    (jvmtiHeapReferenceKind ref_kind, jlong thread_tag, jlong tid, int depth,
+      jmethodID method, jlocation bci, jint slot, oop obj);
+   static inline bool invoke_advanced_object_reference_callback
+-    (jvmtiHeapReferenceKind ref_kind, oop referrer, oop referree, jint index); 
++    (jvmtiHeapReferenceKind ref_kind, oop referrer, oop referree, jint index);
+ 
+   // used to report the value of primitive fields
+   static inline bool report_primitive_field
+     (jvmtiHeapReferenceKind ref_kind, oop obj, jint index, address addr, char type);
+ 
+- public:     
++ public:
+   // initialize for basic mode
+   static void initialize_for_basic_heap_walk(JvmtiTagMap* tag_map,
+-                                             GrowableArray<oop>* visit_stack, 
++                                             GrowableArray<oop>* visit_stack,
+                                              const void* user_data,
+                                              BasicHeapWalkContext context);
+ 
+   // initialize for advanced mode
+   static void initialize_for_advanced_heap_walk(JvmtiTagMap* tag_map,
+-                                                GrowableArray<oop>* visit_stack, 
++                                                GrowableArray<oop>* visit_stack,
+                                                 const void* user_data,
+-						AdvancedHeapWalkContext context);
++                                                AdvancedHeapWalkContext context);
+ 
+    // functions to report roots
+   static inline bool report_simple_root(jvmtiHeapReferenceKind kind, oop o);
+-  static inline bool report_jni_local_root(jlong thread_tag, jlong tid, jint depth, 
++  static inline bool report_jni_local_root(jlong thread_tag, jlong tid, jint depth,
+     jmethodID m, oop o);
+-  static inline bool report_stack_ref_root(jlong thread_tag, jlong tid, jint depth, 
++  static inline bool report_stack_ref_root(jlong thread_tag, jlong tid, jint depth,
+     jmethodID method, jlocation bci, jint slot, oop o);
+ 
+   // functions to report references
+@@ -2006,27 +2003,27 @@
+ 
+ // initialize for basic heap walk (IterateOverReachableObjects et al)
+ void CallbackInvoker::initialize_for_basic_heap_walk(JvmtiTagMap* tag_map,
+-                                                     GrowableArray<oop>* visit_stack, 
++                                                     GrowableArray<oop>* visit_stack,
+                                                      const void* user_data,
+-						     BasicHeapWalkContext context) {
++                                                     BasicHeapWalkContext context) {
+   _tag_map = tag_map;
+   _visit_stack = visit_stack;
+   _user_data = user_data;
+   _basic_context = context;
+-  _advanced_context.invalidate();	// will trigger assertion if used
++  _advanced_context.invalidate();       // will trigger assertion if used
+   _heap_walk_type = basic;
+ }
+ 
+ // initialize for advanced heap walk (FollowReferences)
+ void CallbackInvoker::initialize_for_advanced_heap_walk(JvmtiTagMap* tag_map,
+-                                                        GrowableArray<oop>* visit_stack, 
++                                                        GrowableArray<oop>* visit_stack,
+                                                         const void* user_data,
+-						        AdvancedHeapWalkContext context) {
++                                                        AdvancedHeapWalkContext context) {
+   _tag_map = tag_map;
+   _visit_stack = visit_stack;
+   _user_data = user_data;
+   _advanced_context = context;
+-  _basic_context.invalidate();	    // will trigger assertion if used
++  _basic_context.invalidate();      // will trigger assertion if used
+   _heap_walk_type = advanced;
+ }
+ 
+@@ -2041,27 +2038,27 @@
+     return check_for_visit(obj);
+   }
+ 
+-  CallbackWrapper wrapper(tag_map(), obj);  
+-  jvmtiIterationControl control = (*cb)(root_kind, 
+-                                        wrapper.klass_tag(), 
+-                                        wrapper.obj_size(), 
++  CallbackWrapper wrapper(tag_map(), obj);
++  jvmtiIterationControl control = (*cb)(root_kind,
++                                        wrapper.klass_tag(),
++                                        wrapper.obj_size(),
+                                         wrapper.obj_tag_p(),
+                                         (void*)user_data());
+   // push root to visit stack when following references
+-  if (control == JVMTI_ITERATION_CONTINUE && 
++  if (control == JVMTI_ITERATION_CONTINUE &&
+       basic_context()->object_ref_callback() != NULL) {
+     visit_stack()->push(obj);
+   }
+   return control != JVMTI_ITERATION_ABORT;
+ }
+ 
+-// invoke basic style stack ref callback 
+-inline bool CallbackInvoker::invoke_basic_stack_ref_callback(jvmtiHeapRootKind root_kind, 
+-                                                             jlong thread_tag, 
+-							     jint depth,
+-					                     jmethodID method,
+-                                                             jint slot, 
+-							     oop obj) {
++// invoke basic style stack ref callback
++inline bool CallbackInvoker::invoke_basic_stack_ref_callback(jvmtiHeapRootKind root_kind,
++                                                             jlong thread_tag,
++                                                             jint depth,
++                                                             jmethodID method,
++                                                             jint slot,
++                                                             oop obj) {
+   assert(ServiceUtil::visible_oop(obj), "checking");
+ 
+   // if we stack refs should be reported
+@@ -2070,18 +2067,18 @@
+     return check_for_visit(obj);
+   }
+ 
+-  CallbackWrapper wrapper(tag_map(), obj);  
+-  jvmtiIterationControl control = (*cb)(root_kind, 
+-                                        wrapper.klass_tag(), 
+-                                        wrapper.obj_size(), 
+-                                        wrapper.obj_tag_p(), 
+-                                        thread_tag,                                              
+-                                        depth, 
+-                                        method, 
++  CallbackWrapper wrapper(tag_map(), obj);
++  jvmtiIterationControl control = (*cb)(root_kind,
++                                        wrapper.klass_tag(),
++                                        wrapper.obj_size(),
++                                        wrapper.obj_tag_p(),
++                                        thread_tag,
++                                        depth,
++                                        method,
+                                         slot,
+                                         (void*)user_data());
+   // push root to visit stack when following references
+-  if (control == JVMTI_ITERATION_CONTINUE && 
++  if (control == JVMTI_ITERATION_CONTINUE &&
+       basic_context()->object_ref_callback() != NULL) {
+     visit_stack()->push(obj);
+   }
+@@ -2090,9 +2087,9 @@
+ 
+ // invoke basic style object reference callback
+ inline bool CallbackInvoker::invoke_basic_object_reference_callback(jvmtiObjectReferenceKind ref_kind,
+-					                            oop referrer, 
+-								    oop referree, 
+-								    jint index) {
++                                                                    oop referrer,
++                                                                    oop referree,
++                                                                    jint index) {
+ 
+   assert(ServiceUtil::visible_oop(referrer), "checking");
+   assert(ServiceUtil::visible_oop(referree), "checking");
+@@ -2106,16 +2103,16 @@
+     referrer_tag = context->last_referrer_tag();
+   } else {
+     referrer_tag = tag_for(tag_map(), klassOop_if_java_lang_Class(referrer));
+-  } 
++  }
+ 
+-  // do the callback  
++  // do the callback
+   CallbackWrapper wrapper(tag_map(), referree);
+   jvmtiObjectReferenceCallback cb = context->object_ref_callback();
+-  jvmtiIterationControl control = (*cb)(ref_kind, 
+-                                        wrapper.klass_tag(), 
+-					wrapper.obj_size(), 
+-                                        wrapper.obj_tag_p(), 
+-					referrer_tag, 
++  jvmtiIterationControl control = (*cb)(ref_kind,
++                                        wrapper.klass_tag(),
++                                        wrapper.obj_size(),
++                                        wrapper.obj_tag_p(),
++                                        referrer_tag,
+                                         index,
+                                         (void*)user_data());
+ 
+@@ -2129,15 +2126,15 @@
+   }
+ 
+   if (control == JVMTI_ITERATION_CONTINUE) {
+-    return check_for_visit(referree);    
++    return check_for_visit(referree);
+   } else {
+     return control != JVMTI_ITERATION_ABORT;
+   }
+-}							     
++}
+ 
+ // invoke advanced style heap root callback
+-inline bool CallbackInvoker::invoke_advanced_heap_root_callback(jvmtiHeapReferenceKind ref_kind, 
+-								oop obj) {
++inline bool CallbackInvoker::invoke_advanced_heap_root_callback(jvmtiHeapReferenceKind ref_kind,
++                                                                oop obj) {
+   assert(ServiceUtil::visible_oop(obj), "checking");
+ 
+   AdvancedHeapWalkContext* context = advanced_context();
+@@ -2147,7 +2144,7 @@
+   if (cb == NULL) {
+     return check_for_visit(obj);
+   }
+-    
++
+   // apply class filter
+   if (is_filtered_by_klass_filter(obj, context->klass_filter())) {
+     return check_for_visit(obj);
+@@ -2155,11 +2152,11 @@
+ 
+   // setup the callback wrapper
+   CallbackWrapper wrapper(tag_map(), obj);
+-  
++
+   // apply tag filter
+-  if (is_filtered_by_heap_filter(wrapper.obj_tag(), 
+-                                 wrapper.klass_tag(), 
+-				 context->heap_filter())) {
++  if (is_filtered_by_heap_filter(wrapper.obj_tag(),
++                                 wrapper.klass_tag(),
++                                 context->heap_filter())) {
+     return check_for_visit(obj);
+   }
+ 
+@@ -2167,12 +2164,12 @@
+   jint len = (jint)(obj->is_array() ? arrayOop(obj)->length() : -1);
+ 
+   // invoke the callback
+-  jint res  = (*cb)(ref_kind, 
+-                    NULL, // referrer info 
+-                    wrapper.klass_tag(), 
++  jint res  = (*cb)(ref_kind,
++                    NULL, // referrer info
++                    wrapper.klass_tag(),
+                     0,    // referrer_class_tag is 0 for heap root
+-                    wrapper.obj_size(), 
+-                    wrapper.obj_tag_p(), 
++                    wrapper.obj_size(),
++                    wrapper.obj_tag_p(),
+                     NULL, // referrer_tag_p
+                     len,
+                     (void*)user_data());
+@@ -2186,13 +2183,13 @@
+ }
+ 
+ // report a reference from a thread stack to an object
+-inline bool CallbackInvoker::invoke_advanced_stack_ref_callback(jvmtiHeapReferenceKind ref_kind, 
+-                                                                jlong thread_tag, 
++inline bool CallbackInvoker::invoke_advanced_stack_ref_callback(jvmtiHeapReferenceKind ref_kind,
++                                                                jlong thread_tag,
+                                                                 jlong tid,
+-					                        int depth, 
+-                                                                jmethodID method, 
++                                                                int depth,
++                                                                jmethodID method,
+                                                                 jlocation bci,
+-                                                                jint slot, 
++                                                                jint slot,
+                                                                 oop obj) {
+   assert(ServiceUtil::visible_oop(obj), "checking");
+ 
+@@ -2203,7 +2200,7 @@
+   if (cb == NULL) {
+     return check_for_visit(obj);
+   }
+-    
++
+   // apply class filter
+   if (is_filtered_by_klass_filter(obj, context->klass_filter())) {
+     return check_for_visit(obj);
+@@ -2211,11 +2208,11 @@
+ 
+   // setup the callback wrapper
+   CallbackWrapper wrapper(tag_map(), obj);
+-  
++
+   // apply tag filter
+-  if (is_filtered_by_heap_filter(wrapper.obj_tag(), 
+-                                 wrapper.klass_tag(), 
+-				 context->heap_filter())) {
++  if (is_filtered_by_heap_filter(wrapper.obj_tag(),
++                                 wrapper.klass_tag(),
++                                 context->heap_filter())) {
+     return check_for_visit(obj);
+   }
+ 
+@@ -2251,11 +2248,20 @@
+   return true;
+ }
+ 
++// This mask is used to pass reference_info to a jvmtiHeapReferenceCallback
++// only for ref_kinds defined by the JVM TI spec. Otherwise, NULL is passed.
++#define REF_INFO_MASK  ((1 << JVMTI_HEAP_REFERENCE_FIELD)         \
++                      | (1 << JVMTI_HEAP_REFERENCE_STATIC_FIELD)  \
++                      | (1 << JVMTI_HEAP_REFERENCE_ARRAY_ELEMENT) \
++                      | (1 << JVMTI_HEAP_REFERENCE_CONSTANT_POOL) \
++                      | (1 << JVMTI_HEAP_REFERENCE_STACK_LOCAL)   \
++                      | (1 << JVMTI_HEAP_REFERENCE_JNI_LOCAL))
++
+ // invoke the object reference callback to report a reference
+ inline bool CallbackInvoker::invoke_advanced_object_reference_callback(jvmtiHeapReferenceKind ref_kind,
+-					                               oop referrer, 
+-								       oop obj, 
+-								       jint index) 
++                                                                       oop referrer,
++                                                                       oop obj,
++                                                                       jint index)
+ {
+   // field index is only valid field in reference_info
+   static jvmtiHeapReferenceInfo reference_info = { 0 };
+@@ -2278,11 +2284,11 @@
+ 
+   // setup the callback wrapper
+   TwoOopCallbackWrapper wrapper(tag_map(), referrer, obj);
+-  
++
+   // apply tag filter
+-  if (is_filtered_by_heap_filter(wrapper.obj_tag(), 
+-                                 wrapper.klass_tag(), 
+-				 context->heap_filter())) {
++  if (is_filtered_by_heap_filter(wrapper.obj_tag(),
++                                 wrapper.klass_tag(),
++                                 context->heap_filter())) {
+     return check_for_visit(obj);
+   }
+ 
+@@ -2293,8 +2299,8 @@
+   jint len = (jint)(obj->is_array() ? arrayOop(obj)->length() : -1);
+ 
+   // invoke the callback
+-  int res = (*cb)(ref_kind, 
+-                  &reference_info,
++  int res = (*cb)(ref_kind,
++                  (REF_INFO_MASK & (1 << ref_kind)) ? &reference_info : NULL,
+                   wrapper.klass_tag(),
+                   wrapper.referrer_klass_tag(),
+                   wrapper.obj_size(),
+@@ -2312,9 +2318,9 @@
+   return true;
+ }
+ 
+-// report a "simple root" 
++// report a "simple root"
+ inline bool CallbackInvoker::report_simple_root(jvmtiHeapReferenceKind kind, oop obj) {
+-  assert(kind != JVMTI_HEAP_REFERENCE_STACK_LOCAL && 
++  assert(kind != JVMTI_HEAP_REFERENCE_STACK_LOCAL &&
+          kind != JVMTI_HEAP_REFERENCE_JNI_LOCAL, "not a simple root");
+   assert(ServiceUtil::visible_oop(obj), "checking");
+ 
+@@ -2334,7 +2340,7 @@
+   assert(obj->is_typeArray(), "not a primitive array");
+ 
+   AdvancedHeapWalkContext* context = advanced_context();
+-  assert(context->array_primitive_value_callback() != NULL, "no callback");  
++  assert(context->array_primitive_value_callback() != NULL, "no callback");
+ 
+   // apply class filter
+   if (is_filtered_by_klass_filter(obj, context->klass_filter())) {
+@@ -2344,17 +2350,17 @@
+   CallbackWrapper wrapper(tag_map(), obj);
+ 
+   // apply tag filter
+-  if (is_filtered_by_heap_filter(wrapper.obj_tag(), 
+-                                 wrapper.klass_tag(), 
+-				 context->heap_filter())) {
++  if (is_filtered_by_heap_filter(wrapper.obj_tag(),
++                                 wrapper.klass_tag(),
++                                 context->heap_filter())) {
+     return true;
+   }
+ 
+-  // invoke the callback				 
++  // invoke the callback
+   int res = invoke_array_primitive_value_callback(context->array_primitive_value_callback(),
+                                                   &wrapper,
+-						  obj,
+-						  (void*)user_data());
++                                                  obj,
++                                                  (void*)user_data());
+   return (!(res & JVMTI_VISIT_ABORT));
+ }
+ 
+@@ -2363,7 +2369,7 @@
+   assert(str->klass() == SystemDictionary::string_klass(), "not a string");
+ 
+   AdvancedHeapWalkContext* context = advanced_context();
+-  assert(context->string_primitive_value_callback() != NULL, "no callback");  
++  assert(context->string_primitive_value_callback() != NULL, "no callback");
+ 
+   // apply class filter
+   if (is_filtered_by_klass_filter(str, context->klass_filter())) {
+@@ -2373,32 +2379,32 @@
+   CallbackWrapper wrapper(tag_map(), str);
+ 
+   // apply tag filter
+-  if (is_filtered_by_heap_filter(wrapper.obj_tag(), 
+-                                 wrapper.klass_tag(), 
+-				 context->heap_filter())) {
++  if (is_filtered_by_heap_filter(wrapper.obj_tag(),
++                                 wrapper.klass_tag(),
++                                 context->heap_filter())) {
+     return true;
+   }
+ 
+   // invoke the callback
+   int res = invoke_string_value_callback(context->string_primitive_value_callback(),
+                                          &wrapper,
+-					 str,
+-					 (void*)user_data());
++                                         str,
++                                         (void*)user_data());
+   return (!(res & JVMTI_VISIT_ABORT));
+ }
+ 
+ // invoke the primitive field callback
+-inline bool CallbackInvoker::report_primitive_field(jvmtiHeapReferenceKind ref_kind, 
++inline bool CallbackInvoker::report_primitive_field(jvmtiHeapReferenceKind ref_kind,
+                                                     oop obj,
+                                                     jint index,
+                                                     address addr,
+-                                                    char type) 
++                                                    char type)
+ {
+   // for primitive fields only the index will be set
+   static jvmtiHeapReferenceInfo reference_info = { 0 };
+ 
+   AdvancedHeapWalkContext* context = advanced_context();
+-  assert(context->primitive_field_callback() != NULL, "no callback");  
++  assert(context->primitive_field_callback() != NULL, "no callback");
+ 
+   // apply class filter
+   if (is_filtered_by_klass_filter(obj, context->klass_filter())) {
+@@ -2408,9 +2414,9 @@
+   CallbackWrapper wrapper(tag_map(), obj);
+ 
+   // apply tag filter
+-  if (is_filtered_by_heap_filter(wrapper.obj_tag(), 
+-                                 wrapper.klass_tag(), 
+-				 context->heap_filter())) {
++  if (is_filtered_by_heap_filter(wrapper.obj_tag(),
++                                 wrapper.klass_tag(),
++                                 context->heap_filter())) {
+     return true;
+   }
+ 
+@@ -2420,90 +2426,90 @@
+   // map the type
+   jvmtiPrimitiveType value_type = (jvmtiPrimitiveType)type;
+ 
+-  // setup the jvalue  
++  // setup the jvalue
+   jvalue value;
+   copy_to_jvalue(&value, addr, value_type);
+ 
+   jvmtiPrimitiveFieldCallback cb = context->primitive_field_callback();
+-  int res = (*cb)(ref_kind, 
++  int res = (*cb)(ref_kind,
+                   &reference_info,
+                   wrapper.klass_tag(),
+                   wrapper.obj_tag_p(),
+                   value,
+                   value_type,
+                   (void*)user_data());
+-  return (!(res & JVMTI_VISIT_ABORT)); 
++  return (!(res & JVMTI_VISIT_ABORT));
+ }
+ 
+ 
+ // instance field
+-inline bool CallbackInvoker::report_primitive_instance_field(oop obj, 
+-							     jint index, 
+-							     address value, 
++inline bool CallbackInvoker::report_primitive_instance_field(oop obj,
++                                                             jint index,
++                                                             address value,
+                                                              char type) {
+-  return report_primitive_field(JVMTI_HEAP_REFERENCE_FIELD, 
+-                                obj, 
+-				index, 
+-				value, 
+-				type);
++  return report_primitive_field(JVMTI_HEAP_REFERENCE_FIELD,
++                                obj,
++                                index,
++                                value,
++                                type);
+ }
+ 
+ // static field
+-inline bool CallbackInvoker::report_primitive_static_field(oop obj, 
+-							   jint index, 
+-							   address value, 
++inline bool CallbackInvoker::report_primitive_static_field(oop obj,
++                                                           jint index,
++                                                           address value,
+                                                            char type) {
+-  return report_primitive_field(JVMTI_HEAP_REFERENCE_STATIC_FIELD, 
+-                                obj, 
+-				index, 
+-				value, 
+-				type);
++  return report_primitive_field(JVMTI_HEAP_REFERENCE_STATIC_FIELD,
++                                obj,
++                                index,
++                                value,
++                                type);
+ }
+ 
+ // report a JNI local (root object) to the profiler
+ inline bool CallbackInvoker::report_jni_local_root(jlong thread_tag, jlong tid, jint depth, jmethodID m, oop obj) {
+   if (is_basic_heap_walk()) {
+-    return invoke_basic_stack_ref_callback(JVMTI_HEAP_ROOT_JNI_LOCAL, 
+-	                                   thread_tag,
+-			                   depth, 
+-					   m, 
+-					   -1, 
+-					   obj);
+-  } else {
+-    return invoke_advanced_stack_ref_callback(JVMTI_HEAP_REFERENCE_JNI_LOCAL, 
+-	                                      thread_tag, tid,
+-			                      depth, 
+-					      m, 
+-					      (jlocation)-1, 
+-					      -1,
+-					      obj);
++    return invoke_basic_stack_ref_callback(JVMTI_HEAP_ROOT_JNI_LOCAL,
++                                           thread_tag,
++                                           depth,
++                                           m,
++                                           -1,
++                                           obj);
++  } else {
++    return invoke_advanced_stack_ref_callback(JVMTI_HEAP_REFERENCE_JNI_LOCAL,
++                                              thread_tag, tid,
++                                              depth,
++                                              m,
++                                              (jlocation)-1,
++                                              -1,
++                                              obj);
+   }
+ }
+ 
+ 
+-// report a local (stack reference, root object) 
+-inline bool CallbackInvoker::report_stack_ref_root(jlong thread_tag, 
+-						   jlong tid, 
+-						   jint depth, 
+-				                   jmethodID method, 
+-						   jlocation bci, 
+-						   jint slot, 
+-						   oop obj) {
++// report a local (stack reference, root object)
++inline bool CallbackInvoker::report_stack_ref_root(jlong thread_tag,
++                                                   jlong tid,
++                                                   jint depth,
++                                                   jmethodID method,
++                                                   jlocation bci,
++                                                   jint slot,
++                                                   oop obj) {
+   if (is_basic_heap_walk()) {
+-    return invoke_basic_stack_ref_callback(JVMTI_HEAP_ROOT_STACK_LOCAL, 
+-                                           thread_tag, 
++    return invoke_basic_stack_ref_callback(JVMTI_HEAP_ROOT_STACK_LOCAL,
++                                           thread_tag,
+                                            depth,
+-			                   method, 
+-					   slot,
+-					   obj);
+-  } else { 
+-    return invoke_advanced_stack_ref_callback(JVMTI_HEAP_REFERENCE_STACK_LOCAL, 
++                                           method,
++                                           slot,
++                                           obj);
++  } else {
++    return invoke_advanced_stack_ref_callback(JVMTI_HEAP_REFERENCE_STACK_LOCAL,
+                                               thread_tag,
+                                               tid,
+-                                              depth,			                
++                                              depth,
+                                               method,
+                                               bci,
+-                                              slot, 
++                                              slot,
+                                               obj);
+   }
+ }
+@@ -2601,7 +2607,7 @@
+ 
+ // A supporting closure used to process simple roots
+ class SimpleRootsClosure : public OopClosure {
+- private:  
++ private:
+   jvmtiHeapReferenceKind _kind;
+   bool _continue;
+ 
+@@ -2617,7 +2623,7 @@
+     return !_continue;
+   }
+ 
+-  void do_oop(oop* obj_p) {   
++  void do_oop(oop* obj_p) {
+     // iteration has terminated
+     if (stopped()) {
+       return;
+@@ -2627,11 +2633,11 @@
+     oop o = *obj_p;
+     if (o == NULL || o == JNIHandles::deleted_handle()) {
+       return;
+-    }     
++    }
+ 
+     jvmtiHeapReferenceKind kind = root_kind();
+ 
+-    // many roots are Klasses so we use the java mirror 
++    // many roots are Klasses so we use the java mirror
+     if (o->is_klass()) {
+       klassOop k = (klassOop)o;
+       o = Klass::cast(k)->java_mirror();
+@@ -2641,7 +2647,7 @@
+       // class loader as a root. We want this root to be reported as
+       // a root kind of "OTHER" rather than "SYSTEM_CLASS".
+       if (o->is_instance() && root_kind() == JVMTI_HEAP_REFERENCE_SYSTEM_CLASS) {
+-	kind = JVMTI_HEAP_REFERENCE_OTHER;
++        kind = JVMTI_HEAP_REFERENCE_OTHER;
+       }
+     }
+ 
+@@ -2650,17 +2656,17 @@
+     // here.
+     if (!ServiceUtil::visible_oop(o)) {
+       return;
+-    }     
+- 
++    }
++
+     // invoke the callback
+     _continue = CallbackInvoker::report_simple_root(kind, o);
+-   
++
+   }
+ };
+ 
+ // A supporting closure used to process JNI locals
+ class JNILocalRootsClosure : public OopClosure {
+- private:  
++ private:
+   jlong _thread_tag;
+   jlong _tid;
+   jint _depth;
+@@ -2679,7 +2685,7 @@
+     return !_continue;
+   }
+ 
+-  void do_oop(oop* obj_p) {  
++  void do_oop(oop* obj_p) {
+     // iteration has terminated
+     if (stopped()) {
+       return;
+@@ -2691,11 +2697,11 @@
+       return;
+     }
+ 
+-    if (!ServiceUtil::visible_oop(o)) {    
++    if (!ServiceUtil::visible_oop(o)) {
+       return;
+-    }       
++    }
+ 
+-    // invoke the callback    
++    // invoke the callback
+     _continue = CallbackInvoker::report_jni_local_root(_thread_tag, _tid, _depth, _method, o);
+   }
+ };
+@@ -2721,13 +2727,13 @@
+     initial_visit_stack_size = 4000
+   };
+ 
+-  bool _is_advanced_heap_walk;	                    // indicates FollowReferences
++  bool _is_advanced_heap_walk;                      // indicates FollowReferences
+   JvmtiTagMap* _tag_map;
+   Handle _initial_object;
+-  GrowableArray<oop>* _visit_stack;		    // the visit stack
++  GrowableArray<oop>* _visit_stack;                 // the visit stack
+ 
+-  bool _collecting_heap_roots;			    // are we collecting roots
+-  bool _following_object_refs;			    // are we following object references
++  bool _collecting_heap_roots;                      // are we collecting roots
++  bool _following_object_refs;                      // are we following object references
+ 
+   bool _reporting_primitive_fields;                 // optional reporting
+   bool _reporting_primitive_array_values;
+@@ -2760,32 +2766,32 @@
+   inline bool collect_simple_roots();
+   inline bool collect_stack_roots();
+   inline bool collect_stack_roots(JavaThread* java_thread, JNILocalRootsClosure* blk);
+- 
++
+   // visit an object
+   inline bool visit(oop o);
+ 
+- public:        
+-  VM_HeapWalkOperation(JvmtiTagMap* tag_map, 
+-                       Handle initial_object, 
+-		       BasicHeapWalkContext callbacks, 
+-		       const void* user_data);
+-
+-  VM_HeapWalkOperation(JvmtiTagMap* tag_map, 
+-                       Handle initial_object, 
+-		       AdvancedHeapWalkContext callbacks, 
+-		       const void* user_data);
++ public:
++  VM_HeapWalkOperation(JvmtiTagMap* tag_map,
++                       Handle initial_object,
++                       BasicHeapWalkContext callbacks,
++                       const void* user_data);
++
++  VM_HeapWalkOperation(JvmtiTagMap* tag_map,
++                       Handle initial_object,
++                       AdvancedHeapWalkContext callbacks,
++                       const void* user_data);
+ 
+   ~VM_HeapWalkOperation();
+ 
+   VMOp_Type type() const { return VMOp_HeapWalkOperation; }
+-  void doit(); 
++  void doit();
+ };
+ 
+ 
+-VM_HeapWalkOperation::VM_HeapWalkOperation(JvmtiTagMap* tag_map, 
+-					   Handle initial_object,
+-					   BasicHeapWalkContext callbacks, 
+-					   const void* user_data) {
++VM_HeapWalkOperation::VM_HeapWalkOperation(JvmtiTagMap* tag_map,
++                                           Handle initial_object,
++                                           BasicHeapWalkContext callbacks,
++                                           const void* user_data) {
+   _is_advanced_heap_walk = false;
+   _tag_map = tag_map;
+   _initial_object = initial_object;
+@@ -2799,10 +2805,10 @@
+   CallbackInvoker::initialize_for_basic_heap_walk(tag_map, _visit_stack, user_data, callbacks);
+ }
+ 
+-VM_HeapWalkOperation::VM_HeapWalkOperation(JvmtiTagMap* tag_map, 
+-					   Handle initial_object,
+-					   AdvancedHeapWalkContext callbacks, 
+-					   const void* user_data) {
++VM_HeapWalkOperation::VM_HeapWalkOperation(JvmtiTagMap* tag_map,
++                                           Handle initial_object,
++                                           AdvancedHeapWalkContext callbacks,
++                                           const void* user_data) {
+   _is_advanced_heap_walk = true;
+   _tag_map = tag_map;
+   _initial_object = initial_object;
+@@ -2819,7 +2825,7 @@
+   if (_following_object_refs) {
+     assert(_visit_stack != NULL, "checking");
+     delete _visit_stack;
+-    _visit_stack = NULL;   
++    _visit_stack = NULL;
+   }
+ }
+ 
+@@ -2831,7 +2837,7 @@
+     // filtered out
+     return true;
+   }
+-  
++
+   // array reference to its class
+   oop mirror = objArrayKlass::cast(array->klass())->java_mirror();
+   if (!CallbackInvoker::report_class_reference(o, mirror)) {
+@@ -2843,7 +2849,7 @@
+   for (int index=0; index<array->length(); index++) {
+     oop elem = array->obj_at(index);
+     if (elem == NULL) {
+-      continue;	
++      continue;
+     }
+ 
+     // report the array reference o[index] = elem
+@@ -2862,7 +2868,7 @@
+     return false;
+   }
+ 
+-  // report the array contents if required  
++  // report the array contents if required
+   if (is_reporting_primitive_array_values()) {
+     if (!CallbackInvoker::report_primitive_array_values(o)) {
+       return false;
+@@ -2876,7 +2882,7 @@
+   oop* start = ik->start_of_static_fields();
+   oop* end = start + ik->static_oop_field_size();
+   assert(end >= start, "sanity check");
+-  
++
+   if (obj_p >= start && obj_p < end) {
+     return true;
+   } else {
+@@ -2890,7 +2896,7 @@
+   int i;
+   Klass* klass = klassOop(k)->klass_part();
+ 
+-  if (klass->oop_is_instance()) {         
++  if (klass->oop_is_instance()) {
+     instanceKlass* ik = instanceKlass::cast(k);
+ 
+     // ignore the class if it's has been initialized yet
+@@ -2900,7 +2906,7 @@
+ 
+     // get the java mirror
+     oop mirror = klass->java_mirror();
+-      
++
+     // super (only if something more interesting than java.lang.Object)
+     klassOop java_super = ik->java_super();
+     if (java_super != NULL && java_super != SystemDictionary::object_klass()) {
+@@ -2909,12 +2915,12 @@
+         return false;
+       }
+     }
+- 
+-    // class loader	  
++
++    // class loader
+     oop cl = ik->class_loader();
+     if (cl != NULL) {
+       if (!CallbackInvoker::report_class_loader_reference(mirror, cl)) {
+-	return false;
++        return false;
+       }
+     }
+ 
+@@ -2939,18 +2945,18 @@
+       const constantPoolOop pool = ik->constants();
+       for (int i = 1; i < pool->length(); i++) {
+         constantTag tag = pool->tag_at(i).value();
+-	if (tag.is_string() || tag.is_klass()) {
+-	  oop entry;
+-	  if (tag.is_string()) {
+-	    entry = pool->resolved_string_at(i);
+-	    assert(java_lang_String::is_instance(entry), "must be string");
+-	  } else {
+-	    entry = Klass::cast(pool->resolved_klass_at(i))->java_mirror();
+-	  }
+-	  if (!CallbackInvoker::report_constant_pool_reference(mirror, entry, (jint)i)) {
+-	    return false;
+-	  }
+-        }   
++        if (tag.is_string() || tag.is_klass()) {
++          oop entry;
++          if (tag.is_string()) {
++            entry = pool->resolved_string_at(i);
++            assert(java_lang_String::is_instance(entry), "must be string");
++          } else {
++            entry = Klass::cast(pool->resolved_klass_at(i))->java_mirror();
++          }
++          if (!CallbackInvoker::report_constant_pool_reference(mirror, entry, (jint)i)) {
++            return false;
++          }
++        }
+       }
+     }
+ 
+@@ -2973,28 +2979,28 @@
+     ClassFieldMap* field_map = ClassFieldMap::create_map_of_static_fields(k);
+     for (i=0; i<field_map->field_count(); i++) {
+       ClassFieldDescriptor* field = field_map->field_at(i);
+-      char type = field->field_type();  
+-      if (!is_primitive_field_type(type)) {	 
++      char type = field->field_type();
++      if (!is_primitive_field_type(type)) {
+         address addr = (address)k + field->field_offset();
+-	oop* f = (oop*)addr;
+-	assert(verify_static_oop(ik, f), "sanity check");
+-	oop fld_o = *f;
+-	if (fld_o != NULL) {	 
+-	  int slot = field->field_index();
+-	  if (!CallbackInvoker::report_static_field_reference(mirror, fld_o, slot)) {
+-	    delete field_map;
+-	    return false;
+-	  }
+-	}	  
++        oop* f = (oop*)addr;
++        assert(verify_static_oop(ik, f), "sanity check");
++        oop fld_o = *f;
++        if (fld_o != NULL) {
++          int slot = field->field_index();
++          if (!CallbackInvoker::report_static_field_reference(mirror, fld_o, slot)) {
++            delete field_map;
++            return false;
++          }
++        }
+       } else {
+-	 if (is_reporting_primitive_fields()) {
+-	   address addr = (address)k + field->field_offset();
+-	   int slot = field->field_index();
+-           if (!CallbackInvoker::report_primitive_static_field(mirror, slot, addr, type)) {                       
+-	     delete field_map;
+-             return false;            
++         if (is_reporting_primitive_fields()) {
++           address addr = (address)k + field->field_offset();
++           int slot = field->field_index();
++           if (!CallbackInvoker::report_primitive_static_field(mirror, slot, addr, type)) {
++             delete field_map;
++             return false;
+           }
+-        }	  	 
++        }
+       }
+     }
+     delete field_map;
+@@ -3006,10 +3012,10 @@
+ }
+ 
+ // an object references a class and its instance fields
+-// (static fields are ignored here as we report these as 
++// (static fields are ignored here as we report these as
+ // references from the class).
+ inline bool VM_HeapWalkOperation::iterate_over_object(oop o) {
+-  // reference to the class 
++  // reference to the class
+   if (!CallbackInvoker::report_class_reference(o, Klass::cast(o->klass())->java_mirror())) {
+     return false;
+   }
+@@ -3024,15 +3030,15 @@
+       oop* f = (oop*)addr;
+       oop fld_o = *f;
+       if (fld_o != NULL) {
+- 	// reflection code may have a reference to a klassOop.
+-	// - see sun.reflect.UnsafeStaticFieldAccessorImpl and sun.misc.Unsafe
+-	if (fld_o->is_klass()) {
+-	  klassOop k = (klassOop)fld_o;
+-	  fld_o = Klass::cast(k)->java_mirror();
++        // reflection code may have a reference to a klassOop.
++        // - see sun.reflect.UnsafeStaticFieldAccessorImpl and sun.misc.Unsafe
++        if (fld_o->is_klass()) {
++          klassOop k = (klassOop)fld_o;
++          fld_o = Klass::cast(k)->java_mirror();
+         }
+-	int slot = field->field_index();
++        int slot = field->field_index();
+         if (!CallbackInvoker::report_field_reference(o, fld_o, slot)) {
+-          return false;      
++          return false;
+         }
+       }
+     } else {
+@@ -3040,20 +3046,20 @@
+         // primitive instance field
+         address addr = (address)o + field->field_offset();
+         int slot = field->field_index();
+-        if (!CallbackInvoker::report_primitive_instance_field(o, slot, addr, type)) {                       
++        if (!CallbackInvoker::report_primitive_instance_field(o, slot, addr, type)) {
+           return false;
+         }
+       }
+     }
+   }
+ 
+-  // if the object is a java.lang.String 
++  // if the object is a java.lang.String
+   if (is_reporting_string_values() &&
+       o->klass() == SystemDictionary::string_klass()) {
+     if (!CallbackInvoker::report_string_value(o)) {
+       return false;
+     }
+-  } 
++  }
+   return true;
+ }
+ 
+@@ -3066,11 +3072,11 @@
+ // processed later
+ //
+ inline bool VM_HeapWalkOperation::collect_simple_roots() {
+-  SimpleRootsClosure blk;  
+-      
++  SimpleRootsClosure blk;
++
+   // JNI globals
+   blk.set_kind(JVMTI_HEAP_REFERENCE_JNI_GLOBAL);
+-  JNIHandles::oops_do(&blk);  
++  JNIHandles::oops_do(&blk);
+   if (blk.stopped()) {
+     return false;
+   }
+@@ -3093,9 +3099,9 @@
+   for (JavaThread* thread = Threads::first(); thread != NULL ; thread = thread->next()) {
+     oop threadObj = thread->threadObj();
+     if (threadObj != NULL && !thread->is_exiting() && !thread->is_hidden_from_external_view()) {
+-      bool cont = CallbackInvoker::report_simple_root(JVMTI_HEAP_REFERENCE_THREAD, threadObj);     
++      bool cont = CallbackInvoker::report_simple_root(JVMTI_HEAP_REFERENCE_THREAD, threadObj);
+       if (!cont) {
+-	return false;
++        return false;
+       }
+     }
+   }
+@@ -3110,12 +3116,12 @@
+ 
+ // Walk the stack of a given thread and find all references (locals
+ // and JNI calls) and report these as stack references
+-inline bool VM_HeapWalkOperation::collect_stack_roots(JavaThread* java_thread, 
+-						      JNILocalRootsClosure* blk) 
++inline bool VM_HeapWalkOperation::collect_stack_roots(JavaThread* java_thread,
++                                                      JNILocalRootsClosure* blk)
+ {
+   oop threadObj = java_thread->threadObj();
+   assert(threadObj != NULL, "sanity check");
+-  
++
+   // only need to get the thread's tag once per thread
+   jlong thread_tag = tag_for(_tag_map, threadObj);
+ 
+@@ -3126,48 +3132,48 @@
+   if (java_thread->has_last_Java_frame()) {
+ 
+     // vframes are resource allocated
+-    Thread* current_thread = Thread::current(); 
++    Thread* current_thread = Thread::current();
+     ResourceMark rm(current_thread);
+     HandleMark hm(current_thread);
+ 
+-    RegisterMap reg_map(java_thread);   
++    RegisterMap reg_map(java_thread);
+     frame f = java_thread->last_frame();
+     vframe* vf = vframe::new_vframe(&f, &reg_map, java_thread);
+ 
+-    bool is_top_frame = true; 
++    bool is_top_frame = true;
+     int depth = 0;
+     frame* last_entry_frame = NULL;
+-   
++
+     while (vf != NULL) {
+       if (vf->is_java_frame()) {
+ 
+-	// java frame (interpreted, compiled, ...)
+-	javaVFrame *jvf = javaVFrame::cast(vf);
++        // java frame (interpreted, compiled, ...)
++        javaVFrame *jvf = javaVFrame::cast(vf);
+ 
+         // the jmethodID
+-	jmethodID method = jvf->method()->jmethod_id();        
++        jmethodID method = jvf->method()->jmethod_id();
+ 
+-	if (!(jvf->method()->is_native())) {    
+-          jlocation bci = (jlocation)jvf->bci();     	
+-	  StackValueCollection* locals = jvf->locals();
+-	  for (int slot=0; slot<locals->size(); slot++) {
+-	    if (locals->at(slot)->type() == T_OBJECT) {
+-	      oop o = locals->obj_at(slot)();
+-	      if (o == NULL) {
+-	        continue;
+-	      }	    
+-
+-	      // stack reference	    
+-	      if (!CallbackInvoker::report_stack_ref_root(thread_tag, tid, depth, method, 
+-						   bci, slot, o)) {
+-	        return false;
+-	      }
+-	    }
+-	  }
+-	} else {
++        if (!(jvf->method()->is_native())) {
++          jlocation bci = (jlocation)jvf->bci();
++          StackValueCollection* locals = jvf->locals();
++          for (int slot=0; slot<locals->size(); slot++) {
++            if (locals->at(slot)->type() == T_OBJECT) {
++              oop o = locals->obj_at(slot)();
++              if (o == NULL) {
++                continue;
++              }
++
++              // stack reference
++              if (!CallbackInvoker::report_stack_ref_root(thread_tag, tid, depth, method,
++                                                   bci, slot, o)) {
++                return false;
++              }
++            }
++          }
++        } else {
+           blk->set_context(thread_tag, tid, depth, method);
+           if (is_top_frame) {
+-            // JNI locals for the top frame.            
++            // JNI locals for the top frame.
+             java_thread->active_handles()->oops_do(blk);
+           } else {
+             if (last_entry_frame != NULL) {
+@@ -3180,18 +3186,18 @@
+         last_entry_frame = NULL;
+         depth++;
+       } else {
+-	// externalVFrame - for an entry frame then we report the JNI locals
++        // externalVFrame - for an entry frame then we report the JNI locals
+         // when we find the corresponding javaVFrame
+-	frame* fr = vf->frame_pointer();
++        frame* fr = vf->frame_pointer();
+         assert(fr != NULL, "sanity check");
+         if (fr->is_entry_frame()) {
+-	  last_entry_frame = fr;
+-	}
++          last_entry_frame = fr;
++        }
+       }
+ 
+       vf = vf->sender();
+       is_top_frame = false;
+-    }  
++    }
+   } else {
+     // no last java frame but there may be JNI locals
+     blk->set_context(thread_tag, tid, 0, (jmethodID)NULL);
+@@ -3209,19 +3215,19 @@
+     oop threadObj = thread->threadObj();
+     if (threadObj != NULL && !thread->is_exiting() && !thread->is_hidden_from_external_view()) {
+       if (!collect_stack_roots(thread, &blk)) {
+-	return false;
++        return false;
+       }
+     }
+   }
+   return true;
+ }
+ 
+-// visit an object 
++// visit an object
+ // first mark the object as visited
+ // second get all the outbound references from this object (in other words, all
+ // the objects referenced by this object).
+ //
+-bool VM_HeapWalkOperation::visit(oop o) { 
++bool VM_HeapWalkOperation::visit(oop o) {
+   // mark object as visited
+   assert(!ObjectMarker::visited(o), "can't visit same object more than once");
+   ObjectMarker::mark(o);
+@@ -3231,7 +3237,7 @@
+     if (o->klass() == SystemDictionary::class_klass()) {
+       o = klassOop_if_java_lang_Class(o);
+       if (o->is_klass()) {
+-	// a java.lang.Class	
++        // a java.lang.Class
+         return iterate_over_class(klassOop(o));
+       }
+     } else {
+@@ -3240,14 +3246,14 @@
+   }
+ 
+   // object array
+-  if (o->is_objArray()) {  
++  if (o->is_objArray()) {
+     return iterate_over_array(o);
+   }
+ 
+   // type array
+   if (o->is_typeArray()) {
+     return iterate_over_type_array(o);
+-  }  
++  }
+ 
+   return true;
+ }
+@@ -3261,7 +3267,7 @@
+ 
+   // the heap walk starts with an initial object or the heap roots
+   if (initial_object().is_null()) {
+-    if (!collect_simple_roots()) return;  
++    if (!collect_simple_roots()) return;
+     if (!collect_stack_roots()) return;
+   } else {
+     visit_stack()->push(initial_object()());
+@@ -3276,43 +3282,43 @@
+       oop o = visit_stack()->pop();
+       if (!ObjectMarker::visited(o)) {
+         if (!visit(o)) {
+-	  break;
+-	}
++          break;
++        }
+       }
+     }
+-  } 
++  }
+ }
+ 
+ // iterate over all objects that are reachable from a set of roots
+-void JvmtiTagMap::iterate_over_reachable_objects(jvmtiHeapRootCallback heap_root_callback, 
++void JvmtiTagMap::iterate_over_reachable_objects(jvmtiHeapRootCallback heap_root_callback,
+                                                  jvmtiStackReferenceCallback stack_ref_callback,
+-                                                 jvmtiObjectReferenceCallback object_ref_callback, 
+-						 const void* user_data) {
++                                                 jvmtiObjectReferenceCallback object_ref_callback,
++                                                 const void* user_data) {
+   MutexLocker ml(Heap_lock);
+   BasicHeapWalkContext context(heap_root_callback, stack_ref_callback, object_ref_callback);
+   VM_HeapWalkOperation op(this, Handle(), context, user_data);
+-  VMThread::execute(&op); 
++  VMThread::execute(&op);
+ }
+ 
+ // iterate over all objects that are reachable from a given object
+-void JvmtiTagMap::iterate_over_objects_reachable_from_object(jobject object, 
++void JvmtiTagMap::iterate_over_objects_reachable_from_object(jobject object,
+                                                              jvmtiObjectReferenceCallback object_ref_callback,
+-							     const void* user_data) {
++                                                             const void* user_data) {
+   oop obj = JNIHandles::resolve(object);
+   Handle initial_object(Thread::current(), obj);
+ 
+   MutexLocker ml(Heap_lock);
+   BasicHeapWalkContext context(NULL, NULL, object_ref_callback);
+   VM_HeapWalkOperation op(this, initial_object, context, user_data);
+-  VMThread::execute(&op); 
++  VMThread::execute(&op);
+ }
+ 
+ // follow references from an initial object or the GC roots
+-void JvmtiTagMap::follow_references(jint heap_filter, 
+-				    KlassHandle klass, 
+-				    jobject object, 
+-                                    const jvmtiHeapCallbacks* callbacks, 
+-				    const void* user_data) 
++void JvmtiTagMap::follow_references(jint heap_filter,
++                                    KlassHandle klass,
++                                    jobject object,
++                                    const jvmtiHeapCallbacks* callbacks,
++                                    const void* user_data)
+ {
+   oop obj = JNIHandles::resolve(object);
+   Handle initial_object(Thread::current(), obj);
+@@ -3320,48 +3326,48 @@
+   MutexLocker ml(Heap_lock);
+   AdvancedHeapWalkContext context(heap_filter, klass, callbacks);
+   VM_HeapWalkOperation op(this, initial_object, context, user_data);
+-  VMThread::execute(&op); 
++  VMThread::execute(&op);
+ }
+ 
+ 
+-// called post-GC 
++// called post-GC
+ // - for each JVMTI environment with an object tag map, call its rehash
+ // function to re-sync with the new object locations.
+-void JvmtiTagMap::gc_epilogue(bool full) { 
++void JvmtiTagMap::gc_epilogue(bool full) {
+   assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
+   if (JvmtiEnv::environments_might_exist()) {
+     // re-obtain the memory region for the young generation (might
+     // changed due to adaptive resizing policy)
+-    get_young_generation();  
+-     
+-    JvmtiEnvIterator it; 
++    get_young_generation();
++
++    JvmtiEnvIterator it;
+     for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) {
+-      JvmtiTagMap* tag_map = env->tag_map();      
++      JvmtiTagMap* tag_map = env->tag_map();
+       if (tag_map != NULL && !tag_map->is_empty()) {
+         TraceTime t(full ? "JVMTI Full Rehash " : "JVMTI Rehash ", TraceJVMTIObjectTagging);
+-	if (full) {
++        if (full) {
+           tag_map->rehash(0, n_hashmaps);
+-	} else {
+-	  tag_map->rehash(0, 0);	// tag map for young gen only
+-	}
++        } else {
++          tag_map->rehash(0, 0);        // tag map for young gen only
++        }
+       }
+     }
+   }
+ }
+ 
+ // CMS has completed referencing processing so we may have JNI weak refs
+-// to objects in the CMS generation that have been GC'ed. 
++// to objects in the CMS generation that have been GC'ed.
+ void JvmtiTagMap::cms_ref_processing_epilogue() {
+   assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
+   assert(UseConcMarkSweepGC, "should only be used with CMS");
+   if (JvmtiEnv::environments_might_exist()) {
+-    JvmtiEnvIterator it; 
++    JvmtiEnvIterator it;
+     for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) {
+       JvmtiTagMap* tag_map = ((JvmtiEnvBase *)env)->tag_map();
+       if (tag_map != NULL && !tag_map->is_empty()) {
+         TraceTime t("JVMTI Rehash (CMS) ", TraceJVMTIObjectTagging);
+         tag_map->rehash(1, n_hashmaps);    // assume CMS not used in young gen
+-      } 
++      }
+     }
+   }
+ }
+@@ -3377,8 +3383,8 @@
+ //
+ // 3. If the weak reference resolves to an object then we re-hash the object
+ //    to see if it has moved or has been promoted (from the young to the old
+-//    generation for example). 
+-//   
++//    generation for example).
++//
+ void JvmtiTagMap::rehash(int start, int end) {
+ 
+   // does this environment have the OBJECT_FREE event enabled
+@@ -3387,7 +3393,7 @@
+   // counters used for trace message
+   int freed = 0;
+   int moved = 0;
+-  int promoted = 0;  
++  int promoted = 0;
+ 
+   // we assume there are two hashmaps - one for the young generation
+   // and the other for all other spaces.
+@@ -3428,79 +3434,79 @@
+         JvmtiTagHashmapEntry* next = entry->next();
+ 
+         jweak ref = entry->object();
+-        oop oop = JNIHandles::resolve(ref);	
++        oop oop = JNIHandles::resolve(ref);
+ 
+         // has object been GC'ed
+         if (oop == NULL) {
+-	  // grab the tag 
+-	  jlong tag = entry->tag();
+-	  guarantee(tag != 0, "checking");
+-
+-	  // remove GC'ed entry from hashmap and return the
+-	  // entry to the free list
+-	  hashmap->remove(prev, pos, entry);
+-	  destroy_entry(entry);
++          // grab the tag
++          jlong tag = entry->tag();
++          guarantee(tag != 0, "checking");
++
++          // remove GC'ed entry from hashmap and return the
++          // entry to the free list
++          hashmap->remove(prev, pos, entry);
++          destroy_entry(entry);
+ 
+-	  // destroy the weak ref
++          // destroy the weak ref
+           JNIHandles::destroy_weak_global(ref);
+ 
+-	  // post the event to the profiler
++          // post the event to the profiler
+           if (post_object_free) {
+             JvmtiExport::post_object_free(env(), tag);
+-	  }      
++          }
++
++          freed++;
++          entry = next;
++          continue;
++        }
++
++        // if this is the young hashmap then the object is either promoted
++        // or moved.
++        // if this is the other hashmap then the object is moved.
++
++        bool same_gen;
++        if (i == 0) {
++          assert(hashmap == young_hashmap, "checking");
++          same_gen = is_in_young(oop);
++        } else {
++          same_gen = true;
++        }
+ 
+-	  freed++;
+-	  entry = next;
+-	  continue;
+-	}
+-
+-	// if this is the young hashmap then the object is either promoted
+-	// or moved.
+-	// if this is the other hashmap then the object is moved.
+-
+-	bool same_gen;
+-	if (i == 0) {
+-	  assert(hashmap == young_hashmap, "checking");
+-	  same_gen = is_in_young(oop);
+-	} else {
+-	  same_gen = true;
+-	}
+-  
+-
+-	if (same_gen) {     
+-	  // if the object has moved then re-hash it and move its
+-	  // entry to its new location.
+-	  unsigned int new_pos = JvmtiTagHashmap::hash(oop, size);
+-	  if (new_pos != (unsigned int)pos) {
+-	    if (prev == NULL) {
+-	      table[pos] = next;
+-	    } else {
+-	      prev->set_next(next);
+-	    }
+-	    entry->set_next(table[new_pos]);
+-	    table[new_pos] = entry;
+-	    moved++; 
+-	  } else {
+-	    // object didn't move
+-	    prev = entry;
+-	  }
+-	} else {	    
+-	  // object has been promoted so remove the entry from the
+-	  // young hashmap
+-	  assert(hashmap == young_hashmap, "checking");
+-	  hashmap->remove(prev, pos, entry);
+-	  
+-	  // move the entry to the promoted list 
+-	  entry->set_next(promoted_entries);
+-	  promoted_entries = entry;	  	 
+-	}     
+ 
+-	entry = next;
++        if (same_gen) {
++          // if the object has moved then re-hash it and move its
++          // entry to its new location.
++          unsigned int new_pos = JvmtiTagHashmap::hash(oop, size);
++          if (new_pos != (unsigned int)pos) {
++            if (prev == NULL) {
++              table[pos] = next;
++            } else {
++              prev->set_next(next);
++            }
++            entry->set_next(table[new_pos]);
++            table[new_pos] = entry;
++            moved++;
++          } else {
++            // object didn't move
++            prev = entry;
++          }
++        } else {
++          // object has been promoted so remove the entry from the
++          // young hashmap
++          assert(hashmap == young_hashmap, "checking");
++          hashmap->remove(prev, pos, entry);
++
++          // move the entry to the promoted list
++          entry->set_next(promoted_entries);
++          promoted_entries = entry;
++        }
++
++        entry = next;
+       }
+     }
+-  }  
++  }
++
+ 
+-  
+   // add the entries, corresponding to the promoted objects, to the
+   // other hashmap.
+   JvmtiTagHashmapEntry* entry = promoted_entries;
+@@ -3522,8 +3528,8 @@
+       post_total += _hashmap[i]->_entry_count;
+     }
+     int pre_total = post_total + freed;
+-   
+-    tty->print("(%d->%d, %d freed, %d promoted, %d total moves)", 
+-	pre_total, post_total, freed, promoted, total_moves);     
++
++    tty->print("(%d->%d, %d freed, %d promoted, %d total moves)",
++        pre_total, post_total, freed, promoted, total_moves);
+   }
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiTagMap.hpp openjdk/hotspot/src/share/vm/prims/jvmtiTagMap.hpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiTagMap.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiTagMap.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jvmtiTagMap.hpp	1.25 07/05/05 17:06:40 JVM"
+-#endif
+ /*
+  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,10 +19,10 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+-// JvmtiTagMap 
++// JvmtiTagMap
+ 
+ #ifndef _JAVA_JVMTI_TAG_MAP_H_
+ #define _JAVA_JVMTI_TAG_MAP_H_
+@@ -38,8 +35,8 @@
+ class JvmtiTagMap :  public CHeapObj {
+  private:
+ 
+-  enum{	    
+-    n_hashmaps = 2,				    // encapsulates 2 hashmaps
++  enum{
++    n_hashmaps = 2,                                 // encapsulates 2 hashmaps
+     max_free_entries = 4096                         // maximum number of free entries per env
+   };
+ 
+@@ -47,19 +44,19 @@
+   static MemRegion _young_gen;
+   static void get_young_generation();
+ 
+-  JvmtiEnv*		_env;			    // the jvmti environment
+-  Mutex			_lock;			    // lock for this tag map
+-  JvmtiTagHashmap*	_hashmap[n_hashmaps];	    // the hashmaps 
++  JvmtiEnv*             _env;                       // the jvmti environment
++  Mutex                 _lock;                      // lock for this tag map
++  JvmtiTagHashmap*      _hashmap[n_hashmaps];       // the hashmaps
+ 
+-  JvmtiTagHashmapEntry* _free_entries;		    // free list for this environment
++  JvmtiTagHashmapEntry* _free_entries;              // free list for this environment
+   int _free_entries_count;                          // number of entries on the free list
+ 
+   // create a tag map
+-  JvmtiTagMap(JvmtiEnv* env);				  
++  JvmtiTagMap(JvmtiEnv* env);
+ 
+   // accessors
+-  inline Mutex* lock()			    { return &_lock; }
+-  inline JvmtiEnv* env() const		    { return _env; }
++  inline Mutex* lock()                      { return &_lock; }
++  inline JvmtiEnv* env() const              { return _env; }
+ 
+   // rehash tags maps for generation start to end
+   void rehash(int start, int end);
+@@ -69,11 +66,11 @@
+ 
+   // iterate over all entries in this tag map
+   void entry_iterate(JvmtiTagHashmapEntryClosure* closure);
+- 
++
+  public:
+ 
+   // indicates if this tag map is locked
+-  bool is_locked()			    { return lock()->is_locked(); }  
++  bool is_locked()                          { return lock()->is_locked(); }
+ 
+   // return the appropriate hashmap for a given object
+   JvmtiTagHashmap* hashmap_for(oop o);
+@@ -97,36 +94,36 @@
+ 
+   // deprecated heap iteration functions
+   void iterate_over_heap(jvmtiHeapObjectFilter object_filter,
+-                         KlassHandle klass,                          
+-			 jvmtiHeapObjectCallback heap_object_callback, 
+-                         const void* user_data); 
+-
+-  void iterate_over_reachable_objects(jvmtiHeapRootCallback heap_root_callback, 
+-				      jvmtiStackReferenceCallback stack_ref_callback, 
+-				      jvmtiObjectReferenceCallback object_ref_callback, 
++                         KlassHandle klass,
++                         jvmtiHeapObjectCallback heap_object_callback,
++                         const void* user_data);
++
++  void iterate_over_reachable_objects(jvmtiHeapRootCallback heap_root_callback,
++                                      jvmtiStackReferenceCallback stack_ref_callback,
++                                      jvmtiObjectReferenceCallback object_ref_callback,
+                                       const void* user_data);
+ 
+-  void iterate_over_objects_reachable_from_object(jobject object, 
+-						  jvmtiObjectReferenceCallback object_reference_callback,
++  void iterate_over_objects_reachable_from_object(jobject object,
++                                                  jvmtiObjectReferenceCallback object_reference_callback,
+                                                   const void* user_data);
+ 
+ 
+   // advanced (JVMTI 1.1) heap iteration functions
+-  void iterate_through_heap(jint heap_filter, 
+-                            KlassHandle klass, 
+-			    const jvmtiHeapCallbacks* callbacks, 
++  void iterate_through_heap(jint heap_filter,
++                            KlassHandle klass,
++                            const jvmtiHeapCallbacks* callbacks,
+                             const void* user_data);
+ 
+-  void follow_references(jint heap_filter, 
+-                         KlassHandle klass, 
+-			 jobject initial_object, 
+-                         const jvmtiHeapCallbacks* callbacks, 
+-			 const void* user_data);
++  void follow_references(jint heap_filter,
++                         KlassHandle klass,
++                         jobject initial_object,
++                         const jvmtiHeapCallbacks* callbacks,
++                         const void* user_data);
+ 
+   // get tagged objects
+-  jvmtiError get_objects_with_tags(const jlong* tags, jint count, 
+-				   jint* count_ptr, jobject** object_result_ptr, 
+-				   jlong** tag_result_ptr);
++  jvmtiError get_objects_with_tags(const jlong* tags, jint count,
++                                   jint* count_ptr, jobject** object_result_ptr,
++                                   jlong** tag_result_ptr);
+ 
+   // call post-GC to rehash the tag maps.
+   static void gc_epilogue(bool full);
+@@ -136,4 +133,3 @@
+ };
+ 
+ #endif   /* _JAVA_JVMTI_TAG_MAP_H_ */
+-
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiThreadState.cpp openjdk/hotspot/src/share/vm/prims/jvmtiThreadState.cpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiThreadState.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiThreadState.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)jvmtiThreadState.cpp	1.44 07/05/05 17:06:36 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -60,7 +57,7 @@
+   _vm_object_alloc_event_collector = NULL;
+   _the_class_for_redefinition_verification = NULL;
+   _scratch_class_for_redefinition_verification = NULL;
+-  
++
+   // JVMTI ForceEarlyReturn support
+   _pending_step_for_earlyret = false;
+   _earlyret_state = earlyret_inactive;
+@@ -70,7 +67,7 @@
+ 
+   // add all the JvmtiEnvThreadState to the new JvmtiThreadState
+   {
+-    JvmtiEnvIterator it; 
++    JvmtiEnvIterator it;
+     for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) {
+       if (env->is_valid()) {
+         add_env(env);
+@@ -97,19 +94,19 @@
+ }
+ 
+ 
+-JvmtiThreadState::~JvmtiThreadState()   { 
++JvmtiThreadState::~JvmtiThreadState()   {
+   assert(JvmtiThreadState_lock->is_locked(), "sanity check");
+ 
+   // clear this as the state for the thread
+   get_thread()->set_jvmti_thread_state(NULL);
+ 
+   // zap our env thread states
+-  { 
++  {
+     JvmtiEnvBase::entering_dying_thread_env_iteration();
+     JvmtiEnvThreadStateIterator it(this);
+     for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ) {
+       JvmtiEnvThreadState* zap = ets;
+-      ets = it.next(ets);     
++      ets = it.next(ets);
+       delete zap;
+     }
+     JvmtiEnvBase::leaving_dying_thread_env_iteration();
+@@ -148,7 +145,7 @@
+   for (JvmtiThreadState *state = _head; state != NULL; state = state->next()) {
+     // For each environment thread state corresponding to an invalid environment
+     // unlink it from the list and deallocate it.
+-    JvmtiEnvThreadStateIterator it(state); 
++    JvmtiEnvThreadStateIterator it(state);
+     JvmtiEnvThreadState* previous_ets = NULL;
+     JvmtiEnvThreadState* ets = it.first();
+     while (ets != NULL) {
+@@ -180,7 +177,7 @@
+     // list deallocation (which occurs at a safepoint) cannot occur simultaneously
+     debug_only(No_Safepoint_Verifier nosafepoint;)
+ 
+-    JvmtiEnvThreadStateIterator it(this); 
++    JvmtiEnvThreadStateIterator it(this);
+     JvmtiEnvThreadState* previous_ets = NULL;
+     for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
+       previous_ets = ets;
+@@ -196,13 +193,13 @@
+ 
+ 
+ 
+-void JvmtiThreadState::enter_interp_only_mode() { 
++void JvmtiThreadState::enter_interp_only_mode() {
+   assert(_thread->get_interp_only_mode() == 0, "entering interp only when mode not zero");
+   _thread->increment_interp_only_mode();
+ }
+ 
+ 
+-void JvmtiThreadState::leave_interp_only_mode() { 
++void JvmtiThreadState::leave_interp_only_mode() {
+   assert(_thread->get_interp_only_mode() == 1, "leaving interp only when mode not one");
+   _thread->decrement_interp_only_mode();
+ }
+@@ -223,7 +220,7 @@
+   RegisterMap reg_map(get_thread());
+   javaVFrame *jvf = get_thread()->last_java_vframe(&reg_map);
+   int n = 0;
+-  // tty->print_cr("CSD: counting frames on %s ...", 
++  // tty->print_cr("CSD: counting frames on %s ...",
+   //               JvmtiTrace::safe_get_thread_name(get_thread()));
+   while (jvf != NULL) {
+     methodOop method = jvf->method();
+@@ -276,7 +273,7 @@
+   }
+ }
+ 
+-int JvmtiThreadState::cur_stack_depth() { 
++int JvmtiThreadState::cur_stack_depth() {
+   uint32_t debug_bits = 0;
+   guarantee(JavaThread::current() == get_thread() ||
+     JvmtiEnv::is_thread_fully_suspended(get_thread(), false, &debug_bits),
+@@ -320,7 +317,7 @@
+   // a repeat step. The new_bci and method_id is same as current_bci
+   // and current method_id after pop and step for recursive calls.
+   // Force the step by clearing the last location.
+-  JvmtiEnvThreadStateIterator it(this); 
++  JvmtiEnvThreadStateIterator it(this);
+   for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
+     ets->clear_current_location();
+   }
+@@ -342,7 +339,7 @@
+     // in any environment
+     int popframe_number = cur_stack_depth();
+     {
+-      JvmtiEnvThreadStateIterator it(this); 
++      JvmtiEnvThreadStateIterator it(this);
+       for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
+         if (ets->is_frame_pop(popframe_number)) {
+           ets->clear_frame_pop(popframe_number);
+@@ -386,7 +383,7 @@
+   // The new_bci and method_id is same as current_bci and current
+   // method_id after earlyret and step for recursive calls.
+   // Force the step by clearing the last location.
+-  JvmtiEnvThreadStateIterator it(this); 
++  JvmtiEnvThreadStateIterator it(this);
+   for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
+     ets->clear_current_location();
+   }
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiThreadState.hpp openjdk/hotspot/src/share/vm/prims/jvmtiThreadState.hpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiThreadState.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiThreadState.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jvmtiThreadState.hpp	1.35 07/05/05 17:06:40 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #ifndef _JAVA_JVMTITHREADSTATE_H_
+@@ -80,15 +77,15 @@
+   int               _hide_level;
+ 
+   // Used to send class being redefined/retransformed and kind of transform
+-  // info to the class file load hook event handler. 
++  // info to the class file load hook event handler.
+   KlassHandle           *_class_being_redefined;
+   JvmtiClassLoadKind    _class_load_kind;
+ 
+   // This is only valid when is_interp_only_mode() returns true
+   int               _cur_stack_depth;
+-  
++
+   JvmtiThreadEventEnable _thread_event_enable;
+-  
++
+   // for support of JvmtiEnvThreadState
+   JvmtiEnvThreadState*   _head_env_thread_state;
+ 
+@@ -113,10 +110,10 @@
+  public:
+   ~JvmtiThreadState();
+ 
+-  // is event_type enabled and usable for this thread in any enviroments? 
+-  bool is_enabled(jvmtiEvent event_type) { 
+-    return _thread_event_enable.is_enabled(event_type); 
+-  }       
++  // is event_type enabled and usable for this thread in any enviroments?
++  bool is_enabled(jvmtiEvent event_type) {
++    return _thread_event_enable.is_enabled(event_type);
++  }
+ 
+   JvmtiThreadEventEnable *thread_event_enable() {
+     return &_thread_event_enable;
+@@ -137,15 +134,15 @@
+   void leave_interp_only_mode();
+ 
+   // access to the linked list of all JVMTI thread states
+-  static JvmtiThreadState *first() { 
++  static JvmtiThreadState *first() {
+     assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");
+-    return _head; 
++    return _head;
+   }
+ 
+   JvmtiThreadState *next()                  {
+     return _next;
+   }
+-                  
++
+   // Current stack depth is only valid when is_interp_only_mode() returns true.
+   // These functions should only be called at a safepoint - usually called from same thread.
+   // Returns the number of Java activations on the stack.
+@@ -159,7 +156,7 @@
+   inline JavaThread *get_thread()      { return _thread;              }
+   inline bool is_exception_detected()  { return _exception_detected;  }
+   inline bool is_exception_caught()    { return _exception_caught;  }
+-  inline void set_exception_detected() { _exception_detected = true; 
++  inline void set_exception_detected() { _exception_detected = true;
+                                          _exception_caught = false; }
+   inline void set_exception_caught()   { _exception_caught = true;
+                                          _exception_detected = false; }
+@@ -188,7 +185,7 @@
+   void set_pending_step_for_popframe() { _pending_step_for_popframe = true;  }
+   void clr_pending_step_for_popframe() { _pending_step_for_popframe = false; }
+   bool is_pending_step_for_popframe()  { return _pending_step_for_popframe;  }
+-  void process_pending_step_for_popframe();   
++  void process_pending_step_for_popframe();
+ 
+   // Step pending flag is set when ForceEarlyReturn is called and it is cleared
+   // when step for the ForceEarlyReturn is completed.
+@@ -220,7 +217,7 @@
+     return _class_load_kind;
+   }
+ 
+-  // RedefineClasses support 
++  // RedefineClasses support
+   // The bug 6214132 caused the verification to fail.
+   //
+   // Below is the detailed description of the fix approach taken:
+@@ -244,7 +241,7 @@
+   //   info about equivalent klass versions and use it to replace a klassOop
+   //   of _the_class with a klassOop of _scratch_class. The function
+   //   class_to_verify_considering_redefinition() must be called for it.
+-  // 
++  //
+   //   Note again, that this redirection happens only for the verifier thread.
+   //   Other threads have very small overhead by checking the existence
+   //   of the jvmtiThreadSate and the information about klasses equivalence.
+@@ -270,14 +267,14 @@
+                                                     JavaThread *thread) {
+     JvmtiThreadState *state = thread->jvmti_thread_state();
+     if (state != NULL && state->_the_class_for_redefinition_verification != NULL) {
+-      if ((*(state->_the_class_for_redefinition_verification))() == klass) {           
++      if ((*(state->_the_class_for_redefinition_verification))() == klass) {
+         klass = (*(state->_scratch_class_for_redefinition_verification))();
+       }
+     }
+     return klass;
+   }
+-            
+-  // Todo: get rid of this!  
++
++  // Todo: get rid of this!
+  private:
+   bool _debuggable;
+  public:
+@@ -290,20 +287,20 @@
+ 
+   bool may_be_walked();
+ 
+-  // Thread local event collector setter and getter methods. 
++  // Thread local event collector setter and getter methods.
+   JvmtiDynamicCodeEventCollector* get_dynamic_code_event_collector() {
+     return _dynamic_code_event_collector;
+-  } 
++  }
+   JvmtiVMObjectAllocEventCollector* get_vm_object_alloc_event_collector() {
+     return _vm_object_alloc_event_collector;
+-  } 
++  }
+   void set_dynamic_code_event_collector(JvmtiDynamicCodeEventCollector* collector) {
+     _dynamic_code_event_collector = collector;
+-  } 
++  }
+   void set_vm_object_alloc_event_collector(JvmtiVMObjectAllocEventCollector* collector) {
+     _vm_object_alloc_event_collector = collector;
+   }
+-    
++
+ 
+   //
+   // Frame routines
+@@ -383,10 +380,10 @@
+  private:
+   JvmtiThreadState *_state;
+ 
+- public: 
++ public:
+   RedefineVerifyMark(KlassHandle *the_class, KlassHandle *scratch_class,
+                      JvmtiThreadState *state) : _state(state)
+-  { 
++  {
+     _state->set_class_versions_map(the_class, scratch_class);
+     (*scratch_class)->set_java_mirror((*the_class)->java_mirror());
+   }
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiThreadState.inline.hpp openjdk/hotspot/src/share/vm/prims/jvmtiThreadState.inline.hpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiThreadState.inline.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiThreadState.inline.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jvmtiThreadState.inline.hpp	1.7 07/05/05 17:06:40 JVM"
+-#endif
+ /*
+  * Copyright 2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,14 +19,14 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // JvmtiEnvThreadStateIterator implementation
+ 
+ inline JvmtiEnvThreadStateIterator::JvmtiEnvThreadStateIterator(JvmtiThreadState* thread_state) {
+   state = thread_state;
+-  Thread::current()->entering_jvmti_env_iteration(); 
++  Thread::current()->entering_jvmti_env_iteration();
+ }
+ 
+ inline JvmtiEnvThreadStateIterator::~JvmtiEnvThreadStateIterator() {
+@@ -43,24 +40,23 @@
+ inline JvmtiEnvThreadState* JvmtiEnvThreadStateIterator::next(JvmtiEnvThreadState* ets) {
+   return ets->next();
+ }
+-  
++
+ // JvmtiThreadState implementation
+ 
+ JvmtiEnvThreadState* JvmtiThreadState::env_thread_state(JvmtiEnvBase *env) {
+-  JvmtiEnvThreadStateIterator it(this); 
++  JvmtiEnvThreadStateIterator it(this);
+   for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
+     if ((JvmtiEnvBase*)(ets->get_env()) == env) {
+-      return ets; 
++      return ets;
+     }
+   }
+   return NULL;
+ }
+ 
+-JvmtiEnvThreadState* JvmtiThreadState::head_env_thread_state() { 
+-  return _head_env_thread_state; 
++JvmtiEnvThreadState* JvmtiThreadState::head_env_thread_state() {
++  return _head_env_thread_state;
+ }
+ 
+ void JvmtiThreadState::set_head_env_thread_state(JvmtiEnvThreadState* ets) {
+-  _head_env_thread_state = ets; 
++  _head_env_thread_state = ets;
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiTrace.cpp openjdk/hotspot/src/share/vm/prims/jvmtiTrace.cpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiTrace.cpp	1969-12-31 19:00:00.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiTrace.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -0,0 +1,297 @@
++/*
++ * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
++ * CA 95054 USA or visit www.sun.com if you need additional information or
++ * have any questions.
++ *
++ */
++
++# include "incls/_precompiled.incl"
++# include "incls/_jvmtiTrace.cpp.incl"
++
++//
++// class JvmtiTrace
++//
++// Support for JVMTI tracing code
++//
++// ------------
++// Usage:
++//    -XX:TraceJVMTI=DESC,DESC,DESC
++//
++//    DESC is   DOMAIN ACTION KIND
++//
++//    DOMAIN is function name
++//              event name
++//              "all" (all functions and events)
++//              "func" (all functions except boring)
++//              "allfunc" (all functions)
++//              "event" (all events)
++//              "ec" (event controller)
++//
++//    ACTION is "+" (add)
++//              "-" (remove)
++//
++//    KIND is
++//     for func
++//              "i" (input params)
++//              "e" (error returns)
++//              "o" (output)
++//     for event
++//              "t" (event triggered aka posted)
++//              "s" (event sent)
++//
++// Example:
++//            -XX:TraceJVMTI=ec+,GetCallerFrame+ie,Breakpoint+s
++
++#ifdef JVMTI_TRACE
++
++bool JvmtiTrace::_initialized = false;
++bool JvmtiTrace::_on = false;
++bool JvmtiTrace::_trace_event_controller = false;
++
++void JvmtiTrace::initialize() {
++  if (_initialized) {
++    return;
++  }
++  SafeResourceMark rm;
++
++  const char *very_end;
++  const char *curr;
++  if (strlen(TraceJVMTI)) {
++    curr = TraceJVMTI;
++  } else {
++    curr = "";  // hack in fixed tracing here
++  }
++  very_end = curr + strlen(curr);
++  while (curr < very_end) {
++    const char *curr_end = strchr(curr, ',');
++    if (curr_end == NULL) {
++      curr_end = very_end;
++    }
++    const char *op_pos = strchr(curr, '+');
++    const char *minus_pos = strchr(curr, '-');
++    if (minus_pos != NULL && (minus_pos < op_pos || op_pos == NULL)) {
++      op_pos = minus_pos;
++    }
++    char op;
++    const char *flags = op_pos + 1;
++    const char *flags_end = curr_end;
++    if (op_pos == NULL || op_pos > curr_end) {
++      flags = "ies";
++      flags_end = flags + strlen(flags);
++      op_pos = curr_end;
++      op = '+';
++    } else {
++      op = *op_pos;
++    }
++    jbyte bits = 0;
++    for (; flags < flags_end; ++flags) {
++      switch (*flags) {
++      case 'i':
++        bits |= SHOW_IN;
++        break;
++      case 'I':
++        bits |= SHOW_IN_DETAIL;
++        break;
++      case 'e':
++        bits |= SHOW_ERROR;
++        break;
++      case 'o':
++        bits |= SHOW_OUT;
++        break;
++      case 'O':
++        bits |= SHOW_OUT_DETAIL;
++        break;
++      case 't':
++        bits |= SHOW_EVENT_TRIGGER;
++        break;
++      case 's':
++        bits |= SHOW_EVENT_SENT;
++        break;
++      default:
++        tty->print_cr("Invalid trace flag '%c'", *flags);
++        break;
++      }
++    }
++    const int FUNC = 1;
++    const int EXCLUDE  = 2;
++    const int ALL_FUNC = 4;
++    const int EVENT = 8;
++    const int ALL_EVENT = 16;
++    int domain = 0;
++    size_t len = op_pos - curr;
++    if (op_pos == curr) {
++      domain = ALL_FUNC | FUNC | ALL_EVENT | EVENT | EXCLUDE;
++    } else if (len==3 && strncmp(curr, "all", 3)==0) {
++      domain = ALL_FUNC | FUNC | ALL_EVENT | EVENT;
++    } else if (len==7 && strncmp(curr, "allfunc", 7)==0) {
++      domain = ALL_FUNC | FUNC;
++    } else if (len==4 && strncmp(curr, "func", 4)==0) {
++      domain = ALL_FUNC | FUNC | EXCLUDE;
++    } else if (len==8 && strncmp(curr, "allevent", 8)==0) {
++      domain = ALL_EVENT | EVENT;
++    } else if (len==5 && strncmp(curr, "event", 5)==0) {
++      domain = ALL_EVENT | EVENT;
++    } else if (len==2 && strncmp(curr, "ec", 2)==0) {
++      _trace_event_controller = true;
++      tty->print_cr("JVMTI Tracing the event controller");
++    } else {
++      domain = FUNC | EVENT;  // go searching
++    }
++
++    int exclude_index = 0;
++    if (domain & FUNC) {
++      if (domain & ALL_FUNC) {
++        if (domain & EXCLUDE) {
++          tty->print("JVMTI Tracing all significant functions");
++        } else {
++          tty->print_cr("JVMTI Tracing all functions");
++        }
++      }
++      for (int i = 0; i <= _max_function_index; ++i) {
++        if (domain & EXCLUDE && i == _exclude_functions[exclude_index]) {
++          ++exclude_index;
++        } else {
++          bool do_op = false;
++          if (domain & ALL_FUNC) {
++            do_op = true;
++          } else {
++            const char *fname = function_name(i);
++            if (fname != NULL) {
++              size_t fnlen = strlen(fname);
++              if (len==fnlen && strncmp(curr, fname, fnlen)==0) {
++                tty->print_cr("JVMTI Tracing the function: %s", fname);
++                do_op = true;
++              }
++            }
++          }
++          if (do_op) {
++            if (op == '+') {
++              _trace_flags[i] |= bits;
++            } else {
++              _trace_flags[i] &= ~bits;
++            }
++            _on = true;
++          }
++        }
++      }
++    }
++    if (domain & EVENT) {
++      if (domain & ALL_EVENT) {
++        tty->print_cr("JVMTI Tracing all events");
++      }
++      for (int i = 0; i <= _max_event_index; ++i) {
++        bool do_op = false;
++        if (domain & ALL_EVENT) {
++          do_op = true;
++        } else {
++          const char *ename = event_name(i);
++          if (ename != NULL) {
++            size_t evtlen = strlen(ename);
++            if (len==evtlen && strncmp(curr, ename, evtlen)==0) {
++              tty->print_cr("JVMTI Tracing the event: %s", ename);
++              do_op = true;
++            }
++          }
++        }
++        if (do_op) {
++          if (op == '+') {
++            _event_trace_flags[i] |= bits;
++          } else {
++            _event_trace_flags[i] &= ~bits;
++          }
++          _on = true;
++        }
++      }
++    }
++    if (!_on && (domain & (FUNC|EVENT))) {
++      tty->print_cr("JVMTI Trace domain not found");
++    }
++    curr = curr_end + 1;
++  }
++  _initialized = true;
++}
++
++
++void JvmtiTrace::shutdown() {
++  int i;
++  _on = false;
++  _trace_event_controller = false;
++  for (i = 0; i <= _max_function_index; ++i) {
++    _trace_flags[i] = 0;
++  }
++  for (i = 0; i <= _max_event_index; ++i) {
++    _event_trace_flags[i] = 0;
++  }
++}
++
++
++const char* JvmtiTrace::enum_name(const char** names, const jint* values, jint value) {
++  for (int index = 0; names[index] != 0; ++index) {
++    if (values[index] == value) {
++      return names[index];
++    }
++  }
++  return "*INVALID-ENUM-VALUE*";
++}
++
++
++// return a valid string no matter what state the thread is in
++const char *JvmtiTrace::safe_get_thread_name(Thread *thread) {
++  if (thread == NULL) {
++    return "NULL";
++  }
++  if (!thread->is_Java_thread()) {
++    return thread->name();
++  }
++  JavaThread *java_thread = (JavaThread *)thread;
++  oop threadObj = java_thread->threadObj();
++  if (threadObj == NULL) {
++    return "NULL";
++  }
++  typeArrayOop name = java_lang_Thread::name(threadObj);
++  if (name == NULL) {
++    return "<NOT FILLED IN>";
++  }
++  return UNICODE::as_utf8((jchar*) name->base(T_CHAR), name->length());
++}
++
++
++// return the name of the current thread
++const char *JvmtiTrace::safe_get_current_thread_name() {
++  if (JvmtiEnv::is_vm_live()) {
++    return JvmtiTrace::safe_get_thread_name(Thread::current());
++  } else {
++    return "VM not live";
++  }
++}
++
++// return a valid string no matter what the state of k_mirror
++const char * JvmtiTrace::get_class_name(oop k_mirror) {
++  if (java_lang_Class::is_primitive(k_mirror)) {
++    return "primitive";
++  }
++  klassOop k_oop = java_lang_Class::as_klassOop(k_mirror);
++  if (k_oop == NULL) {
++    return "INVALID";
++  }
++  return Klass::cast(k_oop)->external_name();
++}
++
++#endif /*JVMTI_TRACE */
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiTrace.hpp openjdk/hotspot/src/share/vm/prims/jvmtiTrace.hpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiTrace.hpp	1969-12-31 19:00:00.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiTrace.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -0,0 +1,98 @@
++/*
++ * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
++ * CA 95054 USA or visit www.sun.com if you need additional information or
++ * have any questions.
++ *
++ */
++
++///////////////////////////////////////////////////////////////
++//
++// class JvmtiTrace
++//
++// Support for JVMTI tracing code
++//
++
++// Support tracing except in product build on the client compiler
++#ifndef PRODUCT
++#define JVMTI_TRACE 1
++#else
++#ifdef COMPILER2
++#define JVMTI_TRACE 1
++#endif
++#endif
++
++#ifdef JVMTI_TRACE
++
++class JvmtiTrace : AllStatic {
++
++  static bool        _initialized;
++  static bool        _on;
++  static bool        _trace_event_controller;
++  static jbyte       _trace_flags[];
++  static jbyte       _event_trace_flags[];
++  static const char* _event_names[];
++  static jint        _max_function_index;
++  static jint        _max_event_index;
++  static short       _exclude_functions[];
++  static const char* _function_names[];
++
++public:
++
++  enum {
++    SHOW_IN =              01,
++    SHOW_OUT =             02,
++    SHOW_ERROR =           04,
++    SHOW_IN_DETAIL =      010,
++    SHOW_OUT_DETAIL =     020,
++    SHOW_EVENT_TRIGGER =  040,
++    SHOW_EVENT_SENT =    0100
++  };
++
++  static bool tracing()                     { return _on; }
++  static bool trace_event_controller()      { return _trace_event_controller; }
++  static jbyte trace_flags(int num)         { return _trace_flags[num]; }
++  static jbyte event_trace_flags(int num)   { return _event_trace_flags[num]; }
++  static const char* function_name(int num) { return _function_names[num]; } // To Do: add range checking
++
++  static const char* event_name(int num) {
++    static char* ext_event_name = (char*)"(extension event)";
++    if (num >= JVMTI_MIN_EVENT_TYPE_VAL && num <= JVMTI_MAX_EVENT_TYPE_VAL) {
++      return _event_names[num];
++    } else {
++      return ext_event_name;
++    }
++  }
++
++  static const char* enum_name(const char** names, const jint* values, jint value);
++
++  static void initialize();
++  static void shutdown();
++
++  // return a valid string no matter what state the thread is in
++  static const char *safe_get_thread_name(Thread *thread);
++
++  // return the name of the current thread
++  static const char *safe_get_current_thread_name();
++
++  // return a valid string no matter what the state of k_mirror
++  static const char *get_class_name(oop k_mirror);
++};
++
++#endif /*JVMTI_TRACE */
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiUtil.cpp openjdk/hotspot/src/share/vm/prims/jvmtiUtil.cpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiUtil.cpp	1969-12-31 19:00:00.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiUtil.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -0,0 +1,40 @@
++/*
++ * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
++ * CA 95054 USA or visit www.sun.com if you need additional information or
++ * have any questions.
++ *
++ */
++
++# include "incls/_precompiled.incl"
++# include "incls/_jvmtiUtil.cpp.incl"
++//
++// class JvmtiUtil
++//
++
++ResourceArea* JvmtiUtil::_single_threaded_resource_area = NULL;
++
++ResourceArea* JvmtiUtil::single_threaded_resource_area() {
++  if (_single_threaded_resource_area == NULL) {
++    // lazily create the single threaded resource area
++    // pick a size which is not a standard since the pools don't exist yet
++    _single_threaded_resource_area = new ResourceArea(Chunk::non_pool_size);
++  }
++  return _single_threaded_resource_area;
++}
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmtiUtil.hpp openjdk/hotspot/src/share/vm/prims/jvmtiUtil.hpp
+--- openjdk6/hotspot/src/share/vm/prims/jvmtiUtil.hpp	1969-12-31 19:00:00.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmtiUtil.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -0,0 +1,86 @@
++/*
++ * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
++ * CA 95054 USA or visit www.sun.com if you need additional information or
++ * have any questions.
++ *
++ */
++
++///////////////////////////////////////////////////////////////
++//
++// class JvmtiUtil
++//
++// class for miscellaneous jvmti utility static methods
++//
++
++class JvmtiUtil : AllStatic {
++
++  static ResourceArea* _single_threaded_resource_area;
++
++  static const char* _error_names[];
++  static const bool  _event_threaded[];
++
++public:
++
++  static ResourceArea* single_threaded_resource_area();
++
++  static const char* error_name(int num)    { return _error_names[num]; }    // To Do: add range checking
++
++  static const bool has_event_capability(jvmtiEvent event_type, const jvmtiCapabilities* capabilities_ptr);
++
++  static const bool  event_threaded(int num) {
++    if (num >= JVMTI_MIN_EVENT_TYPE_VAL && num <= JVMTI_MAX_EVENT_TYPE_VAL) {
++      return _event_threaded[num];
++    }
++    if (num >= EXT_MIN_EVENT_TYPE_VAL && num <= EXT_MAX_EVENT_TYPE_VAL) {
++      return false;
++    }
++    ShouldNotReachHere();
++    return false;
++  }
++};
++
++
++///////////////////////////////////////////////////////////////
++//
++// class SafeResourceMark
++//
++// ResourceMarks that work before threads exist
++//
++
++class SafeResourceMark : public ResourceMark {
++
++  ResourceArea* safe_resource_area() {
++    Thread* thread;
++
++    if (Threads::number_of_threads() == 0) {
++      return JvmtiUtil::single_threaded_resource_area();
++    }
++    thread = ThreadLocalStorage::thread();
++    if (thread == NULL) {
++      return JvmtiUtil::single_threaded_resource_area();
++    }
++    return thread->resource_area();
++  }
++
++ public:
++
++  SafeResourceMark() : ResourceMark(safe_resource_area()) {}
++
++};
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmti.xml openjdk/hotspot/src/share/vm/prims/jvmti.xml
+--- openjdk6/hotspot/src/share/vm/prims/jvmti.xml	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmti.xml	2008-01-31 09:19:01.000000000 -0500
+@@ -30,7 +30,7 @@
+    <!ATTLIST specification label CDATA #REQUIRED 
+                            majorversion CDATA #REQUIRED 
+                            minorversion CDATA #REQUIRED 
+-                           onedotmicroversion CDATA #REQUIRED>
++                           microversion CDATA #REQUIRED>
+ 
+    <!ELEMENT title (#PCDATA|jvmti|tm)*>
+    <!ATTLIST title subtitle CDATA #REQUIRED>
+@@ -56,6 +56,7 @@
+ 		      callbacksafe (safe|unsafe) #IMPLIED
+                       impl CDATA #IMPLIED
+                       hide CDATA #IMPLIED
++                      jkernel (yes|no) #IMPLIED
+                       since CDATA "1.0">
+ 
+    <!ELEMENT callback ((jmethodID|jfieldID|jframeID|jrawMonitorID|jclass|jthread|jthreadGroup|jobject|
+@@ -248,7 +249,6 @@
+ 
+    <!ELEMENT changehistory (intro*, change*)>
+    <!ATTLIST changehistory update CDATA #REQUIRED
+-                           version CDATA #REQUIRED
+                            id CDATA #REQUIRED>
+ 
+    <!ELEMENT change ANY>
+@@ -360,7 +360,7 @@
+ <specification label="JVM(TM) Tool Interface"
+         majorversion="1"
+         minorversion="1"
+-        onedotmicroversion="1.206">
++        microversion="109">
+   <title subtitle="Version">
+     <tm>JVM</tm> Tool Interface
+   </title>
+@@ -970,7 +970,7 @@
+       allocation libraries and mechanisms.
+     </intro>
+ 
+-    <function id="Allocate" phase="any" callbacksafe="safe" impl="notrace" num="46">
++    <function id="Allocate" jkernel="yes" phase="any" callbacksafe="safe" impl="notrace" num="46">
+       <synopsis>Allocate</synopsis>
+       <description>
+ 	Allocate an area of memory through the <jvmti/> allocator. 
+@@ -1008,7 +1008,7 @@
+       </errors>
+     </function>
+ 
+-    <function id="Deallocate" phase="any" callbacksafe="safe" impl="notrace" num="47">
++    <function id="Deallocate" jkernel="yes" phase="any" callbacksafe="safe" impl="notrace" num="47">
+       <synopsis>Deallocate</synopsis>
+       <description>
+ 	Deallocate <code>mem</code>  using the <jvmti/> allocator. 
+@@ -2075,7 +2075,7 @@
+       </errors>
+     </function>
+ 
+-    <function id="SetThreadLocalStorage" impl="notrace" phase="start" num="103">
++    <function id="SetThreadLocalStorage" jkernel="yes" impl="notrace" phase="start" num="103">
+       <synopsis>Set Thread Local Storage</synopsis>
+       <description>
+ 	The VM stores a pointer value associated with each environment-thread
+@@ -2115,7 +2115,7 @@
+       </errors>
+     </function>
+ 
+-    <function id="GetThreadLocalStorage" impl="innative notrace" phase="start" num="102">
++    <function id="GetThreadLocalStorage" jkernel="yes" impl="innative notrace" phase="start" num="102">
+       <synopsis>Get Thread Local Storage</synopsis>
+       <description>
+         Called by the agent to get the value of the <jvmti/> thread-local
+@@ -6361,7 +6361,7 @@
+     <intro>
+     </intro>
+ 
+-    <function id="GetLoadedClasses" num="78">
++    <function id="GetLoadedClasses" jkernel="yes" num="78">
+       <synopsis>Get Loaded Classes</synopsis>
+       <description>
+ 	Return an array of all classes loaded in the virtual machine.
+@@ -6395,7 +6395,7 @@
+       </errors>
+     </function>
+ 
+-    <function id="GetClassLoaderClasses" num="79">
++    <function id="GetClassLoaderClasses" jkernel="yes" num="79">
+       <synopsis>Get Classloader Classes</synopsis>
+       <description>
+ 	Returns an array of those classes for which this class loader has
+@@ -6936,7 +6936,7 @@
+       </errors>
+     </function>
+ 
+-    <function id="IsModifiableClass" phase="start" num="45" since="1.1">
++    <function id="IsModifiableClass" jkernel="yes" phase="start" num="45" since="1.1">
+       <synopsis>Is Modifiable Class</synopsis>
+       <description>
+ 	Determines whether a class is modifiable.
+@@ -7061,7 +7061,7 @@
+       </errors>
+     </function>
+ 
+-    <function id="RetransformClasses" num="152" since="1.1">
++    <function id="RetransformClasses" jkernel="yes" num="152" since="1.1">
+       <synopsis>Retransform Classes</synopsis>
+       <description>
+         This function facilitates the 
+@@ -7229,7 +7229,7 @@
+       </errors>
+     </function>
+ 
+-    <function id="RedefineClasses" num="87">
++    <function id="RedefineClasses" jkernel="yes" num="87">
+       <synopsis>Redefine Classes</synopsis>
+       <typedef id="jvmtiClassDefinition" label="Class redefinition description">
+ 	<field id="klass">
+@@ -7386,7 +7386,7 @@
+ 
+   <category id="object" label="Object">
+ 
+-    <function id="GetObjectSize" phase="start" num="154">
++    <function id="GetObjectSize" jkernel="yes" phase="start" num="154">
+       <synopsis>Get Object Size</synopsis>
+       <description>
+ 	For the object indicated by <code>object</code>,
+@@ -8311,7 +8311,7 @@
+       </errors>
+     </function>
+ 
+-    <function id="SetNativeMethodPrefix" phase="any" num="73" since="1.1">
++    <function id="SetNativeMethodPrefix" jkernel="yes" phase="any" num="73" since="1.1">
+       <synopsis>Set Native Method Prefix</synopsis>
+       <description>
+ 	This function modifies the failure handling of
+@@ -8434,7 +8434,7 @@
+       </errors>
+     </function>
+ 
+-    <function id="SetNativeMethodPrefixes" phase="any" num="74" since="1.1">
++    <function id="SetNativeMethodPrefixes" jkernel="yes" phase="any" num="74" since="1.1">
+       <synopsis>Set Native Method Prefixes</synopsis>
+       <description>
+ 	 For a normal agent, <functionlink id="SetNativeMethodPrefix"/>
+@@ -8869,7 +8869,7 @@
+ 
+   <category id="eventManagement" label="Event Management">
+ 
+-    <function id="SetEventCallbacks" phase="onload" num="122">
++    <function id="SetEventCallbacks" jkernel="yes" phase="onload" num="122">
+       <synopsis>Set Event Callbacks</synopsis>
+       <description>
+         Set the functions to be called for each event.
+@@ -8912,7 +8912,7 @@
+       </errors>
+     </function>
+ 
+-    <function id="SetEventNotificationMode" phase="onload" num="2">
++    <function id="SetEventNotificationMode" jkernel="yes" phase="onload" num="2">
+       <synopsis>Set Event Notification Mode</synopsis>
+       <description>
+ 	Control the generation of events. 
+@@ -9858,7 +9858,7 @@
+       </capabilityfield>
+     </capabilitiestypedef>
+ 
+-    <function id="GetPotentialCapabilities" phase="onload" num="140">
++    <function id="GetPotentialCapabilities" jkernel="yes" phase="onload" num="140">
+       <synopsis>Get Potential Capabilities</synopsis>
+       <description>
+         Returns via <paramlink id="capabilities_ptr"></paramlink> the <jvmti/> 
+@@ -9973,7 +9973,7 @@
+     </function>
+     </elide>
+ 
+-    <function id="AddCapabilities" phase="onload" num="142">
++    <function id="AddCapabilities" jkernel="yes" phase="onload" num="142">
+       <synopsis>Add Capabilities</synopsis>
+       <description>
+         Set new capabilities by adding the capabilities 
+@@ -10050,7 +10050,7 @@
+ 
+ 
+ 
+-    <function id="GetCapabilities" phase="any" num="89">
++    <function id="GetCapabilities" jkernel="yes" phase="any" num="89">
+       <synopsis>Get Capabilities</synopsis>
+         <description>
+           Returns via <paramlink id="capabilities_ptr"></paramlink> the optional <jvmti/> 
+@@ -10417,7 +10417,7 @@
+       This is useful for installing instrumentation under the correct class loader.
+     </intro>
+ 
+-    <function id="AddToBootstrapClassLoaderSearch" phase="onload" num="149">
++    <function id="AddToBootstrapClassLoaderSearch" jkernel="yes" phase="onload" num="149">
+       <synopsis>Add To Bootstrap Class Loader Search</synopsis>
+       <description>
+           This function can be used to cause instrumentation classes to be defined by the 
+@@ -10469,7 +10469,7 @@
+       </errors>
+     </function>
+ 
+-    <function id="AddToSystemClassLoaderSearch" phase="onload" num="151" since="1.1">
++    <function id="AddToSystemClassLoaderSearch" jkernel="yes" phase="onload" num="151" since="1.1">
+       <synopsis>Add To System Class Loader Search</synopsis>
+       <description>
+ 	  This function can be used to cause instrumentation classes to be
+@@ -10686,7 +10686,7 @@
+     <intro>
+     </intro>
+ 
+-    <function id="GetPhase" phase="any" num="133">
++    <function id="GetPhase" jkernel="yes" phase="any" num="133">
+       <synopsis>Get Phase</synopsis>
+       <description>
+           Return the current phase of VM execution.  
+@@ -10748,7 +10748,7 @@
+       </errors>
+     </function>
+ 
+-    <function id="DisposeEnvironment" phase="any" num="127">
++    <function id="DisposeEnvironment" jkernel="yes" phase="any" num="127">
+       <synopsis>Dispose Environment</synopsis>
+       <description>
+         Shutdown a <jvmti/> connection created with JNI <code>GetEnv</code>
+@@ -10794,7 +10794,7 @@
+       </errors>
+     </function>
+ 
+-    <function id="SetEnvironmentLocalStorage" phase="any" callbacksafe="safe" impl="innative notrace" num="148">
++    <function id="SetEnvironmentLocalStorage" jkernel="yes" phase="any" callbacksafe="safe" impl="innative notrace" num="148">
+       <synopsis>Set Environment Local Storage</synopsis>
+       <description>
+ 	The VM stores a pointer value associated with each environment.
+@@ -10828,7 +10828,7 @@
+       </errors>
+     </function>
+ 
+-    <function id="GetEnvironmentLocalStorage" phase="any" callbacksafe="safe" impl="innative notrace" num="147">
++    <function id="GetEnvironmentLocalStorage" jkernel="yes" phase="any" callbacksafe="safe" impl="innative notrace" num="147">
+       <synopsis>Get Environment Local Storage</synopsis>
+       <description>
+         Called by the agent to get the value of the <jvmti/> environment-local
+@@ -10853,7 +10853,7 @@
+       </errors>
+     </function>
+ 
+-    <function id="GetVersionNumber" phase="any" num="88">
++    <function id="GetVersionNumber" jkernel="yes" phase="any" num="88">
+       <synopsis>Get Version Number</synopsis>
+       <description>
+         Return the <jvmti/> version via <code>version_ptr</code>.
+@@ -13383,7 +13383,7 @@
+   </intro>
+ </issuessection>
+ 
+-<changehistory id="ChangeHistory" version="@(#)jvmti.xml	1.206" update="07/05/05 17:06:33">
++<changehistory id="ChangeHistory" update="09/05/07">
+   <intro>
+     The <jvmti/> specification is an evolving document with major, minor, 
+     and micro version numbers.
+diff -ruN openjdk6/hotspot/src/share/vm/prims/jvmti.xsl openjdk/hotspot/src/share/vm/prims/jvmti.xsl
+--- openjdk6/hotspot/src/share/vm/prims/jvmti.xsl	2008-02-28 05:02:41.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/jvmti.xsl	2008-01-31 09:19:01.000000000 -0500
+@@ -1891,7 +1891,6 @@
+     <p/><hr noshade="noshade" size="3"/>
+     <h2>Change History</h2>
+     Last update: <xsl:value-of select="@update"/><br/>
+-    File version: <xsl:value-of select="@version"/><br/>
+     Version: <xsl:call-template name="showversion"/>
+     <p/>
+     <xsl:apply-templates select="intro"/>
+diff -ruN openjdk6/hotspot/src/share/vm/prims/methodComparator.cpp openjdk/hotspot/src/share/vm/prims/methodComparator.cpp
+--- openjdk6/hotspot/src/share/vm/prims/methodComparator.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/methodComparator.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)methodComparator.cpp	1.15 07/05/05 17:06:41 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -66,16 +63,16 @@
+   return true;
+ }
+ 
+-  
++
+ bool MethodComparator::methods_switchable(methodOop old_method, methodOop new_method,
+-					  BciMap &bci_map) {
++                                          BciMap &bci_map) {
+   if (old_method->code_size() > new_method->code_size())
+     // Something has definitely been deleted in the new method, compared to the old one.
+     return false;
+ 
+   if (! check_stack_and_locals_size(old_method, new_method))
+     return false;
+-  
++
+   _old_cp = old_method->constants();
+   _new_cp = new_method->constants();
+   BytecodeStream s_old(old_method);
+@@ -87,7 +84,7 @@
+   GrowableArray<int> fwd_jmps(16);
+   _fwd_jmps = &fwd_jmps;
+   Bytecodes::Code c_old, c_new;
+-  
++
+   while ((c_old = s_old.next()) >= 0) {
+     if ((c_new = s_new.next()) < 0)
+       return false;
+@@ -96,14 +93,14 @@
+       int new_st_bci = s_new.bci();
+       bool found_match = false;
+       do {
+-	c_new = s_new.next();
+-	if (c_new == c_old && args_same(c_old, c_new)) {
+-	  found_match = true;
+-	  break;
+-	}
++        c_new = s_new.next();
++        if (c_new == c_old && args_same(c_old, c_new)) {
++          found_match = true;
++          break;
++        }
+       } while (c_new >= 0);
+       if (! found_match)
+-	return false;
++        return false;
+       int new_end_bci = s_new.bci();
+       bci_map.store_fragment_location(old_bci, new_st_bci, new_end_bci);
+     }
+@@ -113,9 +110,9 @@
+   for (int i = 0; i < fwd_jmps.length() / 2; i++) {
+     if (! bci_map.old_and_new_locations_same(fwd_jmps.at(i*2), fwd_jmps.at(i*2+1))) {
+       RC_TRACE(0x00800000,
+-	("Fwd jump miss: old dest = %d, calc new dest = %d, act new dest = %d",
++        ("Fwd jump miss: old dest = %d, calc new dest = %d, act new dest = %d",
+         fwd_jmps.at(i*2), bci_map.new_bci_for_old(fwd_jmps.at(i*2)),
+-	fwd_jmps.at(i*2+1)));
++        fwd_jmps.at(i*2+1)));
+       return false;
+     }
+   }
+@@ -133,16 +130,16 @@
+   case Bytecodes::_multianewarray : // fall through
+   case Bytecodes::_checkcast      : // fall through
+   case Bytecodes::_instanceof     : {
+-    u2 cpi_old = _s_old->get_index_big();	
++    u2 cpi_old = _s_old->get_index_big();
+     u2 cpi_new = _s_new->get_index_big();
+     if ((_old_cp->klass_at_noresolve(cpi_old) != _new_cp->klass_at_noresolve(cpi_new)))
+-	return false;
++        return false;
+     if (c_old == Bytecodes::_multianewarray &&
+-	*(jbyte*)(_s_old->bcp() + 3) != *(jbyte*)(_s_new->bcp() + 3))
++        *(jbyte*)(_s_old->bcp() + 3) != *(jbyte*)(_s_new->bcp() + 3))
+       return false;
+     break;
+   }
+-    
++
+   case Bytecodes::_getstatic       : // fall through
+   case Bytecodes::_putstatic       : // fall through
+   case Bytecodes::_getfield        : // fall through
+@@ -157,24 +154,24 @@
+     // are the same. Indices which are really into constantpool cache (rather than constant
+     // pool itself) are accepted by the constantpool query routines below.
+     if ((_old_cp->klass_ref_at_noresolve(cpci_old) != _new_cp->klass_ref_at_noresolve(cpci_new)) ||
+-	(_old_cp->name_ref_at(cpci_old) != _new_cp->name_ref_at(cpci_new)) ||
+-	(_old_cp->signature_ref_at(cpci_old) != _new_cp->signature_ref_at(cpci_new)))
++        (_old_cp->name_ref_at(cpci_old) != _new_cp->name_ref_at(cpci_new)) ||
++        (_old_cp->signature_ref_at(cpci_old) != _new_cp->signature_ref_at(cpci_new)))
+       return false;
+     break;
+   }
+-  
++
+   case Bytecodes::_ldc   : // fall through
+   case Bytecodes::_ldc_w : {
+     u2 cpi_old, cpi_new;
+     if (c_old == Bytecodes::_ldc) {
+-      cpi_old = _s_old->bcp()[1];	
++      cpi_old = _s_old->bcp()[1];
+       cpi_new = _s_new->bcp()[1];
+     } else {
+       cpi_old = _s_old->get_index_big();
+       cpi_new = _s_new->get_index_big();
+     }
+     constantTag tag_old = _old_cp->tag_at(cpi_old);
+-    constantTag tag_new = _new_cp->tag_at(cpi_new);	
++    constantTag tag_new = _new_cp->tag_at(cpi_new);
+     if (tag_old.is_int() || tag_old.is_float()) {
+       if (tag_old.value() != tag_new.value())
+         return false;
+@@ -185,7 +182,7 @@
+         if (_old_cp->float_at(cpi_old) != _new_cp->float_at(cpi_new))
+           return false;
+       }
+-    } else if (tag_old.is_string() || tag_old.is_unresolved_string()) {  
++    } else if (tag_old.is_string() || tag_old.is_unresolved_string()) {
+       if (! (tag_new.is_unresolved_string() || tag_new.is_string()))
+         return false;
+       if (strcmp(_old_cp->string_at_noresolve(cpi_old),
+@@ -197,37 +194,37 @@
+       if (_old_cp->klass_at_noresolve(cpi_old) !=
+           _new_cp->klass_at_noresolve(cpi_new))
+         return false;
+-    }    
++    }
+     break;
+   }
+-  
++
+   case Bytecodes::_ldc2_w : {
+-    u2 cpi_old = _s_old->get_index_big();	
++    u2 cpi_old = _s_old->get_index_big();
+     u2 cpi_new = _s_new->get_index_big();
+     constantTag tag_old = _old_cp->tag_at(cpi_old);
+-    constantTag tag_new = _new_cp->tag_at(cpi_new);	
++    constantTag tag_new = _new_cp->tag_at(cpi_new);
+     if (tag_old.value() != tag_new.value())
+       return false;
+     if (tag_old.is_long()) {
+       if (_old_cp->long_at(cpi_old) != _new_cp->long_at(cpi_new))
+-	return false;
++        return false;
+     } else {
+       if (_old_cp->double_at(cpi_old) != _new_cp->double_at(cpi_new))
+-	return false;
++        return false;
+     }
+     break;
+   }
+-  
++
+   case Bytecodes::_bipush :
+     if (_s_old->bcp()[1] != _s_new->bcp()[1])
+       return false;
+     break;
+-      
++
+   case Bytecodes::_sipush    :
+     if (_s_old->get_index_big() != _s_new->get_index_big())
+       return false;
+     break;
+-    
++
+   case Bytecodes::_aload  : // fall through
+   case Bytecodes::_astore : // fall through
+   case Bytecodes::_dload  : // fall through
+@@ -244,7 +241,7 @@
+     if (_s_old->get_index() != _s_new->get_index())
+       return false;
+     break;
+-    
++
+   case Bytecodes::_goto      : // fall through
+   case Bytecodes::_if_acmpeq : // fall through
+   case Bytecodes::_if_acmpne : // fall through
+@@ -269,33 +266,33 @@
+       int old_dest = _s_old->bci() + old_ofs;
+       int new_dest = _s_new->bci() + new_ofs;
+       if (old_ofs < 0 && new_ofs < 0) {
+-	if (! _bci_map->old_and_new_locations_same(old_dest, new_dest))
+-	  return false;
++        if (! _bci_map->old_and_new_locations_same(old_dest, new_dest))
++          return false;
+       } else if (old_ofs > 0 && new_ofs > 0) {
+-	_fwd_jmps->append(old_dest);
+-	_fwd_jmps->append(new_dest);
++        _fwd_jmps->append(old_dest);
++        _fwd_jmps->append(new_dest);
+       } else {
+-	return false;
++        return false;
+       }
+     } else {
+       if (old_ofs != new_ofs)
+-	return false;
++        return false;
+     }
+     break;
+   }
+-  
++
+   case Bytecodes::_iinc :
+     if (_s_old->is_wide() != _s_new->is_wide())
+       return false;
+     if (! _s_old->is_wide()) {
+       if (_s_old->get_index_big() != _s_new->get_index_big())
+-	return false;
++        return false;
+     } else {
+       if (Bytes::get_Java_u4(_s_old->bcp() + 1) != Bytes::get_Java_u4(_s_new->bcp() + 1))
+-	return false;
++        return false;
+     }
+     break;
+-      
++
+   case Bytecodes::_goto_w : // fall through
+   case Bytecodes::_jsr_w  : {
+     int old_ofs = (int) Bytes::get_Java_u4(_s_old->bcp() + 1);
+@@ -304,21 +301,21 @@
+       int old_dest = _s_old->bci() + old_ofs;
+       int new_dest = _s_new->bci() + new_ofs;
+       if (old_ofs < 0 && new_ofs < 0) {
+-	if (! _bci_map->old_and_new_locations_same(old_dest, new_dest))
+-	  return false;
++        if (! _bci_map->old_and_new_locations_same(old_dest, new_dest))
++          return false;
+       } else if (old_ofs > 0 && new_ofs > 0) {
+-	_fwd_jmps->append(old_dest);
+-	_fwd_jmps->append(new_dest);
++        _fwd_jmps->append(old_dest);
++        _fwd_jmps->append(new_dest);
+       } else {
+-	return false;
++        return false;
+       }
+     } else {
+       if (old_ofs != new_ofs)
+-	return false;
++        return false;
+     }
+     break;
+   }
+-  
++
+   case Bytecodes::_lookupswitch : // fall through
+   case Bytecodes::_tableswitch  : {
+     if (_switchable_test) {
+@@ -329,43 +326,43 @@
+       _fwd_jmps->append(_s_old->bci() + default_old);
+       _fwd_jmps->append(_s_new->bci() + default_new);
+       if (c_old == Bytecodes::_lookupswitch) {
+-	int npairs_old = (int) Bytes::get_Java_u4(aligned_bcp_old + jintSize);
+-	int npairs_new = (int) Bytes::get_Java_u4(aligned_bcp_new + jintSize);
+-	if (npairs_old != npairs_new)
+-	  return false;
+-	for (int i = 0; i < npairs_old; i++) {
+-	  int match_old = (int) Bytes::get_Java_u4(aligned_bcp_old + (2+2*i)*jintSize);
+-	  int match_new = (int) Bytes::get_Java_u4(aligned_bcp_new + (2+2*i)*jintSize);
+-	  if (match_old != match_new)
+-	    return false;
+-	  int ofs_old = (int) Bytes::get_Java_u4(aligned_bcp_old + (2+2*i+1)*jintSize);
+-	  int ofs_new = (int) Bytes::get_Java_u4(aligned_bcp_new + (2+2*i+1)*jintSize);
+-	  _fwd_jmps->append(_s_old->bci() + ofs_old);
+-	  _fwd_jmps->append(_s_new->bci() + ofs_new);
+-	}
++        int npairs_old = (int) Bytes::get_Java_u4(aligned_bcp_old + jintSize);
++        int npairs_new = (int) Bytes::get_Java_u4(aligned_bcp_new + jintSize);
++        if (npairs_old != npairs_new)
++          return false;
++        for (int i = 0; i < npairs_old; i++) {
++          int match_old = (int) Bytes::get_Java_u4(aligned_bcp_old + (2+2*i)*jintSize);
++          int match_new = (int) Bytes::get_Java_u4(aligned_bcp_new + (2+2*i)*jintSize);
++          if (match_old != match_new)
++            return false;
++          int ofs_old = (int) Bytes::get_Java_u4(aligned_bcp_old + (2+2*i+1)*jintSize);
++          int ofs_new = (int) Bytes::get_Java_u4(aligned_bcp_new + (2+2*i+1)*jintSize);
++          _fwd_jmps->append(_s_old->bci() + ofs_old);
++          _fwd_jmps->append(_s_new->bci() + ofs_new);
++        }
+       } else if (c_old == Bytecodes::_tableswitch) {
+-	int lo_old = (int) Bytes::get_Java_u4(aligned_bcp_old + jintSize);
+-	int lo_new = (int) Bytes::get_Java_u4(aligned_bcp_new + jintSize);
+-	if (lo_old != lo_new)
+-	  return false;
+-	int hi_old = (int) Bytes::get_Java_u4(aligned_bcp_old + 2*jintSize);
+-	int hi_new = (int) Bytes::get_Java_u4(aligned_bcp_new + 2*jintSize);
+-	if (hi_old != hi_new)
+-	  return false;
+-	for (int i = 0; i < hi_old - lo_old + 1; i++) {
+-	  int ofs_old = (int) Bytes::get_Java_u4(aligned_bcp_old + (3+i)*jintSize);
+-	  int ofs_new = (int) Bytes::get_Java_u4(aligned_bcp_new + (3+i)*jintSize);
+-	  _fwd_jmps->append(_s_old->bci() + ofs_old);
+-	  _fwd_jmps->append(_s_new->bci() + ofs_new);
+-	}
++        int lo_old = (int) Bytes::get_Java_u4(aligned_bcp_old + jintSize);
++        int lo_new = (int) Bytes::get_Java_u4(aligned_bcp_new + jintSize);
++        if (lo_old != lo_new)
++          return false;
++        int hi_old = (int) Bytes::get_Java_u4(aligned_bcp_old + 2*jintSize);
++        int hi_new = (int) Bytes::get_Java_u4(aligned_bcp_new + 2*jintSize);
++        if (hi_old != hi_new)
++          return false;
++        for (int i = 0; i < hi_old - lo_old + 1; i++) {
++          int ofs_old = (int) Bytes::get_Java_u4(aligned_bcp_old + (3+i)*jintSize);
++          int ofs_new = (int) Bytes::get_Java_u4(aligned_bcp_new + (3+i)*jintSize);
++          _fwd_jmps->append(_s_old->bci() + ofs_old);
++          _fwd_jmps->append(_s_new->bci() + ofs_new);
++        }
+       }
+     } else { // !_switchable_test, can use fast rough compare
+       int len_old = _s_old->next_bcp() - _s_old->bcp();
+       int len_new = _s_new->next_bcp() - _s_new->bcp();
+       if (len_old != len_new)
+-	return false;
++        return false;
+       if (memcmp(_s_old->bcp(), _s_new->bcp(), len_old) != 0)
+-	return false;
++        return false;
+     }
+     break;
+   }
+diff -ruN openjdk6/hotspot/src/share/vm/prims/methodComparator.hpp openjdk/hotspot/src/share/vm/prims/methodComparator.hpp
+--- openjdk6/hotspot/src/share/vm/prims/methodComparator.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/methodComparator.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)methodComparator.hpp	1.12 07/05/05 17:06:40 JVM"
+-#endif
+ /*
+  * Copyright 2000-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class BciMap;
+@@ -84,13 +81,13 @@
+   // -----------------------------------------
+   // Old method   |invokevirtual 5|aload 1|...
+   // -----------------------------------------
+-  //                                                         
+-  //                                 |<- new_st_bci          |<- new_end_bci    
++  //
++  //                                 |<- new_st_bci          |<- new_end_bci
+   // --------------------------------------------------------------------
+   // New method       |invokevirual 5|aload 2|invokevirtual 6|aload 1|...
+   // --------------------------------------------------------------------
+-  //                                 ^^^^^^^^^^^^^^^^^^^^^^^^ 
+-  //                                    Added fragment 
++  //                                 ^^^^^^^^^^^^^^^^^^^^^^^^
++  //                                    Added fragment
+ 
+   void store_fragment_location(int old_bci, int new_st_bci, int new_end_bci) {
+     if (_cur_pos == _cur_size) {
+diff -ruN openjdk6/hotspot/src/share/vm/prims/nativeLookup.cpp openjdk/hotspot/src/share/vm/prims/nativeLookup.cpp
+--- openjdk6/hotspot/src/share/vm/prims/nativeLookup.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/nativeLookup.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)nativeLookup.cpp	1.82 07/05/05 17:06:42 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -133,21 +130,21 @@
+       in_base_library = true;
+       return entry;
+     }
+-  }  
++  }
+ 
+   // Otherwise call static method findNative in ClassLoader
+   KlassHandle   klass (THREAD, SystemDictionary::classloader_klass());
+   Handle name_arg = java_lang_String::create_from_str(jni_name, CHECK_NULL);
+ 
+   JavaValue result(T_LONG);
+-  JavaCalls::call_static(&result, 
+-                         klass,                        
+-                         vmSymbolHandles::findNative_name(), 
++  JavaCalls::call_static(&result,
++                         klass,
++                         vmSymbolHandles::findNative_name(),
+                          vmSymbolHandles::classloader_string_long_signature(),
+-                         // Arguments    
+-                         loader, 
++                         // Arguments
++                         loader,
+                          name_arg,
+-                         CHECK_NULL);  
++                         CHECK_NULL);
+   entry = (address) (intptr_t) result.get_jlong();
+ 
+   if (entry == NULL) {
+@@ -175,9 +172,9 @@
+ 
+   // Compute argument size
+   int args_size = 1                             // JNIEnv
+-                + (method->is_static() ? 1 : 0) // class for static methods 
+-                + method->size_of_parameters(); // actual parameters  
+-  
++                + (method->is_static() ? 1 : 0) // class for static methods
++                + method->size_of_parameters(); // actual parameters
++
+ 
+   // 1) Try JNI short style
+   entry = lookup_style(method, pure_name, "",        args_size, true,  in_base_library, CHECK_NULL);
+@@ -226,7 +223,7 @@
+     symbolHandle wrapper_symbol(THREAD, SymbolTable::probe(wrapper_name, wrapper_name_len));
+     if (!wrapper_symbol.is_null()) {
+       KlassHandle kh(method->method_holder());
+-      methodOop wrapper_method = Klass::cast(kh())->lookup_method(wrapper_symbol(), 
++      methodOop wrapper_method = Klass::cast(kh())->lookup_method(wrapper_symbol(),
+                                                                   method->signature());
+       if (wrapper_method != NULL && !wrapper_method->is_native()) {
+         // we found a wrapper method, use its native entry
+@@ -245,14 +242,14 @@
+   entry = lookup_entry(method, in_base_library, THREAD);
+   if (entry != NULL) return entry;
+ 
+-  // standard native method resolution has failed.  Check if there are any 
++  // standard native method resolution has failed.  Check if there are any
+   // JVM TI prefixes which have been applied to the native method name.
+   entry = lookup_entry_prefixed(method, in_base_library, THREAD);
+   if (entry != NULL) return entry;
+-    
++
+   // Native function not found, throw UnsatisfiedLinkError
+-  THROW_MSG_0(vmSymbols::java_lang_UnsatisfiedLinkError(), 
+-              method->name_and_sig_as_C_string()); 
++  THROW_MSG_0(vmSymbols::java_lang_UnsatisfiedLinkError(),
++              method->name_and_sig_as_C_string());
+ }
+ 
+ 
+@@ -264,8 +261,8 @@
+     // -verbose:jni printing
+     if (PrintJNIResolving) {
+       ResourceMark rm(THREAD);
+-      tty->print_cr("[Dynamic-linking native method %s.%s ... JNI]", 
+-        Klass::cast(method->method_holder())->external_name(), 
++      tty->print_cr("[Dynamic-linking native method %s.%s ... JNI]",
++        Klass::cast(method->method_holder())->external_name(),
+         method->name()->as_C_string());
+     }
+   }
+diff -ruN openjdk6/hotspot/src/share/vm/prims/nativeLookup.hpp openjdk/hotspot/src/share/vm/prims/nativeLookup.hpp
+--- openjdk6/hotspot/src/share/vm/prims/nativeLookup.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/nativeLookup.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)nativeLookup.hpp	1.27 07/05/05 17:06:41 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,11 +19,11 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+-// NativeLookup provides an interface for finding DLL entry points for 
+-// Java native functions. 
++// NativeLookup provides an interface for finding DLL entry points for
++// Java native functions.
+ 
+ class NativeLookup : AllStatic {
+  private:
+@@ -34,7 +31,7 @@
+   static char* pure_jni_name(methodHandle method);
+   static char* long_jni_name(methodHandle method);
+ 
+-  // Style specific lookup 
++  // Style specific lookup
+   static address lookup_style(methodHandle method, char* pure_name, const char* long_name, int args_size, bool os_style, bool& in_base_library, TRAPS);
+   static address lookup_base (methodHandle method, bool& in_base_library, TRAPS);
+   static address lookup_entry(methodHandle method, bool& in_base_library, TRAPS);
+diff -ruN openjdk6/hotspot/src/share/vm/prims/perf.cpp openjdk/hotspot/src/share/vm/prims/perf.cpp
+--- openjdk6/hotspot/src/share/vm/prims/perf.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/perf.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)perf.cpp	1.15 07/05/05 17:06:34 JVM"
+-#endif
+ /*
+  * Copyright 2001-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ /*
+@@ -136,8 +133,8 @@
+     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "PerfLong name already exists");
+   }
+ 
+-  switch(variability) { 
+-  case 1:  /* V_Constant */ 
++  switch(variability) {
++  case 1:  /* V_Constant */
+     pl = PerfDataManager::create_long_constant(NULL_NS, (char *)name_utf,
+                                                (PerfData::Units)units, value,
+                                                CHECK_NULL);
+@@ -314,4 +311,3 @@
+     guarantee(ok == 0, "register perf natives");
+   }
+ JVM_END
+-
+diff -ruN openjdk6/hotspot/src/share/vm/prims/privilegedStack.cpp openjdk/hotspot/src/share/vm/prims/privilegedStack.cpp
+--- openjdk6/hotspot/src/share/vm/prims/privilegedStack.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/privilegedStack.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)privilegedStack.cpp	1.31 07/05/05 17:06:42 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -37,7 +34,7 @@
+   THREAD->allow_unhandled_oop(&_klass);
+   THREAD->allow_unhandled_oop(&_privileged_context);
+ #endif // CHECK_UNHANDLED_OOPS
+-  _frame_id             = vfst->frame_id();  
++  _frame_id             = vfst->frame_id();
+   _next                 = next;
+   assert(_privileged_context == NULL || _privileged_context->is_oop(), "must be an oop");
+   assert(protection_domain() == NULL || protection_domain()->is_oop(), "must be an oop");
+@@ -45,11 +42,11 @@
+ 
+ void PrivilegedElement::oops_do(OopClosure* f) {
+   PrivilegedElement *cur = this;
+-  do {    
++  do {
+     f->do_oop((oop*) &cur->_klass);
+     f->do_oop((oop*) &cur->_privileged_context);
+     cur = cur->_next;
+-  } while(cur != NULL);    
++  } while(cur != NULL);
+ }
+ 
+ //-------------------------------------------------------------------------------
+@@ -77,4 +74,3 @@
+ }
+ 
+ #endif
+-
+diff -ruN openjdk6/hotspot/src/share/vm/prims/privilegedStack.hpp openjdk/hotspot/src/share/vm/prims/privilegedStack.hpp
+--- openjdk6/hotspot/src/share/vm/prims/privilegedStack.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/privilegedStack.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)privilegedStack.hpp	1.25 07/05/05 17:06:41 JVM"
+-#endif
+ /*
+  * Copyright 1997-2000 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,20 +19,20 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class PrivilegedElement VALUE_OBJ_CLASS_SPEC {
+- private:  
+-  klassOop  _klass;                // klass for method 
+-  oop       _privileged_context;   // context for operation  
++ private:
++  klassOop  _klass;                // klass for method
++  oop       _privileged_context;   // context for operation
+   intptr_t*     _frame_id;             // location on stack
+   PrivilegedElement* _next;        // Link to next one on stack
+  public:
+-  void initialize(vframeStream* vf, oop context, PrivilegedElement* next, TRAPS);  
+-  void oops_do(OopClosure* f);    
++  void initialize(vframeStream* vf, oop context, PrivilegedElement* next, TRAPS);
++  void oops_do(OopClosure* f);
+   intptr_t* frame_id() const           { return _frame_id; }
+-  oop  privileged_context() const  { return _privileged_context; }  
++  oop  privileged_context() const  { return _privileged_context; }
+   oop  class_loader() const        { return instanceKlass::cast(_klass)->class_loader(); }
+   oop  protection_domain() const   { return instanceKlass::cast(_klass)->protection_domain(); }
+   PrivilegedElement *next() const  { return _next; }
+@@ -44,4 +41,3 @@
+   void print_on(outputStream* st) const   PRODUCT_RETURN;
+   bool contains(address addr)             PRODUCT_RETURN0;
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/prims/unsafe.cpp openjdk/hotspot/src/share/vm/prims/unsafe.cpp
+--- openjdk6/hotspot/src/share/vm/prims/unsafe.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/prims/unsafe.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)unsafe.cpp	1.64 07/05/17 16:05:09 JVM"
+-#endif
+ /*
+  * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ /*
+@@ -104,7 +101,7 @@
+     if (byte_offset == (jint)byte_offset) {
+       void* ptr_plus_disp = (address)p + byte_offset;
+       assert((void*)p->obj_field_addr((jint)byte_offset) == ptr_plus_disp,
+-	     "raw [ptr+disp] must be consistent with oop::field_base");
++             "raw [ptr+disp] must be consistent with oop::field_base");
+     }
+   }
+ #endif
+@@ -211,7 +208,7 @@
+   {
+     if (VM_Version::supports_cx8()) {
+       GET_FIELD_VOLATILE(obj, offset, jlong, v);
+-      return v; 
++      return v;
+     }
+     else {
+       Handle p (THREAD, JNIHandles::resolve(obj));
+@@ -308,7 +305,7 @@
+ UNSAFE_ENTRY(void, Unsafe_SetOrderedInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint x)) \
+   UnsafeWrapper("Unsafe_SetOrderedInt"); \
+   SET_FIELD_VOLATILE(obj, offset, jint, x); \
+-UNSAFE_END 
++UNSAFE_END
+ 
+ UNSAFE_ENTRY(void, Unsafe_SetOrderedObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h))
+   UnsafeWrapper("Unsafe_SetOrderedObject");
+@@ -392,7 +389,7 @@
+   t->set_doing_unsafe_access(false);
+   return x;
+ UNSAFE_END
+- 
++
+ UNSAFE_ENTRY(void, Unsafe_SetNativeLong(JNIEnv *env, jobject unsafe, jlong addr, jlong x))
+   UnsafeWrapper("Unsafe_SetNativeLong");
+   JavaThread* t = JavaThread::current();
+@@ -714,8 +711,8 @@
+     }
+ 
+     if (data == NULL) {
+-	throw_new(env, "NullPointerException");
+-	return 0;
++        throw_new(env, "NullPointerException");
++        return 0;
+     }
+ 
+     /* Work around 4153825. malloc crashes on Solaris when passed a
+@@ -723,14 +720,14 @@
+      */
+     if (length < 0) {
+         throw_new(env, "ArrayIndexOutOfBoundsException");
+-	return 0;
++        return 0;
+     }
+ 
+     body = NEW_C_HEAP_ARRAY(jbyte, length);
+ 
+     if (body == 0) {
+         throw_new(env, "OutOfMemoryError");
+-	return 0;
++        return 0;
+     }
+ 
+     env->GetByteArrayRegion(data, offset, length, body);
+@@ -740,7 +737,7 @@
+ 
+     if (name != NULL) {
+         uint len = env->GetStringUTFLength(name);
+-	int unicode_len = env->GetStringLength(name);
++        int unicode_len = env->GetStringLength(name);
+         if (len >= sizeof(buf)) {
+             utfName = NEW_C_HEAP_ARRAY(char, len + 1);
+             if (utfName == NULL) {
+@@ -750,18 +747,18 @@
+         } else {
+             utfName = buf;
+         }
+-    	env->GetStringUTFRegion(name, 0, unicode_len, utfName);
+-	//VerifyFixClassname(utfName);
+-	for (uint i = 0; i < len; i++) {
+-	  if (utfName[i] == '.')   utfName[i] = '/';
+-	}
++        env->GetStringUTFRegion(name, 0, unicode_len, utfName);
++        //VerifyFixClassname(utfName);
++        for (uint i = 0; i < len; i++) {
++          if (utfName[i] == '.')   utfName[i] = '/';
++        }
+     } else {
+-	utfName = NULL;
++        utfName = NULL;
+     }
+ 
+     result = JVM_DefineClass(env, utfName, loader, body, length, pd);
+ 
+-    if (utfName && utfName != buf) 
++    if (utfName && utfName != buf)
+         FREE_C_HEAP_ARRAY(char, utfName);
+ 
+  free_body:
+@@ -845,8 +842,8 @@
+ 
+ UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject e_h, jobject x_h))
+   UnsafeWrapper("Unsafe_CompareAndSwapObject");
+-  oop x = JNIHandles::resolve(x_h); 
+-  oop e = JNIHandles::resolve(e_h); 
++  oop x = JNIHandles::resolve(x_h);
++  oop e = JNIHandles::resolve(e_h);
+   oop p = JNIHandles::resolve(obj);
+   intptr_t* addr = (intptr_t *)index_oop_from_field_offset_long(p, offset);
+   intptr_t res = Atomic::cmpxchg_ptr((intptr_t)x, addr, (intptr_t)e);
+@@ -896,14 +893,14 @@
+         // always be zero anyway and the value set is always the same
+         p = (Parker*)addr_from_java(lp);
+       } else {
+-        // Grab lock if apparently null or using older version of library 
+-        MutexLocker mu(Threads_lock);  
++        // Grab lock if apparently null or using older version of library
++        MutexLocker mu(Threads_lock);
+         java_thread = JNIHandles::resolve_non_null(jthread);
+         if (java_thread != NULL) {
+           JavaThread* thr = java_lang_Thread::thread(java_thread);
+           if (thr != NULL) {
+             p = thr->parker();
+-            if (p != NULL) { // Bind to Java thread for next time. 
++            if (p != NULL) { // Bind to Java thread for next time.
+               java_lang_Thread::set_park_event(java_thread, addr_to_java(p));
+             }
+           }
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/aprofiler.cpp openjdk/hotspot/src/share/vm/runtime/aprofiler.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/aprofiler.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/aprofiler.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)aprofiler.cpp	1.33 07/05/05 17:06:43 JVM"
+-#endif
+ /*
+  * Copyright 1997-2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -118,17 +115,17 @@
+ #else
+       const char* name = k->klass_part()->internal_name();
+ #endif
+-      tty->print_cr("%20u %10u %8u  %s", 
+-        alloc_size * BytesPerWord, 
+-        alloc_count, 
+-        average(alloc_size, alloc_count), 
++      tty->print_cr("%20u %10u %8u  %s",
++        alloc_size * BytesPerWord,
++        alloc_count,
++        average(alloc_size, alloc_count),
+         name);
+       total_alloc_size += alloc_size;
+       total_alloc_count += alloc_count;
+     }
+   }
+-  tty->print_cr("%20u %10u %8u  --total--", 
+-    total_alloc_size * BytesPerWord, 
++  tty->print_cr("%20u %10u %8u  --total--",
++    total_alloc_size * BytesPerWord,
+     total_alloc_count,
+     average(total_alloc_size, total_alloc_count));
+   tty->cr();
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/aprofiler.hpp openjdk/hotspot/src/share/vm/runtime/aprofiler.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/aprofiler.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/aprofiler.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)aprofiler.hpp	1.32 07/05/05 17:06:42 JVM"
+-#endif
+ /*
+  * Copyright 1997-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,14 +19,14 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A simple allocation profiler for Java. The profiler collects and prints
+ // the number and total size of instances allocated per class, including
+ // array classes.
+ //
+-// The profiler is currently global for all threads. It can be changed to a 
++// The profiler is currently global for all threads. It can be changed to a
+ // per threads profiler by keeping a more elaborate data structure and calling
+ // iterate_since_last_scavenge at thread switches.
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/arguments.cpp openjdk/hotspot/src/share/vm/runtime/arguments.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/arguments.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/arguments.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)arguments.cpp	1.333 07/09/25 22:04:01 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -82,7 +79,7 @@
+ // Check if head of 'option' matches 'name', and sets 'tail' remaining part of option string
+ 
+ static bool match_option(const JavaVMOption *option, const char* name,
+-                         const char** tail) {  
++                         const char** tail) {
+   int len = (int)strlen(name);
+   if (strncmp(option->optionString, name, len) == 0) {
+     *tail = option->optionString + len;
+@@ -104,7 +101,7 @@
+   // Must do this before setting up other system properties,
+   // as some of them may depend on launcher type.
+   for (int index = 0; index < args->nOptions; index++) {
+-    const JavaVMOption* option = args->options + index;    
++    const JavaVMOption* option = args->options + index;
+     const char* tail;
+ 
+     if (match_option(option, "-Dsun.java.launcher=", &tail)) {
+@@ -161,31 +158,31 @@
+ // for 1.6 but not 1.7.  The string should be cleared at the
+ // beginning of 1.7.
+ static const char*  obsolete_jvm_flags_1_5_0[] = {
+-					   "UseTrainGC", 
+-					   "UseSpecialLargeObjectHandling",
+-					   "UseOversizedCarHandling",
+-					   "TraceCarAllocation",
+-					   "PrintTrainGCProcessingStats",
+-					   "LogOfCarSpaceSize",
+-					   "OversizedCarThreshold",
+-					   "MinTickInterval",
+-					   "DefaultTickInterval",
+-					   "MaxTickInterval",
+-					   "DelayTickAdjustment",
+-					   "ProcessingToTenuringRatio",
+-					   "MinTrainLength",
+-					   0};
++                                           "UseTrainGC",
++                                           "UseSpecialLargeObjectHandling",
++                                           "UseOversizedCarHandling",
++                                           "TraceCarAllocation",
++                                           "PrintTrainGCProcessingStats",
++                                           "LogOfCarSpaceSize",
++                                           "OversizedCarThreshold",
++                                           "MinTickInterval",
++                                           "DefaultTickInterval",
++                                           "MaxTickInterval",
++                                           "DelayTickAdjustment",
++                                           "ProcessingToTenuringRatio",
++                                           "MinTrainLength",
++                                           0};
+ 
+ bool Arguments::made_obsolete_in_1_5_0(const char *s) {
+   int i = 0;
+   while (obsolete_jvm_flags_1_5_0[i] != NULL) {
+     // <flag>=xxx form
+     // [-|+]<flag> form
+-    if ((strncmp(obsolete_jvm_flags_1_5_0[i], s, 
+-	       strlen(obsolete_jvm_flags_1_5_0[i])) == 0) ||
+-	((s[0] == '+' || s[0] == '-') &&
+-	(strncmp(obsolete_jvm_flags_1_5_0[i], &s[1],
+-	       strlen(obsolete_jvm_flags_1_5_0[i])) == 0))) {
++    if ((strncmp(obsolete_jvm_flags_1_5_0[i], s,
++               strlen(obsolete_jvm_flags_1_5_0[i])) == 0) ||
++        ((s[0] == '+' || s[0] == '-') &&
++        (strncmp(obsolete_jvm_flags_1_5_0[i], &s[1],
++               strlen(obsolete_jvm_flags_1_5_0[i])) == 0))) {
+       return true;
+     }
+     i++;
+@@ -195,12 +192,12 @@
+ 
+ // Constructs the system class path (aka boot class path) from the following
+ // components, in order:
+-// 
+-//     prefix		// from -Xbootclasspath/p:...
+-//     endorsed		// the expansion of -Djava.endorsed.dirs=...
+-//     base		// from os::get_system_properties() or -Xbootclasspath=
+-//     suffix		// from -Xbootclasspath/a:...
+-// 
++//
++//     prefix           // from -Xbootclasspath/p:...
++//     endorsed         // the expansion of -Djava.endorsed.dirs=...
++//     base             // from os::get_system_properties() or -Xbootclasspath=
++//     suffix           // from -Xbootclasspath/a:...
++//
+ // java.endorsed.dirs is a list of directories; any jar or zip files in the
+ // directories are added to the sysclasspath just before the base.
+ //
+@@ -241,11 +238,11 @@
+   // Array indices for the items that make up the sysclasspath.  All except the
+   // base are allocated in the C heap and freed by this class.
+   enum {
+-    _scp_prefix,	// from -Xbootclasspath/p:...
+-    _scp_endorsed,	// the expansion of -Djava.endorsed.dirs=...
+-    _scp_base,		// the default sysclasspath
+-    _scp_suffix,	// from -Xbootclasspath/a:...
+-    _scp_nitems		// the number of items, must be last.
++    _scp_prefix,        // from -Xbootclasspath/p:...
++    _scp_endorsed,      // the expansion of -Djava.endorsed.dirs=...
++    _scp_base,          // the default sysclasspath
++    _scp_suffix,        // from -Xbootclasspath/a:...
++    _scp_nitems         // the number of items, must be last.
+   };
+ 
+   const char* _items[_scp_nitems];
+@@ -357,7 +354,7 @@
+       *cp_tmp++ = separator;
+     }
+   }
+-  *--cp_tmp = '\0';	// Replace the extra separator.
++  *--cp_tmp = '\0';     // Replace the extra separator.
+   return cp;
+ }
+ 
+@@ -370,7 +367,7 @@
+   if (path == NULL) {
+     size_t len = strlen(str) + 1;
+     cp = NEW_C_HEAP_ARRAY(char, len);
+-    memcpy(cp, str, len);			// copy the trailing null
++    memcpy(cp, str, len);                       // copy the trailing null
+   } else {
+     const char separator = *os::path_separator();
+     size_t old_len = strlen(path);
+@@ -383,13 +380,13 @@
+       memcpy(cp_tmp, str, str_len);
+       cp_tmp += str_len;
+       *cp_tmp = separator;
+-      memcpy(++cp_tmp, path, old_len + 1);	// copy the trailing null
++      memcpy(++cp_tmp, path, old_len + 1);      // copy the trailing null
+       FREE_C_HEAP_ARRAY(char, path);
+     } else {
+       cp = REALLOC_C_HEAP_ARRAY(char, path, len);
+       char* cp_tmp = cp + old_len;
+       *cp_tmp = separator;
+-      memcpy(++cp_tmp, str, str_len + 1);	// copy the trailing null
++      memcpy(++cp_tmp, str, str_len + 1);       // copy the trailing null
+     }
+   }
+   return cp;
+@@ -400,13 +397,13 @@
+ char* SysClassPath::add_jars_to_path(char* path, const char* directory) {
+   DIR* dir = os::opendir(directory);
+   if (dir == NULL) return path;
+-  
++
+   char dir_sep[2] = { '\0', '\0' };
+   size_t directory_len = strlen(directory);
+   const char fileSep = *os::file_separator();
+   if (directory[directory_len - 1] != fileSep) dir_sep[0] = fileSep;
+-    
+-  /* Scan the directory for jars/zips, appending them to path. */ 
++
++  /* Scan the directory for jars/zips, appending them to path. */
+   struct dirent *entry;
+   char *dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(directory));
+   while ((entry = os::readdir(dir, (dirent *) dbuf)) != NULL) {
+@@ -469,13 +466,13 @@
+   return arg_in_range;
+ }
+ 
+-// Describe an argument out of range error 
++// Describe an argument out of range error
+ void Arguments::describe_range_error(ArgsRange errcode) {
+   switch(errcode) {
+   case arg_too_big:
+     jio_fprintf(defaultStream::error_stream(),
+                 "The specified size exceeds the maximum "
+-		"representable size.\n");
++                "representable size.\n");
+     break;
+   case arg_too_small:
+   case arg_unreadable:
+@@ -545,7 +542,7 @@
+ static bool append_to_string_flag(char* name, const char* new_value, FlagValueOrigin origin) {
+   const char* old_value = "";
+   if (!CommandLineFlags::ccstrAt(name, &old_value))  return false;
+-  size_t old_len = strlen(old_value);
++  size_t old_len = old_value != NULL ? strlen(old_value) : 0;
+   size_t new_len = strlen(new_value);
+   const char* value;
+   char* free_this_too = NULL;
+@@ -589,14 +586,25 @@
+   char punct;
+   if (sscanf(arg, "%" XSTR(BUFLEN) NAME_RANGE "%c", name, &punct) == 2 && punct == '=') {
+     const char* value = strchr(arg, '=') + 1;
+-    // Note that normal -XX:Foo=WWW accumulates.
+-    bool success = append_to_string_flag(name, value, origin);
+-    if (success)  return success;
++    Flag* flag = Flag::find_flag(name, strlen(name));
++    if (flag != NULL && flag->is_ccstr()) {
++      if (flag->ccstr_accumulates()) {
++        return append_to_string_flag(name, value, origin);
++      } else {
++        if (value[0] == '\0') {
++          value = NULL;
++        }
++        return set_string_flag(name, value, origin);
++      }
++    }
+   }
+ 
+   if (sscanf(arg, "%" XSTR(BUFLEN) NAME_RANGE ":%c", name, &punct) == 2 && punct == '=') {
+     const char* value = strchr(arg, '=') + 1;
+     // -XX:Foo:=xxx will reset the string flag to the given value.
++    if (value[0] == '\0') {
++      value = NULL;
++    }
+     return set_string_flag(name, value, origin);
+   }
+ 
+@@ -630,8 +638,8 @@
+ 
+   int index = *count;
+ 
+-  // expand the array and add arg to the last element 
+-  (*count)++; 
++  // expand the array and add arg to the last element
++  (*count)++;
+   if (*bldarray == NULL) {
+     *bldarray = NEW_C_HEAP_ARRAY(char*, *count);
+   } else {
+@@ -704,11 +712,11 @@
+   } else if (made_obsolete_in_1_5_0(arg)) {
+     jio_fprintf(defaultStream::error_stream(),
+       "Warning: The flag %s has been EOL'd as of 1.5.0 and will"
+-      " be ignored\n", arg); 
++      " be ignored\n", arg);
+   } else {
+     if (!ignore_unrecognized) {
+       jio_fprintf(defaultStream::error_stream(),
+-		  "Unrecognized VM option '%s'\n", arg);
++                  "Unrecognized VM option '%s'\n", arg);
+       // allow for commandline "commenting out" options like -XX:#+Verbose
+       if (strlen(arg) == 0 || arg[0] != '#') {
+         return false;
+@@ -724,7 +732,7 @@
+   if (stream == NULL) {
+     if (should_exist) {
+       jio_fprintf(defaultStream::error_stream(),
+-		  "Could not open settings file %s\n", file_name);
++                  "Could not open settings file %s\n", file_name);
+       return false;
+     } else {
+       return true;
+@@ -744,30 +752,30 @@
+   while(c != EOF) {
+     if (in_white_space) {
+       if (in_comment) {
+-	if (c == '\n') in_comment = false;
++        if (c == '\n') in_comment = false;
+       } else {
+         if (c == '#') in_comment = true;
+         else if (!isspace(c)) {
+           in_white_space = false;
+-	  token[pos++] = c;
++          token[pos++] = c;
+         }
+       }
+     } else {
+       if (c == '\n' || (!in_quote && isspace(c))) {
+-	// token ends at newline, or at unquoted whitespace
+-	// this allows a way to include spaces in string-valued options
++        // token ends at newline, or at unquoted whitespace
++        // this allows a way to include spaces in string-valued options
+         token[pos] = '\0';
+-	logOption(token);
++        logOption(token);
+         result &= process_argument(token, ignore_unrecognized, CONFIG_FILE);
+         build_jvm_flags(token);
+-	pos = 0;
+-	in_white_space = true;
+-	in_quote = false;
++        pos = 0;
++        in_white_space = true;
++        in_quote = false;
+       } else if (!in_quote && (c == '\'' || c == '"')) {
+-	in_quote = true;
+-	quote_c = c;
++        in_quote = true;
++        quote_c = c;
+       } else if (in_quote && (c == quote_c)) {
+-	in_quote = false;
++        in_quote = false;
+       } else {
+         token[pos++] = c;
+       }
+@@ -784,7 +792,7 @@
+ }
+ 
+ //=============================================================================================================
+-// Parsing of properties (-D) 
++// Parsing of properties (-D)
+ 
+ const char* Arguments::get_property(const char* key) {
+   return PropertyList_get_value(system_properties(), key);
+@@ -805,7 +813,7 @@
+   if (eq != NULL) {
+     size_t value_len = strlen(prop) - key_len - 1;
+     value = AllocateHeap(value_len + 1, "add_property");
+-    strncpy(value, &prop[key_len + 1], value_len + 1);    
++    strncpy(value, &prop[key_len + 1], value_len + 1);
+   }
+ 
+   if (strcmp(key, "java.compiler") == 0) {
+@@ -846,7 +854,7 @@
+ }
+ 
+ //===========================================================================================================
+-// Setting int/mixed/comp mode flags 
++// Setting int/mixed/comp mode flags
+ 
+ void Arguments::set_mode_flags(Mode mode) {
+   // Set up default values for all flags.
+@@ -859,7 +867,7 @@
+   // This may not be the final mode; mode may change later in onload phase.
+   PropertyList_unique_add(&_system_properties, "java.vm.info",
+      (char*)Abstract_VM_Version::vm_info_string());
+-  
++
+   UseInterpreter             = true;
+   UseCompiler                = true;
+   UseLoopCounter             = true;
+@@ -911,7 +919,7 @@
+ 
+ // If the user has chosen ParallelGCThreads > 0, we set UseParNewGC
+ // if it's not explictly set or unset. If the user has chosen
+-// UseParNewGC and not explicitly set ParallelGCThreads we 
++// UseParNewGC and not explicitly set ParallelGCThreads we
+ // set it, unless this is a single cpu machine.
+ void Arguments::set_parnew_gc_flags() {
+   assert(!UseSerialGC && !UseParallelGC, "control point invariant");
+@@ -928,11 +936,11 @@
+     FLAG_SET_DEFAULT(ParallelGCThreads, 0);
+   } else {
+     no_shared_spaces();
+-    
++
+     // By default YoungPLABSize and OldPLABSize are set to 4096 and 1024 correspondinly,
+     // these settings are default for Parallel Scavenger. For ParNew+Tenured configuration
+     // we set them to 1024 and 1024.
+-    // See CR 6362902.    
++    // See CR 6362902.
+     if (FLAG_IS_DEFAULT(YoungPLABSize)) {
+       FLAG_SET_DEFAULT(YoungPLABSize, (intx)1024);
+     }
+@@ -949,7 +957,7 @@
+ }
+ 
+ // CAUTION: this code is currently shared by UseParallelGC, UseParNewGC and
+-// UseconcMarkSweepGC. Further tuning of individual collectors might 
++// UseconcMarkSweepGC. Further tuning of individual collectors might
+ // dictate refinement on a per-collector basis.
+ int Arguments::nof_parallel_gc_threads() {
+   if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
+@@ -985,11 +993,11 @@
+   // Turn off AdaptiveSizePolicy by default for cms until it is
+   // complete.  Also turn it off in general if the
+   // parnew collector has been selected.
+-  if ((UseConcMarkSweepGC || UseParNewGC) && 
++  if ((UseConcMarkSweepGC || UseParNewGC) &&
+       FLAG_IS_DEFAULT(UseAdaptiveSizePolicy)) {
+     FLAG_SET_DEFAULT(UseAdaptiveSizePolicy, false);
+   }
+- 
++
+   // In either case, adjust ParallelGCThreads and/or UseParNewGC
+   // as needed.
+   set_parnew_gc_flags();
+@@ -1019,11 +1027,11 @@
+   }
+ 
+   // Preferred young gen size for "short" pauses
+-  const uintx parallel_gc_threads = 
++  const uintx parallel_gc_threads =
+     (ParallelGCThreads == 0 ? 1 : ParallelGCThreads);
+   const size_t preferred_max_new_size_unaligned =
+     ScaleForWordSize(young_gen_per_worker * parallel_gc_threads);
+-  const size_t preferred_max_new_size = 
++  const size_t preferred_max_new_size =
+     align_size_up(preferred_max_new_size_unaligned, os::vm_page_size());
+ 
+   // Unless explicitly requested otherwise, size young gen
+@@ -1040,7 +1048,7 @@
+   // to the old generation concurrent collector
+   if (FLAG_IS_DEFAULT(NewRatio)) {
+     FLAG_SET_DEFAULT(NewRatio, MAX2(NewRatio, new_ratio));
+-  
++
+     size_t min_new  = align_size_up(ScaleForWordSize(min_new_default), os::vm_page_size());
+     size_t prev_initial_size = initial_heap_size();
+     if (prev_initial_size != 0 && prev_initial_size < min_new+OldSize) {
+@@ -1079,7 +1087,7 @@
+   if (FLAG_IS_DEFAULT(MaxTenuringThreshold) &&
+       FLAG_IS_DEFAULT(SurvivorRatio)) {
+     FLAG_SET_DEFAULT(MaxTenuringThreshold, tenuring_default);
+-  }  
++  }
+   // If we decided above (or user explicitly requested)
+   // `promote all' (via MaxTenuringThreshold := 0),
+   // prefer minuscule survivor spaces so as not to waste
+@@ -1089,21 +1097,21 @@
+   }
+   // If OldPLABSize is set and CMSParPromoteBlocksToClaim is not,
+   // set CMSParPromoteBlocksToClaim equal to OldPLABSize.
+-  // This is done in order to make ParNew+CMS configuration to work 
++  // This is done in order to make ParNew+CMS configuration to work
+   // with YoungPLABSize and OldPLABSize options.
+   // See CR 6362902.
+   if (!FLAG_IS_DEFAULT(OldPLABSize)) {
+     if (FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim)) {
+       FLAG_SET_CMDLINE(uintx, CMSParPromoteBlocksToClaim, OldPLABSize);
+     }
+-    else {  
++    else {
+       // OldPLABSize and CMSParPromoteBlocksToClaim are both set.
+       // CMSParPromoteBlocksToClaim is a collector-specific flag, so
+       // we'll let it to take precedence.
+       jio_fprintf(defaultStream::error_stream(),
+-		  "Both OldPLABSize and CMSParPromoteBlocksToClaim options are specified "
+-		  "for the CMS collector. CMSParPromoteBlocksToClaim will take precedence.\n");
+-    }    
++                  "Both OldPLABSize and CMSParPromoteBlocksToClaim options are specified "
++                  "for the CMS collector. CMSParPromoteBlocksToClaim will take precedence.\n");
++    }
+   }
+ }
+ 
+@@ -1129,7 +1137,7 @@
+   }
+ 
+   if (os::is_server_class_machine() && !force_client_mode ) {
+-    // If no other collector is requested explicitly, 
++    // If no other collector is requested explicitly,
+     // let the VM select the collector based on
+     // machine class and automatic selection policy.
+     if (!UseSerialGC &&
+@@ -1163,43 +1171,43 @@
+   // of the physical memory, up to a maximum of 1GB.
+   if (UseParallelGC) {
+     if (FLAG_IS_DEFAULT(MaxHeapSize)) {
+-      const uint64_t reasonable_fraction = 
+-	os::physical_memory() / DefaultMaxRAMFraction;
++      const uint64_t reasonable_fraction =
++        os::physical_memory() / DefaultMaxRAMFraction;
+       const uint64_t maximum_size = (uint64_t) DefaultMaxRAM;
+-      size_t reasonable_max = 
+-	(size_t) os::allocatable_physical_memory(reasonable_fraction);
++      size_t reasonable_max =
++        (size_t) os::allocatable_physical_memory(reasonable_fraction);
+       if (reasonable_max > maximum_size) {
+-	reasonable_max = maximum_size;
++        reasonable_max = maximum_size;
+       }
+       if (PrintGCDetails && Verbose) {
+-	// Cannot use gclog_or_tty yet.
+-	tty->print_cr("  Max heap size for server class platform "
+-		      SIZE_FORMAT, reasonable_max);	
++        // Cannot use gclog_or_tty yet.
++        tty->print_cr("  Max heap size for server class platform "
++                      SIZE_FORMAT, reasonable_max);
+       }
+       // If the initial_heap_size has not been set with -Xms,
+       // then set it as fraction of size of physical memory
+-      // respecting the maximum and minimum sizes of the heap.  
++      // respecting the maximum and minimum sizes of the heap.
+       if (initial_heap_size() == 0) {
+-	const uint64_t reasonable_initial_fraction = 
+-	  os::physical_memory() / DefaultInitialRAMFraction;
+-	const size_t reasonable_initial = 
+-	  (size_t) os::allocatable_physical_memory(reasonable_initial_fraction);
+-	const size_t minimum_size = NewSize + OldSize;
+-	set_initial_heap_size(MAX2(MIN2(reasonable_initial, reasonable_max),
+-				  minimum_size));
++        const uint64_t reasonable_initial_fraction =
++          os::physical_memory() / DefaultInitialRAMFraction;
++        const size_t reasonable_initial =
++          (size_t) os::allocatable_physical_memory(reasonable_initial_fraction);
++        const size_t minimum_size = NewSize + OldSize;
++        set_initial_heap_size(MAX2(MIN2(reasonable_initial, reasonable_max),
++                                  minimum_size));
+         // Currently the minimum size and the initial heap sizes are the same.
+-	set_min_heap_size(initial_heap_size());
+-	if (PrintGCDetails && Verbose) {
+-	  // Cannot use gclog_or_tty yet.
+-	  tty->print_cr("  Initial heap size for server class platform "
+-			SIZE_FORMAT, initial_heap_size());	
+-	}
++        set_min_heap_size(initial_heap_size());
++        if (PrintGCDetails && Verbose) {
++          // Cannot use gclog_or_tty yet.
++          tty->print_cr("  Initial heap size for server class platform "
++                        SIZE_FORMAT, initial_heap_size());
++        }
+       } else {
+-	// An minimum size was specified on the command line.  Be sure
+-	// that the maximum size is consistent.
+-	if (initial_heap_size() > reasonable_max) {
+-	  reasonable_max = initial_heap_size();
+-	}
++        // An minimum size was specified on the command line.  Be sure
++        // that the maximum size is consistent.
++        if (initial_heap_size() > reasonable_max) {
++          reasonable_max = initial_heap_size();
++        }
+       }
+       FLAG_SET_ERGO(uintx, MaxHeapSize, (uintx) reasonable_max);
+     }
+@@ -1210,7 +1218,7 @@
+     // See CR 6362902 for details.
+     if (!FLAG_IS_DEFAULT(SurvivorRatio)) {
+       if (FLAG_IS_DEFAULT(InitialSurvivorRatio)) {
+-         FLAG_SET_DEFAULT(InitialSurvivorRatio, SurvivorRatio + 2); 
++         FLAG_SET_DEFAULT(InitialSurvivorRatio, SurvivorRatio + 2);
+       }
+       if (FLAG_IS_DEFAULT(MinSurvivorRatio)) {
+         FLAG_SET_DEFAULT(MinSurvivorRatio, SurvivorRatio + 2);
+@@ -1221,10 +1229,10 @@
+       // Par compact uses lower default values since they are treated as
+       // minimums.
+       if (FLAG_IS_DEFAULT(MarkSweepDeadRatio)) {
+-	MarkSweepDeadRatio = 1;
++        MarkSweepDeadRatio = 1;
+       }
+       if (FLAG_IS_DEFAULT(PermMarkSweepDeadRatio)) {
+-	PermMarkSweepDeadRatio = 5;
++        PermMarkSweepDeadRatio = 5;
+       }
+     }
+   }
+@@ -1253,12 +1261,6 @@
+       FLAG_SET_DEFAULT(CacheTimeMillis, true);
+     }
+ )
+-#ifdef COMPILER2
+-    if (FLAG_IS_DEFAULT(UseSuperWord)) {
+-      // Generate SIMD instructions
+-      FLAG_SET_DEFAULT(UseSuperWord, true);
+-    }
+-#endif /* COMPILER2 */
+   }
+ }
+ 
+@@ -1266,7 +1268,7 @@
+ // Parsing of java.compiler property
+ 
+ void Arguments::process_java_compiler_argument(char* arg) {
+-  // For backwards compatibility, Djava.compiler=NONE or "" 
++  // For backwards compatibility, Djava.compiler=NONE or ""
+   // causes us to switch to -Xint mode UNLESS -Xdebug
+   // is also specified.
+   if (strlen(arg) == 0 || strcasecmp(arg, "NONE") == 0) {
+@@ -1279,7 +1281,7 @@
+ }
+ 
+ bool Arguments::created_by_java_launcher() {
+-  assert(_sun_java_launcher != NULL, "property must have value");  
++  assert(_sun_java_launcher != NULL, "property must have value");
+   return strcmp(DEFAULT_JAVA_LAUNCHER, _sun_java_launcher) != 0;
+ }
+ 
+@@ -1291,8 +1293,8 @@
+     return true;
+   }
+   jio_fprintf(defaultStream::error_stream(),
+-	      "%s of " UINTX_FORMAT " is invalid; must be between 0 and 100\n",
+-	      name, value);
++              "%s of " UINTX_FORMAT " is invalid; must be between 0 and 100\n",
++              name, value);
+   return false;
+ }
+ 
+@@ -1305,8 +1307,8 @@
+ }
+ 
+ static bool verify_serial_gc_flags() {
+-  return (UseSerialGC && 
+-        !(UseParNewGC || UseConcMarkSweepGC || UseParallelGC || 
++  return (UseSerialGC &&
++        !(UseParNewGC || UseConcMarkSweepGC || UseParallelGC ||
+           UseParallelOldGC));
+ }
+ 
+@@ -1317,7 +1319,7 @@
+   // before returning an error.
+   // Note: Needs platform-dependent factoring.
+   bool status = true;
+-  
++
+ #if ( (defined(COMPILER2) && defined(SPARC)))
+   // NOTE: The call to VM_Version_init depends on the fact that VM_Version_init
+   // on sparc doesn't require generation of a stub as is the case on, e.g.,
+@@ -1328,7 +1330,7 @@
+   VM_Version_init();
+   if (!VM_Version::has_v9()) {
+     jio_fprintf(defaultStream::error_stream(),
+-		"V8 Machine detected, Server requires V9\n");
++                "V8 Machine detected, Server requires V9\n");
+     status = false;
+   }
+ #endif /* COMPILER2 && SPARC */
+@@ -1338,8 +1340,8 @@
+ #if (defined(PRODUCT) && defined(SOLARIS))
+   if (!UseBoundThreads && !UseStackBanging) {
+     jio_fprintf(defaultStream::error_stream(),
+-		"-UseStackBanging conflicts with -UseBoundThreads\n");
+-     
++                "-UseStackBanging conflicts with -UseBoundThreads\n");
++
+      status = false;
+   }
+ #endif
+@@ -1353,9 +1355,9 @@
+   }
+ 
+   status &= verify_percentage(MaxLiveObjectEvacuationRatio,
+-			      "MaxLiveObjectEvacuationRatio");
++                              "MaxLiveObjectEvacuationRatio");
+   status &= verify_percentage(AdaptiveSizePolicyWeight,
+-			      "AdaptiveSizePolicyWeight");
++                              "AdaptiveSizePolicyWeight");
+   status &= verify_percentage(AdaptivePermSizeWeight, "AdaptivePermSizeWeight");
+   status &= verify_percentage(ThresholdTolerance, "ThresholdTolerance");
+   status &= verify_percentage(MinHeapFreeRatio, "MinHeapFreeRatio");
+@@ -1364,8 +1366,8 @@
+   if (MinHeapFreeRatio > MaxHeapFreeRatio) {
+     jio_fprintf(defaultStream::error_stream(),
+                 "MinHeapFreeRatio (" UINTX_FORMAT ") must be less than or "
+-		"equal to MaxHeapFreeRatio (" UINTX_FORMAT ")\n",
+-		MinHeapFreeRatio, MaxHeapFreeRatio);
++                "equal to MaxHeapFreeRatio (" UINTX_FORMAT ")\n",
++                MinHeapFreeRatio, MaxHeapFreeRatio);
+     status = false;
+   }
+   // Keeping the heap 100% free is hard ;-) so limit it to 99%.
+@@ -1385,7 +1387,7 @@
+   status &= verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit");
+ 
+   // Check user specified sharing option conflict with Parallel GC
+-  bool cannot_share = (UseConcMarkSweepGC || UseParallelGC || 
++  bool cannot_share = (UseConcMarkSweepGC || UseParallelGC ||
+                        UseParallelOldGC || UseParNewGC ||
+                        SOLARIS_ONLY(UseISM) NOT_SOLARIS(UseLargePages));
+ 
+@@ -1439,30 +1441,30 @@
+   if (CMSIncrementalMode) {
+     if (!UseConcMarkSweepGC) {
+       jio_fprintf(defaultStream::error_stream(),
+-		  "error:  invalid argument combination.\n"
+-		  "The CMS collector (-XX:+UseConcMarkSweepGC) must be "
+-		  "selected in order\nto use CMSIncrementalMode.\n");
++                  "error:  invalid argument combination.\n"
++                  "The CMS collector (-XX:+UseConcMarkSweepGC) must be "
++                  "selected in order\nto use CMSIncrementalMode.\n");
+       status = false;
+     } else if (!UseTLAB) {
+       jio_fprintf(defaultStream::error_stream(),
+-		  "error:  CMSIncrementalMode requires thread-local "
+-		  "allocation buffers\n(-XX:+UseTLAB).\n");
++                  "error:  CMSIncrementalMode requires thread-local "
++                  "allocation buffers\n(-XX:+UseTLAB).\n");
+       status = false;
+     } else {
+       status &= verify_percentage(CMSIncrementalDutyCycle,
+-				  "CMSIncrementalDutyCycle");
++                                  "CMSIncrementalDutyCycle");
+       status &= verify_percentage(CMSIncrementalDutyCycleMin,
+-				  "CMSIncrementalDutyCycleMin");
++                                  "CMSIncrementalDutyCycleMin");
+       status &= verify_percentage(CMSIncrementalSafetyFactor,
+-				  "CMSIncrementalSafetyFactor");
++                                  "CMSIncrementalSafetyFactor");
+       status &= verify_percentage(CMSIncrementalOffset,
+-				  "CMSIncrementalOffset");
++                                  "CMSIncrementalOffset");
+       status &= verify_percentage(CMSExpAvgFactor,
+-				  "CMSExpAvgFactor");
++                                  "CMSExpAvgFactor");
+       // If it was not set on the command line, set
+       // CMSInitiatingOccupancyFraction to 1 so icms can initiate cycles early.
+       if (CMSInitiatingOccupancyFraction < 0) {
+-	FLAG_SET_DEFAULT(CMSInitiatingOccupancyFraction, 1);
++        FLAG_SET_DEFAULT(CMSInitiatingOccupancyFraction, 1);
+       }
+     }
+   }
+@@ -1504,7 +1506,7 @@
+                 " with -UseAsyncConcMarkSweepGC");
+     status = false;
+   }
+-  
++
+   return status;
+ }
+ 
+@@ -1519,12 +1521,12 @@
+ 
+   if (os::obsolete_option(option)) {
+     jio_fprintf(defaultStream::error_stream(),
+-		"Obsolete %s%soption: %s\n", option_type, spacer,
++                "Obsolete %s%soption: %s\n", option_type, spacer,
+       option->optionString);
+     return false;
+   } else {
+     jio_fprintf(defaultStream::error_stream(),
+-		"Unrecognized %s%soption: %s\n", option_type, spacer,
++                "Unrecognized %s%soption: %s\n", option_type, spacer,
+       option->optionString);
+     return true;
+   }
+@@ -1546,7 +1548,7 @@
+   for (/* empty */; *names != NULL; ++names) {
+     if (match_option(option, *names, tail)) {
+       if (**tail == '\0' || tail_allowed && **tail == ':') {
+-	return true;
++        return true;
+       }
+     }
+   }
+@@ -1554,8 +1556,8 @@
+ }
+ 
+ Arguments::ArgsRange Arguments::parse_memory_size(const char* s,
+-						  jlong* long_arg,
+-						  jlong min_size) {
++                                                  jlong* long_arg,
++                                                  jlong min_size) {
+   if (!atomll(s, long_arg)) return arg_unreadable;
+   return check_memory_size(*long_arg, min_size);
+ }
+@@ -1574,7 +1576,7 @@
+   Arguments::_BackgroundCompilation    = BackgroundCompilation;
+   Arguments::_Tier2CompileThreshold    = Tier2CompileThreshold;
+ 
+-  // Parse JAVA_TOOL_OPTIONS environment variable (if present) 
++  // Parse JAVA_TOOL_OPTIONS environment variable (if present)
+   jint result = parse_java_tool_options_environment_variable(&scp, &scp_assembly_required);
+   if (result != JNI_OK) {
+     return result;
+@@ -1602,29 +1604,29 @@
+ }
+ 
+ 
+-jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, 
++jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
+                                        SysClassPath* scp_p,
+                                        bool* scp_assembly_required_p,
+                                        FlagValueOrigin origin) {
+   // Remaining part of option string
+   const char* tail;
+ 
+-  // iterate over arguments  
++  // iterate over arguments
+   for (int index = 0; index < args->nOptions; index++) {
+     bool is_absolute_path = false;  // for -agentpath vs -agentlib
+ 
+-    const JavaVMOption* option = args->options + index;    
++    const JavaVMOption* option = args->options + index;
+ 
+     if (!match_option(option, "-Djava.class.path", &tail) &&
+         !match_option(option, "-Dsun.java.command", &tail) &&
+-        !match_option(option, "-Dsun.java.launcher", &tail)) { 
++        !match_option(option, "-Dsun.java.launcher", &tail)) {
+ 
+         // add all jvm options to the jvm_args string. This string
+         // is used later to set the java.vm.args PerfData string constant.
+         // the -Djava.class.path and the -Dsun.java.command options are
+         // omitted from jvm_args string as each have their own PerfData
+         // string constant object.
+-	build_jvm_args(option->optionString);
++        build_jvm_args(option->optionString);
+     }
+ 
+     // -verbose:[class/gc/jni]
+@@ -1637,21 +1639,21 @@
+         FLAG_SET_CMDLINE(bool, TraceClassUnloading, true);
+       } else if (!strcmp(tail, ":jni")) {
+         FLAG_SET_CMDLINE(bool, PrintJNIResolving, true);
+-      }    
++      }
+     // -da / -ea / -disableassertions / -enableassertions
+     // These accept an optional class/package name separated by a colon, e.g.,
+     // -da:java.lang.Thread.
+     } else if (match_option(option, user_assertion_options, &tail, true)) {
+-      bool enable = option->optionString[1] == 'e';	// char after '-' is 'e'
++      bool enable = option->optionString[1] == 'e';     // char after '-' is 'e'
+       if (*tail == '\0') {
+-	JavaAssertions::setUserClassDefault(enable);
++        JavaAssertions::setUserClassDefault(enable);
+       } else {
+-	assert(*tail == ':', "bogus match by match_option()");
+-	JavaAssertions::addOption(tail + 1, enable);
++        assert(*tail == ':', "bogus match by match_option()");
++        JavaAssertions::addOption(tail + 1, enable);
+       }
+     // -dsa / -esa / -disablesystemassertions / -enablesystemassertions
+     } else if (match_option(option, system_assertion_options, &tail, false)) {
+-      bool enable = option->optionString[1] == 'e';	// char after '-' is 'e'
++      bool enable = option->optionString[1] == 'e';     // char after '-' is 'e'
+       JavaAssertions::setSystemClassDefault(enable);
+     // -bootclasspath:
+     } else if (match_option(option, "-Xbootclasspath:", &tail)) {
+@@ -1678,6 +1680,11 @@
+           size_t len2 = strlen(pos+1) + 1; // options start after ':'.  Final zero must be copied.
+           options = (char*)memcpy(NEW_C_HEAP_ARRAY(char, len2), pos+1, len2);
+         }
++#ifdef JVMTI_KERNEL
++        if ((strcmp(name, "hprof") == 0) || (strcmp(name, "jdwp") == 0)) {
++          warning("profiling and debugging agents are not supported with Kernel VM");
++        } else
++#endif // JVMTI_KERNEL
+         add_init_library(name, options);
+       }
+     // -agentlib and -agentpath
+@@ -1693,7 +1700,13 @@
+         if(pos != NULL) {
+           options = strcpy(NEW_C_HEAP_ARRAY(char, strlen(pos + 1) + 1), pos + 1);
+         }
++#ifdef JVMTI_KERNEL
++        if ((strcmp(name, "hprof") == 0) || (strcmp(name, "jdwp") == 0)) {
++          warning("profiling and debugging agents are not supported with Kernel VM");
++        } else
++#endif // JVMTI_KERNEL
+         add_init_agent(name, options, is_absolute_path);
++
+       }
+     // -javaagent
+     } else if (match_option(option, "-javaagent:", &tail)) {
+@@ -1727,7 +1740,7 @@
+       ArgsRange errcode = parse_memory_size(tail, &long_initial_eden_size, 1);
+       if (errcode != arg_in_range) {
+         jio_fprintf(defaultStream::error_stream(),
+-		    "Invalid initial eden size: %s\n", option->optionString);
++                    "Invalid initial eden size: %s\n", option->optionString);
+         describe_range_error(errcode);
+         return JNI_EINVAL;
+       }
+@@ -1739,7 +1752,7 @@
+       ArgsRange errcode = parse_memory_size(tail, &long_initial_heap_size, 1);
+       if (errcode != arg_in_range) {
+         jio_fprintf(defaultStream::error_stream(),
+-		    "Invalid initial heap size: %s\n", option->optionString);
++                    "Invalid initial heap size: %s\n", option->optionString);
+         describe_range_error(errcode);
+         return JNI_EINVAL;
+       }
+@@ -1752,7 +1765,7 @@
+       ArgsRange errcode = parse_memory_size(tail, &long_max_heap_size, 1);
+       if (errcode != arg_in_range) {
+         jio_fprintf(defaultStream::error_stream(),
+-		    "Invalid maximum heap size: %s\n", option->optionString);
++                    "Invalid maximum heap size: %s\n", option->optionString);
+         describe_range_error(errcode);
+         return JNI_EINVAL;
+       }
+@@ -1762,8 +1775,8 @@
+       int maxf = (int)(atof(tail) * 100);
+       if (maxf < 0 || maxf > 100) {
+         jio_fprintf(defaultStream::error_stream(),
+-		    "Bad max heap free percentage size: %s\n",
+-		    option->optionString);
++                    "Bad max heap free percentage size: %s\n",
++                    option->optionString);
+         return JNI_EINVAL;
+       } else {
+         FLAG_SET_CMDLINE(uintx, MaxHeapFreeRatio, maxf);
+@@ -1773,8 +1786,8 @@
+       int minf = (int)(atof(tail) * 100);
+       if (minf < 0 || minf > 100) {
+         jio_fprintf(defaultStream::error_stream(),
+-		    "Bad min heap free percentage size: %s\n",
+-		    option->optionString);
++                    "Bad min heap free percentage size: %s\n",
++                    option->optionString);
+         return JNI_EINVAL;
+       } else {
+         FLAG_SET_CMDLINE(uintx, MinHeapFreeRatio, minf);
+@@ -1785,7 +1798,7 @@
+       ArgsRange errcode = parse_memory_size(tail, &long_ThreadStackSize, 1000);
+       if (errcode != arg_in_range) {
+         jio_fprintf(defaultStream::error_stream(),
+-		    "Invalid thread stack size: %s\n", option->optionString);
++                    "Invalid thread stack size: %s\n", option->optionString);
+         describe_range_error(errcode);
+         return JNI_EINVAL;
+       }
+@@ -1794,16 +1807,16 @@
+                               round_to((int)long_ThreadStackSize, K) / K);
+     // -Xoss
+     } else if (match_option(option, "-Xoss", &tail)) {
+-	  // HotSpot does not have separate native and Java stacks, ignore silently for compatibility
++          // HotSpot does not have separate native and Java stacks, ignore silently for compatibility
+     // -Xmaxjitcodesize
+     } else if (match_option(option, "-Xmaxjitcodesize", &tail)) {
+       jlong long_ReservedCodeCacheSize = 0;
+       ArgsRange errcode = parse_memory_size(tail, &long_ReservedCodeCacheSize,
+-					    InitialCodeCacheSize);
++                                            InitialCodeCacheSize);
+       if (errcode != arg_in_range) {
+         jio_fprintf(defaultStream::error_stream(),
+-		    "Invalid maximum code cache size: %s\n",
+-		    option->optionString);
++                    "Invalid maximum code cache size: %s\n",
++                    option->optionString);
+         describe_range_error(errcode);
+         return JNI_EINVAL;
+       }
+@@ -1811,27 +1824,32 @@
+     // -green
+     } else if (match_option(option, "-green", &tail)) {
+       jio_fprintf(defaultStream::error_stream(),
+-		  "Green threads support not available\n");
+-	  return JNI_EINVAL;
++                  "Green threads support not available\n");
++          return JNI_EINVAL;
+     // -native
+     } else if (match_option(option, "-native", &tail)) {
+-	  // HotSpot always uses native threads, ignore silently for compatibility
++          // HotSpot always uses native threads, ignore silently for compatibility
+     // -Xsqnopause
+     } else if (match_option(option, "-Xsqnopause", &tail)) {
+-	  // EVM option, ignore silently for compatibility
++          // EVM option, ignore silently for compatibility
+     // -Xrs
+     } else if (match_option(option, "-Xrs", &tail)) {
+-	  // Classic/EVM option, new functionality
++          // Classic/EVM option, new functionality
+       FLAG_SET_CMDLINE(bool, ReduceSignalUsage, true);
+     } else if (match_option(option, "-Xusealtsigs", &tail)) {
+           // change default internal VM signals used - lower case for back compat
+       FLAG_SET_CMDLINE(bool, UseAltSigs, true);
+     // -Xoptimize
+     } else if (match_option(option, "-Xoptimize", &tail)) {
+-	  // EVM option, ignore silently for compatibility
++          // EVM option, ignore silently for compatibility
+     // -Xprof
+     } else if (match_option(option, "-Xprof", &tail)) {
++#ifndef FPROF_KERNEL
+       _has_profile = true;
++#else // FPROF_KERNEL
++      // do we have to exit?
++      warning("Kernel VM does not support flat profiling.");
++#endif // FPROF_KERNEL
+     // -Xaprof
+     } else if (match_option(option, "-Xaprof", &tail)) {
+       _has_alloc_profile = true;
+@@ -1846,7 +1864,7 @@
+       // -Xinternalversion
+     } else if (match_option(option, "-Xinternalversion", &tail)) {
+       jio_fprintf(defaultStream::output_stream(), "%s\n",
+-		  VM_Version::internal_vm_info_string());
++                  VM_Version::internal_vm_info_string());
+       vm_exit(0);
+ #ifndef PRODUCT
+     // -Xprintflags
+@@ -1855,7 +1873,7 @@
+       vm_exit(0);
+ #endif
+     // -D
+-    } else if (match_option(option, "-D", &tail)) {      
++    } else if (match_option(option, "-D", &tail)) {
+       if (!add_property(tail)) {
+         return JNI_ENOMEM;
+       }
+@@ -1865,26 +1883,29 @@
+       }
+     // -Xint
+     } else if (match_option(option, "-Xint", &tail)) {
+-	  set_mode_flags(_int);
++          set_mode_flags(_int);
+     // -Xmixed
+     } else if (match_option(option, "-Xmixed", &tail)) {
+-	  set_mode_flags(_mixed);
++          set_mode_flags(_mixed);
+     // -Xcomp
+     } else if (match_option(option, "-Xcomp", &tail)) {
+       // for testing the compiler; turn off all flags that inhibit compilation
+-	  set_mode_flags(_comp);
++          set_mode_flags(_comp);
+ 
+     // -Xshare:dump
+     } else if (match_option(option, "-Xshare:dump", &tail)) {
+ #ifdef TIERED
+       FLAG_SET_CMDLINE(bool, DumpSharedSpaces, true);
+-      set_mode_flags(_int);	// Prevent compilation, which creates objects
++      set_mode_flags(_int);     // Prevent compilation, which creates objects
+ #elif defined(COMPILER2)
+       vm_exit_during_initialization(
+           "Dumping a shared archive is not supported on the Server JVM.", NULL);
++#elif defined(KERNEL)
++      vm_exit_during_initialization(
++          "Dumping a shared archive is not supported on the Kernel JVM.", NULL);
+ #else
+       FLAG_SET_CMDLINE(bool, DumpSharedSpaces, true);
+-      set_mode_flags(_int);	// Prevent compilation, which creates objects
++      set_mode_flags(_int);     // Prevent compilation, which creates objects
+ #endif
+     // -Xshare:on
+     } else if (match_option(option, "-Xshare:on", &tail)) {
+@@ -1903,7 +1924,7 @@
+       FLAG_SET_CMDLINE(bool, RequireSharedSpaces, false);
+ 
+     // -Xverify
+-    } else if (match_option(option, "-Xverify", &tail)) {      
++    } else if (match_option(option, "-Xverify", &tail)) {
+       if (strcmp(tail, ":all") == 0 || strcmp(tail, "") == 0) {
+         FLAG_SET_CMDLINE(bool, BytecodeVerificationLocal, true);
+         FLAG_SET_CMDLINE(bool, BytecodeVerificationRemote, true);
+@@ -1914,16 +1935,16 @@
+         FLAG_SET_CMDLINE(bool, BytecodeVerificationLocal, false);
+         FLAG_SET_CMDLINE(bool, BytecodeVerificationRemote, false);
+       } else if (is_bad_option(option, args->ignoreUnrecognized, "verification")) {
+-	return JNI_EINVAL;
++        return JNI_EINVAL;
+       }
+     // -Xdebug
+     } else if (match_option(option, "-Xdebug", &tail)) {
+       // note this flag has been used, then ignore
+       set_xdebug_mode(true);
+-    // -Xnoagent 
+-    } else if (match_option(option, "-Xnoagent", &tail)) {    
++    // -Xnoagent
++    } else if (match_option(option, "-Xnoagent", &tail)) {
+       // For compatibility with classic. HotSpot refuses to load the old style agent.dll.
+-    } else if (match_option(option, "-Xboundthreads", &tail)) {    
++    } else if (match_option(option, "-Xboundthreads", &tail)) {
+       // Bind user level threads to kernel threads (Solaris only)
+       FLAG_SET_CMDLINE(bool, UseBoundThreads, true);
+     } else if (match_option(option, "-Xloggc:", &tail)) {
+@@ -1936,18 +1957,18 @@
+       FLAG_SET_CMDLINE(bool, TraceClassUnloading, true);
+ 
+     // JNI hooks
+-    } else if (match_option(option, "-Xcheck", &tail)) {    
++    } else if (match_option(option, "-Xcheck", &tail)) {
+       if (!strcmp(tail, ":jni")) {
+         CheckJNICalls = true;
+-      } else if (is_bad_option(option, args->ignoreUnrecognized, 
++      } else if (is_bad_option(option, args->ignoreUnrecognized,
+                                      "check")) {
+         return JNI_EINVAL;
+       }
+-    } else if (match_option(option, "vfprintf", &tail)) {    
++    } else if (match_option(option, "vfprintf", &tail)) {
+       _vfprintf_hook = CAST_TO_FN_PTR(vfprintf_hook_t, option->extraInfo);
+-    } else if (match_option(option, "exit", &tail)) {    
++    } else if (match_option(option, "exit", &tail)) {
+       _exit_hook = CAST_TO_FN_PTR(exit_hook_t, option->extraInfo);
+-    } else if (match_option(option, "abort", &tail)) {    
++    } else if (match_option(option, "abort", &tail)) {
+       _abort_hook = CAST_TO_FN_PTR(abort_hook_t, option->extraInfo);
+     // -XX:+AggressiveHeap
+     } else if (match_option(option, "-XX:+AggressiveHeap", &tail)) {
+@@ -1967,7 +1988,7 @@
+ 
+       if (total_memory < (julong)256*M) {
+         jio_fprintf(defaultStream::error_stream(),
+-		    "You need at least 256mb of memory to use -XX:+AggressiveHeap\n");
++                    "You need at least 256mb of memory to use -XX:+AggressiveHeap\n");
+         vm_exit(1);
+       }
+ 
+@@ -1989,7 +2010,7 @@
+       if (initHeapSize > MaxPermSize) {
+         initHeapSize = initHeapSize - MaxPermSize;
+       } else {
+-	warning("AggressiveHeap and MaxPermSize values may conflict");
++        warning("AggressiveHeap and MaxPermSize values may conflict");
+       }
+ 
+       if (FLAG_IS_DEFAULT(MaxHeapSize)) {
+@@ -2040,7 +2061,7 @@
+       // explicitly here in case the default changes.
+       // See runtime/compilationPolicy.*.
+       FLAG_SET_CMDLINE(intx, CompilationPolicyChoice, 0);
+-	
++
+       // Enable parallel GC and adaptive generation sizing
+       FLAG_SET_CMDLINE(bool, UseParallelGC, true);
+       FLAG_SET_DEFAULT(ParallelGCThreads, nof_parallel_gc_threads());
+@@ -2050,17 +2071,17 @@
+ 
+       // This appears to improve mutator locality
+       FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false);
+-      
+-      // Get around early Solaris scheduling bug 
++
++      // Get around early Solaris scheduling bug
+       // (affinity vs other jobs on system)
+       // but disallow DR and offlining (5008695).
+       FLAG_SET_CMDLINE(bool, BindGCTaskThreadsToCPUs, true);
+ 
+-    } else if (match_option(option, "-XX:+NeverTenure", &tail)) {    
++    } else if (match_option(option, "-XX:+NeverTenure", &tail)) {
+       // The last option must always win.
+       FLAG_SET_CMDLINE(bool, AlwaysTenure, false);
+       FLAG_SET_CMDLINE(bool, NeverTenure, true);
+-    } else if (match_option(option, "-XX:+AlwaysTenure", &tail)) {    
++    } else if (match_option(option, "-XX:+AlwaysTenure", &tail)) {
+       // The last option must always win.
+       FLAG_SET_CMDLINE(bool, NeverTenure, false);
+       FLAG_SET_CMDLINE(bool, AlwaysTenure, true);
+@@ -2069,16 +2090,16 @@
+       jio_fprintf(defaultStream::error_stream(),
+         "Please use CMSClassUnloadingEnabled in place of "
+         "CMSPermGenSweepingEnabled in the future\n");
+-    } else if (match_option(option, "-XX:+UseGCTimeLimit", &tail)) {    
++    } else if (match_option(option, "-XX:+UseGCTimeLimit", &tail)) {
+       FLAG_SET_CMDLINE(bool, UseGCOverheadLimit, true);
+       jio_fprintf(defaultStream::error_stream(),
+         "Please use -XX:+UseGCOverheadLimit in place of "
+-	"-XX:+UseGCTimeLimit in the future\n");
+-    } else if (match_option(option, "-XX:-UseGCTimeLimit", &tail)) {    
++        "-XX:+UseGCTimeLimit in the future\n");
++    } else if (match_option(option, "-XX:-UseGCTimeLimit", &tail)) {
+       FLAG_SET_CMDLINE(bool, UseGCOverheadLimit, false);
+       jio_fprintf(defaultStream::error_stream(),
+         "Please use -XX:-UseGCOverheadLimit in place of "
+-	"-XX:-UseGCTimeLimit in the future\n");
++        "-XX:-UseGCTimeLimit in the future\n");
+     // The TLE options are for compatibility with 1.3 and will be
+     // removed without notice in a future release.  These options
+     // are not to be documented.
+@@ -2099,7 +2120,7 @@
+       ArgsRange errcode = parse_memory_size(tail, &long_tlab_size, 1);
+       if (errcode != arg_in_range) {
+         jio_fprintf(defaultStream::error_stream(),
+-		    "Invalid TLAB size: %s\n", option->optionString);
++                    "Invalid TLAB size: %s\n", option->optionString);
+         describe_range_error(errcode);
+         return JNI_EINVAL;
+       }
+@@ -2134,49 +2155,49 @@
+                   "ExtendedDTraceProbes flag is only applicable on Solaris\n");
+       return JNI_EINVAL;
+ #endif // ndef SOLARIS
+-    } else 
+-#ifdef ASSERT    
++    } else
++#ifdef ASSERT
+     if (match_option(option, "-XX:+FullGCALot", &tail)) {
+       FLAG_SET_CMDLINE(bool, FullGCALot, true);
+       // disable scavenge before parallel mark-compact
+       FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false);
+-    } else 
++    } else
+ #endif
+     if (match_option(option, "-XX:ParCMSPromoteBlocksToClaim=", &tail)) {
+-      julong cms_blocks_to_claim = (julong)atol(tail); 
++      julong cms_blocks_to_claim = (julong)atol(tail);
+       FLAG_SET_CMDLINE(uintx, CMSParPromoteBlocksToClaim, cms_blocks_to_claim);
+       jio_fprintf(defaultStream::error_stream(),
+         "Please use -XX:CMSParPromoteBlocksToClaim in place of "
+-	"-XX:ParCMSPromoteBlocksToClaim in the future\n"); 
++        "-XX:ParCMSPromoteBlocksToClaim in the future\n");
+     } else
+     if (match_option(option, "-XX:ParallelGCOldGenAllocBufferSize=", &tail)) {
+       jlong old_plab_size = 0;
+       ArgsRange errcode = parse_memory_size(tail, &old_plab_size, 1);
+       if (errcode != arg_in_range) {
+         jio_fprintf(defaultStream::error_stream(),
+-		    "Invalid old PLAB size: %s\n", option->optionString);
++                    "Invalid old PLAB size: %s\n", option->optionString);
+         describe_range_error(errcode);
+         return JNI_EINVAL;
+       }
+       FLAG_SET_CMDLINE(uintx, OldPLABSize, (julong)old_plab_size);
+       jio_fprintf(defaultStream::error_stream(),
+-		  "Please use -XX:OldPLABSize in place of "
+-		  "-XX:ParallelGCOldGenAllocBufferSize in the future\n"); 
++                  "Please use -XX:OldPLABSize in place of "
++                  "-XX:ParallelGCOldGenAllocBufferSize in the future\n");
+     } else
+     if (match_option(option, "-XX:ParallelGCToSpaceAllocBufferSize=", &tail)) {
+       jlong young_plab_size = 0;
+       ArgsRange errcode = parse_memory_size(tail, &young_plab_size, 1);
+       if (errcode != arg_in_range) {
+         jio_fprintf(defaultStream::error_stream(),
+-		    "Invalid young PLAB size: %s\n", option->optionString);
++                    "Invalid young PLAB size: %s\n", option->optionString);
+         describe_range_error(errcode);
+         return JNI_EINVAL;
+       }
+       FLAG_SET_CMDLINE(uintx, YoungPLABSize, (julong)young_plab_size);
+       jio_fprintf(defaultStream::error_stream(),
+-		  "Please use -XX:YoungPLABSize in place of "
+-		  "-XX:ParallelGCToSpaceAllocBufferSize in the future\n"); 
+-    } else    
++                  "Please use -XX:YoungPLABSize in place of "
++                  "-XX:ParallelGCToSpaceAllocBufferSize in the future\n");
++    } else
+     if (match_option(option, "-XX:", &tail)) { // -XX:xxxx
+       // Skip -XX:Flags= since that case has already been handled
+       if (strncmp(tail, "Flags=", strlen("Flags=")) != 0) {
+@@ -2210,6 +2231,9 @@
+     // not specified.
+     set_mode_flags(_int);
+   }
++  if (CompileThreshold == 0) {
++    set_mode_flags(_int);
++  }
+ 
+ #ifdef TIERED
+   // If we are using tiered compilation in the tiered vm then c1 will
+@@ -2220,7 +2244,7 @@
+   } else {
+     // Since we are running vanilla server we must adjust the compile threshold
+     // unless the user has already adjusted it because the default threshold assumes
+-    // we will run tiered. 
++    // we will run tiered.
+ 
+     if (FLAG_IS_DEFAULT(CompileThreshold)) {
+       CompileThreshold = Tier2CompileThreshold;
+@@ -2232,8 +2256,8 @@
+   // Don't degrade server performance for footprint
+   if (FLAG_IS_DEFAULT(UseLargePages) &&
+       MaxHeapSize < LargePageHeapSizeThreshold) {
+-    // No need for large granularity pages w/small heaps.  
+-    // Note that large pages are enabled/disabled for both the 
++    // No need for large granularity pages w/small heaps.
++    // Note that large pages are enabled/disabled for both the
+     // Java heap and the code cache.
+     FLAG_SET_DEFAULT(UseLargePages, false);
+     SOLARIS_ONLY(FLAG_SET_DEFAULT(UseMPSS, false));
+@@ -2254,12 +2278,12 @@
+ 
+ jint Arguments::parse_java_options_environment_variable(SysClassPath* scp_p, bool* scp_assembly_required_p) {
+   return parse_options_environment_variable("_JAVA_OPTIONS", scp_p,
+-					    scp_assembly_required_p);
++                                            scp_assembly_required_p);
+ }
+ 
+ jint Arguments::parse_java_tool_options_environment_variable(SysClassPath* scp_p, bool* scp_assembly_required_p) {
+   return parse_options_environment_variable("JAVA_TOOL_OPTIONS", scp_p,
+-					    scp_assembly_required_p);
++                                            scp_assembly_required_p);
+ }
+ 
+ jint Arguments::parse_options_environment_variable(const char* name, SysClassPath* scp_p, bool* scp_assembly_required_p) {
+@@ -2274,7 +2298,7 @@
+       !os::have_special_privileges()) {
+     JavaVMOption options[N_MAX_OPTIONS];      // Construct option array
+     jio_fprintf(defaultStream::error_stream(),
+-		"Picked up %s: %s\n", name, buffer);
++                "Picked up %s: %s\n", name, buffer);
+     char* rd = buffer;                        // pointer to the input string (rd)
+     int i;
+     for (i = 0; i < N_MAX_OPTIONS;) {         // repeat for all options in the input string
+@@ -2282,7 +2306,7 @@
+       if (*rd == 0) break;                    // we re done when the input string is read completely
+ 
+       // The output, option string, overwrites the input string.
+-      // Because of quoting, the pointer to the option string (wrt) may lag the pointer to 
++      // Because of quoting, the pointer to the option string (wrt) may lag the pointer to
+       // input string (rd).
+       char* wrt = rd;
+ 
+@@ -2294,7 +2318,7 @@
+           while (*rd != quote) {              // include everything (even spaces) up until quote
+             if (*rd == 0) {                   // string termination means unmatched string
+               jio_fprintf(defaultStream::error_stream(),
+-			  "Unmatched quote in %s\n", name);
++                          "Unmatched quote in %s\n", name);
+               return JNI_ERR;
+             }
+             *wrt++ = *rd++;                   // copy to option string
+@@ -2318,14 +2342,14 @@
+     vm_args.options = options;
+     vm_args.nOptions = i;
+     vm_args.ignoreUnrecognized = false;
+-    
++
+     if (PrintVMOptions) {
+       const char* tail;
+       for (int i = 0; i < vm_args.nOptions; i++) {
+-	const JavaVMOption *option = vm_args.options + i;
+-	if (match_option(option, "-XX:", &tail)) {
+-	  logOption(tail);
+-	}
++        const JavaVMOption *option = vm_args.options + i;
++        if (match_option(option, "-XX:", &tail)) {
++          logOption(tail);
++        }
+       }
+     }
+ 
+@@ -2360,7 +2384,7 @@
+   SharedArchivePath = shared_archive_path;
+ 
+   // Remaining part of option string
+-  const char* tail;   
++  const char* tail;
+ 
+   // If flag "-XX:Flags=flags-file" is used it will be the first option to be processed.
+   bool settings_file_specified = false;
+@@ -2371,8 +2395,8 @@
+       if (!process_settings_file(tail, true, args->ignoreUnrecognized)) {
+         return JNI_EINVAL;
+       }
+-      settings_file_specified = true;     
+-    } 
++      settings_file_specified = true;
++    }
+     if (match_option(option, "-XX:+PrintVMOptions", &tail)) {
+       PrintVMOptions = true;
+     }
+@@ -2384,12 +2408,12 @@
+       return JNI_EINVAL;
+     }
+   }
+-  
++
+   if (PrintVMOptions) {
+     for (index = 0; index < args->nOptions; index++) {
+       const JavaVMOption *option = args->options + index;
+       if (match_option(option, "-XX:", &tail)) {
+-	logOption(tail);	
++        logOption(tail);
+       }
+     }
+   }
+@@ -2423,7 +2447,10 @@
+ #ifdef SERIALGC
+   set_serial_gc_flags();
+ #endif // SERIALGC
+-  
++#ifdef KERNEL
++  no_shared_spaces();
++#endif // KERNEL
++
+   // Set some flags for ParallelGC if needed.
+   set_parallel_gc_flags();
+ 
+@@ -2443,10 +2470,10 @@
+   // Set flags if Aggressive optimization flags (-XX:+AggressiveOpts) enabled.
+   set_aggressive_opts_flags();
+ 
+-#ifdef IA64
+-  // Biased locking is not implemented on IA64
++#ifdef CC_INTERP
++  // Biased locking is not implemented with c++ interpreter
+   FLAG_SET_DEFAULT(UseBiasedLocking, false);
+-#endif /* IA64 */
++#endif /* CC_INTERP */
+ 
+   if (PrintCommandLineFlags) {
+     CommandLineFlags::printSetFlags();
+@@ -2538,26 +2565,56 @@
+       return;
+     }
+   }
+-      
++
+   PropertyList_add(plist, k, v);
+ }
+ 
++#ifdef KERNEL
++char *Arguments::get_kernel_properties() {
++  // Find properties starting with kernel and append them to string
++  // We need to find out how long they are first because the URL's that they
++  // might point to could get long.
++  int length = 0;
++  SystemProperty* prop;
++  for (prop = _system_properties; prop != NULL; prop = prop->next()) {
++    if (strncmp(prop->key(), "kernel.", 7 ) == 0) {
++      length += (strlen(prop->key()) + strlen(prop->value()) + 5);  // "-D ="
++    }
++  }
++  // Add one for null terminator.
++  char *props = AllocateHeap(length + 1, "get_kernel_properties");
++  if (length != 0) {
++    int pos = 0;
++    for (prop = _system_properties; prop != NULL; prop = prop->next()) {
++      if (strncmp(prop->key(), "kernel.", 7 ) == 0) {
++        jio_snprintf(&props[pos], length-pos,
++                     "-D%s=%s ", prop->key(), prop->value());
++        pos = strlen(props);
++      }
++    }
++  }
++  // null terminate props in case of null
++  props[length] = '\0';
++  return props;
++}
++#endif // KERNEL
++
+ // Copies src into buf, replacing "%%" with "%" and "%p" with pid
+ // Returns true if all of the source pointed by src has been copied over to
+ // the destination buffer pointed by buf. Otherwise, returns false.
+-// Notes: 
+-// 1. If the length (buflen) of the destination buffer excluding the 
++// Notes:
++// 1. If the length (buflen) of the destination buffer excluding the
+ // NULL terminator character is not long enough for holding the expanded
+ // pid characters, it also returns false instead of returning the partially
+ // expanded one.
+ // 2. The passed in "buflen" should be large enough to hold the null terminator.
+-bool Arguments::copy_expand_pid(const char* src, size_t srclen, 
++bool Arguments::copy_expand_pid(const char* src, size_t srclen,
+                                 char* buf, size_t buflen) {
+   const char* p = src;
+   char* b = buf;
+   const char* src_end = &src[srclen];
+   char* buf_end = &buf[buflen - 1];
+- 
++
+   while (p < src_end && b < buf_end) {
+     if (*p == '%') {
+       switch (*(++p)) {
+@@ -2569,7 +2626,7 @@
+         // that we could write '\0' to the end of the buffer.
+         size_t buf_sz = buf_end - b + 1;
+         int ret = jio_snprintf(b, buf_sz, "%d", os::current_process_id());
+-        
++
+         // if jio_snprintf fails or the buffer is not long enough to hold
+         // the expanded pid, returns false.
+         if (ret < 0 || ret >= (int)buf_sz) {
+@@ -2585,7 +2642,7 @@
+         p++;
+         break;
+       }
+-      default : 
++      default :
+         *b++ = '%';
+       }
+     } else {
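+
The copy_expand_pid hunks above change only whitespace, but the routine's contract is worth spelling out: "%%" expands to "%", "%p" expands to the current process id, and the copy reports failure rather than returning a truncated result. A minimal standalone sketch of those rules, assuming plain snprintf/getpid in place of HotSpot's jio_snprintf and os::current_process_id():

  #include <cstdio>
  #include <cstring>
  #include <unistd.h>

  // Returns true only if all of src fit into buf after expansion; a partial
  // result counts as failure, matching the comment block in the patch.
  static bool copy_expand_pid_sketch(const char* src, size_t srclen,
                                     char* buf, size_t buflen) {
    const char* p = src;
    char* b = buf;
    const char* src_end = src + srclen;
    char* buf_end = buf + buflen - 1;          // keep room for the terminator
    while (p < src_end && b < buf_end) {
      if (*p == '%') {
        switch (*(++p)) {
        case '%':                              // "%%" -> "%"
          *b++ = *p++;
          break;
        case 'p': {                            // "%p" -> process id
          size_t room = (size_t)(buf_end - b) + 1;
          int ret = snprintf(b, room, "%d", (int)getpid());
          if (ret < 0 || (size_t)ret >= room)
            return false;                      // pid did not fit
          b += ret;
          p++;
          break;
        }
        default:                               // lone '%' is copied through
          *b++ = '%';
        }
      } else {
        *b++ = *p++;                           // ordinary character
      }
    }
    *b = '\0';
    return p == src_end;
  }

  int main() {
    const char* t = "hs_err_%p.log";
    char out[64];
    if (copy_expand_pid_sketch(t, strlen(t), out, sizeof(out)))
      puts(out);                               // e.g. hs_err_12345.log
    return 0;
  }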
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/arguments.hpp openjdk/hotspot/src/share/vm/runtime/arguments.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/arguments.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/arguments.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)arguments.hpp	1.103 07/06/27 11:12:35 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Arguments parses the command line and recognizes options
+@@ -67,7 +64,7 @@
+     }
+     return false;
+   }
+-    
++
+   void append_value(const char *value) {
+     char *sp;
+     size_t len = 0;
+@@ -89,7 +86,7 @@
+         _value = sp;
+       }
+     }
+-  } 
++  }
+ 
+   // Constructor
+   SystemProperty(const char* key, const char* value, bool writeable) {
+@@ -156,18 +153,18 @@
+   AgentLibrary* first() const               { return _first; }
+ 
+   // add to the end of the list
+-  void add(AgentLibrary* lib) { 
+-    if (is_empty()) { 
+-      _first = _last = lib; 
+-    } else { 
+-      _last->_next = lib; 
+-      _last = lib; 
++  void add(AgentLibrary* lib) {
++    if (is_empty()) {
++      _first = _last = lib;
++    } else {
++      _last->_next = lib;
++      _last = lib;
+     }
+     lib->_next = NULL;
+   }
+ 
+   // search for and remove a library known to be in the list
+-  void remove(AgentLibrary* lib) { 
++  void remove(AgentLibrary* lib) {
+     AgentLibrary* curr;
+     AgentLibrary* prev = NULL;
+     for (curr = first(); curr != NULL; prev = curr, curr = curr->next()) {
+@@ -195,7 +192,7 @@
+     _first = NULL;
+     _last = NULL;
+   }
+-};  
++};
+ 
+ 
+ class Arguments : AllStatic {
+@@ -216,7 +213,7 @@
+     arg_in_range   = 0
+   };
+ 
+- private:  
++ private:
+ 
+   // an array containing all flags specified in the .hotspotrc file
+   static char** _jvm_flags_array;
+@@ -227,9 +224,9 @@
+   // string containing all java command (class/jarfile name and app args)
+   static char* _java_command;
+ 
+-  // Property list 
++  // Property list
+   static SystemProperty* _system_properties;
+-  
++
+   // Quick accessor to System properties in the list:
+   static SystemProperty *_java_ext_dirs;
+   static SystemProperty *_java_endorsed_dirs;
+@@ -242,19 +239,19 @@
+   // Meta-index for knowing what packages are in the boot class path
+   static char* _meta_index_path;
+   static char* _meta_index_dir;
+-    
++
+   // java.vendor.url.bug, bug reporting URL for fatal errors.
+   static const char* _java_vendor_url_bug;
+ 
+-  // sun.java.launcher, private property to provide information about 
++  // sun.java.launcher, private property to provide information about
+   // java/gamma launcher
+   static const char* _sun_java_launcher;
+ 
+   // sun.java.launcher.pid, private property
+   static int    _sun_java_launcher_pid;
+ 
+-  // Option flags       
+-  static bool   _has_profile;  
++  // Option flags
++  static bool   _has_profile;
+   static bool   _has_alloc_profile;
+   static const char*  _gc_log_filename;
+   static uintx  _initial_heap_size;
+@@ -342,7 +339,7 @@
+   static void describe_range_error(ArgsRange errcode);
+   static ArgsRange check_memory_size(jlong size, jlong min_size);
+   static ArgsRange parse_memory_size(const char* s, jlong* long_arg,
+-				     jlong min_size);
++                                     jlong min_size);
+ 
+   // methods to build strings from individual args
+   static void build_jvm_args(const char* arg);
+@@ -351,7 +348,7 @@
+   static const char* build_resource_string(char** args, int count);
+ 
+   static bool methodExists(
+-    char* className, char* methodName, 
++    char* className, char* methodName,
+     int classesNum, char** classes, bool* allMethods,
+     int methodsNum, char** methods, bool* allClasses
+   );
+@@ -360,28 +357,28 @@
+     const char* line,
+     short* classesNum, short* classesMax, char*** classes, bool** allMethods,
+     short* methodsNum, short* methodsMax, char*** methods, bool** allClasses
+-  ); 
++  );
+ 
+   // Returns true if the string s is in the list of
+   // flags made obsolete in 1.5.0.
+   static bool made_obsolete_in_1_5_0(const char* s);
+ 
+-  static short	CompileOnlyClassesNum;
+-  static short	CompileOnlyClassesMax;
+-  static char**	CompileOnlyClasses;
+-  static bool*	CompileOnlyAllMethods;
+-
+-  static short	CompileOnlyMethodsNum;
+-  static short	CompileOnlyMethodsMax;
+-  static char**	CompileOnlyMethods;
+-  static bool*	CompileOnlyAllClasses;
+-  
+-  static short	InterpretOnlyClassesNum;
+-  static short	InterpretOnlyClassesMax;
+-  static char**	InterpretOnlyClasses;
+-  static bool*	InterpretOnlyAllMethods;
++  static short  CompileOnlyClassesNum;
++  static short  CompileOnlyClassesMax;
++  static char** CompileOnlyClasses;
++  static bool*  CompileOnlyAllMethods;
++
++  static short  CompileOnlyMethodsNum;
++  static short  CompileOnlyMethodsMax;
++  static char** CompileOnlyMethods;
++  static bool*  CompileOnlyAllClasses;
++
++  static short  InterpretOnlyClassesNum;
++  static short  InterpretOnlyClassesMax;
++  static char** InterpretOnlyClasses;
++  static bool*  InterpretOnlyAllMethods;
+ 
+-  static bool	CheckCompileOnly;
++  static bool   CheckCompileOnly;
+ 
+   static char*  SharedArchivePath;
+ 
+@@ -393,7 +390,7 @@
+   // Used by os_solaris
+   static bool process_settings_file(const char* file_name, bool should_exist, jboolean ignore_unrecognized);
+ 
+-  // return a char* array containing all options 
++  // return a char* array containing all options
+   static char** jvm_flags_array()          { return _jvm_flags_array; }
+   static char** jvm_args_array()           { return _jvm_args_array; }
+   static int num_jvm_flags()               { return _num_jvm_flags; }
+@@ -441,7 +438,7 @@
+   static AgentLibrary* libraries()          { return _libraryList.first(); }
+   static bool init_libraries_at_startup()   { return !_libraryList.is_empty(); }
+   static void convert_library_to_agent(AgentLibrary* lib)
+-                                            { _libraryList.remove(lib); 
++                                            { _libraryList.remove(lib);
+                                               _agentList.add(lib); }
+ 
+   // -agentlib -agentpath
+@@ -453,14 +450,14 @@
+   static exit_hook_t     exit_hook()        { return _exit_hook; }
+   static vfprintf_hook_t vfprintf_hook()    { return _vfprintf_hook; }
+ 
+-  static bool GetCheckCompileOnly ()	    { return CheckCompileOnly; }
++  static bool GetCheckCompileOnly ()        { return CheckCompileOnly; }
+ 
+   static const char* GetSharedArchivePath() { return SharedArchivePath; }
+ 
+   static bool CompileMethod(char* className, char* methodName) {
+     return
+       methodExists(
+-        className, methodName, 
++        className, methodName,
+         CompileOnlyClassesNum, CompileOnlyClasses, CompileOnlyAllMethods,
+         CompileOnlyMethodsNum, CompileOnlyMethods, CompileOnlyAllClasses
+       );
+@@ -480,10 +477,10 @@
+   static int  PropertyList_count(SystemProperty* pl);
+   static const char* PropertyList_get_key_at(SystemProperty* pl,int index);
+   static char* PropertyList_get_value_at(SystemProperty* pl,int index);
+-    
++
+   // Miscellaneous System property value getter and setters.
+   static void set_dll_dir(char *value) { _sun_boot_library_path->set_value(value); }
+-  static void set_java_home(char *value) { _java_home->set_value(value); }       
++  static void set_java_home(char *value) { _java_home->set_value(value); }
+   static void set_library_path(char *value) { _java_library_path->set_value(value); }
+   static void set_ext_dirs(char *value) { _java_ext_dirs->set_value(value); }
+   static void set_endorsed_dirs(char *value) { _java_endorsed_dirs->set_value(value); }
+@@ -493,17 +490,22 @@
+     _meta_index_path = meta_index_path;
+     _meta_index_dir  = meta_index_dir;
+   }
+- 
++
+   static char *get_java_home() { return _java_home->value(); }
+   static char *get_dll_dir() { return _sun_boot_library_path->value(); }
+   static char *get_endorsed_dir() { return _java_endorsed_dirs->value(); }
+   static char *get_sysclasspath() { return _sun_boot_class_path->value(); }
+   static char* get_meta_index_path() { return _meta_index_path; }
+   static char* get_meta_index_dir()  { return _meta_index_dir;  }
+-             
++
+   // Operation modi
+   static Mode mode()                        { return _mode; }
+ 
+   // Utility: copies src into buf, replacing "%%" with "%" and "%p" with pid.
+   static bool copy_expand_pid(const char* src, size_t srclen, char* buf, size_t buflen);
++
++#ifdef KERNEL
++  // For java kernel vm, return property string for kernel properties.
++  static char *get_kernel_properties();
++#endif // KERNEL
+ };
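+
get_kernel_properties(), declared at the end of the class above and defined in the arguments.cpp hunk earlier, concatenates every system property whose key begins with "kernel." into a single "-Dkey=value " string using a measure-then-format double pass. A sketch of that pattern over a plain array, assuming malloc/snprintf in place of AllocateHeap/jio_snprintf (the Prop type is illustrative, not HotSpot's SystemProperty list):

  #include <cstdio>
  #include <cstdlib>
  #include <cstring>

  struct Prop { const char* key; const char* value; };

  // Pass 1 measures, pass 2 formats, the same shape as the patch, which
  // budgets strlen(key) + strlen(value) + 5 bytes per "-Dkey=value " entry.
  char* kernel_properties_sketch(const Prop* props, int n) {
    size_t length = 0;
    for (int i = 0; i < n; i++)
      if (strncmp(props[i].key, "kernel.", 7) == 0)
        length += strlen(props[i].key) + strlen(props[i].value) + 5;
    char* out = (char*)malloc(length + 1);
    if (out == NULL) return NULL;
    size_t pos = 0;
    for (int i = 0; i < n; i++)
      if (strncmp(props[i].key, "kernel.", 7) == 0)
        pos += (size_t)snprintf(out + pos, length + 1 - pos, "-D%s=%s ",
                                props[i].key, props[i].value);
    out[pos] = '\0';                   // "" when no kernel.* property exists
    return out;                        // caller frees
  }

  int main() {
    Prop props[] = { { "kernel.download.url", "http://example.invalid/jk" },
                     { "java.home", "/opt/jdk" } };    // illustrative data
    char* s = kernel_properties_sketch(props, 2);
    if (s != NULL) { puts(s); free(s); }
    return 0;
  }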
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/atomic.cpp openjdk/hotspot/src/share/vm/runtime/atomic.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/atomic.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/atomic.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)atomic.cpp	1.14 07/05/05 17:06:42 JVM"
+-#endif
+ /*
+  * Copyright 2001-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/atomic.hpp openjdk/hotspot/src/share/vm/runtime/atomic.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/atomic.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/atomic.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)atomic.hpp	1.22 07/05/05 17:06:42 JVM"
+-#endif
+ /*
+  * Copyright 1999-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class Atomic : AllStatic {
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/biasedLocking.cpp openjdk/hotspot/src/share/vm/runtime/biasedLocking.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/biasedLocking.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/biasedLocking.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)biasedLocking.cpp	1.15 07/05/23 10:53:58 JVM"
+-#endif
+ 
+ /*
+  * Copyright 2005-2007 Sun Microsystems, Inc.  All Rights Reserved.
+@@ -23,7 +20,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -292,7 +289,7 @@
+   if (revocation_count <= BiasedLockingBulkRevokeThreshold) {
+     revocation_count = k->atomic_incr_biased_lock_revocation_count();
+   }
+-    
++
+   if (revocation_count == BiasedLockingBulkRevokeThreshold) {
+     return HR_BULK_REVOKE;
+   }
+@@ -447,7 +444,7 @@
+     , _status_code(BiasedLocking::NOT_BIASED) {}
+ 
+   virtual VMOp_Type type() const { return VMOp_RevokeBias; }
+-  
++
+   virtual bool doit_prologue() {
+     // Verify that there is actual work to do since the callers just
+     // give us locked object(s). If we don't find any biased objects
+@@ -503,7 +500,7 @@
+     , _bulk_rebias(bulk_rebias)
+     , _attempt_rebias_of_object(attempt_rebias_of_object) {}
+ 
+-  virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }  
++  virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }
+   virtual bool doit_prologue()   { return true; }
+ 
+   virtual void doit() {
+@@ -573,7 +570,7 @@
+         }
+       }
+     }
+-  }    
++  }
+ 
+   HeuristicsResult heuristics = update_heuristics(obj(), attempt_rebias);
+   if (heuristics == HR_NOT_BIASED) {
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/biasedLocking.hpp openjdk/hotspot/src/share/vm/runtime/biasedLocking.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/biasedLocking.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/biasedLocking.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)biasedLocking.hpp	1.11 07/05/17 16:05:21 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This class describes operations to implement Store-Free Biased
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/compilationPolicy.cpp openjdk/hotspot/src/share/vm/runtime/compilationPolicy.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/compilationPolicy.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/compilationPolicy.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)compilationPolicy.cpp	1.45 07/05/05 17:06:45 JVM"
+-#endif
+ /*
+  * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -63,7 +60,7 @@
+ 
+ // Returns true if m must be compiled before executing it
+ // This is intended to force compiles for methods (usually for
+-// debugging) that would otherwise be interpreted for some reason. 
++// debugging) that would otherwise be interpreted for some reason.
+ bool CompilationPolicy::mustBeCompiled(methodHandle m) {
+   if (m->has_compiled_code()) return false;       // already compiled
+   if (!canBeCompiled(m))      return false;
+@@ -72,7 +69,7 @@
+          (UseCompiler && AlwaysCompileLoopMethods && m->has_loops()); // eagerly compile loop methods
+ }
+ 
+-// Returns true if m is allowed to be compiled   
++// Returns true if m is allowed to be compiled
+ bool CompilationPolicy::canBeCompiled(methodHandle m) {
+   if (m->is_abstract()) return false;
+   if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false;
+@@ -114,7 +111,7 @@
+   InvocationCounter* b = m->backedge_counter();
+ 
+   // Don't set invocation_counter's value too low otherwise the method will
+-  // look like immature (ic < ~5300) which prevents the inlining based on 
++  // look like immature (ic < ~5300) which prevents the inlining based on
+   // the type profiling.
+   i->set(i->state(), CompileThreshold);
+   // Don't reset counter too low - it is used to check if OSR method is ready.
+@@ -198,14 +195,14 @@
+   const char* comment = "count";
+ 
+   if (m->code() == NULL && !delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler) {
+-    ResourceMark rm(THREAD);  
++    ResourceMark rm(THREAD);
+     JavaThread *thread = (JavaThread*)THREAD;
+     frame       fr     = thread->last_frame();
+     assert(fr.is_interpreted_frame(), "must be interpreted");
+     assert(fr.interpreter_frame_method() == m(), "bad method");
+ 
+     if (TraceCompilationPolicy) {
+-      tty->print("method invocation trigger: "); 
++      tty->print("method invocation trigger: ");
+       m->print_short_name(tty);
+       tty->print(" ( interpreted " INTPTR_FORMAT ", size=%d ) ", (address)m(), m->code_size());
+     }
+@@ -216,9 +213,9 @@
+ 
+     if (first->top_method()->code() != NULL) {
+       // called obsolete method/nmethod -- no need to recompile
+-      if (TraceCompilationPolicy) tty->print_cr(" --> " INTPTR_FORMAT, first->top_method()->code());    
++      if (TraceCompilationPolicy) tty->print_cr(" --> " INTPTR_FORMAT, first->top_method()->code());
+     } else if (compilation_level(m, InvocationEntryBci) == CompLevel_fast_compile) {
+-      // Tier1 compilation policy avaoids stack walking.      
++      // Tier1 compilation policy avoids stack walking.
+       CompileBroker::compile_method(m, InvocationEntryBci,
+                                     m, hot_count, comment, CHECK);
+     } else {
+@@ -265,8 +262,8 @@
+         if (m->interpreter_invocation_count() < Tier2CompileThreshold) {
+           comp_level = CompLevel_fast_compile;
+         }
+-      } else if (m->invocation_count() + m->backedge_count() < 
+-                 Tier2CompileThreshold) { 
++      } else if (m->invocation_count() + m->backedge_count() <
++                 Tier2CompileThreshold) {
+         comp_level = CompLevel_fast_compile;
+       }
+     }
+@@ -281,7 +278,7 @@
+   // into its caller
+   RFrame* current = stack->at(0); // current choice for stopping
+   assert( current && !current->is_compiled(), "" );
+-  const char* msg = NULL; 
++  const char* msg = NULL;
+ 
+   while (1) {
+ 
+@@ -295,9 +292,9 @@
+     methodHandle next_m = next->top_method();
+ 
+     if (TraceCompilationPolicy && Verbose) {
+-      tty->print("[caller: "); 
+-      next_m->print_short_name(tty); 
+-      tty->print("] "); 
++      tty->print("[caller: ");
++      next_m->print_short_name(tty);
++      tty->print("] ");
+     }
+ 
+     if( !Inline ) {           // Inlining turned off
+@@ -352,7 +349,7 @@
+     // Caller counts / call-site counts; i.e. is this call site
+     // a hot call site for method next_m?
+     int freq = (invcnt) ? cnt/invcnt : cnt;
+-    
++
+     // Check size and frequency limits
+     if ((msg = shouldInline(m, freq, cnt)) != NULL) {
+       break;
+@@ -376,8 +373,8 @@
+     }
+ 
+     if (TraceCompilationPolicy && Verbose) {
+-      tty->print("\n\t     check caller: "); 
+-      next_m->print_short_name(tty); 
++      tty->print("\n\t     check caller: ");
++      next_m->print_short_name(tty);
+       tty->print(" ( interpreted " INTPTR_FORMAT ", size=%d ) ", (address)next_m(), next_m->code_size());
+     }
+ 
+@@ -426,17 +423,17 @@
+ 
+ 
+ const char* StackWalkCompPolicy::shouldNotInline(methodHandle m) {
+-  // negative filter: should send NOT be inlined?  returns NULL (--> inline) or rejection msg 
++  // negative filter: should send NOT be inlined?  returns NULL (--> inline) or rejection msg
+   if (m->is_abstract()) return (_msg = "abstract method");
+   // note: we allow ik->is_abstract()
+   if (!instanceKlass::cast(m->method_holder())->is_initialized()) return (_msg = "method holder not initialized");
+-  if (m->is_native()) return (_msg = "native method"); 
++  if (m->is_native()) return (_msg = "native method");
+   nmethod* m_code = m->code();
+-  if( m_code != NULL && m_code->instructions_size() > InlineSmallCode ) 
++  if( m_code != NULL && m_code->instructions_size() > InlineSmallCode )
+     return (_msg = "already compiled into a big method");
+ 
+   // use frequency-based objections only for non-trivial methods
+-  if (m->code_size() <= MaxTrivialSize) return NULL;    
++  if (m->code_size() <= MaxTrivialSize) return NULL;
+   if (UseInterpreter) {     // don't use counts with -Xcomp
+     if ((m->code() == NULL) && m->was_never_executed()) return (_msg = "never executed");
+     if (!m->was_executed_more_than(MIN2(MinInliningThreshold, CompileThreshold >> 1))) return (_msg = "executed < MinInliningThreshold times");
+@@ -449,4 +446,3 @@
+ 
+ 
+ #endif // COMPILER2
+-
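+
The compilationPolicy.cpp hunks above are whitespace and comment cleanup, but the logic they pass through decides the tier: a method whose counters are still below Tier2CompileThreshold is sent to the fast tier-1 compiler, everything else to the optimizing compiler. A sketch of that decision with stand-in fields (the struct and the threshold default are illustrative, not HotSpot's methodOop accessors):

  enum CompLevel { CompLevel_fast_compile, CompLevel_full_optimization };

  struct MethodCounters {
    int  invocation_count;
    int  backedge_count;
    int  interpreter_invocation_count;
    bool triggered_from_interpreter;   // was the trigger an interpreted frame?
  };

  const int Tier2CompileThreshold = 10000;   // placeholder; set via -XX: flags

  CompLevel compilation_level_sketch(const MethodCounters& m) {
    if (m.triggered_from_interpreter) {
      if (m.interpreter_invocation_count < Tier2CompileThreshold)
        return CompLevel_fast_compile;       // still warming up in the interpreter
    } else if (m.invocation_count + m.backedge_count < Tier2CompileThreshold) {
      return CompLevel_fast_compile;         // compiled code, but not yet hot
    }
    return CompLevel_full_optimization;
  }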
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/compilationPolicy.hpp openjdk/hotspot/src/share/vm/runtime/compilationPolicy.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/compilationPolicy.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/compilationPolicy.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)compilationPolicy.hpp	1.15 07/05/05 17:06:44 JVM"
+-#endif
+ /*
+  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The CompilationPolicy selects which method (if any) should be compiled.
+@@ -34,7 +31,7 @@
+   static CompilationPolicy* _policy;
+   // Accumulated time
+   static elapsedTimer       _accumulated_time;
+-  
++
+   static bool               _in_vm_startup;
+ 
+  public:
+@@ -50,7 +47,7 @@
+   static  bool delayCompilationDuringStartup() { return _in_vm_startup; }
+ 
+   static bool mustBeCompiled(methodHandle m);      // m must be compiled before executing it
+-  static bool canBeCompiled(methodHandle m);       // m is allowed to be compiled   
++  static bool canBeCompiled(methodHandle m);       // m is allowed to be compiled
+ 
+   static void set_policy(CompilationPolicy* policy) { _policy = policy; }
+   static CompilationPolicy* policy() { return _policy; }
+@@ -85,9 +82,9 @@
+   static const char* _msg;            // reason for not inlining
+ 
+   static const char* shouldInline   (methodHandle callee, float frequency, int cnt);
+-  // positive filter: should send be inlined?  returns NULL (--> yes) or rejection msg 
++  // positive filter: should send be inlined?  returns NULL (--> yes) or rejection msg
+   static const char* shouldNotInline(methodHandle callee);
+-  // negative filter: should send NOT be inlined?  returns NULL (--> inline) or rejection msg 
++  // negative filter: should send NOT be inlined?  returns NULL (--> inline) or rejection msg
+ 
+ };
+ #endif
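+
shouldInline and shouldNotInline above follow one convention, spelled out in the comments this hunk reflows: a filter returns NULL to mean "no objection" and a short rejection message otherwise, so the caller can both branch on the result and print it. A minimal sketch of the negative filter under that convention (limits and fields are placeholders, not HotSpot's flags):

  #include <cstddef>

  const int MaxTrivialSize  = 6;       // placeholder for -XX:MaxTrivialSize
  const int InlineSmallCode = 1000;    // placeholder for -XX:InlineSmallCode

  struct Callee {
    bool is_abstract;
    bool is_native;
    int  bytecode_size;
    int  compiled_size;                // 0 if not yet compiled
  };

  // NULL -> may inline; otherwise the string names the objection.
  const char* should_not_inline_sketch(const Callee& m) {
    if (m.is_abstract) return "abstract method";
    if (m.is_native)   return "native method";
    if (m.compiled_size > InlineSmallCode)
      return "already compiled into a big method";
    if (m.bytecode_size <= MaxTrivialSize)
      return NULL;                     // trivial methods skip frequency checks
    return NULL;                       // frequency-based objections would go here
  }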
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/deoptimization.cpp openjdk/hotspot/src/share/vm/runtime/deoptimization.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/deoptimization.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/deoptimization.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)deoptimization.cpp	1.282 07/05/17 16:05:24 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -89,12 +86,15 @@
+ }
+ 
+ 
+-JRT_LEAF(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* thread))
++// In order to make fetch_unroll_info work properly with escape
++// analysis, The method was changed from JRT_LEAF to JRT_BLOCK_ENTRY and
++// ResetNoHandleMark and HandleMark were removed from it. The actual reallocation
++// of previously eliminated objects occurs in realloc_objects, which is
++// called from the method fetch_unroll_info_helper below.
++JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* thread))
+   // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
+   // but makes the entry a little slower. There is however a little dance we have to
+   // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro
+-  ResetNoHandleMark rnhm; // No-op in release/product versions  
+-  HandleMark hm;
+ 
+   // fetch_unroll_info() is called at the beginning of the deoptimization
+   // handler. Note this fact before we start generating temporary frames
+@@ -111,7 +111,7 @@
+ 
+   // Note: there is a safepoint safety issue here. No matter whether we enter
+   // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once
+-  // the vframeArray is created. 
++  // the vframeArray is created.
+   //
+ 
+   // Allocate our special deoptimization ResourceMark
+@@ -125,13 +125,68 @@
+   // Now get the deoptee with a valid map
+   frame deoptee = stub_frame.sender(&map);
+ 
+-  // We are safepoint safe up to this call
+-  vframeArray* array = create_vframeArray(thread, deoptee, &map);
++  // Create a growable array of VFrames where each VFrame represents an inlined
++  // Java frame.  This storage is allocated with the usual system arena.
++  assert(deoptee.is_compiled_frame(), "Wrong frame type");
++  GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10);
++  vframe* vf = vframe::new_vframe(&deoptee, &map, thread);
++  while (!vf->is_top()) {
++    assert(vf->is_compiled_frame(), "Wrong frame type");
++    chunk->push(compiledVFrame::cast(vf));
++    vf = vf->sender();
++  }
++  assert(vf->is_compiled_frame(), "Wrong frame type");
++  chunk->push(compiledVFrame::cast(vf));
+ 
+-  // We are no longer safepoint safe. If a safepoint occurs from here on
++#ifdef COMPILER2
++  // Reallocate the non-escaping objects and restore their fields. Then
++  // relock objects if synchronization on them was eliminated.
++  if (DoEscapeAnalysis && EliminateAllocations) {
++    GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();
++    bool reallocated = false;
++    if (objects != NULL) {
++      JRT_BLOCK
++        reallocated = realloc_objects(thread, &deoptee, objects, THREAD);
++      JRT_END
++    }
++    if (reallocated) {
++      reassign_fields(&deoptee, &map, objects);
++#ifndef PRODUCT
++      if (TraceDeoptimization) {
++        ttyLocker ttyl;
++        tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, thread);
++        print_objects(objects);
++      }
++#endif
++    }
++    for (int i = 0; i < chunk->length(); i++) {
++      GrowableArray<MonitorValue*>* monitors = chunk->at(i)->scope()->monitors();
++      if (monitors != NULL) {
++        relock_objects(&deoptee, &map, monitors);
++#ifndef PRODUCT
++        if (TraceDeoptimization) {
++          ttyLocker ttyl;
++          tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, thread);
++          for (int j = 0; j < monitors->length(); j++) {
++            MonitorValue* mv = monitors->at(j);
++            if (mv->eliminated()) {
++              StackValue* owner = StackValue::create_stack_value(&deoptee, &map, mv->owner());
++              tty->print_cr("     object <" INTPTR_FORMAT "> locked", owner->get_obj()());
++            }
++          }
++        }
++#endif
++      }
++    }
++  }
++#endif // COMPILER2
++  // Ensure that no safepoint is taken after pointers have been stored
++  // in fields of rematerialized objects.  If a safepoint occurs from here on
+   // out the java state residing in the vframeArray will be missed.
+   No_Safepoint_Verifier no_safepoint;
+ 
++  vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk);
++
+   assert(thread->vframe_array_head() == NULL, "Pending deopt!");;
+   thread->set_vframe_array_head(array);
+ 
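+
The hunk above carries the substantive change in this file: fetch_unroll_info becomes JRT_BLOCK_ENTRY so that, under escape analysis (COMPILER2 with DoEscapeAnalysis and EliminateAllocations), scalar-replaced objects can be reallocated and their eliminated locks retaken before the No_Safepoint_Verifier window begins. Note that the inner monitor walk in the trace block uses its own index j; reusing the outer frame index would both skip frames and read the wrong monitor list. The corrected shape as a self-contained example (illustrative data only, not HotSpot types):

  #include <cstdio>
  #include <vector>

  int main() {
    // Frames of a deoptimized compiled activation, each with its monitors.
    std::vector<std::vector<int>> monitors_per_frame = { {1}, {2, 3}, {} };
    for (size_t i = 0; i < monitors_per_frame.size(); i++) {       // frames
      for (size_t j = 0; j < monitors_per_frame[i].size(); j++) {  // monitors
        printf("frame %zu: relock monitor %d\n", i, monitors_per_frame[i][j]);
      }
    }
    return 0;
  }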
+@@ -146,12 +201,12 @@
+       // Because of inlining we could have multiple vframes for a single frame
+       // and several of the vframes could have deferred writes. Find them all.
+       if (list->at(i)->id() == array->original().id()) {
+-	jvmtiDeferredLocalVariableSet* dlv = list->at(i);
+-	list->remove_at(i);
+-	// individual jvmtiDeferredLocalVariableSet are CHeapObj's
+-	delete dlv;
++        jvmtiDeferredLocalVariableSet* dlv = list->at(i);
++        list->remove_at(i);
++        // individual jvmtiDeferredLocalVariableSet are CHeapObj's
++        delete dlv;
+       } else {
+-	i++;
++        i++;
+       }
+     } while ( i < list->length() );
+     if (list->length() == 0) {
+@@ -159,7 +214,7 @@
+       // free the list and elements back to C heap.
+       delete list;
+     }
+-    
++
+   }
+ 
+   // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
+@@ -194,7 +249,7 @@
+   int popframe_extra_args = 0;
+   // Create an interpreter return address for the stub to use as its return
+   // address so the skeletal frames are perfectly walkable
+-  frame_pcs[number_of_frames] = AbstractInterpreter::deopt_entry(vtos, 0);
++  frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0);
+ 
+   // PopFrame requires that the preserved incoming arguments from the recently-popped topmost
+   // activation be put back on the expression stack of the caller for reexecution
+@@ -209,7 +264,7 @@
+   //
+   // Now a pc in frame_pcs is actually the return address to the frame's caller (a frame
+   // owns the space for the return address to it's caller).  Confusing ain't it.
+-  // 
++  //
+   // The vframe array can address vframes with indices running from
+   // 0.._frames-1. Index  0 is the youngest frame and _frame - 1 is the oldest (root) frame.
+   // When we create the skeletal frames we need the oldest frame to be in the zero slot
+@@ -220,15 +275,15 @@
+     // frame[number_of_frames - 1 ] = on_stack_size(youngest)
+     // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
+     // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
+-    frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters, 
+-												    callee_locals,
+-												    index == 0,
+-												    popframe_extra_args);
++    frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters,
++                                                                                                    callee_locals,
++                                                                                                    index == 0,
++                                                                                                    popframe_extra_args);
+     // This pc doesn't have to be perfect just good enough to identify the frame
+     // as interpreted so the skeleton frame will be walkable
+     // The correct pc will be set when the skeleton frame is completely filled out
+     // The final pc we store in the loop is wrong and will be overwritten below
+-    frame_pcs[number_of_frames - 1 - index ] = AbstractInterpreter::deopt_entry(vtos, 0) - frame::pc_return_offset;
++    frame_pcs[number_of_frames - 1 - index ] = Interpreter::deopt_entry(vtos, 0) - frame::pc_return_offset;
+ 
+     callee_parameters = array->element(index)->method()->size_of_parameters();
+     callee_locals = array->element(index)->method()->max_locals();
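
A note on the indexing in the hunk above: the vframe array is youngest-first (index 0 is the youngest frame), while the unroll arrays the stub consumes want the oldest frame in slot 0, hence the number_of_frames - 1 - index reversal. A minimal standalone sketch of that mapping (illustration only; nothing below is HotSpot code):

    // Standalone illustration of the index reversal above: vframe index 0
    // is the youngest frame, but the unroll arrays put the oldest in slot 0.
    #include <cstdio>

    int main() {
      const int number_of_frames = 3;   // hypothetical inlining depth
      const char* names[] = { "youngest", "middle", "oldest (root)" };
      for (int index = 0; index < number_of_frames; index++) {
        // same arithmetic as frame_sizes[...] / frame_pcs[...] above
        printf("vframe %d (%s) -> unroll slot %d\n",
               index, names[index], number_of_frames - 1 - index);
      }
      return 0;
    }

Running this prints vframe 0 (youngest) -> unroll slot 2 and vframe 2 (oldest (root)) -> unroll slot 0, matching the comment in the patch that the oldest frame must land in the zero slot.
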
+@@ -261,10 +316,10 @@
+   // locals-parms. This is because without a c2i adapter the parm
+   // area as created by the compiled frame will not be usable by
+   // the interpreter. (Depending on the calling convention there
+-  // may not even be enough space). 
++  // may not even be enough space).
+ 
+   // QQQ I'd rather see this pushed down into last_frame_adjust
+-  // and have it take the sender (aka caller). 
++  // and have it take the sender (aka caller).
+ 
+   if (deopt_sender.is_compiled_frame()) {
+     caller_adjustment = last_frame_adjust(0, callee_locals);
+@@ -284,12 +339,12 @@
+ 
+   assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
+ 
+-  UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord, 
+-				      caller_adjustment * BytesPerWord,
+-				      number_of_frames,
+-				      frame_sizes,
+-				      frame_pcs,
+-				      return_type);
++  UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
++                                      caller_adjustment * BytesPerWord,
++                                      number_of_frames,
++                                      frame_sizes,
++                                      frame_pcs,
++                                      return_type);
+ #if defined(IA32) || defined(AMD64)
+   // We need a way to pass fp to the unpacking code so the skeletal frames
+   // come out correct. This is only needed for x86 because of c2 using ebp
+@@ -351,7 +406,7 @@
+     // C++ interpeter will clear has_pending_popframe when it enters
+     // with method_resume. For deopt_resume2 we clear it now.
+     if (thread->popframe_forcing_deopt_reexecution())
+-	thread->clear_popframe_condition();
++        thread->clear_popframe_condition();
+ #endif /* CC_INTERP */
+   }
+ 
+@@ -395,8 +450,8 @@
+ 
+   BasicType bt = info->return_type();
+ 
+-  // If we have an exception pending, claim that the return type is an oop 
+-  // so the deopt_blob does not overwrite the exception_oop. 
++  // If we have an exception pending, claim that the return type is an oop
++  // so the deopt_blob does not overwrite the exception_oop.
+ 
+   if (exec_mode == Unpack_exception)
+     bt = T_OBJECT;
+@@ -461,10 +516,10 @@
+           // a given bytecode or the state after, so we try both
+           switch (cur_code) {
+             case Bytecodes::_invokevirtual:
+-            case Bytecodes::_invokespecial:     
+-            case Bytecodes::_invokestatic:      
+-            case Bytecodes::_invokeinterface:   
+-            case Bytecodes::_athrow:   
++            case Bytecodes::_invokespecial:
++            case Bytecodes::_invokestatic:
++            case Bytecodes::_invokeinterface:
++            case Bytecodes::_athrow:
+               break;
+             default: {
+               InterpreterOopMap next_mask;
+@@ -543,21 +598,206 @@
+   return 0;
+ }
+ 
+-vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map) {
+-  // Create a growable array of VFrames where each VFrame represents an inlined
+-  // Java frame.  This storage is allocated with the usual system arena.
+-#ifdef ASSERT
+-  assert(fr.is_compiled_frame(), "Wrong frame type");
+-#endif /* ASSERT */
+-  GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10);  
+-  vframe* vf = vframe::new_vframe(&fr, reg_map, thread);        
+-  while (!vf->is_top()) {
+-    assert(vf->is_compiled_frame(), "Wrong frame type");
+-    chunk->push(compiledVFrame::cast(vf));
+-    vf = vf->sender();
++
++#ifdef COMPILER2
++bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS) {
++  Handle pending_exception(thread->pending_exception());
++  const char* exception_file = thread->exception_file();
++  int exception_line = thread->exception_line();
++  thread->clear_pending_exception();
++
++  for (int i = 0; i < objects->length(); i++) {
++    assert(objects->at(i)->is_object(), "invalid debug information");
++    ObjectValue* sv = (ObjectValue*) objects->at(i);
++
++    KlassHandle k(((ConstantOopReadValue*) sv->klass())->value()());
++    oop obj = NULL;
++
++    if (k->oop_is_instance()) {
++      instanceKlass* ik = instanceKlass::cast(k());
++      obj = ik->allocate_instance(CHECK_(false));
++    } else if (k->oop_is_typeArray()) {
++      typeArrayKlass* ak = typeArrayKlass::cast(k());
++      assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
++      int len = sv->field_size() / type2size[ak->element_type()];
++      obj = ak->allocate(len, CHECK_(false));
++    } else if (k->oop_is_objArray()) {
++      objArrayKlass* ak = objArrayKlass::cast(k());
++      obj = ak->allocate(sv->field_size(), CHECK_(false));
++    }
++
++    assert(obj != NULL, "allocation failed");
++    assert(sv->value().is_null(), "redundant reallocation");
++    sv->set_value(obj);
++  }
++
++  if (pending_exception.not_null()) {
++    thread->set_pending_exception(pending_exception(), exception_file, exception_line);
++  }
++
++  return true;
++}
++
++// This assumes that the fields are stored in ObjectValue in the same order
++// they are yielded by do_nonstatic_fields.
++class FieldReassigner: public FieldClosure {
++  frame* _fr;
++  RegisterMap* _reg_map;
++  ObjectValue* _sv;
++  instanceKlass* _ik;
++  oop _obj;
++
++  int _i;
++public:
++  FieldReassigner(frame* fr, RegisterMap* reg_map, ObjectValue* sv, oop obj) :
++    _fr(fr), _reg_map(reg_map), _sv(sv), _obj(obj), _i(0) {}
++
++  int i() const { return _i; }
++
++
++  void do_field(fieldDescriptor* fd) {
++    StackValue* value =
++      StackValue::create_stack_value(_fr, _reg_map, _sv->field_at(i()));
++    int offset = fd->offset();
++    switch (fd->field_type()) {
++    case T_OBJECT: case T_ARRAY:
++      assert(value->type() == T_OBJECT, "Agreement.");
++      _obj->obj_field_put(offset, value->get_obj()());
++      break;
++
++    case T_LONG: case T_DOUBLE: {
++      assert(value->type() == T_INT, "Agreement.");
++      StackValue* low =
++        StackValue::create_stack_value(_fr, _reg_map, _sv->field_at(++_i));
++      jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
++      _obj->long_field_put(offset, res);
++      break;
++    }
++
++    case T_INT: case T_FLOAT: // 4 bytes.
++      assert(value->type() == T_INT, "Agreement.");
++      _obj->int_field_put(offset, (jint)value->get_int());
++      break;
++
++    case T_SHORT: case T_CHAR: // 2 bytes
++      assert(value->type() == T_INT, "Agreement.");
++      _obj->short_field_put(offset, (jshort)value->get_int());
++      break;
++
++    case T_BOOLEAN: // 1 byte
++      assert(value->type() == T_INT, "Agreement.");
++      _obj->bool_field_put(offset, (jboolean)value->get_int());
++      break;
++
++    default:
++      ShouldNotReachHere();
++    }
++    _i++;
+   }
+-  assert(vf->is_compiled_frame(), "Wrong frame type");
+-  chunk->push(compiledVFrame::cast(vf));
++};
++
++// restore elements of an eliminated type array
++void Deoptimization::reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type) {
++  StackValue* low;
++  jlong lval;
++  int index = 0;
++
++  for (int i = 0; i < sv->field_size(); i++) {
++    StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
++    switch(type) {
++      case T_BOOLEAN: obj->bool_at_put (index, (jboolean) value->get_int()); break;
++      case T_BYTE:    obj->byte_at_put (index, (jbyte)    value->get_int()); break;
++      case T_CHAR:    obj->char_at_put (index, (jchar)    value->get_int()); break;
++      case T_SHORT:   obj->short_at_put(index, (jshort)   value->get_int()); break;
++      case T_INT:     obj->int_at_put  (index, (jint)     value->get_int()); break;
++      case T_FLOAT:   obj->float_at_put(index, (jfloat)   value->get_int()); break;
++      case T_LONG:
++      case T_DOUBLE:
++        low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
++        lval = jlong_from((jint)value->get_int(), (jint)low->get_int());
++        sv->value()->long_field_put(index, lval);
++        break;
++      default:
++        ShouldNotReachHere();
++    }
++    index++;
++  }
++}
++
++
++// restore fields of an eliminated object array
++void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
++  for (int i = 0; i < sv->field_size(); i++) {
++    StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
++    assert(value->type() == T_OBJECT, "object element expected");
++    obj->obj_at_put(i, value->get_obj()());
++  }
++}
++
++
++// restore fields of all eliminated objects and arrays
++void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects) {
++  for (int i = 0; i < objects->length(); i++) {
++    ObjectValue* sv = (ObjectValue*) objects->at(i);
++    KlassHandle k(((ConstantOopReadValue*) sv->klass())->value()());
++    Handle obj = sv->value();
++    assert(obj.not_null(), "reallocation was missed");
++
++    if (k->oop_is_instance()) {
++      instanceKlass* ik = instanceKlass::cast(k());
++      FieldReassigner reassign(fr, reg_map, sv, obj());
++      ik->do_nonstatic_fields(&reassign);
++    } else if (k->oop_is_typeArray()) {
++      typeArrayKlass* ak = typeArrayKlass::cast(k());
++      reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
++    } else if (k->oop_is_objArray()) {
++      reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
++    }
++  }
++}
++
++
++// relock objects for which synchronization was eliminated
++void Deoptimization::relock_objects(frame* fr, RegisterMap* reg_map, GrowableArray<MonitorValue*>* monitors) {
++  for (int i = 0; i < monitors->length(); i++) {
++    MonitorValue* mv = monitors->at(i);
++    StackValue* owner = StackValue::create_stack_value(fr, reg_map, mv->owner());
++    if (mv->eliminated()) {
++      Handle obj = owner->get_obj();
++      assert(obj.not_null(), "reallocation was missed");
++      BasicLock* lock = StackValue::resolve_monitor_lock(fr, mv->basic_lock());
++      lock->set_displaced_header(obj->mark());
++      obj->set_mark((markOop) lock);
++    }
++    assert(owner->get_obj()->is_locked(), "object must be locked now");
++  }
++}
++
++
++#ifndef PRODUCT
++// print information about reallocated objects
++void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects) {
++  fieldDescriptor fd;
++
++  for (int i = 0; i < objects->length(); i++) {
++    ObjectValue* sv = (ObjectValue*) objects->at(i);
++    KlassHandle k(((ConstantOopReadValue*) sv->klass())->value()());
++    Handle obj = sv->value();
++
++    tty->print("     object <" INTPTR_FORMAT "> of type ", sv->value()());
++    k->as_klassOop()->print_value();
++    tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
++    tty->cr();
++
++    if (Verbose) {
++      k->oop_print_on(obj(), tty);
++    }
++  }
++}
++#endif
++#endif // COMPILER2
++
++vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk) {
+ 
+ #ifndef PRODUCT
+   if (TraceDeoptimization) {
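
The large block added above is the scalar-replacement restore path: realloc_objects re-creates heap objects the compiler had eliminated, FieldReassigner and the reassign_* helpers copy the saved debug-info values back into them, and relock_objects re-establishes eliminated locks. The key invariant, stated in the comment on FieldReassigner, is that values are consumed in field-declaration order, with a long or double occupying two 32-bit slots. A standalone sketch of that consumption discipline (illustration only; the types and names below are hypothetical stand-ins, not HotSpot's):

    // Standalone illustration of the FieldReassigner invariant: values are
    // consumed in field order, and a long takes two 32-bit slots (high word
    // first), so the cursor advances by 2 -- as in the T_LONG case above.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    enum BasicType { T_INT, T_LONG };
    struct Field { const char* name; BasicType type; };

    // Same idea as HotSpot's jlong_from: combine high and low 32-bit halves.
    int64_t jlong_from(int32_t hi, int32_t lo) {
      return ((int64_t)hi << 32) | (uint32_t)lo;
    }

    int main() {
      std::vector<Field>   fields = { {"a", T_INT}, {"b", T_LONG}, {"c", T_INT} };
      std::vector<int32_t> slots  = { 7, 0x1234, 0x5678, 9 };  // flattened scope values
      size_t i = 0;                                            // cursor, like _i above
      for (const Field& f : fields) {
        if (f.type == T_LONG) {
          printf("%s = 0x%llx\n", f.name,
                 (unsigned long long)jlong_from(slots[i], slots[i + 1]));
          i += 2;                                              // long consumed two slots
        } else {
          printf("%s = %d\n", f.name, slots[i]);
          i += 1;
        }
      }
      return 0;
    }

Here field b reassembles to 0x123400005678 from its two halves; if the ordering invariant were violated, every later field would read the wrong slot.
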
+@@ -595,7 +835,7 @@
+   int frame_size = caller.sp() - fr.sp();
+ 
+   frame sender = caller;
+- 
++
+   // Since the Java thread being deoptimized will eventually adjust it's own stack,
+   // the vframeArray containing the unpacking information is allocated in the C heap.
+   // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
+@@ -616,12 +856,12 @@
+         vframeArrayElement* e = array->element(index);
+         e->print(tty);
+ 
+-	/*
+-	  No printing yet.
++        /*
++          No printing yet.
+         array->vframe_at(index)->print_activation(count++);
+-	// better as...
++        // better as...
+         array->print_activation_for(index, count++);
+-	*/
++        */
+       }
+     }
+   }
+@@ -725,13 +965,13 @@
+ 
+ void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map) {
+   // Deoptimize only if the frame comes from compile code.
+-  // Do not deoptimize the frame which is already patched 
++  // Do not deoptimize the frame which is already patched
+   // during the execution of the loops below.
+   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
+     return;
+   }
+   ResourceMark rm;
+-  DeoptimizationMarker dm;    
++  DeoptimizationMarker dm;
+   if (UseBiasedLocking) {
+     revoke_biases_of_monitors(thread, fr, map);
+   }
+@@ -797,7 +1037,7 @@
+   if (HAS_PENDING_EXCEPTION) {
+     // Exception happened during classloading. We ignore the exception here, since it
+     // is going to be rethrown since the current activation is going to be deoptimzied and
+-    // the interpreter will re-execute the bytecode. 
++    // the interpreter will re-execute the bytecode.
+     CLEAR_PENDING_EXCEPTION;
+   }
+ }
+@@ -815,13 +1055,13 @@
+   RegisterMap reg_map(thread, UseBiasedLocking);
+   frame stub_frame = thread->last_frame();
+   frame fr = stub_frame.sender(&reg_map);
+-  // Make sure the calling nmethod is not getting deoptimized and removed 
++  // Make sure the calling nmethod is not getting deoptimized and removed
+   // before we are done with it.
+   nmethodLocker nl(fr.pc());
+-  
++
+   {
+     ResourceMark rm;
+-  
++
+     // Revoke biases of any monitors in the frame to ensure we can migrate them
+     revoke_biases_of_monitors(thread, fr, &reg_map);
+ 
+@@ -832,7 +1072,7 @@
+     Events::log("Uncommon trap occurred @" INTPTR_FORMAT " unloaded_class_index = %d", fr.pc(), (int) trap_request);
+     vframe*  vf  = vframe::new_vframe(&fr, &reg_map, thread);
+     compiledVFrame* cvf = compiledVFrame::cast(vf);
+-    
++
+     nmethod* nm = cvf->code();
+ 
+     ScopeDesc*      trap_scope  = cvf->scope();
+@@ -856,26 +1096,26 @@
+       char buf[100];
+       if (xtty != NULL) {
+         xtty->begin_head("uncommon_trap thread='" UINTX_FORMAT"' %s",
+-			 os::current_thread_id(),
++                         os::current_thread_id(),
+                          format_trap_request(buf, sizeof(buf), trap_request));
+         nm->log_identity(xtty);
+       }
+       symbolHandle class_name;
+       bool unresolved = false;
+       if (unloaded_class_index >= 0) {
+-	constantPoolHandle constants (THREAD, trap_method->constants());
+-	if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) {
+-	  class_name = symbolHandle(THREAD, 
++        constantPoolHandle constants (THREAD, trap_method->constants());
++        if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) {
++          class_name = symbolHandle(THREAD,
+             constants->klass_name_at(unloaded_class_index));
+-	  unresolved = true;
+-	  if (xtty != NULL)
+-	    xtty->print(" unresolved='1'");
+-	} else if (constants->tag_at(unloaded_class_index).is_symbol()) {
+-	  class_name = symbolHandle(THREAD, 
++          unresolved = true;
++          if (xtty != NULL)
++            xtty->print(" unresolved='1'");
++        } else if (constants->tag_at(unloaded_class_index).is_symbol()) {
++          class_name = symbolHandle(THREAD,
+             constants->symbol_at(unloaded_class_index));
+-	}
+-	if (xtty != NULL)
+-	  xtty->name(class_name);
++        }
++        if (xtty != NULL)
++          xtty->name(class_name);
+       }
+       if (xtty != NULL && trap_mdo.not_null()) {
+         // Dump the relevant MDO state.
+@@ -896,38 +1136,38 @@
+         }
+       }
+       if (xtty != NULL) {
+-	xtty->stamp();
+-	xtty->end_head();
++        xtty->stamp();
++        xtty->end_head();
+       }
+       if (TraceDeoptimization) {  // make noise on the tty
+-	tty->print("Uncommon trap occurred in");
+-	nm->method()->print_short_name(tty);
+-	tty->print(" (@" INTPTR_FORMAT ") thread=%d reason=%s action=%s unloaded_class_index=%d",
+-                   fr.pc(), 
++        tty->print("Uncommon trap occurred in");
++        nm->method()->print_short_name(tty);
++        tty->print(" (@" INTPTR_FORMAT ") thread=%d reason=%s action=%s unloaded_class_index=%d",
++                   fr.pc(),
+                    (int) os::current_thread_id(),
+                    trap_reason_name(reason),
+                    trap_action_name(action),
+-		   unloaded_class_index);
+-	if (class_name.not_null()) {
+-	  tty->print(unresolved ? " unresolved class: " : " symbol: ");
+-	  class_name->print_symbol_on(tty);
+-	}
+-	tty->cr();
++                   unloaded_class_index);
++        if (class_name.not_null()) {
++          tty->print(unresolved ? " unresolved class: " : " symbol: ");
++          class_name->print_symbol_on(tty);
++        }
++        tty->cr();
+       }
+       if (xtty != NULL) {
+-	// Log the precise location of the trap.
+-	for (ScopeDesc* sd = trap_scope; ; sd = sd->sender()) {
+-	  xtty->begin_elem("jvms bci='%d'", sd->bci());
+-	  xtty->method(sd->method());
+-	  xtty->end_elem();
+-	  if (sd->is_top())  break;
+-	}
+-	xtty->tail("uncommon_trap");
++        // Log the precise location of the trap.
++        for (ScopeDesc* sd = trap_scope; ; sd = sd->sender()) {
++          xtty->begin_elem("jvms bci='%d'", sd->bci());
++          xtty->method(sd->method());
++          xtty->end_elem();
++          if (sd->is_top())  break;
++        }
++        xtty->tail("uncommon_trap");
+       }
+     }
+     // (End diagnostic printout.)
+ 
+-    // Load class if necessary 
++    // Load class if necessary
+     if (unloaded_class_index >= 0) {
+       constantPoolHandle constants(THREAD, trap_method->constants());
+       load_class_by_index(constants, unloaded_class_index);
+@@ -988,7 +1228,7 @@
+       break;
+     case Action_maybe_recompile:
+       // Do not need to invalidate the present code, but we can
+-      // initiate another 
++      // initiate another
+       // Start compiler without (necessarily) invalidating the nmethod.
+       // The system will tolerate the old code, but new code should be
+       // generated when possible.
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/deoptimization.hpp openjdk/hotspot/src/share/vm/runtime/deoptimization.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/deoptimization.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/deoptimization.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)deoptimization.hpp	1.91 07/05/05 17:06:46 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,11 +19,13 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class ProfileData;
+ class vframeArray;
++class MonitorValue;
++class ObjectValue;
+ 
+ class Deoptimization : AllStatic {
+  public:
+@@ -85,7 +84,7 @@
+   // Checks all compiled methods. Invalid methods are deleted and
+   // corresponding activations are deoptimized.
+   static int deoptimize_dependents();
+-  
++
+   // Deoptimizes a frame lazily. nmethod gets patched deopt happens on return to the frame
+   static void deoptimize(JavaThread* thread, frame fr, RegisterMap *reg_map);
+ 
+@@ -100,10 +99,20 @@
+   // executing in a particular CodeBlob if UseBiasedLocking is enabled
+   static void revoke_biases_of_monitors(CodeBlob* cb);
+ 
++#ifdef COMPILER2
++  // Support for restoring non-escaping objects
++  static bool realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS);
++  static void reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type);
++  static void reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj);
++  static void reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects);
++  static void relock_objects(frame* fr, RegisterMap* reg_map, GrowableArray<MonitorValue*>* monitors);
++  NOT_PRODUCT(static void print_objects(GrowableArray<ScopeValue*>* objects);)
++#endif // COMPILER2
++
+   public:
+-  static vframeArray* create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map);
+- 
+-  // Interface used for unpacking deoptimized frames 
++  static vframeArray* create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk);
++
++  // Interface used for unpacking deoptimized frames
+ 
+   // UnrollBlock is returned by fetch_unroll_info() to the deoptimization handler (blob).
+   // This is only a CheapObj to ease debugging after a deopt failure
+@@ -111,7 +120,7 @@
+    private:
+     int       _size_of_deoptimized_frame; // Size, in bytes, of current deoptimized frame
+     int       _caller_adjustment;         // Adjustment, in bytes, to caller's SP by initial interpreted frame
+-    int       _number_of_frames;          // Number frames to unroll 
++    int       _number_of_frames;          // Number frames to unroll
+     int       _total_frame_sizes;         // Total of number*sizes frames
+     intptr_t* _frame_sizes;               // Array of frame sizes, in bytes, for unrolling the stack
+     address*  _frame_pcs;                 // Array of frame pc's, in bytes, for unrolling the stack
+@@ -121,10 +130,10 @@
+     // (which is tight on registers, especially on x86). They really ought
+     // to be PD variables but that involves moving this class into its own
+     // file to use the pd include mechanism. Maybe in a later cleanup ...
+-    intptr_t  _counter_temp;	          // SHOULD BE PD VARIABLE (x86 frame count temp)
+-    intptr_t  _initial_fp;		  // SHOULD BE PD VARIABLE (x86/c2 initial ebp)
+-    intptr_t  _unpack_kind;		  // SHOULD BE PD VARIABLE (x86 unpack kind)
+-    intptr_t  _sender_sp_temp;		  // SHOULD BE PD VARIABLE (x86 sender_sp)
++    intptr_t  _counter_temp;              // SHOULD BE PD VARIABLE (x86 frame count temp)
++    intptr_t  _initial_fp;                // SHOULD BE PD VARIABLE (x86/c2 initial ebp)
++    intptr_t  _unpack_kind;               // SHOULD BE PD VARIABLE (x86 unpack kind)
++    intptr_t  _sender_sp_temp;            // SHOULD BE PD VARIABLE (x86 sender_sp)
+    public:
+     // Constructor
+     UnrollBlock(int  size_of_deoptimized_frame,
+@@ -142,7 +151,7 @@
+     intptr_t* frame_sizes()  const { return _frame_sizes; }
+     int number_of_frames()  const { return _number_of_frames; }
+     address*  frame_pcs()   const { return _frame_pcs ; }
+-   
++
+     // Returns the total size of frames
+     int size_of_frames() const;
+ 
+@@ -153,20 +162,20 @@
+     static int frame_sizes_offset_in_bytes()               { return offset_of(UnrollBlock, _frame_sizes);               }
+     static int total_frame_sizes_offset_in_bytes()         { return offset_of(UnrollBlock, _total_frame_sizes);         }
+     static int frame_pcs_offset_in_bytes()                 { return offset_of(UnrollBlock, _frame_pcs);                 }
+-    static int register_block_offset_in_bytes()            { return offset_of(UnrollBlock, _register_block);            }  
+-    static int return_type_offset_in_bytes()               { return offset_of(UnrollBlock, _return_type);               }  
+-    static int counter_temp_offset_in_bytes()              { return offset_of(UnrollBlock, _counter_temp);              }  
+-    static int initial_fp_offset_in_bytes()                { return offset_of(UnrollBlock, _initial_fp);                }  
+-    static int unpack_kind_offset_in_bytes()               { return offset_of(UnrollBlock, _unpack_kind);               }  
+-    static int sender_sp_temp_offset_in_bytes()            { return offset_of(UnrollBlock, _sender_sp_temp);            }  
++    static int register_block_offset_in_bytes()            { return offset_of(UnrollBlock, _register_block);            }
++    static int return_type_offset_in_bytes()               { return offset_of(UnrollBlock, _return_type);               }
++    static int counter_temp_offset_in_bytes()              { return offset_of(UnrollBlock, _counter_temp);              }
++    static int initial_fp_offset_in_bytes()                { return offset_of(UnrollBlock, _initial_fp);                }
++    static int unpack_kind_offset_in_bytes()               { return offset_of(UnrollBlock, _unpack_kind);               }
++    static int sender_sp_temp_offset_in_bytes()            { return offset_of(UnrollBlock, _sender_sp_temp);            }
+ 
+     BasicType return_type() const { return _return_type; }
+     void print();
+   };
+ 
+-  //** Returns an UnrollBlock continuing information 
+-  // how to make room for the resulting interpreter frames. 
+-  // Called by assembly stub after execution has returned to 
++  //** Returns an UnrollBlock containing information on
++  // how to make room for the resulting interpreter frames.
++  // Called by assembly stub after execution has returned to
+   // deoptimized frame.
+   // @argument thread.     Thread where stub_frame resides.
+   // @see OptoRuntime::deoptimization_fetch_unroll_info_C
+@@ -331,7 +340,7 @@
+ };
+ 
+ class DeoptimizationMarker : StackObj {  // for profiling
+-  static bool _is_active;  
++  static bool _is_active;
+ public:
+   DeoptimizationMarker()  { _is_active = true; }
+   ~DeoptimizationMarker() { _is_active = false; }
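
The *_offset_in_bytes() accessors reformatted in UnrollBlock above exist because the deoptimization blob is hand-written assembly: it receives a raw UnrollBlock* and loads fields by byte offset rather than through C++ member access. A minimal sketch of the idiom, assuming offset_of behaves like the standard offsetof (the struct below is a hypothetical stand-in, not the real class):

    // Standalone sketch of the offset_of idiom: the C++ side exports field
    // offsets as plain ints so a stub can index off a raw pointer.
    #include <cstddef>
    #include <cstdio>

    struct UnrollBlockLike {            // hypothetical stand-in
      int size_of_deoptimized_frame;
      int caller_adjustment;
      int number_of_frames;
    };

    static int number_of_frames_offset_in_bytes() {
      return (int) offsetof(UnrollBlockLike, number_of_frames);
    }

    int main() {
      UnrollBlockLike info = { 128, 16, 3 };
      // What the stub effectively does with the offset it was handed:
      char* base  = (char*) &info;
      int  frames = *(int*)(base + number_of_frames_offset_in_bytes());
      printf("number_of_frames = %d\n", frames);   // prints 3
      return 0;
    }
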
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/extendedPC.hpp openjdk/hotspot/src/share/vm/runtime/extendedPC.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/extendedPC.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/extendedPC.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)extendedPC.hpp	1.17 07/05/05 17:06:46 JVM"
+-#endif
+ /*
+  * Copyright 1998-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // An ExtendedPC contains the _pc from a signal handler in a platform
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/fieldDescriptor.cpp openjdk/hotspot/src/share/vm/runtime/fieldDescriptor.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/fieldDescriptor.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/fieldDescriptor.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)fieldDescriptor.cpp	1.56 07/05/05 17:06:46 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -30,9 +27,9 @@
+ 
+ 
+ oop fieldDescriptor::loader() const {
+-  return instanceKlass::cast(_cp->pool_holder())->class_loader(); 
++  return instanceKlass::cast(_cp->pool_holder())->class_loader();
+ }
+-                   
++
+ typeArrayOop fieldDescriptor::annotations() const {
+   instanceKlass* ik = instanceKlass::cast(field_holder());
+   objArrayOop md = ik->fields_annotations();
+@@ -66,7 +63,7 @@
+   return constants()->string_at(_initial_value_index, CHECK_0);
+ }
+ 
+-void fieldDescriptor::initialize(klassOop k, int index) {    
++void fieldDescriptor::initialize(klassOop k, int index) {
+   instanceKlass* ik = instanceKlass::cast(k);
+   _cp = ik->constants();
+   typeArrayOop fields = ik->fields();
+@@ -119,7 +116,7 @@
+     case T_CHAR:
+       {
+         jchar c = obj->char_field(offset());
+-	as_int = c;
++        as_int = c;
+         st->print(" %c %d", isprint(c) ? c : ' ', c);
+       }
+       break;
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/fieldDescriptor.hpp openjdk/hotspot/src/share/vm/runtime/fieldDescriptor.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/fieldDescriptor.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/fieldDescriptor.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)fieldDescriptor.hpp	1.46 07/05/05 17:06:47 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A fieldDescriptor describes the attributes of a single field (instance or class variable).
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/fieldType.cpp openjdk/hotspot/src/share/vm/runtime/fieldType.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/fieldType.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/fieldType.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)fieldType.cpp	1.27 07/05/05 17:06:47 JVM"
+-#endif
+ /*
+  * Copyright 1997-2000 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -45,11 +42,11 @@
+   assert(sig->utf8_length() > 1, "this should already have been checked");
+   assert(sig->byte_at(0) == '[', "this should already have been checked");
+   // The first character is already checked
+-  int i = 1; 
+-  int len = sig->utf8_length();   
++  int i = 1;
++  int len = sig->utf8_length();
+   // First skip all '['s
+   while(i < len - 1 && sig->byte_at(i) == '[') i++;
+-  
++
+   // Check type
+   switch(sig->byte_at(i)) {
+     case 'B': // T_BYTE
+@@ -59,20 +56,20 @@
+     case 'I': // T_INT
+     case 'J': // T_LONG
+     case 'S': // T_SHORT
+-    case 'Z': // T_BOOLEAN 
++    case 'Z': // T_BOOLEAN
+       // If it is an array, the type is the last character
+       return (i + 1 == len);
+     case 'L':
+       // If it is an object, the last character must be a ';'
+       return sig->byte_at(len - 1) == ';';
+   }
+-  
++
+   return false;
+ }
+-  
++
+ 
+ BasicType FieldType::get_array_info(symbolOop signature, jint* dimension, symbolOop* object_key, TRAPS) {
+-  assert(basic_type(signature) == T_ARRAY, "must be array");  
++  assert(basic_type(signature) == T_ARRAY, "must be array");
+   int index = 1;
+   int dim   = 1;
+   skip_optional_size(signature, &index);
+@@ -82,15 +79,14 @@
+     skip_optional_size(signature, &index);
+   }
+   ResourceMark rm;
+-  symbolOop element = oopFactory::new_symbol(signature->as_C_string() + index, CHECK_(T_BYTE));  
++  symbolOop element = oopFactory::new_symbol(signature->as_C_string() + index, CHECK_(T_BYTE));
+   BasicType element_type = FieldType::basic_type(element);
+   if (element_type == T_OBJECT) {
+     char* object_type = element->as_C_string();
+     object_type[element->utf8_length() - 1] = '\0';
+-    *object_key = oopFactory::new_symbol(object_type + 1, CHECK_(T_BYTE));                   
++    *object_key = oopFactory::new_symbol(object_type + 1, CHECK_(T_BYTE));
+   }
+   // Pass dimension back to caller
+   *dimension = dim;
+   return element_type;
+ }
+-
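
The hunks above are whitespace-only and leave the signature-parsing logic untouched; for reference, is_valid_array_signature accepts e.g. "[I" and "[[Ljava/lang/String;" but rejects "[[" and "[Lfoo". A standalone re-implementation of the same check (illustration only, operating on C strings instead of symbolOops):

    // Standalone re-implementation of the array-signature check above:
    // skip leading '['s, then require either a one-character primitive
    // type in the last position or an 'L...;' object type.
    #include <cstdio>
    #include <cstring>

    static bool is_valid_array_signature(const char* sig) {
      int len = (int) strlen(sig);
      if (len < 2 || sig[0] != '[') return false;
      int i = 1;
      while (i < len - 1 && sig[i] == '[') i++;     // first skip all '['s
      switch (sig[i]) {
        case 'B': case 'C': case 'D': case 'F':     // primitive element types
        case 'I': case 'J': case 'S': case 'Z':
          return (i + 1 == len);                    // type must be the last char
        case 'L':
          return sig[len - 1] == ';';               // object type must end with ';'
      }
      return false;
    }

    int main() {
      const char* samples[] = { "[I", "[[Ljava/lang/String;", "[[", "[Lfoo" };
      for (const char* s : samples)
        printf("%-22s -> %s\n", s, is_valid_array_signature(s) ? "valid" : "invalid");
      return 0;
    }
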
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/fieldType.hpp openjdk/hotspot/src/share/vm/runtime/fieldType.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/fieldType.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/fieldType.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)fieldType.hpp	1.28 07/05/05 17:06:47 JVM"
+-#endif
+ /*
+  * Copyright 1997-2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Note: FieldType should be based on the SignatureIterator (or vice versa).
+@@ -31,13 +28,13 @@
+ // A FieldType is used to determine the type of a field from a signature string.
+ 
+ class FieldType: public AllStatic {
+- private:  
++ private:
+   static void skip_optional_size(symbolOop signature, int* index);
+   static bool is_valid_array_signature(symbolOop signature);
+  public:
+- 
++
+   // Return basic type
+-  static BasicType basic_type(symbolOop signature);  
++  static BasicType basic_type(symbolOop signature);
+ 
+   // Testing
+   static bool is_array(symbolOop signature) { return signature->utf8_length() > 1 && signature->byte_at(0) == '[' && is_valid_array_signature(signature); }
+@@ -50,7 +47,6 @@
+              (signature->byte_at(sig_length - 1) == ';'));
+   }
+ 
+-  // Parse field and extract array information. Works for T_ARRAY only.  
++  // Parse field and extract array information. Works for T_ARRAY only.
+   static BasicType get_array_info(symbolOop signature, jint* dimension, symbolOop *object_key, TRAPS);
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/fprofiler.cpp openjdk/hotspot/src/share/vm/runtime/fprofiler.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/fprofiler.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/fprofiler.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)fprofiler.cpp	1.136 07/05/05 17:06:47 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -107,10 +104,10 @@
+ }
+ 
+ // Random other statics
+-static const int col1 = 2;	// position of output column 1
+-static const int col2 = 11;	// position of output column 2
+-static const int col3 = 25;	// position of output column 3
+-static const int col4 = 55;	// position of output column 4
++static const int col1 = 2;      // position of output column 1
++static const int col2 = 11;     // position of output column 2
++static const int col3 = 25;     // position of output column 3
++static const int col4 = 55;     // position of output column 4
+ 
+ 
+ // Used for detailed profiling of nmethods.
+@@ -127,7 +124,7 @@
+     return ((int)CodeCache::max_capacity())/bucket_size * BytesPerWord;
+   }
+  public:
+-  static address bucket_start_for(address pc) { 
++  static address bucket_start_for(address pc) {
+     if (counters == NULL) return NULL;
+     return pc_for(index_for(pc));
+   }
+@@ -178,7 +175,7 @@
+ 
+ 
+   int s;
+-  { 
++  {
+     MutexLockerEx lm(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+     s = size();
+   }
+@@ -229,8 +226,8 @@
+ 
+   void update(TickPosition where) {
+     switch(where) {
+-      case tp_code:	ticks_in_code++;       break;
+-      case tp_native:	ticks_in_native++;      break;
++      case tp_code:     ticks_in_code++;       break;
++      case tp_native:   ticks_in_native++;      break;
+     }
+   }
+ 
+@@ -238,7 +235,7 @@
+     st->print("%5.1f%% %5d ", total() * 100.0 / total_ticks, ticks_in_code);
+   }
+ 
+-  void print_native(outputStream* st) {    
++  void print_native(outputStream* st) {
+     st->print(" + %5d ", ticks_in_native);
+   }
+ };
+@@ -271,7 +268,7 @@
+ 
+   virtual bool is_interpreted() const { return false; }
+   virtual bool is_compiled()    const { return false; }
+-  virtual bool is_stub()	const { return false; }
++  virtual bool is_stub()        const { return false; }
+   virtual bool is_runtime_stub() const{ return false; }
+   virtual void oops_do(OopClosure* f) = 0;
+ 
+@@ -300,7 +297,7 @@
+   }
+ 
+   virtual methodOop method()         = 0;
+-  
++
+   virtual void print_method_on(outputStream* st) {
+     int limit;
+     int i;
+@@ -311,7 +308,7 @@
+     for (i = 0 ; i < limit ; i += 1) {
+       char c = (char) k->byte_at(i);
+       if (c == '/') {
+-	c = '.';
++        c = '.';
+       }
+       st->print("%c", c);
+     }
+@@ -346,15 +343,15 @@
+       // out of the fields we can read without grabbing any locks
+       // since the method may be locked when we need the hash.
+       return (
+-          method->code_size() ^ 
+-          method->max_stack() ^ 
+-          method->max_locals() ^ 
++          method->code_size() ^
++          method->max_stack() ^
++          method->max_locals() ^
+           method->size_of_parameters());
+   }
+ 
+   // for sorting
+-  static int compare(ProfilerNode** a, ProfilerNode** b) { 
+-    return (*b)->total_ticks() - (*a)->total_ticks(); 
++  static int compare(ProfilerNode** a, ProfilerNode** b) {
++    return (*b)->total_ticks() - (*a)->total_ticks();
+   }
+ };
+ 
+@@ -418,8 +415,8 @@
+   }
+   bool is_compiled()    const { return true; }
+ 
+-  bool compiled_match(methodOop m) const { 
+-    return _method == m; 
++  bool compiled_match(methodOop m) const {
++    return _method == m;
+   }
+ 
+   methodOop method()         { return _method; }
+@@ -527,7 +524,7 @@
+   bool runtimeStub_match(const CodeBlob* stub, const char* name) const {
+     assert(stub->is_runtime_stub(), "wrong code blob");
+     return ((RuntimeStub*)_stub)->entry_point() == ((RuntimeStub*)stub)->entry_point() &&
+-	    (_symbol == name);
++            (_symbol == name);
+   }
+ 
+   methodOop method() { return NULL; }
+@@ -563,16 +560,16 @@
+  const char *_name;
+  public:
+    unknown_compiledNode(const CodeBlob* cb, TickPosition where) : ProfilerNode() {
+-     if ( cb->is_buffer_blob() ) 
++     if ( cb->is_buffer_blob() )
+        _name = ((BufferBlob*)cb)->name();
+-     else 
++     else
+        _name = ((SingletonBlob*)cb)->name();
+      update(where);
+   }
+   bool is_compiled()    const { return true; }
+ 
+   bool unknown_compiled_match(const CodeBlob* cb) const {
+-     if ( cb->is_buffer_blob() ) 
++     if ( cb->is_buffer_blob() )
+        return !strcmp(((BufferBlob*)cb)->name(), _name);
+      else
+        return !strcmp(((SingletonBlob*)cb)->name(), _name);
+@@ -606,7 +603,7 @@
+     _name = name;
+     update(where);
+   }
+-  
++
+   const char *name()    const { return _name; }
+   bool is_compiled()    const { return true; }
+ 
+@@ -614,15 +611,15 @@
+ 
+   methodOop method()          { return NULL; }
+ 
+-  static int hash(const char* name){ 
++  static int hash(const char* name){
+     // Compute a simple hash
+     const char* cp = name;
+     int h = 0;
+ 
+     if(name != NULL){
+       while(*cp != '\0'){
+-	h = (h << 1) ^ *cp;
+-	cp++;
++        h = (h << 1) ^ *cp;
++        cp++;
+       }
+     }
+     return h;
+@@ -705,15 +702,15 @@
+     ProfilerNode* prev = table[index];
+     for(ProfilerNode* node = prev; node; node = node->next()) {
+       if (node->adapter_match()) {
+-        node->update(where);  
+-        return;  
+-      }  
++        node->update(where);
++        return;
++      }
+       prev = node;
+     }
+     prev->set_next(new (this) adapterNode(where));
+   }
+ }
+- 
++
+ void ThreadProfiler::runtime_stub_update(const CodeBlob* stub, const char* name, TickPosition where) {
+   int index = 0;
+   if (!table[index]) {
+@@ -722,9 +719,9 @@
+     ProfilerNode* prev = table[index];
+     for(ProfilerNode* node = prev; node; node = node->next()) {
+       if (node->runtimeStub_match(stub, name)) {
+-        node->update(where);  
+-        return;  
+-      }  
++        node->update(where);
++        return;
++      }
+       prev = node;
+     }
+     prev->set_next(new (this) runtimeStubNode(stub, name, where));
+@@ -740,9 +737,9 @@
+     ProfilerNode* prev = table[index];
+     for(ProfilerNode* node = prev; node; node = node->next()) {
+       if (node->unknown_compiled_match(cb)) {
+-        node->update(where);  
+-        return;  
+-      }  
++        node->update(where);
++        return;
++      }
+       prev = node;
+     }
+     prev->set_next(new (this) unknown_compiledNode(cb, where));
+@@ -763,9 +760,9 @@
+     ProfilerNode* prev = table[index];
+     for(ProfilerNode* node = prev; node; node = node->next()) {
+       if (((vmNode *)node)->vm_match(name)) {
+-        node->update(where);  
+-        return;  
+-      }  
++        node->update(where);
++        return;
++      }
+       prev = node;
+     }
+     prev->set_next(new (this) vmNode(os::strdup(name), where));
+@@ -803,11 +800,11 @@
+     const char *name = NULL;
+     char buf[256];
+     buf[0] = '\0';
+-    
++
+     vm_thread_profiler->inc_thread_ticks();
+ 
+     // Get a snapshot of a current VMThread pc (and leave it running!)
+-    // The call may fail if, for instance the VM thread is interrupted while 
++    // The call may fail if, for instance the VM thread is interrupted while
+     // holding the Interrupt_lock or for other reasons.
+     epc = os::get_thread_pc(VMThread::vm_thread());
+     if(epc.pc() != NULL) {
+@@ -827,7 +824,7 @@
+   JavaThread** threadsList;
+   bool interval_expired = false;
+ 
+-  if (ProfileIntervals && 
++  if (ProfileIntervals &&
+       (FlatProfiler::received_ticks >= interval_ticks_previous + ProfileIntervalsTicks)) {
+     interval_expired = true;
+     interval_ticks_previous = FlatProfiler::received_ticks;
+@@ -927,16 +924,6 @@
+   FlatProfiler::record_thread_ticks();
+ }
+ 
+-inline bool is_valid_method(methodOop method) {
+-  if (method == NULL || 
+-      !method->is_perm() || 
+-      oop(method)->klass() != Universe::methodKlassObj() ||
+-      !method->is_method()) {
+-    return false;   // doesn't look good
+-  }
+-  return true;      // hopefully this is a method indeed
+-}
+-
+ void ThreadProfiler::record_interpreted_tick(frame fr, TickPosition where, int* ticks) {
+   FlatProfiler::all_int_ticks++;
+   if (!FlatProfiler::full_profile()) {
+@@ -954,14 +941,14 @@
+   if (fr.fp() != NULL) {
+     method = *fr.interpreter_frame_method_addr();
+   }
+-  if (!is_valid_method(method)) {
++  if (!Universe::heap()->is_valid_method(method)) {
+     // tick came at a bad time, stack frame not initialized correctly
+     interpreter_ticks += 1;
+     FlatProfiler::interpreter_ticks += 1;
+     return;
+   }
+   interpreted_update(method, where);
+-  
++
+   // update byte code table
+   InterpreterCodelet* desc = Interpreter::codelet_containing(fr.pc());
+   if (desc != NULL && desc->bytecode() >= 0) {
+@@ -985,8 +972,8 @@
+         cb = fr.cb();
+         localwhere = tp_native;
+   }
+-  methodOop method = (cb->is_nmethod()) ? ((nmethod *)cb)->method() : 
+-                                          (methodOop)NULL;  
++  methodOop method = (cb->is_nmethod()) ? ((nmethod *)cb)->method() :
++                                          (methodOop)NULL;
+ 
+   if (method == NULL) {
+     if (cb->is_runtime_stub())
+@@ -1019,13 +1006,13 @@
+     PCRecorder::record(fr.pc());
+     record_compiled_tick(thread, fr, tp_code);
+     return;
+-  } 
++  }
+ 
+   if (VtableStubs::stub_containing(fr.pc()) != NULL) {
+     unknown_ticks_array[ut_vtable_stubs] += 1;
+     return;
+   }
+-  
++
+   frame caller = fr.profile_find_Java_sender_frame(thread);
+ 
+   if (caller.sp() != NULL && caller.pc() != NULL) {
+@@ -1047,7 +1034,7 @@
+   if (CodeCache::contains(fr.pc())) {
+     record_compiled_tick(thread, fr, tp_native);
+     return;
+-  } 
++  }
+ 
+   frame caller = fr.profile_find_Java_sender_frame(thread);
+ 
+@@ -1067,12 +1054,12 @@
+   // Here's another way to track global state changes.
+   // When the class loader starts it marks the ThreadProfiler to tell it it is in the class loader
+   // and we check that here.
+-  // This is more direct, and more than one thread can be in the class loader at a time, 
++  // This is more direct, and more than one thread can be in the class loader at a time,
+   // but it does mean the class loader has to know about the profiler.
+   if (region_flag[ThreadProfilerMark::classLoaderRegion]) {
+     class_loader_ticks += 1;
+     FlatProfiler::class_loader_ticks += 1;
+-    return; 
++    return;
+   } else if (region_flag[ThreadProfilerMark::extraRegion]) {
+     extra_ticks += 1;
+     FlatProfiler::extra_ticks += 1;
+@@ -1087,7 +1074,7 @@
+     return;
+   }
+ 
+-  frame fr; 
++  frame fr;
+ 
+   switch (thread->thread_state()) {
+   case _thread_in_native:
+@@ -1100,13 +1087,13 @@
+         fr = fr.sender(&map);
+       }
+       record_tick_for_calling_frame(thread, fr);
+-    } else {     
++    } else {
+       unknown_ticks_array[ut_no_last_Java_frame] += 1;
+       FlatProfiler::unknown_ticks += 1;
+     }
+     break;
+   // handle_special_runtime_exit_condition self-suspends threads in Java
+-  case _thread_in_Java: 
++  case _thread_in_Java:
+   case _thread_in_Java_trans:
+     if (thread->profile_last_Java_frame(&fr)) {
+       if (fr.is_safepoint_blob_frame()) {
+@@ -1188,7 +1175,7 @@
+   if (table != NULL) {
+     for (int index = 0; index < table_size; index++) {
+       ProfilerNode* n = table[index];
+-      if (n != NULL) { 
++      if (n != NULL) {
+         delete n;
+       }
+     }
+@@ -1221,7 +1208,7 @@
+     vm_thread_profiler = new ThreadProfiler();
+   }
+   if (task == NULL) {
+-    task = new FlatProfilerTask(WatcherThread::delay_interval); 
++    task = new FlatProfilerTask(WatcherThread::delay_interval);
+     task->enroll();
+   }
+   timer.start();
+@@ -1286,13 +1273,13 @@
+ 
+ void FlatProfiler::print_byte_code_statistics() {
+   GrowableArray <ProfilerNode*>* array = new GrowableArray<ProfilerNode*>(200);
+-  
++
+   tty->print_cr(" Bytecode ticks:");
+   for (int index = 0; index < Bytecodes::number_of_codes; index++) {
+     if (FlatProfiler::bytecode_ticks[index] > 0 || FlatProfiler::bytecode_ticks_stub[index] > 0) {
+-      tty->print_cr("  %4d %4d = %s", 
+-        FlatProfiler::bytecode_ticks[index], 
+-        FlatProfiler::bytecode_ticks_stub[index], 
++      tty->print_cr("  %4d %4d = %s",
++        FlatProfiler::bytecode_ticks[index],
++        FlatProfiler::bytecode_ticks_stub[index],
+         Bytecodes::name( (Bytecodes::Code) index));
+     }
+   }
+@@ -1311,45 +1298,45 @@
+ void ThreadProfiler::print(const char* thread_name) {
+   ResourceMark rm;
+   MutexLocker ppl(ProfilePrint_lock);
+-  int index = 0; // Declared outside for loops for portability 
++  int index = 0; // Declared outside for loops for portability
+ 
+   if (table == NULL) {
+     return;
+   }
+-  
++
+   if (thread_ticks <= 0) {
+     return;
+   }
+ 
+   const char* title = "too soon to tell";
+   double secs = timer.seconds();
+-  
++
+   GrowableArray <ProfilerNode*>* array = new GrowableArray<ProfilerNode*>(200);
+   for(index = 0; index < table_size; index++) {
+     for(ProfilerNode* node = table[index]; node; node = node->next())
+       array->append(node);
+   }
+-  
++
+   array->sort(&ProfilerNode::compare);
+-  
++
+   // compute total (sanity check)
+-  int active = 
+-    class_loader_ticks + 
+-    compiler_ticks + 
+-    interpreter_ticks + 
++  int active =
++    class_loader_ticks +
++    compiler_ticks +
++    interpreter_ticks +
+     unknown_ticks();
+   for (index = 0; index < array->length(); index++) {
+     active += array->at(index)->ticks.total();
+   }
+   int total = active + blocked_ticks;
+-  
++
+   tty->cr();
+   tty->print_cr("Flat profile of %3.2f secs (%d total ticks): %s", secs, total, thread_name);
+   if (total != thread_ticks) {
+     print_ticks("Lost ticks", thread_ticks-total, thread_ticks);
+   }
+   tty->cr();
+-  
++
+   // print interpreted methods
+   tick_counter interpreted_ticks;
+   bool has_interpreted_ticks = false;
+@@ -1376,7 +1363,7 @@
+     interpretedNode::print_total(tty, &interpreted_ticks, active, title);
+     tty->cr();
+   }
+-  
++
+   // print compiled methods
+   tick_counter compiled_ticks;
+   bool has_compiled_ticks = false;
+@@ -1457,7 +1444,7 @@
+     runtimeStubNode::print_total(tty, &runtime_stub_ticks, active, title);
+     tty->cr();
+   }
+-  
++
+   if (blocked_ticks + class_loader_ticks + interpreter_ticks + compiler_ticks + unknown_ticks() != 0) {
+     tty->fill_to(col1);
+     tty->print_cr("Thread-local ticks:");
+@@ -1487,7 +1474,7 @@
+   if (table == NULL) {
+     return;
+   }
+-  
++
+   if (thread_ticks <= 0) {
+     return;
+   }
+@@ -1532,11 +1519,11 @@
+   }
+ 
+   PCRecorder::print();
+-    
++
+   if(ProfileVM){
+     tty->cr();
+     vm_thread_profiler->print("VM Thread");
+-  }  
++  }
+ }
+ 
+ void IntervalData::print_header(outputStream* st) {
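
The adapter, runtime-stub, unknown-compiled, and vm update functions reformatted above all share one pattern: each hash bucket holds an intrusive singly-linked chain of ProfilerNodes; a tick walks the chain, bumps a matching node, and otherwise appends a new node at the tail. A standalone sketch of that pattern (hypothetical stand-in types, not HotSpot code; cleanup omitted for brevity):

    // Standalone sketch of the bucket-chain pattern the *_update functions
    // above share: search the chain, bump a match, else append at the tail.
    #include <cstdio>
    #include <cstring>

    struct Node {
      const char* name;
      int         ticks;
      Node*       next;
      Node(const char* n) : name(n), ticks(1), next(nullptr) {}  // node counts its first tick
    };

    static void update(Node*& bucket, const char* name) {
      if (bucket == nullptr) { bucket = new Node(name); return; }
      Node* prev = bucket;
      for (Node* node = bucket; node; node = node->next) {
        if (strcmp(node->name, name) == 0) { node->ticks++; return; }
        prev = node;
      }
      prev->next = new Node(name);      // no match anywhere: append at the tail
    }

    int main() {
      Node* bucket = nullptr;
      const char* samples[] = { "stub_a", "stub_b", "stub_a" };
      for (const char* s : samples) update(bucket, s);
      for (Node* n = bucket; n; n = n->next)
        printf("%s: %d ticks\n", n->name, n->ticks);   // stub_a: 2, stub_b: 1
      return 0;
    }
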
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/fprofiler.hpp openjdk/hotspot/src/share/vm/runtime/fprofiler.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/fprofiler.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/fprofiler.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)fprofiler.hpp	1.54 07/05/05 17:06:47 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,11 +19,12 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // a simple flat profiler for Java
+ 
++
+ // Forward declaration of classes defined in this header file
+ class ThreadProfiler;
+ class ThreadProfilerMark;
+@@ -46,18 +44,20 @@
+ // and destructed as we exit the region.  While we are in the region
+ // ticks are allotted to the region.
+ class ThreadProfilerMark: public StackObj {
+-public: 
++public:
+   // For now, the only thread-specific region is the class loader.
+   enum Region { noRegion, classLoaderRegion, extraRegion, maxRegion };
+ 
+-  ThreadProfilerMark(Region);
+-  ~ThreadProfilerMark();
++  ThreadProfilerMark(Region)  KERNEL_RETURN;
++  ~ThreadProfilerMark()       KERNEL_RETURN;
+ 
+ private:
+   ThreadProfiler* _pp;
+   Region _r;
+ };
+ 
++#ifndef FPROF_KERNEL
++
+ class IntervalData VALUE_OBJ_CLASS_SPEC {
+   // Just to keep these things all together
+ private:
+@@ -98,31 +98,33 @@
+     _compiled = 0;
+     _native = 0;
+     _compiling = 0;
+-  } 
++  }
+   static void print_header(outputStream* st);
+   void print_data(outputStream* st);
+ };
++#endif // FPROF_KERNEL
+ 
+ class ThreadProfiler: public CHeapObj {
+ public:
+-  ThreadProfiler();
+-  ~ThreadProfiler();
++  ThreadProfiler()    KERNEL_RETURN;
++  ~ThreadProfiler()   KERNEL_RETURN;
+ 
+   // Resets the profiler
+-  void reset();
++  void reset()        KERNEL_RETURN;
+ 
+   // Activates the profiler for a certain thread
+-  void engage();
++  void engage()       KERNEL_RETURN;
+ 
+   // Deactivates the profiler
+-  void disengage();
++  void disengage()    KERNEL_RETURN;
+ 
+   // Prints the collected profiling information
+-  void print(const char* thread_name);
++  void print(const char* thread_name) KERNEL_RETURN;
+ 
+   // Garbage Collection Support
+-  void oops_do(OopClosure* f);
++  void oops_do(OopClosure* f)         KERNEL_RETURN;
+ 
++#ifndef FPROF_KERNEL
+ private:
+   // for recording ticks.
+   friend class ProfilerNode;
+@@ -148,7 +150,7 @@
+ 
+   void record_tick_for_running_frame(JavaThread* thread, frame fr);
+   void record_tick_for_calling_frame(JavaThread* thread, frame fr);
+-  
++
+   void initialize();
+ 
+   static int  entry(int value);
+@@ -156,7 +158,7 @@
+ 
+ private:
+   friend class FlatProfiler;
+-  void record_tick(JavaThread* thread); 
++  void record_tick(JavaThread* thread);
+   bool engaged;
+   // so we can do percentages for this thread, and quick checks for activity
+   int thread_ticks;
+@@ -206,26 +208,45 @@
+   IntervalData* interval_data_ref() {
+     return &_interval_data;
+   }
++#endif // FPROF_KERNEL
+ };
+ 
+ class FlatProfiler: AllStatic {
+ public:
+-  static void reset();
+-  static void engage(JavaThread* mainThread, bool fullProfile);
+-  static void disengage();
+-  static void print(int unused);
+-  static bool is_active();
+-  static bool full_profile() {
+-    return full_profile_flag;
+-  }
++  static void reset() KERNEL_RETURN ;
++  static void engage(JavaThread* mainThread, bool fullProfile) KERNEL_RETURN ;
++  static void disengage() KERNEL_RETURN ;
++  static void print(int unused) KERNEL_RETURN ;
++  static bool is_active() KERNEL_RETURN_(return false;) ;
+ 
+   // This is NULL if each thread has its own thread profiler,
+   // else this is the single thread profiler used by all threads.
+   // In particular it makes a difference during garbage collection,
+   // where you only want to traverse each thread profiler once.
+-  static ThreadProfiler* get_thread_profiler();
++  static ThreadProfiler* get_thread_profiler() KERNEL_RETURN_(return NULL;);
++
++  // Garbage Collection Support
++  static void oops_do(OopClosure* f) KERNEL_RETURN ;
++
++  // Support for disassembler to inspect the PCRecorder
++
++  // Returns the start address for a given pc
++  // NULL is returned if the PCRecorder is inactive
++  static address bucket_start_for(address pc) KERNEL_RETURN_(return NULL;);
++
++  enum { MillisecsPerTick = 10 };   // ms per profiling ticks
++
++  // Returns the number of ticks recorded for the bucket
++  // pc belongs to.
++  static int bucket_count_for(address pc) KERNEL_RETURN_(return 0;);
++
++#ifndef FPROF_KERNEL
+ 
+  private:
++  static bool full_profile() {
++    return full_profile_flag;
++  }
++
+   friend class ThreadProfiler;
+   // the following group of ticks cover everything that's not attributed to individual Java methods
+   static int  received_gc_ticks;      // ticks during which gc was active
+@@ -241,14 +262,14 @@
+   static int     received_ticks;      // ticks that were received by task
+   static int    delivered_ticks;      // ticks that were delivered by task
+   static int non_method_ticks() {
+-    return 
+-      ( received_gc_ticks 
++    return
++      ( received_gc_ticks
+       + vm_operation_ticks
+-      + deopt_ticks 
++      + deopt_ticks
+       + threads_lock_ticks
+       + blocked_ticks
+-      + compiler_ticks 
+-      + interpreter_ticks 
++      + compiler_ticks
++      + interpreter_ticks
+       + unknown_ticks );
+   }
+   static elapsedTimer timer;
+@@ -264,7 +285,7 @@
+   static int     all_comp_ticks;      // ticks in compiled code (+ native)
+   static bool full_profile_flag;      // collecting full profile?
+ 
+-  // to accumulate thread-specific data 
++  // to accumulate thread-specific data
+   // if we aren't profiling individual threads.
+   static ThreadProfiler* thread_profiler;
+   static ThreadProfiler* vm_thread_profiler;
+@@ -278,24 +299,6 @@
+   static void record_vm_tick();
+   static void record_thread_ticks();
+ 
+- public:
+-  enum { MillisecsPerTick = 10 };   // ms per profiling ticks
+-
+-  // Garbage Collection Support
+- public:
+-   static void oops_do(OopClosure* f);
+-
+- public:
+-  // Support for disassembler to inspect the PCRecorder
+-
+-  // Returns the start address for a given pc
+-  // NULL is returned if the PCRecorder is inactive
+-  static address bucket_start_for(address pc);
+-
+-  // Returns the number of ticks recorded for the bucket
+-  // pc belongs to.
+-  static int bucket_count_for(address pc);
+-
+   // For interval analysis
+  private:
+   static int interval_ticks_previous;  // delivered_ticks from the last interval
+@@ -304,6 +307,5 @@
+   static void interval_reset();       // reset interval data.
+   enum {interval_print_size = 10};
+   static IntervalData* interval_data;
++#endif // FPROF_KERNEL
+ };
+-
+-
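
The KERNEL_RETURN annotations introduced above follow HotSpot's kernel-build convention: in a kernel (profiler-less) build the macro supplies an empty inline body so the whole class compiles to stubs, while in a normal build it expands to nothing and the out-of-line definitions in fprofiler.cpp apply. The sketch below assumes the usual definitions (KERNEL_RETURN expanding to {} and KERNEL_RETURN_(code) to { code } when KERNEL is defined); that expansion is an assumption here, not quoted from the patch:

    // Illustration of the KERNEL_RETURN idiom (assumed macro definitions;
    // FlatProfilerLike is a hypothetical stand-in class).
    #define KERNEL                       // pretend this is a kernel build
    #ifdef KERNEL
    #define KERNEL_RETURN        {}
    #define KERNEL_RETURN_(code) { code }
    #else
    #define KERNEL_RETURN                 // plain declaration; body lives in the .cpp
    #define KERNEL_RETURN_(code)
    #endif

    #include <cstdio>

    class FlatProfilerLike {
     public:
      static void reset()     KERNEL_RETURN;                  // kernel: inline no-op
      static bool is_active() KERNEL_RETURN_(return false;);  // kernel: inline 'return false'
    };

    int main() {
      FlatProfilerLike::reset();                       // compiles away to nothing
      printf("%d\n", FlatProfilerLike::is_active());   // prints 0
      return 0;
    }
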
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/frame.cpp openjdk/hotspot/src/share/vm/runtime/frame.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/frame.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/frame.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)frame.cpp	1.233 07/05/05 17:06:44 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -30,7 +27,7 @@
+ 
+ RegisterMap::RegisterMap(JavaThread *thread, bool update_map) {
+   _thread         = thread;
+-  _update_map     = update_map;      
++  _update_map     = update_map;
+   clear();
+   debug_only(_update_for_id = NULL;)
+ #ifndef PRODUCT
+@@ -43,10 +40,10 @@
+   assert(map != NULL, "RegisterMap must be present");
+   _thread                = map->thread();
+   _update_map            = map->update_map();
+-  _include_argument_oops = map->include_argument_oops();  
++  _include_argument_oops = map->include_argument_oops();
+   debug_only(_update_for_id = map->_update_for_id;)
+   pd_initialize_from(map);
+-  if (update_map()) {  
++  if (update_map()) {
+     for(int i = 0; i < location_valid_size; i++) {
+       LocationValidType bits = !update_map() ? 0 : map->_location_valid[i];
+       _location_valid[i] = bits;
+@@ -64,7 +61,7 @@
+   }
+ }
+ 
+-void RegisterMap::clear() {  
++void RegisterMap::clear() {
+   set_include_argument_oops(true);
+   if (_update_map) {
+     for(int i = 0; i < location_valid_size; i++) {
+@@ -81,7 +78,7 @@
+ void RegisterMap::print_on(outputStream* st) const {
+   st->print_cr("Register map");
+   for(int i = 0; i < reg_count; i++) {
+-    
++
+     VMReg r = VMRegImpl::as_VMReg(i);
+     intptr_t* src = (intptr_t*) location(r);
+     if (src != NULL) {
+@@ -89,9 +86,9 @@
+       r->print();
+       tty->print(" [" INTPTR_FORMAT "] = ", src);
+       if (((uintptr_t)src & (sizeof(*src)-1)) != 0) {
+-	tty->print_cr("<misaligned>");
++        tty->print_cr("<misaligned>");
+       } else {
+-	tty->print_cr(INTPTR_FORMAT, *src);
++        tty->print_cr(INTPTR_FORMAT, *src);
+       }
+     }
+   }
+@@ -118,7 +115,7 @@
+ 
+ // Change the pc in a frame object. This does not change the actual pc in
+ // the actual frame. To do that, use patch_pc.
+-// 
++//
+ void frame::set_pc(address   newpc ) {
+ #ifdef ASSERT
+   if (_cb != NULL && _cb->is_nmethod()) {
+@@ -140,9 +137,9 @@
+ }
+ 
+ bool frame::is_native_frame() const {
+-  return (_cb != NULL && 
++  return (_cb != NULL &&
+           _cb->is_nmethod() &&
+-          ((nmethod*)_cb)->is_native_method());    
++          ((nmethod*)_cb)->is_native_method());
+ }
+ 
+ bool frame::is_java_frame() const {
+@@ -153,11 +150,11 @@
+ 
+ 
+ bool frame::is_compiled_frame() const {
+-  if (_cb != NULL && 
++  if (_cb != NULL &&
+       _cb->is_nmethod() &&
+       ((nmethod*)_cb)->is_java_method()) {
+     return true;
+-  } 
++  }
+   return false;
+ }
+ 
+@@ -186,12 +183,12 @@
+ 
+ 
+ bool frame::should_be_deoptimized() const {
+-  if (_deopt_state == is_deoptimized || 
++  if (_deopt_state == is_deoptimized ||
+       !is_compiled_frame() ) return false;
+   assert(_cb != NULL && _cb->is_nmethod(), "must be an nmethod");
+   nmethod* nm = (nmethod *)_cb;
+   if (TraceDependencies) {
+-    tty->print("checking (%s) ", nm->is_marked_for_deoptimization() ? "true" : "false"); 
++    tty->print("checking (%s) ", nm->is_marked_for_deoptimization() ? "true" : "false");
+     nm->print_value_on(tty);
+     tty->cr();
+   }
+@@ -227,7 +224,7 @@
+   if (NeedsDeoptSuspend && !thread_is_known_safe) {
+ 
+     // It is possible especially with DeoptimizeALot/DeoptimizeRandom that
+-    // we could see the frame again and ask for it to be deoptimized since 
++    // we could see the frame again and ask for it to be deoptimized since
+     // it might move for a long time. That is harmless and we just ignore it.
+     if (id() == thread->must_deopt_id()) {
+       assert(thread->is_deopt_suspend(), "lost suspension");
+@@ -254,7 +251,7 @@
+       // Therefore we can put an additional request for the thread to stop
+       // no matter what (like a suspend). This will cause the thread
+       // to notice it needs to do the deopt on its own once it leaves native.
+-      // 
++      //
+       // The only reason we must do this is because on machine with register
+       // windows we have a race with patching the return address and the
+       // window coming live as the thread returns to the Java code (but still
+@@ -316,7 +313,7 @@
+     first_java_frame = *this;
+   } else if (safe_for_sender(thread)) {
+     for (frame sender_frame = sender(&map);
+-      sender_frame.safe_for_sender(thread) && !sender_frame.is_first_frame(); 
++      sender_frame.safe_for_sender(thread) && !sender_frame.is_first_frame();
+       sender_frame = sender_frame.sender(&map)) {
+       if (sender_frame.is_java_frame()) {
+         first_java_frame = sender_frame;
+@@ -329,10 +326,10 @@
+ 
+ // Interpreter frames
+ 
+- 
+-void frame::interpreter_frame_set_locals(intptr_t* locs)  { 
++
++void frame::interpreter_frame_set_locals(intptr_t* locs)  {
+   assert(is_interpreted_frame(), "Not an interpreted frame");
+-  *interpreter_frame_locals_addr() = locs; 
++  *interpreter_frame_locals_addr() = locs;
+ }
+ 
+ methodOop frame::interpreter_frame_method() const {
+@@ -435,7 +432,7 @@
+   interpreter_frame_set_mdx((intptr_t)mdp);
+ }
+ 
+-BasicObjectLock* frame::next_monitor_in_interpreter_frame(BasicObjectLock* current) const { 
++BasicObjectLock* frame::next_monitor_in_interpreter_frame(BasicObjectLock* current) const {
+   assert(is_interpreted_frame(), "Not an interpreted frame");
+ #ifdef ASSERT
+   interpreter_frame_verify_monitor(current);
+@@ -444,7 +441,7 @@
+   return next;
+ }
+ 
+-BasicObjectLock* frame::previous_monitor_in_interpreter_frame(BasicObjectLock* current) const { 
++BasicObjectLock* frame::previous_monitor_in_interpreter_frame(BasicObjectLock* current) const {
+   assert(is_interpreted_frame(), "Not an interpreted frame");
+ #ifdef ASSERT
+ //   // This verification needs to be checked before being enabled
+@@ -493,7 +490,7 @@
+   interpreter_frame_expression_stack()[n] = (intptr_t)tag;
+ }
+ 
+-jint frame::interpreter_frame_expression_stack_size() const { 
++jint frame::interpreter_frame_expression_stack_size() const {
+   // Number of elements on the interpreter expression stack
+   // Callers should span by stackElementWords
+   int element_size = Interpreter::stackElementWords();
+@@ -501,7 +498,7 @@
+     return (interpreter_frame_expression_stack() -
+             interpreter_frame_tos_address() + 1)/element_size;
+   } else {
+-    return (interpreter_frame_tos_address() - 
++    return (interpreter_frame_tos_address() -
+             interpreter_frame_expression_stack() + 1)/element_size;
+   }
+ }
+@@ -524,7 +521,7 @@
+   NOT_PRODUCT(address begin = pc()-40;)
+   NOT_PRODUCT(address end   = NULL;)
+ 
+-  st->print("%s frame (sp=" INTPTR_FORMAT, print_name(), sp());
++  st->print("%s frame (sp=" INTPTR_FORMAT " unextended sp=" INTPTR_FORMAT, print_name(), sp(), unextended_sp());
+   if (sp() != NULL)
+     st->print(", fp=" INTPTR_FORMAT ", pc=" INTPTR_FORMAT, fp(), pc());
+ 
+@@ -543,7 +540,7 @@
+       desc->print();
+       NOT_PRODUCT(begin = desc->code_begin(); end = desc->code_end();)
+     } else {
+-      st->print("~interpreter"); 
++      st->print("~interpreter");
+     }
+   }
+   st->print_cr(")");
+@@ -648,7 +645,7 @@
+   }
+ 
+   // function name - os::dll_address_to_function_name() may return confusing
+-  // names if pc is within jvm.dll or libjvm.so, because JVM only has 
++  // names if pc is within jvm.dll or libjvm.so, because JVM only has
+   // JVM_xxxx and a few other symbols in the dynamic symbol table. Do this
+   // only for native libraries.
+   if (!in_vm) {
+@@ -660,8 +657,8 @@
+   }
+ }
+ 
+-// frame::print_on_error() is called by fatal error handler. Notice that we may 
+-// crash inside this function if stack frame is corrupted. The fatal error 
++// frame::print_on_error() is called by fatal error handler. Notice that we may
++// crash inside this function if stack frame is corrupted. The fatal error
+ // handler can catch and handle the crash. Here we assume the frame is valid.
+ //
+ // First letter indicates type of the frame:
+@@ -676,7 +673,23 @@
+ 
+ void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose) const {
+   if (_cb != NULL) {
+-    if (_cb->is_buffer_blob()) {
++    if (Interpreter::contains(pc())) {
++      methodOop m = this->interpreter_frame_method();
++      if (m != NULL) {
++        m->name_and_sig_as_C_string(buf, buflen);
++        st->print("j  %s", buf);
++        st->print("+%d", this->interpreter_frame_bci());
++      } else {
++        st->print("j  " PTR_FORMAT, pc());
++      }
++    } else if (StubRoutines::contains(pc())) {
++      StubCodeDesc* desc = StubCodeDesc::desc_for(pc());
++      if (desc != NULL) {
++        st->print("v  ~StubRoutines::%s", desc->name());
++      } else {
++        st->print("v  ~StubRoutines::" PTR_FORMAT, pc());
++      }
++    } else if (_cb->is_buffer_blob()) {
+       st->print("v  ~BufferBlob::%s", ((BufferBlob *)_cb)->name());
+     } else if (_cb->is_nmethod()) {
+       methodOop m = ((nmethod *)_cb)->method();
+@@ -697,24 +710,6 @@
+     } else {
+       st->print("v  blob " PTR_FORMAT, pc());
+     }
+-  } else if (!is_init_completed()) {
+-    print_C_frame(st, buf, buflen, pc());
+-  } else if (Interpreter::contains(pc())) {
+-    methodOop m = this->interpreter_frame_method();
+-    if (m != NULL) {
+-      m->name_and_sig_as_C_string(buf, buflen);
+-      st->print("j  %s", buf);
+-      st->print("+%d", this->interpreter_frame_bci());
+-    } else {
+-      st->print("j  " PTR_FORMAT, pc());
+-    }
+-  } else if (StubRoutines::contains(pc())) {
+-    StubCodeDesc* desc = StubCodeDesc::desc_for(pc());
+-    if (desc != NULL) {
+-      st->print("v  ~StubRoutines::%s", desc->name());
+-    } else {
+-      st->print("v  ~StubRoutines::" PTR_FORMAT, pc());
+-    }
+   } else {
+     print_C_frame(st, buf, buflen, pc());
+   }
+@@ -738,7 +733,7 @@
+ 
+  public:
+   InterpreterFrameClosure(frame* fr, int max_locals, int max_stack,
+-			  OopClosure* f) {
++                          OopClosure* f) {
+     _fr         = fr;
+     _max_locals = max_locals;
+     _max_stack  = max_stack;
+@@ -757,12 +752,12 @@
+       // this condition. Therefore, we call f only if addr is 'inside' the stack (i.e., addr >= esp for Intel).
+       bool in_stack;
+       if (frame::interpreter_frame_expression_stack_direction() > 0) {
+-	in_stack = (intptr_t*)addr <= _fr->interpreter_frame_tos_address();
++        in_stack = (intptr_t*)addr <= _fr->interpreter_frame_tos_address();
+       } else {
+-	in_stack = (intptr_t*)addr >= _fr->interpreter_frame_tos_address();
++        in_stack = (intptr_t*)addr >= _fr->interpreter_frame_tos_address();
+       }
+       if (in_stack) {
+-	_f->do_oop(addr);
++        _f->do_oop(addr);
+       }
+     }
+   }
+@@ -781,7 +776,7 @@
+ 
+   void set(int size, BasicType type) {
+     _offset -= size;
+-    if (type == T_OBJECT || type == T_ARRAY) oop_offset_do();    
++    if (type == T_OBJECT || type == T_ARRAY) oop_offset_do();
+   }
+ 
+   void oop_offset_do() {
+@@ -794,12 +789,12 @@
+   InterpretedArgumentOopFinder(symbolHandle signature, bool is_static, frame* fr, OopClosure* f) : SignatureInfo(signature) {
+     // compute size of arguments
+     int args_size = ArgumentSizeComputer(signature).size() + (is_static ? 0 : 1);
+-    assert(!fr->is_interpreted_frame() || 
++    assert(!fr->is_interpreted_frame() ||
+            args_size <= fr->interpreter_frame_expression_stack_size(),
+-	    "args cannot be on stack anymore");
++            "args cannot be on stack anymore");
+     // initialize InterpretedArgumentOopFinder
+     _f         = f;
+-    _fr        = fr;    
++    _fr        = fr;
+     _offset    = args_size;
+     _is_static = is_static;
+   }
+@@ -823,7 +818,7 @@
+ // (sp+n)->|  first arg|
+ //         +-----------+
+ 
+- 
++
+ 
+ // visits and GC's all the arguments in entry frame
+ class EntryFrameOopFinder: public SignatureInfo {
+@@ -863,8 +858,8 @@
+ 
+ oop* frame::interpreter_callee_receiver_addr(symbolHandle signature) {
+   ArgumentSizeComputer asc(signature);
+-  int size = asc.size();  
+-  return (oop *)interpreter_frame_tos_at(size); 
++  int size = asc.size();
++  return (oop *)interpreter_frame_tos_at(size);
+ }
+ 
+ 
+@@ -887,7 +882,7 @@
+   ) {
+ #ifdef ASSERT
+     interpreter_frame_verify_monitor(current);
+-#endif    
++#endif
+     current->oops_do(f);
+   }
+ 
+@@ -898,9 +893,9 @@
+   // Hmm what about the mdp?
+ #ifdef CC_INTERP
+   // Interpreter frame in the midst of a call have a methodOop within the
+-  // object. 
++  // object.
+   interpreterState istate = get_interpreterState();
+-  if (istate->msg() == cInterpreter::call_method) {
++  if (istate->msg() == BytecodeInterpreter::call_method) {
+     f->do_oop((oop*)&istate->_result._to_call._callee);
+   }
+ 
+@@ -915,12 +910,12 @@
+   }
+ 
+   int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();
+-  
++
+   symbolHandle signature;
+   bool is_static = false;
+ 
+   // Process a callee's arguments if we are at a call site
+-  // (i.e., if we are at an invoke bytecode)  
++  // (i.e., if we are at an invoke bytecode)
+   // This is used sometimes for calling into the VM, not for another
+   // interpreted or compiled frame.
+   if (!m->is_native()) {
+@@ -928,7 +923,7 @@
+     if (call != NULL) {
+       signature = symbolHandle(thread, call->signature());
+       is_static = call->is_invokestatic();
+-      if (map->include_argument_oops() && 
++      if (map->include_argument_oops() &&
+           interpreter_frame_expression_stack_size() > 0) {
+         ResourceMark rm(thread);  // is this right ???
+         // we are at a call site & the expression stack is not empty
+@@ -940,7 +935,7 @@
+         //       fore handling the exception (the exception handling
+         //       code in the interpreter calls a blocking runtime
+         //       routine which can cause this code to be executed).
+-        //       (was bug gri 7/27/98)      
++        //       (was bug gri 7/27/98)
+         oops_interpreted_arguments_do(signature, is_static, f);
+       }
+     }
+@@ -955,12 +950,12 @@
+     mask = &oopmap_mask;
+ #endif // ASSERT
+     oops_interpreted_locals_do(f, max_locals, mask);
+-    oops_interpreted_expressions_do(f, signature, is_static, 
++    oops_interpreted_expressions_do(f, signature, is_static,
+                                     m->max_stack(),
+                                     max_locals, mask);
+   } else {
+     InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f);
+-  
++
+     // process locals & expression stack
+     InterpreterOopMap mask;
+     if (query_oop_map_cache) {
+@@ -1043,7 +1038,7 @@
+   }
+ }
+ 
+-void frame::oops_interpreted_arguments_do(symbolHandle signature, bool is_static, OopClosure* f) {  
++void frame::oops_interpreted_arguments_do(symbolHandle signature, bool is_static, OopClosure* f) {
+   InterpretedArgumentOopFinder finder(signature, is_static, this, f);
+   finder.oops_do();
+ }
+@@ -1056,7 +1051,7 @@
+     // Preserve potential arguments for a callee. We handle this by dispatching
+     // on the codeblob. For c2i, we do
+     if (reg_map->include_argument_oops()) {
+-      _cb->preserve_callee_argument_oops(*this, reg_map, f);      
++      _cb->preserve_callee_argument_oops(*this, reg_map, f);
+     }
+   }
+   // In cases where perm gen is collected, GC will want to mark
+@@ -1082,14 +1077,14 @@
+ 
+ class CompiledArgumentOopFinder: public SignatureInfo {
+  protected:
+-  OopClosure*     _f;  
++  OopClosure*     _f;
+   int             _offset;      // the current offset, incremented with each argument
+   bool            _is_static;   // true if the callee is a static method
+   frame           _fr;
+-  RegisterMap*    _reg_map;      
++  RegisterMap*    _reg_map;
+   int             _arg_size;
+   VMRegPair*      _regs;        // VMReg list of arguments
+-  
++
+   void set(int size, BasicType type) {
+     if (type == T_OBJECT || type == T_ARRAY) handle_oop_offset();
+     _offset += size;
+@@ -1100,18 +1095,18 @@
+     // In LP64-land, the high-order bits are valid but unhelpful.
+     VMReg reg = _regs[_offset].first();
+     oop *loc = _fr.oopmapreg_to_location(reg, _reg_map);
+-    _f->do_oop(loc); 
++    _f->do_oop(loc);
+   }
+ 
+  public:
+-  CompiledArgumentOopFinder(symbolHandle signature, bool is_static, OopClosure* f, frame fr,  const RegisterMap* reg_map) 
++  CompiledArgumentOopFinder(symbolHandle signature, bool is_static, OopClosure* f, frame fr,  const RegisterMap* reg_map)
+     : SignatureInfo(signature) {
+ 
+     // initialize CompiledArgumentOopFinder
+     _f         = f;
+     _offset    = 0;
+-    _is_static = is_static;    
+-    _fr        = fr;    
++    _is_static = is_static;
++    _fr        = fr;
+     _reg_map   = (RegisterMap*)reg_map;
+     _arg_size  = ArgumentSizeComputer(signature).size() + (is_static ? 0 : 1);
+ 
+@@ -1122,11 +1117,11 @@
+ 
+   void oops_do() {
+     if (!_is_static) {
+-      handle_oop_offset(); 
++      handle_oop_offset();
+       _offset++;
+     }
+     iterate_parameters();
+-  }  
++  }
+ };
+ 
+ void frame::oops_compiled_arguments_do(symbolHandle signature, bool is_static, const RegisterMap* reg_map, OopClosure* f) {
+@@ -1136,28 +1131,28 @@
+ }
+ 
+ 
+-// Get receiver out of callers frame, i.e. find parameter 0 in callers 
+-// frame.  Consult ADLC for where parameter 0 is to be found.  Then 
++// Get receiver out of callers frame, i.e. find parameter 0 in callers
++// frame.  Consult ADLC for where parameter 0 is to be found.  Then
+ // check local reg_map for it being a callee-save register or argument
+ // register, both of which are saved in the local frame.  If not found
+-// there, it must be an in-stack argument of the caller. 
++// there, it must be an in-stack argument of the caller.
+ // Note: caller.sp() points to callee-arguments
+ oop frame::retrieve_receiver(RegisterMap* reg_map) {
+   frame caller = *this;
+ 
+   // First consult the ADLC on where it puts parameter 0 for this signature.
+   VMReg reg = SharedRuntime::name_for_receiver();
+-  oop r = *caller.oopmapreg_to_location(reg, reg_map);  
++  oop r = *caller.oopmapreg_to_location(reg, reg_map);
+   assert( Universe::heap()->is_in_or_null(r), "bad receiver" );
+   return r;
+ }
+ 
+ 
+ oop* frame::oopmapreg_to_location(VMReg reg, const RegisterMap* reg_map) const {
+-  if(reg->is_reg()) {    
+-    // If it is passed in a register, it got spilled in the stub frame.  
++  if(reg->is_reg()) {
++    // If it is passed in a register, it got spilled in the stub frame.
+     return (oop *)reg_map->location(reg);
+-  } else {			        
++  } else {
+     int sp_offset_in_stack_slots = reg->reg2stack();
+     int sp_offset = sp_offset_in_stack_slots >> (LogBytesPerWord - LogBytesPerInt);
+     return (oop *)&unextended_sp()[sp_offset];
+@@ -1211,19 +1206,19 @@
+          if (is_interpreted_frame())    { oops_interpreted_do(f, map, use_interpreter_oop_map_cache);
+   } else if (is_entry_frame())          { oops_entry_do      (f, map);
+   } else if (CodeCache::contains(pc())) { oops_code_blob_do  (f, map);
+-  } else { 
++  } else {
+     ShouldNotReachHere();
+-  }  
++  }
+ }
+ 
+ void frame::nmethods_do() {
+   if (_cb != NULL && _cb->is_nmethod()) {
+     nmethods_code_blob_do();
+-  } 
++  }
+ }
+ 
+ 
+-void frame::gc_prologue() {  
++void frame::gc_prologue() {
+   if (is_interpreted_frame()) {
+     // set bcx to bci to become methodOop position independent during GC
+     interpreter_frame_set_bcx(interpreter_frame_bci());
+@@ -1267,7 +1262,7 @@
+   if (TraceZapDeadLocals) tty->print_cr("zapping @ " INTPTR_FORMAT " containing " INTPTR_FORMAT, p, (address)*p);
+   // Need cast because on _LP64 the conversion to oop is ambiguous.  Constant
+   // can be either long or int.
+-  *p = (oop)(int)0xbabebabe; 
++  *p = (oop)(int)0xbabebabe;
+ }
+ frame::ZapDeadClosure frame::_zap_dead;
+ 
+@@ -1326,7 +1321,7 @@
+     // get frame map
+     InterpreterOopMap mask;
+     m->mask_for(bci, &mask);
+-    mask.iterate_all( &oop_blk, &value_blk, &dead_blk); 
++    mask.iterate_all( &oop_blk, &value_blk, &dead_blk);
+   }
+ }
+ 
+@@ -1337,7 +1332,7 @@
+   assert(_cb != NULL, "sanity check");
+   if (_cb->oop_maps() != NULL) {
+     OopMapSet::all_do(this, reg_map, &_check_oop, check_derived_oop,
+-		      &_check_value, &_zap_dead);
++                      &_check_value, &_zap_dead);
+   }
+ }
+ 
+@@ -1373,9 +1368,9 @@
+ bool frame::verify_return_pc(address x) {
+   if (StubRoutines::returns_to_call_stub(x)) {
+     return true;
+-  }  
++  }
+   if (CodeCache::contains(x)) {
+-    return true; 
++    return true;
+   }
+   if (Interpreter::contains(x)) {
+     return true;
+@@ -1407,8 +1402,7 @@
+ // StackFrameStream implementation
+ 
+ StackFrameStream::StackFrameStream(JavaThread *thread, bool update) : _reg_map(thread, update) {
+-  assert(thread->has_last_Java_frame(), "sanity check");  
+-  _fr = thread->last_frame(); 
++  assert(thread->has_last_Java_frame(), "sanity check");
++  _fr = thread->last_frame();
+   _is_done = false;
+ }
+-
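
For context: the frame::print_on_error() hunks above move the Interpreter and StubRoutines checks ahead of the CodeBlob-specific branches (and drop the pre-initialization early-out). Interpreter and stub code is generated into BufferBlobs inside the code cache, so without the reordering an interpreted frame would print as a generic "~BufferBlob" rather than as a Java frame. A schematic of the resulting dispatch, with invented helper names standing in for the inline printing:

    // Schematic only; the real code prints in place rather than via helpers.
    if (_cb != NULL) {
      if (Interpreter::contains(pc()))        print_java_frame();   // "j  pkg.Cls.method+bci"
      else if (StubRoutines::contains(pc()))  print_stub_frame();   // "v  ~StubRoutines::name"
      else if (_cb->is_buffer_blob())         print_buffer_blob();  // "v  ~BufferBlob::name"
      else                                    print_other_blob();   // nmethod, runtime stubs, ...
    } else {
      print_C_frame(st, buf, buflen, pc());                         // plain native frame
    }
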
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/frame.hpp openjdk/hotspot/src/share/vm/runtime/frame.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/frame.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/frame.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)frame.hpp	1.163 07/05/05 17:06:42 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,12 +19,10 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+-#ifdef CC_INTERP
+-typedef class cInterpreter* interpreterState;
+-#endif /* CC_INTERP */
++typedef class BytecodeInterpreter* interpreterState;
+ 
+ class CodeBlob;
+ 
+@@ -41,9 +36,7 @@
+ class frame VALUE_OBJ_CLASS_SPEC {
+  private:
+   // Instance variables:
+-#ifndef CC_INTERP
+   intptr_t* _sp; // stack pointer (from Thread::last_Java_sp)
+-#endif // !CC_INTERP
+   address   _pc; // program counter (the next instruction after the call)
+ 
+   CodeBlob* _cb; // CodeBlob that "owns" pc
+@@ -73,10 +66,9 @@
+   address raw_pc() const;
+ 
+   void set_pc( address   newpc );
+-#ifndef CC_INTERP
+-  void set_sp( intptr_t* newsp ) { _sp = newsp; }
++
+   intptr_t* sp() const           { return _sp; }
+-#endif // !CC_INTERP
++  void set_sp( intptr_t* newsp ) { _sp = newsp; }
+ 
+ 
+   CodeBlob* cb() const           { return _cb; }
+@@ -106,7 +98,7 @@
+   bool is_interpreted_frame()    const;
+   bool is_java_frame()           const;
+   bool is_entry_frame()          const;             // Java frame called from C?
+-  bool is_native_frame()         const;  
++  bool is_native_frame()         const;
+   bool is_runtime_frame()        const;
+   bool is_compiled_frame()       const;
+   bool is_safepoint_blob_frame() const;
+@@ -138,7 +130,7 @@
+   // returns the sender, but skips conversion frames
+   frame real_sender(RegisterMap* map) const;
+ 
+-  // returns the the sending Java frame, skipping any intermediate C frames 
++  // returns the sending Java frame, skipping any intermediate C frames
+   // NB: receiver must not be first frame
+   frame java_sender() const;
+ 
+@@ -147,7 +139,7 @@
+   frame sender_for_compiled_frame(RegisterMap* map) const;
+   frame sender_for_entry_frame(RegisterMap* map) const;
+   frame sender_for_interpreter_frame(RegisterMap* map) const;
+-  frame sender_for_native_frame(RegisterMap* map) const;  
++  frame sender_for_native_frame(RegisterMap* map) const;
+ 
+   // All frames:
+ 
+@@ -161,10 +153,10 @@
+   // accessors for locals
+   oop obj_at(int offset) const                   { return *obj_at_addr(offset);  }
+   void obj_at_put(int offset, oop value)         { *obj_at_addr(offset) = value; }
+-  
++
+   jint int_at(int offset) const                  { return *int_at_addr(offset);  }
+   void int_at_put(int offset, jint value)        { *int_at_addr(offset) = value; }
+-                                                
++
+   oop*      obj_at_addr(int offset) const        { return (oop*)     addr_at(offset); }
+ 
+   oop*      adjusted_obj_at_addr(methodOop method, int index) { return obj_at_addr(adjust_offset(method, index)); }
+@@ -190,7 +182,7 @@
+   // returns the stack pointer of the calling frame
+   intptr_t* sender_sp() const;
+ 
+-  
++
+   // Interpreter frames:
+ 
+  private:
+@@ -253,7 +245,7 @@
+   BasicLock* compiled_synchronized_native_monitor      (nmethod* nm = NULL);
+   oop        compiled_synchronized_native_monitor_owner(nmethod* nm = NULL);
+ 
+-  // Find receiver for an invoke when arguments are just pushed on stack (i.e., callee stack-frame is 
++  // Find receiver for an invoke when arguments are just pushed on stack (i.e., callee stack-frame is
+   // not setup)
+   oop interpreter_callee_receiver(symbolHandle signature)     { return *interpreter_callee_receiver_addr(signature); }
+ 
+@@ -267,10 +259,10 @@
+   static  jint  interpreter_frame_expression_stack_direction();
+ 
+   // The _at version returns a pointer because the address is used for GC.
+-  intptr_t* interpreter_frame_expression_stack_at(jint offset) const;  
+-  Tag       interpreter_frame_expression_stack_tag(jint offset) const;  
+-  void      interpreter_frame_set_expression_stack_tag(jint offset, Tag tag) const;  
+- 
++  intptr_t* interpreter_frame_expression_stack_at(jint offset) const;
++  Tag       interpreter_frame_expression_stack_tag(jint offset) const;
++  void      interpreter_frame_set_expression_stack_tag(jint offset, Tag tag) const;
++
+   // top of expression stack
+   intptr_t* interpreter_frame_tos_at(jint offset) const;
+   intptr_t* interpreter_frame_tos_address() const;
+@@ -279,7 +271,12 @@
+   jint  interpreter_frame_expression_stack_size() const;
+ 
+   intptr_t* interpreter_frame_sender_sp() const;
++
++#ifndef CC_INTERP
++  // template based interpreter deoptimization support
+   void  set_interpreter_frame_sender_sp(intptr_t* sender_sp);
++  void interpreter_frame_set_monitor_end(BasicObjectLock* value);
++#endif // CC_INTERP
+ 
+   // BasicObjectLocks:
+   //
+@@ -296,7 +293,6 @@
+   BasicObjectLock* previous_monitor_in_interpreter_frame(BasicObjectLock* current) const;
+   static int interpreter_frame_monitor_size();
+ 
+-  void interpreter_frame_set_monitor_end(BasicObjectLock* value);
+   void interpreter_frame_verify_monitor(BasicObjectLock* value) const;
+ 
+   // Tells whether the current interpreter_frame frame pointer
+@@ -308,9 +304,9 @@
+   // If the method return type is T_OBJECT or T_ARRAY populates oop_result
+   // For other (non-T_VOID) the appropriate field in the jvalue is populated
+   // with the result value.
+-  // Should only be called when at method exit when the method is not 
++  // Should only be called at method exit when the method is not
+   // exiting due to an exception.
+-  BasicType interpreter_frame_result(oop* oop_result, jvalue* value_result); 
++  BasicType interpreter_frame_result(oop* oop_result, jvalue* value_result);
+ 
+  public:
+   // Method & constant pool cache
+@@ -382,18 +378,18 @@
+   void oops_interpreted_locals_do(OopClosure *f,
+                                  int max_locals,
+                                  InterpreterOopMap *mask);
+-  void oops_interpreted_expressions_do(OopClosure *f, symbolHandle signature, 
++  void oops_interpreted_expressions_do(OopClosure *f, symbolHandle signature,
+                                  bool is_static, int max_stack, int max_locals,
+                                  InterpreterOopMap *mask);
+   void oops_interpreted_arguments_do(symbolHandle signature, bool is_static, OopClosure* f);
+ 
+   // Iteration of oops
+   void oops_do_internal(OopClosure* f, RegisterMap* map, bool use_interpreter_oop_map_cache);
+-  void oops_entry_do(OopClosure* f, const RegisterMap* map);  
+-  void oops_code_blob_do(OopClosure* f, const RegisterMap* map);    
+-  int adjust_offset(methodOop method, int index); // helper for above fn  
++  void oops_entry_do(OopClosure* f, const RegisterMap* map);
++  void oops_code_blob_do(OopClosure* f, const RegisterMap* map);
++  int adjust_offset(methodOop method, int index); // helper for above fn
+   // Iteration of nmethods
+-  void nmethods_code_blob_do();    
++  void nmethods_code_blob_do();
+  public:
+   // Memory management
+   void oops_do(OopClosure* f, RegisterMap* map) { oops_do_internal(f, map, true); }
+@@ -450,7 +446,7 @@
+ // a safepoint, all registers are saved, not only the callee-saved ones.
+ //
+ // Use:
+-//   
++//
+ //   for(StackFrameStream fst(thread); !fst.is_done(); fst.next()) {
+ //     ...
+ //   }
+@@ -459,7 +455,7 @@
+  private:
+   frame       _fr;
+   RegisterMap _reg_map;
+-  bool        _is_done;  
++  bool        _is_done;
+  public:
+    StackFrameStream(JavaThread *thread, bool update = true);
+ 
+@@ -471,4 +467,3 @@
+   frame *current()                { return &_fr; }
+   RegisterMap* register_map()     { return &_reg_map; }
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/frame.inline.hpp openjdk/hotspot/src/share/vm/runtime/frame.inline.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/frame.inline.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/frame.inline.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)frame.inline.hpp	1.23 07/05/05 17:06:47 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This file holds platform-independent bodies of inline functions for frames.
+@@ -39,8 +36,8 @@
+ 
+ inline bool frame::is_bci(intptr_t bcx) {
+ #ifdef _LP64
+-  return ((uintptr_t) bcx) <= ((uintptr_t) max_method_code_size) ; 
+-#else	
++  return ((uintptr_t) bcx) <= ((uintptr_t) max_method_code_size) ;
++#else
+   return 0 <= bcx && bcx <= max_method_code_size;
+ #endif
+ }
+@@ -50,7 +47,7 @@
+ }
+ 
+ inline bool frame::is_first_frame() const {
+-  return is_entry_frame() && entry_frame_is_first(); 
++  return is_entry_frame() && entry_frame_is_first();
+ }
+ 
+ // here are the platform-dependent bodies:
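
For context: frame::is_bci() above distinguishes a bytecode index from a bytecode pointer purely by magnitude. During GC an interpreter frame's bcx slot is rewritten from a bcp (a pointer into the methodOop) to a bci (a small index) so the methodOop can move; see frame::gc_prologue() earlier in this patch. A minimal standalone sketch, with the 64K bound stated as an assumption (it mirrors the class-file limit on bytecodes per method):

    #include <cstdint>

    const intptr_t max_method_code_size = 64 * 1024;  // assumed bound

    inline bool is_bci_sketch(intptr_t bcx) {
      // Indices are at most ~64K; any real bytecode address is far larger,
      // so one unsigned comparison separates the two (and rejects negatives).
      return (uintptr_t)bcx <= (uintptr_t)max_method_code_size;
    }
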
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/globals.cpp openjdk/hotspot/src/share/vm/runtime/globals.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/globals.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/globals.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)globals.cpp	1.49 07/05/17 16:05:26 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -75,8 +72,8 @@
+     while ((eol = strchr(cp, '\n')) != NULL) {
+       char format_buffer[FORMAT_BUFFER_LEN];
+       size_t llen = pointer_delta(eol, cp, sizeof(char));
+-      jio_snprintf(format_buffer, FORMAT_BUFFER_LEN, 
+-		   "%%." SIZE_FORMAT "s", llen);
++      jio_snprintf(format_buffer, FORMAT_BUFFER_LEN,
++                   "%%." SIZE_FORMAT "s", llen);
+       st->print(format_buffer, cp);
+       st->cr();
+       cp = eol+1;
+@@ -98,15 +95,15 @@
+   } else if (is_ccstr()) {
+     st->print("-XX:%s=", name);
+     // Need to turn embedded '\n's back into separate arguments
+-    // Not so efficient to print one character at a time, 
+-    // but the choice is to do the transformation to a buffer 
++    // Not so efficient to print one character at a time,
++    // but the choice is to do the transformation to a buffer
+     // and print that.  And this need not be efficient.
+     for (const char* cp = get_ccstr(); *cp != '\0'; cp += 1) {
+       switch (*cp) {
+       default:
+         st->print("%c", *cp);
+         break;
+-      case '\n': 
++      case '\n':
+         st->print(" -XX:%s=", name);
+         break;
+       }
+@@ -126,24 +123,24 @@
+ #define RUNTIME_PRODUCT_RW_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{product rw}", DEFAULT },
+ 
+ #ifdef PRODUCT
+-  #define RUNTIME_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */ 
+-  #define RUNTIME_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     /* flag is constant */ 
+-  #define RUNTIME_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) 
++  #define RUNTIME_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
++  #define RUNTIME_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     /* flag is constant */
++  #define RUNTIME_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc)
+ #else
+   #define RUNTIME_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "", DEFAULT },
+-  #define RUNTIME_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, "{pd}", DEFAULT }, 
++  #define RUNTIME_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, "{pd}", DEFAULT },
+   #define RUNTIME_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{notproduct}", DEFAULT },
+ #endif
+ 
+ #define C1_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C1 product}", DEFAULT },
+ #define C1_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, "{C1 pd product}", DEFAULT },
+ #ifdef PRODUCT
+-  #define C1_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */ 
+-  #define C1_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     /* flag is constant */ 
+-  #define C1_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) 
++  #define C1_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
++  #define C1_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     /* flag is constant */
++  #define C1_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc)
+ #else
+   #define C1_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C1}", DEFAULT },
+-  #define C1_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, "{C1 pd}", DEFAULT }, 
++  #define C1_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, "{C1 pd}", DEFAULT },
+   #define C1_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C1 notproduct}", DEFAULT },
+ #endif
+ 
+@@ -152,12 +149,12 @@
+ #define C2_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, "{C2 pd product}", DEFAULT },
+ #define C2_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C2 diagnostic}", DEFAULT },
+ #ifdef PRODUCT
+-  #define C2_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */ 
+-  #define C2_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     /* flag is constant */ 
+-  #define C2_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) 
++  #define C2_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
++  #define C2_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     /* flag is constant */
++  #define C2_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc)
+ #else
+   #define C2_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C2}", DEFAULT },
+-  #define C2_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, "{C2 pd}", DEFAULT }, 
++  #define C2_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, "{C2 pd}", DEFAULT },
+   #define C2_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C2 notproduct}", DEFAULT },
+ #endif
+ 
+@@ -187,8 +184,8 @@
+   for (Flag* current = &flagTable[0]; current->name; current++) {
+     if (str_equal(current->name, name, length)) {
+       if (!(current->is_unlocked() || current->is_unlocker())) {
+-	// disable use of diagnostic flags until they are unlocked
+-	return NULL;
++        // disable use of diagnostic flags until they are unlocked
++        return NULL;
+       }
+       return current;
+     }
+@@ -218,7 +215,7 @@
+ bool CommandLineFlags::boolAt(char* name, size_t len, bool* value) {
+   Flag* result = Flag::find_flag(name, len);
+   if (result == NULL) return false;
+-  if (!result->is_bool()) return false;   
++  if (!result->is_bool()) return false;
+   *value = result->get_bool();
+   return true;
+ }
+@@ -226,9 +223,9 @@
+ bool CommandLineFlags::boolAtPut(char* name, size_t len, bool* value, FlagValueOrigin origin) {
+   Flag* result = Flag::find_flag(name, len);
+   if (result == NULL) return false;
+-  if (!result->is_bool()) return false;   
++  if (!result->is_bool()) return false;
+   bool old_value = result->get_bool();
+-  result->set_bool(*value); 
++  result->set_bool(*value);
+   *value = old_value;
+   result->origin = origin;
+   return true;
+@@ -237,14 +234,14 @@
+ void CommandLineFlagsEx::boolAtPut(CommandLineFlagWithType flag, bool value, FlagValueOrigin origin) {
+   Flag* faddr = address_of_flag(flag);
+   guarantee(faddr != NULL && faddr->is_bool(), "wrong flag type");
+-  faddr->set_bool(value); 
++  faddr->set_bool(value);
+   faddr->origin = origin;
+ }
+ 
+ bool CommandLineFlags::intxAt(char* name, size_t len, intx* value) {
+   Flag* result = Flag::find_flag(name, len);
+   if (result == NULL) return false;
+-  if (!result->is_intx()) return false;   
++  if (!result->is_intx()) return false;
+   *value = result->get_intx();
+   return true;
+ }
+@@ -252,9 +249,9 @@
+ bool CommandLineFlags::intxAtPut(char* name, size_t len, intx* value, FlagValueOrigin origin) {
+   Flag* result = Flag::find_flag(name, len);
+   if (result == NULL) return false;
+-  if (!result->is_intx()) return false;   
++  if (!result->is_intx()) return false;
+   intx old_value = result->get_intx();
+-  result->set_intx(*value); 
++  result->set_intx(*value);
+   *value = old_value;
+   result->origin = origin;
+   return true;
+@@ -263,7 +260,7 @@
+ void CommandLineFlagsEx::intxAtPut(CommandLineFlagWithType flag, intx value, FlagValueOrigin origin) {
+   Flag* faddr = address_of_flag(flag);
+   guarantee(faddr != NULL && faddr->is_intx(), "wrong flag type");
+-  faddr->set_intx(value); 
++  faddr->set_intx(value);
+   faddr->origin = origin;
+ }
+ 
+@@ -337,7 +334,7 @@
+   char* new_value = NEW_C_HEAP_ARRAY(char, strlen(*value)+1);
+   strcpy(new_value, *value);
+   result->set_ccstr(new_value);
+-  if (result->origin == DEFAULT) {
++  if (result->origin == DEFAULT && old_value != NULL) {
+     // Prior value is NOT heap allocated, but was a literal constant.
+     char* old_value_to_free = NEW_C_HEAP_ARRAY(char, strlen(old_value)+1);
+     strcpy(old_value_to_free, old_value);
+@@ -356,7 +353,7 @@
+   char* new_value = NEW_C_HEAP_ARRAY(char, strlen(value)+1);
+   strcpy(new_value, value);
+   faddr->set_ccstr(new_value);
+-  if (faddr->origin != DEFAULT) {
++  if (faddr->origin != DEFAULT && old_value != NULL) {
+     // Prior value is heap allocated so free it.
+     FREE_C_HEAP_ARRAY(char, old_value);
+   }
+@@ -430,4 +427,3 @@
+ }
+ 
+ #endif
+-
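
For context: the two guarded hunks above tighten the ownership rule for string flags. A ccstr flag's default value points at a string literal baked into the flag table; once set at runtime it points at a C-heap copy, and only the latter may be freed on update. The new old_value != NULL guards additionally keep a NULL default from being strlen'd or freed. A condensed paraphrase of the CommandLineFlagsEx::ccstrAtPut path (illustrative, not the verbatim source):

    void ccstrAtPut_sketch(Flag* faddr, ccstr value, FlagValueOrigin origin) {
      ccstr old_value = faddr->get_ccstr();
      char* new_value = NEW_C_HEAP_ARRAY(char, strlen(value) + 1);  // heap copy owned by the flag
      strcpy(new_value, value);
      faddr->set_ccstr(new_value);
      if (faddr->origin != DEFAULT && old_value != NULL) {
        FREE_C_HEAP_ARRAY(char, old_value);  // prior value was heap-allocated
      }
      faddr->origin = origin;
    }
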
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/globals_extension.hpp openjdk/hotspot/src/share/vm/runtime/globals_extension.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/globals_extension.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/globals_extension.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)globals_extension.hpp	1.17 07/05/17 16:05:46 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Construct enum of Flag_<cmdline-arg> constants.
+@@ -36,9 +33,9 @@
+ #define RUNTIME_MANAGEABLE_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
+ #define RUNTIME_PRODUCT_RW_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
+ #ifdef PRODUCT
+-  #define RUNTIME_DEVELOP_FLAG_MEMBER(type, name, value, doc)  /* flag is constant */ 
+-  #define RUNTIME_PD_DEVELOP_FLAG_MEMBER(type, name, doc)      /* flag is constant */ 
+-  #define RUNTIME_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc) 
++  #define RUNTIME_DEVELOP_FLAG_MEMBER(type, name, value, doc)  /* flag is constant */
++  #define RUNTIME_PD_DEVELOP_FLAG_MEMBER(type, name, doc)      /* flag is constant */
++  #define RUNTIME_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)
+ #else
+   #define RUNTIME_DEVELOP_FLAG_MEMBER(type, name, value, doc)  FLAG_MEMBER(name),
+   #define RUNTIME_PD_DEVELOP_FLAG_MEMBER(type, name, doc)      FLAG_MEMBER(name),
+@@ -48,9 +45,9 @@
+ #define C1_PRODUCT_FLAG_MEMBER(type, name, value, doc)         FLAG_MEMBER(name),
+ #define C1_PD_PRODUCT_FLAG_MEMBER(type, name, doc)             FLAG_MEMBER(name),
+ #ifdef PRODUCT
+-  #define C1_DEVELOP_FLAG_MEMBER(type, name, value, doc)       /* flag is constant */ 
+-  #define C1_PD_DEVELOP_FLAG_MEMBER(type, name, doc)           /* flag is constant */ 
+-  #define C1_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc) 
++  #define C1_DEVELOP_FLAG_MEMBER(type, name, value, doc)       /* flag is constant */
++  #define C1_PD_DEVELOP_FLAG_MEMBER(type, name, doc)           /* flag is constant */
++  #define C1_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)
+ #else
+   #define C1_DEVELOP_FLAG_MEMBER(type, name, value, doc)       FLAG_MEMBER(name),
+   #define C1_PD_DEVELOP_FLAG_MEMBER(type, name, doc)           FLAG_MEMBER(name),
+@@ -62,9 +59,9 @@
+ #define C2_PD_PRODUCT_FLAG_MEMBER(type, name, doc)             FLAG_MEMBER(name),
+ #define C2_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc)      FLAG_MEMBER(name),
+ #ifdef PRODUCT
+-  #define C2_DEVELOP_FLAG_MEMBER(type, name, value, doc)       /* flag is constant */ 
+-  #define C2_PD_DEVELOP_FLAG_MEMBER(type, name, doc)           /* flag is constant */ 
+-  #define C2_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc) 
++  #define C2_DEVELOP_FLAG_MEMBER(type, name, value, doc)       /* flag is constant */
++  #define C2_PD_DEVELOP_FLAG_MEMBER(type, name, doc)           /* flag is constant */
++  #define C2_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)
+ #else
+   #define C2_DEVELOP_FLAG_MEMBER(type, name, value, doc)       FLAG_MEMBER(name),
+   #define C2_PD_DEVELOP_FLAG_MEMBER(type, name, doc)           FLAG_MEMBER(name),
+@@ -99,9 +96,9 @@
+ #define RUNTIME_MANAGEABLE_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+ #define RUNTIME_PRODUCT_RW_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+ #ifdef PRODUCT
+-  #define RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)     /* flag is constant */ 
+-  #define RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)         /* flag is constant */ 
+-  #define RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) 
++  #define RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)     /* flag is constant */
++  #define RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)         /* flag is constant */
++  #define RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)
+ #else
+   #define RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)     FLAG_MEMBER_WITH_TYPE(name,type),
+   #define RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)         FLAG_MEMBER_WITH_TYPE(name,type),
+@@ -111,9 +108,9 @@
+ #define C1_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)         FLAG_MEMBER_WITH_TYPE(name,type),
+ #define C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc)             FLAG_MEMBER_WITH_TYPE(name,type),
+ #ifdef PRODUCT
+-  #define C1_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)       /* flag is constant */ 
+-  #define C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)           /* flag is constant */ 
+-  #define C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) 
++  #define C1_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)       /* flag is constant */
++  #define C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)           /* flag is constant */
++  #define C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)
+ #else
+   #define C1_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)       FLAG_MEMBER_WITH_TYPE(name,type),
+   #define C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)           FLAG_MEMBER_WITH_TYPE(name,type),
+@@ -125,9 +122,9 @@
+ #define C2_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc)             FLAG_MEMBER_WITH_TYPE(name,type),
+ #define C2_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)      FLAG_MEMBER_WITH_TYPE(name,type),
+ #ifdef PRODUCT
+-  #define C2_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)       /* flag is constant */ 
+-  #define C2_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)           /* flag is constant */ 
+-  #define C2_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) 
++  #define C2_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)       /* flag is constant */
++  #define C2_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)           /* flag is constant */
++  #define C2_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)
+ #else
+   #define C2_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)       FLAG_MEMBER_WITH_TYPE(name,type),
+   #define C2_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)           FLAG_MEMBER_WITH_TYPE(name,type),
+@@ -136,15 +133,15 @@
+ 
+ typedef enum {
+  RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE, RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE,
+-	       RUNTIME_PRODUCT_FLAG_MEMBER_WITH_TYPE, RUNTIME_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE,
++               RUNTIME_PRODUCT_FLAG_MEMBER_WITH_TYPE, RUNTIME_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE,
+                RUNTIME_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE,
+- 	       RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE,
++               RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE,
+                RUNTIME_MANAGEABLE_FLAG_MEMBER_WITH_TYPE,
+                RUNTIME_PRODUCT_RW_FLAG_MEMBER_WITH_TYPE)
+ RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE, RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE,
+-	       RUNTIME_PRODUCT_FLAG_MEMBER_WITH_TYPE, RUNTIME_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE,
++               RUNTIME_PRODUCT_FLAG_MEMBER_WITH_TYPE, RUNTIME_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE,
+                RUNTIME_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE,
+- 	       RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
++               RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
+ #ifdef COMPILER1
+  C1_FLAGS(C1_DEVELOP_FLAG_MEMBER_WITH_TYPE, C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE, C1_PRODUCT_FLAG_MEMBER_WITH_TYPE,
+           C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE, C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
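
For context: globals_extension.hpp is X-macro machinery, and the hunks above only reindent it. RUNTIME_FLAGS, C1_FLAGS and C2_FLAGS each apply a caller-supplied macro to every flag in the table, so passing the FLAG_MEMBER-emitting macros turns the table into an enum of Flag_<name> constants. A toy analogue of the pattern (all names invented):

    #define MY_FLAGS(develop, product)             \
      develop(bool, TraceFoo, false, "trace foo")  \
      product(int,  FooLimit, 42,    "foo limit")

    #define FLAG_MEMBER(name) Flag_##name
    #define MY_DEVELOP_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
    #define MY_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),

    typedef enum {
      MY_FLAGS(MY_DEVELOP_FLAG_MEMBER, MY_PRODUCT_FLAG_MEMBER)
      NUM_MY_FLAGS   // expands to: Flag_TraceFoo, Flag_FooLimit, NUM_MY_FLAGS
    } MyFlags;
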
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/globals.hpp openjdk/hotspot/src/share/vm/runtime/globals.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/globals.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/globals.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)globals.hpp	1.967 07/07/13 14:51:27 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,14 +19,14 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #if !defined(COMPILER1) && !defined(COMPILER2)
+ define_pd_global(bool, BackgroundCompilation,        false);
+ define_pd_global(bool, UseTLAB,                      false);
+ define_pd_global(bool, CICompileOSR,                 false);
+-define_pd_global(bool, UseTypeProfile,               false); 
++define_pd_global(bool, UseTypeProfile,               false);
+ define_pd_global(bool, UseOnStackReplacement,        false);
+ define_pd_global(bool, InlineIntrinsics,             false);
+ define_pd_global(bool, PreferInterpreterNativeStubs, true);
+@@ -37,21 +34,21 @@
+ define_pd_global(bool, ProfileTraps,                 false);
+ define_pd_global(bool, TieredCompilation,            false);
+ 
+-define_pd_global(intx, CompileThreshold,	     0);
++define_pd_global(intx, CompileThreshold,             0);
+ define_pd_global(intx, Tier2CompileThreshold,        0);
+ define_pd_global(intx, Tier3CompileThreshold,        0);
+ define_pd_global(intx, Tier4CompileThreshold,        0);
+ 
+-define_pd_global(intx, BackEdgeThreshold,	     0);
++define_pd_global(intx, BackEdgeThreshold,            0);
+ define_pd_global(intx, Tier2BackEdgeThreshold,       0);
+ define_pd_global(intx, Tier3BackEdgeThreshold,       0);
+ define_pd_global(intx, Tier4BackEdgeThreshold,       0);
+ 
+ define_pd_global(intx, OnStackReplacePercentage,     0);
+-define_pd_global(bool, ResizeTLAB,		     false);
+-define_pd_global(intx, FreqInlineSize,		     0);
+-define_pd_global(intx, NewSizeThreadIncrease,	     4*K);
+-define_pd_global(intx, NewRatio,	     	     4);
++define_pd_global(bool, ResizeTLAB,                   false);
++define_pd_global(intx, FreqInlineSize,               0);
++define_pd_global(intx, NewSizeThreadIncrease,        4*K);
++define_pd_global(intx, NewRatio,                     4);
+ define_pd_global(intx, InlineClassNatives,           true);
+ define_pd_global(intx, InlineUnsafeOps,              true);
+ define_pd_global(intx, InitialCodeCacheSize,         160*K);
+@@ -61,6 +58,7 @@
+ define_pd_global(uintx,PermSize,    ScaleForWordSize(4*M));
+ define_pd_global(uintx,MaxPermSize, ScaleForWordSize(64*M));
+ define_pd_global(bool, NeverActAsServerClassMachine, true);
++define_pd_global(uintx, DefaultMaxRAM,               1*G);
+ #define CI_COMPILER_COUNT 0
+ #else
+ 
+@@ -73,7 +71,9 @@
+ #endif // no compilers
+ 
+ 
+-typedef const char* ccstr;  // string type alias used only in this file
++// string type aliases used only in this file
++typedef const char* ccstr;
++typedef const char* ccstrlist;   // represents string arguments which accumulate
+ 
+ enum FlagValueOrigin {
+   DEFAULT          = 0,
+@@ -117,7 +117,8 @@
+   double get_double() const     { return *((double*) addr); }
+   void set_double(double value) { *((double*) addr) = value; }
+ 
+-  bool is_ccstr() const       { return strcmp(type, "ccstr") == 0; }
++  bool is_ccstr() const          { return strcmp(type, "ccstr") == 0 || strcmp(type, "ccstrlist") == 0; }
++  bool ccstr_accumulates() const { return strcmp(type, "ccstrlist") == 0; }
+   ccstr get_ccstr() const     { return *((ccstr*) addr); }
+   void set_ccstr(ccstr value) { *((ccstr*) addr) = value; }
+ 
+@@ -133,7 +134,7 @@
+ // debug flags control various aspects of the VM and are global accessible
+ 
+ // use FlagSetting to temporarily change some debug flag
+-// e.g. FlagSetting fs(DebugThisAndThat, true);   
++// e.g. FlagSetting fs(DebugThisAndThat, true);
+ // restored to previous value upon leaving scope
+ class FlagSetting {
+   bool val;
+@@ -202,7 +203,7 @@
+   static void printSetFlags();
+ 
+   static void printFlags() PRODUCT_RETURN;
+-  
++
+   static void verify() PRODUCT_RETURN;
+ };
+ 
+@@ -256,28 +257,28 @@
+ // option, you must first specify +UnlockDiagnosticVMOptions.
+ // (This master switch also affects the behavior of -Xprintflags.)
+ 
+-// manageable flags are writeable external product flags. 
+-//    They are dynamically writeable through the JDK management interface 
+-//    (com.sun.management.HotSpotDiagnosticMXBean API) and also through JConsole. 
+-//    These flags are external exported interface (see CCC).  The list of 
++// manageable flags are writeable external product flags.
++//    They are dynamically writeable through the JDK management interface
++//    (com.sun.management.HotSpotDiagnosticMXBean API) and also through JConsole.
++//    These flags are external exported interface (see CCC).  The list of
+ //    manageable flags can be queried programmatically through the management
+ //    interface.
+ //
+-//    A flag can be made as "manageable" only if 
++//    A flag can be made as "manageable" only if
+ //    - the flag is defined in a CCC as an external exported interface.
+ //    - the VM implementation supports dynamic setting of the flag.
+ //      This implies that the VM must *always* query the flag variable
+ //      and not reuse state related to the flag state at any given time.
+ //    - you want the flag to be queried programmatically by the customers.
+-// 
++//
+ // product_rw flags are writeable internal product flags.
+ //    They are like "manageable" flags but for internal/private use.
+ //    The list of product_rw flags are internal/private flags which
+-//    may be changed/removed in a future release.  It can be set 
++//    may be changed/removed in a future release.  It can be set
+ //    through the management interface to get/set value
+ //    when the name of flag is supplied.
+-// 
+-//    A flag can be made as "product_rw" only if 
++//
++//    A flag can be made as "product_rw" only if
+ //    - the VM implementation supports dynamic setting of the flag.
+ //      This implies that the VM must *always* query the flag variable
+ //      and not reuse state related to the flag state at any given time.
+@@ -305,6 +306,9 @@
+   product_pd(bool, UseLargePages,                                           \
+           "Use large page memory")                                          \
+                                                                             \
++  develop(bool, TracePageSizes, false,                                      \
++          "Trace page size selection and usage.")                           \
++                                                                            \
+   product(bool, UseNUMA, false,                                             \
+           "Use NUMA if available")                                          \
+                                                                             \
+@@ -333,10 +337,10 @@
+                                                                             \
+   product(uintx, LargePageSizeInBytes, 0,                                   \
+           "Large page size (0 to let VM choose the page size")              \
+-                                               				    \
++                                                                            \
+   product(uintx, LargePageHeapSizeThreshold, 128*M,                         \
+-          "Use large pages if max heap is at least this big")		    \
+-                                               				    \
++          "Use large pages if max heap is at least this big")               \
++                                                                            \
+   product(bool, ForceTimeHighResolution, false,                             \
+           "Using high time resolution(For Win32 only)")                     \
+                                                                             \
+@@ -389,6 +393,9 @@
+   develop(bool, VerifyStack, false,                                         \
+           "Verify stack of each thread when it is entering a runtime call") \
+                                                                             \
++  develop(bool, ForceUnreachable, false,                                    \
++          "(amd64) Make all non code cache addresses to be unreachable with rip-rel forcing use of 64bit literal fixups") \
++                                                                            \
+   notproduct(bool, StressDerivedPointers, false,                            \
+           "Force scavenge when a derived pointers is detected on stack "    \
+           "after rtm call")                                                 \
+@@ -459,7 +466,7 @@
+   develop(bool, DeoptimizeALot, false,                                      \
+           "deoptimize at every exit from the runtime system")               \
+                                                                             \
+-  develop(ccstr, DeoptimizeOnlyAt, "",                                      \
++  develop(ccstrlist, DeoptimizeOnlyAt, "",                                  \
+           "a comma separated list of bcis to deoptimize at")                \
+                                                                             \
+   product(bool, DeoptimizeRandom, false,                                    \
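The ccstr-to-ccstrlist change above is behavioral, not cosmetic: a ccstr option replaces any earlier value, while a ccstrlist option accumulates when the -XX: option is repeated on the command line. A minimal sketch of that accumulation; the newline join is an assumption for illustration, not a quote of the argument-parsing code:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // Append a repeated option value instead of replacing the old one.
    char* append_flag_value(const char* old_val, const char* new_val) {
      if (old_val == nullptr || *old_val == '\0') {
        char* copy = (char*)malloc(strlen(new_val) + 1);
        strcpy(copy, new_val);
        return copy;
      }
      size_t n = strlen(old_val) + 1 + strlen(new_val) + 1;
      char* joined = (char*)malloc(n);
      snprintf(joined, n, "%s\n%s", old_val, new_val);
      return joined;
    }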
+@@ -497,7 +504,7 @@
+   develop(bool, ShowSafepointMsgs, false,                                   \
+           "Show msg. about safepoint synch.")                               \
+                                                                             \
+-  develop(bool, SafepointTimeout, false,                                    \
++  product(bool, SafepointTimeout, false,                                    \
+           "Time out and warn or fail after SafepointTimeoutDelay "          \
+           "milliseconds if failed to reach safepoint")                      \
+                                                                             \
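Promoting SafepointTimeout from develop to product makes the timeout diagnostic available in release builds. A hedged sketch of the check it enables, assuming the flag globals from this file; now_millis() and the message text are illustrative:

    #include <cstdio>

    void check_safepoint_timeout(long start_ms, int threads_not_stopped) {
      if (SafepointTimeout &&
          now_millis() - start_ms > SafepointTimeoutDelay) {
        fprintf(stderr,
                "# SafepointSynchronize: timed out, %d thread(s) have "
                "not reached the safepoint\n", threads_not_stopped);
      }
    }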
+@@ -584,12 +591,12 @@
+                                                                             \
+   develop(bool, PrintVMMessages, true,                                      \
+           "Print vm messages on console")                                   \
+-									    \
+-  product(bool, PrintGCApplicationConcurrentTime, false,		    \
+-	  "Print the time the application has been running") 		    \
+-									    \
+-  product(bool, PrintGCApplicationStoppedTime, false,			    \
+-	  "Print the time the application has been stopped") 		    \
++                                                                            \
++  product(bool, PrintGCApplicationConcurrentTime, false,                    \
++          "Print the time the application has been running")                \
++                                                                            \
++  product(bool, PrintGCApplicationStoppedTime, false,                       \
++          "Print the time the application has been stopped")                \
+                                                                             \
+   develop(bool, Verbose, false,                                             \
+           "Prints additional debugging information from other modes")       \
+@@ -609,17 +616,17 @@
+   product(bool, SuppressFatalErrorMessage, false,                           \
+           "Do NO Fatal Error report [Avoid deadlock]")                      \
+                                                                             \
+-  product(ccstr, OnError, "",                                               \
++  product(ccstrlist, OnError, "",                                           \
+           "Run user-defined commands on fatal error; see VMError.cpp "      \
+           "for examples")                                                   \
+-									    \
+-  product(ccstr, OnOutOfMemoryError, "",                                    \
++                                                                            \
++  product(ccstrlist, OnOutOfMemoryError, "",                                \
+           "Run user-defined commands on first java.lang.OutOfMemoryError")  \
+                                                                             \
+   manageable(bool, HeapDumpOnOutOfMemoryError, false,                       \
+           "Dump heap to file when java.lang.OutOfMemoryError is thrown")    \
+                                                                             \
+-  manageable(ccstr, HeapDumpPath, "",                                       \
++  manageable(ccstr, HeapDumpPath, NULL,                                     \
+           "When HeapDumpOnOutOfMemoryError is on, the path (filename or"    \
+           "directory) of the dump file (defaults to java_pid<pid>.hprof"    \
+           "in the working directory)")                                      \
+@@ -823,8 +830,6 @@
+   product(intx, FenceInstruction, 0,                                        \
+           "(Unsafe,Unstable) Experimental")                                 \
+                                                                             \
+-  product(intx, AppendRatio, 11, "(Unstable) Monitor queue fairness" )      \
+-                                                                            \
+   product(intx, SyncFlags, 0, "(Unsafe,Unstable) Experimental Sync flags" ) \
+                                                                             \
+   product(intx, SyncVerbose, 0, "(Unstable)" )                              \
+@@ -839,8 +844,12 @@
+          " avoid NPTL-FUTEX hang pthread_cond_timedwait" )                  \
+                                                                             \
+   product(bool, FilterSpuriousWakeups , true,                               \
+-	  "Prevent spurious or premature wakeups from object.wait"          \
+-	  "(Solaris only)")                                                 \
++          "Prevent spurious or premature wakeups from object.wait"              \
++          "(Solaris only)")                                                     \
++                                                                            \
++  product(intx, NativeMonitorTimeout, -1, "(Unstable)" )                    \
++  product(intx, NativeMonitorFlags, 0, "(Unstable)" )                       \
++  product(intx, NativeMonitorSpinLimit, 20, "(Unstable)" )                  \
+                                                                             \
+   develop(bool, UsePthreads, false,                                         \
+           "Use pthread-based instead of libthread-based synchronization "   \
+@@ -862,7 +871,7 @@
+                                                                             \
+   notproduct(bool, RecordMarkSweepCompaction, false,                        \
+           "Enable GC-to-GC recording and querying of compaction during "    \
+-          "MarkSweep")							    \
++          "MarkSweep")                                                      \
+                                                                             \
+   develop_pd(bool, ShareVtableStubs,                                        \
+           "Share vtable stubs (smaller code but worse branch prediction")   \
+@@ -919,7 +928,7 @@
+   product(bool, PreserveAllAnnotations, false,                              \
+           "Preserve RuntimeInvisibleAnnotations as well as RuntimeVisibleAnnotations") \
+                                                                             \
+-  develop(uintx, PreallocatedOutOfMemoryErrorCount, 4,			    \
++  develop(uintx, PreallocatedOutOfMemoryErrorCount, 4,                      \
+           "Number of OutOfMemoryErrors preallocated with backtrace")        \
+                                                                             \
+   product(bool, LazyBootClassLoader, true,                                  \
+@@ -1063,7 +1072,7 @@
+   develop(bool, TraceHPI, false,                                            \
+           "Trace Host Porting Interface (HPI)")                             \
+                                                                             \
+-  product(ccstr, HPILibPath, "",                                            \
++  product(ccstr, HPILibPath, NULL,                                          \
+           "Specify alternate path to HPI library")                          \
+                                                                             \
+   develop(bool, TraceProtectionDomainVerification, false,                   \
+@@ -1093,39 +1102,39 @@
+   product(bool, UseParallelGC, false,                                       \
+           "Use the Parallel Scavenge garbage collector")                    \
+                                                                             \
+-  product(bool, UseParallelOldGC, false,				    \
+-	  "Use the Parallel Old garbage collector")			    \
++  product(bool, UseParallelOldGC, false,                                    \
++          "Use the Parallel Old garbage collector")                         \
+                                                                             \
+-  product(bool, UseParallelOldGCCompacting, true,			    \
+-	  "In the Parallel Old garbage collector use parallel compaction")  \
++  product(bool, UseParallelOldGCCompacting, true,                           \
++          "In the Parallel Old garbage collector use parallel compaction")  \
+                                                                             \
+-  product(bool, UseParallelDensePrefixUpdate, true,			    \
+-	  "In the Parallel Old garbage collector use parallel dense"        \
+-	  " prefix update")                                                 \
+-                                                                            \
+-  develop(bool, UseParallelOldGCChunkPointerCalc, true,			    \
+-	  "In the Parallel Old garbage collector use chucks to calculate"   \
+-	  " new object locations")                                          \
++  product(bool, UseParallelDensePrefixUpdate, true,                         \
++          "In the Parallel Old garbage collector use parallel dense"        \
++          " prefix update")                                                 \
++                                                                            \
++  develop(bool, UseParallelOldGCChunkPointerCalc, true,                     \
++          "In the Parallel Old garbage collector use chucks to calculate"   \
++          " new object locations")                                          \
+                                                                             \
+   product(uintx, HeapMaximumCompactionInterval, 20,                         \
+           "How often should we maximally compact the heap (not allowing "   \
+-	  "any dead space)")                                                \
++          "any dead space)")                                                \
+                                                                             \
+   product(uintx, HeapFirstMaximumCompactionCount, 3,                        \
+           "The collection count for the first maximum compaction")          \
+                                                                             \
+-  product(bool, UseMaximumCompactionOnSystemGC, true,	                    \
+-	  "In the Parallel Old garbage collector maximum compaction for "   \
+-	  "a system GC")                                                    \
+-									    \
+-  product(uintx, ParallelOldDeadWoodLimiterMean, 50,			    \
+-          "The mean used by the par compact dead wood"			    \
+-	  "limiter (a number between 0-100).")				    \
+-                                                                            \
+-  product(uintx, ParallelOldDeadWoodLimiterStdDev, 80,			    \
+-	  "The standard deviation used by the par compact dead wood"	    \
+-	  "limiter (a number between 0-100).")				    \
+-									    \
++  product(bool, UseMaximumCompactionOnSystemGC, true,                       \
++          "In the Parallel Old garbage collector maximum compaction for "   \
++          "a system GC")                                                    \
++                                                                            \
++  product(uintx, ParallelOldDeadWoodLimiterMean, 50,                        \
++          "The mean used by the par compact dead wood"                      \
++          "limiter (a number between 0-100).")                              \
++                                                                            \
++  product(uintx, ParallelOldDeadWoodLimiterStdDev, 80,                      \
++          "The standard deviation used by the par compact dead wood"        \
++          "limiter (a number between 0-100).")                              \
++                                                                            \
+   product(bool, UseParallelOldGCDensePrefix, true,                          \
+           "Use a dense prefix with the Parallel Old garbage collector")     \
+                                                                             \
+@@ -1140,7 +1149,7 @@
+                                                                             \
+   develop(uintx, VerifyParallelOldWithMarkSweepInterval, 1,                 \
+           "Interval at which the MarkSweep code is used to verify "         \
+-	  "phases of Parallel Old")                                         \
++          "phases of Parallel Old")                                         \
+                                                                             \
+   develop(bool, ParallelOldMTUnsafeMarkBitMap, false,                       \
+           "Use the Parallel Old MT unsafe in marking the bitmap")           \
+@@ -1151,7 +1160,7 @@
+   develop(bool, TraceChunkTasksQueuing, false,                              \
+           "Trace the queuing of the chunk tasks")                           \
+                                                                             \
+-  product(uintx, YoungPLABSize, 4096,                     		    \
++  product(uintx, YoungPLABSize, 4096,                                       \
+           "Size of young gen promotion labs (in HeapWords)")                \
+                                                                             \
+   product(uintx, OldPLABSize, 1024,                                         \
+@@ -1171,8 +1180,8 @@
+           "Scavenge youngest generation before each full GC,"               \
+           " used with UseParallelGC")                                       \
+                                                                             \
+-  develop(bool, ScavengeWithObjectsInToSpace, false,			    \
+-          "Allow scavenges to occur when to_space contains objects.")	    \
++  develop(bool, ScavengeWithObjectsInToSpace, false,                        \
++          "Allow scavenges to occur when to_space contains objects.")       \
+                                                                             \
+   product(bool, UseConcMarkSweepGC, false,                                  \
+           "Use Concurrent Mark-Sweep GC in the old generation")             \
+@@ -1187,19 +1196,19 @@
+           " (effective only when UseConcMarkSweepGC)")                      \
+                                                                             \
+   develop(bool, UseCMSAdaptiveFreeLists, true,                              \
+-          "Use Adaptive Free Lists in the CMS generation")      	    \
++          "Use Adaptive Free Lists in the CMS generation")                  \
+                                                                             \
+   develop(bool, UseAsyncConcMarkSweepGC, true,                              \
+           "Use Asynchronous Concurrent Mark-Sweep GC in the old generation")\
+                                                                             \
+   develop(bool, RotateCMSCollectionTypes, false,                            \
+-          "Rotate the CMS collections among concurrent and STW")	    \
+-									    \
+-  product(bool, UseCMSBestFit, true,                                  	    \
+-          "Use CMS best fit allocation strategy")			    \
++          "Rotate the CMS collections among concurrent and STW")            \
++                                                                            \
++  product(bool, UseCMSBestFit, true,                                        \
++          "Use CMS best fit allocation strategy")                           \
+                                                                             \
+   product(bool, UseCMSCollectionPassing, true,                              \
+-          "Use passing of collection from background to foreground")	    \
++          "Use passing of collection from background to foreground")        \
+                                                                             \
+   product(bool, UseParNewGC, false,                                         \
+           "Use parallel threads in the new generation.")                    \
+@@ -1216,10 +1225,10 @@
+   product(intx, TargetPLABWastePct, 10,                                     \
+           "target wasted space in last buffer as pct of overall allocation")\
+                                                                             \
+-  product(uintx, PLABWeight, 75,				    	    \
+-	  "Percentage (0-100) used to weight the current sample when"	    \
+-	  "computing exponentially decaying average for ResizePLAB.")       \
+-									    \
++  product(uintx, PLABWeight, 75,                                            \
++          "Percentage (0-100) used to weight the current sample when"       \
++          "computing exponentially decaying average for ResizePLAB.")       \
++                                                                            \
+   product(bool, ResizePLAB, true,                                           \
+           "Dynamically resize (survivor space) promotion labs")             \
+                                                                             \
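PLABWeight, and the CMSExpAvgFactor, CMS_FLSWeight, and CMS_SweepWeight entries further down, all parameterize the same exponentially decaying average: the weight is the percentage given to the newest sample. A small sketch in the style of HotSpot's weighted-average utility (not the actual class):

    double decayed_avg(double old_avg, double sample, unsigned weight_pct) {
      return ((100.0 - weight_pct) * old_avg + weight_pct * sample) / 100.0;
    }
    // With PLABWeight = 75, the newest PLAB sample contributes 75% and
    // history 25% to the estimate that drives ResizePLAB.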
+@@ -1234,87 +1243,87 @@
+                                                                             \
+   product(uintx, CMSParPromoteBlocksToClaim, 50,                            \
+           "Number of blocks to attempt to claim when refilling CMS LAB for "\
+-          "parallel GC.")				                    \
++          "parallel GC.")                                                   \
+                                                                             \
+   product(bool, AlwaysPreTouch, false,                                      \
+- 	  "It forces all freshly committed pages to be pre-touched.")       \
++          "It forces all freshly committed pages to be pre-touched.")       \
+                                                                             \
+-  product(bool, CMSUseOldDefaults, false,                  	            \
+-	  "A flag temporarily  introduced to allow reverting to some older" \
++  product(bool, CMSUseOldDefaults, false,                                   \
++          "A flag temporarily  introduced to allow reverting to some older" \
+           "default settings; older as of 6.0 ")                             \
+                                                                             \
+-  product(intx, CMSYoungGenPerWorker, 16*M,                  	            \
+-	  "The amount of young gen chosen by default per GC worker "        \
++  product(intx, CMSYoungGenPerWorker, 16*M,                                 \
++          "The amount of young gen chosen by default per GC worker "        \
+           "thread available ")                                              \
+                                                                             \
+-  product(bool, CMSIncrementalMode, false,                    	            \
+-	  "Whether CMS GC should operate in \"incremental\" mode")          \
++  product(bool, CMSIncrementalMode, false,                                  \
++          "Whether CMS GC should operate in \"incremental\" mode")          \
+                                                                             \
+-  product(uintx, CMSIncrementalDutyCycle, 10,				    \
+-	  "CMS incremental mode duty cycle (a percentage, 0-100).  If"	    \
+-	  "CMSIncrementalPacing is enabled, then this is just the initial"  \
+- 	  "value")							    \
+-									    \
+-  product(bool, CMSIncrementalPacing, true,				    \
+-	  "Whether the CMS incremental mode duty cycle should be "	    \
+-	  "automatically adjusted")					    \
+-									    \
+-  product(uintx, CMSIncrementalDutyCycleMin, 0,			            \
+-	  "Lower bound on the duty cycle when CMSIncrementalPacing is"	    \
+- 	  "enabled (a percentage, 0-100).")				    \
+-									    \
+-  product(uintx, CMSIncrementalSafetyFactor, 10,	    		    \
+-	  "Percentage (0-100) used to add conservatism when computing the"  \
+-	  "duty cycle.")						    \
+-									    \
+-  product(uintx, CMSIncrementalOffset, 0,				    \
+-	  "Percentage (0-100) by which the CMS incremental mode duty cycle" \
+-	  "is shifted to the right within the period between young GCs")    \
+-									    \
+-  product(uintx, CMSExpAvgFactor, 25,				    	    \
+-	  "Percentage (0-100) used to weight the current sample when"	    \
+-	  "computing exponential averages for CMS statistics.")		    \
+-									    \
+-  product(uintx, CMS_FLSWeight, 50,				    	    \
+-	  "Percentage (0-100) used to weight the current sample when"	    \
+-	  "computing exponentially decating averages for CMS FLS statistics.") \
+-									    \
+-  product(uintx, CMS_FLSPadding, 2,			    	            \
+-	  "The multiple of deviation from mean to use for buffering"        \
+-          "against volatility in free list demand.")	                    \
+-									    \
+-  product(uintx, FLSCoalescePolicy, 2,			    	            \
+-	  "CMS: Aggression level for coalescing, increasing from 0 to 4")   \
+-                                                                            \
+-  product(uintx, CMS_SweepWeight, 50,				    	    \
+-	  "Percentage (0-100) used to weight the current sample when"	    \
+-	  "computing exponentially decaying average for inter-sweep duration.") \
+-									    \
+-  product(uintx, CMS_SweepPadding, 2,			    	            \
+-	  "The multiple of deviation from mean to use for buffering"        \
++  product(uintx, CMSIncrementalDutyCycle, 10,                               \
++          "CMS incremental mode duty cycle (a percentage, 0-100).  If"      \
++          "CMSIncrementalPacing is enabled, then this is just the initial"  \
++          "value")                                                          \
++                                                                            \
++  product(bool, CMSIncrementalPacing, true,                                 \
++          "Whether the CMS incremental mode duty cycle should be "          \
++          "automatically adjusted")                                         \
++                                                                            \
++  product(uintx, CMSIncrementalDutyCycleMin, 0,                             \
++          "Lower bound on the duty cycle when CMSIncrementalPacing is"      \
++          "enabled (a percentage, 0-100).")                                 \
++                                                                            \
++  product(uintx, CMSIncrementalSafetyFactor, 10,                            \
++          "Percentage (0-100) used to add conservatism when computing the"  \
++          "duty cycle.")                                                    \
++                                                                            \
++  product(uintx, CMSIncrementalOffset, 0,                                   \
++          "Percentage (0-100) by which the CMS incremental mode duty cycle" \
++          "is shifted to the right within the period between young GCs")    \
++                                                                            \
++  product(uintx, CMSExpAvgFactor, 25,                                       \
++          "Percentage (0-100) used to weight the current sample when"       \
++          "computing exponential averages for CMS statistics.")             \
++                                                                            \
++  product(uintx, CMS_FLSWeight, 50,                                         \
++          "Percentage (0-100) used to weight the current sample when"       \
++          "computing exponentially decating averages for CMS FLS statistics.") \
++                                                                            \
++  product(uintx, CMS_FLSPadding, 2,                                         \
++          "The multiple of deviation from mean to use for buffering"        \
++          "against volatility in free list demand.")                        \
++                                                                            \
++  product(uintx, FLSCoalescePolicy, 2,                                      \
++          "CMS: Aggression level for coalescing, increasing from 0 to 4")   \
++                                                                            \
++  product(uintx, CMS_SweepWeight, 50,                                       \
++          "Percentage (0-100) used to weight the current sample when"       \
++          "computing exponentially decaying average for inter-sweep duration.") \
++                                                                            \
++  product(uintx, CMS_SweepPadding, 2,                                       \
++          "The multiple of deviation from mean to use for buffering"        \
+           "against volatility in inter-sweep duration.")                    \
+-									    \
+-  product(uintx, CMS_SweepTimerThresholdMillis, 10,	    	            \
+-	  "Skip block flux-rate sampling for an epoch unless inter-sweep "  \
++                                                                            \
++  product(uintx, CMS_SweepTimerThresholdMillis, 10,                         \
++          "Skip block flux-rate sampling for an epoch unless inter-sweep "  \
+           " duration exceeds this threhold in milliseconds")                \
+-									    \
+-  develop(bool, CMSTraceIncrementalMode, false,				    \
+-	  "Trace CMS incremental mode")					    \
+-									    \
+-  develop(bool, CMSTraceIncrementalPacing, false,			    \
+-	  "Trace CMS incremental mode pacing computation")		    \
+-									    \
+-  develop(bool, CMSTraceThreadState, false,				    \
+-	  "Trace the CMS thread state (enable the trace_state() method)")   \
+-									    \
+-  product(bool, CMSClassUnloadingEnabled, false,                      	    \
++                                                                            \
++  develop(bool, CMSTraceIncrementalMode, false,                             \
++          "Trace CMS incremental mode")                                     \
++                                                                            \
++  develop(bool, CMSTraceIncrementalPacing, false,                           \
++          "Trace CMS incremental mode pacing computation")                  \
++                                                                            \
++  develop(bool, CMSTraceThreadState, false,                                 \
++          "Trace the CMS thread state (enable the trace_state() method)")   \
++                                                                            \
++  product(bool, CMSClassUnloadingEnabled, false,                            \
+           "Whether class unloading enabled when using CMS GC")              \
+                                                                             \
+-  product(bool, CMSCompactWhenClearAllSoftRefs, true,                 	    \
++  product(bool, CMSCompactWhenClearAllSoftRefs, true,                       \
+           "Compact when asked to collect CMS gen with clear_all_soft_refs") \
+                                                                             \
+   product(bool, UseCMSCompactAtFullCollection, true,                        \
+-          "Use mark sweep compact at full collections")	    		    \
++          "Use mark sweep compact at full collections")                     \
+                                                                             \
+   product(uintx, CMSFullGCsBeforeCompaction, 0,                             \
+           "Number of CMS full collection done before compaction if > 0")    \
+@@ -1322,55 +1331,55 @@
+   develop(intx, CMSDictionaryChoice, 0,                                     \
+           "Use BinaryTreeDictionary as default in the CMS generation")      \
+                                                                             \
+-  product(uintx, CMSIndexedFreeListReplenish, 4,			    \
+-	  "Replenish and indexed free list with this number of chunks")	    \
++  product(uintx, CMSIndexedFreeListReplenish, 4,                            \
++          "Replenish and indexed free list with this number of chunks")     \
+                                                                             \
+-  product(bool, CMSLoopWarn, false,                                 	    \
+-          "Warn in case of excessive CMS looping")		            \
++  product(bool, CMSLoopWarn, false,                                         \
++          "Warn in case of excessive CMS looping")                          \
+                                                                             \
+-  develop(bool, CMSOverflowEarlyRestoration, false,                    	    \
+-          "Whether preserved marks should be restored early")	            \
++  develop(bool, CMSOverflowEarlyRestoration, false,                         \
++          "Whether preserved marks should be restored early")               \
+                                                                             \
+   product(uintx, CMSMarkStackSize, 32*K,                                    \
+           "Size of CMS marking stack")                                      \
+-									    \
++                                                                            \
+   product(uintx, CMSMarkStackSizeMax, 4*M,                                  \
+           "Max size of CMS marking stack")                                  \
+-									    \
++                                                                            \
+   notproduct(bool, CMSMarkStackOverflowALot, false,                         \
+           "Whether we should simulate frequent marking stack / work queue"  \
+           " overflow")                                                      \
+-									    \
++                                                                            \
+   notproduct(intx, CMSMarkStackOverflowInterval, 1000,                      \
+           "A per-thread `interval' counter that determines how frequently"  \
+           " we simulate overflow; a smaller number increases frequency")    \
+-									    \
++                                                                            \
+   product(uintx, CMSMaxAbortablePrecleanLoops, 0,                           \
+           "(Temporary, subject to experimentation)"                         \
+           "Maximum number of abortable preclean iterations, if > 0")        \
+-									    \
++                                                                            \
+   product(intx, CMSMaxAbortablePrecleanTime, 5000,                          \
+           "(Temporary, subject to experimentation)"                         \
+           "Maximum time in abortable preclean in ms")                       \
+-									    \
++                                                                            \
+   product(uintx, CMSAbortablePrecleanMinWorkPerIteration, 100,              \
+           "(Temporary, subject to experimentation)"                         \
+           "Nominal minimum work per abortable preclean iteration")          \
+-									    \
++                                                                            \
+   product(intx, CMSAbortablePrecleanWaitMillis, 100,                        \
+           "(Temporary, subject to experimentation)"                         \
+           " Time that we sleep between iterations when not given"           \
+           " enough work per iteration")                                     \
+-									    \
++                                                                            \
+   product(uintx, CMSRescanMultiple, 32,                                     \
+           "Size (in cards) of CMS parallel rescan task")                    \
+-									    \
++                                                                            \
+   product(uintx, CMSConcMarkMultiple, 32,                                   \
+           "Size (in cards) of CMS concurrent MT marking task")              \
+-									    \
++                                                                            \
+   product(uintx, CMSRevisitStackSize, 1*M,                                  \
+           "Size of CMS KlassKlass revisit stack")                           \
+-									    \
++                                                                            \
+   product(bool, CMSAbortSemantics, false,                                   \
+           "Whether abort-on-overflow semantics is implemented")             \
+                                                                             \
+@@ -1420,35 +1429,35 @@
+                                                                             \
+   product(uintx, CMSPrecleanThreshold, 1000,                                \
+           "Don't re-iterate if #dirty cards less than this")                \
+-									    \
++                                                                            \
+   product(bool, CMSCleanOnEnter, true,                                      \
+           "Clean-on-enter optimization for reducing number of dirty cards") \
+                                                                             \
+   product(uintx, CMSRemarkVerifyVariant, 1,                                 \
+           "Choose variant (1,2) of verification following remark")          \
+-									    \
++                                                                            \
+   product(uintx, CMSScheduleRemarkEdenSizeThreshold, 2*M,                   \
+           "If Eden used is below this value, don't try to schedule remark") \
+                                                                             \
+-  product(uintx, CMSScheduleRemarkEdenPenetration, 50,             	    \
++  product(uintx, CMSScheduleRemarkEdenPenetration, 50,                      \
+           "The Eden occupancy % at which to try and schedule remark pause") \
+                                                                             \
+-  product(uintx, CMSScheduleRemarkSamplingRatio, 5,             	    \
++  product(uintx, CMSScheduleRemarkSamplingRatio, 5,                         \
+           "Start sampling Eden top at least before yg occupancy reaches"    \
+           " 1/<ratio> of the size at which we plan to schedule remark")     \
+                                                                             \
+-  product(uintx, CMSSamplingGrain, 16*K,             	                    \
++  product(uintx, CMSSamplingGrain, 16*K,                                    \
+           "The minimum distance between eden samples for CMS (see above)")  \
+                                                                             \
+-  product(bool, CMSScavengeBeforeRemark, false,                        	    \
+-          "Attempt scavenge before the CMS remark step")		    \
++  product(bool, CMSScavengeBeforeRemark, false,                             \
++          "Attempt scavenge before the CMS remark step")                    \
+                                                                             \
+-  develop(bool, CMSTraceSweeper, false,                               	    \
+-          "Trace some actions of the CMS sweeper")			    \
++  develop(bool, CMSTraceSweeper, false,                                     \
++          "Trace some actions of the CMS sweeper")                          \
+                                                                             \
+   product(uintx, CMSWorkQueueDrainThreshold, 10,                            \
+           "Don't drain below this size per parallel worker/thief")          \
+-									    \
++                                                                            \
+   product(intx, CMSWaitDuration, 2000,                                      \
+           "Time in milliseconds that CMS thread waits for young GC")        \
+                                                                             \
+@@ -1456,8 +1465,8 @@
+           "Yield between steps of concurrent mark & sweep")                 \
+                                                                             \
+   product(uintx, CMSBitMapYieldQuantum, 10*M,                               \
+-          "Bitmap operations should process at most this many bits"	    \
+-	  "between yields")						    \
++          "Bitmap operations should process at most this many bits"         \
++          "between yields")                                                 \
+                                                                             \
+   diagnostic(bool, FLSVerifyAllHeapReferences, false,                       \
+           "Verify that all refs across the FLS boundary "                   \
+@@ -1472,20 +1481,20 @@
+   develop(bool, FLSVerifyDictionary, false,                                 \
+           "Do lots of (expensive) FLS dictionary verification")             \
+                                                                             \
+-  develop(bool, VerifyBlockOffsetArray, false,				    \
+-          "Do (expensive!) block offset array verification")		    \
++  develop(bool, VerifyBlockOffsetArray, false,                              \
++          "Do (expensive!) block offset array verification")                \
+                                                                             \
+   product(bool, BlockOffsetArrayUseUnallocatedBlock, trueInDebug,           \
+           "Maintain _unallocated_block in BlockOffsetArray"                 \
+-          " (currently applicable only to CMS collector)")       	    \
++          " (currently applicable only to CMS collector)")                  \
+                                                                             \
+-  develop(bool, TraceCMSState, false,                                 	    \
+-          "Trace the state of the CMS collection")			    \
++  develop(bool, TraceCMSState, false,                                       \
++          "Trace the state of the CMS collection")                          \
+                                                                             \
+-  product(intx, RefDiscoveryPolicy, 0,                             	    \
+-          "Whether reference-based(0) or referent-based(1)")	            \
++  product(intx, RefDiscoveryPolicy, 0,                                      \
++          "Whether reference-based(0) or referent-based(1)")                \
+                                                                             \
+-  product(bool, ParallelRefProcEnabled, false,                        	    \
++  product(bool, ParallelRefProcEnabled, false,                              \
+           "Enable parallel reference processing whenever possible")         \
+                                                                             \
+   product(bool, ParallelRefProcBalancingEnabled, true,                      \
+@@ -1502,17 +1511,17 @@
+   product(intx, CMSInitiatingOccupancyFraction, -1,                         \
+           "Percentage CMS generation occupancy to start a CMS collection "  \
+           " cycle (A negative value means that CMSTirggerRatio is used)")   \
+-									    \
+-  product(bool, UseCMSInitiatingOccupancyOnly, false,			    \
+-	  "Only use occupancy as a crierion for starting a CMS collection") \
++                                                                            \
++  product(bool, UseCMSInitiatingOccupancyOnly, false,                       \
++          "Only use occupancy as a crierion for starting a CMS collection") \
+                                                                             \
+   develop(bool, CMSTestInFreeList, false,                                   \
+           "Check if the coalesced range is already in the "                 \
+           "free lists as claimed.")                                         \
+                                                                             \
+   notproduct(bool, CMSVerifyReturnedBytes, false,                           \
+-          "Check that all the garbage collected was returned to the "	    \
+-          "free lists.")                                         	    \
++          "Check that all the garbage collected was returned to the "       \
++          "free lists.")                                                    \
+                                                                             \
+   notproduct(bool, ScavengeALot, false,                                     \
+           "Force scavenge at every Nth exit from the runtime system "       \
+@@ -1524,7 +1533,7 @@
+                                                                             \
+   notproduct(bool, GCALotAtAllSafepoints, false,                            \
+           "Enforce ScavengeALot/GCALot at all potential safepoints")        \
+-									    \
++                                                                            \
+   product(bool, HandlePromotionFailure, true,                               \
+           "The youngest generation collection does not require"             \
+           " a guarantee of full promotion of all live objects.")            \
+@@ -1535,8 +1544,8 @@
+                                                                             \
+   develop(uintx, PromotionFailureALotCount, 1000,                           \
+           "Number of promotion failures occurring at ParGCAllocBuffer"      \
+-          "refill attempts (ParNew) or promotion attempts "		    \
+-	  "(other young collectors) ")                                      \
++          "refill attempts (ParNew) or promotion attempts "                 \
++          "(other young collectors) ")                                      \
+                                                                             \
+   develop(uintx, PromotionFailureALotInterval, 5,                           \
+           "Total collections between promotion failures alot")              \
+@@ -1547,8 +1556,8 @@
+   develop(uintx, WorkStealingYieldsBeforeSleep, 1000,                       \
+           "Number of yields before a sleep is done during workstealing")    \
+                                                                             \
+-  product(uintx, PreserveMarkStackSize, 40,				    \
+-	   "Size for stack used in promotion failure handling")		    \
++  product(uintx, PreserveMarkStackSize, 40,                                 \
++           "Size for stack used in promotion failure handling")             \
+                                                                             \
+   product_pd(bool, UseTLAB, "Use thread-local object allocation")           \
+                                                                             \
+@@ -1570,25 +1579,25 @@
+   product(bool, AlwaysActAsServerClassMachine, false,                       \
+           "Always act like a server-class machine")                         \
+                                                                             \
+-  product(uintx, DefaultMaxRAM, G,					    \
+-	  "Maximum real memory size for setting server class heap size")    \
+-									    \
+-  product(uintx, DefaultMaxRAMFraction, 4,				    \
+-	  "Fraction (1/n) of real memory used for server class max heap")   \
+-									    \
+-  product(uintx, DefaultInitialRAMFraction, 64,				    \
+-	  "Fraction (1/n) of real memory used for server class initial heap")  \
+-									    \
++  product_pd(uintx, DefaultMaxRAM,                                          \
++          "Maximum real memory size for setting server class heap size")    \
++                                                                            \
++  product(uintx, DefaultMaxRAMFraction, 4,                                  \
++          "Fraction (1/n) of real memory used for server class max heap")   \
++                                                                            \
++  product(uintx, DefaultInitialRAMFraction, 64,                             \
++          "Fraction (1/n) of real memory used for server class initial heap")  \
++                                                                            \
+   product(bool, UseAutoGCSelectPolicy, false,                               \
+           "Use automatic collection selection policy")                      \
+                                                                             \
+-  product(uintx, AutoGCSelectPauseMillis, 5000,                  	    \
++  product(uintx, AutoGCSelectPauseMillis, 5000,                             \
+           "Automatic GC selection pause threshhold in ms")                  \
+-									    \
++                                                                            \
+   product(bool, UseAdaptiveSizePolicy, true,                                \
+           "Use adaptive generation sizing policies")                        \
+                                                                             \
+-  product(bool, UsePSAdaptiveSurvivorSizePolicy, true,     		    \
++  product(bool, UsePSAdaptiveSurvivorSizePolicy, true,                      \
+           "Use adaptive survivor sizing policies")                          \
+                                                                             \
+   product(bool, UseAdaptiveGenerationSizePolicyAtMinorCollection, true,     \
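The switch of DefaultMaxRAM from product to product_pd above makes the RAM cap a per-platform default rather than a fixed 1G; together with DefaultMaxRAMFraction it yields the server-class default heap. A hedged sketch; physical_memory() is illustrative:

    #include <cstdint>

    uint64_t server_default_max_heap() {
      uint64_t capped = physical_memory() < DefaultMaxRAM
                      ? physical_memory() : DefaultMaxRAM;
      return capped / DefaultMaxRAMFraction;  // default: 1/4 of capped RAM
    }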
+@@ -1597,41 +1606,41 @@
+   product(bool, UseAdaptiveGenerationSizePolicyAtMajorCollection, true,     \
+           "Use adaptive young-old sizing policies at major collections")    \
+                                                                             \
+-  product(bool, UseAdaptiveSizePolicyWithSystemGC, false,   		    \
+-          "Use statistics from System.GC for adaptive size policy")	    \
++  product(bool, UseAdaptiveSizePolicyWithSystemGC, false,                   \
++          "Use statistics from System.GC for adaptive size policy")         \
+                                                                             \
+-  product(bool, UseAdaptiveGCBoundary, false,				    \
+-          "Allow young-old boundary to move")    			    \
++  product(bool, UseAdaptiveGCBoundary, false,                               \
++          "Allow young-old boundary to move")                               \
+                                                                             \
+-  develop(bool, TraceAdaptiveGCBoundary, false,				    \
+-          "Trace young-old boundary moves")    			            \
++  develop(bool, TraceAdaptiveGCBoundary, false,                             \
++          "Trace young-old boundary moves")                                 \
+                                                                             \
+-  develop(intx, PSAdaptiveSizePolicyResizeVirtualSpaceAlot, -1,   	    \
++  develop(intx, PSAdaptiveSizePolicyResizeVirtualSpaceAlot, -1,             \
+           "Resize the virtual spaces of the young or old generations")      \
+                                                                             \
+   product(uintx, AdaptiveSizeThroughPutPolicy, 0,                           \
+-          "Policy for changeing generation size for throughput goals")	    \
++          "Policy for changeing generation size for throughput goals")      \
+                                                                             \
+   product(uintx, AdaptiveSizePausePolicy, 0,                                \
+-          "Policy for changing generation size for pause goals")	    \
++          "Policy for changing generation size for pause goals")            \
++                                                                            \
++  develop(bool, PSAdjustTenuredGenForMinorPause, false,                     \
++          "Adjust tenured generation to achive a minor pause goal")         \
+                                                                             \
+-  develop(bool, PSAdjustTenuredGenForMinorPause, false,			    \
+-	  "Adjust tenured generation to achive a minor pause goal")	    \
++  develop(bool, PSAdjustYoungGenForMajorPause, false,                       \
++          "Adjust young generation to achive a major pause goal")           \
+                                                                             \
+-  develop(bool, PSAdjustYoungGenForMajorPause, false,			    \
+-	  "Adjust young generation to achive a major pause goal")	    \
+-									    \
+   product(uintx, AdaptiveSizePolicyInitializingSteps, 20,                   \
+           "Number of steps where heuristics is used before data is used")   \
+-									    \
++                                                                            \
+   develop(uintx, AdaptiveSizePolicyReadyThreshold, 5,                       \
+           "Number of collections before the adaptive sizing is started")    \
+-									    \
++                                                                            \
+   product(uintx, AdaptiveSizePolicyOutputInterval, 0,                       \
+           "Collecton interval for printing information, zero => never")     \
+                                                                             \
+   product(bool, UseAdaptiveSizePolicyFootprintGoal, true,                   \
+-          "Use adaptive minimum footprint as a goal")			    \
++          "Use adaptive minimum footprint as a goal")                       \
+                                                                             \
+   product(uintx, AdaptiveSizePolicyWeight, 10,                              \
+           "Weight given to exponential resizing, between 0 and 100")        \
+@@ -1640,7 +1649,7 @@
+           "Weight given to time in adaptive policy, between 0 and 100")     \
+                                                                             \
+   product(uintx, PausePadding, 1,                                           \
+-          "How much buffer to keep for pause time")                  	    \
++          "How much buffer to keep for pause time")                         \
+                                                                             \
+   product(uintx, PromotedPadding, 3,                                        \
+           "How much buffer to keep for promotion failure")                  \
+@@ -1667,7 +1676,7 @@
+           "Supplement to YoungedGenerationSizeIncrement used at startup")   \
+                                                                             \
+   product(uintx, YoungGenerationSizeSupplementDecay, 8,                     \
+-          "Decay factor to YoungedGenerationSizeSupplement")    	    \
++          "Decay factor to YoungedGenerationSizeSupplement")                \
+                                                                             \
+   product(uintx, TenuredGenerationSizeIncrement, 20,                        \
+           "Adaptive size percentage change in tenured generation")          \
+@@ -1676,24 +1685,24 @@
+           "Supplement to TenuredGenerationSizeIncrement used at startup")   \
+                                                                             \
+   product(uintx, TenuredGenerationSizeSupplementDecay, 2,                   \
+-          "Decay factor to TenuredGenerationSizeIncrement")  		    \
++          "Decay factor to TenuredGenerationSizeIncrement")                 \
+                                                                             \
+-  product(uintx, MaxGCPauseMillis, max_uintx,                        	    \
++  product(uintx, MaxGCPauseMillis, max_uintx,                               \
+           "Adaptive size policy maximum GC pause time goal in msec")        \
+                                                                             \
+-  product(uintx, MaxGCMinorPauseMillis, max_uintx,                     	    \
++  product(uintx, MaxGCMinorPauseMillis, max_uintx,                          \
+           "Adaptive size policy maximum GC minor pause time goal in msec")  \
+                                                                             \
+-  product(uintx, GCTimeRatio, 99,                     	                    \
++  product(uintx, GCTimeRatio, 99,                                           \
+           "Adaptive size policy application time to GC time ratio")         \
+                                                                             \
+   product(uintx, AdaptiveSizeDecrementScaleFactor, 4,                       \
+-          "Adaptive size scale down factor for shrinking")		    \
++          "Adaptive size scale down factor for shrinking")                  \
+                                                                             \
+-  product(bool, UseAdaptiveSizeDecayMajorGCCost, true,            	    \
++  product(bool, UseAdaptiveSizeDecayMajorGCCost, true,                      \
+           "Adaptive size decays the major cost for long major intervals")   \
+                                                                             \
+-  product(uintx, AdaptiveSizeMajorGCDecayTimeScale, 10,           	    \
++  product(uintx, AdaptiveSizeMajorGCDecayTimeScale, 10,                     \
+           "Time scale over which major costs decay")                        \
+                                                                             \
+   product(uintx, MinSurvivorRatio, 3,                                       \
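A worked example for the GCTimeRatio entry above: the goal is an application-to-GC time ratio of GCTimeRatio to 1, so the GC-overhead goal is 1/(1+GCTimeRatio); the default of 99 allows at most 1% of time in GC:

    double gc_overhead_goal_fraction() {
      return 1.0 / (1.0 + (double)GCTimeRatio);  // 99 -> 0.01, i.e. 1% in GC
    }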
+@@ -1702,12 +1711,12 @@
+   product(uintx, InitialSurvivorRatio, 8,                                   \
+           "Initial ratio of eden/survivor space size")                      \
+                                                                             \
+-  product(uintx, BaseFootPrintEstimate, 256*M,             	 	    \
++  product(uintx, BaseFootPrintEstimate, 256*M,                              \
+           "Estimate of footprint other than Java Heap")                     \
+                                                                             \
+   product(bool, UseGCOverheadLimit, true,                                   \
+-          "Use policy to limit of proportion of time spent in GC "	    \
+-	  "before an OutOfMemory error is thrown")                    	    \
++          "Use policy to limit of proportion of time spent in GC "          \
++          "before an OutOfMemory error is thrown")                          \
+                                                                             \
+   product(uintx, GCTimeLimit, 98,                                           \
+           "Limit of proportion of time spent in GC before an OutOfMemory"   \
+@@ -1716,7 +1725,7 @@
+   product(uintx, GCHeapFreeLimit, 2,                                        \
+           "Minimum percentage of free space after a full GC before an "     \
+           "OutOfMemoryError is thrown (used with GCTimeLimit)")             \
+-									    \
++                                                                            \
+   develop(uintx, AdaptiveSizePolicyGCTimeLimitThreshold, 5,                 \
+           "Number of consecutive collections before gc time limit fires")   \
+                                                                             \
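The three flags above combine into the GC-overhead out-of-memory test: both the time limit and the free-heap limit must be breached for the threshold number of consecutive collections. A hedged sketch assembled from the flag descriptions (percentages, as documented):

    bool over_gc_overhead_limit(double gc_time_pct, double heap_free_pct,
                                unsigned consecutive_breaches) {
      return UseGCOverheadLimit
          && gc_time_pct   > (double)GCTimeLimit      // e.g. > 98% in GC
          && heap_free_pct < (double)GCHeapFreeLimit  // e.g. < 2% free
          && consecutive_breaches >= AdaptiveSizePolicyGCTimeLimitThreshold;
    }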
+@@ -1846,9 +1855,9 @@
+                                                                             \
+   product(bool, PrintHeapAtSIGBREAK, true,                                  \
+           "Print heap layout in response to SIGBREAK")                      \
+-									    \
+-  manageable(bool, PrintClassHistogram, false,			            \
+-	  "Print a histogram of class instances") 		            \
++                                                                            \
++  manageable(bool, PrintClassHistogram, false,                              \
++          "Print a histogram of class instances")                           \
+                                                                             \
+   develop(bool, TraceWorkGang, false,                                       \
+           "Trace activities of work gangs")                                 \
+@@ -1871,32 +1880,32 @@
+   develop(bool, TraceGCTaskThread, false,                                   \
+           "Trace actions of the GC task threads")                           \
+                                                                             \
+-  product(bool, PrintParallelOldGCPhaseTimes, false,			    \
+-          "Print the time taken by each parallel old gc phase."		    \
+-	  "PrintGCDetails must also be enabled.")			    \
+-                                                                            \
+-  develop(bool, TraceParallelOldGCMarkingPhase, false,			    \
+-	  "Trace parallel old gc marking phase")			    \
+-									    \
+-  develop(bool, TraceParallelOldGCSummaryPhase, false,			    \
+-	  "Trace parallel old gc summary phase")			    \
+-									    \
+-  develop(bool, TraceParallelOldGCCompactionPhase, false,		    \
+-	  "Trace parallel old gc compaction phase")			    \
++  product(bool, PrintParallelOldGCPhaseTimes, false,                        \
++          "Print the time taken by each parallel old gc phase."             \
++          "PrintGCDetails must also be enabled.")                           \
++                                                                            \
++  develop(bool, TraceParallelOldGCMarkingPhase, false,                      \
++          "Trace parallel old gc marking phase")                            \
+                                                                             \
+-  develop(bool, TraceParallelOldGCDensePrefix, false,			    \
+-	  "Trace parallel old gc dense prefix computation")		    \
++  develop(bool, TraceParallelOldGCSummaryPhase, false,                      \
++          "Trace parallel old gc summary phase")                            \
++                                                                            \
++  develop(bool, TraceParallelOldGCCompactionPhase, false,                   \
++          "Trace parallel old gc compaction phase")                         \
++                                                                            \
++  develop(bool, TraceParallelOldGCDensePrefix, false,                       \
++          "Trace parallel old gc dense prefix computation")                 \
+                                                                             \
+   develop(bool, IgnoreLibthreadGPFault, false,                              \
+           "Suppress workaround for libthread GP fault")                     \
+                                                                             \
+-  /* JVMTI heap profiling */						    \
+-									    \
++  /* JVMTI heap profiling */                                                \
++                                                                            \
+   diagnostic(bool, TraceJVMTIObjectTagging, false,                          \
+-	  "Trace JVMTI object tagging calls")				    \
+-									    \
++          "Trace JVMTI object tagging calls")                               \
++                                                                            \
+   diagnostic(bool, VerifyBeforeIteration, false,                            \
+-          "Verify memory system before JVMTI iteration")		    \
++          "Verify memory system before JVMTI iteration")                    \
+                                                                             \
+   /* compiler interface */                                                  \
+                                                                             \
+@@ -2029,7 +2038,7 @@
+   diagnostic(bool, PrintIntrinsics, false,                                  \
+           "prints attempted and successful inlining of intrinsics")         \
+                                                                             \
+-  diagnostic(ccstr, DisableIntrinsic, "",                                   \
++  diagnostic(ccstrlist, DisableIntrinsic, "",                               \
+           "do not expand intrinsics whose (internal) names appear here")    \
+                                                                             \
+   develop(bool, StressReflectiveCode, false,                                \
+@@ -2075,10 +2084,10 @@
+   diagnostic(bool, LogVMOutput, trueInDebug,                                \
+          "Save VM output to hotspot.log, or to LogFile")                    \
+                                                                             \
+-  diagnostic(ccstr, LogFile, "",                                            \
++  diagnostic(ccstr, LogFile, NULL,                                          \
+          "If LogVMOutput is on, save VM output to this file [hotspot.log]") \
+                                                                             \
+-  product(ccstr, ErrorFile, "",                                             \
++  product(ccstr, ErrorFile, NULL,                                           \
+          "If an error occurs, save the error data to this file "            \
+          "[default: ./hs_err_pid%p.log] (%p replaced with pid)")            \
+                                                                             \
+@@ -2098,7 +2107,7 @@
+           "standard exit from VM if bytecode verify error "                 \
+           "(only in debug mode)")                                           \
+                                                                             \
+-  notproduct(ccstr, AbortVMOnException, "",                                 \
++  notproduct(ccstr, AbortVMOnException, NULL,                               \
+           "Call fatal if this exception is thrown.  Example: "              \
+           "java -XX:AbortVMOnException=java.lang.NullPointerException Foo") \
+                                                                             \
+@@ -2205,9 +2214,6 @@
+   develop(bool, CountCompiledCalls, false,                                  \
+           "counts method invocations")                                      \
+                                                                             \
+-  notproduct(bool, CountVMLocks, false,                                     \
+-          "counts VM internal lock attempts and contention")                \
+-                                                                            \
+   notproduct(bool, CountRuntimeCalls, false,                                \
+           "counts VM runtime calls")                                        \
+                                                                             \
+@@ -2425,7 +2431,7 @@
+           "Guarantee a safepoint (at least) every so many milliseconds "    \
+           "(0 means none)")                                                 \
+                                                                             \
+-  develop(intx, SafepointTimeoutDelay, 10000,                               \
++  product(intx, SafepointTimeoutDelay, 10000,                               \
+           "Delay in milliseconds for option SafepointTimeout")              \
+                                                                             \
+   product(intx, NmethodSweepFraction, 4,                                    \
+@@ -2441,7 +2447,7 @@
+           "number of times to evaluate expression in assert "               \
+           "(to estimate overhead); only works with -DUSE_REPEATED_ASSERTS") \
+                                                                             \
+-  notproduct(ccstr, SuppressErrorAt, "",                                    \
++  notproduct(ccstrlist, SuppressErrorAt, "",                                \
+           "List of assertions (file:line) to muzzle")                       \
+                                                                             \
+   notproduct(uintx, HandleAllocationLimit, 1024,                            \
+@@ -2609,10 +2615,10 @@
+   product(uintx, MaxHeapSize, ScaleForWordSize(64*M),                       \
+           "Default maximum size for object heap (in bytes)")                \
+                                                                             \
+-  product_pd(uintx, NewSize, 						    \
++  product_pd(uintx, NewSize,                                                \
+           "Default size of new generation (in bytes)")                      \
+                                                                             \
+-  product(uintx, MaxNewSize, max_uintx,                             	    \
++  product(uintx, MaxNewSize, max_uintx,                                     \
+           "Maximum size of new generation (in bytes)")                      \
+                                                                             \
+   product(uintx, PretenureSizeThreshold, 0,                                 \
+@@ -2655,7 +2661,7 @@
+   product_pd(uintx, PermSize,                                               \
+           "Default size of permanent generation (in bytes)")                \
+                                                                             \
+-  product_pd(uintx, MaxPermSize,					    \
++  product_pd(uintx, MaxPermSize,                                            \
+           "Maximum size of permanent generation (in bytes)")                \
+                                                                             \
+   product(uintx, MinHeapFreeRatio,    40,                                   \
+@@ -2687,7 +2693,7 @@
+           "Generation level at which to start +VerifyBefore/AfterGC")       \
+                                                                             \
+   develop(uintx, ExitAfterGCNum,   0,                                       \
+-          "If non-zero, exit after this GC.")	                            \
++          "If non-zero, exit after this GC.")                               \
+                                                                             \
+   product(intx, MaxTenuringThreshold,    15,                                \
+           "Maximum value for tenuring threshold")                           \
+@@ -2699,17 +2705,17 @@
+           "Desired percentage of survivor space used after scavenge")       \
+                                                                             \
+   product(intx, MarkSweepDeadRatio,     5,                                  \
+-          "Percentage (0-100) of the old gen allowed as dead wood."	    \
++          "Percentage (0-100) of the old gen allowed as dead wood."         \
+           "Serial mark sweep treats this as both the min and max value."    \
+-          "CMS uses this value only if it falls back to mark sweep."	    \
++          "CMS uses this value only if it falls back to mark sweep."        \
+           "Par compact uses a variable scale based on the density of the"   \
+           "generation and treats this as the max value when the heap is"    \
+           "either completely full or completely empty.  Par compact also"   \
+-          "has a smaller default value; see arguments.cpp.")		    \
++          "has a smaller default value; see arguments.cpp.")                \
+                                                                             \
+   product(intx, PermMarkSweepDeadRatio,    20,                              \
+-          "Percentage (0-100) of the perm gen allowed as dead wood."	    \
+-          "See MarkSweepDeadRatio for collector-specific comments.")	    \
++          "Percentage (0-100) of the perm gen allowed as dead wood."        \
++          "See MarkSweepDeadRatio for collector-specific comments.")        \
+                                                                             \
+   product(intx, MarkSweepAlwaysCompactCount,     4,                         \
+           "How often should we fully compact the heap (ignoring the dead "  \
+@@ -2744,6 +2750,9 @@
+           "true: the scavenge order will be depth-first, "                  \
+           "false: the scavenge order will be breadth-first")                \
+                                                                             \
++  product(bool, PSChunkLargeArrays, true,                                   \
++          "true: process large arrays in chunks")                           \
++                                                                            \
+   product(uintx, GCDrainStackTargetSize, 64,                                \
+           "how many entries we'll try to leave on the stack during "        \
+           "parallel GC")                                                    \
+@@ -2784,7 +2793,7 @@
+   develop_pd(intx, CodeEntryAlignment,                                      \
+           "Code entry alignment for generated code (in bytes)")             \
+                                                                             \
+-  product_pd(uintx, InitialCodeCacheSize, 			            \
++  product_pd(uintx, InitialCodeCacheSize,                                   \
+           "Initial code cache size (in bytes)")                             \
+                                                                             \
+   product_pd(uintx, ReservedCodeCacheSize,                                  \
+@@ -2834,13 +2843,13 @@
+   develop(intx, CIBreakAt,    -1,                                           \
+           "id of compilation to break at")                                  \
+                                                                             \
+-  product(ccstr, CompileOnly, "",                                           \
++  product(ccstrlist, CompileOnly, "",                                       \
+           "List of methods (pkg/class.name) to restrict compilation to")    \
+                                                                             \
+-  product(ccstr, CompileCommandFile, "",                                    \
++  product(ccstr, CompileCommandFile, NULL,                                  \
+           "Read compiler commands from this file [.hotspot_compiler]")      \
+                                                                             \
+-  product(ccstr, CompileCommand, "",                                        \
++  product(ccstrlist, CompileCommand, "",                                    \
+           "Prepend to .hotspot_compiler; e.g. log,java/lang/String.<init>") \
+                                                                             \
+   product(bool, CICompilerCountPerCPU, false,                               \
+@@ -3033,7 +3042,7 @@
+   product(bool, PerfDataSaveToFile, false,                                  \
+           "Save PerfData memory to hsperfdata_<pid> file on exit")          \
+                                                                             \
+-  product(ccstr, PerfDataSaveFile, "",                                      \
++  product(ccstr, PerfDataSaveFile, NULL,                                    \
+           "Save PerfData memory to the specified absolute pathname,"        \
+            "%p in the file name if present will be replaced by pid")        \
+                                                                             \
+@@ -3076,10 +3085,10 @@
+                                                                             \
+   product(bool, StartAttachListener, false,                                 \
+           "Always start Attach Listener at VM startup")                     \
+-                                                                    	    \
++                                                                            \
+   manageable(bool, PrintConcurrentLocks, false,                             \
+           "Print java.util.concurrent locks in thread dump")                \
+-                                                                    	    \
++                                                                            \
+   /* Shared spaces */                                                       \
+                                                                             \
+   product(bool, UseSharedSpaces, true,                                      \
+@@ -3134,7 +3143,7 @@
+           "Causes the VM to pause at startup time and wait for the pause "  \
+           "file to be removed (default: ./vm.paused.<pid>)")                \
+                                                                             \
+-  diagnostic(ccstr, PauseAtStartupFile, "",                                 \
++  diagnostic(ccstr, PauseAtStartupFile, NULL,                               \
+           "The file to create and for whose removal to await when pausing " \
+           "at startup. (default: ./vm.paused.<pid>)")                       \
+                                                                             \
+@@ -3169,9 +3178,9 @@
+ #define DECLARE_MANAGEABLE_FLAG(type, name, value, doc) extern "C" type name;
+ #define DECLARE_PRODUCT_RW_FLAG(type, name, value, doc) extern "C" type name;
+ #ifdef PRODUCT
+-#define DECLARE_DEVELOPER_FLAG(type, name, value, doc)  const type name = value; 
+-#define DECLARE_PD_DEVELOPER_FLAG(type, name, doc)      const type name = pd_##name; 
+-#define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc) 
++#define DECLARE_DEVELOPER_FLAG(type, name, value, doc)  const type name = value;
++#define DECLARE_PD_DEVELOPER_FLAG(type, name, doc)      const type name = pd_##name;
++#define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc)
+ #else
+ #define DECLARE_DEVELOPER_FLAG(type, name, value, doc)  extern "C" type name;
+ #define DECLARE_PD_DEVELOPER_FLAG(type, name, doc)      extern "C" type name;
+@@ -3180,22 +3189,20 @@
+ 
+ // Implementation macros
+ #define MATERIALIZE_PRODUCT_FLAG(type, name, value, doc)   type name = value;
+-#define MATERIALIZE_PD_PRODUCT_FLAG(type, name, doc)       type name = pd_##name; 
++#define MATERIALIZE_PD_PRODUCT_FLAG(type, name, doc)       type name = pd_##name;
+ #define MATERIALIZE_DIAGNOSTIC_FLAG(type, name, value, doc) type name = value;
+-#define MATERIALIZE_MANAGEABLE_FLAG(type, name, value, doc) type name = value; 
++#define MATERIALIZE_MANAGEABLE_FLAG(type, name, value, doc) type name = value;
+ #define MATERIALIZE_PRODUCT_RW_FLAG(type, name, value, doc) type name = value;
+ #ifdef PRODUCT
+-#define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc) /* flag name is constant */ 
+-#define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc)     /* flag name is constant */ 
+-#define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc) 
++#define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc) /* flag name is constant */
++#define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc)     /* flag name is constant */
++#define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc)
+ #else
+-#define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc) type name = value; 
+-#define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc)     type name = pd_##name; 
+-#define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc) type name = value; 
++#define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc) type name = value;
++#define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc)     type name = pd_##name;
++#define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc) type name = value;
+ #endif
+ 
+ RUNTIME_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
+ 
+ RUNTIME_OS_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)
+-
+-
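
The globals.hpp hunks above do two things besides whitespace: several string
options gain a NULL default in place of "", which lets the VM distinguish a
flag that was never set from one explicitly set to the empty string, and
DisableIntrinsic, SuppressErrorAt, CompileOnly and CompileCommand move from
ccstr to ccstrlist, the flavour of string flag that accumulates repeated -XX
occurrences instead of keeping only the last. For readers unfamiliar with the
flag table, a minimal sketch of the X-macro pattern it is built on; MY_FLAGS,
UseWidget and WidgetFile are invented names, not HotSpot's:

    #include <cstdio>

    // One macro lists the flags; different "function" macros expand that
    // list into declarations or definitions, as globals.hpp does with
    // DECLARE_*_FLAG and MATERIALIZE_*_FLAG above.
    #define MY_FLAGS(product)                                            \
      product(bool,        UseWidget,  true, "Enable the widget")        \
      product(const char*, WidgetFile, NULL, "Optional widget file")

    #define DECLARE_FLAG(type, name, value, doc)     extern type name;
    #define MATERIALIZE_FLAG(type, name, value, doc) type name = value;

    MY_FLAGS(DECLARE_FLAG)      // extern bool UseWidget; ...
    MY_FLAGS(MATERIALIZE_FLAG)  // bool UseWidget = true; ...

    int main() {
      // A NULL default (rather than "") makes "unset" testable.
      std::printf("UseWidget=%d WidgetFile=%s\n",
                  UseWidget, WidgetFile ? WidgetFile : "(unset)");
      return 0;
    }
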
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/handles.cpp openjdk/hotspot/src/share/vm/runtime/handles.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/handles.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/handles.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)handles.cpp	1.110 07/05/05 17:06:42 JVM"
+-#endif
+ /*
+  * Copyright 1997-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -41,7 +38,7 @@
+   if (obj == NULL) {
+     _handle = NULL;
+   } else {
+-    _handle = thread->handle_area()->allocate_handle(obj);    
++    _handle = thread->handle_area()->allocate_handle(obj);
+   }
+ }
+ 
+@@ -62,7 +59,7 @@
+   return handles_visited;
+ }
+ 
+-// Used for debugging handle allocation. 
++// Used for debugging handle allocation.
+ NOT_PRODUCT(jint _nof_handlemarks  = 0;)
+ 
+ void HandleArea::oops_do(OopClosure* f) {
+@@ -75,16 +72,16 @@
+     handles_visited += chunk_oops_do(f, k, k->top());
+     k = k->next();
+   }
+-  
++
+   // The thread local handle areas should not get very large
+-  if (TraceHandleAllocation && handles_visited > TotalHandleAllocationLimit) {    
++  if (TraceHandleAllocation && handles_visited > TotalHandleAllocationLimit) {
+ #ifdef ASSERT
+-    warning("%d: Visited in HandleMark : %d", 
+-      _nof_handlemarks, handles_visited);      
++    warning("%d: Visited in HandleMark : %d",
++      _nof_handlemarks, handles_visited);
+ #else
+     warning("Visited in HandleMark : %d", handles_visited);
+ #endif
+-  }  
++  }
+   if (_prev != NULL) _prev->oops_do(f);
+ }
+ 
+@@ -98,7 +95,7 @@
+   _max   = _area->_max;
+   NOT_PRODUCT(_size_in_bytes = _area->_size_in_bytes;)
+   debug_only(_area->_handle_mark_nesting++);
+-  assert(_area->_handle_mark_nesting > 0, "must stack allocate HandleMarks"); 
++  assert(_area->_handle_mark_nesting > 0, "must stack allocate HandleMarks");
+   debug_only(Atomic::inc(&_nof_handlemarks);)
+ 
+   // Link this in the thread
+@@ -107,32 +104,32 @@
+ }
+ 
+ 
+-HandleMark::~HandleMark() { 
++HandleMark::~HandleMark() {
+   HandleArea* area = _area;   // help compilers with poor alias analysis
+   assert(area == _thread->handle_area(), "sanity check");
+-  assert(area->_handle_mark_nesting > 0, "must stack allocate HandleMarks" );  
++  assert(area->_handle_mark_nesting > 0, "must stack allocate HandleMarks" );
+   debug_only(area->_handle_mark_nesting--);
+-  
++
+   // Debug code to trace the number of handles allocated per mark/
+ #ifdef ASSERT
+   if (TraceHandleAllocation) {
+-    size_t handles = 0;  
+-    Chunk *c = _chunk->next();         
+-    if (c == NULL) {      
++    size_t handles = 0;
++    Chunk *c = _chunk->next();
++    if (c == NULL) {
+       handles = area->_hwm - _hwm; // no new chunk allocated
+     } else {
+-      handles = _max - _hwm;      // add rest in first chunk      
++      handles = _max - _hwm;      // add rest in first chunk
+       while(c != NULL) {
+-        handles += c->length(); 
++        handles += c->length();
+         c = c->next();
+-      }    
++      }
+       handles -= area->_max - area->_hwm; // adjust for last trunk not full
+     }
+     handles /= sizeof(void *); // Adjust for size of a handle
+-    if (handles > HandleAllocationLimit) {   
++    if (handles > HandleAllocationLimit) {
+       // Note: _nof_handlemarks is only set in debug mode
+       warning("%d: Allocated in HandleMark : %d", _nof_handlemarks, handles);
+-    } 
++    }
+   }
+ #endif
+ 
+@@ -145,7 +142,7 @@
+   area->_hwm = _hwm;
+   area->_max = _max;
+   NOT_PRODUCT(area->set_size_in_bytes(_size_in_bytes);)
+-#ifdef ASSERT 
++#ifdef ASSERT
+   // clear out first chunk (to detect allocation bugs)
+   if (ZapVMHandleArea) {
+     memset(_hwm, badHandleValue, _max - _hwm);
+@@ -168,7 +165,7 @@
+ 
+ NoHandleMark::~NoHandleMark() {
+   HandleArea* area = Thread::current()->handle_area();
+-  assert(area->_no_handle_mark_nesting > 0, "must stack allocate NoHandleMark" ); 
++  assert(area->_no_handle_mark_nesting > 0, "must stack allocate NoHandleMark" );
+   area->_no_handle_mark_nesting--;
+ }
+ 
+@@ -181,7 +178,7 @@
+ 
+ 
+ ResetNoHandleMark::~ResetNoHandleMark() {
+-  HandleArea* area = Thread::current()->handle_area();  
++  HandleArea* area = Thread::current()->handle_area();
+   area->_no_handle_mark_nesting = _no_handle_mark_nesting;
+ }
+ 
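
The handles.cpp changes are whitespace cleanup around HandleMark, whose whole
job is to snapshot the thread's handle-area top on construction and roll it
back on destruction, releasing every handle allocated in between. A toy
save/restore sketch of that idea, with invented types rather than HotSpot's
real Handle machinery:

    #include <cassert>
    #include <cstddef>

    // Bump-allocated "handle area" plus a RAII mark that rolls it back.
    struct ToyHandleArea {
      void*  _slots[1024];
      size_t _top = 0;
      void** allocate_handle(void* obj) {
        assert(_top < 1024);
        _slots[_top] = obj;
        return &_slots[_top++];
      }
    };

    struct ToyHandleMark {
      ToyHandleArea& _area;
      size_t         _saved_top;
      explicit ToyHandleMark(ToyHandleArea& a) : _area(a), _saved_top(a._top) {}
      ~ToyHandleMark() { _area._top = _saved_top; }  // frees all since the mark
    };

    int main() {
      ToyHandleArea area;
      int obj = 42;
      {
        ToyHandleMark hm(area);
        area.allocate_handle(&obj);
        area.allocate_handle(&obj);
        assert(area._top == 2);
      }                        // hm's destructor resets the top
      assert(area._top == 0);
      return 0;
    }
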
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/handles.hpp openjdk/hotspot/src/share/vm/runtime/handles.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/handles.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/handles.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)handles.hpp	1.120 07/05/05 17:06:47 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,24 +19,24 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //------------------------------------------------------------------------------------------------------------------------
+-// In order to preserve oops during garbage collection, they should be 
++// In order to preserve oops during garbage collection, they should be
+ // allocated and passed around via Handles within the VM. A handle is
+ // simply an extra indirection allocated in a thread local handle area.
+-// 
++//
+ // A handle is a ValueObj, so it can be passed around as a value, can
+ // be used as a parameter w/o using &-passing, and can be returned as a
+-// return value. 
++// return value.
+ //
+ // oop parameters and return types should be Handles whenever feasible.
+-// 
++//
+ // Handles are declared in a straight-forward manner, e.g.
+ //
+ //   oop obj = ...;
+-//   Handle h1(obj);              // allocate new handle 
++//   Handle h1(obj);              // allocate new handle
+ //   Handle h2(thread, obj);      // faster allocation when current thread is known
+ //   Handle h3;                   // declare handle only, no allocation occurs
+ //   ...
+@@ -48,7 +45,7 @@
+ //   h1->print();                 // invoking operation on oop
+ //
+ // Handles are specialized for different oop types to provide extra type
+-// information and avoid unnecessary casting. For each oop type xxxOop 
++// information and avoid unnecessary casting. For each oop type xxxOop
+ // there is a corresponding handle called xxxHandle, e.g.
+ //
+ //   oop           Handle
+@@ -92,7 +89,7 @@
+   oop     operator -> () const                   { return non_null_obj(); }
+   bool    operator == (oop o) const              { return obj() == o; }
+   bool    operator == (const Handle& h) const          { return obj() == h.obj(); }
+-   
++
+   // Null checks
+   bool    is_null() const                        { return _handle == NULL; }
+   bool    not_null() const                       { return _handle != NULL; }
+@@ -105,7 +102,7 @@
+   // Constructor takes a dummy argument to prevent unintentional type conversion in C++.
+   Handle(oop *handle, bool dummy)                { _handle = handle; }
+ 
+-  // Raw handle access. Allows easy duplication of Handles. This can be very unsafe 
++  // Raw handle access. Allows easy duplication of Handles. This can be very unsafe
+   // since duplicates is only valid as long as original handle is alive.
+   oop* raw_value()                               { return _handle; }
+   static oop raw_resolve(oop *handle)            { return handle == NULL ? (oop)NULL : *handle; }
+@@ -119,7 +116,7 @@
+  protected:
+   klassOop    obj() const                        { return (klassOop)Handle::obj(); }
+   klassOop    non_null_obj() const               { return (klassOop)Handle::non_null_obj(); }
+-  Klass*      as_klass() const                   { return non_null_obj()->klass_part(); }  
++  Klass*      as_klass() const                   { return non_null_obj()->klass_part(); }
+ 
+  public:
+   // Constructors
+@@ -139,10 +136,10 @@
+     : Handle(thread, kl ? kl->as_klassOop() : (klassOop)NULL) {
+     assert(is_null() || obj()->is_klass(), "not a klassOop");
+   }
+-   
++
+   // General access
+-  klassOop    operator () () const               { return obj(); }  
+-  Klass*      operator -> () const               { return as_klass(); }  
++  klassOop    operator () () const               { return obj(); }
++  Klass*      operator -> () const               { return as_klass(); }
+ };
+ 
+ 
+@@ -268,19 +265,19 @@
+   void oops_do(OopClosure* f);
+ 
+   // Number of handles in use
+-  size_t used() const     { return Arena::used() / oopSize; }  
++  size_t used() const     { return Arena::used() / oopSize; }
+ 
+   debug_only(bool no_handle_mark_active() { return _no_handle_mark_nesting > 0; })
+ };
+ 
+ 
+ //------------------------------------------------------------------------------------------------------------------------
+-// Handles are allocated in a (growable) thread local handle area. Deallocation 
++// Handles are allocated in a (growable) thread local handle area. Deallocation
+ // is managed using a HandleMark. It should normally not be necessary to use
+ // HandleMarks manually.
+ //
+-// A HandleMark constructor will record the current handle area top, and the 
+-// desctructor will reset the top, destroying all handles allocated in between. 
++// A HandleMark constructor will record the current handle area top, and the
++// destructor will reset the top, destroying all handles allocated in between.
+ // The following code will therefore NOT work:
+ //
+ //   Handle h;
+@@ -294,7 +291,7 @@
+ // across the HandleMark boundary.
+ 
+ // The base class of HandleMark should have been StackObj but we also heap allocate
+-// a HandleMark when a thread is created. 
++// a HandleMark when a thread is created.
+ 
+ class HandleMark {
+  private:
+@@ -302,8 +299,8 @@
+   HandleArea *_area;            // saved handle area
+   Chunk *_chunk;                // saved arena chunk
+   char *_hwm, *_max;            // saved arena info
+-  NOT_PRODUCT(size_t _size_in_bytes;) // size of handle area  
+-  // Link to previous active HandleMark in thread 
++  NOT_PRODUCT(size_t _size_in_bytes;) // size of handle area
++  // Link to previous active HandleMark in thread
+   HandleMark* _previous_handle_mark;
+ 
+   void initialize(Thread* thread);                // common code for constructors
+@@ -323,7 +320,7 @@
+ };
+ 
+ //------------------------------------------------------------------------------------------------------------------------
+-// A NoHandleMark stack object will verify that no handles are allocated 
++// A NoHandleMark stack object will verify that no handles are allocated
+ // in its scope. Enabled in debug mode only.
+ 
+ class NoHandleMark: public StackObj {
+@@ -349,4 +346,3 @@
+   ~ResetNoHandleMark() {}
+ #endif
+ };
+-
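
The comment block in handles.hpp spells out the main pitfall: a Handle must
not outlive the HandleMark under which it was allocated, because the mark's
destructor releases its slot for reuse. A self-contained toy demonstration of
why the quoted "will NOT work" pattern fails (Area and Mark are invented
stand-ins, not the real API):

    #include <cstdio>

    struct Area {                      // minimal handle area
      void* slots[64];
      int   top = 0;
      void** alloc(void* p) { slots[top] = p; return &slots[top++]; }
    };

    struct Mark {                      // minimal HandleMark
      Area& a; int saved;
      explicit Mark(Area& ar) : a(ar), saved(ar.top) {}
      ~Mark() { a.top = saved; }
    };

    int main() {
      Area area;
      int x = 1, y = 2;
      void** h;
      {
        Mark m(area);
        h = area.alloc(&x);   // "handle" allocated inside the mark's scope
      }                       // mark destroyed: the slot is free again...
      area.alloc(&y);         // ...and promptly reused for another object
      // h still points at slot 0, but slot 0 now refers to y, not x:
      std::printf("h was silently retargeted: %d\n", *h == &y);  // prints 1
      return 0;
    }
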
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/handles.inline.hpp openjdk/hotspot/src/share/vm/runtime/handles.inline.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/handles.inline.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/handles.inline.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)handles.inline.hpp	1.28 07/05/05 17:06:48 JVM"
+-#endif
+ /*
+  * Copyright 1998-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,17 +19,17 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // these inline functions are in a separate file to break an include cycle
+ // between Thread and Handle
+ 
+-inline Handle::Handle(oop obj) { 
++inline Handle::Handle(oop obj) {
+   if (obj == NULL) {
+     _handle = NULL;
+   } else {
+-    _handle = Thread::current()->handle_area()->allocate_handle(obj);     
++    _handle = Thread::current()->handle_area()->allocate_handle(obj);
+   }
+ }
+ 
+@@ -43,7 +40,7 @@
+   if (obj == NULL) {
+     _handle = NULL;
+   } else {
+-    _handle = thread->handle_area()->allocate_handle(obj);    
++    _handle = thread->handle_area()->allocate_handle(obj);
+   }
+ }
+ #endif // ASSERT
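
As its leading comment says, handles.inline.hpp exists to break an include
cycle between Thread and Handle: each header forward-declares the other
class, and the inline bodies that need both complete types live in a third
file. The same layout condensed into one illustrative translation unit, with
hypothetical types A and B:

    // --- "a.hpp": knows only that B exists ---
    struct B;
    struct A {
      B* b;
      void touch();                    // body deferred to the inline section
    };

    // --- "b.hpp": knows only that A exists ---
    struct B {
      A* a;
      int n = 0;
    };

    // --- "a.inline.hpp": both types are complete here ---
    inline void A::touch() { b->n++; }

    int main() {
      B bee;
      A ay{&bee};
      bee.a = &ay;
      ay.touch();
      return bee.n == 1 ? 0 : 1;       // exit 0 on success
    }
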
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/hpi.cpp openjdk/hotspot/src/share/vm/runtime/hpi.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/hpi.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/hpi.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)hpi.cpp	1.18 07/05/17 16:05:48 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -42,7 +39,7 @@
+   jio_fprintf,
+   unimplemented_panic,
+   unimplemented_monitorRegister,
+-  
++
+   NULL, // unused
+   NULL, // unused
+   NULL  // unused
+@@ -59,28 +56,28 @@
+   initialize_get_interface(&callbacks);
+   if (_get_interface == NULL)
+     return JNI_ERR;
+-  
++
+   jint result;
+-  
++
+   result = (*_get_interface)((void **)&_file, "File", 1);
+   if (result != 0) {
+     if (TraceHPI) tty->print_cr("Can't find HPI_FileInterface");
+     return JNI_ERR;
+   }
+-  
+-  
++
++
+   result = (*_get_interface)((void **)&_library, "Library", 1);
+   if (result != 0) {
+     if (TraceHPI) tty->print_cr("Can't find HPI_LibraryInterface");
+     return JNI_ERR;
+   }
+-  
++
+   result = (*_get_interface)((void **)&_system, "System", 1);
+   if (result != 0) {
+     if (TraceHPI) tty->print_cr("Can't find HPI_SystemInterface");
+     return JNI_ERR;
+   }
+-  
++
+   return JNI_OK;
+ }
+ 
+@@ -92,13 +89,13 @@
+     }
+     return JNI_ERR;
+   }
+-  
++
+   jint result;
+   result = (*_get_interface)((void **)&_socket, "Socket", 1);
+   if (result != 0) {
+     if (TraceHPI) tty->print_cr("Can't find HPI_SocketInterface");
+     return JNI_ERR;
+   }
+-  
++
+   return JNI_OK;
+ }
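
The HPI initialization above fetches each function table through a single
get_interface(void**, name, version) callback and treats any nonzero return
as "not found". A small sketch of that named, versioned lookup pattern;
FileIface, demo_open and the one-entry registry are invented stand-ins for
the much larger real tables:

    #include <cstring>
    #include <cstdio>

    struct FileIface {                           // toy interface vtable
      int (*open)(const char* path);
    };

    static int demo_open(const char* path) {
      std::printf("open %s\n", path);
      return 3;                                  // pretend fd
    }
    static FileIface g_file = { demo_open };

    // Hand out a named, versioned vtable through an out-parameter;
    // 0 means success, mirroring the checks in hpi.cpp above.
    static int get_interface(void** out, const char* name, int version) {
      if (std::strcmp(name, "File") == 0 && version == 1) {
        *out = &g_file;
        return 0;
      }
      return -1;                                 // unknown interface/version
    }

    int main() {
      FileIface* file = nullptr;
      if (get_interface((void**)&file, "File", 1) != 0) {
        std::fprintf(stderr, "Can't find HPI_FileInterface\n");
        return 1;
      }
      return file->open("/tmp/x") >= 0 ? 0 : 1;
    }
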
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/hpi.hpp openjdk/hotspot/src/share/vm/runtime/hpi.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/hpi.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/hpi.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)hpi.hpp	1.22 07/05/05 17:06:48 JVM"
+-#endif
+ /*
+  * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,13 +19,13 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+ // C++ wrapper to HPI.
+ //
+-               
++
+ class hpi : AllStatic {
+ 
+ private:
+@@ -81,9 +78,9 @@
+                               struct sockaddr *to, int tolen);
+   static inline int    socket_available(int fd, jint *pbytes);
+ 
+-  static inline int    get_sock_opt(int fd, int level, int optname, 
++  static inline int    get_sock_opt(int fd, int level, int optname,
+                               char *optval, int* optlen);
+-  static inline int    set_sock_opt(int fd, int level, int optname, 
++  static inline int    set_sock_opt(int fd, int level, int optname,
+                               const char *optval, int optlen);
+   static inline int    get_host_name(char* name, int namelen);
+   static inline struct hostent*  get_host_by_addr(const char* name, int len, int type);
+@@ -161,7 +158,7 @@
+         (char *path),
+         ("path = %s", path),
+         (path));
+-    
++
+ HPIDECL(file_type, "file_type", _file, FileType, int, "%d",
+         (const char *path),
+         ("path = %s", path),
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/icache.cpp openjdk/hotspot/src/share/vm/runtime/icache.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/icache.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/icache.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)icache.cpp	1.23 07/05/05 17:06:49 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -32,7 +29,7 @@
+ AbstractICache::flush_icache_stub_t AbstractICache::_flush_icache_stub = NULL;
+ 
+ void AbstractICache::initialize() {
+-  // Making this stub must be FIRST use of assembler 
++  // Making this stub must be FIRST use of assembler
+   ResourceMark rm;
+ 
+   BufferBlob* b = BufferBlob::create("flush_icache_stub", ICache::stub_size);
+@@ -53,7 +50,7 @@
+   // The business with the magic number is just a little security.
+   // We cannot call the flush stub when generating the flush stub
+   // because it isn't there yet.  So, the stub also returns its third
+-  // parameter.  This is a cheap check that the stub was really executed. 
++  // parameter.  This is a cheap check that the stub was really executed.
+   static int magic = 0xbaadbabe;
+ 
+   int auto_magic = magic; // Make a local copy to avoid race condition
+@@ -84,7 +81,7 @@
+   static bool firstTime = true;
+   if (firstTime) {
+     guarantee(start == CAST_FROM_FN_PTR(address, _flush_icache_stub),
+-	      "first flush should be for flush stub");
++              "first flush should be for flush stub");
+     firstTime = false;
+     return;
+   }
+@@ -99,7 +96,7 @@
+     nbytes += line_offset;
+   }
+   call_flush_stub(start, round_to(nbytes, ICache::line_size) >>
+-		         ICache::log2_line_size);
++                         ICache::log2_line_size);
+ }
+ 
+ // For init.cpp
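
The icache.cpp comments describe a bootstrap problem: the flush routine is
itself generated code, so it cannot be used to flush the instruction cache
while it is still being created. The stub therefore returns its third
parameter unchanged, giving the VM a cheap "this code really executed"
check. A plain-C++ restatement of that check, with the actual cache flushing
elided:

    #include <cassert>

    // Stand-in for the generated stub: would flush [start, start+lines)
    // cache lines, then echo back its third argument as proof it ran.
    static int flush_stub(void* /*start*/, int /*lines*/, int magic) {
      return magic;
    }

    static void call_flush_stub(void* start, int lines) {
      static int magic = (int)0xbaadbabe;  // the "little security" value
      int auto_magic = magic;              // local copy, as in the code above
      int r = flush_stub(start, lines, auto_magic);
      assert(r == auto_magic && "flush stub was not executed");
    }

    int main() {
      char buf[64];
      call_flush_stub(buf, 1);
      return 0;
    }
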
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/icache.hpp openjdk/hotspot/src/share/vm/runtime/icache.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/icache.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/icache.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)icache.hpp	1.18 07/05/05 17:06:44 JVM"
+-#endif
+ /*
+  * Copyright 1997-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Interface for updating the instruction cache.  Whenever the VM modifies
+@@ -77,7 +74,7 @@
+   // Since we cannot flush the cache when this stub is generated,
+   // it must be generated first, and just to be sure, we do extra
+   // work to allow a check that these instructions got executed.
+-  // 
++  //
+   // The flush stub has three parameters (see flush_icache_stub_t).
+   //
+   //   addr  - Start address, must be aligned at log2_line_size
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/init.cpp openjdk/hotspot/src/share/vm/runtime/init.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/init.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/init.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)init.cpp	1.122 07/05/23 10:54:05 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -45,7 +42,7 @@
+ void VM_Version_init();
+ void JDK_Version_init();
+ void stubRoutines_init1();
+-jint universe_init();  // dependent on codeCache_init and stubRoutines_init 
++jint universe_init();  // dependent on codeCache_init and stubRoutines_init
+ void interpreter_init();  // before any methods loaded
+ void invocationCounter_init();  // before any methods loaded
+ void marksweep_init();
+@@ -66,7 +63,7 @@
+ // Initialization after compiler initialization
+ bool universe_post_init();  // must happen after compiler_init
+ void javaClasses_init();  // must happen after vtable initialization
+-void stubRoutines_init2(); // note: StubRoutines need 2-phase init 
++void stubRoutines_init2(); // note: StubRoutines need 2-phase init
+ 
+ // Do not disable thread-local-storage, as it is important for some
+ // JNI/JVM/JVMTI functions and signal handlers to work properly
+@@ -84,7 +81,8 @@
+ }
+ 
+ 
+-jint init_globals() {  
++jint init_globals() {
++  HandleMark hm;
+   management_init();
+   vtune_init();
+   bytecodes_init();
+@@ -93,7 +91,7 @@
+   VM_Version_init();
+   JDK_Version_init();
+   stubRoutines_init1();
+-  jint status = universe_init();  // dependent on codeCache_init and stubRoutines_init 
++  jint status = universe_init();  // dependent on codeCache_init and stubRoutines_init
+   if (status != JNI_OK)
+     return status;
+ 
+@@ -107,7 +105,9 @@
+   universe2_init();  // dependent on codeCache_init and stubRoutines_init
+   referenceProcessor_init();
+   jni_handles_init();
++#ifndef VM_STRUCTS_KERNEL
+   vmStructs_init();
++#endif // VM_STRUCTS_KERNEL
+ 
+   vtableStubs_init();
+   InlineCacheBuffer_init();
+@@ -115,17 +115,17 @@
+   compilationPolicy_init();
+   VMRegImpl::set_regName();
+ 
+-  if (!universe_post_init()) { 
++  if (!universe_post_init()) {
+     return JNI_ERR;
+   }
+   javaClasses_init();  // must happen after vtable initialization
+-  stubRoutines_init2(); // note: StubRoutines need 2-phase init 
++  stubRoutines_init2(); // note: StubRoutines need 2-phase init
+ 
+   // Although we'd like to, we can't easily do a heap verify
+   // here because the main thread isn't yet a JavaThread, so
+   // its TLAB may not be made parseable from the usual interfaces.
+   if (VerifyBeforeGC && !UseTLAB &&
+-      Universe::heap()->total_collections() >= VerifyGCStartAt) { 
++      Universe::heap()->total_collections() >= VerifyGCStartAt) {
+     Universe::heap()->prepare_for_verify();
+     Universe::verify();   // make sure we're starting with a clean slate
+   }
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/init.hpp openjdk/hotspot/src/share/vm/runtime/init.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/init.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/init.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)init.hpp	1.19 07/05/05 17:06:49 JVM"
+-#endif
+ /*
+  * Copyright 1997-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,14 +19,14 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // init_globals replaces C++ global objects so we can use the standard linker
+-// to link Delta (which is at least twice as fast as using the GNU C++ linker). 
++// to link Delta (which is at least twice as fast as using the GNU C++ linker).
+ // Also, init.c gives explicit control over the sequence of initialization.
+ 
+-// Programming convention: instead of using a global object (e,g, "Foo foo;"), 
++// Programming convention: instead of using a global object (e.g., "Foo foo;"),
+ // use "Foo* foo;", create a function init_foo() in foo.c, and add a call
+ // to init_foo in init.cpp.
+ 
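
The convention init.hpp prescribes can be restated concretely: never a
file-scope "Foo foo;" whose constructor runs at some unspecified point
during C++ static initialization, but a pointer plus an init_foo() invoked
from init_globals() in an explicitly chosen order. A minimal sketch; Foo and
init_globals_sketch are hypothetical names:

    struct Foo {
      int answer() const { return 42; }
    };

    Foo* foo = nullptr;          // instead of a global "Foo foo;"

    void init_foo() {            // called when init_globals() decides,
      foo = new Foo();           // not at static-initialization time
    }

    // init.cpp's role: one function that spells out the order by hand.
    int init_globals_sketch() {
      init_foo();
      // init_bar(); init_baz(); ...each placed after its dependencies
      return foo->answer() == 42 ? 0 : -1;
    }

    int main() { return init_globals_sketch(); }
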
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/interfaceSupport.cpp openjdk/hotspot/src/share/vm/runtime/interfaceSupport.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/interfaceSupport.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/interfaceSupport.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)interfaceSupport.cpp	1.91 07/05/05 17:06:50 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,11 +19,11 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+-#include "incls/_interfaceSupport.cpp.incl" 
++#include "incls/_interfaceSupport.cpp.incl"
+ 
+ 
+ // Implementation of InterfaceSupport
+@@ -72,7 +69,7 @@
+   if (thread->is_VM_thread()) return; // Avoid concurrent calls
+   // Check for new, not quite initialized thread. A thread in new mode cannot initiate a GC.
+   JavaThread *current_thread = (JavaThread *)thread;
+-  if (current_thread->active_handles() == NULL) return; 
++  if (current_thread->active_handles() == NULL) return;
+ 
+   if (is_init_completed()) {
+ 
+@@ -97,7 +94,7 @@
+       if (FullGCALotInterval > 1) {
+         _fullgc_alot_counter = 1+(long)((double)FullGCALotInterval*os::random()/(max_jint+1.0));
+         if (PrintGCDetails && Verbose) {
+-          tty->print_cr("Full gc no: %u\tInterval: %d", invocations, 
++          tty->print_cr("Full gc no: %u\tInterval: %d", invocations,
+                         _fullgc_alot_counter);
+         }
+       } else {
+@@ -138,7 +135,7 @@
+ int walk_stack_counter = 0;
+ 
+ void InterfaceSupport::walk_stack_from(vframe* start_vf) {
+-  // walk 
++  // walk
+   int i = 0;
+   for (vframe* f = start_vf; f; f = f->sender() ) {
+     if (i < 50) vframe_array[i++] = f;
+@@ -158,7 +155,7 @@
+ 
+ # ifdef ENABLE_ZAP_DEAD_LOCALS
+ 
+-static int zap_traversals = 0;  
++static int zap_traversals = 0;
+ 
+ void InterfaceSupport::zap_dead_locals_old() {
+   JavaThread* thread = JavaThread::current();
+@@ -205,12 +202,12 @@
+ 
+ 
+ void InterfaceSupport::stress_derived_pointers() {
+-#ifdef COMPILER2  
++#ifdef COMPILER2
+   JavaThread *thread = JavaThread::current();
+   if (!is_init_completed()) return;
+   ResourceMark rm(thread);
+-  bool found = false;  
+-  for (StackFrameStream sfs(thread); !sfs.is_done() && !found; sfs.next()) {    
++  bool found = false;
++  for (StackFrameStream sfs(thread); !sfs.is_done() && !found; sfs.next()) {
+     CodeBlob* cb = sfs.current()->cb();
+     if (cb != NULL && cb->oop_maps() ) {
+       // Find oopmap for current method
+@@ -234,10 +231,10 @@
+   ResourceMark rm(thread);
+   // disabled because it throws warnings that oop maps should only be accessed
+   // in VM thread or during debugging
+-  
++
+   if (!thread->has_pending_exception()) {
+     // verification does not work if there are pending exceptions
+-    StackFrameStream sfs(thread);  
++    StackFrameStream sfs(thread);
+     CodeBlob* cb = sfs.current()->cb();
+       // In case of exceptions we might not have a runtime_stub on
+       // top of stack, hence, all callee-saved registers are not going
+@@ -270,4 +267,3 @@
+   }
+ #endif
+ }
+-
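
One reformatted line above is worth unpacking: the expression
1 + (long)((double)FullGCALotInterval * os::random() / (max_jint + 1.0))
maps a nonnegative random value of at most max_jint to a uniform pick in
[1, FullGCALotInterval]; dividing by max_jint + 1.0 keeps the quotient
strictly below 1, so the +1 can never push the result past the interval.
The same arithmetic in isolation, with std::rand standing in for os::random:

    #include <cstdlib>
    #include <cassert>

    // Scale a raw random integer in [0, RAND_MAX] down to [1, interval].
    static long next_gc_counter(long interval) {
      return 1 + (long)((double)interval * std::rand() / (RAND_MAX + 1.0));
    }

    int main() {
      for (int i = 0; i < 100000; i++) {
        long c = next_gc_counter(16);
        assert(1 <= c && c <= 16);   // never 0, never 17
      }
      return 0;
    }
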
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/interfaceSupport.hpp openjdk/hotspot/src/share/vm/runtime/interfaceSupport.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/interfaceSupport.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/interfaceSupport.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)interfaceSupport.hpp	1.176 07/05/17 16:05:52 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,12 +19,12 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Wrapper for all entry points to the virtual machine.
+ // The HandleMarkCleaner is a faster version of HandleMark.
+-// It relies on the fact that there is a HandleMark further 
++// It relies on the fact that there is a HandleMark further
+ // down the stack (in JavaCalls::call_helper), and just resets
+ // to the saved values in that HandleMark.
+ 
+@@ -89,15 +86,15 @@
+ };
+ 
+ 
+-// Basic class for all thread transition classes.  
++// Basic class for all thread transition classes.
+ 
+ class ThreadStateTransition : public StackObj {
+  protected:
+-  JavaThread* _thread; 
++  JavaThread* _thread;
+  public:
+-  ThreadStateTransition(JavaThread *thread) { 
+-    _thread = thread; 
+-    assert(thread != NULL && thread->is_Java_thread(), "must be Java thread"); 
++  ThreadStateTransition(JavaThread *thread) {
++    _thread = thread;
++    assert(thread != NULL && thread->is_Java_thread(), "must be Java thread");
+   }
+ 
+   // Change threadstate in a manner, so safepoint can detect changes.
+@@ -108,9 +105,9 @@
+     assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transitions states");
+     assert(thread->thread_state() == from, "coming from wrong thread state");
+     // Change to transition state (assumes total store ordering!  -Urs)
+-    thread->set_thread_state((JavaThreadState)(from + 1)); 
++    thread->set_thread_state((JavaThreadState)(from + 1));
+ 
+-    // Make sure new state is seen by VM thread 
++    // Make sure new state is seen by VM thread
+     if (os::is_MP()) {
+       if (UseMembar) {
+         // Force a fence between the write above and read below
+@@ -124,7 +121,7 @@
+     if (SafepointSynchronize::do_call_back()) {
+       SafepointSynchronize::block(thread);
+     }
+-    thread->set_thread_state(to); 
++    thread->set_thread_state(to);
+ 
+     CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
+   }
+@@ -139,23 +136,23 @@
+     assert(thread->thread_state() == from, "coming from wrong thread state");
+     assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transitions states");
+     // Change to transition state (assumes total store ordering!  -Urs)
+-    thread->set_thread_state((JavaThreadState)(from + 1)); 
++    thread->set_thread_state((JavaThreadState)(from + 1));
+ 
+-    // Make sure new state is seen by VM thread 
++    // Make sure new state is seen by VM thread
+     if (os::is_MP()) {
+       if (UseMembar) {
+         // Force a fence between the write above and read below
+         OrderAccess::fence();
+       } else {
+         // Must use this rather than serialization page in particular on Windows
+-        InterfaceSupport::serialize_memory(thread);      
++        InterfaceSupport::serialize_memory(thread);
+       }
+     }
+ 
+     if (SafepointSynchronize::do_call_back()) {
+       SafepointSynchronize::block(thread);
+     }
+-    thread->set_thread_state(to); 
++    thread->set_thread_state(to);
+ 
+     CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
+   }
+@@ -163,9 +160,9 @@
+   // Same as above, but assumes from = _thread_in_Java. This is simpler, since we
+   // never block on entry to the VM. This will break the code, since e.g. preserve arguments
+   // have not been setup.
+-  static inline void transition_from_java(JavaThread *thread, JavaThreadState to) {    
+-    assert(thread->thread_state() == _thread_in_Java, "coming from wrong thread state");    
+-    thread->set_thread_state(to); 
++  static inline void transition_from_java(JavaThread *thread, JavaThreadState to) {
++    assert(thread->thread_state() == _thread_in_Java, "coming from wrong thread state");
++    thread->set_thread_state(to);
+   }
+ 
+   static inline void transition_from_native(JavaThread *thread, JavaThreadState to) {
+@@ -181,7 +178,7 @@
+         OrderAccess::fence();
+       } else {
+         // Must use this rather than serialization page in particular on Windows
+-        InterfaceSupport::serialize_memory(thread);      
++        InterfaceSupport::serialize_memory(thread);
+       }
+     }
+ 
+@@ -197,7 +194,7 @@
+ 
+     thread->set_thread_state(to);
+   }
+- protected:  
++ protected:
+    void trans(JavaThreadState from, JavaThreadState to)  { transition(_thread, from, to); }
+    void trans_from_java(JavaThreadState to)              { transition_from_java(_thread, to); }
+    void trans_from_native(JavaThreadState to)            { transition_from_native(_thread, to); }
+@@ -207,10 +204,10 @@
+ 
+ class ThreadInVMfromJava : public ThreadStateTransition {
+  public:
+-  ThreadInVMfromJava(JavaThread* thread) : ThreadStateTransition(thread) { 
+-    trans_from_java(_thread_in_vm); 
++  ThreadInVMfromJava(JavaThread* thread) : ThreadStateTransition(thread) {
++    trans_from_java(_thread_in_vm);
+   }
+-  ~ThreadInVMfromJava()  {     
++  ~ThreadInVMfromJava()  {
+     trans(_thread_in_vm, _thread_in_Java);
+     // Check for pending. async. exceptions or suspends.
+     if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition();
+@@ -227,12 +224,12 @@
+     if (t->is_Java_thread()) {
+       JavaThread* t2 = (JavaThread*) t;
+       if (t2->thread_state() == _thread_in_native) {
+-	_thread = t2;
+-	ThreadStateTransition::transition_from_native(t2, _thread_in_vm);
+-	// Used to have a HandleMarkCleaner but that is dangerous as
+-	// it could free a handle in our (indirect, nested) caller.
+-	// We expect any handles will be short lived and figure we
+-	// don't need an actual HandleMark.
++        _thread = t2;
++        ThreadStateTransition::transition_from_native(t2, _thread_in_vm);
++        // Used to have a HandleMarkCleaner but that is dangerous as
++        // it could free a handle in our (indirect, nested) caller.
++        // We expect any handles will be short lived and figure we
++        // don't need an actual HandleMark.
+       }
+     }
+   }
+@@ -244,22 +241,22 @@
+ };
+ 
+ 
+-class ThreadInVMfromNative : public ThreadStateTransition { 
++class ThreadInVMfromNative : public ThreadStateTransition {
+  public:
+-  ThreadInVMfromNative(JavaThread* thread) : ThreadStateTransition(thread) { 
+-    trans_from_native(_thread_in_vm); 
++  ThreadInVMfromNative(JavaThread* thread) : ThreadStateTransition(thread) {
++    trans_from_native(_thread_in_vm);
+   }
+-  ~ThreadInVMfromNative() {     
++  ~ThreadInVMfromNative() {
+     trans_and_fence(_thread_in_vm, _thread_in_native);
+   }
+ };
+ 
+ 
+-class ThreadToNativeFromVM : public ThreadStateTransition { 
++class ThreadToNativeFromVM : public ThreadStateTransition {
+  public:
+   ThreadToNativeFromVM(JavaThread *thread) : ThreadStateTransition(thread) {
+-    // We are leaving the VM at this point and going directly to native code. 
+-    // Block, if we are in the middle of a safepoint synchronization.    
++    // We are leaving the VM at this point and going directly to native code.
++    // Block, if we are in the middle of a safepoint synchronization.
+     assert(!thread->owns_locks(), "must release all locks when leaving VM");
+     thread->frame_anchor()->make_walkable(thread);
+     trans_and_fence(_thread_in_vm, _thread_in_native);
+@@ -267,8 +264,8 @@
+     if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition(false);
+   }
+ 
+-  ~ThreadToNativeFromVM() { 
+-    trans_from_native(_thread_in_vm); 
++  ~ThreadToNativeFromVM() {
++    trans_from_native(_thread_in_vm);
+     // We don't need to clear_walkable because it will happen automagically when we return to java
+   }
+ };
+@@ -277,13 +274,13 @@
+ class ThreadBlockInVM : public ThreadStateTransition {
+  public:
+   ThreadBlockInVM(JavaThread *thread)
+-  : ThreadStateTransition(thread) { 
+-    // Once we are blocked vm expects stack to be walkable 
++  : ThreadStateTransition(thread) {
++    // Once we are blocked vm expects stack to be walkable
+     thread->frame_anchor()->make_walkable(thread);
+-    trans_and_fence(_thread_in_vm, _thread_blocked); 
++    trans_and_fence(_thread_in_vm, _thread_blocked);
+   }
+-  ~ThreadBlockInVM() { 
+-    trans_and_fence(_thread_blocked, _thread_in_vm); 
++  ~ThreadBlockInVM() {
++    trans_and_fence(_thread_blocked, _thread_in_vm);
+     // We don't need to clear_walkable because it will happen automagically when we return to java
+   }
+ };
+@@ -294,10 +291,10 @@
+ // See bugs: 4324348, 4854693, 4998314, 5040492, 5050705.
+ class ThreadInVMfromJavaNoAsyncException : public ThreadStateTransition {
+  public:
+-  ThreadInVMfromJavaNoAsyncException(JavaThread* thread) : ThreadStateTransition(thread) { 
+-    trans_from_java(_thread_in_vm); 
++  ThreadInVMfromJavaNoAsyncException(JavaThread* thread) : ThreadStateTransition(thread) {
++    trans_from_java(_thread_in_vm);
+   }
+-  ~ThreadInVMfromJavaNoAsyncException()  {     
++  ~ThreadInVMfromJavaNoAsyncException()  {
+     trans(_thread_in_vm, _thread_in_Java);
+     // NOTE: We do not check for pending. async. exceptions.
+     // If we did and moved the pending async exception over into the
+@@ -349,7 +346,7 @@
+     // do verification AFTER potential deoptimization
+     if (VerifyStack) {
+       InterfaceSupport::verify_stack();
+-    }    
++    }
+ 
+   }
+ };
+@@ -505,7 +502,7 @@
+     ThreadInVMfromNative __tiv(thread);                              \
+     debug_only(VMNativeEntryWrapper __vew;)                          \
+     __ENTRY(result_type, header, thread)
+-    
++
+ 
+ // Ensure that the VMNativeEntryWrapper constructor, which can cause
+ // a GC, is called outside the NoHandleMark (set via __QUICK_ENTRY).
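
The hunks above are whitespace cleanups to HotSpot's RAII thread-state transition guards (ThreadInVMfromJava, ThreadInVMfromNative, ThreadToNativeFromVM, ThreadBlockInVM). For orientation, a minimal sketch of the scoped-guard idiom these classes implement; the function name is hypothetical and error handling is omitted:

    // Illustrative only: a scoped transition from native code into the VM.
    // The constructor performs the native -> _thread_in_vm transition; the
    // destructor transitions back, fencing as the guard requires.
    void some_jni_entry_point(JavaThread* thread) {  // hypothetical function
      ThreadInVMfromNative tiv(thread);
      // ... work that may touch VM-internal data structures ...
    }                                                // ~ThreadInVMfromNative()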
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/javaCalls.cpp openjdk/hotspot/src/share/vm/runtime/javaCalls.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/javaCalls.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/javaCalls.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)javaCalls.cpp	1.220 07/05/05 17:06:51 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -36,7 +33,7 @@
+   bool clear_pending_exception = true;
+ 
+   guarantee(thread->is_Java_thread(), "crucial check - the VM thread cannot and must not escape to Java code");
+-  assert(!thread->owns_locks(), "must release all locks when leaving VM"); 
++  assert(!thread->owns_locks(), "must release all locks when leaving VM");
+   guarantee(!thread->is_Compiler_thread(), "cannot make java calls from the compiler");
+   _result   = result;
+ 
+@@ -46,14 +43,14 @@
+   thread->update_highest_lock((address)this);
+ 
+   // Allocate handle block for Java code. This must be done before we change thread_state to _thread_in_Java_or_stub,
+-  // since it can potentially block. 
++  // since it can potentially block.
+   JNIHandleBlock* new_handles = JNIHandleBlock::allocate_block(thread);
+ 
+   // After this, we are officially in JavaCode. This needs to be done before we change any of the thread local
+   // info, since we cannot find oops before the new information is set up completely.
+   ThreadStateTransition::transition(thread, _thread_in_vm, _thread_in_Java);
+-  
+-  // Make sure that we handle asynchronous stops and suspends _before_ we clear all thread state 
++
++  // Make sure that we handle asynchronous stops and suspends _before_ we clear all thread state
+   // in JavaCallWrapper::JavaCallWrapper(). This way, we can decide if we need to do any pd actions
+   // to prepare for stop/suspend (flush register windows on sparcs, cache sp, or other state).
+   if (thread->has_special_runtime_exit_condition()) {
+@@ -74,9 +71,9 @@
+   THREAD->allow_unhandled_oop(&_receiver);
+ #endif // CHECK_UNHANDLED_OOPS
+ 
+-  _thread       = (JavaThread *)thread;  
++  _thread       = (JavaThread *)thread;
+   _handles      = _thread->active_handles();    // save previous handle block & Java frame linkage
+-  
++
+   // For the profiler, the last_Java_frame information in thread must always be in
+   // legal state. We have no last Java frame if last_Java_sp == NULL so
+   // the valid transition is to clear _last_Java_sp and then reset the rest of
+@@ -92,7 +89,7 @@
+ 
+   // clear any pending exception in thread (native calls start with no exception pending)
+   if(clear_pending_exception) {
+-    _thread->clear_pending_exception();             
++    _thread->clear_pending_exception();
+   }
+ 
+   if (_anchor.last_Java_sp() == NULL) {
+@@ -105,12 +102,12 @@
+   assert(_thread == JavaThread::current(), "must still be the same thread");
+ 
+   // restore previous handle block & Java frame linkage
+-  JNIHandleBlock *_old_handles = _thread->active_handles();   
++  JNIHandleBlock *_old_handles = _thread->active_handles();
+   _thread->set_active_handles(_handles);
+ 
+   _thread->frame_anchor()->zap();
+ 
+-  debug_only(_thread->dec_java_call_counter());  
++  debug_only(_thread->dec_java_call_counter());
+ 
+   if (_anchor.last_Java_sp() == NULL) {
+     _thread->set_base_of_stack_pointer(NULL);
+@@ -137,7 +134,7 @@
+ void JavaCallWrapper::oops_do(OopClosure* f) {
+   f->do_oop((oop*)&_callee_method);
+   f->do_oop((oop*)&_receiver);
+-  handles()->oops_do(f);  
++  handles()->oops_do(f);
+ }
+ 
+ 
+@@ -177,14 +174,14 @@
+     // safe to skip constructor call
+   } else {
+     static JavaValue result(T_VOID);
+-    JavaCallArguments args(receiver);  
++    JavaCallArguments args(receiver);
+     call(&result, method, &args, CHECK);
+   }
+ }
+ 
+ // ============ Virtual calls ============
+ 
+-void JavaCalls::call_virtual(JavaValue* result, KlassHandle spec_klass, symbolHandle name, symbolHandle signature, JavaCallArguments* args, TRAPS) {  
++void JavaCalls::call_virtual(JavaValue* result, KlassHandle spec_klass, symbolHandle name, symbolHandle signature, JavaCallArguments* args, TRAPS) {
+   CallInfo callinfo;
+   Handle receiver = args->receiver();
+   KlassHandle recvrKlass(THREAD, receiver.is_null() ? (klassOop)NULL : receiver->klass());
+@@ -192,7 +189,7 @@
+           callinfo, receiver, recvrKlass, spec_klass, name, signature,
+           KlassHandle(), false, true, CHECK);
+   methodHandle method = callinfo.selected_method();
+-  assert(method.not_null(), "should have thrown exception"); 
++  assert(method.not_null(), "should have thrown exception");
+ 
+   // Invoke the method
+   JavaCalls::call(result, method, args, CHECK);
+@@ -228,7 +225,7 @@
+   LinkResolver::resolve_special_call(callinfo, klass, name, signature, KlassHandle(), false, CHECK);
+   methodHandle method = callinfo.selected_method();
+   assert(method.not_null(), "should have thrown exception");
+-    
++
+   // Invoke the method
+   JavaCalls::call(result, method, args, CHECK);
+ }
+@@ -242,7 +239,7 @@
+ 
+ void JavaCalls::call_special(JavaValue* result, Handle receiver, KlassHandle klass, symbolHandle name, symbolHandle signature, Handle arg1, TRAPS) {
+   JavaCallArguments args(receiver); // One oop argument
+-  args.push_oop(arg1);  
++  args.push_oop(arg1);
+   call_special(result, klass, name, signature, &args, CHECK);
+ }
+ 
+@@ -262,7 +259,7 @@
+   LinkResolver::resolve_static_call(callinfo, klass, name, signature, KlassHandle(), false, true, CHECK);
+   methodHandle method = callinfo.selected_method();
+   assert(method.not_null(), "should have thrown exception");
+-    
++
+   // Invoke the method
+   JavaCalls::call(result, method, args, CHECK);
+ }
+@@ -295,7 +292,7 @@
+ void JavaCalls::call(JavaValue* result, methodHandle method, JavaCallArguments* args, TRAPS) {
+   // Check if we need to wrap a potential OS exception handler around thread
+   // This is used for e.g. Win32 structured exception handlers
+-  assert(THREAD->is_Java_thread(), "only JavaThreads can make JavaCalls");  
++  assert(THREAD->is_Java_thread(), "only JavaThreads can make JavaCalls");
+   // Need to wrap each and every time, since there might be native code down the
+   // stack that has installed its own exception handlers
+   os::os_exception_wrapper(call_helper, result, &method, args, THREAD);
+@@ -304,11 +301,11 @@
+ void JavaCalls::call_helper(JavaValue* result, methodHandle* m, JavaCallArguments* args, TRAPS) {
+   methodHandle method = *m;
+   JavaThread* thread = (JavaThread*)THREAD;
+-  assert(thread->is_Java_thread(), "must be called by a java thread"); 
+-  assert(method.not_null(), "must have a method to call");  
++  assert(thread->is_Java_thread(), "must be called by a java thread");
++  assert(method.not_null(), "must have a method to call");
+   assert(!SafepointSynchronize::is_at_safepoint(), "call to Java code during VM operation");
+   assert(!thread->handle_area()->no_handle_mark_active(), "cannot call out to Java here");
+-  
++
+ 
+   CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
+ 
+@@ -320,14 +317,14 @@
+     assert(result->get_type() == T_VOID, "an empty method must return a void value");
+     return;
+   }
+-    
++
+ 
+ #ifdef ASSERT
+-  { klassOop holder = method->method_holder();    
++  { klassOop holder = method->method_holder();
+     // A klass might not be initialized since JavaCall's might be used during the executing of
+     // the <clinit>. For example, a Thread.start might start executing on an object that is
+     // not fully initialized! (bad Java programming style)
+-    assert(instanceKlass::cast(holder)->is_linked(), "rewritting must have taken place");                  
++    assert(instanceKlass::cast(holder)->is_linked(), "rewriting must have taken place");
+   }
+ #endif
+ 
+@@ -336,20 +333,20 @@
+   if (CompilationPolicy::mustBeCompiled(method)) {
+     CompileBroker::compile_method(method, InvocationEntryBci,
+                                   methodHandle(), 0, "mustBeCompiled", CHECK);
+-  }  
+-  
++  }
++
+   // Since the call stub sets up like the interpreter we call the from_interpreted_entry
+   // so we can go compiled via an i2c. Otherwise initial entry method will always
+   // run interpreted.
+-  address entry_point = method->from_interpreted_entry();      
++  address entry_point = method->from_interpreted_entry();
+   if (JvmtiExport::can_post_interpreter_events() && thread->is_interp_only_mode()) {
+-    entry_point = method->interpreter_entry();      
++    entry_point = method->interpreter_entry();
+   }
+ 
+   // Figure out if the result value is an oop or not (Note: This is a different value
+-  // than result_type. result_type will be T_INT of oops. (it is about size)    
+-  BasicType result_type = runtime_type_from(result);  
+-  bool oop_result_flag = (result->get_type() == T_OBJECT || result->get_type() == T_ARRAY);  
++  // than result_type. result_type will be T_INT for oops. (it is about size)
++  BasicType result_type = runtime_type_from(result);
++  bool oop_result_flag = (result->get_type() == T_OBJECT || result->get_type() == T_ARRAY);
+ 
+   // NOTE: if we move the computation of the result_val_address inside
+   // the call to call_stub, the optimizer produces wrong code.
+@@ -376,11 +373,11 @@
+   }
+ 
+   // do call
+-  { JavaCallWrapper link(method, receiver, result, CHECK);  
++  { JavaCallWrapper link(method, receiver, result, CHECK);
+     { HandleMark hm(thread);  // HandleMark used by HandleMarkCleaner
+ 
+       StubRoutines::call_stub()(
+-        (address)&link,        
++        (address)&link,
+         // (intptr_t*)&(result->_value), // see NOTE above (compiler problem)
+         result_val_address,          // see NOTE above (compiler problem)
+         result_type,
+@@ -389,8 +386,8 @@
+         args->parameters(),
+         args->size_of_parameters(),
+         CHECK
+-      );    
+-  
++      );
++
+       result = link.result();  // circumvent MS C++ 5.0 compiler bug (result is clobbered across call)
+       // Preserve oop return value across possible gc points
+       if (oop_result_flag) {
+@@ -403,7 +400,7 @@
+   // The following assert was not realistic.  Thread.stop can set that bit at any moment.
+   //assert(!thread->has_special_runtime_exit_condition(), "no async. exceptions should be installed");
+ 
+-  // Restore possible oop return 
++  // Restore possible oop return
+   if (oop_result_flag) {
+     result->set_jobject((jobject)thread->vm_result());
+     thread->set_vm_result(NULL);
+@@ -451,19 +448,19 @@
+     _is_oop = is_oop;
+     _is_return = false;
+     _return_type = return_type;
+-    _pos = 0;    
+-    if (!is_static) {      
++    _pos = 0;
++    if (!is_static) {
+       check_value(true); // Receiver must be an oop
+     }
+   }
+ 
+-  void check_value(bool type) {    
++  void check_value(bool type) {
+     guarantee(_is_oop[_pos++] == type, "signature does not match pushed arguments");
+   }
+ 
+   void check_doing_return(bool state) { _is_return = state; }
+ 
+-  void check_return_type(BasicType t) {    
++  void check_return_type(BasicType t) {
+     guarantee(_is_return && t == _return_type, "return type does not match");
+   }
+ 
+@@ -471,7 +468,7 @@
+     if (_is_return) {
+       check_return_type(t);
+       return;
+-    }      
++    }
+     check_value(false);
+   }
+ 
+@@ -482,8 +479,8 @@
+       check_return_type(t);
+       return;
+     }
+-    
+-    check_value(false); 
++
++    check_value(false);
+     check_value(false);
+   }
+ 
+@@ -491,7 +488,7 @@
+     if (_is_return) {
+       check_return_type(t);
+       return;
+-    }    
++    }
+     check_value(true);
+   }
+ 
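
The call_virtual overloads above resolve the method against the receiver's class and dispatch through JavaCalls::call. A minimal usage sketch, assuming receiver is a live Handle and spec_klass, name and signature are handles the caller has already prepared:

    // Illustrative only: invoke a virtual method through JavaCalls.
    JavaValue result(T_OBJECT);
    JavaCallArguments args(receiver);  // receiver becomes the first oop slot
    JavaCalls::call_virtual(&result, spec_klass, name, signature, &args, CHECK);
    oop value = (oop) result.get_jobject();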
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/javaCalls.hpp openjdk/hotspot/src/share/vm/runtime/javaCalls.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/javaCalls.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/javaCalls.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)javaCalls.hpp	1.81 07/05/05 17:06:47 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A JavaCallWrapper is constructed before each JavaCall and destructed after the call.
+@@ -33,7 +30,7 @@
+   friend class VMStructs;
+  private:
+   JavaThread*      _thread;                 // the thread to which this call belongs
+-  JNIHandleBlock*  _handles;                // the saved handle block      
++  JNIHandleBlock*  _handles;                // the saved handle block
+   methodOop        _callee_method;          // to be able to collect arguments if entry frame is top frame
+   oop              _receiver;               // the receiver of the call (if a non-static call)
+ 
+@@ -51,7 +48,7 @@
+   JNIHandleBlock*  handles() const          { return _handles; }
+ 
+   JavaFrameAnchor* anchor(void)             { return &_anchor; }
+-  
++
+   JavaValue*       result() const           { return _result; }
+   // GC support
+   methodOop        callee_method()          { return _callee_method; }
+@@ -70,13 +67,13 @@
+ 
+   intptr_t    _value_buffer [_default_size + 1];
+   intptr_t    _parameter_buffer [_default_size*2 + 1];
+-  bool        _is_oop_buffer[_default_size + 1];    
++  bool        _is_oop_buffer[_default_size + 1];
+ 
+-  intptr_t*   _value;     
+-  intptr_t*   _parameters;     
++  intptr_t*   _value;
++  intptr_t*   _parameters;
+   bool*       _is_oop;
+   int         _size;
+-  int         _max_size;  
++  int         _max_size;
+   bool        _start_at_zero;      // Support late setting of receiver
+ 
+   void initialize() {
+@@ -93,7 +90,7 @@
+  public:
+   JavaCallArguments() { initialize(); }
+ 
+-  JavaCallArguments(Handle receiver) {    
++  JavaCallArguments(Handle receiver) {
+     initialize();
+     push_oop(receiver);
+   }
+@@ -106,32 +103,32 @@
+         _parameters  = NEW_RESOURCE_ARRAY(intptr_t, max_size*2 + 1);
+       }
+       // Reserve room for potential receiver in value and is_oop
+-      _value++; _is_oop++;  
++      _value++; _is_oop++;
+       _max_size = max_size;
+       _size = 0;
+       _start_at_zero = false;
+     } else {
+       initialize();
+     }
+-  }  
+-  
++  }
++
+   inline void push_oop(Handle h)    { _is_oop[_size] = true;
+-		               JNITypes::put_obj((oop)h.raw_value(), _value, _size); }
++                               JNITypes::put_obj((oop)h.raw_value(), _value, _size); }
+ 
+   inline void push_int(int i)       { _is_oop[_size] = false;
+-		               JNITypes::put_int(i, _value, _size); }
++                               JNITypes::put_int(i, _value, _size); }
+ 
+   inline void push_double(double d) { _is_oop[_size] = false; _is_oop[_size + 1] = false;
+-                               JNITypes::put_double(d, _value, _size); } 
+-  
++                               JNITypes::put_double(d, _value, _size); }
++
+   inline void push_long(jlong l)    { _is_oop[_size] = false; _is_oop[_size + 1] = false;
+                                JNITypes::put_long(l, _value, _size); }
+-  
++
+   inline void push_float(float f)   { _is_oop[_size] = false;
+                                JNITypes::put_float(f, _value, _size); }
+-  
++
+   // receiver
+-  Handle receiver() {    
++  Handle receiver() {
+     assert(_size > 0, "must be at least one argument");
+     assert(_is_oop[0], "first argument must be an oop");
+     assert(_value[0] != 0, "receiver must not be null");
+@@ -145,12 +142,12 @@
+     _value--;
+     _size++;
+     _is_oop[0] = true;
+-    _value[0] = (intptr_t)h.raw_value();            
++    _value[0] = (intptr_t)h.raw_value();
+   }
+ 
+   // Converts all Handles to oops, and returns a reference to parameter vector
+   intptr_t* parameters() ;
+-  int   size_of_parameters() const { return _size; }    
++  int   size_of_parameters() const { return _size; }
+ 
+   // Verify that pushed arguments fit a given method
+   void verify(methodHandle method, BasicType return_type, Thread *thread) PRODUCT_RETURN;
+@@ -172,9 +169,9 @@
+   static void call_special(JavaValue* result, KlassHandle klass, symbolHandle name, symbolHandle signature, JavaCallArguments* args, TRAPS);
+ 
+   static void call_special(JavaValue* result, Handle receiver, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS); // No args
+-  static void call_special(JavaValue* result, Handle receiver, KlassHandle klass, symbolHandle name, symbolHandle signature, Handle arg1, TRAPS); 
+-  static void call_special(JavaValue* result, Handle receiver, KlassHandle klass, symbolHandle name, symbolHandle signature, Handle arg1, Handle arg2, TRAPS); 
+-  
++  static void call_special(JavaValue* result, Handle receiver, KlassHandle klass, symbolHandle name, symbolHandle signature, Handle arg1, TRAPS);
++  static void call_special(JavaValue* result, Handle receiver, KlassHandle klass, symbolHandle name, symbolHandle signature, Handle arg1, Handle arg2, TRAPS);
++
+   // virtual call
+   // ------------
+ 
+@@ -182,13 +179,13 @@
+   static void call_virtual(JavaValue* result, KlassHandle spec_klass, symbolHandle name, symbolHandle signature, JavaCallArguments* args, TRAPS);
+ 
+   static void call_virtual(JavaValue* result, Handle receiver, KlassHandle spec_klass, symbolHandle name, symbolHandle signature, TRAPS); // No args
+-  static void call_virtual(JavaValue* result, Handle receiver, KlassHandle spec_klass, symbolHandle name, symbolHandle signature, Handle arg1, TRAPS); 
+-  static void call_virtual(JavaValue* result, Handle receiver, KlassHandle spec_klass, symbolHandle name, symbolHandle signature, Handle arg1, Handle arg2, TRAPS);   
++  static void call_virtual(JavaValue* result, Handle receiver, KlassHandle spec_klass, symbolHandle name, symbolHandle signature, Handle arg1, TRAPS);
++  static void call_virtual(JavaValue* result, Handle receiver, KlassHandle spec_klass, symbolHandle name, symbolHandle signature, Handle arg1, Handle arg2, TRAPS);
+ 
+   // Static call
+-  // -----------  
++  // -----------
+   static void call_static(JavaValue* result, KlassHandle klass, symbolHandle name, symbolHandle signature, JavaCallArguments* args, TRAPS);
+- 
++
+   static void call_static(JavaValue* result, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS);
+   static void call_static(JavaValue* result, KlassHandle klass, symbolHandle name, symbolHandle signature, Handle arg1, TRAPS);
+   static void call_static(JavaValue* result, KlassHandle klass, symbolHandle name, symbolHandle signature, Handle arg1, Handle arg2, TRAPS);
+@@ -196,4 +193,3 @@
+   // Low-level interface
+   static void call(JavaValue* result, methodHandle method, JavaCallArguments* args, TRAPS);
+ };
+-
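
The argument buffers declared above tag every machine word as oop or non-oop so the GC can visit references; note that push_long and push_double mark two slots each. A short packing sketch, assuming receiver and h are live Handles and l is a jlong:

    JavaCallArguments args(receiver);  // slot 0: receiver oop
    args.push_int(42);                 // one word, tagged non-oop
    args.push_long(l);                 // two words, both tagged non-oop
    args.push_oop(h);                  // tagged as oop so GC will visit it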
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/java.cpp openjdk/hotspot/src/share/vm/runtime/java.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/java.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/java.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)java.cpp	1.221 07/05/29 09:44:26 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -47,7 +44,7 @@
+ 
+ void collect_profiled_methods(methodOop m) {
+   methodHandle mh(Thread::current(), m);
+-  if ((m->method_data() != NULL) && 
++  if ((m->method_data() != NULL) &&
+       (PrintMethodData || CompilerOracle::should_print(mh))) {
+     collected_profiled_methods->push(m);
+   }
+@@ -72,7 +69,7 @@
+   tty->print_cr("Histogram Over MethodOop Invocation Counters (cutoff = %d):", MethodHistogramCutoff);
+   tty->cr();
+   tty->print_cr("____Count_(I+C)____Method________________________Module_________________");
+-  unsigned total = 0, int_total = 0, comp_total = 0, static_total = 0, final_total = 0, 
++  unsigned total = 0, int_total = 0, comp_total = 0, static_total = 0, final_total = 0,
+       synch_total = 0, nativ_total = 0, acces_total = 0;
+   for (int index = 0; index < collected_invoked_methods->length(); index++) {
+     methodOop m = collected_invoked_methods->at(index);
+@@ -107,7 +104,7 @@
+   collected_profiled_methods = new GrowableArray<methodOop>(1024);
+   SystemDictionary::methods_do(collect_profiled_methods);
+   collected_profiled_methods->sort(&compare_methods);
+-  
++
+   int count = collected_profiled_methods->length();
+   if (count > 0) {
+     for (int index = 0; index < count; index++) {
+@@ -136,7 +133,7 @@
+ // General statistics printing (profiling ...)
+ 
+ void print_statistics() {
+-  
++
+ #ifdef ASSERT
+ 
+   if (CountRuntimeCalls) {
+@@ -144,13 +141,6 @@
+     RuntimeHistogram->print();
+   }
+ 
+-  if (CountVMLocks) {
+-    extern Histogram *MutexHistogram;
+-    extern Histogram *MutexContentionHistogram;
+-    MutexHistogram->print();
+-    MutexContentionHistogram->print();
+-  }
+-
+   if (CountJNICalls) {
+     extern Histogram *JNIHistogram;
+     JNIHistogram->print();
+@@ -219,7 +209,7 @@
+   }
+   if (TimeOopMap) {
+     GenerateOopMap::print_time();
+-  }  
++  }
+   if (ProfilerCheckIntervals) {
+     PeriodicTask::print_intervals();
+   }
+@@ -228,7 +218,7 @@
+   }
+   if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
+     BytecodeCounter::print();
+-  }  
++  }
+   if (PrintBytecodePairHistogram) {
+     BytecodePairHistogram::print();
+   }
+@@ -334,7 +324,7 @@
+ extern "C" {
+   void register_on_exit_function(void (*func)(void)) {
+     ExitProc *entry = new ExitProc(func);
+-    // Classic vm does not throw an exception in case the allocation failed, 
++    // Classic vm does not throw an exception in case the allocation failed,
+     if (entry != NULL) {
+       entry->set_next(exit_procs);
+       exit_procs = entry;
+@@ -352,9 +342,9 @@
+   static jint volatile _before_exit_status = BEFORE_EXIT_NOT_RUN;
+ 
+   // Note: don't use a Mutex to guard the entire before_exit(), as
+-  // JVMTI post_thread_end_event and post_vm_death_event will run native code. 
+-  // A CAS or OSMutex would work just fine but then we need to manipulate 
+-  // thread state for Safepoint. Here we use Monitor wait() and notify_all() 
++  // JVMTI post_thread_end_event and post_vm_death_event will run native code.
++  // A CAS or OSMutex would work just fine but then we need to manipulate
++  // thread state for Safepoint. Here we use Monitor wait() and notify_all()
+   // for synchronization.
+   { MutexLocker ml(BeforeExit_lock);
+     switch (_before_exit_status) {
+@@ -372,7 +362,7 @@
+     }
+   }
+ 
+-  // The only difference between this and Win32's _onexit procs is that 
++  // The only difference between this and Win32's _onexit procs is that
+   // this version is invoked before any threads get killed.
+   ExitProc* current = exit_procs;
+   while (current != NULL) {
+@@ -381,7 +371,7 @@
+     delete current;
+     current = next;
+   }
+- 
++
+   // Hang forever on exit if we're reporting an error.
+   if (ShowMessageBoxOnError && is_error_reported()) {
+     os::infinite_sleep();
+@@ -391,7 +381,7 @@
+   WatcherThread::stop();
+ 
+   // Print statistics gathered (profiling ...)
+-  if (Arguments::has_profile()) {    
++  if (Arguments::has_profile()) {
+     FlatProfiler::disengage();
+     FlatProfiler::print(10);
+   }
+@@ -421,7 +411,7 @@
+ 
+   if (Arguments::has_alloc_profile()) {
+     HandleMark hm;
+-    // Do one last collection to enumerate all the objects 
++    // Do one last collection to enumerate all the objects
+     // allocated since the last one.
+     Universe::heap()->collect(GCCause::_allocation_profiler);
+     AllocationProfiler::disengage();
+@@ -438,6 +428,7 @@
+   // Always call even when there are not JVMTI environments yet, since environments
+   // may be attached late and JVMTI must track phases of VM execution
+   JvmtiExport::post_vm_death();
++  Threads::shutdown_vm_agents();
+ 
+   // Terminate the signal thread
+   // Note: we don't wait until it actually dies.
+@@ -458,7 +449,7 @@
+   #undef BEFORE_EXIT_DONE
+ }
+ 
+-void vm_exit(int code) {  
++void vm_exit(int code) {
+   Thread* thread = ThreadLocalStorage::thread_index() == -1 ? NULL
+     : ThreadLocalStorage::get_thread_slow();
+   if (thread == NULL) {
+@@ -482,7 +473,7 @@
+ }
+ 
+ void notify_vm_shutdown() {
+-  // For now, just a dtrace probe.  
++  // For now, just a dtrace probe.
+   HS_DTRACE_PROBE(hotspot, vm__shutdown);
+ }
+ 
+@@ -523,7 +514,7 @@
+ }
+ 
+ void vm_notify_during_shutdown(const char* error, const char* message) {
+-  if (error != NULL) { 
++  if (error != NULL) {
+     tty->print_cr("Error occurred during initialization of VM");
+     tty->print("%s", error);
+     if (message != NULL) {
+@@ -577,7 +568,7 @@
+ 
+ void JDK_Version::initialize() {
+   void *lib_handle = os::native_java_library();
+-  jdk_version_info_fn_t func = 
++  jdk_version_info_fn_t func =
+     CAST_TO_FN_PTR(jdk_version_info_fn_t, hpi::dll_lookup(lib_handle, "JDK_GetVersionInfo0"));
+ 
+   if (func == NULL) {
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/javaFrameAnchor.hpp openjdk/hotspot/src/share/vm/runtime/javaFrameAnchor.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/javaFrameAnchor.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/javaFrameAnchor.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)javaFrameAnchor.hpp	1.16 07/05/05 17:06:50 JVM"
+-#endif
+ /*
+  * Copyright 2002-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ //
+ // An object for encapsulating the machine/os dependent part of a JavaThread frame state
+@@ -44,7 +41,7 @@
+ friend class JavaThread;
+ friend class frame;
+ friend class VMStructs;
+-friend class cInterpreter;
++friend class BytecodeInterpreter;
+ friend class JavaCallWrapper;
+ 
+  private:
+@@ -72,7 +69,7 @@
+   // Invalidate the anchor so that has_last_frame is false
+   // and no one should look at the other fields.
+   void zap(void)                                     { _last_Java_sp = NULL; }
+-  
++
+ #include "incls/_javaFrameAnchor_pd.hpp.incl"
+ 
+ public:
+@@ -88,4 +85,3 @@
+   static ByteSize last_Java_pc_offset()          { return byte_offset_of(JavaFrameAnchor, _last_Java_pc); }
+ 
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/java.hpp openjdk/hotspot/src/share/vm/runtime/java.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/java.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/java.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)java.hpp	1.36 07/05/05 17:06:49 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Register function to be called by before_exit
+@@ -35,7 +32,7 @@
+ extern void vm_exit(int code);
+ 
+ // Wrapper for ::exit()
+-extern void vm_direct_exit(int code); 
++extern void vm_direct_exit(int code);
+ 
+ // Shutdown the VM but do not exit the process
+ extern void vm_shutdown();
+@@ -54,7 +51,7 @@
+ class JDK_Version : AllStatic {
+   friend class VMStructs;
+  private:
+-  static jdk_version_info _version_info; 
++  static jdk_version_info _version_info;
+   static bool             _pre_jdk16_version;
+   static int              _jdk_version;  // JDK version number representing the release
+                                          //  i.e. n in 1.n.x (= jdk_minor_version())
+@@ -72,6 +69,7 @@
+   static bool is_jdk14x_version()           { assert(is_jdk_version_initialized(), "must have been initialized"); return _jdk_version == 4; }
+   static bool is_jdk15x_version()           { assert(is_jdk_version_initialized(), "must have been initialized"); return _jdk_version == 5; }
+   static bool is_jdk16x_version()           { assert(is_jdk_version_initialized(), "must have been initialized"); return _jdk_version == 6; }
++  static bool is_jdk17x_version()           { assert(is_jdk_version_initialized(), "must have been initialized"); return _jdk_version == 7; }
+ 
+   static bool supports_thread_park_blocker() { return _version_info.thread_park_blocker; }
+ 
+@@ -87,9 +85,16 @@
+   }
+   static bool is_gte_jdk16x_version() {
+     // Keep the semantics of this that the version number is >= 1.6
++    assert(is_jdk_version_initialized(), "Not initialized");
+     return _jdk_version >= 6;
+   }
+ 
++  static bool is_gte_jdk17x_version() {
++    // Keep the semantics of this that the version number is >= 1.7
++    assert(is_jdk_version_initialized(), "Not initialized");
++    return _jdk_version >= 7;
++  }
++
+   static bool is_jdk_version_initialized() {
+     return _jdk_version > 0;
+   }
+@@ -116,5 +121,3 @@
+     _version_info.jdk_version = (1 << 24) | (5 << 16);
+   }
+ };
+-
+-
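
The hunks above add JDK 7 predicates beside the existing 1.4/1.5/1.6 ones, with asserts requiring that the version info is initialized first. A hypothetical gating site, for illustration:

    // Illustrative only: enable a JDK 7-dependent code path at runtime.
    if (JDK_Version::is_gte_jdk17x_version()) {
      // ... behaviour that relies on a 1.7 class library ...
    }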
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/jfieldIDWorkaround.hpp openjdk/hotspot/src/share/vm/runtime/jfieldIDWorkaround.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/jfieldIDWorkaround.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/jfieldIDWorkaround.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jfieldIDWorkaround.hpp	1.11 07/05/05 17:06:51 JVM"
+-#endif
+ /*
+  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class jfieldIDWorkaround: AllStatic {
+@@ -31,7 +28,7 @@
+   // The workaround is to steal a low-order bit:
+   //   a 1 means the jfieldID is an instance jfieldID,
+   //             and the rest of the word is the offset of the field.
+-  //   a 0 means the jfieldID is a static jfieldID, 
++  //   a 0 means the jfieldID is a static jfieldID,
+   //             and the rest of the word is the JNIid*.
+   //
+   // Another low-order bit is used to mark if an instance field
+@@ -144,7 +141,7 @@
+ 
+   static JNIid* from_static_jfieldID(jfieldID id) {
+     assert(jfieldIDWorkaround::is_static_jfieldID(id),
+-	   "to_JNIid, but not static jfieldID");
++           "to_JNIid, but not static jfieldID");
+     JNIid* result = (JNIid*) id;
+     assert(result->is_static_field_id(), "to_JNIid, but not static field id");
+     return result;
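
The workaround described in this header steals the low-order bit of the jfieldID word as the instance/static discriminator. Schematically (the helper below is hypothetical, not part of the patch):

    // instance field: id has the low bit set, remaining bits hold the offset
    // static field:   id has the low bit clear and is really a JNIid*
    inline bool is_instance_id(jfieldID id) {
      return ((uintptr_t) id & 1) != 0;  // test the stolen low-order bit
    }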
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/jniHandles.cpp openjdk/hotspot/src/share/vm/runtime/jniHandles.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/jniHandles.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/jniHandles.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)jniHandles.cpp	1.64 07/05/17 16:06:13 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -61,14 +58,14 @@
+   if (obj == NULL) {
+     return NULL;                // ignore null handles
+   } else {
+-    JavaThread* thread = JavaThread::thread_from_jni_environment(env);    
++    JavaThread* thread = JavaThread::thread_from_jni_environment(env);
+     assert(Universe::heap()->is_in_reserved(obj), "sanity check");
+     return thread->active_handles()->allocate_handle(obj);
+   }
+ }
+ 
+ 
+-jobject JNIHandles::make_global(Handle obj) {  
++jobject JNIHandles::make_global(Handle obj) {
+   jobject res = NULL;
+   if (!obj.is_null()) {
+     // ignore null handles
+@@ -83,13 +80,13 @@
+ }
+ 
+ 
+-jobject JNIHandles::make_weak_global(Handle obj) {  
++jobject JNIHandles::make_weak_global(Handle obj) {
+   jobject res = NULL;
+   if (!obj.is_null()) {
+     // ignore null handles
+     MutexLocker ml(JNIGlobalHandle_lock);
+     assert(Universe::heap()->is_in_reserved(obj()), "sanity check");
+-    res = _weak_global_handles->allocate_handle(obj());   
++    res = _weak_global_handles->allocate_handle(obj());
+   } else {
+     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
+   }
+@@ -219,7 +216,7 @@
+ void JNIHandles::print_on(outputStream* st) {
+   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+   assert(_global_handles != NULL && _weak_global_handles != NULL,
+-	 "JNIHandles not initialized");
++         "JNIHandles not initialized");
+ 
+   CountHandleClosure global_handle_count;
+   AlwaysAliveClosure always_alive;
+@@ -262,7 +259,7 @@
+ 
+ void JNIHandleBlock::zap() {
+   // Zap block values
+-  _top  = 0; 
++  _top  = 0;
+   for (int index = 0; index < block_size_in_oops; index++) {
+     _handles[index] = badJNIHandle;
+   }
+@@ -280,7 +277,7 @@
+   else {
+     // locking with safepoint checking introduces a potential deadlock:
+     // - we would hold JNIHandleBlockFreeList_lock and then Threads_lock
+-    // - another would hold Threads_lock (jni_AttachCurrentThread) and then 
++    // - another would hold Threads_lock (jni_AttachCurrentThread) and then
+     //   JNIHandleBlockFreeList_lock (JNIHandleBlock::allocate_block)
+     MutexLockerEx ml(JNIHandleBlockFreeList_lock,
+                      Mutex::_no_safepoint_check_flag);
+@@ -304,7 +301,7 @@
+       _block_free_list = _block_free_list->_next;
+     }
+   }
+-  block->_top  = 0; 
++  block->_top  = 0;
+   block->_next = NULL;
+   block->_pop_frame_link = NULL;
+   // _last, _free_list & _allocate_before_rebuild initialized in allocate_handle
+@@ -339,7 +336,7 @@
+     // Return blocks to free list
+     // locking with safepoint checking introduces a potential deadlock:
+     // - we would hold JNIHandleBlockFreeList_lock and then Threads_lock
+-    // - another would hold Threads_lock (jni_AttachCurrentThread) and then 
++    // - another would hold Threads_lock (jni_AttachCurrentThread) and then
+     //   JNIHandleBlockFreeList_lock (JNIHandleBlock::allocate_block)
+     MutexLockerEx ml(JNIHandleBlockFreeList_lock,
+                      Mutex::_no_safepoint_check_flag);
+@@ -366,32 +363,32 @@
+   // pop frame links.
+   while (current_chain != NULL) {
+     for (JNIHandleBlock* current = current_chain; current != NULL;
+-	 current = current->_next) {
+-      assert(current == current_chain || current->pop_frame_link() == NULL, 
++         current = current->_next) {
++      assert(current == current_chain || current->pop_frame_link() == NULL,
+         "only blocks first in chain should have pop frame link set");
+       for (int index = 0; index < current->_top; index++) {
+         oop* root = &(current->_handles)[index];
+         oop value = *root;
+         // traverse heap pointers only, not deleted handles or free list
+-	// pointers
++        // pointers
+         if (value != NULL && Universe::heap()->is_in_reserved(value)) {
+           f->do_oop(root);
+         }
+       }
+       // the next handle block is valid only if current block is full
+-      if (current->_top < block_size_in_oops) {      
++      if (current->_top < block_size_in_oops) {
+         break;
+       }
+     }
+-    current_chain = current_chain->pop_frame_link();    
++    current_chain = current_chain->pop_frame_link();
+   }
+ }
+ 
+ 
+ void JNIHandleBlock::weak_oops_do(BoolObjectClosure* is_alive,
+-				  OopClosure* f) {
++                                  OopClosure* f) {
+   for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
+-    assert(current->pop_frame_link() == NULL, 
++    assert(current->pop_frame_link() == NULL,
+       "blocks holding weak global JNI handles should not have pop frame link set");
+     for (int index = 0; index < current->_top; index++) {
+       oop* root = &(current->_handles)[index];
+@@ -411,14 +408,14 @@
+       }
+     }
+     // the next handle block is valid only if current block is full
+-    if (current->_top < block_size_in_oops) {      
++    if (current->_top < block_size_in_oops) {
+       break;
+     }
+   }
+ }
+ 
+ 
+-jobject JNIHandleBlock::allocate_handle(oop obj) {  
++jobject JNIHandleBlock::allocate_handle(oop obj) {
+   assert(Universe::heap()->is_in_reserved(obj), "sanity check");
+   if (_top == 0) {
+     // This is the first allocation or the initial block got zapped when
+@@ -443,36 +440,36 @@
+   if (_last->_top < block_size_in_oops) {
+     oop* handle = &(_last->_handles)[_last->_top++];
+     *handle = obj;
+-    return (jobject) handle;    
++    return (jobject) handle;
+   }
+ 
+   // Try free list
+-  if (_free_list != NULL) {    
++  if (_free_list != NULL) {
+     oop* handle = _free_list;
+     _free_list = (oop*) *_free_list;
+     *handle = obj;
+-    return (jobject) handle;    
++    return (jobject) handle;
+   }
+   // Check if unused block follows last
+   if (_last->_next != NULL) {
+     // update last and retry
+     _last = _last->_next;
+     return allocate_handle(obj);
+-  } 
++  }
+ 
+-  // No space available, we have to rebuild free list or expand  
++  // No space available, we have to rebuild free list or expand
+   if (_allocate_before_rebuild == 0) {
+-      rebuild_free_list();        // updates _allocate_before_rebuild counter    
++      rebuild_free_list();        // updates _allocate_before_rebuild counter
+   } else {
+     // Append new block
+     Thread* thread = Thread::current();
+-    Handle obj_handle(thread, obj); 
++    Handle obj_handle(thread, obj);
+     // This can block, so we need to preserve obj across call.
+     _last->_next = JNIHandleBlock::allocate_block(thread);
+     _last = _last->_next;
+-    _allocate_before_rebuild--;    
++    _allocate_before_rebuild--;
+     obj = obj_handle();
+-  }  
++  }
+   return allocate_handle(obj);  // retry
+ }
+ 
+@@ -536,7 +533,7 @@
+ 
+ // This method is not thread-safe, i.e., must be called while holding a lock on the
+ // structure.
+-long JNIHandleBlock::memory_usage() const {  
++long JNIHandleBlock::memory_usage() const {
+   return length() * sizeof(JNIHandleBlock);
+ }
+ 
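
allocate_handle() above falls back to an intrusive free list: a free slot stores the address of the next free slot where an oop would otherwise live. The pop, isolated for clarity (the same statements appear in the patched code):

    oop* handle = _free_list;
    _free_list  = (oop*) *_free_list;  // next-free pointer lived in the slot
    *handle     = obj;                 // the slot holds a live oop again
    return (jobject) handle;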
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/jniHandles.hpp openjdk/hotspot/src/share/vm/runtime/jniHandles.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/jniHandles.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/jniHandles.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jniHandles.hpp	1.54 07/05/17 16:06:14 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class JNIHandleBlock;
+@@ -36,7 +33,7 @@
+   static JNIHandleBlock* _global_handles;             // First global handle block
+   static JNIHandleBlock* _weak_global_handles;        // First weak global handle block
+   static oop _deleted_handle;                         // Sentinel marking deleted handles
+-  
++
+  public:
+   // Resolve handle into oop
+   inline static oop resolve(jobject handle);
+@@ -62,7 +59,7 @@
+   // jmethodID handling (as Weak global handles).
+   // Because the useful life-span of a jmethodID cannot be determined, once created they are
+   // never reclaimed.  The methods to which they refer, however, can be GC'ed away if the class
+-  // is unloaded or if the method is made obsolete or deleted -- in these cases, the jmethodID 
++  // is unloaded or if the method is made obsolete or deleted -- in these cases, the jmethodID
+   // refers to NULL (as is the case for any weak reference).
+   static jmethodID make_jmethod_id(methodHandle mh);
+   static void destroy_jmethod_id(jmethodID mid);
+@@ -70,17 +67,17 @@
+   inline static methodOop checked_resolve_jmethod_id(jmethodID mid); // NULL on invalid jmethodID
+   static void change_method_associated_with_jmethod_id(jmethodID jmid, methodHandle mh);
+ 
+-  // Sentinel marking deleted handles in block. Note that we cannot store NULL as 
++  // Sentinel marking deleted handles in block. Note that we cannot store NULL as
+   // the sentinel, since clearing weak global JNI refs are done by storing NULL in
+   // the handle. The handle may not be reused before destroy_weak_global is called.
+   static oop deleted_handle()   { return _deleted_handle; }
+ 
+   // Initialization
+   static void initialize();
+-  
++
+   // Debugging
+   static void print_on(outputStream* st);
+-  static void print()		{ print_on(tty); }
++  static void print()           { print_on(tty); }
+   static void verify();
+   static bool is_local_handle(Thread* thread, jobject handle);
+   static bool is_frame_handle(JavaThread* thr, jobject obj);
+@@ -103,7 +100,7 @@
+ class JNIHandleBlock : public CHeapObj {
+   friend class VMStructs;
+  private:
+-  enum SomeConstants {     
++  enum SomeConstants {
+     block_size_in_oops  = 32                    // Number of handles per handle block
+   };
+ 
+@@ -111,9 +108,9 @@
+   int             _top;                         // Index of next unused handle
+   JNIHandleBlock* _next;                        // Link to next block
+ 
+-  // The following instance variables are only used by the first block in a chain. 
++  // The following instance variables are only used by the first block in a chain.
+   // Having two types of blocks complicates the code and the space overhead is negligible.
+-  JNIHandleBlock* _last;                        // Last block in use 
++  JNIHandleBlock* _last;                        // Last block in use
+   JNIHandleBlock* _pop_frame_link;              // Block to restore on PopLocalFrame call
+   oop*            _free_list;                   // Handle free list
+   int             _allocate_before_rebuild;     // Number of blocks to allocate before rebuilding free list
+@@ -213,5 +210,3 @@
+     *((oop*)handle) = deleted_handle(); // Mark the handle as deleted, allocate will reuse it
+   }
+ }
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/jniPeriodicChecker.cpp openjdk/hotspot/src/share/vm/runtime/jniPeriodicChecker.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/jniPeriodicChecker.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/jniPeriodicChecker.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)jniPeriodicChecker.cpp	1.4 07/05/05 17:06:51 JVM"
+-#endif
+ /*
+  * Copyright 2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -54,7 +51,7 @@
+   if (CheckJNICalls && !is_active()) {
+     // start up the periodic task
+     _task = new JniPeriodicCheckerTask(10);
+-    _task->enroll();    
++    _task->enroll();
+   }
+ }
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/jniPeriodicChecker.hpp openjdk/hotspot/src/share/vm/runtime/jniPeriodicChecker.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/jniPeriodicChecker.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/jniPeriodicChecker.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jniPeriodicChecker.hpp	1.4 07/05/05 17:06:51 JVM"
+-#endif
+ /*
+  * Copyright 2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class JniPeriodicCheckerTask;
+@@ -42,7 +39,7 @@
+     static JniPeriodicCheckerTask* _task;
+ 
+   public:
+-    // Start/stop task 
++    // Start/stop task
+     static void engage();
+     static void disengage();
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/memprofiler.cpp openjdk/hotspot/src/share/vm/runtime/memprofiler.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/memprofiler.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/memprofiler.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)memprofiler.cpp	1.24 07/05/05 17:06:52 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -69,11 +66,11 @@
+     }
+     fprintf(_log_fp, "MemProfiler: sizes are in Kb, time is in seconds since startup\n\n");
+     fprintf(_log_fp, "  time, #thr, #cls,  heap,  heap,  perm,  perm,  code, hndls, rescs, oopmp\n");
+-    fprintf(_log_fp, "                     used, total,  used, total, total, total, total, total\n"); 
+-    fprintf(_log_fp, "--------------------------------------------------------------------------\n");     
++    fprintf(_log_fp, "                     used, total,  used, total, total, total, total, total\n");
++    fprintf(_log_fp, "--------------------------------------------------------------------------\n");
+ 
+     _task = new MemProfilerTask(MemProfilingInterval);
+-    _task->enroll();    
++    _task->enroll();
+   }
+ }
+ 
+@@ -95,19 +92,19 @@
+ }
+ 
+ 
+-void MemProfiler::do_trace() {  
++void MemProfiler::do_trace() {
+   // Calculate thread local sizes
+   size_t handles_memory_usage    = VMThread::vm_thread()->handle_area()->size_in_bytes();
+   size_t resource_memory_usage   = VMThread::vm_thread()->resource_area()->size_in_bytes();
+   JavaThread *cur = Threads::first();
+-  while (cur != NULL) {    
++  while (cur != NULL) {
+     handles_memory_usage  += cur->handle_area()->size_in_bytes();
+     resource_memory_usage += cur->resource_area()->size_in_bytes();
+     cur = cur->next();
+   }
+-  
++
+   // Print trace line in log
+-  fprintf(_log_fp, "%6.1f,%5d,%5d,%6ld,%6ld,%6ld,%6ld,", 
++  fprintf(_log_fp, "%6.1f,%5d,%5d,%6ld,%6ld,%6ld,%6ld,",
+       os::elapsedTime(),
+       Threads::number_of_threads(),
+       SystemDictionary::number_of_classes(),
+@@ -125,4 +122,4 @@
+   fflush(_log_fp);
+ }
+ 
+-#endif 
++#endif
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/memprofiler.hpp openjdk/hotspot/src/share/vm/runtime/memprofiler.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/memprofiler.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/memprofiler.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)memprofiler.hpp	1.17 07/05/05 17:06:52 JVM"
+-#endif
+ /*
+  * Copyright 1998 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Prints periodic memory usage trace of HotSpot VM
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/monitorChunk.cpp openjdk/hotspot/src/share/vm/runtime/monitorChunk.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/monitorChunk.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/monitorChunk.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)monitorChunk.cpp	1.19 07/05/05 17:06:52 JVM"
+-#endif
+ /*
+  * Copyright 1997-2000 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -45,4 +42,3 @@
+     at(index)->oops_do(f);
+   }
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/monitorChunk.hpp openjdk/hotspot/src/share/vm/runtime/monitorChunk.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/monitorChunk.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/monitorChunk.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)monitorChunk.hpp	1.19 07/05/05 17:06:52 JVM"
+-#endif
+ /*
+  * Copyright 1997-2000 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Data structure for holding monitors for one activation during
+@@ -52,7 +49,7 @@
+   // Returns the index'th monitor
+   BasicObjectLock* at(int index)            { assert(index >= 0 && index < number_of_monitors(), "out of bounds check"); return &monitors()[index]; }
+ 
+-  
++
+   // Memory management
+   void oops_do(OopClosure* f);
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/mutex.cpp openjdk/hotspot/src/share/vm/runtime/mutex.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/mutex.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/mutex.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,4 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)mutex.cpp	1.60 07/05/05 17:06:43 JVM"
+-#endif
++
+ /*
+  * Copyright 1998-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,69 +20,814 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_mutex.cpp.incl"
+ 
+-#ifdef ASSERT
+-Histogram* MutexHistogram;
+-static volatile jint MutexHistogram_lock = 0;
+-Histogram* MutexContentionHistogram;
+-static volatile jint MutexContentionHistogram_lock = 0;
++// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
++//
++// Native Monitor-Mutex locking - theory of operations
++//
++// * Native Monitors are completely unrelated to Java-level monitors,
++//   although the "back-end" slow-path implementations share a common lineage.
++//   See objectMonitor:: in synchronizer.cpp.
++//   Native Monitors do *not* support nesting or recursion but otherwise
++//   they're basically Hoare-flavor monitors.
++//
++// * A thread acquires ownership of a Monitor/Mutex by CASing the LockByte
++//   in the _LockWord from zero to non-zero.  Note that the _Owner field
++//   is advisory and is used only to verify that the thread calling unlock()
++//   is indeed the last thread to have acquired the lock.
++//
++// * Contending threads "push" themselves onto the front of the contention
++//   queue -- called the cxq -- with CAS and then spin/park.
++//   The _LockWord contains the LockByte as well as the pointer to the head
++//   of the cxq.  Colocating the LockByte with the cxq precludes certain races.
++//
++// * Using a separately addressable LockByte allows for CAS:MEMBAR or CAS:0
++//   idioms.  We currently use MEMBAR in the uncontended unlock() path, as
++//   MEMBAR often has less latency than CAS.  If warranted, we could switch to
++//   a CAS:0 mode, using timers to close the resultant race, as is done
++//   with Java Monitors in synchronizer.cpp.
++//
++//   See the following for a discussion of the relative cost of atomics (CAS)
++//   MEMBAR, and ways to eliminate such instructions from the common-case paths:
++//   -- http://blogs.sun.com/dave/entry/biased_locking_in_hotspot
++//   -- http://blogs.sun.com/dave/resource/MustangSync.pdf
++//   -- http://blogs.sun.com/dave/resource/synchronization-public2.pdf
++//   -- synchronizer.cpp
++//
++// * Overall goals - desiderata
++//   1. Minimize context switching
++//   2. Minimize lock migration
++//   3. Minimize CPI -- affinity and locality
++//   4. Minimize the execution of high-latency instructions such as CAS or MEMBAR
++//   5. Minimize outer lock hold times
++//   6. Behave gracefully on a loaded system
++//
++// * Thread flow and list residency:
++//
++//   Contention queue --> EntryList --> OnDeck --> Owner --> !Owner
++//   [..resident on monitor list..]
++//   [...........contending..................]
++//
++//   -- The contention queue (cxq) contains recently-arrived threads (RATs).
++//      Threads on the cxq eventually drain into the EntryList.
++//   -- Invariant: a thread appears on at most one list -- cxq, EntryList
++//      or WaitSet -- at any one time.
++//   -- For a given monitor there can be at most one "OnDeck" thread at any
++//      given time but if needbe this particular invariant could be relaxed.
++//
++// * The WaitSet and EntryList linked lists are composed of ParkEvents.
++//   I use ParkEvent instead of threads as ParkEvents are immortal and
++//   type-stable, meaning we can safely unpark() a possibly stale
++//   list element in the unlock()-path.  (That's benign).
++//
++// * Succession policy - providing for progress:
++//
++//   As necessary, the unlock()ing thread identifies, unlinks, and unparks
++//   an "heir presumptive" tentative successor thread from the EntryList.
++//   This becomes the so-called "OnDeck" thread, of which there can be only
++//   one at any given time for a given monitor.  The wakee will recontend
++//   for ownership of monitor.
++//
++//   Succession is provided for by a policy of competitive handoff.
++//   The exiting thread does _not_ grant or pass ownership to the
++//   successor thread.  (This is also referred to as "handoff succession").
++//   Instead the exiting thread releases ownership and possibly wakes
++//   a successor, so the successor can (re)compete for ownership of the lock.
++//
++//   Competitive handoff provides excellent overall throughput at the expense
++//   of short-term fairness.  If fairness is a concern then one remedy might
++//   be to add an AcquireCounter field to the monitor.  After a thread acquires
++//   the lock it will decrement the AcquireCounter field.  When the count
++//   reaches 0 the thread would reset the AcquireCounter variable, abdicate
++//   the lock directly to some thread on the EntryList, and then move itself to the
++//   tail of the EntryList.
++//
++//   But in practice most threads engage or otherwise participate in resource
++//   bounded producer-consumer relationships, so lock domination is not usually
++//   a practical concern.  Recall too, that in general it's easier to construct
++//   a fair lock from a fast lock, but not vice-versa.
++//
++// * The cxq can have multiple concurrent "pushers" but only one concurrent
++//   detaching thread.  This mechanism is immune to ABA corruption.
++//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
++//   We use OnDeck as a pseudo-lock to enforce the at-most-one detaching
++//   thread constraint.
++//
++// * Taken together, the cxq and the EntryList constitute or form a
++//   single logical queue of threads stalled trying to acquire the lock.
++//   We use two distinct lists to reduce heat on the list ends.
++//   Threads in lock() enqueue onto cxq while threads in unlock() will
++//   dequeue from the EntryList.  (c.f. Michael Scott's "2Q" algorithm).
++//   A key desideratum is to minimize queue & monitor metadata manipulation
++//   that occurs while holding the "outer" monitor lock -- that is, we want to
++//   minimize monitor lock hold times.
++//
++//   The EntryList is ordered by the prevailing queue discipline and
++//   can be organized in any convenient fashion, such as a doubly-linked list or
++//   a circular doubly-linked list.  If we need a priority queue then something akin
++//   to Solaris' sleepq would work nicely.  Viz.,
++//   -- http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
++//   -- http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/os/sleepq.c
++//   Queue discipline is enforced at ::unlock() time, when the unlocking thread
++//   drains the cxq into the EntryList, and orders or reorders the threads on the
++//   EntryList accordingly.
++//
++//   Barring "lock barging", this mechanism provides fair cyclic ordering,
++//   somewhat similar to an elevator-scan.
++//
++// * OnDeck
++//   --  For a given monitor there can be at most one OnDeck thread at any given
++//       instant.  The OnDeck thread is contending for the lock, but has been
++//       unlinked from the EntryList and cxq by some previous unlock() operations.
++//       Once a thread has been designated the OnDeck thread it will remain so
++//       until it manages to acquire the lock -- being OnDeck is a stable property.
++//   --  Threads on the EntryList or cxq are _not allowed to attempt lock acquisition.
++//   --  OnDeck also serves as an "inner lock" as follows.  Threads in unlock() will, after
++//       having cleared the LockByte and dropped the outer lock,  attempt to "trylock"
++//       OnDeck by CASing the field from null to non-null.  If successful, that thread
++//       is then responsible for progress and succession and can use CAS to detach and
++//       drain the cxq into the EntryList.  By convention, only this thread, the holder of
++//       the OnDeck inner lock, can manipulate the EntryList or detach and drain the
++//       RATs on the cxq into the EntryList.  This avoids ABA corruption on the cxq as
++//       we allow multiple concurrent "push" operations but restrict detach concurrency
++//       to at most one thread.  Having selected and detached a successor, the thread then
++//       changes the OnDeck to refer to that successor, and then unparks the successor.
++//       That successor will eventually acquire the lock and clear OnDeck.  Beware
++//       that the OnDeck usage as a lock is asymmetric.  A thread in unlock() transiently
++//       "acquires" OnDeck, performs queue manipulations, passes OnDeck to some successor,
++//       and then the successor eventually "drops" OnDeck.  Note that there's never
++//       any sense of contention on the inner lock, however.  Threads never contend
++//       or wait for the inner lock.
++//   --  OnDeck provides for futile wakeup throttling as described in section 3.3 of
++//       http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
++//       In a sense, OnDeck subsumes the ObjectMonitor _Succ and ObjectWaiter
++//       TState fields found in Java-level objectMonitors.  (See synchronizer.cpp).
++//
++// * Waiting threads reside on the WaitSet list -- wait() puts
++//   the caller onto the WaitSet.  Notify() or notifyAll() simply
++//   transfers threads from the WaitSet to either the EntryList or cxq.
++//   Subsequent unlock() operations will eventually unpark the notifyee.
++//   Unparking a notifyee in notify() proper is inefficient - if we were to do so
++//   it's likely the notifyee would simply impale itself on the lock held
++//   by the notifier.
++//
++// * The mechanism is obstruction-free in that if the holder of the transient
++//   OnDeck lock in unlock() is preempted or otherwise stalls, other threads
++//   can still acquire and release the outer lock and continue to make progress.
++//   At worst, waking of already blocked contending threads may be delayed,
++//   but nothing worse.  (We only use "trylock" operations on the inner OnDeck
++//   lock).
++//
++// * Note that thread-local storage must be initialized before a thread
++//   uses Native monitors or mutexes.  The native monitor-mutex subsystem
++//   depends on Thread::current().
++//
++// * The monitor synchronization subsystem avoids the use of native
++//   synchronization primitives except for the narrow platform-specific
++//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
++//   the semantics of park-unpark.  Put another way, this monitor implementation
++//   depends only on atomic operations and park-unpark.  The monitor subsystem
++//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
++//   underlying OS manages the READY<->RUN transitions.
++//
++// * The memory consistency model provided by lock()-unlock() is at least as
++//   strong as, if not stronger than, the Java Memory Model defined by JSR-133.
++//   That is, we guarantee at least entry consistency, if not stronger.
++//   See http://g.oswego.edu/dl/jmm/cookbook.html.
++//
++// * Thread:: currently contains a set of purpose-specific ParkEvents:
++//   _MutexEvent, _ParkEvent, etc.  A better approach might be to do away with
++//   the purpose-specific ParkEvents and instead implement a general per-thread
++//   stack of available ParkEvents which we could provision on-demand.  The
++//   stack acts as a local cache to avoid excessive calls to ParkEvent::Allocate()
++//   and ::Release().  A thread would simply pop an element from the local stack before it
++//   enqueued or park()ed.  When the contention was over the thread would
++//   push the no-longer-needed ParkEvent back onto its stack.
++//
++// * A slightly reduced form of ILock() and IUnlock() has been partially
++//   model-checked (Murphi) for safety and progress at T=1,2,3 and 4.
++//   It'd be interesting to see if TLA/TLC could be useful as well.
++//
++// * Mutex-Monitor is a low-level "leaf" subsystem.  That is, the monitor
++//   code should never call other code in the JVM that might itself need to
++//   acquire monitors or mutexes.  That's true *except* in the case of the
++//   ThreadBlockInVM state transition wrappers.  The ThreadBlockInVM DTOR handles
++//   mutator reentry (ingress) by checking for a pending safepoint in which case it will
++//   call SafepointSynchronize::block(), which in turn may call Safepoint_lock->lock(), etc.
++//   In that particular case a call to lock() for a given Monitor can end up recursively
++//   calling lock() on another monitor.   While distasteful, this is largely benign
++//   as the calls come from the jacket that wraps lock(), and not from deep within lock() itself.
++//
++//   It's unfortunate that native mutexes and thread state transitions were convolved.
++//   They're really separate concerns and should have remained that way.  Melding
++//   them together was facile -- a bit too facile.   The current implementation badly
++//   conflates the two concerns.
++//
++// * TODO-FIXME:
++//
++//   -- Add DTRACE probes for contended acquire, contended acquired, contended unlock
++//      We should also add DTRACE probes in the ParkEvent subsystem for
++//      Park-entry, Park-exit, and Unpark.
++//
++//   -- We have an excess of mutex-like constructs in the JVM, namely:
++//      1. objectMonitors for Java-level synchronization (synchronizer.cpp)
++//      2. low-level muxAcquire and muxRelease
++//      3. low-level spinAcquire and spinRelease
++//      4. native Mutex:: and Monitor::
++//      5. jvm_raw_lock() and _unlock()
++//      6. JVMTI raw monitors -- distinct from (5) despite having a confusingly
++//         similar name.
++//
++// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
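[Editorial aside, not part of the patch: the LockWord encoding the banner describes is small enough to model outside HotSpot. A minimal sketch in portable C++11, collapsing the LockByte to bit 0 of one atomic word whose upper bits would hold the cxq head; every name here is illustrative.]

#include <atomic>
#include <cstdint>

static const uintptr_t LBIT = 1;             // stand-in for the LockByte
static std::atomic<uintptr_t> LockWord{0};   // (cxq head | LBIT) in one word

// Uncontended acquire: CAS the lock bit in, as Monitor::TryLock() does below.
static bool try_lock_word() {
  uintptr_t v = LockWord.load();
  while ((v & LBIT) == 0) {
    if (LockWord.compare_exchange_weak(v, v | LBIT)) return true;
  }
  return false;                              // lock bit already set
}

// Uncontended release: clear only the lock bit, leaving any queued cxq
// nodes in the upper bits intact.  The real code stores 0 to the byte and
// then issues a storeload MEMBAR; a sequentially consistent RMW stands in.
static void unlock_word() {
  LockWord.fetch_and(~LBIT);
}

int main() {
  if (try_lock_word()) unlock_word();        // trivial smoke test
  return 0;
}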
++
++
++// CASPTR() uses the canonical argument order that dominates in the literature.
++// Our internal cmpxchg_ptr() uses a bastardized ordering to accommodate Sun .il templates.
++
++#define CASPTR(a,c,s) intptr_t(Atomic::cmpxchg_ptr ((void *)(s),(void *)(a),(void *)(c)))
++#define UNS(x) (uintptr_t(x))
++#define TRACE(m) { static volatile int ctr = 0 ; int x = ++ctr ; if ((x & (x-1))==0) { ::printf ("%d:%s\n", x, #m); ::fflush(stdout); }}
++
++// Simplistic low-quality Marsaglia SHIFT-XOR RNG.
++// Bijective except for the trailing mask operation.
++// Useful for spin loops as the compiler can't optimize it away.
++
++static inline jint MarsagliaXORV (jint x) {
++  if (x == 0) x = 1|os::random() ;
++  x ^= x << 6;
++  x ^= ((unsigned)x) >> 21;
++  x ^= x << 7 ;
++  return x & 0x7FFFFFFF ;
++}
++
++static inline jint MarsagliaXOR (jint * const a) {
++  jint x = *a ;
++  if (x == 0) x = UNS(a)|1 ;
++  x ^= x << 6;
++  x ^= ((unsigned)x) >> 21;
++  x ^= x << 7 ;
++  *a = x ;
++  return x & 0x7FFFFFFF ;
++}
++
++static int Stall (int its) {
++  static volatile jint rv = 1 ;
++  volatile int OnFrame = 0 ;
++  jint v = rv ^ UNS(OnFrame) ;
++  while (--its >= 0) {
++    v = MarsagliaXORV (v) ;
++  }
++  // Make this impossible for the compiler to optimize away,
++  // but (mostly) avoid W coherency sharing on MP systems.
++  if (v == 0x12345) rv = v ;
++  return v ;
++}
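[Editorial sketch: the same shift-xor delay idiom as a freestanding program, assuming nothing beyond the C standard library. The volatile sink plays the role of Stall()'s rv, keeping the optimizer from deleting the spin loop.]

#include <cstdio>

static int xorshift(int x) {          // Marsaglia shift-xor step, as above
  if (x == 0) x = 1;                  // the generator is closed over non-zero values
  x ^= x << 6;
  x ^= (unsigned)x >> 21;
  x ^= x << 7;
  return x & 0x7FFFFFFF;
}

static volatile int rv = 1;           // volatile sink defeats dead-code elimination

static int stall(int its) {
  int v = rv;
  while (--its >= 0) v = xorshift(v);
  if (v == 0x12345) rv = v;           // write is possible but vanishingly rare
  return v;
}

int main() {
  printf("stall(1000) = %d\n", stall(1000));
  return 0;
}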
++
++int Monitor::TryLock () {
++  intptr_t v = _LockWord.FullWord ;
++  for (;;) {
++    if ((v & _LBIT) != 0) return 0 ;
++    const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
++    if (v == u) return 1 ;
++    v = u ;
++  }
++}
++
++int Monitor::TryFast () {
++  // Optimistic fast-path form ...
++  // Fast-path attempt for the common uncontended case.
++  // Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
++  intptr_t v = CASPTR (&_LockWord, 0, _LBIT) ;  // agro ...
++  if (v == 0) return 1 ;
++
++  for (;;) {
++    if ((v & _LBIT) != 0) return 0 ;
++    const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
++    if (v == u) return 1 ;
++    v = u ;
++  }
++}
++
++int Monitor::ILocked () {
++  const intptr_t w = _LockWord.FullWord & 0xFF ;
++  assert (w == 0 || w == _LBIT, "invariant") ;
++  return w == _LBIT ;
++}
++
++// Polite TATAS spinlock with exponential backoff - bounded spin.
++// Ideally we'd use processor cycles, time or vtime to control
++// the loop, but we currently use iterations.
++// All the constants within were derived empirically but work over
++// the spectrum of J2SE reference platforms.
++// On Niagara-class systems the back-off is unnecessary but
++// is relatively harmless.  (At worst it'll slightly retard
++// acquisition times).  The back-off is critical for older SMP systems
++// where constant fetching of the LockWord would otherwise impair
++// scalability.
++//
++// Clamp spinning at approximately 1/2 of a context-switch round-trip.
++// See synchronizer.cpp for details and rationale.
++
++int Monitor::TrySpin (Thread * const Self) {
++  if (TryLock())    return 1 ;
++  if (!os::is_MP()) return 0 ;
++
++  int Probes  = 0 ;
++  int Delay   = 0 ;
++  int Steps   = 0 ;
++  int SpinMax = NativeMonitorSpinLimit ;
++  int flgs    = NativeMonitorFlags ;
++  for (;;) {
++    intptr_t v = _LockWord.FullWord;
++    if ((v & _LBIT) == 0) {
++      if (CASPTR (&_LockWord, v, v|_LBIT) == v) {
++        return 1 ;
++      }
++      continue ;
++    }
++
++    if ((flgs & 8) == 0) {
++      SpinPause () ;
++    }
++
++    // Periodically increase Delay -- variable Delay form
++    // conceptually: delay *= 1 + 1/Exponent
++    ++ Probes;
++    if (Probes > SpinMax) return 0 ;
++
++    if ((Probes & 0x7) == 0) {
++      Delay = ((Delay << 1)|1) & 0x7FF ;
++      // CONSIDER: Delay += 1 + (Delay/4); Delay &= 0x7FF ;
++    }
+ 
+-MutexHistogramElement::MutexHistogramElement(const char* elementName) {
+-  _name = elementName;
+-  uintx count = 0;
++    if (flgs & 2) continue ;
+ 
+-  while (Atomic::cmpxchg(1, &MutexHistogram_lock, 0) != 0) {
+-    while (OrderAccess::load_acquire(&MutexHistogram_lock) != 0) {
+-      count +=1;
+-      if ( (WarnOnStalledSpinLock > 0)
+-        && (count % WarnOnStalledSpinLock == 0)) {
+-        warning("MutexHistogram_lock seems to be stalled");
++    // Consider checking _owner's schedctl state; if OFFPROC, abort the spin.
++    // If the owner is OFFPROC then it's unlikely that the lock will be dropped
++    // in a timely fashion, which suggests that spinning would not be fruitful
++    // or profitable.
++
++    // Stall for "Delay" time units - iterations in the current implementation.
++    // Avoid generating coherency traffic while stalled.
++    // Possible ways to delay:
++    //   PAUSE, SLEEP, MEMBAR #sync, MEMBAR #halt,
++    //   wr %g0,%asi, gethrtime, rdstick, rdtick, rdtsc, etc. ...
++    // Note that on Niagara-class systems we want to minimize STs in the
++    // spin loop.  N1 and brethren write-around the L1$ over the xbar into the L2$.
++    // Furthermore, they don't have a W$ like traditional SPARC processors.
++    // We currently use a Marsaglia Shift-Xor RNG loop.
++    Steps += Delay ;
++    if (Self != NULL) {
++      jint rv = Self->rng[0] ;
++      for (int k = Delay ; --k >= 0; ) {
++        rv = MarsagliaXORV (rv) ;
++        if ((flgs & 4) == 0 && SafepointSynchronize::do_call_back()) return 0 ;
+       }
++      Self->rng[0] = rv ;
++    } else {
++      Stall (Delay) ;
+     }
+   }
++}
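[Editorial aside: the backoff schedule embedded in TrySpin() is easier to see in isolation -- Delay doubles (plus one) every eighth probe and saturates at 0x7FF. A freestanding sketch that just prints the sequence, using the constants from the code above:]

#include <cstdio>

int main() {
  int Delay = 0;
  for (int Probes = 1; Probes <= 128; ++Probes) {
    if ((Probes & 0x7) == 0) {               // every 8th probe, as in TrySpin
      Delay = ((Delay << 1) | 1) & 0x7FF;    // 1, 3, 7, ... saturating at 0x7FF
      printf("probe %3d: delay %4d\n", Probes, Delay);
    }
  }
  return 0;
}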
+ 
+-  if (MutexHistogram == NULL) {
+-    MutexHistogram = new Histogram("VM Mutex Lock Attempt Counts",200);
++static int ParkCommon (ParkEvent * ev, jlong timo) {
++  // Diagnostic support - periodically unwedge blocked threads
++  intx nmt = NativeMonitorTimeout ;
++  if (nmt > 0 && (nmt < timo || timo <= 0)) {
++     timo = nmt ;
+   }
++  int err = OS_OK ;
++  if (0 == timo) {
++    ev->park() ;
++  } else {
++    err = ev->park(timo) ;
++  }
++  return err ;
++}
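[Editorial sketch of ParkCommon's clamp against a toy condition-variable ParkEvent; HotSpot's real ParkEvent is platform code, so everything below is illustrative. A positive diagnostic timeout nmt bounds every park, including nominally indefinite ones, so wedged threads periodically wake.]

#include <chrono>
#include <condition_variable>
#include <mutex>

struct ParkEvent {                     // toy permit-based event
  std::mutex m;
  std::condition_variable cv;
  bool permit = false;
  bool park(long millis) {             // returns true on timeout
    std::unique_lock<std::mutex> l(m);
    bool timed_out = !cv.wait_for(l, std::chrono::milliseconds(millis),
                                  [&]{ return permit; });
    permit = false;                    // consume the permit, as park() does
    return timed_out;
  }
  void unpark() {
    { std::lock_guard<std::mutex> g(m); permit = true; }
    cv.notify_one();
  }
};

// Mirror of ParkCommon's clamp: a positive diagnostic timeout nmt bounds
// both indefinite (timo == 0) and longer waits.
static bool park_common(ParkEvent & ev, long timo, long nmt) {
  if (nmt > 0 && (nmt < timo || timo <= 0)) timo = nmt;
  return ev.park(timo > 0 ? timo : 3600000L);   // 1h stands in for "indefinite"
}

int main() {
  ParkEvent ev;
  return park_common(ev, 0, 10) ? 0 : 1;   // wakes after ~10ms via the clamp
}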
+ 
+-  MutexHistogram->add_element(this);
+-  Atomic::dec(&MutexHistogram_lock);
++inline int Monitor::AcquireOrPush (ParkEvent * ESelf) {
++  intptr_t v = _LockWord.FullWord ;
++  for (;;) {
++    if ((v & _LBIT) == 0) {
++      const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
++      if (u == v) return 1 ;        // indicate acquired
++      v = u ;
++    } else {
++      // Anticipate success ...
++      ESelf->ListNext = (ParkEvent *) (v & ~_LBIT) ;
++      const intptr_t u = CASPTR (&_LockWord, v, intptr_t(ESelf)|_LBIT) ;
++      if (u == v) return 0 ;        // indicate pushed onto cxq
++      v = u ;
++    }
++    // Interference - LockWord change - just retry
++  }
+ }
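[Editorial aside: AcquireOrPush() fuses a trylock with a Treiber-style push -- one CAS either sets the lock bit or prepends the caller onto the cxq. A re-rendering over an atomic word, assuming nodes are aligned so bit 0 of their address is free; names are illustrative.]

#include <atomic>
#include <cstdint>

struct Node { Node * ListNext = nullptr; };  // stand-in for a ParkEvent

static const uintptr_t LBIT = 1;
static std::atomic<uintptr_t> LockWord{0};

// Returns 1 if the lock was acquired, 0 if self was pushed onto the cxq.
static int acquire_or_push(Node * self) {
  uintptr_t v = LockWord.load();
  for (;;) {
    if ((v & LBIT) == 0) {
      if (LockWord.compare_exchange_weak(v, v | LBIT)) return 1;  // acquired
    } else {
      // Anticipate success: link to the current cxq head before the CAS.
      self->ListNext = reinterpret_cast<Node *>(v & ~LBIT);
      if (LockWord.compare_exchange_weak(v, uintptr_t(self) | LBIT)) return 0;
    }
    // Interference -- a failed CAS refreshed v -- just retry.
  }
}

int main() {
  Node n;
  return acquire_or_push(&n) == 1 ? 0 : 1;   // on a fresh word: acquires
}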
+ 
++// ILock and IWait are the lowest level primitive internal blocking
++// synchronization functions.  The callers of IWait and ILock must have
++// performed any needed state transitions beforehand.
++// IWait and ILock may directly call park() without any concern for thread state.
++// Note that ILock and IWait do *not* access _owner.
++// _owner is a higher-level logical concept.
++
++void Monitor::ILock (Thread * Self) {
++  assert (_OnDeck != Self->_MutexEvent, "invariant") ;
++
++  if (TryFast()) {
++ Exeunt:
++    assert (ILocked(), "invariant") ;
++    return ;
++  }
+ 
+-MutexContentionHistogramElement::MutexContentionHistogramElement(const char* elementName) {
+-  _name = elementName;
+-  uintx count = 0;
++  ParkEvent * const ESelf = Self->_MutexEvent ;
++  assert (_OnDeck != ESelf, "invariant") ;
+ 
+-  while (Atomic::cmpxchg(1, &MutexContentionHistogram_lock, 0) != 0) {
+-    while (OrderAccess::load_acquire(&MutexContentionHistogram_lock) != 0) {
+-      count +=1;
+-      if ( (WarnOnStalledSpinLock > 0)
+-        && (count % WarnOnStalledSpinLock == 0)) {
+-        warning("MutexContentionHistogram_lock seems to be stalled");
+-      }
++  // As an optimization, spinners could conditionally try to set ONDECK to _LBIT
++  // Synchronizer.cpp uses a similar optimization.
++  if (TrySpin (Self)) goto Exeunt ;
++
++  // Slow-path - the lock is contended.
++  // Either Enqueue Self on cxq or acquire the outer lock.
++  // LockWord encoding = (cxq,LOCKBYTE)
++  ESelf->reset() ;
++  OrderAccess::fence() ;
++
++  // Optional optimization ... try barging on the inner lock
++  if ((NativeMonitorFlags & 32) && CASPTR (&_OnDeck, NULL, UNS(Self)) == 0) {
++    goto OnDeck_LOOP ;
++  }
++
++  if (AcquireOrPush (ESelf)) goto Exeunt ;
++
++  // At any given time there is at most one ondeck thread.
++  // ondeck implies not resident on cxq and not resident on EntryList
++  // Only the OnDeck thread can try to acquire -- contend for -- the lock.
++  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
++  // Deschedule Self so that others may run.
++  while (_OnDeck != ESelf) {
++    ParkCommon (ESelf, 0) ;
++  }
++
++  // Self is now in the ONDECK position and will remain so until it
++  // manages to acquire the lock.
++ OnDeck_LOOP:
++  for (;;) {
++    assert (_OnDeck == ESelf, "invariant") ;
++    if (TrySpin (Self)) break ;
++    // CONSIDER: if ESelf->TryPark() && TryLock() break ...
++    // It's probably wise to spin only if we *actually* blocked
++    // CONSIDER: check the lockbyte, if it remains set then
++    // preemptively drain the cxq into the EntryList.
++    // The best place and time to perform queue operations -- lock metadata --
++    // is _before having acquired the outer lock, while waiting for the lock to drop.
++    ParkCommon (ESelf, 0) ;
++  }
++
++  assert (_OnDeck == ESelf, "invariant") ;
++  _OnDeck = NULL ;
++
++  // Note that we currently drop the inner lock (clear OnDeck) in the slow-path
++  // epilog immediately after having acquired the outer lock.
++  // But instead we could consider the following optimizations:
++  // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation.
++  //    This might avoid potential reacquisition of the inner lock in IUnlock().
++  // B. While still holding the inner lock, attempt to opportunistically select
++  //    and unlink the next ONDECK thread from the EntryList.
++  //    If successful, set ONDECK to refer to that thread, otherwise clear ONDECK.
++  //    It's critical that the select-and-unlink operation run in constant-time as
++  //    it executes when holding the outer lock and may artificially increase the
++  //    effective length of the critical section.
++  // Note that (A) and (B) are tantamount to succession by direct handoff for
++  // the inner lock.
++  goto Exeunt ;
++}
++
++void Monitor::IUnlock (bool RelaxAssert) {
++  assert (ILocked(), "invariant") ;
++  _LockWord.Bytes[_LSBINDEX] = 0 ;       // drop outer lock
++  OrderAccess::storeload ();
++  ParkEvent * const w = _OnDeck ;
++  assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
++  if (w != NULL) {
++    // Either we have a valid ondeck thread or ondeck is transiently "locked"
++    // by some exiting thread as it arranges for succession.  The LSBit of
++    // OnDeck allows us to discriminate two cases.  If the latter, the
++    // responsibility for progress and succession lies with that other thread.
++    // For good performance, we also depend on the fact that redundant unpark()
++    // operations are cheap.  That is, repeated Unpark()ing of the ONDECK thread
++    // is inexpensive.  This approach provides implicit futile wakeup throttling.
++    // Note that the referent "w" might be stale with respect to the lock.
++    // In that case the following unpark() is harmless and the worst that'll happen
++    // is a spurious return from a park() operation.  Critically, if "w" _is stale,
++    // then progress is known to have occurred as that means the thread associated
++    // with "w" acquired the lock.  In that case this thread need take no further
++    // action to guarantee progress.
++    if ((UNS(w) & _LBIT) == 0) w->unpark() ;
++    return ;
++  }
++
++  intptr_t cxq = _LockWord.FullWord ;
++  if (((cxq & ~_LBIT)|UNS(_EntryList)) == 0) {
++    return ;      // normal fast-path exit - cxq and EntryList both empty
++  }
++  if (cxq & _LBIT) {
++    // Optional optimization ...
++    // Some other thread acquired the lock in the window since this
++    // thread released it.  Succession is now that thread's responsibility.
++    return ;
++  }
++
++ Succession:
++  // Slow-path exit - this thread must ensure succession and progress.
++  // OnDeck serves as lock to protect cxq and EntryList.
++  // Only the holder of OnDeck can manipulate EntryList or detach the RATs from cxq.
++  // Avoid ABA - allow multiple concurrent producers (enqueue via push-CAS)
++  // but only one concurrent consumer (detacher of RATs).
++  // Consider protecting this critical section with schedctl on Solaris.
++  // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
++  // picks a successor and marks that thread as OnDeck.  That successor
++  // thread will then clear OnDeck once it eventually acquires the outer lock.
++  if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) {
++    return ;
++  }
++
++  ParkEvent * List = _EntryList ;
++  if (List != NULL) {
++    // Transfer the head of the EntryList to the OnDeck position.
++    // Once OnDeck, a thread stays OnDeck until it acquires the lock.
++    // For a given lock there is at most one OnDeck thread at any one instant.
++   WakeOne:
++    assert (List == _EntryList, "invariant") ;
++    ParkEvent * const w = List ;
++    assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
++    _EntryList = w->ListNext ;
++    // as a diagnostic measure consider setting w->_ListNext = BAD
++    assert (UNS(_OnDeck) == _LBIT, "invariant") ;
++    _OnDeck = w ;           // pass OnDeck to w.
++                            // w will clear OnDeck once it acquires the outer lock
++
++    // Another optional optimization ...
++    // For heavily contended locks it's not uncommon that some other
++    // thread acquired the lock while this thread was arranging succession.
++    // Try to defer the unpark() operation - Delegate the responsibility
++    // for unpark()ing the OnDeck thread to the current or subsequent owners.
++    // That is, the new owner is responsible for unparking the OnDeck thread.
++    OrderAccess::storeload() ;
++    cxq = _LockWord.FullWord ;
++    if (cxq & _LBIT) return ;
++
++    w->unpark() ;
++    return ;
++  }
++
++  cxq = _LockWord.FullWord ;
++  if ((cxq & ~_LBIT) != 0) {
++    // The EntryList is empty but the cxq is populated.
++    // drain RATs from cxq into EntryList
++    // Detach RATs segment with CAS and then merge into EntryList
++    for (;;) {
++      // optional optimization - if locked, the owner is responsible for succession
++      if (cxq & _LBIT) goto Punt ;
++      const intptr_t vfy = CASPTR (&_LockWord, cxq, cxq & _LBIT) ;
++      if (vfy == cxq) break ;
++      cxq = vfy ;
++      // Interference - LockWord changed - Just retry
++      // We can see concurrent interference from contending threads
++      // pushing themselves onto the cxq or from lock-unlock operations.
++      // From the perspective of this thread, EntryList is stable and
++      // the cxq is prepend-only -- the head is volatile but the interior
++      // of the cxq is stable.  In theory if we encounter interference from threads
++      // pushing onto cxq we could simply break off the original cxq suffix and
++      // move that segment to the EntryList, avoiding a 2nd or multiple CAS attempts
++      // on the high-traffic LockWord variable.   For instance let's say the cxq is "ABCD"
++      // when we first fetch cxq above.  Between the fetch -- where we observed "A"
++      // -- and CAS -- where we attempt to CAS null over A -- "PQR" arrive,
++      // yielding cxq = "PQRABCD".  In this case we could simply set A.ListNext
++      // to null, leaving cxq = "PQRA", and transfer the "BCD" segment to the EntryList.
++      // Note too, that it's safe for this thread to traverse the cxq
++      // without taking any special concurrency precautions.
+     }
++
++    // We don't currently reorder the cxq segment as we move it onto
++    // the EntryList, but it might make sense to reverse the order
++    // or perhaps sort by thread priority.  See the comments in
++    // synchronizer.cpp objectMonitor::exit().
++    assert (_EntryList == NULL, "invariant") ;
++    _EntryList = List = (ParkEvent *)(cxq & ~_LBIT) ;
++    assert (List != NULL, "invariant") ;
++    goto WakeOne ;
+   }
+ 
+-  if (MutexContentionHistogram == NULL) {
+-    MutexContentionHistogram = new Histogram("VM Mutex Lock Contention Count",200);
++  // cxq|EntryList is empty.
++  // w == NULL implies that cxq|EntryList == NULL in the past.
++  // Possible race - rare inopportune interleaving.
++  // A thread could have added itself to cxq since this thread previously checked.
++  // Detect and recover by refetching cxq.
++ Punt:
++  assert (UNS(_OnDeck) == _LBIT, "invariant") ;
++  _OnDeck = NULL ;            // Release inner lock.
++  OrderAccess::storeload();   // Dekker duality - pivot point
++
++  // Resample LockWord/cxq to recover from possible race.
++  // For instance, while this thread T1 held OnDeck, some other thread T2 might
++  // acquire the outer lock.  Another thread T3 might try to acquire the outer
++  // lock, but encounter contention and enqueue itself on cxq.  T2 then drops the
++  // outer lock, but skips succession as this thread T1 still holds OnDeck.
++  // T1 is and remains responsible for ensuring succession of T3.
++  //
++  // Note that we don't need to recheck EntryList, just cxq.
++  // If threads moved onto EntryList since we dropped OnDeck
++  // that implies some other thread forced succession.
++  cxq = _LockWord.FullWord ;
++  if ((cxq & ~_LBIT) != 0 && (cxq & _LBIT) == 0) {
++    goto Succession ;         // potential race -- re-run succession
+   }
++  return ;
++}
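[Editorial aside: set the CAS retry games apart and the succession step is two list operations -- detach the whole cxq with one CAS (preserving the lock bit), then treat the detached chain as the new EntryList and unlink its head as the successor. A single-threaded sketch of just that data movement, toy types throughout; the real code interleaves this with the OnDeck inner-lock protocol.]

#include <atomic>
#include <cstdint>
#include <cstdio>

struct Node { int id; Node * ListNext; };

static const uintptr_t LBIT = 1;
static std::atomic<uintptr_t> LockWord{0};
static Node * EntryList = nullptr;

// Detach the entire cxq in one CAS, preserving the lock bit.
static Node * detach_cxq() {
  uintptr_t v = LockWord.load();
  while (!LockWord.compare_exchange_weak(v, v & LBIT)) { /* interference: retry */ }
  return reinterpret_cast<Node *>(v & ~LBIT);
}

// cf. the WakeOne block above: the detached chain becomes the EntryList
// and its head is unlinked as the next "OnDeck" successor.
static Node * pick_successor() {
  if (EntryList == nullptr) EntryList = detach_cxq();
  Node * w = EntryList;
  if (w != nullptr) EntryList = w->ListNext;
  return w;
}

int main() {
  // Three contenders pushed themselves; the cxq is in LIFO arrival order.
  static Node n1{1, nullptr}, n2{2, &n1}, n3{3, &n2};
  LockWord.store(uintptr_t(&n3) | LBIT);        // lock held, cxq = 3 -> 2 -> 1
  for (Node * w; (w = pick_successor()) != nullptr; )
    printf("unpark node %d\n", w->id);          // prints 3, 2, 1
  return 0;
}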
+ 
+-  MutexContentionHistogram->add_element(this);
+-  Atomic::dec(&MutexContentionHistogram_lock);
++bool Monitor::notify() {
++  assert (_owner == Thread::current(), "invariant") ;
++  assert (ILocked(), "invariant") ;
++  if (_WaitSet == NULL) return true ;
++  NotifyCount ++ ;
++
++  // Transfer one thread from the WaitSet to the EntryList or cxq.
++  // Currently we just unlink the head of the WaitSet and prepend to the cxq.
++  // And of course we could just unlink it and unpark it, too, but
++  // in that case it'd likely impale itself on the reentry.
++  Thread::muxAcquire (_WaitLock, "notify:WaitLock") ;
++  ParkEvent * nfy = _WaitSet ;
++  if (nfy != NULL) {                  // DCL idiom
++    _WaitSet = nfy->ListNext ;
++    assert (nfy->Notified == 0, "invariant") ;
++    // push nfy onto the cxq
++    for (;;) {
++      const intptr_t v = _LockWord.FullWord ;
++      assert ((v & 0xFF) == _LBIT, "invariant") ;
++      nfy->ListNext = (ParkEvent *)(v & ~_LBIT);
++      if (CASPTR (&_LockWord, v, UNS(nfy)|_LBIT) == v) break;
++      // interference - _LockWord changed -- just retry
++    }
++    // Note that setting Notified before pushing nfy onto the cxq is
++    // also legal and safe, but the safety properties are much more
++    // subtle, so for the sake of code stewardship ...
++    OrderAccess::fence() ;
++    nfy->Notified = 1;
++  }
++  Thread::muxRelease (_WaitLock) ;
++  if (nfy != NULL && (NativeMonitorFlags & 16)) {
++    // Experimental code ... light up the wakee in the hope that this thread (the owner)
++    // will drop the lock just about the time the wakee comes ONPROC.
++    nfy->unpark() ;
++  }
++  assert (ILocked(), "invariant") ;
++  return true ;
+ }
+ 
+-#endif
++// Currently notifyAll() transfers the waiters one-at-a-time from the waitset
++// to the cxq.  This could be done more efficiently with a single bulk en masse transfer,
++// but in practice notifyAll() for large #s of threads is rare and not time-critical.
++// Beware too, that we invert the order of the waiters.  Let's say that the
++// waitset is "ABCD" and the cxq is "XYZ".  After a notifyAll() the waitset
++// will be empty and the cxq will be "DCBAXYZ".  This is benign, of course.
++
++bool Monitor::notify_all() {
++  assert (_owner == Thread::current(), "invariant") ;
++  assert (ILocked(), "invariant") ;
++  while (_WaitSet != NULL) notify() ;
++  return true ;
++}
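[Editorial aside: the inversion called out above is pure list mechanics -- each notify() pops the WaitSet head and prepends it to the cxq. A tiny demonstration that a WaitSet "ABCD" meeting a cxq "XYZ" yields "DCBAXYZ":]

#include <cstdio>

struct Ev { char name; Ev * next; };

int main() {
  static Ev D{'D', nullptr}, C{'C', &D}, B{'B', &C}, A{'A', &B};   // WaitSet: ABCD
  static Ev Z{'Z', nullptr}, Y{'Y', &Z}, X{'X', &Y};               // cxq:     XYZ
  Ev * WaitSet = &A;
  Ev * cxq = &X;

  while (WaitSet != nullptr) {       // notify_all(): one-at-a-time transfer
    Ev * nfy = WaitSet;              // unlink the WaitSet head ...
    WaitSet = nfy->next;
    nfy->next = cxq;                 // ... and prepend it to the cxq
    cxq = nfy;
  }
  for (Ev * p = cxq; p != nullptr; p = p->next) putchar(p->name);  // DCBAXYZ
  putchar('\n');
  return 0;
}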
+ 
++int Monitor::IWait (Thread * Self, jlong timo) {
++  assert (ILocked(), "invariant") ;
++
++  // Phases:
++  // 1. Enqueue Self on WaitSet - currently prepend
++  // 2. unlock - drop the outer lock
++  // 3. wait for either notification or timeout
++  // 4. lock - reentry - reacquire the outer lock
++
++  ParkEvent * const ESelf = Self->_MutexEvent ;
++  ESelf->Notified = 0 ;
++  ESelf->reset() ;
++  OrderAccess::fence() ;
++
++  // Add Self to WaitSet
++  // Ideally only the holder of the outer lock would manipulate the WaitSet -
++  // That is, the outer lock would implicitly protect the WaitSet.
++  // But if a thread in wait() encounters a timeout it will need to dequeue itself
++  // from the WaitSet _before it becomes the owner of the lock.  We need to dequeue
++  // as the ParkEvent -- which serves as a proxy for the thread -- can't reside
++  // on both the WaitSet and the EntryList|cxq at the same time.  That is, a thread
++  // on the WaitSet can't be allowed to compete for the lock until it has managed to
++  // unlink its ParkEvent from WaitSet.  Thus the need for WaitLock.
++  // Contention on the WaitLock is minimal.
++  //
++  // Another viable approach would be to add another ParkEvent, "WaitEvent", to the
++  // thread class.  The WaitSet would be composed of WaitEvents.  Only the
++  // owner of the outer lock would manipulate the WaitSet.  A thread in wait()
++  // could then compete for the outer lock, and then, if necessary, unlink itself
++  // from the WaitSet only after having acquired the outer lock.  More precisely,
++  // there would be no WaitLock.  A thread in wait() would enqueue its WaitEvent
++  // on the WaitSet; release the outer lock; wait for either notification or timeout;
++  // reacquire the outer lock; and then, if needed, unlink itself from the WaitSet.
++  //
++  // Alternatively, a 2nd set of list link fields in the ParkEvent might suffice.
++  // One set would be for the WaitSet and one for the EntryList.
++  // We could also deconstruct the ParkEvent into a "pure" event and add a
++  // new immortal/TSM "ListElement" class that referred to ParkEvents.
++  // In that case we could have one ListElement on the WaitSet and another
++  // on the EntryList, with both referring to the same pure Event.
++
++  Thread::muxAcquire (_WaitLock, "wait:WaitLock:Add") ;
++  ESelf->ListNext = _WaitSet ;
++  _WaitSet = ESelf ;
++  Thread::muxRelease (_WaitLock) ;
++
++  // Release the outer lock
++  // We call IUnlock (RelaxAssert=true) as a thread T1 might
++  // enqueue itself on the WaitSet, call IUnlock(), drop the lock,
++  // and then stall before it can attempt to wake a successor.
++  // Some other thread T2 acquires the lock, and calls notify(), moving
++  // T1 from the WaitSet to the cxq.  T2 then drops the lock.  T1 resumes,
++  // and then finds *itself* on the cxq.  During the course of a normal
++  // IUnlock() call a thread should _never find itself on the EntryList
++  // or cxq, but in the case of wait() it's possible.
++  // See synchronizer.cpp objectMonitor::wait().
++  IUnlock (true) ;
++
++  // Wait for either notification or timeout
++  // Beware that in some circumstances we might propagate
++  // spurious wakeups back to the caller.
++
++  for (;;) {
++    if (ESelf->Notified) break ;
++    int err = ParkCommon (ESelf, timo) ;
++    if (err == OS_TIMEOUT || (NativeMonitorFlags & 1)) break ;
++  }
++
++  // Prepare for reentry - if necessary, remove ESelf from WaitSet
++  // ESelf can be:
++  // 1. Still on the WaitSet.  This can happen if we exited the loop by timeout.
++  // 2. On the cxq or EntryList
++  // 3. Not resident on cxq, EntryList or WaitSet, but in the OnDeck position.
++
++  OrderAccess::fence() ;
++  int WasOnWaitSet = 0 ;
++  if (ESelf->Notified == 0) {
++    Thread::muxAcquire (_WaitLock, "wait:WaitLock:remove") ;
++    if (ESelf->Notified == 0) {     // DCL idiom
++      assert (_OnDeck != ESelf, "invariant") ;   // can't be both OnDeck and on WaitSet
++      // ESelf is resident on the WaitSet -- unlink it.
++      // A doubly-linked list would be better here so we can unlink in constant-time.
++      // We have to unlink before we potentially recontend as ESelf might otherwise
++      // end up on the cxq|EntryList -- it can't be on two lists at once.
++      ParkEvent * p = _WaitSet ;
++      ParkEvent * q = NULL ;            // classic q chases p
++      while (p != NULL && p != ESelf) {
++        q = p ;
++        p = p->ListNext ;
++      }
++      assert (p == ESelf, "invariant") ;
++      if (p == _WaitSet) {      // found at head
++        assert (q == NULL, "invariant") ;
++        _WaitSet = p->ListNext ;
++      } else {                  // found in interior
++        assert (q->ListNext == p, "invariant") ;
++        q->ListNext = p->ListNext ;
++      }
++      WasOnWaitSet = 1 ;        // We were *not* notified but instead encountered timeout
++    }
++    Thread::muxRelease (_WaitLock) ;
++  }
++
++  // Reentry phase - reacquire the lock
++  if (WasOnWaitSet) {
++    // ESelf was previously on the WaitSet but we just unlinked it above
++    // because of a timeout.  ESelf is not resident on any list and is not OnDeck
++    assert (_OnDeck != ESelf, "invariant") ;
++    ILock (Self) ;
++  } else {
++    // A prior notify() operation moved ESelf from the WaitSet to the cxq.
++    // ESelf is now on the cxq, EntryList or at the OnDeck position.
++    // The following fragment is extracted from Monitor::ILock()
++    for (;;) {
++      if (_OnDeck == ESelf && TrySpin(Self)) break ;
++      ParkCommon (ESelf, 0) ;
++    }
++    assert (_OnDeck == ESelf, "invariant") ;
++    _OnDeck = NULL ;
++  }
++
++  assert (ILocked(), "invariant") ;
++  return WasOnWaitSet != 0 ;        // return true IFF timeout
++}
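[Editorial sketch: the timeout path's unlink ("classic q chases p") isolated as a free function over a toy singly linked list, since the same idiom recurs in the WaitSet code above.]

#include <cassert>

struct Ev { Ev * ListNext = nullptr; };

// Remove target from the list headed at *head; q trails one node behind p.
static bool unlink_from(Ev ** head, Ev * target) {
  Ev * p = *head;
  Ev * q = nullptr;
  while (p != nullptr && p != target) { q = p; p = p->ListNext; }
  if (p == nullptr) return false;                 // not resident on the list
  if (q == nullptr) *head = p->ListNext;          // found at head
  else              q->ListNext = p->ListNext;    // found in the interior
  return true;
}

int main() {
  Ev a, b, c;
  a.ListNext = &b; b.ListNext = &c;
  Ev * head = &a;
  assert(unlink_from(&head, &b) && a.ListNext == &c);   // interior removal
  assert(unlink_from(&head, &a) && head == &c);         // head removal
  return 0;
}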
+ 
+-// This needs to be an invalid return from Thread::current; NULL is checked
+-// for as invalid in its implementation. _owner == INVALID_THREAD, means that
+-// the lock is unlocked.
+ 
+ // ON THE VMTHREAD SNEAKING PAST HELD LOCKS:
+ // In particular, there are certain types of global lock that may be held
+@@ -92,181 +835,351 @@
+ // written the _owner field. These locks may be sneakily acquired by the
+ // VM thread during a safepoint to avoid deadlocks. Alternatively, one should
+ // identify all such locks, and ensure that Java threads never block at
+-// safepoints while holding them (_no_safepopint_check_flag). While it
++// safepoints while holding them (_no_safepoint_check_flag). While it
+ // seems as though this could increase the time to reach a safepoint
+ // (or at least increase the mean, if not the variance), the latter
+ // approach might make for a cleaner, more maintainable JVM design.
++//
++// Sneaking is vile and reprehensible and should be excised at the 1st
++// opportunity.  It's possible that the need for sneaking could be obviated
++// as follows.  Currently, a thread might (a) while TBIVM, call pthread_mutex_lock
++// or ILock() thus acquiring the "physical" lock underlying Monitor/Mutex.
++// (b) stall at the TBIVM exit point as a safepoint is in effect.  Critically,
++// it'll stall at the TBIVM reentry state transition after having acquired the
++// underlying lock, but before having set _owner and having entered the actual
++// critical section.  The lock-sneaking facility leverages that fact and allows the
++// VM thread to logically acquire locks that have already been physically locked by mutators
++// but where the mutators are known to be blocked at the reentry thread state transition.
++//
++// If we were to modify the Monitor-Mutex so that TBIVM state transitions tightly
++// wrapped calls to park(), then we could likely do away with sneaking.  We'd
++// decouple lock acquisition and parking.  The critical invariant for eliminating
++// sneaking is to ensure that we never "physically" acquire the lock while TBIVM.
++// An easy way to accomplish this is to wrap the park calls in a narrow TBIVM jacket.
++// One difficulty with this approach is that the TBIVM wrapper could recurse and
++// call lock() deep from within a lock() call, while the MutexEvent was already enqueued.
++// Using a stack (N=2 at minimum) of ParkEvents would take care of that problem.
++//
++// But of course the proper ultimate approach is to avoid schemes that require explicit
++// sneaking or dependence on any clever invariants or subtle implementation properties
++// of Mutex-Monitor and instead directly address the underlying design flaw.
+ 
+-Thread* Mutex::INVALID_THREAD = (Thread*)NULL;
+-
+-void Mutex::lock(Thread *thread) { 
+-
++void Monitor::lock (Thread * Self) {
+ #ifdef CHECK_UNHANDLED_OOPS
+   // Clear unhandled oops so we get a crash right away.  Only clear for non-vm
+   // or GC threads.
+-  if (thread->is_Java_thread()) {
+-    thread->clear_unhandled_oops();
++  if (Self->is_Java_thread()) {
++    Self->clear_unhandled_oops();
+   }
+ #endif // CHECK_UNHANDLED_OOPS
+ 
+-  debug_only(check_prelock_state(thread));
+-  // assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler");
++  debug_only(check_prelock_state(Self));
++  assert (_owner != Self              , "invariant") ;
++  assert (_OnDeck != Self->_MutexEvent, "invariant") ;
++
++  if (TryFast()) {
++ Exeunt:
++    assert (ILocked(), "invariant") ;
++    assert (owner() == NULL, "invariant");
++    set_owner (Self);
++    return ;
++  }
+ 
+-#ifdef ASSERT
+-  // Keep track of how many time access lock
+-  if (CountVMLocks) _histogram->increment_count();  
+-#endif
++  // The lock is contended ...
+ 
+-  // lock_implementation is a os-specific method
+-  if (lock_implementation()) {
+-    // Success, we now own the lock
+-  } else {    
+-#ifdef ASSERT
+-  // Keep track of how many times we fail 
+-  if (CountVMLocks) _contend_histogram->increment_count();  
+-#endif
+-    bool can_sneak = thread->is_VM_thread() &&
+-                     SafepointSynchronize::is_at_safepoint();
+-    if (can_sneak && _owner == INVALID_THREAD) {        
+-      // a java thread has locked the lock but has not entered the
+-      // critical region -- let's just pretend we've locked the lock
+-      // and go on.  we note this with _suppress_signal so we can also
+-      // pretend to unlock when the time comes.
+-      _suppress_signal = true;
+-    } else {
+-      check_block_state(thread);
+-      if (!thread->is_Java_thread()) {
+-	wait_for_lock_implementation();
+-      } else {	
+-	debug_only(assert(rank() > Mutex::special, 
+-	  "Potential deadlock with special or lesser rank mutex"));
+-	wait_for_lock_blocking_implementation((JavaThread*)thread);
+-      }
+-    }
++  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
++  if (can_sneak && _owner == NULL) {
++    // a java thread has locked the lock but has not entered the
++    // critical region -- let's just pretend we've locked the lock
++    // and go on.  we note this with _snuck so we can also
++    // pretend to unlock when the time comes.
++    _snuck = true;
++    goto Exeunt ;
++  }
++
++  // Try a brief spin to avoid passing thru thread state transition ...
++  if (TrySpin (Self)) goto Exeunt ;
++
++  check_block_state(Self);
++  if (Self->is_Java_thread()) {
++    // Horribile dictu - we suffer through a state transition
++    assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
++    ThreadBlockInVM tbivm ((JavaThread *) Self) ;
++    ILock (Self) ;
++  } else {
++    // Mirabile dictu
++    ILock (Self) ;
+   }
++  goto Exeunt ;
++}
++
++void Monitor::lock() {
++  this->lock(Thread::current());
++}
++
++// Lock without safepoint check - a degenerate variant of lock().
++// Should ONLY be used by safepoint code and other code
++// that is guaranteed not to block while running inside the VM. If this is called with
++// thread state set to be in VM, the safepoint synchronization code will deadlock!
+ 
+-  assert(owner() == Mutex::INVALID_THREAD, "Mutex lock count and owner are inconsistent");
+-  set_owner(thread);
+-  trace("locks");  
++void Monitor::lock_without_safepoint_check (Thread * Self) {
++  assert (_owner != Self, "invariant") ;
++  ILock (Self) ;
++  assert (_owner == NULL, "invariant");
++  set_owner (Self);
+ }
+ 
+-void Mutex::lock() {
+-  Thread* thread = Thread::current();    
+-  this->lock(thread);
++void Monitor::lock_without_safepoint_check () {
++  lock_without_safepoint_check (Thread::current()) ;
+ }
+ 
+-// Returns true if thread succeceed in grabbing the lock, otherwise false.
+-bool Mutex::try_lock() {
+-  Thread* thread    = Thread::current();
+-  debug_only(check_prelock_state(thread));
++
++// Returns true if thread succeceed [sic] in grabbing the lock, otherwise false.
++
++bool Monitor::try_lock() {
++  Thread * const Self = Thread::current();
++  debug_only(check_prelock_state(Self));
+   // assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler");
+ 
+-#ifdef ASSERT
+-  // Keep track of how many time access lock
+-  if (CountVMLocks) _histogram->increment_count();  
+-#endif
+-  // Special case, where all Java threads are stopped. The count is not -1, but the owner
+-  // is not yet set. In that case the VM thread can safely grab the lock.
+-  bool can_sneak = thread->is_VM_thread() &&
+-                   SafepointSynchronize::is_at_safepoint();
+-  if (can_sneak && _owner == INVALID_THREAD) {
+-    set_owner(thread); // Do not need to be atomic, since we are at a safepoint
+-    _suppress_signal = true;
++  // Special case, where all Java threads are stopped.
++  // The lock may have been acquired but _owner is not yet set.
++  // In that case the VM thread can safely grab the lock.
++  // It strikes me this should appear _after the TryLock() fails, below.
++  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
++  if (can_sneak && _owner == NULL) {
++    set_owner(Self); // Do not need to be atomic, since we are at a safepoint
++    _snuck = true;
+     return true;
+   }
+ 
+-  // The try_lock_implementation is platform-specific
+-  if (try_lock_implementation()) {
+-    // We got the lock        
+-    assert(owner() == Mutex::INVALID_THREAD, "Mutex lock count and owner are inconsistent");
+-    set_owner(thread);
+-    trace("try_locks");
++  if (TryLock()) {
++    // We got the lock
++    assert (_owner == NULL, "invariant");
++    set_owner (Self);
+     return true;
+-  } else {
+-#ifdef ASSERT
+-  // Keep track of how many times we fail 
+-  if (CountVMLocks) _contend_histogram->increment_count();  
+-#endif
+-    return false;
+   }
++  return false;
+ }
+ 
+-// Lock without safepoint check. Should ONLY be used by safepoint code and other code
+-// that is guaranteed not to block while running inside the VM. If this is called with
+-// thread state set to be in VM, the safepoint synchronization code will deadlock!
++void Monitor::unlock() {
++  assert (_owner  == Thread::current(), "invariant") ;
++  assert (_OnDeck != Thread::current()->_MutexEvent , "invariant") ;
++  set_owner (NULL) ;
++  if (_snuck) {
++    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
++    _snuck = false;
++    return ;
++  }
++  IUnlock (false) ;
++}
+ 
+-void Mutex::lock_without_safepoint_check() {
+-#ifdef ASSERT
+-  // Keep track of how many time access lock
+-  if (CountVMLocks) _histogram->increment_count();  
+-#endif
+-  Thread* thread = Thread::current();
+-// #ifdef ASSERT
+-//   if (thread) {
+-//     assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler");
+-//   }
+-// #endif
+-
+-  // lock_implementation is platform specific
+-  if (lock_implementation()) {
+-    // Success, we now own the lock
+-  } else {    
+-#ifdef ASSERT
+-    // Keep track of how many times we fail 
+-    if (CountVMLocks) _contend_histogram->increment_count();  
+-#endif
+-    wait_for_lock_implementation();
++// Yet another degenerate version of Monitor::lock() or lock_without_safepoint_check()
++// jvm_raw_lock() and _unlock() can be called by non-Java threads via JVM_RawMonitorEnter.
++//
++// There's no expectation that JVM_RawMonitors will interoperate properly with the native
++// Mutex-Monitor constructs.  We happen to implement JVM_RawMonitors in terms of
++// native Mutex-Monitors simply as a matter of convenience.  A simple abstraction layer
++// over a pthread_mutex_t would work equally well, but would require more platform-specific
++// code -- a "PlatformMutex".  Alternatively, a simple layer over muxAcquire-muxRelease
++// would work too.
++//
++// Since the caller might be a foreign thread, we don't necessarily have a Thread.MutexEvent
++// instance available.  Instead, we transiently allocate a ParkEvent on-demand if
++// we encounter contention.  That ParkEvent remains associated with the thread
++// until it manages to acquire the lock, at which time we return the ParkEvent
++// to the global ParkEvent free list.  This is correct and suffices for our purposes.
++//
++// Beware that the original jvm_raw_unlock() had a "_snuck" test but that
++// jvm_raw_lock() didn't have the corresponding test.  I suspect that's an
++// oversight, but I've replicated the original suspect logic in the new code ...
++
++void Monitor::jvm_raw_lock() {
++  assert(rank() == native, "invariant");
++
++  if (TryLock()) {
++ Exeunt:
++    assert (ILocked(), "invariant") ;
++    assert (_owner == NULL, "invariant");
++    // This can potentially be called by non-java Threads. Thus, the ThreadLocalStorage
++    // might return NULL. Don't call set_owner since it will break on a NULL owner.
++    // Consider installing a non-null "ANON" distinguished value instead of just NULL.
++    _owner = ThreadLocalStorage::thread();
++    return ;
++  }
++
++  if (TrySpin(NULL)) goto Exeunt ;
++
++  // slow-path - apparent contention
++  // Allocate a ParkEvent for transient use.
++  // The ParkEvent remains associated with this thread until
++  // the time the thread manages to acquire the lock.
++  ParkEvent * const ESelf = ParkEvent::Allocate(NULL) ;
++  ESelf->reset() ;
++  OrderAccess::storeload() ;
++
++  // Either Enqueue Self on cxq or acquire the outer lock.
++  if (AcquireOrPush (ESelf)) {
++    ParkEvent::Release (ESelf) ;      // surrender the ParkEvent
++    goto Exeunt ;
++  }
++
++  // At any given time there is at most one ondeck thread.
++  // ondeck implies not resident on cxq and not resident on EntryList
++  // Only the OnDeck thread can try to acquire -- contend for -- the lock.
++  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
++  for (;;) {
++    if (_OnDeck == ESelf && TrySpin(NULL)) break ;
++    ParkCommon (ESelf, 0) ;
++  }
++
++  assert (_OnDeck == ESelf, "invariant") ;
++  _OnDeck = NULL ;
++  ParkEvent::Release (ESelf) ;      // surrender the ParkEvent
++  goto Exeunt ;
++}
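[Editorial aside: jvm_raw_lock()'s transient-event discipline -- borrow a ParkEvent only on contention, surrender it once the lock is won -- can be modeled with a trivial global free list. Toy types throughout; the "never freed" comment mirrors the banner's immortality/type-stability requirement.]

#include <mutex>
#include <vector>

struct ParkEvent { /* park/unpark elided in this sketch */ };

static std::mutex FreeLock;
static std::vector<ParkEvent *> FreeList;

static ParkEvent * allocate_event() {       // cf. ParkEvent::Allocate(NULL)
  std::lock_guard<std::mutex> g(FreeLock);
  if (FreeList.empty()) return new ParkEvent();
  ParkEvent * e = FreeList.back();
  FreeList.pop_back();
  return e;
}

static void release_event(ParkEvent * e) {  // cf. ParkEvent::Release(e)
  std::lock_guard<std::mutex> g(FreeLock);
  FreeList.push_back(e);                    // events are immortal: never freed
}

int main() {
  ParkEvent * e = allocate_event();   // contention detected: borrow an event
  /* ... enqueue, park, eventually acquire the lock ... */
  release_event(e);                   // lock won: surrender the event
  return 0;
}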
++
++void Monitor::jvm_raw_unlock() {
++  // Nearly the same as Monitor::unlock() ...
++  // directly set _owner instead of using set_owner(null)
++  _owner = NULL ;
++  if (_snuck) {         // ???
++    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
++    _snuck = false;
++    return ;
++  }
++  IUnlock(false) ;
++}
++
++bool Monitor::wait(bool no_safepoint_check, long timeout, bool as_suspend_equivalent) {
++  Thread * const Self = Thread::current() ;
++  assert (_owner == Self, "invariant") ;
++  assert (ILocked(), "invariant") ;
++
++  // as_suspend_equivalent logically implies !no_safepoint_check
++  guarantee (!as_suspend_equivalent || !no_safepoint_check, "invariant") ;
++  // !no_safepoint_check logically implies java_thread
++  guarantee (no_safepoint_check || Self->is_Java_thread(), "invariant") ;
++
++  #ifdef ASSERT
++    Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
++    assert(least != this, "Specification of get_least_... call above");
++    if (least != NULL && least->rank() <= special) {
++      tty->print("Attempting to wait on monitor %s/%d while holding"
++                 " lock %s/%d -- possible deadlock",
++                 name(), rank(), least->name(), least->rank());
++      assert(false, "Shouldn't block(wait) while holding a lock of rank special");
++    }
++  #endif // ASSERT
++
++  int wait_status ;
++  // conceptually set the owner to NULL in anticipation of
++  // abdicating the lock in wait
++  set_owner(NULL);
++  if (no_safepoint_check) {
++    wait_status = IWait (Self, timeout) ;
++  } else {
++    assert (Self->is_Java_thread(), "invariant") ;
++    JavaThread *jt = (JavaThread *)Self;
++
++    // Enter safepoint region - ornate and Rococo ...
++    ThreadBlockInVM tbivm(jt);
++    OSThreadWaitState osts(Self->osthread(), false /* not Object.wait() */);
++
++    if (as_suspend_equivalent) {
++      jt->set_suspend_equivalent();
++      // cleared by handle_special_suspend_equivalent_condition() or
++      // java_suspend_self()
++    }
++
++    wait_status = IWait (Self, timeout) ;
++
++    // were we externally suspended while we were waiting?
++    if (as_suspend_equivalent && jt->handle_special_suspend_equivalent_condition()) {
++      // Our event wait has finished and we own the lock, but
++      // while we were waiting another thread suspended us. We don't
++      // want to hold the lock while suspended because that
++      // would surprise the thread that suspended us.
++      assert (ILocked(), "invariant") ;
++      IUnlock (true) ;
++      jt->java_suspend_self();
++      ILock (Self) ;
++      assert (ILocked(), "invariant") ;
++    }
+   }
+ 
+-  assert(_owner == INVALID_THREAD, "Mutex lock count and owner are inconsistent");
+-  set_owner(thread);
++  // Conceptually reestablish ownership of the lock.
++  // The "real" lock -- the LockByte -- was reacquired by IWait().
++  assert (ILocked(), "invariant") ;
++  assert (_owner == NULL, "invariant") ;
++  set_owner (Self) ;
++  return wait_status != 0 ;          // return true IFF timeout
++}
++
++Monitor::~Monitor() {
++  assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
++}
++
++void Monitor::ClearMonitor (Monitor * m) {
++  m->_owner             = NULL ;
++  m->_snuck             = false ;
++  m->_name              = "UNKNOWN" ;
++  m->_LockWord.FullWord = 0 ;
++  m->_EntryList         = NULL ;
++  m->_OnDeck            = NULL ;
++  m->_WaitSet           = NULL ;
++  m->_WaitLock[0]       = 0 ;
+ }
+ 
++Monitor::Monitor() { ClearMonitor(this); }
+ 
+-// Can be called by non-Java threads (JVM_RawMonitorEnter)
+-void Mutex::jvm_raw_lock() {
++Monitor::Monitor (int Rank, const char * name, bool allow_vm_block) {
++  ClearMonitor (this) ;
+ #ifdef ASSERT
+-  // Keep track of how many time access lock
+-  if (CountVMLocks) _histogram->increment_count();  
++  _allow_vm_block  = allow_vm_block;
++  _rank            = Rank ;
+ #endif
+-  assert(rank() == native, "must be called by non-VM locks");
+-  if (lock_implementation()) {
+-    // Success, we now own the lock
+-  } else {    
++}
++
++Mutex::~Mutex() {
++  assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
++}
++
++Mutex::Mutex (int Rank, const char * name, bool allow_vm_block) {
++  ClearMonitor ((Monitor *) this) ;
+ #ifdef ASSERT
+-    // Keep track of how many times we fail 
+-    if (CountVMLocks) _contend_histogram->increment_count();  
++  _allow_vm_block  = allow_vm_block;
++  _rank            = Rank ;
+ #endif
+-    wait_for_lock_implementation();
+-  }
+-  assert(_owner == INVALID_THREAD, "Mutex lock count and owner are inconsistent");
+-  // This can potentially be called by non-java Threads. Thus, the ThreadLocalStorage
+-  // might return NULL. Don't call set_owner since it will break on an NULL
+-  // owner
+-  _owner = ThreadLocalStorage::thread();
+ }
+ 
+-bool Mutex::owned_by_self() const { 
+-  bool ret = _owner == Thread::current(); 
+-  assert(_lock_count>=0 || !ret, "lock count must by >=0 for a locked mutex");
++bool Monitor::owned_by_self() const {
++  bool ret = _owner == Thread::current();
++  assert (!ret || _LockWord.Bytes[_LSBINDEX] != 0, "invariant") ;
+   return ret;
+ }
+ 
+-void Mutex::print_on_error(outputStream* st) const {
++void Monitor::print_on_error(outputStream* st) const {
+   st->print("[" PTR_FORMAT, this);
+-  st->print("/" PTR_FORMAT, _lock_event);
+   st->print("] %s", _name);
+   st->print(" - owner thread: " PTR_FORMAT, _owner);
+ }
+ 
++
++
++
+ // ----------------------------------------------------------------------------------
+ // Non-product code
+ 
++#ifndef PRODUCT
++void Monitor::print_on(outputStream* st) const {
++  st->print_cr("Mutex: [0x%lx/0x%lx] %s - owner: 0x%lx", this, _LockWord.FullWord, _name, _owner);
++}
++#endif
+ 
+ #ifndef PRODUCT
+ #ifdef ASSERT
+-Mutex* Mutex::get_least_ranked_lock(Mutex* locks) {
+-  Mutex *res, *tmp;
++Monitor * Monitor::get_least_ranked_lock(Monitor * locks) {
++  Monitor *res, *tmp;
+   for (res = tmp = locks; tmp != NULL; tmp = tmp->next()) {
+     if (tmp->rank() < res->rank()) {
+       res = tmp;
+@@ -277,7 +1190,7 @@
+     // in increasing rank order (modulo any native ranks)
+     for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
+       if (tmp->next() != NULL) {
+-        assert(tmp->rank() == Mutex::native || 
++        assert(tmp->rank() == Mutex::native ||
+                tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
+       }
+     }
+@@ -285,8 +1198,8 @@
+   return res;
+ }
+ 
+-Mutex* Mutex::get_least_ranked_lock_besides_this(Mutex* locks) {
+-  Mutex *res, *tmp;
++Monitor* Monitor::get_least_ranked_lock_besides_this(Monitor* locks) {
++  Monitor *res, *tmp;
+   for (res = NULL, tmp = locks; tmp != NULL; tmp = tmp->next()) {
+     if (tmp != this && (res == NULL || tmp->rank() < res->rank())) {
+       res = tmp;
+@@ -306,7 +1219,7 @@
+ }
+ 
+ 
+-bool Mutex::contains(Mutex* locks, Mutex* lock) {
++bool Monitor::contains(Monitor* locks, Monitor * lock) {
+   for (; locks != NULL; locks = locks->next()) {
+     if (locks == lock)
+       return true;
+@@ -315,7 +1228,12 @@
+ }
+ #endif
+ 
+-void Mutex::set_owner_implementation(Thread *new_owner) {  
++// Called immediately after lock acquisition or release as a diagnostic
++// to track the lock-set of the thread and test for rank violations that
++// might indicate exposure to deadlock.
++// Rather like an EventListener for _owner (:>).
++
++void Monitor::set_owner_implementation(Thread *new_owner) {
+   // This function is solely responsible for maintaining
+   // and checking the invariant that threads and locks
+   // are in a 1/N relation, with some locks unowned.
+@@ -326,17 +1244,17 @@
+   // owner to another--it must be owned by NULL as an
+   // intermediate state.
+ 
+-  if (new_owner != INVALID_THREAD) {
++  if (new_owner != NULL) {
+     // the thread is acquiring this lock
+- 
++
+     assert(new_owner == Thread::current(), "Should I be doing this?");
+-    assert(_owner == INVALID_THREAD, "setting the owner thread of an already owned mutex");
++    assert(_owner == NULL, "setting the owner thread of an already owned mutex");
+     _owner = new_owner; // set the owner
+ 
+     // link "this" into the owned locks list
+ 
+     #ifdef ASSERT  // Thread::_owned_locks is under the same ifdef
+-      Mutex* locks = get_least_ranked_lock(new_owner->owned_locks());
++      Monitor* locks = get_least_ranked_lock(new_owner->owned_locks());
+                     // Mutex::set_owner_implementation is a friend of Thread
+ 
+       assert(this->rank() >= 0, "bad lock rank");
+@@ -346,11 +1264,11 @@
+       }
+ 
+       // Deadlock avoidance rules require us to acquire Mutexes only in
+-      // a global total order. For example m1 is the lowest ranked mutex 
+-      // that the thread holds and m2 is the mutex the thread is trying 
+-      // to acquire, then  deadlock avoidance rules require that the rank 
+-      // of m2 be less  than the rank of m1. 
+-      // The rank Mutex::native  is an exception in that it is not subject 
++      // a global total order. For example, if m1 is the lowest ranked mutex
++      // that the thread holds and m2 is the mutex the thread is trying
++      // to acquire, then deadlock avoidance rules require that the rank
++      // of m2 be less than the rank of m1.
++      // The rank Mutex::native  is an exception in that it is not subject
+       // to the verification rules.
+       // Here are some further notes relating to mutex acquisition anomalies:
+       // . under Solaris, the interrupt lock gets acquired when doing
+@@ -360,13 +1278,13 @@
+       if (this->rank() != Mutex::native &&
+           this->rank() != Mutex::suspend_resume &&
+           locks != NULL && locks->rank() <= this->rank() &&
+-	  !SafepointSynchronize::is_at_safepoint() &&
++          !SafepointSynchronize::is_at_safepoint() &&
+           this != Interrupt_lock && this != ProfileVM_lock &&
+-	  !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
+-            SafepointSynchronize::is_synchronizing())) { 
++          !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
++            SafepointSynchronize::is_synchronizing())) {
+         new_owner->print_owned_locks();
+-        fatal4("acquiring lock %s/%d out of order with lock %s/%d -- possible deadlock", 
+-	       this->name(), this->rank(), locks->name(), locks->rank());
++        fatal4("acquiring lock %s/%d out of order with lock %s/%d -- possible deadlock",
++               this->name(), this->rank(), locks->name(), locks->rank());
+       }
+ 
+       this->_next = new_owner->_owned_locks;
+@@ -378,27 +1296,27 @@
+ 
+     Thread* old_owner = _owner;
+     debug_only(_last_owner = old_owner);
+-    
+-    assert(old_owner != INVALID_THREAD, "removing the owner thread of an unowned mutex");
++
++    assert(old_owner != NULL, "removing the owner thread of an unowned mutex");
+     assert(old_owner == Thread::current(), "removing the owner thread of an unowned mutex");
+ 
+-    _owner = INVALID_THREAD; // set the owner
++    _owner = NULL; // set the owner
+ 
+     #ifdef ASSERT
+-      Mutex *locks = old_owner->owned_locks();
++      Monitor *locks = old_owner->owned_locks();
+ 
+       if (LogMultipleMutexLocking && locks != this) {
+         Events::log("thread " INTPTR_FORMAT " unlocks %s, still owns %s", old_owner, this->name(), locks->name());
+       }
+-  
++
+       // remove "this" from the owned locks list
+-  
+-      Mutex *prev = NULL;
++
++      Monitor *prev = NULL;
+       bool found = false;
+       for (; locks != NULL; prev = locks, locks = locks->next()) {
+         if (locks == this) {
+-	  found = true;
+-	  break;
++          found = true;
++          break;
+         }
+       }
+       assert(found, "Removing a lock not owned");
+@@ -414,11 +1332,10 @@
+ 
+ 
+ // Factored out common sanity checks for locking mutex'es. Used by lock() and try_lock()
+-void Mutex::check_prelock_state(Thread *thread) {
+-  assert(_lock_count >= -1, "sanity check");  
+-  assert((!thread->is_Java_thread() || ((JavaThread *)thread)->thread_state() == _thread_in_vm) 
++void Monitor::check_prelock_state(Thread *thread) {
++  assert((!thread->is_Java_thread() || ((JavaThread *)thread)->thread_state() == _thread_in_vm)
+          || rank() == Mutex::special, "wrong thread state for using locks");
+-  if (StrictSafepointChecks) { 
++  if (StrictSafepointChecks) {
+     if (thread->is_VM_thread() && !allow_vm_block()) {
+       fatal1("VM thread using lock %s (not allowed to block on)", name());
+     }
+@@ -427,17 +1344,13 @@
+   }
+ }
+ 
+-void Mutex::check_block_state(Thread *thread) {  
++void Monitor::check_block_state(Thread *thread) {
+   if (!_allow_vm_block && thread->is_VM_thread()) {
+     warning("VM thread blocked on lock");
+     print();
+     BREAKPOINT;
+-  }   
+-  assert(_owner != thread, "deadlock: blocking on monitor owned by current thread");    
+-}
+-
+-
+-void Mutex::trace(const char* operation) {
++  }
++  assert(_owner != thread, "deadlock: blocking on monitor owned by current thread");
+ }
+ 
+ #endif // PRODUCT
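
The rank discipline that set_owner_implementation() enforces above boils down
to: a thread may only acquire a lock whose rank is strictly below every rank
it already holds, so all threads take locks in one global order. A rough
sketch of just that check in plain C++ (not HotSpot code; ToyLock and
ToyThread are invented names, and the native/suspend_resume exceptions the
real code carves out are omitted):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct ToyLock { int rank; const char* name; };

    struct ToyThread {
        std::vector<ToyLock*> owned;              // like Thread::_owned_locks

        void acquire(ToyLock* l) {
            // Mirrors the fatal4("acquiring lock %s/%d out of order ...")
            // diagnostic above: every held rank must exceed the new rank.
            for (std::size_t i = 0; i < owned.size(); i++)
                assert(l->rank < owned[i]->rank && "possible deadlock");
            owned.push_back(l);
        }
        void release() { owned.pop_back(); }
    };

    int main() {
        ToyLock safepoint = { 10, "Safepoint_lock" };
        ToyLock tty       = { 0,  "tty_lock" };
        ToyThread t;
        t.acquire(&safepoint);
        t.acquire(&tty);       // fine: 0 < 10
        // Acquiring them in the opposite order would trip the assert.
        return 0;
    }
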
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/mutex.hpp openjdk/hotspot/src/share/vm/runtime/mutex.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/mutex.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/mutex.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)mutex.hpp	1.68 07/05/05 17:06:50 JVM"
+-#endif
+ /*
+  * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,52 +19,74 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+-#ifdef  ASSERT
+-class MutexHistogramElement : public HistogramElement {
+-  public:
+-   MutexHistogramElement(const char* elementname);
+-};
+-
+-class MutexContentionHistogramElement : public HistogramElement {
+-  public:
+-   MutexContentionHistogramElement(const char* elementname);
+-};
+-
+-
+-#endif
+-
+-// A simple Mutex for VM locking using OS primitives.  Note that
+-// Mutex locking is NOT guaranteed to interoperate with the fast
+-// object locking, which is intentional: it reduces reliance on the
+-// fast locking mechanism as it is developed and tuned, and gives us
+-// a way out of all the recursive locking rat-holes that appear when
+-// we try to use a single locking mechanism.
+-//
+-// Implementation adapted from WIN32 Q&A by Jeffery Richter
+-// (implementation should be moved into platform specific file)
++// The SplitWord construct allows us to colocate the contention queue
++// (cxq) with the lock-byte.  The queue elements are ParkEvents, which are
++// always aligned on 256-byte addresses - the least significant byte of
++// a ParkEvent is always 0.  Colocating the lock-byte with the queue
++// allows us to easily avoid what would otherwise be a race in lock()
++// if we were to use two completely separate fields for the contention queue
++// and the lock indicator.  Specifically, colocation renders us immune
++// from the race where a thread might enqueue itself in the lock() slow-path
++// immediately after the lock holder drops the outer lock in the unlock()
++// fast-path.
+ //
++// Colocation allows us to use a fast-path unlock() form that uses
++// A MEMBAR instead of a CAS.  MEMBAR has lower local latency than CAS
++// on many platforms.
+ //
+-//                NOTE WELL!!
++// See:
++// +  http://blogs.sun.com/dave/entry/biased_locking_in_hotspot
++// +  http://blogs.sun.com/dave/resource/synchronization-public2.pdf
+ //
++// Note that we're *not* using word-tearing in the classic sense.
++// The lock() fast-path will CAS the lockword and the unlock()
++// fast-path will store into the lock-byte colocated within the lockword.
++// We depend on the fact that all our reference platforms have
++// coherent and atomic byte accesses.  More precisely, byte stores
++// interoperate in a safe, sane, and expected manner with respect to
++// CAS, ST and LDs to the full-word containing the byte.
++// If you're porting HotSpot to a platform where that isn't the case
++// then you'll want to change the unlock() fast path from:
++//    STB;MEMBAR #storeload; LDN
++// to a full-word CAS of the lockword.
++
++
++union SplitWord {   // full-word with separately addressable LSB
++  volatile intptr_t FullWord ;
++  volatile void * Address ;
++  volatile jbyte Bytes [sizeof(intptr_t)] ;
++} ;
++
++// Endian-ness ... index of least-significant byte in SplitWord.Bytes[]
++#ifdef AMD64        // little
++ #define _LSBINDEX 0
++#else
++#if IA32            // little
++ #define _LSBINDEX 0
++#else
++#ifdef SPARC        // big
++ #define _LSBINDEX (sizeof(intptr_t)-1)
++#else
++ #error "unknown architecture"
++#endif
++#endif
++#endif
++
++class ParkEvent ;
++
+ // See orderAccess.hpp.  We assume throughout the VM that mutex lock and
+ // try_lock do fence-lock-acquire, and that unlock does a release-unlock,
+ // *in that order*.  If their implementations change such that these
+ // assumptions are violated, a whole lot of code will break.
+ 
+-class Mutex : public CHeapObj {
+- private:
+-  // The following methods are machine-specific, and defined in mutex_<os>.inline.hpp or mutex_<os>.cpp
+-  bool lock_implementation();
+-  bool try_lock_implementation();
+-  void wait_for_lock_implementation();
+-  void wait_for_lock_blocking_implementation(JavaThread *thread);
++class Monitor : public CHeapObj {
+ 
+  public:
+-  // A special lock: Is a lock where you are guarantteed not to block while you are holding it, i.e., no
+-  // vm operation can happen, taking other locks, etc. 
++  // A special lock: a lock where you are guaranteed not to block while you are
++  // holding it, i.e., no vm operation can happen, taking other locks, etc.
+   // NOTE: It is critical that the rank 'special' be the lowest (earliest)
+   // (except for "event"?) for the deadlock dection to work correctly.
+   // The rank native is only for use in Mutex's created by JVM_RawMonitorCreate,
+@@ -76,7 +95,7 @@
+   // safepoint and leaving a safepoint.  It is only used for the Safepoint_lock
+   // currently.  While at a safepoint no mutexes of rank safepoint are held
+   // by any thread.
+-  // The rank named "leaf" is probably historical (and should 
++  // The rank named "leaf" is probably historical (and should
+   // be changed) -- mutexes of this rank aren't really leaf mutexes
+   // at all.
+   enum lock_types {
+@@ -91,57 +110,95 @@
+        native      = max_nonleaf    +   1
+   };
+ 
+- protected:     
+-  // Fields
+-  jint              _lock_count; 
+-  void*             _lock_event;  
+-  volatile bool     _suppress_signal;    // Used for sneaky locking
+-  Thread* volatile  _owner;              // The owner of the lock
+-  const char* _name;                     // Name of mutex  
+-#ifdef ASSERT
+-  MutexHistogramElement *_histogram;
+-  MutexContentionHistogramElement *_contend_histogram;
+-#endif
++  // The WaitSet and EntryList linked lists are composed of ParkEvents.
++  // I use ParkEvent instead of threads as ParkEvents are immortal and
++  // type-stable, meaning we can safely unpark() a possibly stale
++  // list element in the unlock()-path.
++
++ protected:                              // Monitor-Mutex metadata
++  SplitWord _LockWord ;                  // Contention queue (cxq) colocated with Lock-byte
++  enum LockWordBits { _LBIT=1 } ;
++  Thread * volatile _owner;              // The owner of the lock
++                                         // Consider sequestering _owner on its own $line
++                                         // to aid future synchronization mechanisms.
++  ParkEvent * volatile _EntryList ;      // List of threads waiting for entry
++  ParkEvent * volatile _OnDeck ;         // heir-presumptive
++  volatile intptr_t _WaitLock [1] ;      // Protects _WaitSet
++  ParkEvent * volatile  _WaitSet ;       // LL of ParkEvents
++  volatile bool     _snuck;              // Used for sneaky locking (evil).
++  const char * _name;                    // Name of mutex
++  int NotifyCount ;                      // diagnostic assist
++  double pad [8] ;                       // avoid false sharing
+ 
+   // Debugging fields for naming, deadlock detection, etc. (some only used in debug mode)
+ #ifndef PRODUCT
+   bool      _allow_vm_block;
+-  debug_only(int _rank;)     	   // rank (to avoid/detect potential deadlocks)
+-  debug_only(Mutex* _next;)        // Used by a Thread to link up owned locks    
+-  debug_only(Thread* _last_owner;) // the last thread to own the lock    
+-  debug_only(static bool contains(Mutex* locks, Mutex* lock);)
+-  debug_only(static Mutex* get_least_ranked_lock(Mutex* locks);)
+-  debug_only(Mutex* get_least_ranked_lock_besides_this(Mutex* locks);)
++  debug_only(int _rank;)                 // rank (to avoid/detect potential deadlocks)
++  debug_only(Monitor * _next;)           // Used by a Thread to link up owned locks
++  debug_only(Thread* _last_owner;)       // the last thread to own the lock
++  debug_only(static bool contains(Monitor * locks, Monitor * lock);)
++  debug_only(static Monitor * get_least_ranked_lock(Monitor * locks);)
++  debug_only(Monitor * get_least_ranked_lock_besides_this(Monitor * locks);)
+ #endif
+-  
++
+   void set_owner_implementation(Thread* owner)                        PRODUCT_RETURN;
+-  void trace                   (const char* operation)                PRODUCT_RETURN;
+   void check_prelock_state     (Thread* thread)                       PRODUCT_RETURN;
+   void check_block_state       (Thread* thread)                       PRODUCT_RETURN;
+ 
+   // platform-dependent support code can go here (in os_<os_family>.cpp)
+-  friend class MutexImplementation;
+-  friend class RawMonitor;
+  public:
+   enum {
+     _no_safepoint_check_flag    = true,
+     _allow_vm_block_flag        = true,
+     _as_suspend_equivalent_flag = true
+-  };	
++  };
++
++  enum WaitResults {
++    CONDVAR_EVENT,         // Wait returned because of condition variable notification
++    INTERRUPT_EVENT,       // Wait returned because waiting thread was interrupted
++    NUMBER_WAIT_RESULTS
++  };
++
++ private:
++   int  TrySpin (Thread * Self) ;
++   int  TryLock () ;
++   int  TryFast () ;
++   int  AcquireOrPush (ParkEvent * ev) ;
++   void IUnlock (bool RelaxAssert) ;
++   void ILock (Thread * Self) ;
++   int  IWait (Thread * Self, jlong timo);
++   int  ILocked () ;
++
++ protected:
++   static void ClearMonitor (Monitor * m) ;
++   Monitor() ;
++
++ public:
++  Monitor(int rank, const char *name, bool allow_vm_block=false);
++  ~Monitor();
++
++  // Wait until monitor is notified (or times out).
++  // Defaults are to make safepoint checks, wait time is forever (i.e.,
++  // zero), and not a suspend-equivalent condition. Returns true if wait
++  // times out; otherwise returns false.
++  bool wait(bool no_safepoint_check = !_no_safepoint_check_flag,
++            long timeout = 0,
++            bool as_suspend_equivalent = !_as_suspend_equivalent_flag);
++  bool notify();
++  bool notify_all();
++
+ 
+-  Mutex(int rank, const char *name, bool allow_vm_block = !_allow_vm_block_flag);
+-  ~Mutex();
+-  
+-  void lock(); // prints out warning if VM thread blocks 
++  void lock(); // prints out warning if VM thread blocks
+   void lock(Thread *thread); // overloaded with current thread
+   void unlock();
+-  bool is_locked() const                     { return _owner != INVALID_THREAD; }
++  bool is_locked() const                     { return _owner != NULL; }
+ 
+   bool try_lock(); // Like lock(), but unblocking. It returns false instead
+ 
+   // Lock without safepoint check. Should ONLY be used by safepoint code and other code
+   // that is guaranteed not to block while running inside the VM.
+-  void lock_without_safepoint_check();    
++  void lock_without_safepoint_check();
++  void lock_without_safepoint_check (Thread * Self) ;
+ 
+   // Current owner - not MT-safe. Can only be used to guarantee that
+   // the current running thread owns the lock
+@@ -158,90 +215,94 @@
+ 
+   #ifndef PRODUCT
+     void print_on(outputStream* st) const;
+-    void print() const			    { print_on(tty); }
++    void print() const                      { print_on(tty); }
+     debug_only(int    rank() const          { return _rank; })
+     bool   allow_vm_block()                 { return _allow_vm_block; }
+-    
+-    debug_only(Mutex *next()  const         { return _next; }) 
+-    debug_only(void   set_next(Mutex *next) { _next = next; })       
++
++    debug_only(Monitor *next()  const         { return _next; })
++    debug_only(void   set_next(Monitor *next) { _next = next; })
+   #endif
+-  
++
+   void set_owner(Thread* owner) {
+   #ifndef PRODUCT
+     set_owner_implementation(owner);
+-    debug_only(void verify_mutex_rank(Thread* thr));
+-  #else  
++    debug_only(void verify_Monitor(Thread* thr));
++  #else
+     _owner = owner;
+   #endif
+   }
+ 
+-  static Thread* INVALID_THREAD; // Value of _owner when unowned. (i.e., lock is unlocked)
+ };
+ 
++// Normally we'd expect Monitor to extend Mutex in the sense that a monitor
++// constructed from pthreads primitives might extend a mutex by adding
++// a condvar and some extra metadata.  In fact this was the case until J2SE7.
++//
++// Currently, however, the base object is a monitor.  Monitor contains all the
++// logic for wait(), notify(), etc.   Mutex extends monitor and restricts the
++// visibility of wait(), notify(), and notify_all().
++//
++// Another viable alternative would have been to have Monitor extend Mutex and
++// implement all the normal mutex and wait()-notify() logic in Mutex base class.
++// The wait()-notify() facility would be exposed via special protected member functions
++// (e.g., _Wait() and _Notify()) in Mutex.  Monitor would extend Mutex and expose wait()
++// as a call to _Wait().  That is, the public wait() would be a wrapper for the protected
++// _Wait().
++//
++// An even better alternative is to simply eliminate Mutex:: and use Monitor:: instead.
++// After all, monitors are sufficient for Java-level synchronization.   At one point in time
++// there may have been some benefit to having distinct mutexes and monitors, but that time
++// has past.
++//
++// The Mutex/Monitor design parallels that of Java-monitors, being based on
++// thread-specific park-unpark platform-specific primitives.
+ 
+-// A Monitor is a Mutex with a built in condition variable. It allows a thread, to
+-// temporarily to give up the lock and wait on the lock, until it is notified.
+-class Monitor : public Mutex {
+- protected:
+-  void* _event; 	// Manual-reset event for notifications
+-  long _counter;	// Current number of notifications
+-  long _waiters;	// Number of threads waiting for notification
+-  long _tickets;	// Number of waiters to be notified
+ 
+-  enum WaitResults {
+-    CONDVAR_EVENT,         // Wait returned because of condition variable notification
+-    INTERRUPT_EVENT,       // Wait returned because waiting thread was interrupted
+-    NUMBER_WAIT_RESULTS
+-  };
+-
+-  friend class RawMonitor;
++class Mutex : public Monitor {      // degenerate Monitor
+  public:
+-  Monitor(int rank, const char *name, bool allow_vm_block=false);
+-  ~Monitor();
+-
+-  // Wait until monitor is notified (or times out).
+-  // Defaults are to make safepoint checks, wait time is forever (i.e.,
+-  // zero), and not a suspend-equivalent condition. Returns true if wait
+-  // times out; otherwise returns false.
+-  bool wait(bool no_safepoint_check = !_no_safepoint_check_flag,
+-            long timeout = 0,
+-            bool as_suspend_equivalent = !_as_suspend_equivalent_flag);
+-  bool notify();
+-  bool notify_all();
++   Mutex (int rank, const char *name, bool allow_vm_block=false);
++   ~Mutex () ;
++ private:
++   bool notify ()    { ShouldNotReachHere(); return false; }
++   bool notify_all() { ShouldNotReachHere(); return false; }
++   bool wait (bool no_safepoint_check, long timeout, bool as_suspend_equivalent) {
++     ShouldNotReachHere() ;
++     return false ;
++   }
+ };
+ 
+ /*
+  * Per-thread blocking support for JSR166. See the Java-level
+  * Documentation for rationale. Basically, park acts like wait, unpark
+  * like notify.
+- * 
++ *
+  * 6271289 --
+  * To avoid errors where an os thread expires but the JavaThread still
+- * exists, Parkers are immortal (type-stable) and are recycled across 
+- * new threads.  This parallels the Event and ParkEvent implementation.
+- * Because park-unpark alow spurious wakeups it is harmless if an
+- * unpark call unparks a new thread using the old Parker reference.  
++ * exists, Parkers are immortal (type-stable) and are recycled across
++ * new threads.  This parallels the ParkEvent implementation.
++ * Because park-unpark allow spurious wakeups it is harmless if an
++ * unpark call unparks a new thread using the old Parker reference.
+  *
+  * In the future we'll want to think about eliminating Parker and using
+- * ParkEvent instead.  There's consider duplication between the two
+- * services.  
++ * ParkEvent instead.  There's considerable duplication between the two
++ * services.
+  *
+  */
+ 
+ class Parker : public os::PlatformParker {
+ private:
+-  volatile int _counter ; 
++  volatile int _counter ;
+   Parker * FreeNext ;
+   JavaThread * AssociatedWith ; // Current association
+-  
++
+ public:
+-  Parker() : PlatformParker() { 
+-    _counter       = 0 ; 
+-    FreeNext       = NULL ; 
+-    AssociatedWith = NULL ; 
+-  } 
++  Parker() : PlatformParker() {
++    _counter       = 0 ;
++    FreeNext       = NULL ;
++    AssociatedWith = NULL ;
++  }
+ protected:
+-  ~Parker() { ShouldNotReachHere(); } 
++  ~Parker() { ShouldNotReachHere(); }
+ public:
+   // For simplicity of interface with Java, all forms of park (indefinite,
+   // relative, and absolute) are multiplexed into one call.
+@@ -255,4 +316,3 @@
+   static Parker * volatile FreeList ;
+   static volatile int ListLock ;
+ };
+-
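
The SplitWord layout described at the top of this header is easy to see in
isolation. A toy (not HotSpot code; little-endian assumed, matching the
_LSBINDEX of 0 chosen above for AMD64/IA32, where SPARC would index
sizeof(intptr_t)-1; it relies on union type-punning exactly as the real
SplitWord does, which the compilers HotSpot targets support):

    #include <cstdio>
    #include <cstdint>

    union ToySplitWord {                  // same shape as SplitWord above
        volatile std::intptr_t FullWord;
        volatile signed char   Bytes[sizeof(std::intptr_t)];
    };

    int main() {
        ToySplitWord w;
        w.FullWord = 0;
        w.Bytes[0] = 1;                   // set the colocated lock byte (_LBIT)
        // The fast paths rely on exactly this interop: a one-byte store
        // is visible to full-word loads/CAS of the same lockword.
        std::printf("FullWord = %ld\n", (long) w.FullWord);   // prints 1
        return 0;
    }
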
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/mutexLocker.cpp openjdk/hotspot/src/share/vm/runtime/mutexLocker.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/mutexLocker.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/mutexLocker.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)mutexLocker.cpp	1.178 07/06/19 03:54:19 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,13 +19,19 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+ #include "incls/_mutexLocker.cpp.incl"
+ 
+-// Mutexes used in the VM (see comment in mutexLocker.hpp)
++// Mutexes used in the VM (see comment in mutexLocker.hpp):
++//
++// Note that the following pointers are effectively final -- after having been
++// set at JVM startup-time, they should never be subsequently mutated.
++// Instead of using pointers to malloc()ed monitors and mutexes we should consider
++// eliminating the indirection and using instances instead.
++// Consider using GCC's __read_mostly.
+ 
+ Mutex*   Patching_lock                = NULL;
+ Monitor* SystemDictionary_lock        = NULL;
+@@ -43,7 +46,7 @@
+ Mutex*   JfieldIdCreation_lock        = NULL;
+ Monitor* JNICritical_lock             = NULL;
+ Mutex*   JvmtiThreadState_lock        = NULL;
+-Monitor* JvmtiPendingEvent_lock	      = NULL;
++Monitor* JvmtiPendingEvent_lock       = NULL;
+ Mutex*   Heap_lock                    = NULL;
+ Mutex*   ExpandHeap_lock              = NULL;
+ Mutex*   AdapterHandlerLibrary_lock   = NULL;
+@@ -65,7 +68,7 @@
+ Monitor* iCMS_lock                    = NULL;
+ Monitor* FullGCCount_lock             = NULL;
+ Mutex*   ParGCRareEvent_lock          = NULL;
+-Mutex*	 DerivedPointerTableGC_lock   = NULL;
++Mutex*   DerivedPointerTableGC_lock   = NULL;
+ Mutex*   Compile_lock                 = NULL;
+ Monitor* MethodCompileQueue_lock      = NULL;
+ #ifdef TIERED
+@@ -105,11 +108,11 @@
+ Monitor* LowMemory_lock               = NULL;
+ 
+ #define MAX_NUM_MUTEX 128
+-static Mutex* _mutex_array[MAX_NUM_MUTEX];
++static Monitor * _mutex_array[MAX_NUM_MUTEX];
+ static int _num_mutex;
+ 
+ #ifdef ASSERT
+-void assert_locked_or_safepoint(const Mutex* lock) {
++void assert_locked_or_safepoint(const Monitor * lock) {
+   // check if this thread owns the lock (common case)
+   if (IgnoreLockingAssertions) return;
+   assert(lock != NULL, "Need non-NULL lock");
+@@ -123,7 +126,7 @@
+ }
+ 
+ // a stronger assertion than the above
+-void assert_lock_strong(const Mutex* lock) {
++void assert_lock_strong(const Monitor * lock) {
+   if (IgnoreLockingAssertions) return;
+   assert(lock != NULL, "Need non-NULL lock");
+   if (lock->owned_by_self()) return;
+@@ -138,7 +141,7 @@
+   _mutex_array[_num_mutex++] = var;                               \
+ }
+ 
+-void mutex_init() {  
++void mutex_init() {
+   def(tty_lock                     , Mutex  , event,       true ); // allow to lock in VM
+ 
+   def(CGC_lock                   , Monitor, special,     true ); // coordinate between fore- and background GC
+@@ -151,7 +154,7 @@
+   def(DerivedPointerTableGC_lock   , Mutex,   leaf,        true );
+   def(CodeCache_lock               , Mutex  , special,     true );
+   def(Interrupt_lock               , Monitor, special,     true ); // used for interrupt processing
+-  def(RawMonitor_lock              , Mutex,   special,     true ); 
++  def(RawMonitor_lock              , Mutex,   special,     true );
+   def(OopMapCacheAlloc_lock        , Mutex,   leaf,        true ); // used for oop_map_cache allocation.
+ 
+   def(Patching_lock                , Mutex  , special,     true ); // used for safepointing and code patching.
+@@ -189,14 +192,14 @@
+     def(SerializePage_lock         , Monitor, leaf,        true );
+   }
+ 
+-  def(Threads_lock                 , Monitor, barrier,     true ); 
++  def(Threads_lock                 , Monitor, barrier,     true );
+ 
+-  def(VMOperationQueue_lock        , Monitor, nonleaf,     true ); // VM_thread allowed to block on these     
+-  def(VMOperationRequest_lock      , Monitor, nonleaf,     true ); 
++  def(VMOperationQueue_lock        , Monitor, nonleaf,     true ); // VM_thread allowed to block on these
++  def(VMOperationRequest_lock      , Monitor, nonleaf,     true );
+   def(RetData_lock                 , Mutex  , nonleaf,     false);
+   def(Terminator_lock              , Monitor, nonleaf,     true );
+   def(VtableStubs_lock             , Mutex  , nonleaf,     true );
+-  def(Notify_lock                  , Monitor, nonleaf,     true ); 
++  def(Notify_lock                  , Monitor, nonleaf,     true );
+   def(JNIGlobalHandle_lock         , Mutex  , nonleaf,     true ); // locks JNIHandleBlockFreeList_lock
+   def(JNICritical_lock             , Monitor, nonleaf,     true ); // used for JNI critical regions
+   def(AdapterHandlerLibrary_lock   , Mutex  , nonleaf,     true);
+@@ -209,30 +212,30 @@
+   def(JNICachedItableIndex_lock    , Mutex  , nonleaf+1,   false); // Used to cache an itable index during JNI invoke
+ 
+   def(CompiledIC_lock              , Mutex  , nonleaf+2,   false); // locks VtableStubs_lock, InlineCacheBuffer_lock
+-  def(CompileTaskAlloc_lock        , Mutex  , nonleaf+2,   true ); 
+-  def(CompileStatistics_lock       , Mutex  , nonleaf+2,   false); 
++  def(CompileTaskAlloc_lock        , Mutex  , nonleaf+2,   true );
++  def(CompileStatistics_lock       , Mutex  , nonleaf+2,   false);
+   def(MultiArray_lock              , Mutex  , nonleaf+2,   false); // locks SymbolTable_lock
+ 
+   def(JvmtiThreadState_lock        , Mutex  , nonleaf+2,   false); // Used by JvmtiThreadState/JvmtiEventController
+-  def(JvmtiPendingEvent_lock	   , Monitor, nonleaf,     false); // Used by JvmtiCodeBlobEvents
++  def(JvmtiPendingEvent_lock       , Monitor, nonleaf,     false); // Used by JvmtiCodeBlobEvents
+   def(Management_lock              , Mutex  , nonleaf+2,   false); // used for JVM management
+ 
+-  def(Compile_lock                 , Mutex  , nonleaf+3,   true ); 
++  def(Compile_lock                 , Mutex  , nonleaf+3,   true );
+   def(MethodData_lock              , Mutex  , nonleaf+3,   false);
+ 
+-  def(MethodCompileQueue_lock      , Monitor, nonleaf+4,   true ); 
++  def(MethodCompileQueue_lock      , Monitor, nonleaf+4,   true );
+   def(Debug2_lock                  , Mutex  , nonleaf+4,   true );
+   def(Debug3_lock                  , Mutex  , nonleaf+4,   true );
+   def(ProfileVM_lock               , Monitor, nonleaf+4,   false); // used for profiling of the VMThread
+-  def(CompileThread_lock           , Monitor, nonleaf+5,   false ); 
++  def(CompileThread_lock           , Monitor, nonleaf+5,   false );
+ #ifdef TIERED
+-  def(C1_lock                      , Monitor, nonleaf+5,   false ); 
++  def(C1_lock                      , Monitor, nonleaf+5,   false );
+ #endif // TIERED
+ 
+ 
+ }
+ 
+-GCMutexLocker::GCMutexLocker(Mutex* mutex) {
++GCMutexLocker::GCMutexLocker(Monitor * mutex) {
+   if (SafepointSynchronize::is_at_safepoint()) {
+     _locked = false;
+   } else {
+@@ -249,7 +252,7 @@
+   bool none = true;
+   for (int i = 0; i < _num_mutex; i++) {
+      // see if it has an owner
+-     if (_mutex_array[i]->owner() != Mutex::INVALID_THREAD) {
++     if (_mutex_array[i]->owner() != NULL) {
+        if (none) {
+           // print format used by Mutex::print_on_error()
+           st->print_cr(" ([mutex/lock_event])");
+@@ -261,4 +264,3 @@
+   }
+   if (none) st->print_cr("None");
+ }
+-
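
For orientation when reading the def(...) table above: each line constructs
one lock with a fixed rank and registers it in _mutex_array so that
print_owned_locks() can walk every VM lock during error reporting. Only the
tail of the macro is visible in this hunk, but a line such as

    def(Threads_lock, Monitor, barrier, true);

expands to approximately:

    Threads_lock = new Monitor(Mutex::barrier, "Threads_lock",
                               /* allow_vm_block */ true);
    _mutex_array[_num_mutex++] = Threads_lock;
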
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/mutexLocker.hpp openjdk/hotspot/src/share/vm/runtime/mutexLocker.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/mutexLocker.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/mutexLocker.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)mutexLocker.hpp	1.150 07/06/19 03:54:12 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Mutexes used in the VM.
+@@ -40,7 +37,7 @@
+ extern Mutex*   JfieldIdCreation_lock;           // a lock on creating JNI static field identifiers
+ extern Monitor* JNICritical_lock;                // a lock used while entering and exiting JNI critical regions, allows GC to sometimes get in
+ extern Mutex*   JvmtiThreadState_lock;           // a lock on modification of JVMTI thread data
+-extern Monitor*	JvmtiPendingEvent_lock;		 // a lock on the JVMTI pending events list
++extern Monitor* JvmtiPendingEvent_lock;          // a lock on the JVMTI pending events list
+ extern Mutex*   Heap_lock;                       // a lock on the heap
+ extern Mutex*   ExpandHeap_lock;                 // a lock on expanding the heap
+ extern Mutex*   AdapterHandlerLibrary_lock;      // a lock on the AdapterHandlerLibrary
+@@ -51,14 +48,14 @@
+ extern Mutex*   CodeCache_lock;                  // a lock on the CodeCache, rank is special, use MutexLockerEx
+ extern Mutex*   MethodData_lock;                 // a lock on installation of method data
+ extern Mutex*   RetData_lock;                    // a lock on installation of RetData inside method data
+-extern Mutex*	DerivedPointerTableGC_lock;	 // a lock to protect the derived pointer table
+-extern Monitor* VMOperationQueue_lock;	         // a lock on queue of vm_operations waiting to execute
++extern Mutex*   DerivedPointerTableGC_lock;      // a lock to protect the derived pointer table
++extern Monitor* VMOperationQueue_lock;           // a lock on queue of vm_operations waiting to execute
+ extern Monitor* VMOperationRequest_lock;         // a lock on Threads waiting for a vm_operation to terminate
+ extern Monitor* Safepoint_lock;                  // a lock used by the safepoint abstraction
+ extern Monitor* SerializePage_lock;              // a lock used when VMThread changing serialize memory page permission during safepoint
+-extern Monitor* Threads_lock;                    // a lock on the Threads table of active Java threads 
++extern Monitor* Threads_lock;                    // a lock on the Threads table of active Java threads
+                                                 // (also used by Safepoints to block thread creation/destruction)
+-extern Monitor* CGC_lock;                        // used for coordination between 
++extern Monitor* CGC_lock;                        // used for coordination between
+                                                  // fore- & background GC threads.
+ extern Mutex*   STS_init_lock;                   // coordinate initialization of SuspendibleThreadSets.
+ extern Monitor* SLT_lock;                        // used in CMS GC for acquiring PLL
+@@ -91,7 +88,7 @@
+ extern Mutex*   Debug2_lock;                     // down synchronization related bugs!
+ extern Mutex*   Debug3_lock;
+ 
+-extern Mutex*   RawMonitor_lock;             
++extern Mutex*   RawMonitor_lock;
+ extern Mutex*   PerfDataMemAlloc_lock;           // a lock on the allocator for PerfData memory for performance data
+extern Mutex*   PerfDataManager_lock;            // a lock on access to PerfDataManager resources
+ extern Mutex*   ParkerFreeList_lock;
+@@ -122,9 +119,9 @@
+ 
+ class MutexLocker: StackObj {
+  private:
+-  Mutex* _mutex;
++  Monitor * _mutex;
+  public:
+-  MutexLocker(Mutex* mutex) {
++  MutexLocker(Monitor * mutex) {
+     assert(mutex->rank() != Mutex::special,
+       "Special ranked mutex should only use MutexLockerEx");
+     _mutex = mutex;
+@@ -132,14 +129,14 @@
+   }
+ 
+   // Overloaded constructor passing current thread
+-  MutexLocker(Mutex* mutex, Thread *thread) {
++  MutexLocker(Monitor * mutex, Thread *thread) {
+     assert(mutex->rank() != Mutex::special,
+       "Special ranked mutex should only use MutexLockerEx");
+     _mutex = mutex;
+     _mutex->lock(thread);
+   }
+ 
+-  ~MutexLocker() {    
++  ~MutexLocker() {
+     _mutex->unlock();
+   }
+ 
+@@ -147,8 +144,8 @@
+ 
+ // for debugging: check that we're already owning this lock (or are at a safepoint)
+ #ifdef ASSERT
+-void assert_locked_or_safepoint(const Mutex* lock);
+-void assert_lock_strong(const Mutex* lock);
++void assert_locked_or_safepoint(const Monitor * lock);
++void assert_lock_strong(const Monitor * lock);
+ #else
+ #define assert_locked_or_safepoint(lock)
+ #define assert_lock_strong(lock)
+@@ -163,17 +160,17 @@
+ 
+ class MutexLockerEx: public StackObj {
+  private:
+-  Mutex* _mutex;
++  Monitor * _mutex;
+  public:
+-  MutexLockerEx(Mutex* mutex, bool no_safepoint_check = !Mutex::_no_safepoint_check_flag) {
++  MutexLockerEx(Monitor * mutex, bool no_safepoint_check = !Mutex::_no_safepoint_check_flag) {
+     _mutex = mutex;
+     if (_mutex != NULL) {
+       assert(mutex->rank() > Mutex::special || no_safepoint_check,
+-	"Mutexes with rank special or lower should not do safepoint checks");
++        "Mutexes with rank special or lower should not do safepoint checks");
+       if (no_safepoint_check)
+-	_mutex->lock_without_safepoint_check();
++        _mutex->lock_without_safepoint_check();
+       else
+-	_mutex->lock();
++        _mutex->lock();
+     }
+   }
+ 
+@@ -190,7 +187,7 @@
+ 
+ class MonitorLockerEx: public MutexLockerEx {
+  private:
+-  Monitor* _monitor;
++  Monitor * _monitor;
+  public:
+   MonitorLockerEx(Monitor* monitor,
+                   bool no_safepoint_check = !Mutex::_no_safepoint_check_flag):
+@@ -242,10 +239,10 @@
+ 
+ class GCMutexLocker: public StackObj {
+ private:
+-  Mutex* _mutex;
++  Monitor * _mutex;
+   bool _locked;
+ public:
+-  GCMutexLocker(Mutex* mutex);
++  GCMutexLocker(Monitor * mutex);
+   ~GCMutexLocker() { if (_locked) _mutex->unlock(); }
+ };
+ 
+@@ -256,10 +253,10 @@
+ 
+ class MutexUnlocker: StackObj {
+  private:
+-  Mutex* _mutex;
++  Monitor * _mutex;
+ 
+  public:
+-  MutexUnlocker(Mutex* mutex) {
++  MutexUnlocker(Monitor * mutex) {
+     _mutex = mutex;
+     _mutex->unlock();
+   }
+@@ -274,11 +271,11 @@
+ 
+ class MutexUnlockerEx: StackObj {
+  private:
+-  Mutex* _mutex;
++  Monitor * _mutex;
+   bool _no_safepoint_check;
+ 
+  public:
+-  MutexUnlockerEx(Mutex* mutex, bool no_safepoint_check = !Mutex::_no_safepoint_check_flag) {
++  MutexUnlockerEx(Monitor * mutex, bool no_safepoint_check = !Mutex::_no_safepoint_check_flag) {
+     _mutex = mutex;
+     _no_safepoint_check = no_safepoint_check;
+     _mutex->unlock();
+@@ -305,25 +302,24 @@
+ //
+ class VerifyMutexLocker: StackObj {
+  private:
+-  Mutex* _mutex;
++  Monitor * _mutex;
+   bool   _reentrant;
+  public:
+-  VerifyMutexLocker(Mutex* mutex) {
++  VerifyMutexLocker(Monitor * mutex) {
+     _mutex     = mutex;
+     _reentrant = mutex->owned_by_self();
+     if (!_reentrant) {
+       // We temporarily disable strict safepoint checking while we acquire the lock
+-      FlagSetting fs(StrictSafepointChecks, false);    
++      FlagSetting fs(StrictSafepointChecks, false);
+       _mutex->lock();
+     }
+   }
+-  
++
+   ~VerifyMutexLocker() {
+-    if (!_reentrant) {     
++    if (!_reentrant) {
+       _mutex->unlock();
+     }
+   }
+ };
+ 
+ #endif
+-
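
The locker classes above are all the same RAII idiom: acquire in the
constructor, release in the destructor, so every exit path drops the lock. A
self-contained toy of the MutexLocker shape (invented names; printf stands in
for real locking):

    #include <cstdio>

    struct ToyMutex {
        const char* name;
        void lock()   { std::printf("lock   %s\n", name); }
        void unlock() { std::printf("unlock %s\n", name); }
    };

    class ToyMutexLocker {               // same shape as MutexLocker above
        ToyMutex* _mutex;
     public:
        explicit ToyMutexLocker(ToyMutex* m) : _mutex(m) { _mutex->lock(); }
        ~ToyMutexLocker() { _mutex->unlock(); }
    };

    int main() {
        ToyMutex heap = { "Heap_lock" };
        {
            ToyMutexLocker ml(&heap);    // held for this scope only
            // ... work under the lock ...
        }                                // unlocked here, as in ~MutexLocker()
        return 0;
    }

MutexLockerEx adds the no-safepoint-check variant needed for special-ranked
locks, and MutexUnlocker simply inverts the pattern.
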
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/objectMonitor.hpp openjdk/hotspot/src/share/vm/runtime/objectMonitor.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/objectMonitor.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/objectMonitor.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)objectMonitor.hpp	1.41 07/05/17 16:06:18 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // WARNING:
+@@ -60,7 +57,7 @@
+   static int object_offset_in_bytes()      { return offset_of(ObjectMonitor, _object);     }
+   static int owner_offset_in_bytes()       { return offset_of(ObjectMonitor, _owner);      }
+   static int count_offset_in_bytes()       { return offset_of(ObjectMonitor, _count);      }
+-  static int recursions_offset_in_bytes()  { return offset_of(ObjectMonitor, _recursions); } 
++  static int recursions_offset_in_bytes()  { return offset_of(ObjectMonitor, _recursions); }
+   static int cxq_offset_in_bytes()         { return offset_of(ObjectMonitor, _cxq) ;       }
+   static int succ_offset_in_bytes()        { return offset_of(ObjectMonitor, _succ) ;      }
+   static int EntryList_offset_in_bytes()   { return offset_of(ObjectMonitor, _EntryList);  }
+@@ -72,8 +69,8 @@
+  public:
+   // Eventually we'll make provisions for multiple callbacks, but
+   // for now one will suffice.
+-  static int (*SpinCallbackFunction)(intptr_t, int) ; 
+-  static intptr_t SpinCallbackArgument ; 
++  static int (*SpinCallbackFunction)(intptr_t, int) ;
++  static intptr_t SpinCallbackArgument ;
+ 
+ 
+  public:
+@@ -85,7 +82,7 @@
+ 
+   intptr_t  is_busy() const;
+   intptr_t  is_entered(Thread* current) const;
+-  
++
+   void*     owner() const;
+   void      set_owner(void* owner);
+ 
+@@ -93,11 +90,11 @@
+ 
+   intptr_t  count() const;
+   void      set_count(intptr_t count);
+-  intptr_t  contentions() const ; 
++  intptr_t  contentions() const ;
+ 
+   // JVM/DI GetMonitorInfo() needs this
+-  Thread *  thread_of_waiter (ObjectWaiter *) ; 
+-  ObjectWaiter * first_waiter () ; 
++  Thread *  thread_of_waiter (ObjectWaiter *) ;
++  ObjectWaiter * first_waiter () ;
+   ObjectWaiter * next_waiter(ObjectWaiter* o);
+ 
+   intptr_t  recursions() const { return _recursions; }
+@@ -106,15 +103,15 @@
+   void*     object_addr();
+   void      set_object(void* obj);
+ 
+-  bool      check(TRAPS);	// true if the thread owns the monitor.
++  bool      check(TRAPS);       // true if the thread owns the monitor.
+   void      check_slow(TRAPS);
+   void      clear();
+ #ifndef PRODUCT
+   void      verify();
+   void      print();
+ #endif
+-  
+-  bool      try_enter (TRAPS) ; 
++
++  bool      try_enter (TRAPS) ;
+   void      enter(TRAPS);
+   void      exit(TRAPS);
+   void      wait(jlong millis, bool interruptable, TRAPS);
+@@ -133,39 +130,38 @@
+ 
+  private:
+   // JVMTI support -- remove ASAP
+-  int       SimpleEnter (Thread * Self) ; 
+-  int       SimpleExit  (Thread * Self) ; 
+-  int       SimpleWait  (Thread * Self, jlong millis) ; 
+-  int       SimpleNotify (Thread * Self, bool All) ; 
++  int       SimpleEnter (Thread * Self) ;
++  int       SimpleExit  (Thread * Self) ;
++  int       SimpleWait  (Thread * Self, jlong millis) ;
++  int       SimpleNotify (Thread * Self, bool All) ;
+ 
+  private:
+-  void      Recycle () ; 
+-  void      AddWaiter (ObjectWaiter * waiter) ; 
++  void      Recycle () ;
++  void      AddWaiter (ObjectWaiter * waiter) ;
+ 
+-  ObjectWaiter * DequeueWaiter () ; 
++  ObjectWaiter * DequeueWaiter () ;
+   void      DequeueSpecificWaiter (ObjectWaiter * waiter) ;
+-  void      EnterI (TRAPS) ; 
+-  void      ReenterI (Thread * Self, ObjectWaiter * SelfNode) ; 
+-  void      UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode) ; 
+-  int       TryLock (Thread * Self) ; 
+-  int       NotRunnable (Thread * Self, Thread * Owner) ; 
+-  int       TrySpin_Fixed (Thread * Self) ; 
+-  int       TrySpin_VaryFrequency (Thread * Self) ; 
+-  int       TrySpin_VaryDuration  (Thread * Self) ; 
+-  void      ctAsserts () ; 
+-  void      ExitEpilog (Thread * Self, ObjectWaiter * Wakee) ; 
+-  bool      ExitSuspendEquivalent (JavaThread * Self) ; 
+-   
++  void      EnterI (TRAPS) ;
++  void      ReenterI (Thread * Self, ObjectWaiter * SelfNode) ;
++  void      UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode) ;
++  int       TryLock (Thread * Self) ;
++  int       NotRunnable (Thread * Self, Thread * Owner) ;
++  int       TrySpin_Fixed (Thread * Self) ;
++  int       TrySpin_VaryFrequency (Thread * Self) ;
++  int       TrySpin_VaryDuration  (Thread * Self) ;
++  void      ctAsserts () ;
++  void      ExitEpilog (Thread * Self, ObjectWaiter * Wakee) ;
++  bool      ExitSuspendEquivalent (JavaThread * Self) ;
++
+  private:
+   friend class ObjectSynchronizer;
+   friend class ObjectWaiter;
+-  friend class RawMonitor;
+   friend class VMStructs;
+ 
+   // WARNING: this must be the very first word of ObjectMonitor
+-  // This means this class can't use any virtual member functions.  
+-  // TODO-FIXME: assert that offsetof(_header) is 0 or get rid of the 
+-  // implicit 0 offset in emitted code.  
++  // This means this class can't use any virtual member functions.
++  // TODO-FIXME: assert that offsetof(_header) is 0 or get rid of the
++  // implicit 0 offset in emitted code.
+ 
+   volatile markOop   _header;       // displaced object header word - mark
+   void*     volatile _object;       // backward object pointer - strong root
+@@ -175,42 +171,38 @@
+   // All the following fields must be machine word aligned
+   // The VM assumes write ordering wrt these fields, which can be
+   // read from other threads.
+-  
++
+   void *  volatile _owner;          // pointer to owning thread OR BasicLock
+-  volatile intptr_t  _recursions;   // recursion count, 0 for first entry  
++  volatile intptr_t  _recursions;   // recursion count, 0 for first entry
+   int OwnerIsThread ;               // _owner is (Thread *) vs SP/BasicLock
+   ObjectWaiter * volatile _cxq ;    // LL of recently-arrived threads blocked on entry.
+                                     // The list is actually composed of WaitNodes, acting
+-                                    // as proxies for Threads. 
++                                    // as proxies for Threads.
+   ObjectWaiter * volatile _EntryList ;     // Threads blocked on entry or reentry.
+   Thread * volatile _succ ;          // Heir presumptive thread - used for futile wakeup throttling
+-  Thread * volatile _Responsible ; 
++  Thread * volatile _Responsible ;
+   int _PromptDrain ;                // rqst to drain cxq into EntryList ASAP
+ 
+   volatile int _Spinner ;           // for exit->spinner handoff optimization
+   volatile int _SpinFreq ;          // Spin 1-out-of-N attempts: success rate
+-  volatile int _SpinClock ;         
+-  volatile int _SpinDuration ; 
++  volatile int _SpinClock ;
++  volatile int _SpinDuration ;
+   volatile intptr_t _SpinState ;    // MCS/CLH list of spinners
+ 
+-  // TODO-FIXME: _count, _waiters and _recursions should be of 
++  // TODO-FIXME: _count, _waiters and _recursions should be of
+   // type int, or int32_t but not intptr_t.  There's no reason
+-  // to use 64-bit fields for these variables on a 64-bit JVM.  
++  // to use 64-bit fields for these variables on a 64-bit JVM.
+ 
+-  volatile intptr_t  _count;        // reference count to prevent reclaimation/deflation 
+-                                    // at stop-the-world time.  See deflate_idle_monitors().  
++  volatile intptr_t  _count;        // reference count to prevent reclamation/deflation
++                                    // at stop-the-world time.  See deflate_idle_monitors().
+                                     // _count is approximately |_WaitSet| + |_EntryList|
+-  volatile intptr_t  _waiters;      // number of waiting threads  
++  volatile intptr_t  _waiters;      // number of waiting threads
+   ObjectWaiter * volatile _WaitSet; // LL of threads wait()ing on the monitor
+   volatile int _WaitSetLock;        // protects Wait Queue - simple spinlock
+ 
+  public:
+   int _QMix ;                       // Mixed prepend queue discipline
+   ObjectMonitor * FreeNext ;        // Free list linkage
+-  intptr_t StatA, StatsB ; 
++  intptr_t StatA, StatsB ;
+ 
+ };
+-
+-
+-
+-
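
The WARNING above about _header being the very first word, and the
*_offset_in_bytes() accessors, both exist because generated code addresses
monitor fields by raw byte offset. A plain-C++ toy of the constraint
(ToyObjectMonitor is invented; the TODO-FIXME above asks for exactly this
kind of check):

    #include <cstddef>
    #include <cstdint>

    struct ToyObjectMonitor {
        volatile std::intptr_t _header;   // must stay the first word
        void* volatile         _object;
    };

    // Same pattern as the *_offset_in_bytes() accessors above.
    static int header_offset_in_bytes() {
        return (int) offsetof(ToyObjectMonitor, _header);
    }

    int main() {
        // Emitted code assumes an implicit 0 offset for _header:
        return header_offset_in_bytes() == 0 ? 0 : 1;
    }
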
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/objectMonitor.inline.hpp openjdk/hotspot/src/share/vm/runtime/objectMonitor.inline.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/objectMonitor.inline.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/objectMonitor.inline.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)objectMonitor.inline.hpp	1.24 07/05/05 17:06:52 JVM"
+-#endif
+ /*
+  * Copyright 1998-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ inline intptr_t ObjectMonitor::is_entered(TRAPS) const {
+@@ -85,8 +82,8 @@
+   if (THREAD != _owner) {
+     if (THREAD->is_lock_owned((address) _owner)) {
+       _owner = THREAD;  // regain ownership of inflated monitor
+-      OwnerIsThread = 1 ; 
+-      assert (_recursions == 0, "invariant") ; 
++      OwnerIsThread = 1 ;
++      assert (_recursions == 0, "invariant") ;
+     } else {
+       check_slow(THREAD);
+       return false;
+@@ -106,7 +103,7 @@
+   _recursions = 0;
+   _count = 0;
+ }
+-               
++
+ 
+ // here are the platform-dependent bodies:
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/orderAccess.cpp openjdk/hotspot/src/share/vm/runtime/orderAccess.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/orderAccess.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/orderAccess.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)orderAccess.cpp	1.9 07/05/05 17:06:53 JVM"
+-#endif
+ /*
+  * Copyright 2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/orderAccess.hpp openjdk/hotspot/src/share/vm/runtime/orderAccess.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/orderAccess.hpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/orderAccess.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)orderAccess.hpp	1.11 07/05/05 17:06:52 JVM"
+-#endif
+ /*
+  * Copyright 2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //                Memory Access Ordering Model
+@@ -103,7 +100,7 @@
+ // ---------------------------------------------------------------------
+ // fence         membar #LoadStore |   mf               lock addl 0,(sp)
+ //                      #StoreStore |
+-//		        #LoadLoad |
++//                      #LoadLoad |
+ //                      #StoreLoad
+ //
+ // release       membar #LoadStore |   st.rel [sp]=r0   movl $0,<dummy>
+@@ -116,7 +113,7 @@
+ //
+ // release_store membar #LoadStore |   st.rel           <store>
+ //                      #StoreStore
+-//		 st
++//               st
+ //
+ // store_fence   st                    st               lock xchg
+ //               fence                 mf
+@@ -128,7 +125,7 @@
+ // Using only release_store and load_acquire, we can implement the
+ // following ordered sequences.
+ //
+-// 1. load, load   == load_acquire,  load 
++// 1. load, load   == load_acquire,  load
+ //                 or load_acquire,  load_acquire
+ // 2. load, store  == load,          release_store
+ //                 or load_acquire,  store
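
The ordered sequences above all fall out of one pairing. Rendered in C++11
atomics (not the HotSpot OrderAccess API, which predates them; payload and
ready are invented names), the writer publishes with a release store and the
reader observes with an acquire load:

    #include <atomic>

    int payload = 0;
    std::atomic<int> ready(0);

    void writer() {
        payload = 42;                               // ordinary store
        ready.store(1, std::memory_order_release);  // release_store
    }

    void reader() {
        if (ready.load(std::memory_order_acquire) == 1)  // load_acquire
            (void) payload;                         // guaranteed to read 42
    }

    int main() { writer(); reader(); return 0; }
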
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/os.cpp openjdk/hotspot/src/share/vm/runtime/os.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/os.cpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/os.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)os.cpp	1.183 07/06/19 03:53:14 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -39,6 +36,8 @@
+ volatile jlong    os::_global_time        = 0;
+ volatile int      os::_global_time_lock   = 0;
+ bool              os::_use_global_time    = false;
++size_t            os::_page_sizes[os::page_sizes_max];
++
+ #ifndef PRODUCT
+ int os::num_mallocs = 0;            // # of calls to malloc/realloc
+ size_t os::alloc_bytes = 0;         // # of bytes allocated
+@@ -116,12 +115,12 @@
+ // Fill in buffer with current local time as an ISO-8601 string.
+ // E.g., yyyy-mm-ddThh:mm:ss-zzzz.
+ // Returns buffer, or NULL if it failed.
+-// This would mostly be a call to 
++// This would mostly be a call to
+ //     strftime(...., "%Y-%m-%d" "T" "%H:%M:%S" "%z", ....)
+ // except that on Windows the %z behaves badly, so we do it ourselves.
+-// Also, people wanted milliseconds on there, 
++// Also, people wanted milliseconds on there,
+ // and strftime doesn't do milliseconds.
+-char* os::iso8601_time(char* buffer, size_t buffer_length) {  
++char* os::iso8601_time(char* buffer, size_t buffer_length) {
+   // Output will be of the form "YYYY-MM-DDThh:mm:ss.mmm+zzzz\0"
+   //                                      1         2
+   //                             12345678901234567890123456789
+@@ -154,8 +153,8 @@
+   // Save the results of localtime
+   const struct tm time_struct = *time_struct_temp;
+   const time_t zone = timezone;
+-  
+-  // If daylight savings time is in effect, 
++
++  // If daylight savings time is in effect,
+   // we are 1 hour East of our time zone
+   const time_t seconds_per_minute = 60;
+   const time_t minutes_per_hour = 60;
+@@ -167,7 +166,7 @@
+   // Compute the time zone offset.
+   //    localtime(3C) sets timezone to the difference (in seconds)
+   //    between UTC and local time.
+-  //    ISO 8601 says we need the difference between local time and UTC, 
++  //    ISO 8601 says we need the difference between local time and UTC,
+   //    so we change the sign of the localtime(3C) result.
+   const time_t local_to_UTC = -(UTC_to_local);
+   // Then we have to figure out if we are ahead (+) or behind (-) UTC.
+@@ -181,7 +180,7 @@
+   const time_t zone_hours = (abs_local_to_UTC / seconds_per_hour);
+   const time_t zone_min =
+     ((abs_local_to_UTC % seconds_per_hour) / seconds_per_minute);
+-  
++
+   // Print an ISO 8601 date and time stamp into the buffer
+   const int year = 1900 + time_struct.tm_year;
+   const int month = 1 + time_struct.tm_mon;
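 
Put together, the recipe the comments above describe looks roughly like this
(a sketch, not the HotSpot function: POSIX localtime_r/timezone are assumed,
iso8601_sketch is an invented name, and the caller supplies the milliseconds
that strftime cannot):

    #include <cstdio>
    #include <cstdlib>
    #include <ctime>

    char* iso8601_sketch(char* buf, size_t len, time_t secs, int millis) {
        tzset();                                  // make 'timezone' valid
        struct tm local;
        if (localtime_r(&secs, &local) == NULL) return NULL;

        char date[32];
        strftime(date, sizeof(date), "%Y-%m-%dT%H:%M:%S", &local);

        // Seconds east of UTC: localtime(3C) reports seconds west, hence
        // the sign flip, and DST moves us one hour further east.
        long east = -timezone + (local.tm_isdst > 0 ? 3600 : 0);
        int n = std::snprintf(buf, len, "%s.%03d%+03ld%02ld", date, millis,
                              east / 3600, std::labs(east % 3600) / 60);
        return (n > 0 && (size_t) n < len) ? buf : NULL;
    }

    int main() {
        char buf[40];
        if (iso8601_sketch(buf, sizeof(buf), time(NULL), 0))
            std::puts(buf);              // e.g. 2008-03-04T07:02:39.000-0500
        return 0;
    }
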
+@@ -209,7 +208,7 @@
+          Thread::current() == thread  ||
+          Threads_lock->owned_by_self()
+          || thread->is_Compiler_thread()
+-	)) {
++        )) {
+     assert(false, "possibility of dangling Thread pointer");
+   }
+ #endif
+@@ -248,7 +247,7 @@
+ 
+ 
+ static void signal_thread_entry(JavaThread* thread, TRAPS) {
+-  os::set_priority(thread, NearMaxPriority);  
++  os::set_priority(thread, NearMaxPriority);
+   while (true) {
+     int sig;
+     {
+@@ -266,12 +265,12 @@
+       case SIGBREAK: {
+         // Check if the signal is a trigger to start the Attach Listener - in that
+         // case don't print stack traces.
+-	if (!DisableAttachMechanism && AttachListener::is_init_trigger()) {
+-	  continue;
+-  	}
++        if (!DisableAttachMechanism && AttachListener::is_init_trigger()) {
++          continue;
++        }
+         // Print stack traces
+-	// Any SIGBREAK operations added here should make sure to flush
+-	// the output stream (e.g. tty->flush()) after output.  See 4803766.
++        // Any SIGBREAK operations added here should make sure to flush
++        // the output stream (e.g. tty->flush()) after output.  See 4803766.
+         // Each module also prints an extra carriage return after its output.
+         VM_PrintThreads op;
+         VMThread::execute(&op);
+@@ -289,7 +288,7 @@
+         }
+         break;
+       }
+-      default: {       
++      default: {
+         // Dispatch the signal to java
+         HandleMark hm(THREAD);
+         klassOop k = SystemDictionary::resolve_or_null(vmSymbolHandles::sun_misc_Signal(), THREAD);
+@@ -300,31 +299,31 @@
+           args.push_int(sig);
+           JavaCalls::call_static(
+             &result,
+-            klass, 
+-            vmSymbolHandles::dispatch_name(), 
++            klass,
++            vmSymbolHandles::dispatch_name(),
+             vmSymbolHandles::int_void_signature(),
+             &args,
+             THREAD
+           );
+         }
+-	if (HAS_PENDING_EXCEPTION) {
+-	  // tty is initialized early so we don't expect it to be null, but
+-	  // if it is we can't risk doing an initialization that might
+-	  // trigger additional out-of-memory conditions
+-	  if (tty != NULL) {
+-	    char klass_name[256];
+-	    char tmp_sig_name[16];
+-	    const char* sig_name = "UNKNOWN";
+-	    instanceKlass::cast(PENDING_EXCEPTION->klass())->
+-	      name()->as_klass_external_name(klass_name, 256);
+-	    if (os::exception_name(sig, tmp_sig_name, 16) != NULL)
+-	      sig_name = tmp_sig_name;
+-	    warning("Exception %s occurred dispatching signal %s to handler"
+-		    "- the VM may need to be forcibly terminated",  
+-		    klass_name, sig_name );
+-	  }
+-	  CLEAR_PENDING_EXCEPTION;
+-	}
++        if (HAS_PENDING_EXCEPTION) {
++          // tty is initialized early so we don't expect it to be null, but
++          // if it is we can't risk doing an initialization that might
++          // trigger additional out-of-memory conditions
++          if (tty != NULL) {
++            char klass_name[256];
++            char tmp_sig_name[16];
++            const char* sig_name = "UNKNOWN";
++            instanceKlass::cast(PENDING_EXCEPTION->klass())->
++              name()->as_klass_external_name(klass_name, 256);
++            if (os::exception_name(sig, tmp_sig_name, 16) != NULL)
++              sig_name = tmp_sig_name;
++            warning("Exception %s occurred dispatching signal %s to handler"
++                    "- the VM may need to be forcibly terminated",
++                    klass_name, sig_name );
++          }
++          CLEAR_PENDING_EXCEPTION;
++        }
+       }
+     }
+   }
+@@ -340,33 +339,33 @@
+     instanceHandle thread_oop = klass->allocate_instance_handle(CHECK);
+ 
+     const char thread_name[] = "Signal Dispatcher";
+-    Handle string = java_lang_String::create_from_str(thread_name, CHECK);    
++    Handle string = java_lang_String::create_from_str(thread_name, CHECK);
+ 
+     // Initialize thread_oop to put it into the system threadGroup
+     Handle thread_group (THREAD, Universe::system_thread_group());
+     JavaValue result(T_VOID);
+-    JavaCalls::call_special(&result, thread_oop, 
+-                           klass, 
+-                           vmSymbolHandles::object_initializer_name(), 
+-                           vmSymbolHandles::threadgroup_string_void_signature(), 
+-                           thread_group, 
+-                           string, 
+-                           CHECK);  
+-    
++    JavaCalls::call_special(&result, thread_oop,
++                           klass,
++                           vmSymbolHandles::object_initializer_name(),
++                           vmSymbolHandles::threadgroup_string_void_signature(),
++                           thread_group,
++                           string,
++                           CHECK);
++
+     KlassHandle group(THREAD, SystemDictionary::threadGroup_klass());
+     JavaCalls::call_special(&result,
+                             thread_group,
+                             group,
+                             vmSymbolHandles::add_method_name(),
+                             vmSymbolHandles::thread_void_signature(),
+-			    thread_oop,		// ARG 1
++                            thread_oop,         // ARG 1
+                             CHECK);
+ 
+     os::signal_init_pd();
+ 
+     { MutexLocker mu(Threads_lock);
+       JavaThread* signal_thread = new JavaThread(&signal_thread_entry);
+-                                                                                                                              
++
+       // At this point it may be possible that no osthread was created for the
+       // JavaThread due to lack of memory. We would have to throw an exception
+       // in that case. However, since this must work and we do not allow
+@@ -379,7 +378,7 @@
+       java_lang_Thread::set_thread(thread_oop(), signal_thread);
+       java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
+       java_lang_Thread::set_daemon(thread_oop());
+-         
++
+       signal_thread->set_threadObj(thread_oop());
+       Threads::add(signal_thread);
+       Thread::start(signal_thread);
+@@ -409,7 +408,7 @@
+     char ebuf[1024];
+ 
+     // Try to load verify dll first. In 1.3 java dll depends on it and is not always
+-    // able to find it when the loading executable is outside the JDK. 
++    // able to find it when the loading executable is outside the JDK.
+     // In order to keep working with 1.2 we ignore any loading errors.
+     hpi::dll_build_name(buffer, sizeof(buffer), Arguments::get_dll_dir(), "verify");
+     hpi::dll_load(buffer, ebuf, sizeof(ebuf));
+@@ -457,13 +456,13 @@
+ // MallocCushion: size of extra cushion allocated around objects with +UseMallocOnly
+ // NB: cannot be debug variable, because these aren't set from the command line until
+ // *after* the first few allocs already happened
+-#define MallocCushion            16 
++#define MallocCushion            16
+ #else
+ #define space_before             0
+ #define space_after              0
+ #define size_addr_from_base(p)   should not use w/o ASSERT
+ #define size_addr_from_obj(p)    should not use w/o ASSERT
+-#define MallocCushion            0 
++#define MallocCushion            0
+ #endif
+ #define paranoid                 0  /* only set to 1 if you suspect checking code has bug */
+ 
+@@ -476,7 +475,7 @@
+ }
+ 
+ u_char* find_cushion_backwards(u_char* start) {
+-  u_char* p = start; 
++  u_char* p = start;
+   while (p[ 0] != badResourceValue || p[-1] != badResourceValue ||
+          p[-2] != badResourceValue || p[-3] != badResourceValue) p--;
+   // ok, we have four consecutive marker bytes; find start
+@@ -486,7 +485,7 @@
+ }
+ 
+ u_char* find_cushion_forwards(u_char* start) {
+-  u_char* p = start; 
++  u_char* p = start;
+   while (p[0] != badResourceValue || p[1] != badResourceValue ||
+          p[2] != badResourceValue || p[3] != badResourceValue) p++;
+   // ok, we have four consecutive marker bytes; find end of cushion
+@@ -512,7 +511,7 @@
+     // search one more backwards
+     start_of_prev_block = find_cushion_backwards(start_of_prev_block);
+     size = *size_addr_from_base(start_of_prev_block);
+-    obj = start_of_prev_block + space_before;  
++    obj = start_of_prev_block + space_before;
+   }
+ 
+   if (start_of_prev_block + space_before + size + space_after == start_of_this_block) {
+@@ -526,9 +525,9 @@
+   start_of_next_block = find_cushion_forwards(start_of_next_block);
+   u_char* next_obj = start_of_next_block + space_before;
+   ptrdiff_t next_size = *size_addr_from_base(start_of_next_block);
+-  if (start_of_next_block[0] == badResourceValue && 
+-      start_of_next_block[1] == badResourceValue && 
+-      start_of_next_block[2] == badResourceValue && 
++  if (start_of_next_block[0] == badResourceValue &&
++      start_of_next_block[1] == badResourceValue &&
++      start_of_next_block[2] == badResourceValue &&
+       start_of_next_block[3] == badResourceValue) {
+     tty->print_cr("### next object: %p (%ld bytes)", next_obj, next_size);
+   } else {
+@@ -544,7 +543,7 @@
+   fatal("memory stomping error");
+ }
+ 
+-void verify_block(void* memblock) {  
++void verify_block(void* memblock) {
+   size_t size = get_size(memblock);
+   if (MallocCushion) {
+     u_char* ptr = (u_char*)memblock - space_before;
+@@ -649,14 +648,14 @@
+     u_char* ptr = (u_char*)memblock - space_before;
+     for (u_char* p = ptr; p < ptr + MallocCushion; p++) {
+       guarantee(*p == badResourceValue,
+-		"Thing freed should be malloc result.");
++                "Thing freed should be malloc result.");
+       *p = (u_char)freeBlockPad;
+     }
+     size_t size = get_size(memblock);
+     u_char* end = ptr + space_before + size;
+     for (u_char* q = end; q < end + MallocCushion; q++) {
+       guarantee(*q == badResourceValue,
+-		"Thing freed should be malloc result.");
++                "Thing freed should be malloc result.");
+       *q = (u_char)freeBlockPad;
+     }
+   }
+@@ -675,8 +674,8 @@
+    * see
+    * (1) "Random Number Generators: Good Ones Are Hard to Find",
+    *      S.K. Park and K.W. Miller, Communications of the ACM 31:10 (Oct 1988),
+-   * (2) "Two Fast Implementations of the 'Minimal Standard' Random 
+-   *     Number Generator", David G. Carta, Comm. ACM 33, 1 (Jan 1990), pp. 87-88. 
++   * (2) "Two Fast Implementations of the 'Minimal Standard' Random
++   *     Number Generator", David G. Carta, Comm. ACM 33, 1 (Jan 1990), pp. 87-88.
+   */
+   const long a = 16807;
+   const unsigned long m = 2147483647;
+@@ -805,8 +804,8 @@
+ #ifdef IA64
+   // In order to walk native frames on Itanium, we need to access the unwind
+   // table, which is inside ELF. We don't want to parse ELF after fatal error,
+-  // so return true for IA64. If we need to support C stack walking on IA64, 
+-  // this function needs to be moved to CPU specific files, as fp() on IA64 
++  // so return true for IA64. If we need to support C stack walking on IA64,
++  // this function needs to be moved to CPU specific files, as fp() on IA64
+   // is register stack, which grows towards higher memory address.
+   return true;
+ #endif
+@@ -834,7 +833,7 @@
+ 
+   // stack grows downwards; if old_fp is below current fp or if the stack
+   // frame is too large, either the stack is corrupted or fp is not saved
+-  // on stack (i.e. on x86, ebp may be used as general register). The stack 
++  // on stack (i.e. on x86, ebp may be used as general register). The stack
+   // is not walkable beyond current frame.
+   if (old_fp < ufp) return true;
+   if (old_fp - ufp > 64 * K) return true;
+@@ -851,13 +850,13 @@
+ 
+   tty->print_cr("seed %ld for %ld repeats...", seed, reps);
+   os::init_random(seed);
+-  long num; 
++  long num;
+   for (int k = 0; k < reps; k++) {
+     num = os::random();
+     double u = (double)num / m;
+     assert(u >= 0.0 && u <= 1.0, "bad random number!");
+ 
+-    // calculate mean and variance of the random sequence 
++    // calculate mean and variance of the random sequence
+     mean += u;
+     variance += (u*u);
+   }
+@@ -884,40 +883,40 @@
+                            char fileSep,
+                            char pathSep) {
+     assert((fileSep == '/' && pathSep == ':') ||
+-	   (fileSep == '\\' && pathSep == ';'), "unexpected seperator chars");
+-  
++           (fileSep == '\\' && pathSep == ';'), "unexpected seperator chars");
++
+     // Scan the format string to determine the length of the actual
+     // boot classpath, and handle platform dependencies as well.
+     int formatted_path_len = 0;
+     const char* p;
+     for (p = format_string; *p != 0; ++p) {
+-	if (*p == '%') formatted_path_len += home_len - 1;
+-	++formatted_path_len;
++        if (*p == '%') formatted_path_len += home_len - 1;
++        ++formatted_path_len;
+     }
+ 
+     char* formatted_path = NEW_C_HEAP_ARRAY(char, formatted_path_len + 1);
+     if (formatted_path == NULL) {
+-	return NULL;
++        return NULL;
+     }
+ 
+     // Create boot classpath from format, substituting separator chars and
+     // java home directory.
+     char* q = formatted_path;
+     for (p = format_string; *p != 0; ++p) {
+-	switch (*p) {
+-	case '%':
+-	    strcpy(q, home);
+-	    q += home_len;
+-	    break;
+-	case '/':
+-	    *q++ = fileSep;
+-	    break;
+-	case ':':
+-	    *q++ = pathSep;
+-	    break;
+-	default:
+-	    *q++ = *p;
+-	}
++        switch (*p) {
++        case '%':
++            strcpy(q, home);
++            q += home_len;
++            break;
++        case '/':
++            *q++ = fileSep;
++            break;
++        case ':':
++            *q++ = pathSep;
++            break;
++        default:
++            *q++ = *p;
++        }
+     }
+     *q = '\0';
+ 
+@@ -943,13 +942,13 @@
+     // aligned with install/install/make/common/Pack.gmk. Note: boot class
+     // path class JARs, are stripped for StackMapTable to reduce download size.
+     static const char classpath_format[] =
+-	"%/lib/resources.jar:"
+-	"%/lib/rt.jar:"
+-	"%/lib/sunrsasign.jar:"
+-	"%/lib/jsse.jar:"
+-	"%/lib/jce.jar:"
++        "%/lib/resources.jar:"
++        "%/lib/rt.jar:"
++        "%/lib/sunrsasign.jar:"
++        "%/lib/jsse.jar:"
++        "%/lib/jce.jar:"
+         "%/lib/charsets.jar:"
+-	"%/classes";
++        "%/classes";
+     char* sysclasspath = format_boot_path(classpath_format, home, home_len, fileSep, pathSep);
+     if (sysclasspath == NULL) return false;
+     Arguments::set_sysclasspath(sysclasspath);
+@@ -958,9 +957,9 @@
+ }
+ 
+ 
+-void os::set_memory_serialize_page(address page) { 
++void os::set_memory_serialize_page(address page) {
+   int count = log2_intptr(sizeof(class JavaThread)) - log2_intptr(64);
+-  _mem_serialize_page = (volatile int32_t *)page; 
++  _mem_serialize_page = (volatile int32_t *)page;
+   // We initialize the serialization page shift count here
+   // We assume a cache line size of 64 bytes
+   assert(SerializePageShiftCount == count,
+@@ -969,18 +968,18 @@
+ }
+ 
+ // This method is called from signal handler when SIGSEGV occurs while the current
+-// thread tries to store to the "read-only" memory serialize page during state 
++// thread tries to store to the "read-only" memory serialize page during state
+ // transition.
+ void os::block_on_serialize_page_trap() {
+   if (TraceSafepoint) {
+     tty->print_cr("Block until the serialize page permission restored");
+   }
+-  // When VMThread is holding the SerializePage_lock during modifying the 
++  // When VMThread is holding the SerializePage_lock during modifying the
+   // access permission of the memory serialize page, the following call
+   // will block until the permission of that page is restored to rw.
+   // Generally, it is unsafe to manipulate locks in signal handlers, but in
+   // this case, it's OK as the signal is synchronous and we know precisely when
+-  // it can occur. SerializePage_lock is a transiently-held leaf lock, so 
++  // it can occur. SerializePage_lock is a transiently-held leaf lock, so
+   // lock_without_safepoint_check should be safe.
+   SerializePage_lock->lock_without_safepoint_check();
+   SerializePage_lock->unlock();
+@@ -988,12 +987,12 @@
+ 
+ // Serialize all thread state variables
+ void os::serialize_thread_states() {
+-  // On some platforms such as Solaris & Linux, the time duration of the page 
+-  // permission restoration is observed to be much longer than expected  due to 
+-  // scheduler starvation problem etc. To avoid the long synchronization 
+-  // time and expensive page trap spinning, 'SerializePage_lock' is used to block 
+-  // the mutator thread if such case is encountered. Since this method is always 
+-  // called by VMThread during safepoint, lock_without_safepoint_check is used 
++  // On some platforms such as Solaris & Linux, the time duration of the page
++  // permission restoration is observed to be much longer than expected  due to
++  // scheduler starvation problem etc. To avoid the long synchronization
++  // time and expensive page trap spinning, 'SerializePage_lock' is used to block
++  // the mutator thread if such case is encountered. Since this method is always
++  // called by VMThread during safepoint, lock_without_safepoint_check is used
+   // instead. See bug 6546278.
+   SerializePage_lock->lock_without_safepoint_check();
+   os::protect_memory( (char *)os::get_memory_serialize_page(), os::vm_page_size() );
+@@ -1013,25 +1012,65 @@
+   // handler or a println uses at least 8k stack of VM and native code
+   // respectively.
+   const int framesize_in_bytes =
+-    AbstractInterpreter::size_top_interpreter_activation(method()) * wordSize;
+-  int reserved_area = ((StackShadowPages + StackRedPages + StackYellowPages) 
++    Interpreter::size_top_interpreter_activation(method()) * wordSize;
++  int reserved_area = ((StackShadowPages + StackRedPages + StackYellowPages)
+                       * vm_page_size()) + framesize_in_bytes;
+   // The very lower end of the stack
+   address stack_limit = thread->stack_base() - thread->stack_size();
+   return (sp > (stack_limit + reserved_area));
+ }
+ 
++size_t os::page_size_for_region(size_t region_min_size, size_t region_max_size,
++                                uint min_pages)
++{
++  assert(min_pages > 0, "sanity");
++  if (UseLargePages) {
++    const size_t max_page_size = region_max_size / min_pages;
++
++    for (unsigned int i = 0; _page_sizes[i] != 0; ++i) {
++      const size_t sz = _page_sizes[i];
++      const size_t mask = sz - 1;
++      if ((region_min_size & mask) == 0 && (region_max_size & mask) == 0) {
++        // The largest page size with no fragmentation.
++        return sz;
++      }
++
++      if (sz <= max_page_size) {
++        // The largest page size that satisfies the min_pages requirement.
++        return sz;
++      }
++    }
++  }
++
++  return vm_page_size();
++}
++
++#ifndef PRODUCT
++void os::trace_page_sizes(const char* str, const size_t region_min_size,
++                          const size_t region_max_size, const size_t page_size,
++                          const char* base, const size_t size)
++{
++  if (TracePageSizes) {
++    tty->print_cr("%s:  min=" SIZE_FORMAT " max=" SIZE_FORMAT
++                  " pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT
++                  " size=" SIZE_FORMAT,
++                  str, region_min_size, region_max_size,
++                  page_size, base, size);
++  }
++}
++#endif  // #ifndef PRODUCT
++
+ // This is the working definition of a server class machine:
+-// >= 2 physical CPU's and >=2GB of memory, with some fuzz 
++// >= 2 physical CPU's and >=2GB of memory, with some fuzz
+ // because the graphics memory (?) sometimes masks physical memory.
+-// If you want to change the definition of a server class machine 
+-// on some OS or platform, e.g., >=4GB on Windohs platforms, 
+-// then you'll have to parameterize this method based on that state, 
+-// as was done for logical processors here, or replicate and 
+-// specialize this method for each platform.  (Or fix os to have 
++// If you want to change the definition of a server class machine
++// on some OS or platform, e.g., >=4GB on Windohs platforms,
++// then you'll have to parameterize this method based on that state,
++// as was done for logical processors here, or replicate and
++// specialize this method for each platform.  (Or fix os to have
+ // some inheritance structure and use subclassing.  Sigh.)
+-// If you want some platform to always or never behave as a server 
+-// class machine, change the setting of AlwaysActAsServerClassMachine 
++// If you want some platform to always or never behave as a server
++// class machine, change the setting of AlwaysActAsServerClassMachine
+ // and NeverActAsServerClassMachine in globals*.hpp.
+ bool os::is_server_class_machine() {
+   // First check for the early returns
+@@ -1046,12 +1085,12 @@
+   const unsigned int    server_processors = 2;
+   const julong server_memory     = 2UL * G;
+   // We seem not to get our full complement of memory.
+-  //     We allow some part (1/8?) of the memory to be "missing", 
++  //     We allow some part (1/8?) of the memory to be "missing",
+   //     based on the sizes of DIMMs, and maybe graphics cards.
+   const julong missing_memory   = 256UL * M;
+ 
+   /* Is this a server class machine? */
+-  if ((os::active_processor_count() >= (int)server_processors) && 
++  if ((os::active_processor_count() >= (int)server_processors) &&
+       (os::physical_memory() >= (server_memory - missing_memory))) {
+     const unsigned int logical_processors =
+       VM_Version::logical_processors_per_package();
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/os.hpp openjdk/hotspot/src/share/vm/runtime/os.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/os.hpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/os.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)os.hpp	1.220 07/06/19 03:53:08 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // os defines the interface to operating system; this includes traditional
+@@ -42,7 +39,7 @@
+ // Platform-independent error return values from OS functions
+ enum OSReturn {
+   OS_OK         =  0,        // Operation was successful
+-  OS_ERR        = -1,        // Operation failed 
++  OS_ERR        = -1,        // Operation failed
+   OS_INTRPT     = -2,        // Operation was interrupted
+   OS_TIMEOUT    = -3,        // Operation timed out
+   OS_NOMEM      = -5,        // Operation failed for lack of memory
+@@ -58,11 +55,13 @@
+                              // ensures that VMThread doesn't starve profiler
+ };
+ 
+-// Typedef for structured exception handling support 
++// Typedef for structured exception handling support
+ typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
+ 
+ class os: AllStatic {
+  private:
++  enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel)
++
+   static OSThread*          _starting_thread;
+   static address            _polling_page;
+   static volatile int32_t * _mem_serialize_page;
+@@ -70,9 +69,16 @@
+   static volatile jlong     _global_time;
+   static volatile int       _global_time_lock;
+   static bool               _use_global_time;
++  static size_t             _page_sizes[page_sizes_max];
++
++  static void init_page_sizes(size_t default_page_size) {
++    _page_sizes[0] = default_page_size;
++    _page_sizes[1] = 0; // sentinel
++  }
++
+  public:
+ 
+-  static void init(void);			// Called before command line parsing
++  static void init(void);                       // Called before command line parsing
+   static jint init_2(void);                    // Called after command line parsing
+ 
+   // File names are case-insensitive on windows only
+@@ -81,7 +87,7 @@
+ 
+   static bool getenv(const char* name, char* buffer, int len);
+   static bool have_special_privileges();
+- 
++
+   static jlong  timeofday();
+   static void   enable_global_time()   { _use_global_time = true; }
+   static void   disable_global_time()  { _use_global_time = false; }
+@@ -99,15 +105,15 @@
+   // Returns real time in seconds since an arbitrary point
+   // in the past.
+   static bool getTimesSecs(double* process_real_time,
+-			   double* process_user_time, 
+-			   double* process_system_time);
+-    
++                           double* process_user_time,
++                           double* process_system_time);
++
+   // Interface to the performance counter
+   static jlong elapsed_counter();
+   static jlong elapsed_frequency();
+ 
+-  // Return current local time in a string (YYYY-MM-DD HH:MM:SS). 
+-  // It is MT safe, but not async-safe, as reading time zone 
++  // Return current local time in a string (YYYY-MM-DD HH:MM:SS).
++  // It is MT safe, but not async-safe, as reading time zone
+   // information may require a lock on some platforms.
+   static char* local_time_string(char *buf, size_t buflen);
+   // Fill in buffer with current local time as an ISO-8601 string.
+@@ -139,7 +145,7 @@
+   //     first you generate a distribution of processes to processors,
+   //     then you bind processes according to that distribution.
+   // Compute a distribution for number of processes to processors.
+-  //    Stores the processor id's into the distribution array argument. 
++  //    Stores the processor id's into the distribution array argument.
+   //    Returns true if it worked, false if it didn't.
+   static bool distribute_processes(uint length, uint* distribution);
+   // Binds the current process to a processor.
+@@ -155,9 +161,38 @@
+   static bool stack_shadow_pages_available(Thread *thread, methodHandle method);
+ 
+   // OS interface to Virtual Memory
++
++  // Return the default page size.
+   static int    vm_page_size();
++
++  // Return the page size to use for a region of memory.  The min_pages argument
++  // is a hint intended to limit fragmentation; it says the returned page size
++  // should be <= region_max_size / min_pages.  Because min_pages is a hint,
++  // this routine may return a size larger than region_max_size / min_pages.
++  //
++  // The current implementation ignores min_pages if a larger page size is an
++  // exact multiple of both region_min_size and region_max_size.  This allows
++  // larger pages to be used when doing so would not cause fragmentation; in
++  // particular, a single page can be used when region_min_size ==
++  // region_max_size == a supported page size.
++  static size_t page_size_for_region(size_t region_min_size,
++                                     size_t region_max_size,
++                                     uint min_pages);
++
++  // Method for tracing page sizes returned by the above method; enabled by
++  // TracePageSizes.  The region_{min,max}_size parameters should be the values
++  // passed to page_size_for_region() and page_size should be the result of that
++  // call.  The (optional) base and size parameters should come from the
++  // ReservedSpace base() and size() methods.
++  static void trace_page_sizes(const char* str, const size_t region_min_size,
++                               const size_t region_max_size,
++                               const size_t page_size,
++                               const char* base = NULL,
++                               const size_t size = 0) PRODUCT_RETURN;
++
+   static int    vm_allocation_granularity();
+-  static char*  reserve_memory(size_t bytes, char* addr = 0);
++  static char*  reserve_memory(size_t bytes, char* addr = 0,
++                               size_t alignment_hint = 0);
+   static char*  attempt_reserve_memory_at(size_t bytes, char* addr);
+   static void   split_reserved_memory(char *base, size_t size,
+                                       size_t split, bool realloc);
+@@ -215,15 +250,15 @@
+   // Since we write to the serialize page from every thread, we
+   // want stores to be on unique cache lines whenever possible
+   // in order to minimize CPU cross talk.  We pre-compute the
+-  // amount to shift the thread* to make this offset unique to 
++  // amount to shift the thread* to make this offset unique to
+   // each thread.
+   static int     get_serialize_page_shift_count() {
+     return SerializePageShiftCount;
+-  } 
++  }
+ 
+   static void     set_serialize_page_mask(uintptr_t mask) {
+     _serialize_page_mask = mask;
+-  } 
++  }
+ 
+   static unsigned int  get_serialize_page_mask() {
+     return _serialize_page_mask;
+@@ -231,18 +266,18 @@
+ 
+   static void    set_memory_serialize_page(address page);
+ 
+-  static address get_memory_serialize_page() { 
+-    return (address)_mem_serialize_page; 
++  static address get_memory_serialize_page() {
++    return (address)_mem_serialize_page;
+   }
+ 
+   static inline void write_memory_serialize_page(JavaThread *thread) {
+-    uintptr_t page_offset = ((uintptr_t)thread >> 
++    uintptr_t page_offset = ((uintptr_t)thread >>
+                             get_serialize_page_shift_count()) &
+-                            get_serialize_page_mask(); 
+-    *(volatile int32_t *)((uintptr_t)_mem_serialize_page+page_offset) = 1; 
++                            get_serialize_page_mask();
++    *(volatile int32_t *)((uintptr_t)_mem_serialize_page+page_offset) = 1;
+   }
+ 
+-  static bool    is_memory_serialize_page(JavaThread *thread, address addr) { 
++  static bool    is_memory_serialize_page(JavaThread *thread, address addr) {
+     address thr_addr;
+     if (UseMembar) return false;
+     // Calculate thread specific address
+@@ -250,11 +285,11 @@
+     // TODO-FIXME: some platforms mask off faulting addresses to the base pagesize.
+     // Instead of using a test for equality we should probably use something
+     // of the form:
+-    // return ((_mem_serialize_page ^ addr) & -pagesize) == 0 
+-    // 
+-    thr_addr  = (address)(((uintptr_t)thread >> 
++    // return ((_mem_serialize_page ^ addr) & -pagesize) == 0
++    //
++    thr_addr  = (address)(((uintptr_t)thread >>
+                 get_serialize_page_shift_count()) &
+-                get_serialize_page_mask()) + (uintptr_t)_mem_serialize_page; 
++                get_serialize_page_mask()) + (uintptr_t)_mem_serialize_page;
+     return  (thr_addr == addr);
+   }
+ 
+@@ -292,7 +327,15 @@
+   static int naked_sleep();
+   static void infinite_sleep(); // never returns, use with CAUTION
+   static void yield();        // Yields to all threads with same priority
+-  static void NakedYield () ; 
++  enum YieldResult {
++    YIELD_SWITCHED = 1,         // caller descheduled, other ready threads exist & ran
++    YIELD_NONEREADY = 0,        // No other runnable/ready threads.
++                                // platform-specific yield return immediately
++    YIELD_UNKNOWN = -1          // Unknown: platform doesn't support _SWITCHED or _NONEREADY
++    // YIELD_SWITCHED and YIELD_NONREADY imply the platform supports a "strong"
++    // yield that can be used in lieu of blocking.
++  } ;
++  static YieldResult NakedYield () ;
+   static void yield_all(int attempts = 0); // Yields to all other threads including lower priority
+   static void loop_breaker(int attempts);  // called from within tight loops to possibly influence time-sharing
+   static OSReturn set_priority(Thread* thread, ThreadPriority priority);
+@@ -302,7 +345,7 @@
+   static bool is_interrupted(Thread* thread, bool clear_interrupted);
+ 
+   static int pd_self_suspend_thread(Thread* thread);
+-  
++
+   static ExtendedPC fetch_frame_from_context(void* ucVoid, intptr_t** sp, intptr_t** fp);
+   static frame      fetch_frame_from_context(void* ucVoid);
+ 
+@@ -316,6 +359,9 @@
+   static int message_box(const char* title, const char* message);
+   static char* do_you_want_to_debug(const char* message);
+ 
++  // run cmd in a separate process and return its exit code; or -1 on failures
++  static int fork_and_exec(char *cmd);
++
+   // Set file to send error reports.
+   static void set_error_file(const char *logfile);
+ 
+@@ -337,7 +383,7 @@
+   static int            readdir_buf_size(const char *path);
+   static struct dirent* readdir(DIR* dirp, dirent* dbuf);
+   static int            closedir(DIR* dirp);
+-  
++
+   // Dynamic library extension
+   static const char*    dll_file_extension();
+ 
+@@ -353,7 +399,7 @@
+                                            int buflen, int* offset);
+ 
+   // Locate DLL/DSO. On success, full path of the library is copied to
+-  // buf, and offset is set to be the distance between addr and the 
++  // buf, and offset is set to be the distance between addr and the
+   // library's base address. On failure, buf[0] is set to '\0' and
+   // offset is set to -1.
+   static bool dll_address_to_library_name(address addr, char* buf,
+@@ -362,7 +408,7 @@
+   // Find out whether the pc is in the static code for jvm.dll/libjvm.so.
+   static bool address_is_in_vm(address addr);
+ 
+-  // Loads .dll/.so and 
++  // Loads .dll/.so and
+   // in case of error it checks if .dll/.so was built for the
+   // same architecture as Hotspot is running on
+   static void* dll_load(const char *name, char *ebuf, int ebuflen);
+@@ -379,15 +425,15 @@
+   static void print_signal_handlers(outputStream* st, char* buf, size_t buflen);
+   static void print_date_and_time(outputStream* st);
+ 
+-  // The following two functions are used by fatal error handler to trace 
+-  // native (C) frames. They are not part of frame.hpp/frame.cpp because 
++  // The following two functions are used by fatal error handler to trace
++  // native (C) frames. They are not part of frame.hpp/frame.cpp because
+   // frame.hpp/cpp assume thread is JavaThread, and also because different
+-  // OS/compiler may have different convention or provide different API to 
++  // OS/compiler may have different convention or provide different API to
+   // walk C frames.
+   //
+-  // We don't attempt to become a debugger, so we only follow frames if that 
++  // We don't attempt to become a debugger, so we only follow frames if that
+   // does not require a lookup in the unwind table, which is part of the binary
+-  // file but may be unsafe to read after a fatal error. So on x86, we can 
++  // file but may be unsafe to read after a fatal error. So on x86, we can
+   // only walk stack if %ebp is used as frame pointer; on ia64, it's not
+   // possible to walk C stack without having the unwind table.
+   static bool is_first_C_frame(frame *fr);
+@@ -398,7 +444,7 @@
+ 
+   static void print_hex_dump(outputStream* st, address start, address end, int unitsize);
+ 
+-  // returns a string to describe the exception/signal; 
++  // returns a string to describe the exception/signal;
+   // returns NULL if exception_code is not an OS exception/signal.
+   static const char* exception_name(int exception_code, char* buf, size_t buflen);
+ 
+@@ -510,7 +556,7 @@
+ 
+   // debugging support (mostly used by debug.cpp)
+   static bool find(address pc) PRODUCT_RETURN0; // OS specific function to make sense out of an address
+-  
++
+   static bool dont_yield();                     // when true, JVM_Yield() is nop
+   static void print_statistics();
+ 
+@@ -522,9 +568,9 @@
+   // Void return because it's a hint and can fail.
+   static void hint_no_preempt();
+ 
+-  // Used at creation if requested by the diagnostic flag PauseAtStartup.  
+-  // Causes the VM to wait until an external stimulus has been applied 
+-  // (for Unix, that stimulus is a signal, for Windows, an external 
++  // Used at creation if requested by the diagnostic flag PauseAtStartup.
++  // Causes the VM to wait until an external stimulus has been applied
++  // (for Unix, that stimulus is a signal, for Windows, an external
+   // ResumeThread call)
+   static void pause();
+ 
+@@ -542,11 +588,9 @@
+ 
+ // Note that "PAUSE" is almost always used with synchronization
+ // so arguably we should provide Atomic::SpinPause() instead
+-// of the global SpinPause() with C linkage.  
+-// It'd also be eligible for inlining on many platforms. 
+-
+-extern "C" int SpinPause () ; 
+-extern "C" int SafeFetch32 (int * adr, int errValue) ; 
+-extern "C" intptr_t SafeFetchN (intptr_t * adr, intptr_t errValue) ; 
+-
++// of the global SpinPause() with C linkage.
++// It'd also be eligible for inlining on many platforms.
+ 
++extern "C" int SpinPause () ;
++extern "C" int SafeFetch32 (int * adr, int errValue) ;
++extern "C" intptr_t SafeFetchN (intptr_t * adr, intptr_t errValue) ;
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/osThread.cpp openjdk/hotspot/src/share/vm/runtime/osThread.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/osThread.cpp	2008-02-28 05:02:42.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/osThread.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)osThread.cpp	1.29 07/05/05 17:06:53 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -50,10 +47,9 @@
+     case MONITOR_WAIT:            st->print("waiting for monitor entry "); break;
+     case CONDVAR_WAIT:            st->print("waiting on condition ");      break;
+     case OBJECT_WAIT:             st->print("in Object.wait() ");          break;
+-    case BREAKPOINTED:            st->print("at breakpoint");        	    break;
+-    case SLEEPING:            	  st->print("sleeping");        	    break;
+-    case ZOMBIE:            	  st->print("zombie");        	    	    break;
++    case BREAKPOINTED:            st->print("at breakpoint");               break;
++    case SLEEPING:                st->print("sleeping");                    break;
++    case ZOMBIE:                  st->print("zombie");                      break;
+     default:                      st->print("unknown state %d", _state); break;
+   }
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/osThread.hpp openjdk/hotspot/src/share/vm/runtime/osThread.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/osThread.hpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/osThread.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)osThread.hpp	1.40 07/05/05 17:06:53 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,14 +19,14 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The OSThread class holds OS-specific thread information.  It is equivalent
+ // to the sys_thread_t structure of the classic JVM implementation.
+ 
+ // The thread states represented by the ThreadState values are platform-specific
+-// and are likely to be only approximate, because most OSes don't give you access 
++// and are likely to be only approximate, because most OSes don't give you access
+ // to precise thread state information.
+ 
+ // Note: the ThreadState is legacy code and is not correctly implemented.
+@@ -42,9 +39,9 @@
+   MONITOR_WAIT,                 // Waiting on a contended monitor lock
+   CONDVAR_WAIT,                 // Waiting on a condition variable
+   OBJECT_WAIT,                  // Waiting on an Object.wait() call
+-  BREAKPOINTED,                 // Suspended at breakpoint 
+-  SLEEPING,                     // Thread.sleep() 
+-  ZOMBIE                        // All done, but not reclaimed yet 
++  BREAKPOINTED,                 // Suspended at breakpoint
++  SLEEPING,                     // Thread.sleep()
++  ZOMBIE                        // All done, but not reclaimed yet
+ };
+ 
+ // I'd make OSThread a ValueObj embedded in Thread to avoid an indirection, but
+@@ -69,7 +66,7 @@
+   // Methods
+  public:
+   void set_state(ThreadState state)                { _state = state; }
+-  ThreadState get_state()	      		   { return _state; }
++  ThreadState get_state()                          { return _state; }
+ 
+   // Constructor
+   OSThread(OSThreadStartFunc start_proc, void* start_parm);
+@@ -88,8 +85,8 @@
+ 
+   // Printing
+   void print_on(outputStream* st) const;
+-  void print() const				    { print_on(tty); }
+- 
++  void print() const                                { print_on(tty); }
++
+   // For java intrinsics:
+   static ByteSize interrupted_offset()            { return byte_offset_of(OSThread, _interrupted); }
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/perfData.cpp openjdk/hotspot/src/share/vm/runtime/perfData.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/perfData.cpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/perfData.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)perfData.cpp	1.22 07/05/05 17:06:52 JVM"
+-#endif
+ /*
+  * Copyright 2001-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,15 +19,15 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_perfData.cpp.incl"
+ 
+-PerfDataList*	PerfDataManager::_all = NULL;
+-PerfDataList*	PerfDataManager::_sampled = NULL;
+-PerfDataList*	PerfDataManager::_constants = NULL;
++PerfDataList*   PerfDataManager::_all = NULL;
++PerfDataList*   PerfDataManager::_sampled = NULL;
++PerfDataList*   PerfDataManager::_constants = NULL;
+ 
+ /*
+  * The jvmstat global and subsysem jvmstat counter name spaces. The top
+@@ -161,20 +158,20 @@
+ 
+   if (PerfTraceDataCreation) {
+     tty->print("name = %s, dtype = %d, variability = %d,"
+-	       " units = %d, dsize = %d, vlen = %d,"
+-	       " pad_length = %d, size = %d, on_c_heap = %s,"
+-	       " address = " INTPTR_FORMAT ","
+-	       " data address = " INTPTR_FORMAT "\n",
+-	       cname, dtype, variability(),
+-	       units(), dsize, vlen,
+-	       pad_length, size, is_on_c_heap() ? "TRUE":"FALSE",
+-	       psmp, valuep);
++               " units = %d, dsize = %d, vlen = %d,"
++               " pad_length = %d, size = %d, on_c_heap = %s,"
++               " address = " INTPTR_FORMAT ","
++               " data address = " INTPTR_FORMAT "\n",
++               cname, dtype, variability(),
++               units(), dsize, vlen,
++               pad_length, size, is_on_c_heap() ? "TRUE":"FALSE",
++               psmp, valuep);
+   }
+ 
+   // record the start of the entry and the location of the data field.
+   _pdep = pdep;
+   _valuep = valuep;
+-  
++
+   // mark the PerfData memory region as having been updated.
+   PerfMemory::mark_updated();
+ }
+@@ -305,7 +302,7 @@
+     _constants->append(p);
+     return;
+   }
+-    
++
+   if (sampled) {
+     if (_sampled == NULL) {
+       _sampled = new PerfDataList(25);
+@@ -378,7 +375,7 @@
+   PerfStringConstant* p = new PerfStringConstant(ns, name, s);
+ 
+   if (!p->is_valid()) {
+-    // allocation of native resources failed. 
++    // allocation of native resources failed.
+     delete p;
+     THROW_0(vmSymbols::java_lang_OutOfMemoryError());
+   }
+@@ -396,7 +393,7 @@
+   PerfLongConstant* p = new PerfLongConstant(ns, name, u, val);
+ 
+   if (!p->is_valid()) {
+-    // allocation of native resources failed. 
++    // allocation of native resources failed.
+     delete p;
+     THROW_0(vmSymbols::java_lang_OutOfMemoryError());
+   }
+@@ -419,7 +416,7 @@
+   PerfStringVariable* p = new PerfStringVariable(ns, name, max_length, s);
+ 
+   if (!p->is_valid()) {
+-    // allocation of native resources failed. 
++    // allocation of native resources failed.
+     delete p;
+     THROW_0(vmSymbols::java_lang_OutOfMemoryError());
+   }
+@@ -437,7 +434,7 @@
+   PerfLongVariable* p = new PerfLongVariable(ns, name, u, ival);
+ 
+   if (!p->is_valid()) {
+-    // allocation of native resources failed. 
++    // allocation of native resources failed.
+     delete p;
+     THROW_0(vmSymbols::java_lang_OutOfMemoryError());
+   }
+@@ -458,7 +455,7 @@
+   PerfLongVariable* p = new PerfLongVariable(ns, name, u, sp);
+ 
+   if (!p->is_valid()) {
+-    // allocation of native resources failed. 
++    // allocation of native resources failed.
+     delete p;
+     THROW_0(vmSymbols::java_lang_OutOfMemoryError());
+   }
+@@ -480,7 +477,7 @@
+   PerfLongVariable* p = new PerfLongVariable(ns, name, u, sh);
+ 
+   if (!p->is_valid()) {
+-    // allocation of native resources failed. 
++    // allocation of native resources failed.
+     delete p;
+     THROW_0(vmSymbols::java_lang_OutOfMemoryError());
+   }
+@@ -498,7 +495,7 @@
+   PerfLongCounter* p = new PerfLongCounter(ns, name, u, ival);
+ 
+   if (!p->is_valid()) {
+-    // allocation of native resources failed. 
++    // allocation of native resources failed.
+     delete p;
+     THROW_0(vmSymbols::java_lang_OutOfMemoryError());
+   }
+@@ -519,7 +516,7 @@
+   PerfLongCounter* p = new PerfLongCounter(ns, name, u, sp);
+ 
+   if (!p->is_valid()) {
+-    // allocation of native resources failed. 
++    // allocation of native resources failed.
+     delete p;
+     THROW_0(vmSymbols::java_lang_OutOfMemoryError());
+   }
+@@ -541,7 +538,7 @@
+   PerfLongCounter* p = new PerfLongCounter(ns, name, u, sh);
+ 
+   if (!p->is_valid()) {
+-    // allocation of native resources failed. 
++    // allocation of native resources failed.
+     delete p;
+     THROW_0(vmSymbols::java_lang_OutOfMemoryError());
+   }
+@@ -571,7 +568,7 @@
+ 
+ bool PerfDataList::by_name(void* name, PerfData* pd) {
+ 
+-  if (pd == NULL) 
++  if (pd == NULL)
+     return false;
+ 
+   return strcmp((const char*)name, pd->name()) == 0;
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/perfData.hpp openjdk/hotspot/src/share/vm/runtime/perfData.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/perfData.hpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/perfData.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)perfData.hpp	1.24 07/05/05 17:06:54 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ /* jvmstat global and subsystem counter name space - enumeration value
+@@ -94,12 +91,12 @@
+  *         - PerfLongVariant (Abstract)
+  *             - PerfLongVariable    (alias: PerfVariable)
+  *             - PerfLongCounter     (alias: PerfCounter)
+- *    
++ *
+  *     - PerfByteArray (Abstract)
+  *         - PerfString (Abstract)
+  *             - PerfStringVariable
+  *             - PerfStringConstant
+- * 
++ *
+  *
+  * As seen in the class hierarchy, the initially supported types are:
+  *
+@@ -111,7 +108,7 @@
+  *
+  * A PerfData subtype is not required to provide an implementation for
+  * each variability classification. For example, the String type provides
+- * Variable and Constant variablility classifications in the PerfStringVariable 
++ * Variable and Constant variablility classifications in the PerfStringVariable
+  * and PerfStringConstant classes, but does not provide a counter type.
+  *
+  * Performance data are also described by a unit of measure. Units allow
+@@ -141,7 +138,7 @@
+  *    sun.*           - unstable, unsupported interface
+  *
+  * In the above context, unstable is a measure of the interface support
+- * level, not the implementation stability level. 
++ * level, not the implementation stability level.
+  *
+  * Currently, instances of PerfData subtypes are considered to have
+  * a life time equal to that of the VM and are managed by the
+@@ -167,7 +164,7 @@
+  *   foo_counter->inc();
+  *
+  * Creating a performance counter that holds a variably change long
+- * data value with untis specified in U_Bytes in the "com.sun.ci 
++ * data value with untis specified in U_Bytes in the "com.sun.ci
+  * name space.
+  *
+  *   PerfLongVariable* bar_varible;
+@@ -225,8 +222,8 @@
+  * For additional uses of PerfData subtypes, see the utility classes
+  * PerfTraceTime and PerfTraceTimedEvent below.
+  *
+- * Always-on non-sampled counters can be created independent of 
+- * the UsePerfData flag. Counters will be created on the c-heap 
++ * Always-on non-sampled counters can be created independent of
++ * the UsePerfData flag. Counters will be created on the c-heap
+  * if UsePerfData is false.
+  *
+  * Until further noice, all PerfData objects should be created and
+@@ -285,7 +282,7 @@
+     PerfData(CounterNS ns, const char* name, Units u, Variability v);
+     ~PerfData();
+ 
+-    // create the entry for the PerfData item in the PerfData memory region. 
++    // create the entry for the PerfData item in the PerfData memory region.
+     // this region is maintained separately from the PerfData objects to
+     // facilitate its use by external processes.
+     void create_entry(BasicType dtype, size_t dsize, size_t dlen = 0);
+@@ -486,7 +483,7 @@
+ /*
+  * The PerfByteArray provides a PerfData subtype that allows the creation
+  * of a contiguous region of the PerfData memory region for storing a vector
+- * of bytes. This class is currently intended to be a base class for 
++ * of bytes. This class is currently intended to be a base class for
+  * the PerfString class, and cannot be instantiated directly.
+  */
+ class PerfByteArray : public PerfData {
+@@ -589,7 +586,7 @@
+ class PerfDataList : public CHeapObj {
+ 
+   private:
+-  
++
+     // GrowableArray implementation
+     typedef GrowableArray<PerfData*> PerfDataArray;
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/perfMemory.cpp openjdk/hotspot/src/share/vm/runtime/perfMemory.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/perfMemory.cpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/perfMemory.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)perfMemory.cpp	1.27 07/05/05 17:06:53 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -51,7 +48,7 @@
+   // resources it may be dependent on. Typically, the StatSampler
+   // is disengaged from the watcher thread when this method is called,
+   // but it is not disengaged if this method is invoked during a
+-  // VM abort. 
++  // VM abort.
+   //
+   if (!StatSampler::is_active())
+     PerfDataManager::destroy();
+@@ -74,11 +71,11 @@
+ 
+   if (PerfTraceMemOps) {
+     tty->print("PerfDataMemorySize = " SIZE_FORMAT ","
+-	       " os::vm_allocation_granularity = " SIZE_FORMAT ","
+-	       " adjusted size = " SIZE_FORMAT "\n",
+-	       PerfDataMemorySize,
+-	       os::vm_allocation_granularity(),
+-	       capacity);
++               " os::vm_allocation_granularity = " SIZE_FORMAT ","
++               " adjusted size = " SIZE_FORMAT "\n",
++               PerfDataMemorySize,
++               os::vm_allocation_granularity(),
++               capacity);
+   }
+ 
+   // allocate PerfData memory region
+@@ -106,9 +103,9 @@
+ 
+     if (PerfTraceMemOps) {
+       tty->print("PerfMemory created: address = " INTPTR_FORMAT ","
+-		 " size = " SIZE_FORMAT "\n",
+-		 (void*)_start,
+-		 _capacity);
++                 " size = " SIZE_FORMAT "\n",
++                 (void*)_start,
++                 _capacity);
+     }
+ 
+     _prologue = (PerfDataPrologue *)_start;
+@@ -225,13 +222,13 @@
+ // Returns the complete path including the file name of performance data file.
+ // Caller is expected to release the allocated memory.
+ char* PerfMemory::get_perfdata_file_path() {
+-  char* dest_file = NULL; 
+- 
+-  if (PerfDataSaveFile[0] != '\0') {
++  char* dest_file = NULL;
++
++  if (PerfDataSaveFile != NULL) {
+     // dest_file_name stores the validated file name if file_name
+     // contains %p which will be replaced by pid.
+     dest_file = NEW_C_HEAP_ARRAY(char, JVM_MAXPATHLEN);
+-    if(!Arguments::copy_expand_pid(PerfDataSaveFile, strlen(PerfDataSaveFile), 
++    if(!Arguments::copy_expand_pid(PerfDataSaveFile, strlen(PerfDataSaveFile),
+                                    dest_file, JVM_MAXPATHLEN)) {
+       FREE_C_HEAP_ARRAY(char, dest_file);
+       if (PrintMiscellaneous && Verbose) {
+@@ -246,7 +243,6 @@
+   dest_file = NEW_C_HEAP_ARRAY(char, PERFDATA_FILENAME_LEN);
+   jio_snprintf(dest_file, PERFDATA_FILENAME_LEN,
+                "%s_%d", PERFDATA_NAME, os::current_process_id());
+-   
++
+   return dest_file;
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/perfMemory.hpp openjdk/hotspot/src/share/vm/runtime/perfMemory.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/perfMemory.hpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/perfMemory.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)perfMemory.hpp	1.23 07/05/05 17:06:54 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ /*
+@@ -44,8 +41,8 @@
+  * to a machine with a native byte order different from that of the
+  * originating machine.
+  */
+-#define PERFDATA_BIG_ENDIAN	0
+-#define PERFDATA_LITTLE_ENDIAN	1
++#define PERFDATA_BIG_ENDIAN     0
++#define PERFDATA_LITTLE_ENDIAN  1
+ 
+ /*
+  * The PerfDataPrologue structure is known by the PerfDataBuffer Java class
+@@ -79,7 +76,7 @@
+   jint entry_length;      // entry length in bytes
+   jint name_offset;       // offset of the data item name
+   jint vector_length;     // length of the vector. If 0, then scalar
+-  jbyte data_type;        // type of the data item - 
++  jbyte data_type;        // type of the data item -
+                           // 'B','Z','J','I','S','C','D','F','V','L','['
+   jbyte flags;            // flags indicating misc attributes
+   jbyte data_units;       // unit of measure for the data type
+@@ -100,15 +97,15 @@
+ // Prefix of performance data file.
+ static const char PERFDATA_NAME[] = "hsperfdata";
+ 
+-// UINT_CHARS contains the number of characters holding a process id 
+-// (i.e. pid). pid is defined as unsigned "int" so the maximum possible pid value 
+-// would be 2^32 - 1 (4294967295) which can be represented as a 10 characters 
+-// string. 
+-static const size_t UINT_CHARS = 10; 
++// UINT_CHARS contains the number of characters holding a process id
++// (i.e. pid). pid is defined as unsigned "int" so the maximum possible pid value
++// would be 2^32 - 1 (4294967295) which can be represented as a 10 characters
++// string.
++static const size_t UINT_CHARS = 10;
+ 
+-// Add 1 for the '_' character between PERFDATA_NAME and pid. The '\0' terminating 
++// Add 1 for the '_' character between PERFDATA_NAME and pid. The '\0' terminating
+ // character will be included in the sizeof(PERFDATA_NAME) operation.
+-static const size_t PERFDATA_FILENAME_LEN = sizeof(PERFDATA_NAME) + 
++static const size_t PERFDATA_FILENAME_LEN = sizeof(PERFDATA_NAME) +
+                                             UINT_CHARS + 1;
+ 
+ /* the PerfMemory class manages creation, destruction,
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/prefetch.hpp openjdk/hotspot/src/share/vm/runtime/prefetch.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/prefetch.hpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/prefetch.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)prefetch.hpp	1.9 07/05/05 17:06:53 JVM"
+-#endif
+ /*
+  * Copyright 2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // If calls to prefetch methods are in a loop, the loop should be cloned
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/reflectionCompat.hpp openjdk/hotspot/src/share/vm/runtime/reflectionCompat.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/reflectionCompat.hpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/reflectionCompat.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)reflectionCompat.hpp	1.14 07/05/05 17:06:54 JVM"
+-#endif
+ /*
+  * Copyright 2001 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // During the development of the JDK 1.4 reflection implementation
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/reflection.cpp openjdk/hotspot/src/share/vm/runtime/reflection.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/reflection.cpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/reflection.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)reflection.cpp	1.178 07/05/23 10:54:08 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -107,20 +104,20 @@
+       switch (current_type) {
+         case T_BYTE:
+           value->s = (jshort) value->b;
+-	  return;
++          return;
+       }
+       break;  // fail
+     case T_INT:
+       switch (current_type) {
+         case T_BYTE:
+           value->i = (jint) value->b;
+-	  return;
++          return;
+         case T_CHAR:
+           value->i = (jint) value->c;
+-	  return;
++          return;
+         case T_SHORT:
+           value->i = (jint) value->s;
+-	  return;
++          return;
+       }
+       break;  // fail
+     case T_LONG:
+@@ -142,38 +139,38 @@
+     case T_FLOAT:
+       switch (current_type) {
+         case T_BYTE:
+-          value->f = (jfloat) value->b; 
++          value->f = (jfloat) value->b;
+           return;
+         case T_CHAR:
+-          value->f = (jfloat) value->c; 
++          value->f = (jfloat) value->c;
+           return;
+         case T_SHORT:
+-          value->f = (jfloat) value->s; 
++          value->f = (jfloat) value->s;
+           return;
+         case T_INT:
+-          value->f = (jfloat) value->i; 
++          value->f = (jfloat) value->i;
+           return;
+         case T_LONG:
+-          value->f = (jfloat) value->j; 
++          value->f = (jfloat) value->j;
+           return;
+       }
+       break;  // fail
+     case T_DOUBLE:
+       switch (current_type) {
+         case T_BYTE:
+-          value->d = (jdouble) value->b; 
++          value->d = (jdouble) value->b;
+           return;
+         case T_CHAR:
+-          value->d = (jdouble) value->c; 
++          value->d = (jdouble) value->c;
+           return;
+         case T_SHORT:
+-          value->d = (jdouble) value->s; 
++          value->d = (jdouble) value->s;
+           return;
+         case T_INT:
+-          value->d = (jdouble) value->i; 
++          value->d = (jdouble) value->i;
+           return;
+         case T_FLOAT:
+-          value->d = (jdouble) value->f; 
++          value->d = (jdouble) value->f;
+           return;
+         case T_LONG:
+           value->d = (jdouble) value->j;
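The two hunks above only strip trailing whitespace, but the code they pass through, Reflection::widen, is worth seeing whole: it promotes a jvalue member according to the JLS widening rules. A self-contained sketch with simplified stand-ins for jvalue and BasicType (two of the six target types shown):

#include <cstdint>
#include <stdexcept>

// Simplified stand-ins for the VM's jvalue/BasicType; illustration only.
union JValue { int8_t b; uint16_t c; int16_t s; int32_t i; int64_t j; float f; double d; };
enum BasicType { T_BYTE, T_CHAR, T_SHORT, T_INT, T_LONG, T_FLOAT, T_DOUBLE };

// Promote the value stored for 'from' into the representation for 'to',
// mirroring the switch structure in the hunk above.
void widen(JValue* v, BasicType from, BasicType to) {
  switch (to) {
    case T_INT:
      switch (from) {
        case T_BYTE:  v->i = v->b; return;
        case T_CHAR:  v->i = v->c; return;
        case T_SHORT: v->i = v->s; return;
        default: break;
      }
      break;
    case T_DOUBLE:
      switch (from) {
        case T_INT:   v->d = v->i; return;
        case T_FLOAT: v->d = v->f; return;
        default: break;
      }
      break;
    default: break;
  }
  throw std::invalid_argument("argument type mismatch");  // IllegalArgumentException in the VM
}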
+@@ -298,7 +295,7 @@
+ 
+ oop Reflection::basic_type_arrayklass_to_mirror(klassOop basic_type_arrayklass, TRAPS) {
+   BasicType type = typeArrayKlass::cast(basic_type_arrayklass)->element_type();
+-  return SystemDictionary::java_mirror(type);
++  return Universe::java_mirror(type);
+ }
+ 
+ 
+@@ -404,19 +401,19 @@
+   // IllegalAccessException and return false if not.
+ 
+   // The "client" is the class associated with the nearest real frame
+-  // getCallerClass already skips Method.invoke frames, so pass 0 in 
++  // getCallerClass already skips Method.invoke frames, so pass 0 in
+   // that case (same as classic).
+   ResourceMark rm(THREAD);
+   assert(THREAD->is_Java_thread(), "sanity check");
+   klassOop client_class = ((JavaThread *)THREAD)->security_get_caller_class(is_method_invoke ? 0 : 1);
+-  
++
+   if (client_class != field_class) {
+     if (!verify_class_access(client_class, field_class, false)
+-        || !verify_field_access(client_class, 
+-                                field_class, 
+-                                field_class, 
+-                                acc, 
+-                                false)) {     
++        || !verify_field_access(client_class,
++                                field_class,
++                                field_class,
++                                acc,
++                                false)) {
+       THROW_(vmSymbols::java_lang_IllegalAccessException(), false);
+     }
+   }
+@@ -426,7 +423,7 @@
+   if (acc.is_protected()) {
+     if (target_class != client_class) {
+       if (!is_same_class_package(client_class, field_class)) {
+-        if (!Klass::cast(target_class)->is_subclass_of(client_class)) {          
++        if (!Klass::cast(target_class)->is_subclass_of(client_class)) {
+           THROW_(vmSymbols::java_lang_IllegalAccessException(), false);
+         }
+       }
+@@ -438,7 +435,7 @@
+ }
+ 
+ 
+-bool Reflection::verify_class_access(klassOop current_class, klassOop new_class, bool classloader_only) {    
++bool Reflection::verify_class_access(klassOop current_class, klassOop new_class, bool classloader_only) {
+   // Verify that current_class can access new_class.  If the classloader_only
+   // flag is set, we automatically allow any accesses in which current_class
+   // doesn't have a classloader.
+@@ -457,14 +454,14 @@
+   }
+ 
+   return can_relax_access_check_for(current_class, new_class, classloader_only);
+-}    
++}
+ 
+ bool Reflection::can_relax_access_check_for(
+     klassOop accessor, klassOop accessee, bool classloader_only) {
+   instanceKlass* accessor_ik = instanceKlass::cast(accessor);
+   instanceKlass* accessee_ik  = instanceKlass::cast(accessee);
+-  if (RelaxAccessControlCheck || 
+-      (accessor_ik->major_version() < JAVA_1_5_VERSION && 
++  if (RelaxAccessControlCheck ||
++      (accessor_ik->major_version() < JAVA_1_5_VERSION &&
+        accessee_ik->major_version() < JAVA_1_5_VERSION)) {
+     return classloader_only &&
+       Verifier::relax_verify_for(accessor_ik->class_loader()) &&
+@@ -475,21 +472,21 @@
+   }
+ }
+ 
+-bool Reflection::verify_field_access(klassOop current_class, 
++bool Reflection::verify_field_access(klassOop current_class,
+                                      klassOop resolved_class,
+-                                     klassOop field_class, 
+-                                     AccessFlags access, 
+-                                     bool classloader_only, 
+-                                     bool protected_restriction) {  
+-  // Verify that current_class can access a field of field_class, where that  
+-  // field's access bits are "access".  We assume that we've already verified 
++                                     klassOop field_class,
++                                     AccessFlags access,
++                                     bool classloader_only,
++                                     bool protected_restriction) {
++  // Verify that current_class can access a field of field_class, where that
++  // field's access bits are "access".  We assume that we've already verified
+   // that current_class can access field_class.
+   //
+   // If the classloader_only flag is set, we automatically allow any accesses
+   // in which current_class doesn't have a classloader.
+   //
+   // "resolved_class" is the runtime type of "field_class". Sometimes we don't
+-  // need this distinction (e.g. if all we have is the runtime type, or during 
++  // need this distinction (e.g. if all we have is the runtime type, or during
+   // class file parsing when we only care about the static type); in that case
+   // callers should ensure that resolved_class == field_class.
+   //
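As a cross-check on the comment above, the access rule it describes reduces to a small predicate. This is my own simplification, not HotSpot's code:

// Essence of the field-access rule described above: public always passes;
// otherwise same class, same package, or protected reached via a subclass.
bool can_access_field(bool is_public, bool is_protected,
                      bool same_class, bool same_package,
                      bool accessor_is_subclass) {
  if (is_public || same_class || same_package) return true;
  if (is_protected && accessor_is_subclass) return true;  // protected via subclass
  return false;                                           // caller raises IllegalAccessException
}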
+@@ -533,7 +530,7 @@
+ bool Reflection::is_same_class_package(klassOop class1, klassOop class2) {
+   return instanceKlass::cast(class1)->is_same_class_package(class2);
+ }
+- 
++
+ 
+ // Checks that the 'outer' klass has declared 'inner' as being an inner klass. If not,
+ // throw an incompatible class change exception
+@@ -545,7 +542,7 @@
+   constantPoolHandle cp   (THREAD, outer->constants());
+   for(int i = 0; i < icls->length(); i += 4) {
+      int ioff = icls->ushort_at(i + inner_class_info_index);
+-     int ooff = icls->ushort_at(i + outer_class_info_index);         
++     int ooff = icls->ushort_at(i + outer_class_info_index);
+ 
+      if (ioff != 0 && ooff != 0) {
+         klassOop o = cp->klass_at(ooff, CHECK);
+@@ -562,11 +559,11 @@
+   ResourceMark rm(THREAD);
+   Exceptions::fthrow(
+     THREAD_AND_LOCATION,
+-    vmSymbolHandles::java_lang_IncompatibleClassChangeError(), 
+-    "%s and %s disagree on InnerClasses attribute", 
+-    outer->external_name(), 
++    vmSymbolHandles::java_lang_IncompatibleClassChangeError(),
++    "%s and %s disagree on InnerClasses attribute",
++    outer->external_name(),
+     inner->external_name()
+-  );      
++  );
+ }
+ 
+ // Utility method converting a single SignatureStream element into java.lang.Class instance
+@@ -583,7 +580,7 @@
+       oop protection_domain = instanceKlass::cast(method->method_holder())->protection_domain();
+       klassOop k = SystemDictionary::resolve_or_fail(
+                                        symbolHandle(THREAD, name),
+-                                       Handle(THREAD, loader), 
++                                       Handle(THREAD, loader),
+                                        Handle(THREAD, protection_domain),
+                                        true, CHECK_NULL);
+       if (TraceClassResolution) {
+@@ -625,7 +622,7 @@
+   // Basic types
+   BasicType type = vmSymbols::signature_type(signature());
+   if (type != T_OBJECT) {
+-    return Handle(THREAD, SystemDictionary::java_mirror(type));
++    return Handle(THREAD, Universe::java_mirror(type));
+   }
+ 
+   oop loader = instanceKlass::cast(k())->class_loader();
+@@ -659,13 +656,13 @@
+   oop return_type_oop = NULL;
+   objArrayHandle parameter_types = get_parameter_types(method, parameter_count, &return_type_oop, CHECK_NULL);
+   if (parameter_types.is_null() || return_type_oop == NULL) return NULL;
+-  
++
+   Handle return_type(THREAD, return_type_oop);
+ 
+   objArrayHandle exception_types = get_exception_types(method, CHECK_NULL);
+ 
+   if (exception_types.is_null()) return NULL;
+-  
++
+   symbolHandle method_name(THREAD, method->name());
+   Handle name;
+   if (intern_name) {
+@@ -718,14 +715,14 @@
+   int parameter_count = ArgumentCount(signature).size();
+   objArrayHandle parameter_types = get_parameter_types(method, parameter_count, NULL, CHECK_NULL);
+   if (parameter_types.is_null()) return NULL;
+-  
++
+   objArrayHandle exception_types = get_exception_types(method, CHECK_NULL);
+   if (exception_types.is_null()) return NULL;
+-  
++
+   int modifiers = method->access_flags().as_int() & JVM_RECOGNIZED_METHOD_MODIFIERS;
+ 
+   Handle ch = java_lang_reflect_Constructor::create(CHECK_NULL);
+-  
++
+   java_lang_reflect_Constructor::set_clazz(ch(), holder->java_mirror());
+   java_lang_reflect_Constructor::set_slot(ch(), slot);
+   java_lang_reflect_Constructor::set_parameter_types(ch(), parameter_types());
+@@ -756,12 +753,12 @@
+     oop name_oop = StringTable::intern(field_name(), CHECK_NULL);
+     name = Handle(THREAD, name_oop);
+   } else {
+-    name = java_lang_String::create_from_symbol(field_name, CHECK_NULL);  
++    name = java_lang_String::create_from_symbol(field_name, CHECK_NULL);
+   }
+   symbolHandle signature (THREAD, fd->signature());
+   KlassHandle  holder    (THREAD, fd->field_holder());
+   Handle type = new_type(signature, holder, CHECK_NULL);
+-  Handle rh  = java_lang_reflect_Field::create(CHECK_NULL);  
++  Handle rh  = java_lang_reflect_Field::create(CHECK_NULL);
+ 
+   java_lang_reflect_Field::set_clazz(rh(), Klass::cast(fd->field_holder())->java_mirror());
+   java_lang_reflect_Field::set_slot(rh(), fd->index());
+@@ -793,49 +790,49 @@
+ 
+ #ifdef SUPPORT_OLD_REFLECTION
+ 
+-methodHandle Reflection::resolve_interface_call(instanceKlassHandle klass, methodHandle method, 
+-						KlassHandle recv_klass, Handle receiver, TRAPS) {
++methodHandle Reflection::resolve_interface_call(instanceKlassHandle klass, methodHandle method,
++                                                KlassHandle recv_klass, Handle receiver, TRAPS) {
+   assert(!method.is_null() , "method should not be null");
+ 
+   CallInfo info;
+   symbolHandle signature (THREAD, method->signature());
+   symbolHandle name      (THREAD, method->name());
+-  LinkResolver::resolve_interface_call(info, receiver, recv_klass, klass, 
+-				       name, signature,
+-				       KlassHandle(), false, true, 
+-				       CHECK_(methodHandle()));
++  LinkResolver::resolve_interface_call(info, receiver, recv_klass, klass,
++                                       name, signature,
++                                       KlassHandle(), false, true,
++                                       CHECK_(methodHandle()));
+   return info.selected_method();
+ }
+ 
+ 
+-oop Reflection::invoke(instanceKlassHandle klass, methodHandle reflected_method, 
+-		       Handle receiver, bool override, objArrayHandle ptypes, 
+-		       BasicType rtype, objArrayHandle args, bool is_method_invoke, TRAPS) {
++oop Reflection::invoke(instanceKlassHandle klass, methodHandle reflected_method,
++                       Handle receiver, bool override, objArrayHandle ptypes,
++                       BasicType rtype, objArrayHandle args, bool is_method_invoke, TRAPS) {
+   ResourceMark rm(THREAD);
+-  
++
+   methodHandle method;      // actual method to invoke
+   KlassHandle target_klass; // target klass, receiver's klass for non-static
+-  
++
+   // Ensure klass is initialized
+   klass->initialize(CHECK_NULL);
+ 
+   bool is_static = reflected_method->is_static();
+   if (is_static) {
+     // ignore receiver argument
+-    method = reflected_method; 
++    method = reflected_method;
+     target_klass = klass;
+   } else {
+     // check for null receiver
+-    if (receiver.is_null()) { 
++    if (receiver.is_null()) {
+       THROW_0(vmSymbols::java_lang_NullPointerException());
+     }
+     // Check class of receiver against class declaring method
+-    if (!receiver->is_a(klass())) { 
++    if (!receiver->is_a(klass())) {
+       THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "object is not an instance of declaring class");
+     }
+     // target klass is receiver's klass
+-    target_klass = KlassHandle(THREAD, receiver->klass()); 
+-    // no need to resolve if method is private or <init> 
++    target_klass = KlassHandle(THREAD, receiver->klass());
++    // no need to resolve if method is private or <init>
+     if (reflected_method->is_private() || reflected_method->name() == vmSymbols::object_initializer_name()) {
+       method = reflected_method;
+     } else {
+@@ -846,7 +843,7 @@
+           // new default: 6531596
+           // Match resolution errors with those thrown due to reflection inlining
+           // Linktime resolution & IllegalAccessCheck already done by Class.getMethod()
+-	  method = resolve_interface_call(klass, reflected_method, target_klass, receiver, THREAD); 
++          method = resolve_interface_call(klass, reflected_method, target_klass, receiver, THREAD);
+           if (HAS_PENDING_EXCEPTION) {
+           // Method resolution threw an exception; wrap it in an InvocationTargetException
+             oop resolution_exception = PENDING_EXCEPTION;
+@@ -857,7 +854,7 @@
+                 &args);
+           }
+         } else {
+-	  method = resolve_interface_call(klass, reflected_method, target_klass, receiver, CHECK_(NULL));
++          method = resolve_interface_call(klass, reflected_method, target_klass, receiver, CHECK_(NULL));
+         }
+       }  else {
+         // if the method can be overridden, we resolve using the vtable index.
+@@ -916,7 +913,7 @@
+     if (!(klass->is_public() && reflected_method->is_public())) {
+       bool access = Reflection::reflect_check_access(klass(), reflected_method->access_flags(), target_klass(), is_method_invoke, CHECK_NULL);
+       if (!access) {
+-	return NULL; // exception
++        return NULL; // exception
+       }
+     }
+   }
+@@ -929,7 +926,7 @@
+   if (ptypes->length() != args_len) {
+     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "wrong number of arguments");
+   }
+-  
++
+   // Create object to contain parameters for the JavaCall
+   JavaCallArguments java_args(method->size_of_parameters());
+ 
+@@ -949,12 +946,12 @@
+       }
+       switch (ptype) {
+         case T_BOOLEAN:     java_args.push_int(value.z);    break;
+-        case T_CHAR:        java_args.push_int(value.c);    break;          
+-        case T_BYTE:        java_args.push_int(value.b);    break;          
+-        case T_SHORT:       java_args.push_int(value.s);    break;          
+-        case T_INT:         java_args.push_int(value.i);    break;                  
++        case T_CHAR:        java_args.push_int(value.c);    break;
++        case T_BYTE:        java_args.push_int(value.b);    break;
++        case T_SHORT:       java_args.push_int(value.s);    break;
++        case T_INT:         java_args.push_int(value.i);    break;
+         case T_LONG:        java_args.push_long(value.j);   break;
+-        case T_FLOAT:       java_args.push_float(value.f);  break;          
++        case T_FLOAT:       java_args.push_float(value.f);  break;
+         case T_DOUBLE:      java_args.push_double(value.d); break;
+         default:
+           THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "argument type mismatch");
+@@ -967,13 +964,13 @@
+         }
+       }
+       Handle arg_handle(THREAD, arg);         // Create handle for argument
+-      java_args.push_oop(arg_handle); // Push handle      
++      java_args.push_oop(arg_handle); // Push handle
+     }
+   }
+ 
+   assert(java_args.size_of_parameters() == method->size_of_parameters(), "just checking");
+ 
+-  // All oops (including receiver) is passed in as Handles. An potential oop is returned as an 
++  // All oops (including receiver) are passed in as Handles. A potential oop is returned as an
+   // oop (i.e., NOT as a handle)
+   JavaValue result(rtype);
+   JavaCalls::call(&result, method, &java_args, THREAD);
+@@ -1038,7 +1035,7 @@
+ 
+ oop Reflection::new_field(FieldStream* st, TRAPS) {
+   symbolHandle field_name(THREAD, st->name());
+-  Handle name = java_lang_String::create_from_symbol(field_name, CHECK_NULL);  
++  Handle name = java_lang_String::create_from_symbol(field_name, CHECK_NULL);
+   symbolHandle signature(THREAD, st->signature());
+   Handle type = new_type(signature, st->klass(), CHECK_NULL);
+   Handle rh  = java_lang_reflect_Field::create(CHECK_NULL);
+@@ -1059,12 +1056,12 @@
+   if (field_mirror.is_null()) {
+     THROW_(vmSymbols::java_lang_NullPointerException(), false);
+   }
+-    
++
+   instanceKlassHandle klass (THREAD, java_lang_Class::as_klassOop(java_lang_reflect_Field::clazz(field_mirror())));
+   int                 slot  = java_lang_reflect_Field::slot(field_mirror());
+ 
+   // Ensure klass is initialized
+-  klass->initialize(CHECK_false);      
++  klass->initialize(CHECK_false);
+   fd->initialize(klass(), slot);
+ 
+   bool is_static = fd->is_static();
+@@ -1078,7 +1075,7 @@
+     if (receiver.is_null()) {
+       THROW_(vmSymbols::java_lang_NullPointerException(), false);
+     }
+-    if (!receiver->is_a(klass())) {      
++    if (!receiver->is_a(klass())) {
+       THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), "object is not an instance of declaring class", false);
+     }
+     receiver_klass = KlassHandle(THREAD, receiver->klass());
+@@ -1097,7 +1094,7 @@
+   if (check_final && fd->is_final()) {
+     // In 1.3 we always throw an error when attempting to set a final field.
+     // In 1.2.x, this was allowed if the override bit was set by calling Field.setAccessible(true).
+-    // We currently maintain backwards compatibility. See bug 4250960. 
++    // We currently maintain backwards compatibility. See bug 4250960.
+     bool strict_final_check = !JDK_Version::is_jdk12x_version();
+     if (strict_final_check || !java_lang_reflect_Field::override(field_mirror())) {
+       THROW_MSG_(vmSymbols::java_lang_IllegalAccessException(), "field is final", false);
+@@ -1179,7 +1176,7 @@
+       receiver->long_field_put(offset, value->j);
+       break;
+     case T_OBJECT:
+-    case T_ARRAY: {      
++    case T_ARRAY: {
+       Handle obj(THREAD, (oop) value->l);
+       if (obj.not_null()) {
+         symbolHandle signature(THREAD, fd->signature());
+@@ -1223,7 +1220,7 @@
+ }
+ 
+ 
+-objArrayOop Reflection::reflect_fields(oop mirror, jint which, TRAPS) {  
++objArrayOop Reflection::reflect_fields(oop mirror, jint which, TRAPS) {
+   // Exclude primitive types and array types
+   if (java_lang_Class::is_primitive(mirror)
+       || Klass::cast(java_lang_Class::as_klassOop(mirror))->oop_is_array()) {
+@@ -1254,7 +1251,7 @@
+   objArrayHandle result (THREAD, r);
+ 
+   // Fill in results backwards
+-  { 
++  {
+     for (FieldStream st(k, local_fields_only, false); !st.eos(); st.next()) {
+       if (local_fields_only || st.access_flags().is_public()) {
+         oop field = new_field(&st, CHECK_NULL);
+@@ -1271,8 +1268,8 @@
+   if (java_lang_Class::is_primitive(mirror))  return NULL;
+   klassOop klass = java_lang_Class::as_klassOop(mirror);
+   if (Klass::cast(klass)->oop_is_array() && which == MEMBER_DECLARED)  return NULL;
+-  
+-  if (Klass::cast(java_lang_Class::as_klassOop(mirror))->oop_is_array()) {    
++
++  if (Klass::cast(java_lang_Class::as_klassOop(mirror))->oop_is_array()) {
+     klass = SystemDictionary::object_klass();
+   }
+   instanceKlassHandle h_k(THREAD, klass);
+@@ -1329,7 +1326,7 @@
+         }
+       }
+       break;
+-    default: 
++    default:
+       break;
+   }
+   return NULL;
+@@ -1339,13 +1336,13 @@
+ objArrayOop Reflection::reflect_methods(oop mirror, jint which, TRAPS) {
+   // Exclude primitive types
+   if (java_lang_Class::is_primitive(mirror) ||
+-     (Klass::cast(java_lang_Class::as_klassOop(mirror))->oop_is_array() && (which == MEMBER_DECLARED))) {    
++     (Klass::cast(java_lang_Class::as_klassOop(mirror))->oop_is_array() && (which == MEMBER_DECLARED))) {
+     klassOop klass = SystemDictionary::reflect_method_klass();
+     return oopFactory::new_objArray(klass, 0, CHECK_NULL);  // Return empty array
+   }
+ 
+   klassOop klass = java_lang_Class::as_klassOop(mirror);
+-  if (Klass::cast(java_lang_Class::as_klassOop(mirror))->oop_is_array()) {    
++  if (Klass::cast(java_lang_Class::as_klassOop(mirror))->oop_is_array()) {
+     klass = SystemDictionary::object_klass();
+   }
+   instanceKlassHandle h_k(THREAD, klass);
+@@ -1367,7 +1364,7 @@
+         int count = 0;
+         {
+           for (MethodStream st(h_k, false, false); !st.eos(); st.next()) {
+-            methodOop m = st.method();               
++            methodOop m = st.method();
+             // For interfaces include static initializers since classic does that!
+             if (include_clinit || (!m->is_initializer() && m->is_public() && !m->is_overridden_in(h_k()))) {
+               count++;
+@@ -1375,35 +1372,35 @@
+           }
+         }
+ 
+-        // Allocate result        
++        // Allocate result
+         klassOop klass = SystemDictionary::reflect_method_klass();
+         objArrayOop r = oopFactory::new_objArray(klass, count, CHECK_NULL);
+         objArrayHandle h_result (THREAD, r);
+ 
+         // Fill in results backwards
+         {
+-          // First the non-static public methods          
++          // First the non-static public methods
+           for (MethodStream st(h_k, false, false); !st.eos(); st.next()) {
+             methodHandle m (THREAD, st.method());
+             if (!m->is_static() && !m->is_initializer() && m->is_public() && !m->is_overridden_in(h_k())) {
+               oop method = new_method(m, false, false, CHECK_NULL);
+               if (method == NULL) {
+                 return NULL;
+-              } else {                
++              } else {
+                 h_result->obj_at_put(--count, method);
+               }
+             }
+           }
+         }
+         {
+-          // Then the static public methods          
++          // Then the static public methods
+           for (MethodStream st(h_k, false, !is_interface); !st.eos(); st.next()) {
+             methodHandle m (THREAD, st.method());
+             if (m->is_static() && (include_clinit || (!m->is_initializer()) && m->is_public() && !m->is_overridden_in(h_k()))) {
+               oop method = new_method(m, false, false, CHECK_NULL);
+               if (method == NULL) {
+                 return NULL;
+-              } else {                
++              } else {
+                 h_result->obj_at_put(--count, method);
+               }
+             }
+@@ -1414,9 +1411,9 @@
+         return h_result();
+       }
+ 
+-    case MEMBER_DECLARED: 
++    case MEMBER_DECLARED:
+       {
+-        // Count all methods 
++        // Count all methods
+         int count = 0;
+         {
+           for (MethodStream st(h_k, true, !is_interface); !st.eos(); st.next()) {
+@@ -1426,7 +1423,7 @@
+             }
+           }
+         }
+-        // Allocate result        
++        // Allocate result
+         klassOop klass = SystemDictionary::reflect_method_klass();
+         objArrayOop r = oopFactory::new_objArray(klass, count, CHECK_NULL);
+         objArrayHandle h_result (THREAD, r);
+@@ -1462,8 +1459,8 @@
+   if (prim || klass->is_interface() || klass->oop_is_array()) return NULL;
+ 
+   // Must be instance klass
+-  instanceKlassHandle h_k(THREAD, java_lang_Class::as_klassOop(mirror));  
+-  
++  instanceKlassHandle h_k(THREAD, java_lang_Class::as_klassOop(mirror));
++
+   // Ensure klass is linked (need not be initialized)
+   h_k->link_class(CHECK_NULL);
+ 
+@@ -1487,16 +1484,16 @@
+   // Exclude primitive, interface and array types
+   bool prim  = java_lang_Class::is_primitive(mirror);
+   Klass* k = prim ? NULL : Klass::cast(java_lang_Class::as_klassOop(mirror));
+-  if (prim || k->is_interface() || k->oop_is_array()) {        
++  if (prim || k->is_interface() || k->oop_is_array()) {
+     return oopFactory::new_objArray(SystemDictionary::reflect_constructor_klass(), 0, CHECK_NULL);  // Return empty array
+   }
+ 
+   // Must be instanceKlass at this point
+-  instanceKlassHandle h_k(THREAD, java_lang_Class::as_klassOop(mirror));  
++  instanceKlassHandle h_k(THREAD, java_lang_Class::as_klassOop(mirror));
+ 
+   // Ensure klass is linked (need not be initialized)
+   h_k->link_class(CHECK_NULL);
+-  
++
+   bool local_only = (which == MEMBER_DECLARED);
+   int count = 0;
+   {
+@@ -1513,7 +1510,7 @@
+   klassOop klass = SystemDictionary::resolve_or_fail(name, true, CHECK_NULL);
+   objArrayOop r = oopFactory::new_objArray(klass, count, CHECK_NULL);
+   objArrayHandle h_result (THREAD, r);
+- 
++
+   // Fill in results backwards
+   {
+     for (MethodStream st(h_k, true, true); !st.eos(); st.next()) {
+@@ -1533,7 +1530,7 @@
+ }
+ 
+ 
+-// This would be nicer if, say, java.lang.reflect.Method was a subclass 
++// This would be nicer if, say, java.lang.reflect.Method was a subclass
+ // of java.lang.reflect.Constructor
+ 
+ oop Reflection::invoke_method(oop method_mirror, Handle receiver, objArrayHandle args, TRAPS) {
+@@ -1544,7 +1541,7 @@
+ 
+   oop return_type_mirror = java_lang_reflect_Method::return_type(method_mirror);
+   BasicType rtype;
+-  if (java_lang_Class::is_primitive(return_type_mirror)) { 
++  if (java_lang_Class::is_primitive(return_type_mirror)) {
+     rtype = basic_type_mirror_to_basic_type(return_type_mirror, CHECK_NULL);
+   } else {
+     rtype = T_OBJECT;
+@@ -1566,7 +1563,7 @@
+   bool override          = java_lang_reflect_Constructor::override(constructor_mirror) != 0;
+   objArrayHandle ptypes(THREAD, objArrayOop(java_lang_reflect_Constructor::parameter_types(constructor_mirror)));
+ 
+-  instanceKlassHandle klass(THREAD, java_lang_Class::as_klassOop(mirror));  
++  instanceKlassHandle klass(THREAD, java_lang_Class::as_klassOop(mirror));
+   if (!klass->methods()->is_within_bounds(slot)) {
+     THROW_MSG_0(vmSymbols::java_lang_InternalError(), "invoke");
+   }
+@@ -1574,7 +1571,7 @@
+   assert(method->name() == vmSymbols::object_initializer_name(), "invalid constructor");
+ 
+   // Make sure klass gets initialized
+-  klass->initialize(CHECK_NULL);      
++  klass->initialize(CHECK_NULL);
+ 
+   // Create new instance (the receiver)
+   klass->check_valid_for_instantiation(false, CHECK_NULL);
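Among the reflection.cpp hunks above, the argument-marshalling switch (the java_args.push_* block) is the core of Reflection::invoke: unboxed boolean, char, byte and short values all travel in int slots. A reduced sketch, with a hypothetical ArgList standing in for JavaCallArguments:

#include <cstdint>
#include <vector>

enum BasicType { T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT, T_LONG, T_FLOAT, T_DOUBLE };
union JValue { bool z; uint16_t c; int8_t b; int16_t s; int32_t i; int64_t j; float f; double d; };

struct ArgList {                                 // stand-in for JavaCallArguments
  std::vector<int64_t> ints; std::vector<double> fps;
  void push_int(int32_t v)   { ints.push_back(v); }
  void push_long(int64_t v)  { ints.push_back(v); }
  void push_float(float v)   { fps.push_back(v); }
  void push_double(double v) { fps.push_back(v); }
};

bool push_arg(ArgList& args, BasicType t, const JValue& v) {
  switch (t) {                                   // sub-int types widen to int slots
    case T_BOOLEAN: args.push_int(v.z);    return true;
    case T_CHAR:    args.push_int(v.c);    return true;
    case T_BYTE:    args.push_int(v.b);    return true;
    case T_SHORT:   args.push_int(v.s);    return true;
    case T_INT:     args.push_int(v.i);    return true;
    case T_LONG:    args.push_long(v.j);   return true;
    case T_FLOAT:   args.push_float(v.f);  return true;
    case T_DOUBLE:  args.push_double(v.d); return true;
    default:        return false;                // caller throws IllegalArgumentException
  }
}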
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/reflection.hpp openjdk/hotspot/src/share/vm/runtime/reflection.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/reflection.hpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/reflection.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)reflection.hpp	1.46 07/05/05 17:06:54 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,11 +19,11 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Class Reflection contains utility methods needed for implementing the
+-// reflection api. 
++// reflection api.
+ //
+ // Used by functions in the JVM interface.
+ //
+@@ -41,7 +38,7 @@
+  private:
+   // Access checking
+   static bool reflect_check_access(klassOop field_class, AccessFlags acc, klassOop target_class, bool is_method_invoke, TRAPS);
+-  
++
+   // Conversion
+   static klassOop basic_type_mirror_to_arrayklass(oop basic_type_mirror, TRAPS);
+   static oop      basic_type_arrayklass_to_mirror(klassOop basic_type_arrayklass, TRAPS);
+@@ -83,11 +80,11 @@
+   // Verification
+   static bool     verify_class_access(klassOop current_class, klassOop new_class, bool classloader_only);
+ 
+-  static bool     verify_field_access(klassOop current_class, 
++  static bool     verify_field_access(klassOop current_class,
+                                       klassOop resolved_class,
+-                                      klassOop field_class, 
+-                                      AccessFlags access, 
+-                                      bool classloader_only, 
++                                      klassOop field_class,
++                                      AccessFlags access,
++                                      bool classloader_only,
+                                       bool protected_restriction = false);
+   static bool     is_same_class_package(klassOop class1, klassOop class2);
+ 
+@@ -104,7 +101,7 @@
+   // Create a java.lang.reflect.Method object based on a method
+   static oop new_method(methodHandle method, bool intern_name, bool for_constant_pool_access, TRAPS);
+   // Create a java.lang.reflect.Constructor object based on a method
+-  static oop new_constructor(methodHandle method, TRAPS);  
++  static oop new_constructor(methodHandle method, TRAPS);
+   // Create a java.lang.reflect.Field object based on a field descriptor
+   static oop new_field(fieldDescriptor* fd, bool intern_name, TRAPS);
+ 
+@@ -124,7 +121,7 @@
+   // Method call (shared by invoke_method and invoke_constructor)
+   static oop  invoke(instanceKlassHandle klass, methodHandle method, Handle receiver, bool override, objArrayHandle ptypes, BasicType rtype, objArrayHandle args, bool is_method_invoke, TRAPS);
+ 
+-  // Narrowing of basic types. Used to create correct jvalues for 
++  // Narrowing of basic types. Used to create correct jvalues for
+   // boolean, byte, char and short return values from the interpreter
+   // which are returned as ints. Throws IllegalArgumentException.
+   static void narrow(jvalue* value, BasicType narrow_type, TRAPS);
+@@ -134,13 +131,13 @@
+ 
+   static bool match_parameter_types(methodHandle method, objArrayHandle types, int parameter_count, TRAPS);
+   // Creating new java.lang.reflect.xxx wrappers
+-  static oop new_field(FieldStream* st, TRAPS);  
++  static oop new_field(FieldStream* st, TRAPS);
+ 
+ public:
+   // Field lookup and verification.
+   static bool      resolve_field(Handle field_mirror, Handle& receiver, fieldDescriptor* fd, bool check_final, TRAPS);
+ 
+-  // Reflective field access. Returns type code. Throws IllegalArgumentException. 
++  // Reflective field access. Returns type code. Throws IllegalArgumentException.
+   static BasicType field_get(jvalue* value, fieldDescriptor* fd, Handle receiver);
+   static void      field_set(jvalue* value, fieldDescriptor* fd, Handle receiver, BasicType value_type, TRAPS);
+ 
+@@ -163,4 +160,3 @@
+ #endif /* SUPPORT_OLD_REFLECTION */
+ 
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/reflectionUtils.cpp openjdk/hotspot/src/share/vm/runtime/reflectionUtils.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/reflectionUtils.cpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/reflectionUtils.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)reflectionUtils.cpp	1.15 07/05/05 17:06:54 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,14 +19,14 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+ #include "incls/_reflectionUtils.cpp.incl"
+ 
+ KlassStream::KlassStream(instanceKlassHandle klass, bool local_only, bool classes_only) {
+-  _klass = klass;  
++  _klass = klass;
+   if (classes_only) {
+     _interfaces = Universe::the_empty_system_obj_array();
+   } else {
+@@ -46,7 +43,7 @@
+   if (!_klass->is_interface() && _klass->super() != NULL) {
+     // go up superclass chain (not for interfaces)
+     _klass = _klass->super();
+-  } else { 
++  } else {
+     if (_interface_index > 0) {
+       _klass = klassOop(_interfaces->obj_at(--_interface_index));
+     } else {
+@@ -69,8 +66,8 @@
+   _filtered_fields->append(new FilteredField(SystemDictionary::throwable_klass(), offset));
+   // The latest version of vm may be used with old jdk.
+   if (JDK_Version::is_gte_jdk16x_version()) {
+-    // The following class fields do not exist in 
+-    // previous version of jdk. 
++    // The following class fields do not exist in
++    // previous versions of the jdk.
+     offset = sun_reflect_ConstantPool::cp_oop_offset();
+     _filtered_fields->append(new FilteredField(SystemDictionary::reflect_constant_pool_klass(), offset));
+     offset = sun_reflect_UnsafeStaticFieldAccessorImpl::base_offset();
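For context on the KlassStream logic touched above: next() climbs the superclass chain first (never for interfaces) and then drains the transitive interface list from the highest index down. A toy sketch of that traversal order (Klass here is a stand-in, not the VM type):

#include <vector>

struct Klass {                                  // toy stand-in for klassOop
  Klass* super = nullptr;
  bool is_interface = false;
};

// Yields k, its superclasses, then the given interfaces: the order
// KlassStream::next() produces in the hunk above.
std::vector<Klass*> stream_order(Klass* k, const std::vector<Klass*>& interfaces) {
  std::vector<Klass*> out;
  for (Klass* c = k; c != nullptr; c = c->is_interface ? nullptr : c->super)
    out.push_back(c);                           // superclass chain (not for interfaces)
  for (auto it = interfaces.rbegin(); it != interfaces.rend(); ++it)
    out.push_back(*it);                         // interfaces, highest index first
  return out;
}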
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/reflectionUtils.hpp openjdk/hotspot/src/share/vm/runtime/reflectionUtils.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/reflectionUtils.hpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/reflectionUtils.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)reflectionUtils.hpp	1.16 07/05/05 17:06:54 JVM"
+-#endif
+ /*
+  * Copyright 1999-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A KlassStream is an abstract stream for streaming over self, superclasses
+@@ -100,7 +97,7 @@
+  private:
+   int length() const                { return fields()->length(); }
+   constantPoolOop constants() const { return _klass->constants(); }
+- protected:   
++ protected:
+   typeArrayOop fields() const       { return _klass->fields(); }
+  public:
+   FieldStream(instanceKlassHandle klass, bool local_only, bool classes_only)
+@@ -112,7 +109,7 @@
+   void next() { _index -= instanceKlass::next_offset; }
+ 
+   // Accessors for current field
+-  AccessFlags access_flags() const { 
++  AccessFlags access_flags() const {
+     AccessFlags flags;
+     flags.set_flags(fields()->ushort_at(index() + instanceKlass::access_flags_offset));
+     return flags;
+@@ -125,7 +122,7 @@
+     int signature_index = fields()->ushort_at(index() +
+                                        instanceKlass::signature_index_offset);
+     return constants()->symbol_at(signature_index);
+-  }  
++  }
+   // missing: initval()
+   int offset() const {
+     return _klass->offset_from_fields( index() );
+@@ -136,7 +133,7 @@
+  private:
+   klassOop _klass;
+   int      _field_offset;
+-  
++
+  public:
+   FilteredField(klassOop klass, int field_offset) {
+     _klass = klass;
+@@ -154,7 +151,7 @@
+   static void initialize();
+   static bool is_filtered_field(klassOop klass, int field_offset) {
+     for (int i=0; i < _filtered_fields->length(); i++) {
+-      if (klass == _filtered_fields->at(i)->klass() && 
++      if (klass == _filtered_fields->at(i)->klass() &&
+         field_offset == _filtered_fields->at(i)->field_offset()) {
+         return true;
+       }
+@@ -196,7 +193,7 @@
+  private:
+   int  _filtered_fields_count;
+   bool has_filtered_field() { return (_filtered_fields_count > 0); }
+-    
++
+  public:
+   FilteredFieldStream(instanceKlassHandle klass, bool local_only, bool classes_only)
+     : FieldStream(klass, local_only, classes_only) {
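The is_filtered_field loop reformatted above is a plain linear scan. A minimal standalone equivalent, with std::pair in place of FilteredField:

#include <utility>
#include <vector>

// Linear search over (klass, field offset) pairs that reflection hides,
// matching the shape of FilteredFieldsMap::is_filtered_field above.
struct FilteredFields {
  std::vector<std::pair<const void*, int>> fields;   // (klass, offset)
  bool is_filtered(const void* klass, int offset) const {
    for (const auto& f : fields)
      if (f.first == klass && f.second == offset) return true;
    return false;
  }
};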
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/registerMap.hpp openjdk/hotspot/src/share/vm/runtime/registerMap.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/registerMap.hpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/registerMap.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)registerMap.hpp	1.16 07/05/05 17:06:54 JVM"
+-#endif
+ /*
+  * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class JavaThread;
+@@ -35,11 +32,11 @@
+ // frames.  Hence, it must always be passed in as an argument to
+ // frame::sender(RegisterMap*).
+ //
+-// In particular, 
++// In particular,
+ //   1) It provides access to the thread for which the stack belongs.  The
+ //      thread object is needed in order to get sender of a deoptimized frame.
+ //
+-//   2) It is used to pass information from a callee frame to its caller 
++//   2) It is used to pass information from a callee frame to its caller
+ //      frame about how the frame should be traversed.  This is used to let
+ //      the caller frame take care of calling oops-do of out-going
+ //      arguments, when the callee frame is not instantiated yet.  This
+@@ -49,14 +46,14 @@
+ //      this is hidden by using the StackFrameStream.)  This is used when
+ //      doing follow_oops and oops_do.
+ //
+-//   3) The RegisterMap keeps track of the values of callee-saved registers 
++//   3) The RegisterMap keeps track of the values of callee-saved registers
+ //      from frame to frame (hence, the name).  For some stack traversal the
+ //      values of the callee-saved registers do not matter, e.g., if you
+ //      only need the static properties such as frame type, pc, and such.
+ //      Updating of the RegisterMap can be turned off by instantiating the
+ //      register map as: RegisterMap map(thread, false);
+ 
+-class RegisterMap : public StackObj { 
++class RegisterMap : public StackObj {
+  public:
+     typedef julong LocationValidType;
+   enum {
+@@ -67,7 +64,7 @@
+  private:
+   intptr_t*    _location[reg_count];    // Location of registers (intptr_t* looks better than address in the debugger)
+   LocationValidType _location_valid[location_valid_size];
+-  bool        _include_argument_oops;   // Should include argument_oop marked locations for compiler  
++  bool        _include_argument_oops;   // Should include argument_oop marked locations for compiler
+   JavaThread* _thread;                  // Reference to current thread
+   bool        _update_map;              // Tells if the register map needs to be
+                                         // updated when traversing the stack
+@@ -82,7 +79,7 @@
+   debug_only(intptr_t* _update_for_id;) // Assert that RegisterMap is not updated twice for same frame
+   RegisterMap(JavaThread *thread, bool update_map = true);
+   RegisterMap(const RegisterMap* map);
+-    
++
+   address location(VMReg reg) const {
+     int index = reg->value() / location_valid_type_size;
+     assert(0 <= reg->value() && reg->value() < reg_count, "range check");
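The registerMap.hpp comment above insists that a RegisterMap accompany every frame::sender call. A sketch of that calling pattern with toy stand-ins for the VM types:

// Toy stand-ins for JavaThread/frame/RegisterMap; the real types live in
// the VM. This only shows the shape of a walk: one RegisterMap threads
// callee-to-caller state through every sender() call.
struct RegisterMap { void* thread; bool update_map; };

struct Frame {
  Frame* caller = nullptr;
  bool is_first_frame() const { return caller == nullptr; }
  Frame* sender(RegisterMap* /*map*/) const { return caller; }  // real VM updates *map here
};

void walk_stack(Frame* last_frame, void* thread) {
  RegisterMap map{thread, /*update_map=*/false};  // false: skip callee-saved tracking
  for (Frame* fr = last_frame; fr != nullptr && !fr->is_first_frame();
       fr = fr->sender(&map)) {
    // inspect fr: frame type, pc, outgoing oop arguments, ...
  }
}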
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/relocator.cpp openjdk/hotspot/src/share/vm/runtime/relocator.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/relocator.cpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/relocator.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)relocator.cpp	1.40 07/05/05 17:06:54 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -33,12 +30,12 @@
+ #define MAX_SHORT ((1 << 15) - 1)
+ #define MIN_SHORT (- (1 << 15))
+ 
+-// Encapsulates a code change request. There are 3 types. 
++// Encapsulates a code change request. There are 3 types.
+ // General instruction, jump instruction, and table/lookup switches
+ //
+-class ChangeItem : public ResourceObj { 
++class ChangeItem : public ResourceObj {
+   int _bci;
+- public:  
++ public:
+    ChangeItem(int bci) { _bci = bci; }
+    virtual bool handle_code_change(Relocator *r) = 0;
+ 
+@@ -61,8 +58,8 @@
+   int              _new_ilen;    // New length of instruction at bci
+   u_char*          _inst_buffer; // New bytecodes
+  public:
+-  ChangeWiden(int bci, int new_ilen, u_char* inst_buffer) : ChangeItem(bci) {    
+-    _new_ilen = new_ilen; 
++  ChangeWiden(int bci, int new_ilen, u_char* inst_buffer) : ChangeItem(bci) {
++    _new_ilen = new_ilen;
+     _inst_buffer = inst_buffer;
+   }
+ 
+@@ -82,14 +79,14 @@
+   // Callback to do instruction
+   bool handle_code_change(Relocator *r) { return r->handle_jump_widen(bci(), _delta); };
+ 
+-  bool is_jump_widen()         { return true; }   
++  bool is_jump_widen()         { return true; }
+ 
+   // If the bci matches, adjust the delta in the change jump request.
+   bool adjust(int jump_bci, int delta) {
+     if (bci() == jump_bci) {
+-      if (_delta > 0) 
++      if (_delta > 0)
+         _delta += delta;
+-      else 
++      else
+         _delta -= delta;
+       return true;
+     }
+@@ -103,7 +100,7 @@
+   int  _padding;
+   bool _is_lookup_switch;
+  public:
+-   ChangeSwitchPad(int bci, int padding, bool is_lookup_switch) : ChangeItem(bci) { 
++   ChangeSwitchPad(int bci, int padding, bool is_lookup_switch) : ChangeItem(bci) {
+      _padding = padding;
+      _is_lookup_switch = is_lookup_switch;
+    }
+@@ -123,7 +120,7 @@
+ 
+ Relocator::Relocator(methodHandle m, RelocatorListener* listener) {
+   set_method(m);
+-  set_code_length(method()->code_size()); 
++  set_code_length(method()->code_size());
+   set_code_array(NULL);
+   // Allocate code array and copy bytecodes
+   if (!expand_code_array(0)) {
+@@ -150,7 +147,7 @@
+   }
+ 
+   if (!handle_code_changes()) return methodHandle();
+- 
++
+     // Construct the new method
+   methodHandle new_method = methodOopDesc::clone_with_new_data(method(),
+                               code_array(), code_length(),
+@@ -182,23 +179,23 @@
+ 
+     // Execute operation
+     if (!ci->handle_code_change(this)) return false;
+-    
++
+     // Shuffle items up
+     for (int index = 1; index < _changes->length(); index++) {
+       _changes->at_put(index-1, _changes->at(index));
+-    }  
++    }
+     _changes->pop();
+   }
+-  return true;  
++  return true;
+ }
+ 
+ 
+ bool Relocator::is_opcode_lookupswitch(Bytecodes::Code bc) {
+   switch (bc) {
+-    case Bytecodes::_tableswitch:       return false;    
++    case Bytecodes::_tableswitch:       return false;
+     case Bytecodes::_lookupswitch:                   // not rewritten on ia64
+     case Bytecodes::_fast_linearswitch:              // rewritten _lookupswitch
+-    case Bytecodes::_fast_binaryswitch: return true; // rewritten _lookupswitch    
++    case Bytecodes::_fast_binaryswitch: return true; // rewritten _lookupswitch
+     default: ShouldNotReachHere();
+   }
+   return true; // dummy
+@@ -211,16 +208,16 @@
+   switch (bc) {
+     // In the case of switch instructions, see if we have the original
+     // padding recorded.
+-    case Bytecodes::_tableswitch:      
++    case Bytecodes::_tableswitch:
+     case Bytecodes::_lookupswitch:
+     case Bytecodes::_fast_linearswitch:
+-    case Bytecodes::_fast_binaryswitch:        
++    case Bytecodes::_fast_binaryswitch:
+     {
+       int pad = get_orig_switch_pad(bci, is_opcode_lookupswitch(bc));
+       if (pad == -1) {
+         return instruction_length_at(bci);
+       }
+-      // Otherwise, depends on the switch type. 
++      // Otherwise, the length depends on the switch type.
+       switch (bc) {
+         case Bytecodes::_tableswitch: {
+           int lo = int_at(bci + 1 + pad + 4 * 1);
+@@ -229,22 +226,22 @@
+           return 1 + pad + 4*(3 + n);
+         }
+         case Bytecodes::_lookupswitch:
+-        case Bytecodes::_fast_linearswitch: 
+-        case Bytecodes::_fast_binaryswitch: {          
++        case Bytecodes::_fast_linearswitch:
++        case Bytecodes::_fast_binaryswitch: {
+           int npairs = int_at(bci + 1 + pad + 4 * 1);
+-          return 1 + pad + 4*(2 + 2*npairs); 
++          return 1 + pad + 4*(2 + 2*npairs);
+         }
+         default:
+           ShouldNotReachHere();
+       }
+-    }         
++    }
+   }
+   return instruction_length_at(bci);
+ }
+ 
+ // If a change item is recorded for "pc", with type "ct", returns the
+ // associated padding, else -1.
+-int Relocator::get_orig_switch_pad(int bci, bool is_lookup_switch) {      
++int Relocator::get_orig_switch_pad(int bci, bool is_lookup_switch) {
+   for (int k = 0; k < _changes->length(); k++) {
+     ChangeItem* ci = _changes->at(k);
+     if (ci->is_switch_pad()) {
+@@ -275,16 +272,16 @@
+ // at "offset" and is a short if "isShort" is "TRUE",
+ // and an integer otherwise.  If the jump crosses "breakPC", change
+ // the span of the jump by "delta".
+-void Relocator::change_jump(int bci, int offset, bool is_short, int break_bci, int delta) {  
+-  int bci_delta = (is_short) ? short_at(offset) : int_at(offset);  
++void Relocator::change_jump(int bci, int offset, bool is_short, int break_bci, int delta) {
++  int bci_delta = (is_short) ? short_at(offset) : int_at(offset);
+   int targ = bci + bci_delta;
+ 
+   if ((bci <= break_bci && targ >  break_bci) ||
+       (bci >  break_bci && targ <= break_bci)) {
+     int new_delta;
+-    if (bci_delta > 0) 
++    if (bci_delta > 0)
+       new_delta = bci_delta + delta;
+-    else 
++    else
+       new_delta = bci_delta - delta;
+ 
+     if (is_short && ((new_delta > MAX_SHORT) || new_delta < MIN_SHORT)) {
+@@ -296,14 +293,14 @@
+     }
+   }
+ }
+-    
++
+ 
+ // Changes all jumps crossing "break_bci" by "delta".  May enqueue things
+ // on "rc->changes"
+ void Relocator::change_jumps(int break_bci, int delta) {
+   int bci = 0;
+   Bytecodes::Code bc;
+-  // Now, adjust any affected instructions. 
++  // Now, adjust any affected instructions.
+   while (bci < code_length()) {
+     switch (bc= code_at(bci)) {
+       case Bytecodes::_ifeq:
+@@ -320,67 +317,67 @@
+       case Bytecodes::_if_icmple:
+       case Bytecodes::_if_acmpeq:
+       case Bytecodes::_if_acmpne:
+-      case Bytecodes::_ifnull:   
++      case Bytecodes::_ifnull:
+       case Bytecodes::_ifnonnull:
+       case Bytecodes::_goto:
+       case Bytecodes::_jsr:
+-	change_jump(bci, bci+1, true, break_bci, delta);
+-	break;
++        change_jump(bci, bci+1, true, break_bci, delta);
++        break;
+       case Bytecodes::_goto_w:
+       case Bytecodes::_jsr_w:
+-	change_jump(bci, bci+1, false, break_bci, delta);
+-	break;
+-      case Bytecodes::_tableswitch: 
+-      case Bytecodes::_lookupswitch: 
++        change_jump(bci, bci+1, false, break_bci, delta);
++        break;
++      case Bytecodes::_tableswitch:
++      case Bytecodes::_lookupswitch:
+       case Bytecodes::_fast_linearswitch:
+-      case Bytecodes::_fast_binaryswitch: {	
+-	int recPad = get_orig_switch_pad(bci, (bc != Bytecodes::_tableswitch));
++      case Bytecodes::_fast_binaryswitch: {
++        int recPad = get_orig_switch_pad(bci, (bc != Bytecodes::_tableswitch));
+         int oldPad = (recPad != -1) ? recPad : align(bci+1) - (bci+1);
+-	if (bci > break_bci) {
+-	  int new_bci = bci + delta;
+-	  int newPad = align(new_bci+1) - (new_bci+1);
+-	  // Do we need to check the padding? 
+-	  if (newPad != oldPad) {
+-	    if (recPad == -1) {
++        if (bci > break_bci) {
++          int new_bci = bci + delta;
++          int newPad = align(new_bci+1) - (new_bci+1);
++          // Do we need to check the padding?
++          if (newPad != oldPad) {
++            if (recPad == -1) {
+               _changes->push(new ChangeSwitchPad(bci, oldPad, (bc != Bytecodes::_tableswitch)));
+-	    }
+-	  }
+-	}	
+-	
+-	// Then the rest, which depend on the kind of switch.
+-	switch (bc) {
+-	  case Bytecodes::_tableswitch: {
++            }
++          }
++        }
++
++        // Then the rest, which depend on the kind of switch.
++        switch (bc) {
++          case Bytecodes::_tableswitch: {
+             change_jump(bci, bci +1 + oldPad, false, break_bci, delta);
+-            // We cannot use the Bytecode_tableswitch abstraction, since the padding might not be correct.            
++            // We cannot use the Bytecode_tableswitch abstraction, since the padding might not be correct.
+             int lo = int_at(bci + 1 + oldPad + 4 * 1);
+             int hi = int_at(bci + 1 + oldPad + 4 * 2);
+             int n = hi - lo + 1;
+-	    for (int k = 0; k < n; k++) {
+-	      change_jump(bci, bci +1 + oldPad + 4*(k+3), false, break_bci, delta);
+-	    }
+-	    // Special next-bci calculation here...
+-	    bci += 1 + oldPad + (n+3)*4;
+-	    continue;
+-	  }
+-	  case Bytecodes::_lookupswitch:
+-	  case Bytecodes::_fast_linearswitch:
++            for (int k = 0; k < n; k++) {
++              change_jump(bci, bci +1 + oldPad + 4*(k+3), false, break_bci, delta);
++            }
++            // Special next-bci calculation here...
++            bci += 1 + oldPad + (n+3)*4;
++            continue;
++          }
++          case Bytecodes::_lookupswitch:
++          case Bytecodes::_fast_linearswitch:
+           case Bytecodes::_fast_binaryswitch: {
+             change_jump(bci, bci +1 + oldPad, false, break_bci, delta);
+-            // We cannot use the Bytecode_lookupswitch abstraction, since the padding might not be correct.            
+-	    int npairs = int_at(bci + 1 + oldPad + 4 * 1);
+-	    for (int k = 0; k < npairs; k++) {
++            // We cannot use the Bytecode_lookupswitch abstraction, since the padding might not be correct.
++            int npairs = int_at(bci + 1 + oldPad + 4 * 1);
++            for (int k = 0; k < npairs; k++) {
+               change_jump(bci, bci + 1 + oldPad + 4*(2 + 2*k + 1), false, break_bci, delta);
+-	    }
+-	    /* Special next-bci calculation here... */
+-	    bci += 1 + oldPad + (2 + (npairs*2))*4;
+-	    continue;
+-	  }
+-	  default:
+-	    ShouldNotReachHere();
+-	}
++            }
++            /* Special next-bci calculation here... */
++            bci += 1 + oldPad + (2 + (npairs*2))*4;
++            continue;
++          }
++          default:
++            ShouldNotReachHere();
++        }
+       }
+       default:
+-	break;
++        break;
+     }
+     bci += rc_instr_len(bci);
+   }
+@@ -455,14 +452,14 @@
+ 
+   // Expanding current array
+   if (code_array() != NULL) {
+-    memcpy(new_code_array, code_array(), code_length());   
++    memcpy(new_code_array, code_array(), code_length());
+   } else {
+     // Initial copy. Copy directly from methodOop
+     memcpy(new_code_array, method()->code_base(), code_length());
+   }
+-  
++
+   set_code_array(new_code_array);
+-  set_code_array_length(length);   
++  set_code_array_length(length);
+ 
+   return true;
+ }
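expand_code_array, reformatted above, copies the old bytecodes into a larger buffer; code_slop_pct() in relocator.hpp below suggests it aims for roughly 25% headroom. A hypothetical version of such a growth policy (not the VM's allocator):

#include <cstring>

// Hypothetical reallocation in the spirit of expand_code_array: room for
// 'delta' extra bytes plus ~25% slop so repeated widenings stay amortized.
unsigned char* expand(unsigned char* old_code, int code_length,
                      int delta, int* new_capacity) {
  int needed = code_length + delta;
  *new_capacity = needed + needed * 25 / 100;      // code_slop_pct() == 25
  unsigned char* buf = new unsigned char[*new_capacity];
  if (old_code != nullptr)
    memcpy(buf, old_code, code_length);            // keep existing bytecodes
  return buf;
}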
+@@ -470,14 +467,14 @@
+ 
+ // The instruction at "bci", whose size is "ilen", is changing size by
+ // "delta".  Reallocate, move code, recalculate jumps, and enqueue
+-// change items as necessary. 
++// change items as necessary.
+ bool Relocator::relocate_code(int bci, int ilen, int delta) {
+   int next_bci = bci + ilen;
+   if (delta > 0 && code_length() + delta > code_array_length())  {
+     // Expand allocated code space, if necessary.
+     if (!expand_code_array(delta)) {
+           return false;
+-    }    
++    }
+   }
+ 
+   // We require 4-byte alignment of code arrays.
+@@ -502,22 +499,22 @@
+   // And local variable table...
+   adjust_local_var_table(bci, delta);
+ 
+-  // Relocate the pending change stack...        
++  // Relocate the pending change stack...
+   for (int j = 0; j < _changes->length(); j++) {
+     ChangeItem* ci = _changes->at(j);
+     ci->relocate(bci, delta);
+-  }  
++  }
+ 
+   // Notify any listeners about code relocation
+-  notify(bci, delta, code_length()); 
++  notify(bci, delta, code_length());
+ 
+   return true;
+ }
+ 
+ // relocate a general instruction. Called by ChangeWiden class
+-bool Relocator::handle_widen(int bci, int new_ilen, u_char inst_buffer[]) {  
++bool Relocator::handle_widen(int bci, int new_ilen, u_char inst_buffer[]) {
+   int ilen = rc_instr_len(bci);
+-  if (!relocate_code(bci, ilen, new_ilen - ilen)) 
++  if (!relocate_code(bci, ilen, new_ilen - ilen))
+     return false;
+ 
+   // Insert new bytecode(s)
+@@ -529,9 +526,9 @@
+ }
+ 
+ // handle jump_widen instruction. Called by ChangeJumpWiden class
+-bool Relocator::handle_jump_widen(int bci, int delta) {  
++bool Relocator::handle_jump_widen(int bci, int delta) {
+   int ilen = rc_instr_len(bci);
+-  
++
+   Bytecodes::Code bc = code_at(bci);
+   switch (bc) {
+     case Bytecodes::_ifeq:
+@@ -548,7 +545,7 @@
+     case Bytecodes::_if_icmple:
+     case Bytecodes::_if_acmpeq:
+     case Bytecodes::_if_acmpne:
+-    case Bytecodes::_ifnull:   
++    case Bytecodes::_ifnull:
+     case Bytecodes::_ifnonnull: {
+       const int goto_length   = Bytecodes::length_for(Bytecodes::_goto);
+ 
+@@ -590,9 +587,9 @@
+       assert(ilen == 3, "check length");
+ 
+       if (!relocate_code(bci, 3, 2)) return false;
+-      if (bc == Bytecodes::_goto) 
++      if (bc == Bytecodes::_goto)
+         code_at_put(bci, Bytecodes::_goto_w);
+-      else 
++      else
+         code_at_put(bci, Bytecodes::_jsr_w);
+ 
+       // If it's a forward jump, add 2 for the widening.
+@@ -602,22 +599,22 @@
+ 
+     default: ShouldNotReachHere();
+   }
+-    
++
+   return true;
+ }
+ 
+ // handle lookup/table switch instructions.  Called by ChangeSwitchPad class
+-bool Relocator::handle_switch_pad(int bci, int old_pad, bool is_lookup_switch) {  
++bool Relocator::handle_switch_pad(int bci, int old_pad, bool is_lookup_switch) {
+   int ilen = rc_instr_len(bci);
+   int new_pad = align(bci+1) - (bci+1);
+   int pad_delta = new_pad - old_pad;
+   if (pad_delta != 0) {
+-    int len;    
+-    if (!is_lookup_switch) {    
++    int len;
++    if (!is_lookup_switch) {
+       int low  = int_at(bci+1+old_pad+4);
+       int high = int_at(bci+1+old_pad+8);
+       len = high-low+1 + 3; // 3 for default, hi, lo.
+-    } else {      
++    } else {
+       int npairs = int_at(bci+1+old_pad+4);
+       len = npairs*2 + 2; // 2 for default, npairs.
+     }
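handle_jump_widen above rewrites a 3-byte goto or jsr into its 5-byte _w form once the offset overflows 16 bits, and adds 2 to forward targets to account for the widening. A toy model of just that offset adjustment:

#include <cstdint>

// The jump grows from 3 to 5 bytes, so a forward target moves 2 bytes
// further away, while a backward target's distance is unchanged.
int32_t widened_offset(int16_t short_offset) {
  int32_t off = short_offset;
  if (off > 0) off += 2;       // forward jump crosses the 2 inserted bytes
  return off;
}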
+@@ -626,24 +623,24 @@
+     // we need to call that before messing with the current
+     // instruction.  Since it may also overwrite the current
+     // instruction when moving down, remember the possibly
+-    // overwritten part. 
+-    
++    // overwritten part.
++
+     // Move the code following the instruction...
+     if (!relocate_code(bci, ilen, pad_delta)) return false;
+-    
++
+     if (pad_delta < 0) {
+-      // Move the shrunken instruction down.      
++      // Move the shrunken instruction down.
+       memmove(addr_at(bci + 1 + new_pad),
+               addr_at(bci + 1 + old_pad),
+-	      len * 4 + pad_delta);
++              len * 4 + pad_delta);
+       memmove(addr_at(bci + 1 + new_pad + len*4 + pad_delta),
+-	      _overwrite, -pad_delta);
++              _overwrite, -pad_delta);
+     } else {
+       assert(pad_delta > 0, "check");
+       // Move the expanded instruction up.
+       memmove(addr_at(bci +1 + new_pad),
+-	      addr_at(bci +1 + old_pad),
+-	      len * 4);	
++              addr_at(bci +1 + old_pad),
++              len * 4);
+     }
+   }
+   return true;
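+ 
The handle_switch_pad changes above are whitespace-only, but the logic they touch is worth a gloss: tableswitch/lookupswitch operands must begin at a 4-byte-aligned bytecode index, so when relocation shifts the opcode, the pad bytes in front of the operands are recomputed and the operand block is moved by the difference. A minimal standalone sketch of that arithmetic, with made-up values and stand-in names (not HotSpot code):

#include <cstdio>

// Round n up to the next multiple of 4, as Relocator::align() does.
static int align4(int n) { return (n + 3) & ~3; }

int main() {
    int bci     = 6;                            // switch opcode now at index 6
    int old_pad = 3;                            // padding at its previous index
    int new_pad = align4(bci + 1) - (bci + 1);  // operands must start 4-byte aligned
    int pad_delta = new_pad - old_pad;          // signed distance the operands move
    std::printf("new_pad=%d pad_delta=%d\n", new_pad, pad_delta);  // prints 1 and -2
    return 0;
}

A negative pad_delta takes the "move the shrunken instruction down" memmove branch above; a positive one takes the "move the expanded instruction up" branch.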
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/relocator.hpp openjdk/hotspot/src/share/vm/runtime/relocator.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/relocator.hpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/relocator.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)relocator.hpp	1.27 07/05/05 17:06:54 JVM"
+-#endif
+ /*
+  * Copyright 1997-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This code has been converted from the 1.1E java virtual machine
+@@ -38,10 +35,10 @@
+ };
+ 
+ 
+-class Relocator : public ResourceObj {  
++class Relocator : public ResourceObj {
+  public:
+   Relocator(methodHandle method, RelocatorListener* listener);
+-  methodHandle insert_space_at(int bci, int space, u_char inst_buffer[], TRAPS);  
++  methodHandle insert_space_at(int bci, int space, u_char inst_buffer[], TRAPS);
+ 
+   // Callbacks from ChangeItem's
+   bool handle_code_changes();
+@@ -49,7 +46,7 @@
+   void push_jump_widen  (int bci, int delta, int new_delta);    // pushes jumps
+   bool handle_jump_widen  (int bci, int delta);     // handles jumps
+   bool handle_switch_pad  (int bci, int old_pad, bool is_lookup_switch); // handles table and lookup switches
+-  
++
+  private:
+   unsigned char* _code_array;
+   int            _code_array_length;
+@@ -78,12 +75,12 @@
+ 
+   methodHandle method() const               { return _method; }
+   void set_method(methodHandle method)      { _method = method; }
+-  
++
+   // This will return a raw bytecode, which is possibly rewritten.
+   Bytecodes::Code code_at(int bci) const          { return (Bytecodes::Code) code_array()[bci]; }
+   void code_at_put(int bci, Bytecodes::Code code) { code_array()[bci] = (char) code; }
+-  
+-  // get and set signed integers in the code_array 
++
++  // get and set signed integers in the code_array
+   inline int   int_at(int bci) const               { return Bytes::get_Java_u4(&code_array()[bci]); }
+   inline void  int_at_put(int bci, int value)      { Bytes::put_Java_u4(&code_array()[bci], value); }
+ 
+@@ -94,29 +91,28 @@
+   // get the address of in the code_array
+   inline char* addr_at(int bci) const             { return (char*) &code_array()[bci]; }
+ 
+-  int  instruction_length_at(int bci)             { return Bytecodes::length_at(code_array() + bci); }  
+-  
++  int  instruction_length_at(int bci)             { return Bytecodes::length_at(code_array() + bci); }
++
+   // Helper methods
+   int  align(int n) const                          { return (n+3) & ~3; }
+-  int  code_slop_pct() const                       { return 25; }  
++  int  code_slop_pct() const                       { return 25; }
+   bool is_opcode_lookupswitch(Bytecodes::Code bc);
+ 
+   // basic relocation methods
+-  bool relocate_code         (int bci, int ilen, int delta);  
++  bool relocate_code         (int bci, int ilen, int delta);
+   void change_jumps          (int break_bci, int delta);
+-  void change_jump           (int bci, int offset, bool is_short, int break_bci, int delta);  
+-  void adjust_exception_table(int bci, int delta);  
++  void change_jump           (int bci, int offset, bool is_short, int break_bci, int delta);
++  void adjust_exception_table(int bci, int delta);
+   void adjust_line_no_table  (int bci, int delta);
+   void adjust_local_var_table(int bci, int delta);
+   int  get_orig_switch_pad   (int bci, bool is_lookup_switch);
+-  int  rc_instr_len          (int bci);  
++  int  rc_instr_len          (int bci);
+   bool expand_code_array     (int delta);
+ 
+   // Callback support
+   RelocatorListener *_listener;
+   void notify(int bci, int delta, int new_code_length) {
+     if (_listener != NULL)
+-      _listener->relocated(bci, delta, new_code_length); 
++      _listener->relocated(bci, delta, new_code_length);
+   }
+ };
+-
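+ 
The relocator.hpp hunks above are likewise whitespace cleanups; among the members they retab are int_at()/int_at_put(), which wrap Bytes::get_Java_u4()/put_Java_u4(): bytecode operands are stored big-endian (class-file order) irrespective of host endianness. A self-contained sketch of the equivalent byte shuffling, assuming that behavior rather than quoting the real Bytes:: implementation:

#include <cstdint>

// Assemble a signed 32-bit value from four big-endian bytes.
static int32_t get_java_u4(const unsigned char* p) {
    return (int32_t)(((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
                     ((uint32_t)p[2] << 8)  |  (uint32_t)p[3]);
}

// Split a signed 32-bit value into four big-endian bytes.
static void put_java_u4(unsigned char* p, int32_t v) {
    p[0] = (unsigned char)((uint32_t)v >> 24);
    p[1] = (unsigned char)((uint32_t)v >> 16);
    p[2] = (unsigned char)((uint32_t)v >> 8);
    p[3] = (unsigned char)(uint32_t)v;
}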
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/rframe.cpp openjdk/hotspot/src/share/vm/runtime/rframe.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/rframe.cpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/rframe.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)rframe.cpp	1.41 07/05/05 17:06:52 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,26 +19,26 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ 
+ #include "incls/_rframe.cpp.incl"
+ 
+-static RFrame*const  noCaller    = (RFrame*) 0x1;		// no caller (i.e., initial frame)
+-static RFrame*const  noCallerYet = (RFrame*) 0x0;		// caller not yet computed
++static RFrame*const  noCaller    = (RFrame*) 0x1;               // no caller (i.e., initial frame)
++static RFrame*const  noCallerYet = (RFrame*) 0x0;               // caller not yet computed
+ 
+-RFrame::RFrame(frame fr, JavaThread* thread, RFrame*const callee) : 
++RFrame::RFrame(frame fr, JavaThread* thread, RFrame*const callee) :
+   _fr(fr), _thread(thread), _callee(callee), _num(callee ? callee->num() + 1 : 0) {
+   _caller = (RFrame*)noCallerYet;
+   _invocations = 0;
+   _distance = 0;
+ }
+ 
+-void RFrame::set_distance(int d) { 
++void RFrame::set_distance(int d) {
+   assert(is_compiled() || d >= 0, "should be positive");
+-  _distance = d; 
++  _distance = d;
+ }
+ 
+ InterpretedRFrame::InterpretedRFrame(frame fr, JavaThread* thread, RFrame*const callee)
+@@ -64,12 +61,12 @@
+ }
+ 
+ CompiledRFrame::CompiledRFrame(frame fr, JavaThread* thread, RFrame*const  callee)
+-: RFrame(fr, thread, callee) { 
++: RFrame(fr, thread, callee) {
+   init();
+ }
+ 
+ CompiledRFrame::CompiledRFrame(frame fr, JavaThread* thread)
+-: RFrame(fr, thread, NULL) { 
++: RFrame(fr, thread, NULL) {
+   init();
+ }
+ 
+@@ -82,7 +79,7 @@
+   if (fr.is_interpreted_frame()) {
+     rf = new InterpretedRFrame(fr, thread, callee);
+     dist++;
+-  } else if (fr.is_compiled_frame()) { 
++  } else if (fr.is_compiled_frame()) {
+     // Even deopted frames look compiled because the deopt
+     // is invisible until it happens.
+     rf = new CompiledRFrame(fr, thread, callee);
+@@ -95,8 +92,8 @@
+ }
+ 
+ RFrame* RFrame::caller() {
+-  if (_caller != noCallerYet) return (_caller == noCaller) ? NULL : _caller;	// already computed caller
+-  
++  if (_caller != noCallerYet) return (_caller == noCaller) ? NULL : _caller;    // already computed caller
++
+   // caller not yet computed; do it now
+   if (_fr.is_first_java_frame()) {
+     _caller = (RFrame*)noCaller;
+@@ -135,7 +132,7 @@
+   assert(vf->is_compiled_frame(), "must be compiled");
+   _nm = compiledVFrame::cast(vf)->code();
+   vf = vf->top();
+-  _vf = javaVFrame::cast(vf);  
++  _vf = javaVFrame::cast(vf);
+   _method = methodHandle(thread(), CodeCache::find_nmethod(_fr.pc())->method());
+   assert(_method(), "should have found a method");
+ #ifndef PRODUCT
+@@ -154,7 +151,7 @@
+ #else
+   int cnt = top_method()->invocation_count();
+ #endif
+-  tty->print("%3d %s ", _num, is_interpreted() ? "I" : "C"); 
++  tty->print("%3d %s ", _num, is_interpreted() ? "I" : "C");
+   top_method()->print_short_name(tty);
+   tty->print_cr(": inv=%5d(%d) cst=%4d", _invocations, cnt, cost());
+ #endif
+@@ -171,4 +168,3 @@
+ void DeoptimizedRFrame::print() {
+   RFrame::print("deopt.");
+ }
+-
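+ 
Besides the whitespace fixes, the rframe.cpp hunks above pass through RFrame::caller(), which caches its result using two reserved pointer values: noCallerYet (0x0, caller not yet computed) and noCaller (0x1, computed and absent). One field thus encodes three states and the expensive walk runs at most once. A hedged standalone sketch of the same idiom, with stand-in names:

// Sentinel-pointer caching: 0x0 = not yet computed, 0x1 = computed, none.
struct Frame;
static Frame* const kNoCallerYet = reinterpret_cast<Frame*>(0x0);
static Frame* const kNoCaller    = reinterpret_cast<Frame*>(0x1);

struct Frame {
    Frame* parent;                          // toy stand-in for the stack walk
    Frame* cached_caller = kNoCallerYet;

    explicit Frame(Frame* p) : parent(p) {}

    Frame* caller() {
        if (cached_caller != kNoCallerYet)  // fast path: answer already cached
            return cached_caller == kNoCaller ? nullptr : cached_caller;
        cached_caller = parent ? parent : kNoCaller;  // "expensive" walk, once
        return parent;
    }
};

int main() {
    Frame outer(nullptr);
    Frame inner(&outer);
    return (inner.caller() == &outer && outer.caller() == nullptr) ? 0 : 1;
}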
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/rframe.hpp openjdk/hotspot/src/share/vm/runtime/rframe.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/rframe.hpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/rframe.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)rframe.hpp	1.23 07/05/05 17:06:49 JVM"
+-#endif
+ /*
+  * Copyright 1997-2000 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,11 +19,11 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // rframes ("recompiler frames") decorate stack frames with some extra information
+-// needed by the recompiler.  The recompiler views the stack (at the time of recompilation) 
++// needed by the recompiler.  The recompiler views the stack (at the time of recompilation)
+ // as a list of rframes.
+ 
+ class RFrame : public ResourceObj {
+@@ -96,7 +93,7 @@
+  protected:
+   javaVFrame* _vf;                           // may be NULL (for most recent frame)
+   methodHandle   _method;
+- 
++
+   InterpretedRFrame(frame fr, JavaThread* thread, RFrame*const  callee);
+   void init();
+   friend class RFrame;
+@@ -112,11 +109,9 @@
+ 
+ // treat deoptimized frames as interpreted
+ class DeoptimizedRFrame : public InterpretedRFrame {
+- protected: 
++ protected:
+   DeoptimizedRFrame(frame fr, JavaThread* thread, RFrame*const  callee);
+   friend class RFrame;
+  public:
+   void print();
+ };
+-
+-
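+ 
rframe.hpp above declares the decorator hierarchy that new_RFrame() in rframe.cpp dispatches over: interpreted frames get InterpretedRFrame, compiled frames (including not-yet-visible deopted ones) get CompiledRFrame, and DeoptimizedRFrame reuses the interpreted treatment. A rough sketch of that factory shape using stand-in types, not the HotSpot classes:

#include <memory>

struct frame { bool is_interpreted; bool is_compiled; };

struct RF            { virtual ~RF() = default; };
struct InterpretedRF : RF {};
struct CompiledRF    : RF {};

// Deopted frames still report is_compiled here, mirroring the rframe.cpp
// comment that the deopt is invisible until it happens.
static std::unique_ptr<RF> new_rframe(const frame& fr) {
    if (fr.is_interpreted) return std::make_unique<InterpretedRF>();
    if (fr.is_compiled)    return std::make_unique<CompiledRF>();
    return nullptr;  // native/stub frames get no decorator in this sketch
}

int main() {
    return new_rframe(frame{false, true}) ? 0 : 1;
}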
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/safepoint.cpp openjdk/hotspot/src/share/vm/runtime/safepoint.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/safepoint.cpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/safepoint.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)safepoint.cpp	1.305 07/05/29 09:44:27 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -37,9 +34,10 @@
+ volatile int SafepointSynchronize::_safepoint_counter = 0;
+ static volatile int PageArmed = 0 ;        // safepoint polling page is RO|RW vs PROT_NONE
+ static volatile int TryingToBlock = 0 ;    // proximate value -- for advisory use only
++static bool timeout_error_printed = false;
+ 
+ // Roll all threads forward to a safepoint and suspend them all
+-void SafepointSynchronize::begin() {   
++void SafepointSynchronize::begin() {
+ 
+   Thread* myThread = Thread::current();
+   assert(myThread->is_VM_thread(), "Only VM thread may execute a safepoint");
+@@ -56,11 +54,11 @@
+   }
+ #endif // SERIALGC
+ 
+-  // By getting the Threads_lock, we assure that no threads are about to start or 
+-  // exit. It is released again in SafepointSynchronize::end().  
++  // By getting the Threads_lock, we assure that no threads are about to start or
++  // exit. It is released again in SafepointSynchronize::end().
+   Threads_lock->lock();
+-  
+-  assert( _state == _not_synchronized, "trying to safepoint synchronize with wrong state");     
++
++  assert( _state == _not_synchronized, "trying to safepoint synchronize with wrong state");
+ 
+   int nof_threads = Threads::number_of_threads();
+ 
+@@ -73,17 +71,16 @@
+   {
+   MutexLocker mu(Safepoint_lock);
+ 
+-  // Set number of threads to wait for, before we initiate the callbacks 
+-  _waiting_to_block = nof_threads;    
+-  TryingToBlock     = 0 ; 
+-  int still_running = nof_threads;    
++  // Set number of threads to wait for, before we initiate the callbacks
++  _waiting_to_block = nof_threads;
++  TryingToBlock     = 0 ;
++  int still_running = nof_threads;
+ 
+   // Save the starting time, so that it can be compared to see if this has taken
+   // too long to complete.
+   jlong safepoint_limit_time;
+-  static bool timeout_error_printed = false;
+-  
+-  
++  timeout_error_printed = false;
++
+   // Begin the process of bringing the system to a safepoint.
+   // Java threads can be in several different states and are
+   // stopped by different mechanisms:
+@@ -92,7 +89,7 @@
+   //     The interpreter dispatch table is changed to force it to
+   //     check for a safepoint condition between bytecodes.
+   //  2. Running in native code
+-  //     When returning from the native code, a Java thread must check 
++  //     When returning from the native code, a Java thread must check
+   //     the safepoint _state to see if we must block.  If the
+   //     VM thread sees a Java thread in native, it does
+   //     not wait for this thread to block.  The order of the memory
+@@ -103,43 +100,43 @@
+   //     (on MP systems).  In order to avoid the overhead of issuing
+   //     a memory barrier for each Java thread making native calls, each Java
+   //     thread performs a write to a single memory page after changing
+-  //     the thread state.  The VM thread performs a sequence of 
+-  //     mprotect OS calls which forces all previous writes from all 
+-  //     Java threads to be serialized.  This is done in the 
+-  //     os::serialize_thread_states() call.  This has proven to be 
++  //     the thread state.  The VM thread performs a sequence of
++  //     mprotect OS calls which forces all previous writes from all
++  //     Java threads to be serialized.  This is done in the
++  //     os::serialize_thread_states() call.  This has proven to be
+   //     much more efficient than executing a membar instruction
+   //     on every call to native code.
+   //  3. Running compiled Code
+-  //     Compiled code reads a global (Safepoint Polling) page that 
++  //     Compiled code reads a global (Safepoint Polling) page that
+   //     is set to fault if we are trying to get to a safepoint.
+   //  4. Blocked
+   //     A thread which is blocked will not be allowed to return from the
+   //     block condition until the safepoint operation is complete.
+   //  5. In VM or Transitioning between states
+-  //     If a Java thread is currently running in the VM or transitioning 
+-  //     between states, the safepointing code will wait for the thread to 
++  //     If a Java thread is currently running in the VM or transitioning
++  //     between states, the safepointing code will wait for the thread to
+   //     block itself when it attempts transitions to a new state.
+-  // 
++  //
+   _state            = _synchronizing;
+   OrderAccess::fence();
+ 
+   // Flush all thread states to memory
+-  if (!UseMembar) { 
++  if (!UseMembar) {
+     os::serialize_thread_states();
+   }
+ 
+   // Make interpreter safepoint aware
+-  AbstractInterpreter::notice_safepoints(); 
++  Interpreter::notice_safepoints();
+ 
+-  if (UseCompilerSafepoints && DeferPollingPageLoopCount < 0) { 
++  if (UseCompilerSafepoints && DeferPollingPageLoopCount < 0) {
+     // Make polling safepoint aware
+-    guarantee (PageArmed == 0, "invariant") ; 
+-    PageArmed = 1 ; 
++    guarantee (PageArmed == 0, "invariant") ;
++    PageArmed = 1 ;
+     os::make_polling_page_unreadable();
+   }
+ 
+   // Consider using active_processor_count() ... but that call is expensive.
+-  int ncpus = os::processor_count() ; 
++  int ncpus = os::processor_count() ;
+ 
+ #ifdef ASSERT
+   for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
+@@ -148,91 +145,70 @@
+ #endif // ASSERT
+ 
+   if (SafepointTimeout)
+-    safepoint_limit_time = os::javaTimeMillis() + (jlong)SafepointTimeoutDelay;
++    safepoint_limit_time = os::javaTimeNanos() + (jlong)SafepointTimeoutDelay * MICROUNITS;
+ 
+-  // Iterate through all threads until it have been determined how to stop them all at a safepoint  
++  // Iterate through all threads until it has been determined how to stop them all at a safepoint
+   unsigned int iterations = 0;
+-  int steps = 0 ; 
++  int steps = 0 ;
+   while(still_running > 0) {
+     for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
+       assert(!cur->is_ConcurrentGC_thread(), "A concurrent GC thread is unexpectedly being suspended");
+       ThreadSafepointState *cur_state = cur->safepoint_state();
+-      if (cur_state->is_running()) {          
+-        cur_state->examine_state_of_thread();                              
+-        if (!cur_state->is_running()) { 
+-           still_running--; 
++      if (cur_state->is_running()) {
++        cur_state->examine_state_of_thread();
++        if (!cur_state->is_running()) {
++           still_running--;
+            // consider adjusting steps downward:
+-           //   steps = 0 
+-           //   steps -= NNN 
+-           //   steps >>= 1 
++           //   steps = 0
++           //   steps -= NNN
++           //   steps >>= 1
+            //   steps = MIN(steps, 2000-100)
+            //   if (iterations != 0) steps -= NNN
+-        } 
++        }
+         if (TraceSafepoint && Verbose) cur_state->print();
+       }
+     }
+ 
+-    if (PrintSafepointStatistics && iterations == 0) {
++    if ( (PrintSafepointStatistics || (PrintSafepointStatisticsTimeout > 0))
++         && iterations == 0) {
+       begin_statistics(nof_threads, still_running);
+     }
+ 
+     if (still_running > 0) {
+       // Check if it is taking too long
+-      if (SafepointTimeout && safepoint_limit_time < os::javaTimeMillis()) {
+-        if (!timeout_error_printed) {
+-          timeout_error_printed = true;
+-          // Print out the thread IDs which didn't reach the safepoint
+-          // for debugging purposes (useful when there are lots of
+-          // threads in the debugger)
+-          tty->print_cr("# SafepointSynchronize::begin: Fatal error:");
+-          tty->print_cr("# SafepointSynchronize::begin: Timed out while attempting to reach a safepoint.");
+-          tty->print_cr("# SafepointSynchronize::begin: Threads which did not reach the safepoint:");
+-          for(JavaThread *cur = Threads::first(); cur; cur = cur->next()) {
+-            ThreadSafepointState *cur_state = cur->safepoint_state();
+-            if (cur_state->is_running()) {         
+-              tty->print("# ");
+-              cur_state->print();
+-              cur->osthread()->print();
+-              tty->print_cr("");
+-            }
+-          }
+-          tty->print_cr("# SafepointSynchronize::begin: (End of list)");
+-        }
+-
+-        if (DieOnSafepointTimeout) {
+-          fatal("Safepoint Timeout");
+-        }
++      if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) {
++        print_safepoint_timeout(_spinning_timeout);
+       }
+ 
+-      // Spin to avoid context switching.  
+-      // There's a tension between allowing the mutators to run (and rendezvous) 
+-      // vs spinning.  As the VM thread spins, wasting cycles, it consumes CPU that 
+-      // a mutator might otherwise use profitably to reach a safepoint.  Excessive 
++      // Spin to avoid context switching.
++      // There's a tension between allowing the mutators to run (and rendezvous)
++      // vs spinning.  As the VM thread spins, wasting cycles, it consumes CPU that
++      // a mutator might otherwise use profitably to reach a safepoint.  Excessive
+       // spinning by the VM thread on a saturated system can increase rendezvous latency.
+-      // Blocking or yielding incur their own penalties in the form of context switching 
+-      // and the resultant loss of $ residency.  
+-      // 
+-      // Further complicating matters is that yield() does not work as naively expected 
+-      // on many platforms -- yield() does not guarantee that any other ready threads 
+-      // will run.   As such we revert yield_all() after some number of iterations.  
+-      // Yield_all() is implemented as a short unconditional sleep on some platforms.  
+-      // Typical operating systems round a "short" sleep period up to 10 msecs, so sleeping 
+-      // can actually increase the time it takes the VM thread to detect that a system-wide 
+-      // stop-the-world safepoint has been reached.  In a pathological scenario such as that 
+-      // described in CR6415670 the VMthread may sleep just before the mutator(s) become safe.  
++      // Blocking or yielding incur their own penalties in the form of context switching
++      // and the resultant loss of $ residency.
++      //
++      // Further complicating matters is that yield() does not work as naively expected
++      // on many platforms -- yield() does not guarantee that any other ready threads
++      // will run.   As such we revert to yield_all() after some number of iterations.
++      // Yield_all() is implemented as a short unconditional sleep on some platforms.
++      // Typical operating systems round a "short" sleep period up to 10 msecs, so sleeping
++      // can actually increase the time it takes the VM thread to detect that a system-wide
++      // stop-the-world safepoint has been reached.  In a pathological scenario such as that
++      // described in CR6415670 the VMthread may sleep just before the mutator(s) become safe.
+       // In that case the mutators will be stalled waiting for the safepoint to complete and
+       // the VMthread will be sleeping, waiting for the mutators to rendezvous.  The VMthread
+       // will eventually wake up and detect that all mutators are safe, at which point
+-      // we'll again make progress.  
++      // we'll again make progress.
+       //
+-      // Beware too that that the VMThread typically runs at elevated priority.  
++      // Beware too that the VMThread typically runs at elevated priority.
+       // Its default priority is higher than the default mutator priority.
+-      // Obviously, this complicates spinning.  
++      // Obviously, this complicates spinning.
+       //
+       // Note too that on Windows XP SwitchThreadTo() has quite different behavior than Sleep(0).
+-      // Sleep(0) will _not yield to lower priority threads, while SwitchThreadTo() will.  
++      // Sleep(0) will _not yield to lower priority threads, while SwitchThreadTo() will.
+       //
+-      // See the comments in synchronizer.cpp for additional remarks on spinning.  
++      // See the comments in synchronizer.cpp for additional remarks on spinning.
+       //
+       // In the future we might:
+       // 1. Modify the safepoint scheme to avoid potentially unbounded spinning.
+@@ -242,102 +218,80 @@
+       // 2. Find something useful to do while spinning.  If the safepoint is GC-related
+       //    we might aggressively scan the stacks of threads that are already safe.
+       // 3. Use Solaris schedctl to examine the state of the still-running mutators.
+-      //    If all the mutators are ONPROC there's no reason to sleep or yield. 
++      //    If all the mutators are ONPROC there's no reason to sleep or yield.
+       // 4. YieldTo() any still-running mutators that are ready but OFFPROC.
+       // 5. Check system saturation.  If the system is not fully saturated then
+       //    simply spin and avoid sleep/yield.
+       // 6. As still-running mutators rendezvous they could unpark the sleeping
+       //    VMthread.  This works well for still-running mutators that become
+-      //    safe.  The VMthread must still poll for mutators that call-out.  
+-      // 7. Drive the policy on time-since-begin instead of iterations.  
++      //    safe.  The VMthread must still poll for mutators that call-out.
++      // 7. Drive the policy on time-since-begin instead of iterations.
+       // 8. Consider making the spin duration a function of the # of CPUs:
+-      //    Spin = (((ncpus-1) * M) + K) + F(still_running) 
++      //    Spin = (((ncpus-1) * M) + K) + F(still_running)
+       //    Alternately, instead of counting iterations of the outer loop
+       //    we could count the # of threads visited in the inner loop, above.
+       // 9. On windows consider using the return value from SwitchThreadTo()
+-      //    to drive subsequent spin/SwitchThreadTo()/Sleep(N) decisions.  
++      //    to drive subsequent spin/SwitchThreadTo()/Sleep(N) decisions.
+ 
+       if (UseCompilerSafepoints && int(iterations) == DeferPollingPageLoopCount) {
+-         guarantee (PageArmed == 0, "invariant") ; 
+-         PageArmed = 1 ; 
++         guarantee (PageArmed == 0, "invariant") ;
++         PageArmed = 1 ;
+          os::make_polling_page_unreadable();
+       }
+ 
+-      // Instead of (ncpus > 1) consider either (still_running < (ncpus + EPSILON)) or 
+-      // ((still_running + _waiting_to_block - TryingToBlock)) < ncpus) 
++      // Instead of (ncpus > 1) consider either (still_running < (ncpus + EPSILON)) or
++      // ((still_running + _waiting_to_block - TryingToBlock)) < ncpus)
+       ++steps ;
+-      if (ncpus > 1 && steps < SafepointSpinBeforeYield) { 
++      if (ncpus > 1 && steps < SafepointSpinBeforeYield) {
+         SpinPause() ;     // MP-Polite spin
+-      } else 
+-      if (steps < DeferThrSuspendLoopCount) {  
+-        os::NakedYield() ; 
++      } else
++      if (steps < DeferThrSuspendLoopCount) {
++        os::NakedYield() ;
+       } else {
+         os::yield_all(steps) ;
+         // Alternately, the VM thread could transiently depress its scheduling priority or
+-        // transiently increase the priority of the tardy mutator(s).  
+-      } 
++        // transiently increase the priority of the tardy mutator(s).
++      }
+ 
+       iterations ++ ;
+     }
+     assert(iterations < (uint)max_jint, "We have been iterating in the safepoint loop too long");
+-  }     
+-  assert(still_running == 0, "sanity check"); 
++  }
++  assert(still_running == 0, "sanity check");
+ 
+   if (PrintSafepointStatistics) {
+     update_statistics_on_spin_end();
+   }
+ 
+-  // wait until all threads are stopped    
++  // wait until all threads are stopped
+   while (_waiting_to_block > 0) {
+     if (TraceSafepoint) tty->print_cr("Waiting for %d thread(s) to block", _waiting_to_block);
+     if (!SafepointTimeout || timeout_error_printed) {
+       Safepoint_lock->wait(true);  // true, means with no safepoint checks
+     } else {
+       // Compute remaining time
+-      jlong remaining_time = safepoint_limit_time - os::javaTimeMillis();
++      jlong remaining_time = safepoint_limit_time - os::javaTimeNanos();
+ 
+       // If there is no remaining time, then there is an error
+-      if (remaining_time < 0 || Safepoint_lock->wait(true, remaining_time)) {
+-        if (!timeout_error_printed) {
+-          timeout_error_printed = true;
+-          // Print out the thread IDs which didn't reach the safepoint
+-          // for debugging purposes (useful when there are lots of
+-          // threads in the debugger)
+-          tty->print_cr("# SafepointSynchronize::begin: Fatal error:");
+-          tty->print_cr("# SafepointSynchronize::begin: Timed out while waiting for all threads to stop.");
+-          tty->print_cr("# SafepointSynchronize::begin: Threads which did not reach the safepoint:");
+-          for(JavaThread *cur = Threads::first(); cur; cur = cur->next()) {
+-            ThreadSafepointState *cur_state = cur->safepoint_state();
+-            if (cur_state->type() == ThreadSafepointState::_call_back) {
+-              if (!cur_state->has_called_back()) {
+-                tty->print("# ");
+-                cur_state->print();
+-                cur->osthread()->print();
+-                tty->print_cr("");
+-              }
+-            }
+-          }
+-          tty->print_cr("# SafepointSynchronize::begin: (End of list)");
+-        }
+-
+-        if (DieOnSafepointTimeout) {
+-          fatal("Safepoint Timeout");
+-        }
++      if (remaining_time < 0 || Safepoint_lock->wait(true, remaining_time / MICROUNITS)) {
++        print_safepoint_timeout(_blocking_timeout);
+       }
+     }
+-  }               
++  }
+   assert(_waiting_to_block == 0, "sanity check");
+ 
+ #ifndef PRODUCT
+   if (SafepointTimeout) {
+-    jlong current_time = os::javaTimeMillis();
++    jlong current_time = os::javaTimeNanos();
+     if (safepoint_limit_time < current_time) {
+-      tty->print_cr("# SafepointSynchronize: Finished after %.4f seconds",
+-        0.001 * ((double)current_time - (double)safepoint_limit_time + (double)SafepointTimeoutDelay) );
++      tty->print_cr("# SafepointSynchronize: Finished after "
++                    INT64_FORMAT_W(6) " ms",
++                    ((current_time - safepoint_limit_time) / MICROUNITS +
++                     SafepointTimeoutDelay));
+     }
+   }
+ #endif
+-    
++
+   assert((_safepoint_counter & 0x1) == 0, "must be even");
+   assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
+   _safepoint_counter ++;
+@@ -348,15 +302,15 @@
+   OrderAccess::fence();
+ 
+   if (TraceSafepoint) {
+-    VM_Operation *op = VMThread::vm_operation();     
++    VM_Operation *op = VMThread::vm_operation();
+     tty->print_cr("Entering safepoint region: %s", (op != NULL) ? op->name() : "no vm operation");
+   }
+- 
++
+   RuntimeService::record_safepoint_synchronized();
+-  if (PrintSafepointStatistics) {   
++  if (PrintSafepointStatistics) {
+     update_statistics_on_sync_end(os::javaTimeNanos());
+-  }    
+-  
++  }
++
+   // Call stuff that needs to be run when a safepoint is just about to be completed
+   do_cleanup_tasks();
+   }
+@@ -364,7 +318,7 @@
+ 
+ // Wake up all threads, so they are ready to resume execution after the safepoint
+ // operation has been carried out
+-void SafepointSynchronize::end() {        
++void SafepointSynchronize::end() {
+ 
+   assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
+   assert((_safepoint_counter & 0x1) == 1, "must be odd");
+@@ -378,7 +332,7 @@
+   if (PrintSafepointStatistics) {
+     end_statistics(os::javaTimeNanos());
+   }
+-  
++
+ #ifdef ASSERT
+   // A pending_exception cannot be installed during a safepoint.  The threads
+   // may install an async exception after they come back from a safepoint into
+@@ -393,11 +347,11 @@
+   if (PageArmed) {
+     // Make polling safepoint aware
+     os::make_polling_page_readable();
+-    PageArmed = 0 ; 
++    PageArmed = 0 ;
+   }
+ 
+   // Remove safepoint check from interpreter
+-  AbstractInterpreter::ignore_safepoints();
++  Interpreter::ignore_safepoints();
+ 
+   {
+     MutexLocker mu(Safepoint_lock);
+@@ -406,15 +360,15 @@
+ 
+     // Set to not synchronized, so the threads will not go into the signal_thread_blocked method
+     // when they get restarted.
+-    _state = _not_synchronized;  
++    _state = _not_synchronized;
+     OrderAccess::fence();
+-  
++
+     if (TraceSafepoint) {
+        tty->print_cr("Leaving safepoint region");
+     }
+ 
+     // Start suspended threads
+-    for(JavaThread *current = Threads::first(); current; current = current->next()) {      
++    for(JavaThread *current = Threads::first(); current; current = current->next()) {
+       // A problem occurring on Solaris is when attempting to restart threads
+       // the first #cpus - 1 go well, but then the VMThread is preempted when we get
+       // to the next one (since it has been running the longest).  We then have
+@@ -423,7 +377,7 @@
+       // FIXME: This causes the performance of the VM to degrade when active and with
+       // large numbers of threads.  Apparently this is due to the synchronous nature
+       // of suspending threads.
+-      // 
++      //
+       // TODO-FIXME: the comments above are vestigial and no longer apply.
+       // Furthermore, using solaris' schedctl in this particular context confers no benefit
+       if (VMThreadHintNoPreempt) {
+@@ -431,7 +385,7 @@
+       }
+       ThreadSafepointState* cur_state = current->safepoint_state();
+       assert(cur_state->type() != ThreadSafepointState::_running, "Thread not suspended at safepoint");
+-      cur_state->restart();                  
++      cur_state->restart();
+       assert(cur_state->is_running(), "safepoint state has not been reset");
+     }
+ 
+@@ -439,7 +393,7 @@
+ 
+     // Release threads lock, so threads can be created/destroyed again. It will also start all threads
+     // blocked in signal_thread_blocked
+-    Threads_lock->unlock();  
++    Threads_lock->unlock();
+ 
+   }
+ #ifndef SERIALGC
+@@ -464,35 +418,35 @@
+   m->invocation_counter()->decay();
+ }
+ 
+-void CounterDecay::decay() {  
++void CounterDecay::decay() {
+   _last_timestamp = os::javaTimeMillis();
+ 
+   // This operation is going to be performed only at the end of a safepoint
+-  // and hence GC's will not be going on, all Java mutators are suspended 
++  // and hence GC's will not be going on, all Java mutators are suspended
+   // at this point and hence SystemDictionary_lock is also not needed.
+   assert(SafepointSynchronize::is_at_safepoint(), "can only be executed at a safepoint");
+   int nclasses = SystemDictionary::number_of_classes();
+-  double classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 / 
++  double classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 /
+                                         CounterHalfLifeTime);
+   for (int i = 0; i < classes_per_tick; i++) {
+     klassOop k = SystemDictionary::try_get_next_class();
+     if (k != NULL && k->klass_part()->oop_is_instance()) {
+       instanceKlass::cast(k)->methods_do(do_method);
+     }
+-  }  
++  }
+ }
+ 
+ // Various cleaning tasks that should be done periodically at safepoints
+ void SafepointSynchronize::do_cleanup_tasks() {
+   jlong cleanup_time;
+-  
++
+   // Update fat-monitor pool, since this is a safepoint.
+   if (TraceSafepoint) {
+     cleanup_time = os::javaTimeNanos();
+   }
+-  
++
+   ObjectSynchronizer::deflate_idle_monitors();
+-  InlineCacheBuffer::update_inline_caches();  
++  InlineCacheBuffer::update_inline_caches();
+   if(UseCounterDecay && CounterDecay::is_decay_needed()) {
+     CounterDecay::decay();
+   }
+@@ -544,32 +498,32 @@
+ 
+   JavaThreadState state = thread->thread_state();
+   thread->frame_anchor()->make_walkable(thread);
+-  
++
+   // Check that we have a valid thread_state at this point
+   switch(state) {
+     case _thread_in_vm_trans:
+-    case _thread_in_Java:        // From compiled code 
++    case _thread_in_Java:        // From compiled code
+ 
+       // We are highly likely to block on the Safepoint_lock. In order to avoid blocking in this case,
+       // we pretend we are still in the VM.
+       thread->set_thread_state(_thread_in_vm);
+ 
+       if (is_synchronizing()) {
+-         Atomic::inc (&TryingToBlock) ; 
++         Atomic::inc (&TryingToBlock) ;
+       }
+ 
+       // We will always be holding the Safepoint_lock when we examine the state
+-      // of a thread. Hence, the instructions between the Safepoint_lock->lock() and 
+-      // Safepoint_lock->unlock() are happening atomic with regards to the safepoint code      
++      // of a thread. Hence, the instructions between the Safepoint_lock->lock() and
++      // Safepoint_lock->unlock() are happening atomically with regard to the safepoint code
+       Safepoint_lock->lock_without_safepoint_check();
+       if (is_synchronizing()) {
+-        // Decrement the number of threads to wait for and signal vm thread      
++        // Decrement the number of threads to wait for and signal vm thread
+         assert(_waiting_to_block > 0, "sanity check");
+-        _waiting_to_block--;                       
++        _waiting_to_block--;
+         thread->safepoint_state()->set_has_called_back(true);
+ 
+         // Consider (_waiting_to_block < 2) to pipeline the wakeup of the VM thread
+-        if (_waiting_to_block == 0) { 
++        if (_waiting_to_block == 0) {
+           Safepoint_lock->notify_all();
+         }
+       }
+@@ -581,17 +535,17 @@
+       // we hold different locks. That would leave us suspended while
+       // holding a resource which results in deadlocks.
+       thread->set_thread_state(_thread_blocked);
+-      Safepoint_lock->unlock();            
+-            
++      Safepoint_lock->unlock();
++
+       // We now try to acquire the threads lock. Since this lock is held by the VM thread during
+       // the entire safepoint, the threads will all line up here during the safepoint.
+       Threads_lock->lock_without_safepoint_check();
+       // restore original state. This is important if the thread comes from compiled code, so it
+-      // will continue to execute with the _thread_in_Java state. 
++      // will continue to execute with the _thread_in_Java state.
+       thread->set_thread_state(state);
+-      Threads_lock->unlock(); 
+-      break;                  
+-       
++      Threads_lock->unlock();
++      break;
++
+     case _thread_in_native_trans:
+     case _thread_blocked_trans:
+     case _thread_new_trans:
+@@ -608,18 +562,18 @@
+       // we hold different locks. That would leave us suspended while
+       // holding a resource which results in deadlocks.
+       thread->set_thread_state(_thread_blocked);
+-      
++
+       // It is not safe to suspend a thread if we discover it is in _thread_in_native_trans. Hence,
+       // the safepoint code might still be waiting for it to block. We need to change the state here,
+-      // so it can see that it is at a safepoint. 
++      // so it can see that it is at a safepoint.
+ 
+-      // Block until the safepoint operation is completed. 
++      // Block until the safepoint operation is completed.
+       Threads_lock->lock_without_safepoint_check();
+ 
+       // Restore state
+       thread->set_thread_state(state);
+ 
+-      Threads_lock->unlock();      
++      Threads_lock->unlock();
+       break;
+ 
+     default:
+@@ -631,8 +585,8 @@
+   // is called last since it grabs a lock and we only want to do that when
+   // we must.
+   //
+-  // Note: we never deliver an async exception at a polling point as the 
+-  // compiler may not have an exception handler for it. The polling 
++  // Note: we never deliver an async exception at a polling point as the
++  // compiler may not have an exception handler for it. The polling
+   // code will notice the async and deoptimize and the exception will
+   // be delivered. (Polling at a return point is ok though). Sure is
+   // a lot of bother for a deprecated feature...
+@@ -642,7 +596,7 @@
+   // a surprising pending exception. If the thread state is going back to java,
+   // async exception is checked in check_special_condition_for_native_trans().
+ 
+-  if (state != _thread_blocked_trans && 
++  if (state != _thread_blocked_trans &&
+       state != _thread_in_vm_trans &&
+       thread->has_special_runtime_exit_condition()) {
+     thread->handle_special_runtime_exit_condition(
+@@ -712,7 +666,7 @@
+ void SafepointSynchronize::handle_polling_page_exception(JavaThread *thread) {
+   assert(thread->is_Java_thread(), "polling reference encountered by VM thread");
+   assert(thread->thread_state() == _thread_in_Java, "should come from Java code");
+-  assert(SafepointSynchronize::is_synchronizing(), "polling encountered outside safepoint synchronization");  
++  assert(SafepointSynchronize::is_synchronizing(), "polling encountered outside safepoint synchronization");
+ 
+   // Uncomment this to get some serious before/after printing of the
+   // Sparc safepoint-blob frame structure.
+@@ -721,13 +675,13 @@
+   intptr_t stack_copy[150];
+   for( int i=0; i<150; i++ ) stack_copy[i] = sp[i];
+   bool was_oops[150];
+-  for( int i=0; i<150; i++ ) 
++  for( int i=0; i<150; i++ )
+     was_oops[i] = stack_copy[i] ? ((oop)stack_copy[i])->is_oop() : false;
+   */
+ 
+   if (ShowSafepointMsgs) {
+-    tty->print("handle_polling_page_exception: ");  
+-  }  
++    tty->print("handle_polling_page_exception: ");
++  }
+ 
+   if (PrintSafepointStatistics) {
+     inc_page_trap_count();
+@@ -735,12 +689,57 @@
+ 
+   ThreadSafepointState* state = thread->safepoint_state();
+ 
+-  state->handle_polling_page_exception();  
++  state->handle_polling_page_exception();
+   // print_me(sp,stack_copy,was_oops);
+ }
+ 
++
++void SafepointSynchronize::print_safepoint_timeout(SafepointTimeoutReason reason) {
++  if (!timeout_error_printed) {
++    timeout_error_printed = true;
++    // Print out the thread info which didn't reach the safepoint for debugging
++    // purposes (useful when there are lots of threads in the debugger).
++    tty->print_cr("");
++    tty->print_cr("# SafepointSynchronize::begin: Timeout detected:");
++    if (reason ==  _spinning_timeout) {
++      tty->print_cr("# SafepointSynchronize::begin: Timed out while spinning to reach a safepoint.");
++    } else if (reason == _blocking_timeout) {
++      tty->print_cr("# SafepointSynchronize::begin: Timed out while waiting for threads to stop.");
++    }
++
++    tty->print_cr("# SafepointSynchronize::begin: Threads which did not reach the safepoint:");
++    ThreadSafepointState *cur_state;
++    ResourceMark rm;
++    for(JavaThread *cur_thread = Threads::first(); cur_thread;
++        cur_thread = cur_thread->next()) {
++      cur_state = cur_thread->safepoint_state();
++
++      if (cur_thread->thread_state() != _thread_blocked &&
++          ((reason == _spinning_timeout && cur_state->is_running()) ||
++           (reason == _blocking_timeout && !cur_state->has_called_back()))) {
++        tty->print("# ");
++        cur_thread->print();
++        tty->print_cr("");
++      }
++    }
++    tty->print_cr("# SafepointSynchronize::begin: (End of list)");
++  }
++
++  // To debug a long safepoint, specify both DieOnSafepointTimeout &
++  // ShowMessageBoxOnError.
++  if (DieOnSafepointTimeout) {
++    char msg[1024];
++    VM_Operation *op = VMThread::vm_operation();
++    sprintf(msg, "Safepoint sync time longer than %d ms detected when executing %s.",
++            SafepointTimeoutDelay,
++            op != NULL ? op->name() : "no vm operation");
++    fatal(msg);
++  }
++}
++
++
+ // -------------------------------------------------------------------------------------------------------
+-// Implementation of ThreadSafepointState 
++// Implementation of ThreadSafepointState
+ 
+ ThreadSafepointState::ThreadSafepointState(JavaThread *thread) {
+   _thread = thread;
+@@ -756,25 +755,25 @@
+ 
+ void ThreadSafepointState::destroy(JavaThread *thread) {
+   if (thread->safepoint_state()) {
+-    delete(thread->safepoint_state());  
++    delete(thread->safepoint_state());
+     thread->set_safepoint_state(NULL);
+   }
+ }
+ 
+-void ThreadSafepointState::examine_state_of_thread() {  
++void ThreadSafepointState::examine_state_of_thread() {
+   assert(is_running(), "better be running or just have hit safepoint poll");
+ 
+-  JavaThreadState state = _thread->thread_state();  
++  JavaThreadState state = _thread->thread_state();
+ 
+   // Check for a thread that is suspended. Note that thread resume tries
+   // to grab the Threads_lock which we own here, so a thread cannot be
+-  // resumed during safepoint synchronization. 
++  // resumed during safepoint synchronization.
+ 
+   // We check with locking because another thread that has not yet
+   // synchronized may be trying to suspend this one.
+   bool is_suspended = _thread->is_any_suspended_with_lock();
+   if (is_suspended) {
+-    roll_forward(_at_safepoint); 
++    roll_forward(_at_safepoint);
+     return;
+   }
+ 
+@@ -782,7 +781,7 @@
+   // running, but are actually at a safepoint. We will happily
+   // agree and update the safepoint state here.
+   if (SafepointSynchronize::safepoint_safe(_thread, state)) {
+-      roll_forward(_at_safepoint);       
++      roll_forward(_at_safepoint);
+       return;
+   }
+ 
+@@ -802,7 +801,7 @@
+ 
+ // Returns true if the thread could not be rolled forward at the present position.
+ void ThreadSafepointState::roll_forward(suspend_type type) {
+-  _type = type;  
++  _type = type;
+ 
+   switch(_type) {
+     case _at_safepoint:
+@@ -812,27 +811,27 @@
+     case _call_back:
+       set_has_called_back(false);
+       break;
+-         
+-    case _running:            
++
++    case _running:
+     default:
+       ShouldNotReachHere();
+-  }      
+-}  
++  }
++}
+ 
+-void ThreadSafepointState::restart() {  
+-  switch(type()) {        
+-    case _at_safepoint:               
++void ThreadSafepointState::restart() {
++  switch(type()) {
++    case _at_safepoint:
+     case _call_back:
+-      break;    
+-    
++      break;
++
+     case _running:
+     default:
+-       tty->print_cr("restart thread "INTPTR_FORMAT" with state %d", 
++       tty->print_cr("restart thread "INTPTR_FORMAT" with state %d",
+                       _thread, _type);
+        _thread->print();
+       ShouldNotReachHere();
+-  }    
+-  _type = _running;  
++  }
++  _type = _running;
+   set_has_called_back(false);
+ }
+ 
+@@ -840,14 +839,14 @@
+ void ThreadSafepointState::print_on(outputStream *st) const {
+   const char *s;
+ 
+-  switch(_type) {    
+-    case _running                : s = "_running";              break;    
++  switch(_type) {
++    case _running                : s = "_running";              break;
+     case _at_safepoint           : s = "_at_safepoint";         break;
+-    case _call_back              : s = "_call_back";            break;    
++    case _call_back              : s = "_call_back";            break;
+     default:
+       ShouldNotReachHere();
+   }
+-  
++
+   st->print_cr("Thread: " INTPTR_FORMAT
+               "  [0x%2x] State: %s _has_called_back %d _at_poll_safepoint %d",
+                _thread, _thread->osthread()->thread_id(), s, _has_called_back,
+@@ -877,10 +876,10 @@
+   assert(cb != NULL && cb->is_nmethod(), "return address should be in nmethod");
+   nmethod* nm = (nmethod*)cb;
+ 
+-  // Find frame of caller  
++  // Find frame of caller
+   frame stub_fr = thread()->last_frame();
+   CodeBlob* stub_cb = stub_fr.cb();
+-  assert(stub_cb->is_safepoint_stub(), "must be a safepoint stub");      
++  assert(stub_cb->is_safepoint_stub(), "must be a safepoint stub");
+   RegisterMap map(thread(), true);
+   frame caller_fr = stub_fr.sender(&map);
+ 
+@@ -935,16 +934,16 @@
+     }
+ 
+     // If an exception has been installed we must check for a pending deoptimization
+-    // Deoptimize frame if exception has been thrown. 
++    // Deoptimize frame if exception has been thrown.
+ 
+-    if (thread()->has_pending_exception() ) { 
++    if (thread()->has_pending_exception() ) {
+       RegisterMap map(thread(), true);
+       frame caller_fr = stub_fr.sender(&map);
+       if (caller_fr.is_deoptimized_frame()) {
+         // The exception patch will destroy registers that are still
+-        // live and will be needed during deoptimization. Defer the 
++        // live and will be needed during deoptimization. Defer the
+         // Async exception should have defered the exception until the
+-        // next safepoint which will be detected when we get into 
++        // next safepoint which will be detected when we get into
+         // the interpreter so if we have an exception now things
+         // are messed up.
+ 
+@@ -957,25 +956,28 @@
+ 
+ //
+ //                     Statistics & Instrumentations
+-// 
++//
+ SafepointSynchronize::SafepointStats*  SafepointSynchronize::_safepoint_stats = NULL;
+ int    SafepointSynchronize::_cur_stat_index = 0;
+ julong SafepointSynchronize::_safepoint_reasons[VM_Operation::VMOp_Terminating];
+ julong SafepointSynchronize::_coalesced_vmop_count = 0;
+-jlong  SafepointSynchronize::_max_sync_time = 0;         
++jlong  SafepointSynchronize::_max_sync_time = 0;
+ 
+ // last_safepoint_start_time records the start time of last safepoint.
+ static jlong  last_safepoint_start_time = 0;
+-static jlong  sync_end_time = 0;  
++static jlong  sync_end_time = 0;
+ static bool   need_to_track_page_armed_status = false;
++static bool   init_done = false;
++
++void SafepointSynchronize::deferred_initialize_stat() {
++  if (init_done) return;
+ 
+-void SafepointSynchronize::initialize_stat() {
+   if (PrintSafepointStatisticsCount <= 0) {
+     fatal("Wrong PrintSafepointStatisticsCount");
+   }
+-  
+-  // If PrintSafepointStatisticsTimeout is specified, the statistics data will 
+-  // be printed right away, in which case, _safepoint_stats will regress to 
++
++  // If PrintSafepointStatisticsTimeout is specified, the statistics data will
++  // be printed right away, in which case, _safepoint_stats will regress to
+   // a single element array. Otherwise, it is a circular ring buffer with default
+   // size of PrintSafepointStatisticsCount.
+   int stats_array_size;
+@@ -987,7 +989,7 @@
+   }
+   _safepoint_stats = (SafepointStats*)os::malloc(stats_array_size
+                                                  * sizeof(SafepointStats));
+-  guarantee(_safepoint_stats != NULL, 
++  guarantee(_safepoint_stats != NULL,
+             "not enough memory for safepoint instrumentation data");
+ 
+   if (UseCompilerSafepoints && DeferPollingPageLoopCount >= 0) {
+@@ -1003,12 +1005,15 @@
+   if (need_to_track_page_armed_status) {
+     tty->print("page_armed ");
+   }
+- 
++
+   tty->print_cr("page_trap_count");
++
++  init_done = true;
+ }
+ 
+ void SafepointSynchronize::begin_statistics(int nof_threads, int nof_running) {
+-  
++  deferred_initialize_stat();
++
+   SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];
+ 
+   VM_Operation *op = VMThread::vm_operation();
+@@ -1020,7 +1025,7 @@
+   spstat->_nof_total_threads = nof_threads;
+   spstat->_nof_initial_running_threads = nof_running;
+   spstat->_nof_threads_hit_page_trap = 0;
+-  
++
+   // Records the start time of spinning. The real time spent on spinning
+   // will be adjusted when spin is done. Same trick is applied for time
+   // spent on waiting for threads to block.
+@@ -1043,12 +1048,12 @@
+   SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];
+ 
+   jlong cur_time = os::javaTimeNanos();
+-  
++
+   spstat->_nof_threads_wait_to_block = _waiting_to_block;
+   if (spstat->_nof_initial_running_threads != 0) {
+     spstat->_time_to_spin = cur_time - spstat->_time_to_spin;
+   }
+- 
++
+   if (need_to_track_page_armed_status) {
+     spstat->_page_armed = (PageArmed == 1);
+   }
+@@ -1063,19 +1068,19 @@
+ 
+ void SafepointSynchronize::update_statistics_on_sync_end(jlong end_time) {
+   SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];
+-  
++
+   if (spstat->_nof_threads_wait_to_block != 0) {
+     spstat->_time_to_wait_to_block = end_time -
+       spstat->_time_to_wait_to_block;
+   }
+-  
++
+   // Records the end time of sync which will be used to calculate the total
+   // vm operation time. Again, the real time spent in syncing will be deducted
+   // from the start of the sync time later when end_statistics is called.
+   spstat->_time_to_sync = end_time - _last_safepoint;
+   if (spstat->_time_to_sync > _max_sync_time) {
+     _max_sync_time = spstat->_time_to_sync;
+-  } 
++  }
+   sync_end_time = end_time;
+ }
+ 
+@@ -1084,16 +1089,16 @@
+ 
+   // Update the vm operation time.
+   spstat->_time_to_exec_vmop = vmop_end_time -  sync_end_time;
+-  // Only the sync time longer than the specified 
++  // Only the sync time longer than the specified
+   // PrintSafepointStatisticsTimeout will be printed out right away.
+   // By default, it is -1 meaning all samples will be put into the list.
+   if ( PrintSafepointStatisticsTimeout > 0) {
+     if (spstat->_time_to_sync > PrintSafepointStatisticsTimeout * MICROUNITS) {
+       print_statistics();
+-    } 
++    }
+   } else {
+     // The safepoint statistics will be printed out when the _safepoin_stats
+-    // array fills up. 
++    // array fills up.
+     if (_cur_stat_index != PrintSafepointStatisticsCount - 1) {
+       _cur_stat_index ++;
+     } else {
+@@ -1107,7 +1112,7 @@
+ void SafepointSynchronize::print_statistics() {
+   int index;
+   SafepointStats* sstats = _safepoint_stats;
+-  
++
+   for (index = 0; index <= _cur_stat_index; index++) {
+     sstats = &_safepoint_stats[index];
+     tty->print("%-28s       ["
+@@ -1123,9 +1128,9 @@
+                INT64_FORMAT_W(6)INT64_FORMAT_W(6)INT64_FORMAT_W(6)
+                "]     "
+                "["INT64_FORMAT_W(6)INT64_FORMAT_W(9) "]          ",
+-               sstats->_time_to_spin / MICROUNITS, 
++               sstats->_time_to_spin / MICROUNITS,
+                sstats->_time_to_wait_to_block / MICROUNITS,
+-               sstats->_time_to_sync / MICROUNITS, 
++               sstats->_time_to_sync / MICROUNITS,
+                sstats->_time_to_exec_vmop / MICROUNITS,
+                sstats->_time_elapsed_since_last_safepoint / MICROUNITS);
+ 
+@@ -1140,19 +1145,18 @@
+ // print_statistics to print out the rest of the sampling.  Then
+ // it tries to summarize the sampling.
+ void SafepointSynchronize::print_stat_on_exit() {
+-  assert(Thread::current()->is_VM_thread(), 
+-         "print_stat_on_exit not called on VMThread");
++  if (_safepoint_stats == NULL) return;
+ 
+   SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];
+-  
++
+   // During VM exit, end_statistics may not get called and in that
+   // case, if the sync time is less than PrintSafepointStatisticsTimeout,
+   // don't print it out.
+   // Approximate the vm op time.
+-  _safepoint_stats[_cur_stat_index]._time_to_exec_vmop = 
++  _safepoint_stats[_cur_stat_index]._time_to_exec_vmop =
+     os::javaTimeNanos() - sync_end_time;
+-  
+-  if ( PrintSafepointStatisticsTimeout < 0 ||  
++
++  if ( PrintSafepointStatisticsTimeout < 0 ||
+        spstat->_time_to_sync > PrintSafepointStatisticsTimeout * MICROUNITS) {
+     print_statistics();
+   }
+@@ -1164,20 +1168,20 @@
+       tty->print_cr("Polling page always armed");
+     }
+   } else {
+-    tty->print_cr("Defer polling page loop count = %d\n", 
++    tty->print_cr("Defer polling page loop count = %d\n",
+                  DeferPollingPageLoopCount);
+   }
+ 
+   for (int index = 0; index < VM_Operation::VMOp_Terminating; index++) {
+     if (_safepoint_reasons[index] != 0) {
+       tty->print_cr("%-26s"UINT64_FORMAT_W(10), VM_Operation::name(index),
+-                    _safepoint_reasons[index]); 
++                    _safepoint_reasons[index]);
+     }
+   }
+ 
+   tty->print_cr(UINT64_FORMAT_W(5)" VM operations coalesced during safepoint",
+-                _coalesced_vmop_count);        
+-  tty->print_cr("Maximum sync time  "INT64_FORMAT_W(5)" ms", 
++                _coalesced_vmop_count);
++  tty->print_cr("Maximum sync time  "INT64_FORMAT_W(5)" ms",
+                 _max_sync_time / MICROUNITS);
+ }
+ 
+@@ -1190,12 +1194,12 @@
+   if (_state == _not_synchronized) {
+     tty->print_cr("not synchronized");
+   } else if (_state == _synchronizing || _state == _synchronized) {
+-    tty->print_cr("State: %s", (_state == _synchronizing) ? "synchronizing" : 
++    tty->print_cr("State: %s", (_state == _synchronizing) ? "synchronizing" :
+                   "synchronized");
+ 
+-    for(JavaThread *cur = Threads::first(); cur; cur = cur->next()) {       
++    for(JavaThread *cur = Threads::first(); cur; cur = cur->next()) {
+        cur->safepoint_state()->print();
+-    }  
++    }
+   }
+ }
+ 
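+ 
One functional thread running through the safepoint.cpp hunks above: the timeout bookkeeping moves from os::javaTimeMillis() to os::javaTimeNanos(). The millisecond SafepointTimeoutDelay is scaled up by MICROUNITS (1,000,000) to a nanosecond deadline on entry, and remaining time is scaled back down to milliseconds for Safepoint_lock->wait() and the timeout report. A standalone illustration of just that unit arithmetic, with made-up values:

#include <cstdint>
#include <cstdio>

static const int64_t MICROUNITS = 1000000;  // ns per ms; value as in the patch

int main() {
    int64_t now_ns   = 7000000000LL;  // stand-in for os::javaTimeNanos()
    int64_t delay_ms = 10000;         // stand-in for SafepointTimeoutDelay
    int64_t limit_ns = now_ns + delay_ms * MICROUNITS;          // deadline in ns
    int64_t later_ns = now_ns + 2500000000LL;                   // 2.5 s elapsed
    int64_t remaining_ms = (limit_ns - later_ns) / MICROUNITS;  // back to ms
    std::printf("%lld ms of %lld remain\n",
                (long long)remaining_ms, (long long)delay_ms);  // 7500 of 10000
    return 0;
}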
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/safepoint.hpp openjdk/hotspot/src/share/vm/runtime/safepoint.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/safepoint.hpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/safepoint.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)safepoint.hpp	1.103 07/05/26 16:02:40 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+@@ -46,13 +43,13 @@
+ 
+ //
+ // Implements roll-forward to safepoint (safepoint synchronization)
+-// 
++//
+ class SafepointSynchronize : AllStatic {
+  public:
+   enum SynchronizeState {
+       _not_synchronized = 0,                   // Threads not synchronized at a safepoint
+                                                // Keep this value 0. See the comment in do_call_back()
+-      _synchronizing    = 1,                   // Synchronizing in progress  
++      _synchronizing    = 1,                   // Synchronizing in progress
+       _synchronized     = 2                    // All Java threads are stopped at a safepoint. Only VM thread is running
+   };
+ 
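
As the comment notes, _not_synchronized has to stay 0 so the hot-path test in do_call_back() remains a compare against zero. A rough sketch of that check, using std::atomic in place of HotSpot's own volatile and ordering primitives:

    #include <atomic>

    // Stand-ins for SafepointSynchronize::_state and its enum values.
    enum SyncState { NOT_SYNCHRONIZED = 0, SYNCHRONIZING = 1, SYNCHRONIZED = 2 };

    static std::atomic<int> g_state(NOT_SYNCHRONIZED);

    // Java threads poll this constantly; keeping NOT_SYNCHRONIZED == 0 lets
    // the check compile to a single test against zero on most targets.
    static bool do_call_back() {
      return g_state.load(std::memory_order_relaxed) != NOT_SYNCHRONIZED;
    }
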
+@@ -62,12 +59,17 @@
+       _other_thread = 2
+   };
+ 
+-  typedef struct { 
++  enum SafepointTimeoutReason {
++    _spinning_timeout = 0,
++    _blocking_timeout = 1
++  };
++
++  typedef struct {
+    int    _vmop_type;                         // type of VM operation that triggers the safepoint
+     int    _nof_total_threads;                 // total number of Java threads
+     int    _nof_initial_running_threads;       // total number of initially seen running threads
+    int    _nof_threads_wait_to_block;         // total number of threads waiting to block
+-    bool   _page_armed;                        // true if polling page is armed, false otherwise 
++    bool   _page_armed;                        // true if polling page is armed, false otherwise
+     int    _nof_threads_hit_page_trap;         // total number of threads hitting the page trap
+     jlong  _time_to_spin;                      // total time in millis spent in spinning
+    jlong  _time_to_wait_to_block;             // total time in millis spent waiting to block
+@@ -79,7 +81,7 @@
+  private:
+   static volatile SynchronizeState _state;     // Threads might read this flag directly, without acquiring the Threads_lock
+   static volatile int _waiting_to_block;       // No. of threads we are waiting to block.
+-  
++
+   // This counter is used for fast versions of jni_Get<Primitive>Field.
+  // An even value means there are no ongoing safepoint operations.
+   // The counter is incremented ONLY at the beginning and end of each
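
The even/odd counter described above is a seqlock-style protocol: a reader that observes the same even value before and after its load knows no safepoint overlapped the read. A sketch of the shape of the fast path (the real jni_Get<Primitive>Field fast versions are generated stubs; slow_get_int_field here is a hypothetical fallback):

    #include <atomic>

    static std::atomic<long> g_safepoint_counter(0);  // even = no safepoint running

    int slow_get_int_field(const int* field_addr) {
      // Hypothetical slow path: real code would transition the thread state
      // and cooperate with the safepoint machinery before reading.
      return *field_addr;
    }

    int fast_get_int_field(const int* field_addr) {
      long before = g_safepoint_counter.load();
      int  value  = *field_addr;                      // speculative read
      long after  = g_safepoint_counter.load();
      if ((before & 1) == 0 && before == after) {
        return value;                                 // no safepoint ran during the read
      }
      return slow_get_int_field(field_addr);
    }
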
+@@ -91,7 +93,7 @@
+ private:
+ 
+   static jlong   _last_safepoint;      // Time of last safepoint
+-  
++
+   // statistics
+   static SafepointStats*  _safepoint_stats;     // array of SafepointStats struct
+   static int              _cur_stat_index;      // current index to the above array
+@@ -107,6 +109,10 @@
+   inline static void inc_page_trap_count() {
+     Atomic::inc(&_safepoint_stats[_cur_stat_index]._nof_threads_hit_page_trap);
+   }
++
++  // For debugging long safepoints
++  static void print_safepoint_timeout(SafepointTimeoutReason timeout_reason);
++
+ public:
+ 
+   // Main entry points
+@@ -125,10 +131,10 @@
+   inline static bool do_call_back() {
+     return (_state != _not_synchronized);
+   }
+-  
++
+   // Called when a thread voluntarily blocks
+   static void   block(JavaThread *thread);
+-  static void   signal_thread_at_safepoint()              { _waiting_to_block--; }  
++  static void   signal_thread_at_safepoint()              { _waiting_to_block--; }
+ 
+   // Exception handling for page polling
+   static void handle_polling_page_exception(JavaThread *thread);
+@@ -141,8 +147,8 @@
+   // debugging
+   static void print_state()                                PRODUCT_RETURN;
+   static void safepoint_msg(const char* format, ...)       PRODUCT_RETURN;
+-  
+-  static void initialize_stat();
++
++  static void deferred_initialize_stat();
+   static void print_stat_on_exit();
+   inline static void inc_vmop_coalesced_count() { _coalesced_vmop_count++; }
+ 
+@@ -150,18 +156,18 @@
+   static void set_is_not_at_safepoint()                    { _state = _not_synchronized; }
+ 
+   // assembly support
+-  static address address_of_state()                        { return (address)&_state; }  
++  static address address_of_state()                        { return (address)&_state; }
+ 
+   static address safepoint_counter_addr()                  { return (address)&_safepoint_counter; }
+ };
+ 
+ // State class for a thread suspended at a safepoint
+ class ThreadSafepointState: public CHeapObj {
+- public:  
++ public:
+   // These states are maintained by VM thread while threads are being brought
+   // to a safepoint.  After SafepointSynchronize::end(), they are reset to
+   // _running.
+-  enum suspend_type {    
++  enum suspend_type {
+     _running                =  0, // Thread state not yet determined (i.e., not at a safepoint yet)
+    _at_safepoint           =  1, // Thread at a safepoint (e.g., when blocked on a lock)
+     _call_back              =  2  // Keep executing and wait for callback (if thread is in interpreted or vm)
+@@ -171,22 +177,22 @@
+   // Thread has called back the safepoint code (for debugging)
+   bool                           _has_called_back;
+ 
+-  JavaThread *                   _thread;    
+-  volatile suspend_type          _type;  
++  JavaThread *                   _thread;
++  volatile suspend_type          _type;
+ 
+ 
+- public:       
++ public:
+   ThreadSafepointState(JavaThread *thread);
+ 
+-  // examine/roll-forward/restart   
+-  void examine_state_of_thread();               
++  // examine/roll-forward/restart
++  void examine_state_of_thread();
+   void roll_forward(suspend_type type);
+-  void restart();  
+-  
+-  // Query  
+-  JavaThread*  thread() const	      { return _thread; }
+-  suspend_type type() const	      { return _type; }    
+-  bool         is_running() const     { return (_type==_running); } 
++  void restart();
++
++  // Query
++  JavaThread*  thread() const         { return _thread; }
++  suspend_type type() const           { return _type; }
++  bool         is_running() const     { return (_type==_running); }
+ 
+   // Support for safepoint timeout (debugging)
+   bool has_called_back() const                   { return _has_called_back; }
+@@ -196,9 +202,9 @@
+ 
+   void handle_polling_page_exception();
+ 
+-  // debugging  
++  // debugging
+   void print_on(outputStream* st) const;
+-  void print() const			    { print_on(tty); }
++  void print() const                        { print_on(tty); }
+ 
+   // Initialize
+   static void create(JavaThread *thread);
+@@ -215,14 +221,14 @@
+ };
+ 
+ //
+-// CounterDecay 
++// CounterDecay
+ //
+// Iterates through invocation counters and decrements them. This
+-// is done at each safepoint. 
++// is done at each safepoint.
+ //
+-class CounterDecay : public AllStatic {   
++class CounterDecay : public AllStatic {
+   static jlong _last_timestamp;
+- public: 
++ public:
+   static  void decay();
+-  static  bool is_decay_needed() { return (os::javaTimeMillis() - _last_timestamp) > CounterDecayMinIntervalLength; }  
++  static  bool is_decay_needed() { return (os::javaTimeMillis() - _last_timestamp) > CounterDecayMinIntervalLength; }
+ };
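
CounterDecay pairs a cheap timestamp test (is_decay_needed) with the actual decay work. A standalone sketch of the idea, with a hypothetical interval constant (HotSpot takes it from CounterDecayMinIntervalLength and walks real invocation counters):

    static long       g_last_timestamp   = 0;
    static const long kMinIntervalMillis = 500;       // hypothetical interval

    static bool is_decay_needed(long now_millis) {
      return (now_millis - g_last_timestamp) > kMinIntervalMillis;
    }

    // Halve every counter so stale invocation activity gradually stops
    // counting toward compilation thresholds.
    static void decay(int* counters, int n, long now_millis) {
      g_last_timestamp = now_millis;
      for (int i = 0; i < n; i++) {
        counters[i] >>= 1;
      }
    }
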
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/sharedRuntime.cpp openjdk/hotspot/src/share/vm/runtime/sharedRuntime.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/sharedRuntime.cpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/sharedRuntime.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)sharedRuntime.cpp	1.382 07/07/19 12:19:08 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -30,9 +27,9 @@
+ #include <math.h>
+ 
+ HS_DTRACE_PROBE_DECL4(hotspot, object__alloc, Thread*, char*, int, size_t);
+-HS_DTRACE_PROBE_DECL7(hotspot, method__entry, int, 
++HS_DTRACE_PROBE_DECL7(hotspot, method__entry, int,
+                       char*, int, char*, int, char*, int);
+-HS_DTRACE_PROBE_DECL7(hotspot, method__return, int, 
++HS_DTRACE_PROBE_DECL7(hotspot, method__return, int,
+                       char*, int, char*, int, char*, int);
+ 
+ // Implementation of SharedRuntime
+@@ -48,13 +45,13 @@
+ int SharedRuntime::_implicit_div0_throws = 0;
+ int SharedRuntime::_throw_null_ctr = 0;
+ 
+-int SharedRuntime::_nof_normal_calls = 0; 
+-int SharedRuntime::_nof_optimized_calls = 0; 
+-int SharedRuntime::_nof_inlined_calls = 0; 
+-int SharedRuntime::_nof_megamorphic_calls = 0; 
++int SharedRuntime::_nof_normal_calls = 0;
++int SharedRuntime::_nof_optimized_calls = 0;
++int SharedRuntime::_nof_inlined_calls = 0;
++int SharedRuntime::_nof_megamorphic_calls = 0;
+ int SharedRuntime::_nof_static_calls = 0;
+ int SharedRuntime::_nof_inlined_static_calls = 0;
+-int SharedRuntime::_nof_interface_calls = 0; 
++int SharedRuntime::_nof_interface_calls = 0;
+ int SharedRuntime::_nof_optimized_interface_calls = 0;
+ int SharedRuntime::_nof_inlined_interface_calls = 0;
+ int SharedRuntime::_nof_megamorphic_interface_calls = 0;
+@@ -67,10 +64,10 @@
+ int SharedRuntime::_multi3_ctr=0;
+ int SharedRuntime::_multi4_ctr=0;
+ int SharedRuntime::_multi5_ctr=0;
+-int SharedRuntime::_mon_enter_stub_ctr=0;        
+-int SharedRuntime::_mon_exit_stub_ctr=0;         
+-int SharedRuntime::_mon_enter_ctr=0;             
+-int SharedRuntime::_mon_exit_ctr=0;              
++int SharedRuntime::_mon_enter_stub_ctr=0;
++int SharedRuntime::_mon_exit_stub_ctr=0;
++int SharedRuntime::_mon_enter_ctr=0;
++int SharedRuntime::_mon_exit_ctr=0;
+ int SharedRuntime::_partial_subtype_ctr=0;
+ int SharedRuntime::_jbyte_array_copy_ctr=0;
+ int SharedRuntime::_jshort_array_copy_ctr=0;
+@@ -81,7 +78,7 @@
+ int SharedRuntime::_unsafe_array_copy_ctr=0;
+ int SharedRuntime::_generic_array_copy_ctr=0;
+ int SharedRuntime::_slow_array_copy_ctr=0;
+-int SharedRuntime::_find_handler_ctr=0;          
++int SharedRuntime::_find_handler_ctr=0;
+ int SharedRuntime::_rethrow_ctr=0;
+ 
+ int     SharedRuntime::_ICmiss_index                    = 0;
+@@ -91,13 +88,13 @@
+ void SharedRuntime::trace_ic_miss(address at) {
+   for (int i = 0; i < _ICmiss_index; i++) {
+     if (_ICmiss_at[i] == at) {
+-      _ICmiss_count[i]++; 
++      _ICmiss_count[i]++;
+       return;
+     }
+   }
+-  int index = _ICmiss_index++; 
++  int index = _ICmiss_index++;
+   if (_ICmiss_index >= maxICmiss_count) _ICmiss_index = maxICmiss_count - 1;
+-  _ICmiss_at[index] = at; 
++  _ICmiss_at[index] = at;
+   _ICmiss_count[index] = 1;
+ }
+ 
+@@ -191,7 +188,7 @@
+ JRT_END
+ 
+ 
+-JRT_LEAF(jlong, SharedRuntime::f2l(jfloat  x))  
++JRT_LEAF(jlong, SharedRuntime::f2l(jfloat  x))
+   if (g_isnan(x)) {return 0;}
+   jlong lltmp = (jlong)x;
+   if (lltmp != min_jlong) {
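
For reference, SharedRuntime::f2l (touched above) implements Java's float-to-long rules: NaN converts to 0 and out-of-range values saturate at the jlong extremes. A portable restatement of those semantics (the HotSpot version instead relies on the platform conversion yielding min_jlong for out-of-range input):

    #include <cmath>
    #include <climits>

    long long java_f2l(float x) {
      if (std::isnan(x)) return 0;                         // Java: NaN -> 0
      if (x >= 9223372036854775808.0f)  return LLONG_MAX;  // >= 2^63: saturate high
      if (x <= -9223372036854775808.0f) return LLONG_MIN;  // <= -2^63: saturate low
      return (long long)x;                                 // in range: truncate toward zero
    }
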
+@@ -273,8 +270,8 @@
+     } else {
+       return code->exception_begin();
+     }
+-  } 
+-  
++  }
++
+   // Entry code
+   if (StubRoutines::returns_to_call_stub(return_address)) {
+     return StubRoutines::catch_exception_entry();
+@@ -292,7 +289,7 @@
+       assert(code != NULL, "nmethod must be present");
+       assert(code->header_begin() != code->exception_begin(), "no exception handler");
+       return code->exception_begin();
+-    } 
++    }
+     if (blob->is_runtime_stub()) {
+       ShouldNotReachHere();   // callers are responsible for skipping runtime stub frames
+     }
+@@ -344,7 +341,7 @@
+     char buf[256];
+     jio_snprintf(buf, sizeof(buf),
+                  "... found polling page %s exception at pc = "
+-                 INTPTR_FORMAT ", stub =" INTPTR_FORMAT, 
++                 INTPTR_FORMAT ", stub =" INTPTR_FORMAT,
+                  at_poll_return ? "return" : "loop",
+                  (intptr_t)pc, (intptr_t)stub);
+     tty->print_raw_cr(buf);
+@@ -383,7 +380,7 @@
+ // for given exception
+ address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
+                                                     bool force_unwind, bool top_frame_only) {
+-  assert(nm != NULL, "must exist");  
++  assert(nm != NULL, "must exist");
+   ResourceMark rm;
+ 
+   ScopeDesc* sd = nm->scope_desc_at(ret_pc);
+@@ -424,7 +421,7 @@
+       }
+     } while (!top_frame_only && handler_bci < 0 && sd != NULL);
+   }
+-  
++
+   // found handling method => lookup exception handler
+   int catch_pco = ret_pc - nm->instructions_begin();
+ 
+@@ -470,11 +467,6 @@
+   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_AbstractMethodError());
+ JRT_END
+ 
+-JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* thread))
+-  // These errors occur only at call sites
+-  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub");
+-JRT_END
+-
+ JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* thread))
+   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
+ JRT_END
+@@ -555,7 +547,7 @@
+           // 1. Inline-cache check in C2I handler blob,
+           // 2. Inline-cache check in nmethod, or
+          // 3. Implicit null exception in nmethod
+-          
++
+           if (!cb->is_nmethod()) {
+             guarantee(cb->is_adapter_blob(),
+                       "exception happened outside interpreter, nmethods and vtable stubs (2)");
+@@ -651,9 +643,9 @@
+ #ifndef PRODUCT
+ 
+ void SharedRuntime::verify_caller_frame(frame caller_frame, methodHandle callee_method) {
+-  ResourceMark rm;  
++  ResourceMark rm;
+   assert (caller_frame.is_interpreted_frame(), "sanity check");
+-  assert (callee_method->has_compiled_code(), "callee must be compiled");  
++  assert (callee_method->has_compiled_code(), "callee must be compiled");
+   methodHandle caller_method (Thread::current(), caller_frame.interpreter_frame_method());
+   jint bci = caller_frame.interpreter_frame_bci();
+   methodHandle method = find_callee_method_inside_interpreter(caller_frame, caller_method, bci);
+@@ -664,10 +656,10 @@
+   EXCEPTION_MARK;
+   Bytecode_invoke* bytecode = Bytecode_invoke_at(caller_method, bci);
+   methodHandle staticCallee = bytecode->static_target(CATCH); // Non-product code
+-  
++
+   bytecode = Bytecode_invoke_at(caller_method, bci);
+   int bytecode_index = bytecode->index();
+-  Bytecodes::Code bc = bytecode->adjusted_invoke_code();      
++  Bytecodes::Code bc = bytecode->adjusted_invoke_code();
+ 
+   Handle receiver;
+   if (bc == Bytecodes::_invokeinterface ||
+@@ -776,45 +768,45 @@
+   Handle nullHandle;  //create a handy null handle for exception returns
+ 
+   assert(!vfst.at_end(), "Java frame must exist");
+-  
++
+   // Find caller and bci from vframe
+   methodHandle caller (THREAD, vfst.method());
+   int          bci    = vfst.bci();
+-  
++
+   // Find bytecode
+   Bytecode_invoke* bytecode = Bytecode_invoke_at(caller, bci);
+   bc = bytecode->adjusted_invoke_code();
+   int bytecode_index = bytecode->index();
+ 
+-  // Find receiver for non-static call    
++  // Find receiver for non-static call
+   if (bc != Bytecodes::_invokestatic) {
+    // This register map must be updated since we need to find the receiver for
+     // compiled frames. The receiver might be in a register.
+-    RegisterMap reg_map2(thread); 
++    RegisterMap reg_map2(thread);
+     frame stubFrame   = thread->last_frame();
+-    // Caller-frame is a compiled frame 
+-    frame callerFrame = stubFrame.sender(&reg_map2); 
++    // Caller-frame is a compiled frame
++    frame callerFrame = stubFrame.sender(&reg_map2);
+ 
+     methodHandle callee = bytecode->static_target(CHECK_(nullHandle));
+     if (callee.is_null()) {
+       THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
+-    }  
++    }
+     // Retrieve from a compiled argument list
+     receiver = Handle(THREAD, callerFrame.retrieve_receiver(&reg_map2));
+ 
+     if (receiver.is_null()) {
+       THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
+     }
+-  }      
++  }
+ 
+-  // Resolve method. This is parameterized by bytecode.  
++  // Resolve method. This is parameterized by bytecode.
+   constantPoolHandle constants (THREAD, caller->constants());
+   assert (receiver.is_null() || receiver->is_oop(), "wrong receiver");
+   LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_(nullHandle));
+ 
+ #ifdef ASSERT
+-  // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls  
+-  if (bc != Bytecodes::_invokestatic) {    
++  // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
++  if (bc != Bytecodes::_invokestatic) {
+     assert(receiver.not_null(), "should have thrown exception");
+     KlassHandle receiver_klass (THREAD, receiver->klass());
+     klassOop rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle));
+@@ -838,10 +830,10 @@
+   ResourceMark rm(THREAD);
+   // We need first to check if any Java activations (compiled, interpreted)
+   // exist on the stack since last JavaCall.  If not, we need
+-  // to get the target method from the JavaCall wrapper.    
++  // to get the target method from the JavaCall wrapper.
+   vframeStream vfst(thread, true);  // Do not skip any javaCalls
+   methodHandle callee_method;
+-  if (vfst.at_end()) {    
++  if (vfst.at_end()) {
+     // No Java frames were found on stack since we did the JavaCall.
+     // Hence the stack can only contain an entry_frame.  We need to
+     // find the target method from the stub frame.
+@@ -850,8 +842,8 @@
+     assert(fr.is_runtime_frame(), "must be a runtimeStub");
+     fr = fr.sender(&reg_map);
+     assert(fr.is_entry_frame(), "must be");
+-    // fr is now pointing to the entry frame.    
+-    callee_method = methodHandle(THREAD, fr.entry_frame_call_wrapper()->callee_method());        
++    // fr is now pointing to the entry frame.
++    callee_method = methodHandle(THREAD, fr.entry_frame_call_wrapper()->callee_method());
+     assert(fr.entry_frame_call_wrapper()->receiver() == NULL || !callee_method->is_static(), "non-null receiver for static call??");
+   } else {
+     Bytecodes::Code bc;
+@@ -863,7 +855,7 @@
+   return callee_method;
+ }
+ 
+-// Resolves a call.  
++// Resolves a call.
+ methodHandle SharedRuntime::resolve_helper(JavaThread *thread,
+                                            bool is_virtual,
+                                            bool is_optimized, TRAPS) {
+@@ -871,19 +863,19 @@
+   callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
+   if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
+     int retry_count = 0;
+-    while (!HAS_PENDING_EXCEPTION && callee_method->is_old() && 
++    while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
+            callee_method->method_holder() != SystemDictionary::object_klass()) {
+-      // If has a pending exception then there is no need to re-try to 
++      // If there is a pending exception then there is no need to re-try to
+       // resolve this method.
+       // If the method has been redefined, we need to try again.
+       // Hack: we have no way to update the vtables of arrays, so don't
+       // require that java.lang.Object has been updated.
+ 
+      // It is very unlikely that a method is redefined more than 100 times
+-      // in the middle of resolve. If it is looping here more than 100 times 
++      // in the middle of resolve. If it is looping here more than 100 times
+      // then there could be a bug here.
+-      guarantee((retry_count++ < 100), 
+-    	        "Could not resolve to latest version of redefined method");
++      guarantee((retry_count++ < 100),
++                "Could not resolve to latest version of redefined method");
+       // method is redefined in the middle of resolve so re-try.
+       callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
+     }
+@@ -924,13 +916,13 @@
+ #ifndef PRODUCT
+   // tracing/debugging/statistics
+   int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
+-                (is_virtual) ? (&_resolve_virtual_ctr) : 
++                (is_virtual) ? (&_resolve_virtual_ctr) :
+                                (&_resolve_static_ctr);
+   Atomic::inc(addr);
+ 
+   if (TraceCallFixup) {
+     ResourceMark rm(thread);
+-    tty->print("resolving %s%s (%s) call to", 
++    tty->print("resolving %s%s (%s) call to",
+       (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
+       Bytecodes::name(invoke_code));
+     callee_method->print_short_name(tty);
+@@ -966,12 +958,12 @@
+                      is_optimized, static_bound, virtual_call_info,
+                      CHECK_(methodHandle()));
+   } else {
+-    // static call 
++    // static call
+     CompiledStaticCall::compute_entry(callee_method, static_call_info);
+   }
+ 
+   // grab lock, check for deoptimization and potentially patch caller
+-  { 
++  {
+     MutexLocker ml_patch(CompiledIC_lock);
+ 
+     // Now that we are ready to patch if the methodOop was redefined then
+@@ -981,18 +973,18 @@
+ #ifdef ASSERT
+       // We must not try to patch to jump to an already unloaded method.
+       if (dest_entry_point != 0) {
+-	assert(CodeCache::find_blob(dest_entry_point) != NULL,
+-	       "should not unload nmethod while locked");
++        assert(CodeCache::find_blob(dest_entry_point) != NULL,
++               "should not unload nmethod while locked");
+       }
+ #endif
+       if (is_virtual) {
+-	CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());
+-	if (inline_cache->is_clean()) {
+-	  inline_cache->set_to_monomorphic(virtual_call_info);
+-	}
++        CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());
++        if (inline_cache->is_clean()) {
++          inline_cache->set_to_monomorphic(virtual_call_info);
++        }
+       } else {
+-	CompiledStaticCall* ssc = compiledStaticCall_before(caller_frame.pc());
+-	if (ssc->is_clean()) ssc->set(static_call_info);
++        CompiledStaticCall* ssc = compiledStaticCall_before(caller_frame.pc());
++        if (ssc->is_clean()) ssc->set(static_call_info);
+       }
+     }
+ 
+@@ -1008,14 +1000,14 @@
+   RegisterMap reg_map(thread, false);
+   frame stub_frame = thread->last_frame();
+   assert(stub_frame.is_runtime_frame(), "sanity check");
+-  frame caller_frame = stub_frame.sender(&reg_map); 
++  frame caller_frame = stub_frame.sender(&reg_map);
+   assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame");
+ #endif /* ASSERT */
+ 
+   methodHandle callee_method;
+   JRT_BLOCK
+     callee_method = SharedRuntime::handle_ic_miss_helper(thread, CHECK_NULL);
+-    // Return methodOop through TLS  
++    // Return methodOop through TLS
+     thread->set_vm_result(callee_method());
+   JRT_BLOCK_END
+   // return compiled code entry point after potential safepoints
+@@ -1034,11 +1026,11 @@
+   // invisible to the stack walking code. The i2c path will
+   // place the callee method in the callee_target. It is stashed
+   // there because if we try and find the callee by normal means a
+-  // safepoint is possible and have trouble gc'ing the compiled args. 
++  // safepoint is possible and we would have trouble gc'ing the compiled args.
+   RegisterMap reg_map(thread, false);
+   frame stub_frame = thread->last_frame();
+   assert(stub_frame.is_runtime_frame(), "sanity check");
+-  frame caller_frame = stub_frame.sender(&reg_map); 
++  frame caller_frame = stub_frame.sender(&reg_map);
+   if (caller_frame.is_interpreted_frame() || caller_frame.is_entry_frame() ) {
+     methodOop callee = thread->callee_target();
+     guarantee(callee != NULL && callee->is_method(), "bad handshake");
+@@ -1050,9 +1042,9 @@
+   // Must be compiled to compiled path which is safe to stackwalk
+   methodHandle callee_method;
+   JRT_BLOCK
+-    // Force resolving of caller (if we called from compiled frame)        
++    // Force resolving of caller (if we called from compiled frame)
+     callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_NULL);
+-    thread->set_vm_result(callee_method());  
++    thread->set_vm_result(callee_method());
+   JRT_BLOCK_END
+   // return compiled code entry point after potential safepoints
+   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
+@@ -1104,9 +1096,9 @@
+ 
+ 
+ methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
+-  ResourceMark rm(thread);  
++  ResourceMark rm(thread);
+   CallInfo call_info;
+-  Bytecodes::Code bc;  
++  Bytecodes::Code bc;
+ 
+   // receiver is NULL for static calls. An exception is thrown for NULL
+   // receivers for non-static calls
+@@ -1117,16 +1109,16 @@
+  // when in fact the site can never miss. Worse, because we'd think it was megamorphic
+  // we'd try to do a vtable dispatch; however, methods that can be statically bound
+   // don't have vtable entries (vtable_index < 0) and we'd blow up. So we force a
+-  // reresolution of the  call site (as if we did a handle_wrong_method and not an 
++  // reresolution of the call site (as if we did a handle_wrong_method and not a
+   // plain ic_miss) and the site will be converted to an optimized virtual call site
+   // never to miss again. I don't believe C2 will produce code like this but if it
+   // did this would still be the correct thing to do for it too, hence no ifdef.
+-  // 
++  //
+   if (call_info.resolved_method()->can_be_statically_bound()) {
+     methodHandle callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_(methodHandle()));
+     if (TraceCallFixup) {
+       RegisterMap reg_map(thread, false);
+-      frame caller_frame = thread->last_frame().sender(&reg_map);    
++      frame caller_frame = thread->last_frame().sender(&reg_map);
+       ResourceMark rm(thread);
+       tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
+       callee_method->print_short_name(tty);
+@@ -1160,22 +1152,22 @@
+   }
+ #endif
+ 
+-  // install an event collector so that when a vtable stub is created the 
++  // install an event collector so that when a vtable stub is created the
+   // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
+   // event can't be posted when the stub is created as locks are held
+   // - instead the event will be deferred until the event collector goes
+   // out of scope.
+   JvmtiDynamicCodeEventCollector event_collector;
+ 
+-  // Update inline cache to megamorphic. Skip update if caller has been 
++  // Update inline cache to megamorphic. Skip update if caller has been
+   // made non-entrant or we are called from interpreted.
+   { MutexLocker ml_patch (CompiledIC_lock);
+     RegisterMap reg_map(thread, false);
+-    frame caller_frame = thread->last_frame().sender(&reg_map);    
++    frame caller_frame = thread->last_frame().sender(&reg_map);
+     CodeBlob* cb = caller_frame.cb();
+     if (cb->is_nmethod() && ((nmethod*)cb)->is_in_use()) {
+-      // Not a non-entrant nmethod, so find inline_cache 
+-      CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());    
++      // Not a non-entrant nmethod, so find inline_cache
++      CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());
+       bool should_be_mono = false;
+       if (inline_cache->is_optimized()) {
+         if (TraceCallFixup) {
+@@ -1213,19 +1205,19 @@
+         // by using a new icBuffer.
+         CompiledICInfo info;
+         KlassHandle receiver_klass(THREAD, receiver()->klass());
+-        inline_cache->compute_monomorphic_entry(callee_method, 
++        inline_cache->compute_monomorphic_entry(callee_method,
+                                                 receiver_klass,
+                                                 inline_cache->is_optimized(),
+                                                 false,
+                                                 info, CHECK_(methodHandle()));
+-	inline_cache->set_to_monomorphic(info);
++        inline_cache->set_to_monomorphic(info);
+       } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
+-	// Change to megamorphic
+-	inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
++        // Change to megamorphic
++        inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
+       } else {
+-	// Either clean or megamorphic
++        // Either clean or megamorphic
+       }
+-    }  
++    }
+   } // Release CompiledIC_lock
+ 
+   return callee_method;
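
handle_ic_miss_helper walks an inline cache through its lifetime: clean, then monomorphic once a first receiver has been bound, then megamorphic (a vtable stub) when a different receiver shows up. A much-simplified state sketch; the real CompiledIC transitions patch machine code and must hold CompiledIC_lock, as the MutexLocker above shows:

    enum ICState { IC_CLEAN, IC_MONOMORPHIC, IC_MEGAMORPHIC };

    struct InlineCacheSketch {
      ICState state = IC_CLEAN;

      void on_miss(bool receiver_matches_cached) {
        switch (state) {
        case IC_CLEAN:
          state = IC_MONOMORPHIC;       // bind to the first receiver klass seen
          break;
        case IC_MONOMORPHIC:
          if (!receiver_matches_cached)
            state = IC_MEGAMORPHIC;     // give up and dispatch through a vtable stub
          break;
        case IC_MEGAMORPHIC:
          break;                        // already fully general
        }
      }
    };
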
+@@ -1240,19 +1232,19 @@
+ methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) {
+   ResourceMark rm(thread);
+   RegisterMap reg_map(thread, false);
+-  frame stub_frame = thread->last_frame();  
++  frame stub_frame = thread->last_frame();
+   assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
+   frame caller = stub_frame.sender(&reg_map);
+ 
+   // Do nothing if the frame isn't a live compiled frame.
+   // nmethod could be deoptimized by the time we get here
+   // so no update to the caller is needed.
+-  
++
+   if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) {
+ 
+     address pc = caller.pc();
+     Events::log("update call-site at pc " INTPTR_FORMAT, pc);
+-    
++
+     // Default call_addr is the location of the "basic" call.
+    // Determine the address of the call we are reresolving. With
+     // Inline Caches we will always find a recognizable call.
+@@ -1262,7 +1254,7 @@
+     // calls it depends on the state of the UseInlineCaches switch.
+     //
+     // With Inline Caches disabled we can get here for a virtual call
+-    // for two reasons: 
++    // for two reasons:
+     //   1 - calling an abstract method. The vtable for abstract methods
+    //       will run us through handle_wrong_method and we will eventually
+    //       end up in the interpreter to throw the AME.
+@@ -1278,8 +1270,8 @@
+       MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
+       // Location of call instruction
+       if (NativeCall::is_call_before(pc)) {
+-	NativeCall *ncall = nativeCall_before(pc);
+-	call_addr = ncall->instruction_address();
++        NativeCall *ncall = nativeCall_before(pc);
++        call_addr = ncall->instruction_address();
+       }
+     }
+ 
+@@ -1295,16 +1287,16 @@
+       RelocIterator iter(caller_nm, call_addr, call_addr+1);
+       int ret = iter.next(); // Get item
+       if (ret) {
+-	assert(iter.addr() == call_addr, "must find call");
+-	if (iter.type() == relocInfo::static_call_type) {
+-	  is_static_call = true;
+-	} else {
+-	  assert(iter.type() == relocInfo::virtual_call_type ||
+-		 iter.type() == relocInfo::opt_virtual_call_type
+-		, "unexpected relocInfo. type");
+-	}
++        assert(iter.addr() == call_addr, "must find call");
++        if (iter.type() == relocInfo::static_call_type) {
++          is_static_call = true;
++        } else {
++          assert(iter.type() == relocInfo::virtual_call_type ||
++                 iter.type() == relocInfo::opt_virtual_call_type
++                , "unexpected relocInfo. type");
++        }
+       } else {
+-	assert(!UseInlineCaches, "relocation info. must exist for this address");
++        assert(!UseInlineCaches, "relocation info. must exist for this address");
+       }
+ 
+       // Cleaning the inline cache will force a new resolve. This is more robust
+@@ -1313,24 +1305,24 @@
+       // leads to very hard to track down bugs, if an inline cache gets updated
+       // to a wrong method). It should not be performance critical, since the
+       // resolve is only done once.
+-      
+-      MutexLocker ml(CompiledIC_lock);    
++
++      MutexLocker ml(CompiledIC_lock);
+       //
+       // We do not patch the call site if the nmethod has been made non-entrant
+       // as it is a waste of time
+       //
+       if (caller_nm->is_in_use()) {
+-	if (is_static_call) {         
+-	  CompiledStaticCall* ssc= compiledStaticCall_at(call_addr);
+-	  ssc->set_to_clean();
+-	} else {
+-	  // compiled, dispatched call (which used to call an interpreted method)
+-	  CompiledIC* inline_cache = CompiledIC_at(call_addr);
+-	  inline_cache->set_to_clean();
+-	}
++        if (is_static_call) {
++          CompiledStaticCall* ssc= compiledStaticCall_at(call_addr);
++          ssc->set_to_clean();
++        } else {
++          // compiled, dispatched call (which used to call an interpreted method)
++          CompiledIC* inline_cache = CompiledIC_at(call_addr);
++          inline_cache->set_to_clean();
++        }
+       }
+     }
+-  
++
+   }
+ 
+   methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle()));
+@@ -1339,7 +1331,7 @@
+ #ifndef PRODUCT
+   Atomic::inc(&_wrong_method_ctr);
+ 
+-  if (TraceCallFixup) { 
++  if (TraceCallFixup) {
+     ResourceMark rm(thread);
+     tty->print("handle_wrong_method reresolving call to");
+     callee_method->print_short_name(tty);
+@@ -1382,7 +1374,7 @@
+   // the case then entry_point may in fact point to a c2i and we'd patch the
+   // call site with the same old data. clear_code will set code() to NULL
+   // at the end of it. If we happen to see that NULL then we can skip trying
+-  // to patch. If we hit the window where the callee has a c2i in the 
++  // to patch. If we hit the window where the callee has a c2i in the
+   // from_compiled_entry and the NULL isn't present yet then we lose the race
+  // and patch the code with the same old data. Such is life.
+ 
+@@ -1403,12 +1395,12 @@
+       // just made a call site that could be megamorphic into a monomorphic site
+       // for the rest of its life! Just another racing bug in the life of
+       // fixup_callers_callsite ...
+-      // 
++      //
+       RelocIterator iter(cb, call->instruction_address(), call->next_instruction_address());
+       iter.next();
+       assert(iter.has_current(), "must have a reloc at java call site");
+       relocInfo::relocType typ = iter.reloc()->type();
+-      if ( typ != relocInfo::static_call_type && 
++      if ( typ != relocInfo::static_call_type &&
+            typ != relocInfo::opt_virtual_call_type &&
+            typ != relocInfo::static_stub_type) {
+         return;
+@@ -1491,10 +1483,10 @@
+ 
+   char* message = NEW_C_HEAP_ARRAY(char, msglen);
+   if (NULL == message) {
+-    // out of memory - can't use a detailed message.  Since caller is 
+-    // using a resource mark to free memory, returning this should be 
++    // out of memory - can't use a detailed message.  Since caller is
++    // using a resource mark to free memory, returning this should be
+     // safe (caller won't explicitly delete it).
+-    message = const_cast<char*>(objName); 
++    message = const_cast<char*>(objName);
+   } else {
+     jio_snprintf(message, msglen, "%s%s%s", objName, desc, targetKlassName);
+   }
+@@ -1508,7 +1500,7 @@
+ 
+ // Handles the uncommon case in locking, i.e., contention or an inflated lock.
+ #ifndef PRODUCT
+-int SharedRuntime::_monitor_enter_ctr=0;             
++int SharedRuntime::_monitor_enter_ctr=0;
+ #endif
+ JRT_ENTRY_NO_ASYNC(void, SharedRuntime::complete_monitor_locking_C(oopDesc* _obj, BasicLock* lock, JavaThread* thread))
+   oop obj(_obj);
+@@ -1529,10 +1521,10 @@
+ JRT_END
+ 
+ #ifndef PRODUCT
+-int SharedRuntime::_monitor_exit_ctr=0;              
++int SharedRuntime::_monitor_exit_ctr=0;
+ #endif
+ // Handles the uncommon cases of monitor unlocking in compiled code
+-JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* _obj, BasicLock* lock))  
++JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* _obj, BasicLock* lock))
+    oop obj(_obj);
+ #ifndef PRODUCT
+   _monitor_exit_ctr++;              // monitor exit slow
+@@ -1758,7 +1750,7 @@
+     ShouldNotReachHere();
+   }
+ 
+-  // Get the address of the ic_miss handlers before we grab the 
++  // Get the address of the ic_miss handlers before we grab the
+   // AdapterHandlerLibrary_lock. This fixes bug 6236259 which
+   // was caused by the initialization of the stubs happening
+   // while we held the lock and then notifying jvmti while
+@@ -1801,9 +1793,9 @@
+       // because I need a unique handler index.  It cannot be scanned for
+       // because all -1's look alike.  Instead, the matching index is passed out
+       // and immediately used to collect the 2 return values (the c2i and i2c
+-      // adapters). 
++      // adapters).
+     }
+-    
++
+     // Create I2C & C2I handlers
+     ResourceMark rm;
+     // Improve alignment slightly
+@@ -1814,24 +1806,24 @@
+                                            sizeof(buffer_locs)/sizeof(relocInfo));
+     MacroAssembler _masm(&buffer);
+ 
+-    // Fill in the signature array, for the calling-convention call. 
++    // Fill in the signature array, for the calling-convention call.
+     int total_args_passed = method->size_of_parameters(); // All args on stack
+ 
+     BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType,total_args_passed);
+     VMRegPair  * regs   = NEW_RESOURCE_ARRAY(VMRegPair  ,total_args_passed);
+     int i=0;
+-    if( !method->is_static() )	// Pass in receiver first
++    if( !method->is_static() )  // Pass in receiver first
+       sig_bt[i++] = T_OBJECT;
+     for( SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) {
+-      sig_bt[i++] = ss.type();	// Collect remaining bits of signature
++      sig_bt[i++] = ss.type();  // Collect remaining bits of signature
+       if( ss.type() == T_LONG || ss.type() == T_DOUBLE )
+-        sig_bt[i++] = T_VOID;	// Longs & doubles take 2 Java slots
++        sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
+     }
+     assert( i==total_args_passed, "" );
+ 
+     // Now get the re-packed compiled-Java layout.
+     int comp_args_on_stack;
+-    
++
+     // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
+     comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);
+ 
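
The loop above flattens a method signature into the interpreter's slot layout: the receiver comes first for non-static methods, and a T_VOID placeholder follows each long or double because those occupy two Java slots. For a hypothetical instance method void m(int, String, long), the layout would be:

    // sig_bt for the instance method (ILjava/lang/String;J)V:
    //
    //   sig_bt[0] = T_OBJECT   // receiver
    //   sig_bt[1] = T_INT
    //   sig_bt[2] = T_OBJECT   // java.lang.String
    //   sig_bt[3] = T_LONG
    //   sig_bt[4] = T_VOID     // second slot of the long
    //
    // total_args_passed == 5, which is what the assert above checks.
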
+@@ -1842,7 +1834,7 @@
+                                                                         regs);
+ 
+     B = BufferBlob::create(AdapterHandlerEntry::name, &buffer);
+-    if (B == NULL)  return -2;		// Out of CodeCache space
++    if (B == NULL)  return -2;          // Out of CodeCache space
+     entry->relocate(B->instructions_begin());
+ #ifndef PRODUCT
+    // debugging support
+@@ -1877,9 +1869,9 @@
+ 
+     if (JvmtiExport::should_post_dynamic_code_generated()) {
+       JvmtiExport::post_dynamic_code_generated(blob_id,
+-					       B->instructions_begin(),
+-					       B->instructions_end());
+-    }    
++                                               B->instructions_begin(),
++                                               B->instructions_end());
++    }
+   }
+   return result;
+ }
+@@ -1890,7 +1882,7 @@
+     _c2i_entry += delta;
+     _c2i_unverified_entry += delta;
+ }
+-  
++
+ // Create a native wrapper for this native method.  The wrapper converts the
+ // java compiled calling convention to the native convention, handlizes
+ // arguments, and transitions to native.  On return from the native we transition
+@@ -1902,11 +1894,11 @@
+   if (PrintCompilation) {
+     ttyLocker ttyl;
+     tty->print("---   n%s ", (method->is_synchronized() ? "s" : " "));
+-    method->print_short_name(tty); 
++    method->print_short_name(tty);
+     if (method->is_static()) {
+       tty->print(" (static)");
+     }
+-    tty->cr(); 
++    tty->cr();
+   }
+ 
+   assert(method->has_native_function(), "must have something valid to call!");
+@@ -1919,7 +1911,7 @@
+     if (nm) {
+       return nm;
+     }
+-    
++
+     // Improve alignment slightly
+     u_char* buf = (u_char*)(((intptr_t)_buffer + CodeEntryAlignment-1) & ~(CodeEntryAlignment-1));
+     CodeBuffer buffer(buf, AdapterHandlerLibrary_size);
+@@ -1927,30 +1919,30 @@
+     double locs_buf[20];
+     buffer.insts()->initialize_shared_locs((relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
+     MacroAssembler _masm(&buffer);
+-    
+-    // Fill in the signature array, for the calling-convention call. 
++
++    // Fill in the signature array, for the calling-convention call.
+     int total_args_passed = method->size_of_parameters();
+-    
++
+     BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType,total_args_passed);
+     VMRegPair  * regs   = NEW_RESOURCE_ARRAY(VMRegPair  ,total_args_passed);
+     int i=0;
+-    if( !method->is_static() )	// Pass in receiver first
++    if( !method->is_static() )  // Pass in receiver first
+       sig_bt[i++] = T_OBJECT;
+     SignatureStream ss(method->signature());
+     for( ; !ss.at_return_type(); ss.next()) {
+-      sig_bt[i++] = ss.type();	// Collect remaining bits of signature
++      sig_bt[i++] = ss.type();  // Collect remaining bits of signature
+       if( ss.type() == T_LONG || ss.type() == T_DOUBLE )
+-        sig_bt[i++] = T_VOID;	// Longs & doubles take 2 Java slots
++        sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
+     }
+     assert( i==total_args_passed, "" );
+     BasicType ret_type = ss.type();
+-    
++
+     // Now get the compiled-Java layout as input arguments
+     int comp_args_on_stack;
+     comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);
+-    
++
+     // Generate the compiled-to-native wrapper code
+-    nm = SharedRuntime::generate_native_wrapper(&_masm, 
++    nm = SharedRuntime::generate_native_wrapper(&_masm,
+                                                 method,
+                                                 total_args_passed,
+                                                 comp_args_on_stack,
+@@ -1966,7 +1958,7 @@
+   } else {
+     // CodeCache is full, disable compilation
+     // Ought to log this but compile log is only per compile thread
+-    // and we're some non descript Java thread. 
++    // and we're some nondescript Java thread.
+     UseInterpreter = true;
+     if (UseCompiler || AlwaysCompileLoopMethods ) {
+ #ifndef PRODUCT
+@@ -1977,7 +1969,7 @@
+         vm_direct_exit(CompileTheWorld ? 0 : 1);
+       }
+ #endif
+-      UseCompiler               = false;    
++      UseCompiler               = false;
+       AlwaysCompileLoopMethods  = false;
+     }
+   }
+@@ -1999,7 +1991,7 @@
+   return regs.first();
+ }
+ 
+-VMRegPair *SharedRuntime::find_callee_arguments(symbolOop sig, bool is_static, int* arg_size) {  
++VMRegPair *SharedRuntime::find_callee_arguments(symbolOop sig, bool is_static, int* arg_size) {
+  // This method is returning a data structure allocated as a
+   // ResourceObject, so do not put any ResourceMarks in here.
+   char *s = sig->as_C_string();
+@@ -2011,7 +2003,7 @@
+   BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, 256 );
+   VMRegPair *regs = NEW_RESOURCE_ARRAY( VMRegPair, 256 );
+   int cnt = 0;
+-  if (!is_static) {    
++  if (!is_static) {
+     sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
+   }
+ 
+@@ -2028,7 +2020,7 @@
+     case 'V': sig_bt[cnt++] = T_VOID;    break;
+     case 'L':                   // Oop
+       while( *s++ != ';'  ) ;   // Skip signature
+-      sig_bt[cnt++] = T_OBJECT; 
++      sig_bt[cnt++] = T_OBJECT;
+       break;
+     case '[': {                 // Array
+       do {                      // Skip optional size
+@@ -2037,7 +2029,7 @@
+       // Skip element type
+       if( s[-1] == 'L' )
+         while( *s++ != ';'  ) ; // Skip signature
+-      sig_bt[cnt++] = T_ARRAY; 
++      sig_bt[cnt++] = T_ARRAY;
+       break;
+     }
+     default : ShouldNotReachHere();
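
The switch above is a hand-rolled walk over a JVM method descriptor. A self-contained sketch of the same walk (this version skips nested '[' ranks directly and omits the legacy optional array-size digits that the original also tolerates):

    #include <cassert>
    #include <vector>

    enum BasicType { T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT,
                     T_LONG, T_FLOAT, T_DOUBLE, T_OBJECT, T_ARRAY };

    std::vector<BasicType> parse_args(const char* s) {
      std::vector<BasicType> out;
      assert(*s == '(');
      for (s++; *s != ')'; ) {
        switch (*s++) {
        case 'Z': out.push_back(T_BOOLEAN); break;
        case 'B': out.push_back(T_BYTE);    break;
        case 'C': out.push_back(T_CHAR);    break;
        case 'S': out.push_back(T_SHORT);   break;
        case 'I': out.push_back(T_INT);     break;
        case 'J': out.push_back(T_LONG);    break;
        case 'F': out.push_back(T_FLOAT);   break;
        case 'D': out.push_back(T_DOUBLE);  break;
        case 'L':                            // object: skip the class name
          while (*s++ != ';') ;
          out.push_back(T_OBJECT);
          break;
        case '[':                            // array: skip ranks and element type
          while (*s == '[') s++;
          if (*s++ == 'L') { while (*s++ != ';') ; }
          out.push_back(T_ARRAY);
          break;
        }
      }
      return out;
    }

    // parse_args("(ILjava/lang/String;[J)V") yields { T_INT, T_OBJECT, T_ARRAY }.
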
+@@ -2111,8 +2103,8 @@
+        kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
+     if( kptr->obj() != NULL ) active_monitor_count++;
+   }
+-  
+-  // QQQ we could place number of active monitors in the array so that compiled code 
++
++  // QQQ we could place number of active monitors in the array so that compiled code
+   // could double check it.
+ 
+   methodOop moop = fr.interpreter_frame_method();
+@@ -2141,11 +2133,11 @@
+   for( BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end();
+        kptr2 < fr.interpreter_frame_monitor_begin();
+        kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) {
+-    if( kptr2->obj() != NULL) {		// Avoid 'holes' in the monitor array
++    if( kptr2->obj() != NULL) {         // Avoid 'holes' in the monitor array
+       BasicLock *lock = kptr2->lock();
+       // Inflate so the displaced header becomes position-independent
+-      if (lock->displaced_header()->is_unlocked()) 
+-	ObjectSynchronizer::inflate_helper(kptr2->obj());
++      if (lock->displaced_header()->is_unlocked())
++        ObjectSynchronizer::inflate_helper(kptr2->obj());
+       // Now the displaced header is free to move
+       buf[i++] = (intptr_t)lock->displaced_header();
+       buf[i++] = (intptr_t)kptr2->obj();
+@@ -2180,7 +2172,7 @@
+       tty->print("0x%" FORMAT64_MODIFIER "x", _fingerprints->at(i));
+       tty->print_cr(" i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT,
+                     a->get_i2c_entry(), a->get_c2i_entry(), a->get_c2i_unverified_entry());
+-         
++
+       return;
+     }
+   }
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/sharedRuntime.hpp openjdk/hotspot/src/share/vm/runtime/sharedRuntime.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/sharedRuntime.hpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/sharedRuntime.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)sharedRuntime.hpp	1.157 07/07/19 12:19:08 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class AdapterHandlerEntry;
+@@ -93,7 +90,7 @@
+   static jdouble dexp(jdouble x);
+   static jdouble dpow(jdouble x, jdouble y);
+ 
+-  
++
+   // exception handling across interpreter/compiler boundaries
+   static address raw_exception_handler_for_return_address(address return_address);
+   static address exception_handler_for_return_address(address return_address);
+@@ -107,7 +104,6 @@
+     STACK_OVERFLOW
+   };
+   static void    throw_AbstractMethodError(JavaThread* thread);
+-  static void    throw_IncompatibleClassChangeError(JavaThread* thread);
+   static void    throw_ArithmeticException(JavaThread* thread);
+   static void    throw_NullPointerException(JavaThread* thread);
+   static void    throw_NullPointerException_at_call(JavaThread* thread);
+@@ -119,7 +115,7 @@
+   // Shared stub locations
+   static address get_poll_stub(address pc);
+ 
+-  static address get_ic_miss_stub() { 
++  static address get_ic_miss_stub() {
+     assert(_ic_miss_blob!= NULL, "oops");
+     return _ic_miss_blob->instructions_begin();
+   }
+@@ -130,6 +126,7 @@
+   }
+ 
+ #ifdef COMPILER2
++  static void generate_uncommon_trap_blob(void);
+   static UncommonTrapBlob* uncommon_trap_blob()                  { return _uncommon_trap_blob; }
+ #endif // COMPILER2
+ 
+@@ -163,11 +160,11 @@
+ 
+   // bytecode tracing is only used by the TraceBytecodes
+   static intptr_t trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2) PRODUCT_RETURN0;
+-  
++
+   // Used to back off a spin lock that is under heavy contention
+   static void yield_all(JavaThread* thread, int attempts = 0);
+ 
+-  static oop retrieve_receiver( symbolHandle sig, frame caller );  
++  static oop retrieve_receiver( symbolHandle sig, frame caller );
+ 
+   static void verify_caller_frame(frame caller_frame, methodHandle callee_method) PRODUCT_RETURN;
+   static methodHandle find_callee_method_inside_interpreter(frame caller_frame, methodHandle caller_method, int bci) PRODUCT_RETURN_(return methodHandle(););
+@@ -190,30 +187,30 @@
+ 
+   /**
+    * Fill in the "X cannot be cast to a Y" message for ClassCastException
+-   * 
++   *
+    * @param thr the current thread
+    * @param name the name of the class of the object attempted to be cast
+    * @return the dynamically allocated exception message (must be freed
+-   * by the caller using a resource mark)  
++   * by the caller using a resource mark)
+    *
+-   * BCP must refer to the current 'checkcast' opcode for the frame 
+-   * on top of the stack.  
+-   * The caller (or one of it's callers) must use a ResourceMark 
++   * BCP must refer to the current 'checkcast' opcode for the frame
++   * on top of the stack.
++   * The caller (or one of its callers) must use a ResourceMark
+    * in order to correctly free the result.
+    */
+   static char* generate_class_cast_message(JavaThread* thr, const char* name);
+ 
+   /**
+    * Fill in the "X cannot be cast to a Y" message for ClassCastException
+-   * 
++   *
+    * @param name the name of the class of the object attempted to be cast
+   * @param klass the name of the target klass of the attempted cast
+    * @return the dynamically allocated exception message (must be freed
+-   * by the caller using a resource mark)  
++   * by the caller using a resource mark)
+    *
+   * This version does not require access to the frame, so it can be called
+    * from interpreted code
+-   * The caller (or one of it's callers) must use a ResourceMark 
++   * The caller (or one of it's callers) must use a ResourceMark
+    * in order to correctly free the result.
+    */
+   static char* generate_class_cast_message(const char* name, const char* klass);
+@@ -247,7 +244,7 @@
+ 
+  private:
+   static Handle find_callee_info(JavaThread* thread,
+-                                 Bytecodes::Code& bc, 
++                                 Bytecodes::Code& bc,
+                                  CallInfo& callinfo, TRAPS);
+   static Handle find_callee_info_helper(JavaThread* thread,
+                                         vframeStream& vfst,
+@@ -269,7 +266,7 @@
+   // registers, those above refer to 4-byte stack slots.  All stack slots are
+   // based off of the window top.  SharedInfo::stack0 refers to the first usable
+   // slot in the bottom of the frame. SharedInfo::stack0+1 refers to the memory word
+-  // 4-bytes higher. So for sparc because the register window save area is at 
++  // 4-bytes higher. So for sparc because the register window save area is at
+   // the bottom of the frame the first 16 words will be skipped and SharedInfo::stack0
+   // will be just above it. (
+   // return value is the maximum number of VMReg stack slots the convention will use.
+@@ -282,8 +279,8 @@
+   // blobs. Unlike adapters in the tiger and earlier releases the code in these
+  // blobs does not create a new frame and is therefore virtually invisible
+  // to the stack walking code. In general these blobs extend the caller's stack
+-  // as needed for the conversion of argument locations. 
+-  
++  // as needed for the conversion of argument locations.
++
+   // When calling a c2i blob the code will always call the interpreter even if
+   // by the time we reach the blob there is compiled code available. This allows
+   // the blob to pass the incoming stack pointer (the sender sp) in a known
+@@ -333,7 +330,7 @@
+   static VMReg     name_for_receiver();
+ 
+   // "Top of Stack" slots that may be unused by the calling convention but must
+-  // otherwise be preserved.  
++  // otherwise be preserved.
+   // On Intel these are not necessary and the value can be zero.
+   // On Sparc this describes the words reserved for storing a register window
+   // when an interrupt occurs.
+@@ -360,20 +357,20 @@
+   // exists.  Patch the caller so it no longer calls into the interpreter.
+   static void fixup_callers_callsite(methodOopDesc* moop, address ret_pc);
+ 
+-  // Slow-path Locking and Unlocking    
+-  static void complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* thread);  
++  // Slow-path Locking and Unlocking
++  static void complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* thread);
+   static void complete_monitor_unlocking_C(oopDesc* obj, BasicLock* lock);
+ 
+   // Resolving of calls
+-  static address resolve_static_call_C     (JavaThread *thread);    
+-  static address resolve_virtual_call_C    (JavaThread *thread);    
+-  static address resolve_opt_virtual_call_C(JavaThread *thread);  
++  static address resolve_static_call_C     (JavaThread *thread);
++  static address resolve_virtual_call_C    (JavaThread *thread);
++  static address resolve_opt_virtual_call_C(JavaThread *thread);
+ 
+   // arraycopy, the non-leaf version.  (See StubRoutines for all the leaf calls.)
+   static void slow_arraycopy_C(oopDesc* src,  jint src_pos,
+                                oopDesc* dest, jint dest_pos,
+                                jint length, JavaThread* thread);
+-  
++
+   // handle ic miss with caller being compiled code
+   // wrong method handling (inline cache misses, zombie methods)
+   static address handle_wrong_method(JavaThread* thread);
+@@ -384,15 +381,15 @@
+   // Collect and print inline cache miss statistics
+  private:
+   enum { maxICmiss_count = 100 };
+-  static int	 _ICmiss_index;                  // length of IC miss histogram
+-  static int	 _ICmiss_count[maxICmiss_count]; // miss counts
++  static int     _ICmiss_index;                  // length of IC miss histogram
++  static int     _ICmiss_count[maxICmiss_count]; // miss counts
+   static address _ICmiss_at[maxICmiss_count];    // miss addresses
+   static void trace_ic_miss(address at);
+ 
+  public:
+-  static int _monitor_enter_ctr;	         // monitor enter slow
++  static int _monitor_enter_ctr;                 // monitor enter slow
+   static int _monitor_exit_ctr;                  // monitor exit slow
+-  static int _throw_null_ctr;	                 // throwing a null-pointer exception
++  static int _throw_null_ctr;                    // throwing a null-pointer exception
+   static int _ic_miss_ctr;                       // total # of IC misses
+   static int _wrong_method_ctr;
+   static int _resolve_static_ctr;
+@@ -413,7 +410,7 @@
+ 
+   static int _new_instance_ctr;            // 'new' object requires GC
+   static int _new_array_ctr;               // 'new' array requires GC
+-  static int _multi1_ctr, _multi2_ctr, _multi3_ctr, _multi4_ctr, _multi5_ctr; 
++  static int _multi1_ctr, _multi2_ctr, _multi3_ctr, _multi4_ctr, _multi5_ctr;
+   static int _find_handler_ctr;            // find exception handler
+   static int _rethrow_ctr;                 // rethrow exception
+   static int _mon_enter_stub_ctr;          // monitor enter stub
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/sharedRuntimeTrans.cpp openjdk/hotspot/src/share/vm/runtime/sharedRuntimeTrans.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/sharedRuntimeTrans.cpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/sharedRuntimeTrans.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)sharedRuntimeTrans.cpp	1.8 07/05/05 17:06:54 JVM"
+-#endif
+ /*
+  * Copyright 2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -90,25 +87,25 @@
+   int  k,hx,lx;
+   hx = __HI(x);
+   lx = __LO(x);
+-  k = (hx&0x7ff00000)>>20;		/* extract exponent */
+-  if (k==0) {				/* 0 or subnormal x */
++  k = (hx&0x7ff00000)>>20;              /* extract exponent */
++  if (k==0) {                           /* 0 or subnormal x */
+     if ((lx|(hx&0x7fffffff))==0) return x; /* +-0 */
+     x *= two54;
+     hx = __HI(x);
+     k = ((hx&0x7ff00000)>>20) - 54;
+-    if (n< -50000) return tiny*x; 	/*underflow*/
++    if (n< -50000) return tiny*x;       /*underflow*/
+   }
+-  if (k==0x7ff) return x+x;		/* NaN or Inf */
++  if (k==0x7ff) return x+x;             /* NaN or Inf */
+   k = k+n;
+   if (k >  0x7fe) return hugeX*copysign(hugeX,x); /* overflow  */
+-  if (k > 0) 				/* normal result */
++  if (k > 0)                            /* normal result */
+     {__HI(x) = (hx&0x800fffff)|(k<<20); return x;}
+   if (k <= -54) {
+-    if (n > 50000) 	/* in case integer overflow in n+k */
+-      return hugeX*copysign(hugeX,x);	/*overflow*/
+-    else return tiny*copysign(tiny,x); 	/*underflow*/
++    if (n > 50000)      /* in case integer overflow in n+k */
++      return hugeX*copysign(hugeX,x);   /*overflow*/
++    else return tiny*copysign(tiny,x);  /*underflow*/
+   }
+-  k += 54;				/* subnormal result */
++  k += 54;                              /* subnormal result */
+   __HI(x) = (hx&0x800fffff)|(k<<20);
+   return x*twom54;
+ }
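
Editorially, the scalbn routine above scales x by 2^n purely through exponent-field arithmetic (the (hx&0x800fffff)|(k<<20) store). A minimal standalone sketch of the same idea, assuming IEEE-754 doubles and ignoring the overflow/underflow/subnormal paths the real routine handles with two54/twom54:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Scale a finite, normal x by 2**n by adding n to the biased
       exponent field; memcpy replaces the __HI macro for portability. */
    static double scalbn_sketch(double x, int n) {
      uint64_t bits;
      memcpy(&bits, &x, sizeof bits);
      int k = (int)((bits >> 52) & 0x7ff) + n;   /* new biased exponent */
      assert(k > 0 && k < 0x7ff);                /* stay in normal range */
      bits = (bits & ~(0x7ffULL << 52)) | ((uint64_t)k << 52);
      memcpy(&x, &bits, sizeof x);
      return x;
    }

    /* scalbn_sketch(3.0, 4) == 48.0, matching ldexp(3.0, 4). */
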
+@@ -181,10 +178,10 @@
+   double hfsq,f,s,z,R,w,t1,t2,dk;
+   int k,hx,i,j;
+   unsigned lx;
+- 
++
+   hx = __HI(x);               /* high word of x */
+   lx = __LO(x);               /* low  word of x */
+- 
++
+   k=0;
+   if (hx < 0x00100000) {                   /* x < 2**-1022  */
+     if (((hx&0x7fffffff)|lx)==0)
+@@ -202,7 +199,7 @@
+   f = x-1.0;
+   if((0x000fffff&(2+hx))<3) {  /* |f| < 2**-20 */
+     if(f==zero) {
+-      if (k==0) return zero; 
++      if (k==0) return zero;
+       else {dk=(double)k; return dk*ln2_hi+dk*ln2_lo;}
+     }
+     R = f*f*(0.5-0.33333333333333333*f);
+@@ -228,11 +225,11 @@
+       return dk*ln2_hi-((s*(f-R)-dk*ln2_lo)-f);
+   }
+ }
+- 
++
+ JRT_LEAF(jdouble, SharedRuntime::dlog(jdouble x))
+-  return __ieee754_log(x);  
++  return __ieee754_log(x);
+ JRT_END
+- 
++
+ /* __ieee754_log10(x)
+  * Return the base 10 logarithm of x
+  *
+@@ -270,15 +267,15 @@
+ ivln10     =  4.34294481903251816668e-01, /* 0x3FDBCB7B, 0x1526E50E */
+   log10_2hi  =  3.01029995663611771306e-01, /* 0x3FD34413, 0x509F6000 */
+   log10_2lo  =  3.69423907715893078616e-13; /* 0x3D59FEF3, 0x11F12B36 */
+-  
++
+ static double __ieee754_log10(double x) {
+   double y,z;
+   int i,k,hx;
+   unsigned lx;
+-   
++
+   hx = __HI(x);       /* high word of x */
+   lx = __LO(x);       /* low word of x */
+-   
++
+   k=0;
+   if (hx < 0x00100000) {                  /* x < 2**-1022  */
+     if (((hx&0x7fffffff)|lx)==0)
+@@ -296,9 +293,9 @@
+   z  = y*log10_2lo + ivln10*__ieee754_log(x);
+   return  z+y*log10_2hi;
+ }
+- 
++
+ JRT_LEAF(jdouble, SharedRuntime::dlog10(jdouble x))
+-  return __ieee754_log10(x);  
++  return __ieee754_log10(x);
+ JRT_END
+ 
+ 
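
dlog10 above reuses __ieee754_log through the identity log10(x) = y*log10(2) + log(x)/ln(10), with log10_2 and 1/ln10 each split into hi/lo pieces so the products round correctly. A plain-arithmetic check of the identity, as a sketch (the hi/lo splitting is omitted, so agreement is only to about 1 ulp):

    #include <math.h>
    #include <stdio.h>

    int main(void) {
      double x = 1234.5678;
      /* same identity as the routine above, without the hi/lo split */
      double via_log = log(x) / log(10.0);
      printf("%.17g\n%.17g\n", log10(x), via_log);
      return 0;
    }
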
+@@ -308,55 +305,55 @@
+  * Method
+  *   1. Argument reduction:
+  *      Reduce x to an r so that |r| <= 0.5*ln2 ~ 0.34658.
+- *	Given x, find r and integer k such that
++ *      Given x, find r and integer k such that
+  *
+  *               x = k*ln2 + r,  |r| <= 0.5*ln2.
+  *
+  *      Here r will be represented as r = hi-lo for better
+- *	accuracy.
++ *      accuracy.
+  *
+  *   2. Approximation of exp(r) by a special rational function on
+- *	the interval [0,0.34658]:
+- *	Write
+- *	    R(r**2) = r*(exp(r)+1)/(exp(r)-1) = 2 + r*r/6 - r**4/360 + ...
++ *      the interval [0,0.34658]:
++ *      Write
++ *          R(r**2) = r*(exp(r)+1)/(exp(r)-1) = 2 + r*r/6 - r**4/360 + ...
+  *      We use a special Reme algorithm on [0,0.34658] to generate
+- * 	a polynomial of degree 5 to approximate R. The maximum error
+- *	of this polynomial approximation is bounded by 2**-59. In
+- *	other words,
+- *	    R(z) ~ 2.0 + P1*z + P2*z**2 + P3*z**3 + P4*z**4 + P5*z**5
+- *  	(where z=r*r, and the values of P1 to P5 are listed below)
+- *	and
+- *	    |                  5          |     -59
+- *	    | 2.0+P1*z+...+P5*z   -  R(z) | <= 2
+- *	    |                             |
+- *	The computation of exp(r) thus becomes
++ *      a polynomial of degree 5 to approximate R. The maximum error
++ *      of this polynomial approximation is bounded by 2**-59. In
++ *      other words,
++ *          R(z) ~ 2.0 + P1*z + P2*z**2 + P3*z**3 + P4*z**4 + P5*z**5
++ *      (where z=r*r, and the values of P1 to P5 are listed below)
++ *      and
++ *          |                  5          |     -59
++ *          | 2.0+P1*z+...+P5*z   -  R(z) | <= 2
++ *          |                             |
++ *      The computation of exp(r) thus becomes
+  *                             2*r
+- *		exp(r) = 1 + -------
+- *		              R - r
++ *              exp(r) = 1 + -------
++ *                            R - r
+  *                                 r*R1(r)
+- *		       = 1 + r + ----------- (for better accuracy)
+- *		                  2 - R1(r)
+- *	where
+- *			         2       4             10
+- *		R1(r) = r - (P1*r  + P2*r  + ... + P5*r   ).
++ *                     = 1 + r + ----------- (for better accuracy)
++ *                                2 - R1(r)
++ *      where
++ *                               2       4             10
++ *              R1(r) = r - (P1*r  + P2*r  + ... + P5*r   ).
+  *
+  *   3. Scale back to obtain exp(x):
+- *	From step 1, we have
+- *	   exp(x) = 2^k * exp(r)
++ *      From step 1, we have
++ *         exp(x) = 2^k * exp(r)
+  *
+  * Special cases:
+- *	exp(INF) is INF, exp(NaN) is NaN;
+- *	exp(-INF) is 0, and
+- *	for finite argument, only exp(0)=1 is exact.
++ *      exp(INF) is INF, exp(NaN) is NaN;
++ *      exp(-INF) is 0, and
++ *      for finite argument, only exp(0)=1 is exact.
+  *
+  * Accuracy:
+- *	according to an error analysis, the error is always less than
+- *	1 ulp (unit in the last place).
++ *      according to an error analysis, the error is always less than
++ *      1 ulp (unit in the last place).
+  *
+  * Misc. info.
+- *	For IEEE double
+- *	    if x >  7.09782712893383973096e+02 then exp(x) overflow
+- *	    if x < -7.45133219101941108420e+02 then exp(x) underflow
++ *      For IEEE double
++ *          if x >  7.09782712893383973096e+02 then exp(x) overflow
++ *          if x < -7.45133219101941108420e+02 then exp(x) underflow
+  *
+  * Constants:
+  * The hexadecimal values are the intended ones for the following
+@@ -366,66 +363,66 @@
+  */
+ 
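
The method comment above is the whole algorithm in miniature: reduce x to k*ln2 + r, approximate exp(r), scale by 2^k. A sketch of that skeleton, assuming POSIX's M_LN2 and letting the C library's exp stand in for the degree-5 rational approximation of step 2:

    #include <math.h>
    #include <stdio.h>

    int main(void) {
      double x = 10.0;
      /* step 1: x = k*ln2 + r with |r| <= 0.5*ln2 (hi/lo split omitted) */
      int    k = (int)nearbyint(x / M_LN2);
      double r = x - k * M_LN2;
      /* step 3: exp(x) = 2^k * exp(r) */
      printf("%.17g\n%.17g\n", ldexp(exp(r), k), exp(x));
      return 0;
    }
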
+ static const double
+-one	= 1.0,
+-  halF[2]	= {0.5,-0.5,},
++one     = 1.0,
++  halF[2]       = {0.5,-0.5,},
+   twom1000= 9.33263618503218878990e-302,     /* 2**-1000=0x01700000,0*/
+     o_threshold=  7.09782712893383973096e+02,  /* 0x40862E42, 0xFEFA39EF */
+     u_threshold= -7.45133219101941108420e+02,  /* 0xc0874910, 0xD52D3051 */
+     ln2HI[2]   ={ 6.93147180369123816490e-01,  /* 0x3fe62e42, 0xfee00000 */
+-		  -6.93147180369123816490e-01,},/* 0xbfe62e42, 0xfee00000 */
++                  -6.93147180369123816490e-01,},/* 0xbfe62e42, 0xfee00000 */
+     ln2LO[2]   ={ 1.90821492927058770002e-10,  /* 0x3dea39ef, 0x35793c76 */
+-		  -1.90821492927058770002e-10,},/* 0xbdea39ef, 0x35793c76 */
++                  -1.90821492927058770002e-10,},/* 0xbdea39ef, 0x35793c76 */
+       invln2 =  1.44269504088896338700e+00, /* 0x3ff71547, 0x652b82fe */
+-	P1   =  1.66666666666666019037e-01, /* 0x3FC55555, 0x5555553E */
+-	P2   = -2.77777777770155933842e-03, /* 0xBF66C16C, 0x16BEBD93 */
+-	P3   =  6.61375632143793436117e-05, /* 0x3F11566A, 0xAF25DE2C */
+-	P4   = -1.65339022054652515390e-06, /* 0xBEBBBD41, 0xC5D26BF1 */
+-	P5   =  4.13813679705723846039e-08; /* 0x3E663769, 0x72BEA4D0 */
++        P1   =  1.66666666666666019037e-01, /* 0x3FC55555, 0x5555553E */
++        P2   = -2.77777777770155933842e-03, /* 0xBF66C16C, 0x16BEBD93 */
++        P3   =  6.61375632143793436117e-05, /* 0x3F11566A, 0xAF25DE2C */
++        P4   = -1.65339022054652515390e-06, /* 0xBEBBBD41, 0xC5D26BF1 */
++        P5   =  4.13813679705723846039e-08; /* 0x3E663769, 0x72BEA4D0 */
+ 
+ static double __ieee754_exp(double x) {
+   double y,hi=0,lo=0,c,t;
+   int k=0,xsb;
+   unsigned hx;
+-  
+-  hx  = __HI(x);	/* high word of x */
+-  xsb = (hx>>31)&1;		/* sign bit of x */
+-  hx &= 0x7fffffff;		/* high word of |x| */
+-  
++
++  hx  = __HI(x);        /* high word of x */
++  xsb = (hx>>31)&1;             /* sign bit of x */
++  hx &= 0x7fffffff;             /* high word of |x| */
++
+   /* filter out non-finite argument */
+-  if(hx >= 0x40862E42) {			/* if |x|>=709.78... */
++  if(hx >= 0x40862E42) {                        /* if |x|>=709.78... */
+     if(hx>=0x7ff00000) {
+-      if(((hx&0xfffff)|__LO(x))!=0) 
+-	return x+x; 		/* NaN */
+-      else return (xsb==0)? x:0.0;	/* exp(+-inf)={inf,0} */
++      if(((hx&0xfffff)|__LO(x))!=0)
++        return x+x;             /* NaN */
++      else return (xsb==0)? x:0.0;      /* exp(+-inf)={inf,0} */
+     }
+     if(x > o_threshold) return hugeX*hugeX; /* overflow */
+     if(x < u_threshold) return twom1000*twom1000; /* underflow */
+   }
+-  
++
+   /* argument reduction */
+-  if(hx > 0x3fd62e42) {		/* if  |x| > 0.5 ln2 */
+-    if(hx < 0x3FF0A2B2) {	/* and |x| < 1.5 ln2 */
++  if(hx > 0x3fd62e42) {         /* if  |x| > 0.5 ln2 */
++    if(hx < 0x3FF0A2B2) {       /* and |x| < 1.5 ln2 */
+       hi = x-ln2HI[xsb]; lo=ln2LO[xsb]; k = 1-xsb-xsb;
+     } else {
+       k  = (int)(invln2*x+halF[xsb]);
+       t  = k;
+-      hi = x - t*ln2HI[0];	/* t*ln2HI is exact here */
++      hi = x - t*ln2HI[0];      /* t*ln2HI is exact here */
+       lo = t*ln2LO[0];
+     }
+     x  = hi - lo;
+   }
+-  else if(hx < 0x3e300000)  {	/* when |x|<2**-28 */
++  else if(hx < 0x3e300000)  {   /* when |x|<2**-28 */
+     if(hugeX+x>one) return one+x;/* trigger inexact */
+   }
+   else k = 0;
+-  
++
+   /* x is now in primary range */
+   t  = x*x;
+   c  = x - t*(P1+t*(P2+t*(P3+t*(P4+t*P5))));
+-  if(k==0) 	return one-((x*c)/(c-2.0)-x);
+-  else 		y = one-((lo-(x*c)/(2.0-c))-hi);
++  if(k==0)      return one-((x*c)/(c-2.0)-x);
++  else          y = one-((lo-(x*c)/(2.0-c))-hi);
+   if(k >= -1021) {
+-    __HI(y) += (k<<20);	/* add k to y's exponent */
++    __HI(y) += (k<<20); /* add k to y's exponent */
+     return y;
+   } else {
+     __HI(y) += ((k+1000)<<20);/* add k to y's exponent */
+@@ -439,41 +436,41 @@
+ 
+ /* __ieee754_pow(x,y) return x**y
+  *
+- *		      n
++ *                    n
+  * Method:  Let x =  2   * (1+f)
+- *	1. Compute and return log2(x) in two pieces:
+- *		log2(x) = w1 + w2,
+- *	   where w1 has 53-24 = 29 bit trailing zeros.
+- *	2. Perform y*log2(x) = n+y' by simulating muti-precision
+- *	   arithmetic, where |y'|<=0.5.
+- *	3. Return x**y = 2**n*exp(y'*log2)
++ *      1. Compute and return log2(x) in two pieces:
++ *              log2(x) = w1 + w2,
++ *         where w1 has 53-24 = 29 bit trailing zeros.
++ *      2. Perform y*log2(x) = n+y' by simulating multi-precision
++ *         arithmetic, where |y'|<=0.5.
++ *      3. Return x**y = 2**n*exp(y'*log2)
+  *
+  * Special cases:
+- *	1.  (anything) ** 0  is 1
+- *	2.  (anything) ** 1  is itself
+- *	3.  (anything) ** NAN is NAN
+- *	4.  NAN ** (anything except 0) is NAN
+- *	5.  +-(|x| > 1) **  +INF is +INF
+- *	6.  +-(|x| > 1) **  -INF is +0
+- *	7.  +-(|x| < 1) **  +INF is +0
+- *	8.  +-(|x| < 1) **  -INF is +INF
+- *	9.  +-1         ** +-INF is NAN
+- *	10. +0 ** (+anything except 0, NAN)               is +0
+- *	11. -0 ** (+anything except 0, NAN, odd integer)  is +0
+- *	12. +0 ** (-anything except 0, NAN)               is +INF
+- *	13. -0 ** (-anything except 0, NAN, odd integer)  is +INF
+- *	14. -0 ** (odd integer) = -( +0 ** (odd integer) )
+- *	15. +INF ** (+anything except 0,NAN) is +INF
+- *	16. +INF ** (-anything except 0,NAN) is +0
+- *	17. -INF ** (anything)  = -0 ** (-anything)
+- *	18. (-anything) ** (integer) is (-1)**(integer)*(+anything**integer)
+- *	19. (-anything except 0 and inf) ** (non-integer) is NAN
++ *      1.  (anything) ** 0  is 1
++ *      2.  (anything) ** 1  is itself
++ *      3.  (anything) ** NAN is NAN
++ *      4.  NAN ** (anything except 0) is NAN
++ *      5.  +-(|x| > 1) **  +INF is +INF
++ *      6.  +-(|x| > 1) **  -INF is +0
++ *      7.  +-(|x| < 1) **  +INF is +0
++ *      8.  +-(|x| < 1) **  -INF is +INF
++ *      9.  +-1         ** +-INF is NAN
++ *      10. +0 ** (+anything except 0, NAN)               is +0
++ *      11. -0 ** (+anything except 0, NAN, odd integer)  is +0
++ *      12. +0 ** (-anything except 0, NAN)               is +INF
++ *      13. -0 ** (-anything except 0, NAN, odd integer)  is +INF
++ *      14. -0 ** (odd integer) = -( +0 ** (odd integer) )
++ *      15. +INF ** (+anything except 0,NAN) is +INF
++ *      16. +INF ** (-anything except 0,NAN) is +0
++ *      17. -INF ** (anything)  = -0 ** (-anything)
++ *      18. (-anything) ** (integer) is (-1)**(integer)*(+anything**integer)
++ *      19. (-anything except 0 and inf) ** (non-integer) is NAN
+  *
+  * Accuracy:
+- *	pow(x,y) returns x**y nearly rounded. In particular
+- *			pow(integer,integer)
+- *	always returns the correct integer provided it is
+- *	representable.
++ *      pow(x,y) returns x**y nearly rounded. In particular
++ *                      pow(integer,integer)
++ *      always returns the correct integer provided it is
++ *      representable.
+  *
+  * Constants :
+  * The hexadecimal values are the intended ones for the following
+@@ -487,25 +484,25 @@
+   dp_h[] = { 0.0, 5.84962487220764160156e-01,}, /* 0x3FE2B803, 0x40000000 */
+     dp_l[] = { 0.0, 1.35003920212974897128e-08,}, /* 0x3E4CFDEB, 0x43CFD006 */
+       zeroX    =  0.0,
+-	two	=  2.0,
+-	two53	=  9007199254740992.0,	/* 0x43400000, 0x00000000 */
+-	/* poly coefs for (3/2)*(log(x)-2s-2/3*s**3 */
+-	L1X  =  5.99999999999994648725e-01, /* 0x3FE33333, 0x33333303 */
+-	L2X  =  4.28571428578550184252e-01, /* 0x3FDB6DB6, 0xDB6FABFF */
+-	L3X  =  3.33333329818377432918e-01, /* 0x3FD55555, 0x518F264D */
+-	L4X  =  2.72728123808534006489e-01, /* 0x3FD17460, 0xA91D4101 */
+-	L5X  =  2.30660745775561754067e-01, /* 0x3FCD864A, 0x93C9DB65 */
+-	L6X  =  2.06975017800338417784e-01, /* 0x3FCA7E28, 0x4A454EEF */
+-	lg2  =  6.93147180559945286227e-01, /* 0x3FE62E42, 0xFEFA39EF */
+-	lg2_h  =  6.93147182464599609375e-01, /* 0x3FE62E43, 0x00000000 */
+-	lg2_l  = -1.90465429995776804525e-09, /* 0xBE205C61, 0x0CA86C39 */
+-	ovt =  8.0085662595372944372e-0017, /* -(1024-log2(ovfl+.5ulp)) */
+-	cp    =  9.61796693925975554329e-01, /* 0x3FEEC709, 0xDC3A03FD =2/(3ln2) */
+-	cp_h  =  9.61796700954437255859e-01, /* 0x3FEEC709, 0xE0000000 =(float)cp */
+-	cp_l  = -7.02846165095275826516e-09, /* 0xBE3E2FE0, 0x145B01F5 =tail of cp_h*/
+-	ivln2    =  1.44269504088896338700e+00, /* 0x3FF71547, 0x652B82FE =1/ln2 */
+-	ivln2_h  =  1.44269502162933349609e+00, /* 0x3FF71547, 0x60000000 =24b 1/ln2*/
+-	ivln2_l  =  1.92596299112661746887e-08; /* 0x3E54AE0B, 0xF85DDF44 =1/ln2 tail*/
++        two     =  2.0,
++        two53   =  9007199254740992.0,  /* 0x43400000, 0x00000000 */
++        /* poly coefs for (3/2)*(log(x)-2s-2/3*s**3 */
++        L1X  =  5.99999999999994648725e-01, /* 0x3FE33333, 0x33333303 */
++        L2X  =  4.28571428578550184252e-01, /* 0x3FDB6DB6, 0xDB6FABFF */
++        L3X  =  3.33333329818377432918e-01, /* 0x3FD55555, 0x518F264D */
++        L4X  =  2.72728123808534006489e-01, /* 0x3FD17460, 0xA91D4101 */
++        L5X  =  2.30660745775561754067e-01, /* 0x3FCD864A, 0x93C9DB65 */
++        L6X  =  2.06975017800338417784e-01, /* 0x3FCA7E28, 0x4A454EEF */
++        lg2  =  6.93147180559945286227e-01, /* 0x3FE62E42, 0xFEFA39EF */
++        lg2_h  =  6.93147182464599609375e-01, /* 0x3FE62E43, 0x00000000 */
++        lg2_l  = -1.90465429995776804525e-09, /* 0xBE205C61, 0x0CA86C39 */
++        ovt =  8.0085662595372944372e-0017, /* -(1024-log2(ovfl+.5ulp)) */
++        cp    =  9.61796693925975554329e-01, /* 0x3FEEC709, 0xDC3A03FD =2/(3ln2) */
++        cp_h  =  9.61796700954437255859e-01, /* 0x3FEEC709, 0xE0000000 =(float)cp */
++        cp_l  = -7.02846165095275826516e-09, /* 0xBE3E2FE0, 0x145B01F5 =tail of cp_h*/
++        ivln2    =  1.44269504088896338700e+00, /* 0x3FF71547, 0x652B82FE =1/ln2 */
++        ivln2_h  =  1.44269502162933349609e+00, /* 0x3FF71547, 0x60000000 =24b 1/ln2*/
++        ivln2_l  =  1.92596299112661746887e-08; /* 0x3E54AE0B, 0xF85DDF44 =1/ln2 tail*/
+ 
+ double __ieee754_pow(double x, double y) {
+   double z,ax,z_h,z_l,p_h,p_l;
+@@ -528,42 +525,42 @@
+     return x+y;
+ 
+   /* determine if y is an odd int when x < 0
+-   * yisint = 0	... y is not an integer
+-   * yisint = 1	... y is an odd int
+-   * yisint = 2	... y is an even int
++   * yisint = 0 ... y is not an integer
++   * yisint = 1 ... y is an odd int
++   * yisint = 2 ... y is an even int
+    */
+   yisint  = 0;
+   if(hx<0) {
+     if(iy>=0x43400000) yisint = 2; /* even integer y */
+     else if(iy>=0x3ff00000) {
+-      k = (iy>>20)-0x3ff;	   /* exponent */
++      k = (iy>>20)-0x3ff;          /* exponent */
+       if(k>20) {
+-	j = ly>>(52-k);
+-	if((unsigned)(j<<(52-k))==ly) yisint = 2-(j&1);
++        j = ly>>(52-k);
++        if((unsigned)(j<<(52-k))==ly) yisint = 2-(j&1);
+       } else if(ly==0) {
+-	j = iy>>(20-k);
+-	if((j<<(20-k))==iy) yisint = 2-(j&1);
++        j = iy>>(20-k);
++        if((j<<(20-k))==iy) yisint = 2-(j&1);
+       }
+     }
+   }
+ 
+   /* special value of y */
+   if(ly==0) {
+-    if (iy==0x7ff00000) {	/* y is +-inf */
++    if (iy==0x7ff00000) {       /* y is +-inf */
+       if(((ix-0x3ff00000)|lx)==0)
+-	return  y - y;	/* inf**+-1 is NaN */
++        return  y - y;  /* inf**+-1 is NaN */
+       else if (ix >= 0x3ff00000)/* (|x|>1)**+-inf = inf,0 */
+-	return (hy>=0)? y: zeroX;
+-      else			/* (|x|<1)**-,+inf = inf,0 */
+-	return (hy<0)?-y: zeroX;
++        return (hy>=0)? y: zeroX;
++      else                      /* (|x|<1)**-,+inf = inf,0 */
++        return (hy<0)?-y: zeroX;
+     }
+-    if(iy==0x3ff00000) {	/* y is  +-1 */
++    if(iy==0x3ff00000) {        /* y is  +-1 */
+       if(hy<0) return one/x; else return x;
+     }
+     if(hy==0x40000000) return x*x; /* y is  2 */
+-    if(hy==0x3fe00000) {	/* y is  0.5 */
+-      if(hx>=0)	/* x >= +0 */
+-	return sqrt(x);
++    if(hy==0x3fe00000) {        /* y is  0.5 */
++      if(hx>=0) /* x >= +0 */
++        return sqrt(x);
+     }
+   }
+ 
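
The yisint classification above decides, for negative x, whether y is a non-integer, an odd integer, or an even integer by inspecting the fraction bits; iy >= 0x43400000 means |y| >= 2^53, where every double is necessarily an even integer. The same classification in portable arithmetic, as a sketch:

    #include <math.h>

    /* 0 = not an integer, 1 = odd integer, 2 = even integer */
    static int yisint_sketch(double y) {
      double ay = fabs(y);
      if (ay >= 9007199254740992.0) return 2;   /* |y| >= 2^53: even */
      if (floor(ay) != ay) return 0;
      return fmod(ay, 2.0) == 0.0 ? 2 : 1;
    }

    /* yisint_sketch(3.0) == 1, yisint_sketch(4.0) == 2,
       yisint_sketch(2.5) == 0 */
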
+@@ -571,13 +568,13 @@
+   /* special value of x */
+   if(lx==0) {
+     if(ix==0x7ff00000||ix==0||ix==0x3ff00000){
+-      z = ax;			/*x is +-0,+-inf,+-1*/
+-      if(hy<0) z = one/z;	/* z = (1/|x|) */
++      z = ax;                   /*x is +-0,+-inf,+-1*/
++      if(hy<0) z = one/z;       /* z = (1/|x|) */
+       if(hx<0) {
+-	if(((ix-0x3ff00000)|yisint)==0) {
+-	  z = (z-z)/(z-z); /* (-1)**non-int is NaN */
+-	} else if(yisint==1)
+-	  z = -1.0*z;		/* (x<0)**odd = -(|x|**odd) */
++        if(((ix-0x3ff00000)|yisint)==0) {
++          z = (z-z)/(z-z); /* (-1)**non-int is NaN */
++        } else if(yisint==1)
++          z = -1.0*z;           /* (x<0)**odd = -(|x|**odd) */
+       }
+       return z;
+     }
+@@ -593,7 +590,7 @@
+ 
+   /* |y| is huge */
+   if(iy>0x41e00000) { /* if |y| > 2**31 */
+-    if(iy>0x43f00000){	/* if |y| > 2**64, must o/uflow */
++    if(iy>0x43f00000){  /* if |y| > 2**64, must o/uflow */
+       if(ix<=0x3fefffff) return (hy<0)? hugeX*hugeX:tiny*tiny;
+       if(ix>=0x3ff00000) return (hy>0)? hugeX*hugeX:tiny*tiny;
+     }
+@@ -602,9 +599,9 @@
+     if(ix>0x3ff00000) return (hy>0)? s*hugeX*hugeX:s*tiny*tiny;
+     /* now |1-x| is tiny <= 2**-20, suffice to compute
+        log(x) by x-x^2/2+x^3/3-x^4/4 */
+-    t = ax-one;		/* t has 20 trailing zeros */
++    t = ax-one;         /* t has 20 trailing zeros */
+     w = (t*t)*(0.5-t*(0.3333333333333333333333-t*0.25));
+-    u = ivln2_h*t;	/* ivln2_h has 21 sig. bits */
++    u = ivln2_h*t;      /* ivln2_h has 21 sig. bits */
+     v = t*ivln2_l-w*ivln2;
+     t1 = u+v;
+     __LO(t1) = 0;
+@@ -618,21 +615,21 @@
+     n  += ((ix)>>20)-0x3ff;
+     j  = ix&0x000fffff;
+     /* determine interval */
+-    ix = j|0x3ff00000;		/* normalize ix */
+-    if(j<=0x3988E) k=0;		/* |x|<sqrt(3/2) */
+-    else if(j<0xBB67A) k=1;	/* |x|<sqrt(3)   */
++    ix = j|0x3ff00000;          /* normalize ix */
++    if(j<=0x3988E) k=0;         /* |x|<sqrt(3/2) */
++    else if(j<0xBB67A) k=1;     /* |x|<sqrt(3)   */
+     else {k=0;n+=1;ix -= 0x00100000;}
+     __HI(ax) = ix;
+ 
+     /* compute ss = s_h+s_l = (x-1)/(x+1) or (x-1.5)/(x+1.5) */
+-    u = ax-bp[k];		/* bp[0]=1.0, bp[1]=1.5 */
++    u = ax-bp[k];               /* bp[0]=1.0, bp[1]=1.5 */
+     v = one/(ax+bp[k]);
+     ss = u*v;
+     s_h = ss;
+     __LO(s_h) = 0;
+     /* t_h=ax+bp[k] High */
+     t_h = zeroX;
+-    __HI(t_h)=((ix>>1)|0x20000000)+0x00080000+(k<<18); 
++    __HI(t_h)=((ix>>1)|0x20000000)+0x00080000+(k<<18);
+     t_l = ax - (t_h-bp[k]);
+     s_l = v*((u-s_h*t_h)-s_h*t_l);
+     /* compute log(ax) */
+@@ -650,7 +647,7 @@
+     p_h = u+v;
+     __LO(p_h) = 0;
+     p_l = v-(p_h-u);
+-    z_h = cp_h*p_h;		/* cp_h+cp_l = 2/(3*log2) */
++    z_h = cp_h*p_h;             /* cp_h+cp_l = 2/(3*log2) */
+     z_l = cp_l*p_h+p_l*cp+dp_l[k];
+     /* log2(ax) = (ss+..)*2/(3*log2) = n + dp_h + z_h + z_l */
+     t = (double)n;
+@@ -667,17 +664,17 @@
+   z = p_l+p_h;
+   j = __HI(z);
+   i = __LO(z);
+-  if (j>=0x40900000) {				/* z >= 1024 */
+-    if(((j-0x40900000)|i)!=0)			/* if z > 1024 */
+-      return s*hugeX*hugeX;			/* overflow */
++  if (j>=0x40900000) {                          /* z >= 1024 */
++    if(((j-0x40900000)|i)!=0)                   /* if z > 1024 */
++      return s*hugeX*hugeX;                     /* overflow */
+     else {
+-      if(p_l+ovt>z-p_h) return s*hugeX*hugeX;	/* overflow */
++      if(p_l+ovt>z-p_h) return s*hugeX*hugeX;   /* overflow */
+     }
+-  } else if((j&0x7fffffff)>=0x4090cc00 ) {	/* z <= -1075 */
+-    if(((j-0xc090cc00)|i)!=0) 		/* z < -1075 */
+-      return s*tiny*tiny;		/* underflow */
++  } else if((j&0x7fffffff)>=0x4090cc00 ) {      /* z <= -1075 */
++    if(((j-0xc090cc00)|i)!=0)           /* z < -1075 */
++      return s*tiny*tiny;               /* underflow */
+     else {
+-      if(p_l<=z-p_h) return s*tiny*tiny;	/* underflow */
++      if(p_l<=z-p_h) return s*tiny*tiny;        /* underflow */
+     }
+   }
+   /*
+@@ -686,9 +683,9 @@
+   i = j&0x7fffffff;
+   k = (i>>20)-0x3ff;
+   n = 0;
+-  if(i>0x3fe00000) {		/* if |z| > 0.5, set n = [z+0.5] */
++  if(i>0x3fe00000) {            /* if |z| > 0.5, set n = [z+0.5] */
+     n = j+(0x00100000>>(k+1));
+-    k = ((n&0x7fffffff)>>20)-0x3ff;	/* new k for n */
++    k = ((n&0x7fffffff)>>20)-0x3ff;     /* new k for n */
+     t = zeroX;
+     __HI(t) = (n&~(0x000fffff>>k));
+     n = ((n&0x000fffff)|0x00100000)>>(20-k);
+@@ -707,7 +704,7 @@
+   z  = one-(r-z);
+   j  = __HI(z);
+   j += (n<<20);
+-  if((j>>20)<=0) z = scalbn(z,n);	/* subnormal output */
++  if((j>>20)<=0) z = scalbn(z,n);       /* subnormal output */
+   else __HI(z) += (n<<20);
+   return s*z;
+ }
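
The nineteen special cases listed in the pow comment block are easy to spot-check against a conforming libm. One caveat: case 9 (+-1 ** +-INF is NaN) reflects the original fdlibm behaviour; C99 and IEEE 754-2008 later redefined pow(+-1, +-inf) as 1, so that case is left out of this sketch:

    #include <assert.h>
    #include <math.h>

    int main(void) {
      assert(pow(3.7, 0.0) == 1.0);           /* case 1:  x**0 == 1      */
      assert(pow(3.7, 1.0) == 3.7);           /* case 2:  x**1 == x      */
      assert(pow(2.0, -INFINITY) == 0.0);     /* case 6:  (|x|>1)**-inf  */
      assert(pow(0.0, -2.0) == INFINITY);     /* case 12: +0**-y is +inf */
      assert(isnan(pow(-8.0, 1.0 / 3.0)));    /* case 19: neg**non-int   */
      return 0;
    }
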
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/sharedRuntimeTrig.cpp openjdk/hotspot/src/share/vm/runtime/sharedRuntimeTrig.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/sharedRuntimeTrig.cpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/sharedRuntimeTrig.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)sharedRuntimeTrig.cpp	1.16 07/05/05 17:06:55 JVM"
+-#endif
+ /*
+  * Copyright 2001-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -79,25 +76,25 @@
+   int  k,hx,lx;
+   hx = __HI(x);
+   lx = __LO(x);
+-  k = (hx&0x7ff00000)>>20;		/* extract exponent */
+-  if (k==0) {				/* 0 or subnormal x */
++  k = (hx&0x7ff00000)>>20;              /* extract exponent */
++  if (k==0) {                           /* 0 or subnormal x */
+     if ((lx|(hx&0x7fffffff))==0) return x; /* +-0 */
+     x *= two54;
+     hx = __HI(x);
+     k = ((hx&0x7ff00000)>>20) - 54;
+-    if (n< -50000) return tiny*x; 	/*underflow*/
++    if (n< -50000) return tiny*x;       /*underflow*/
+   }
+-  if (k==0x7ff) return x+x;		/* NaN or Inf */
++  if (k==0x7ff) return x+x;             /* NaN or Inf */
+   k = k+n;
+   if (k >  0x7fe) return hugeX*copysignA(hugeX,x); /* overflow  */
+-  if (k > 0) 				/* normal result */
++  if (k > 0)                            /* normal result */
+     {__HI(x) = (hx&0x800fffff)|(k<<20); return x;}
+   if (k <= -54) {
+-    if (n > 50000) 	/* in case integer overflow in n+k */
+-      return hugeX*copysignA(hugeX,x);	/*overflow*/
+-    else return tiny*copysignA(tiny,x);	/*underflow*/
++    if (n > 50000)      /* in case integer overflow in n+k */
++      return hugeX*copysignA(hugeX,x);  /*overflow*/
++    else return tiny*copysignA(tiny,x); /*underflow*/
+   }
+-  k += 54;				/* subnormal result */
++  k += 54;                              /* subnormal result */
+   __HI(x) = (hx&0x800fffff)|(k<<20);
+   return x*twom54;
+ }
+@@ -105,12 +102,12 @@
+ /*
+  * __kernel_rem_pio2(x,y,e0,nx,prec,ipio2)
+  * double x[],y[]; int e0,nx,prec; int ipio2[];
+- * 
+- * __kernel_rem_pio2 return the last three digits of N with 
+- *		y = x - N*pi/2
++ *
++ * __kernel_rem_pio2 return the last three digits of N with
++ *              y = x - N*pi/2
+  * so that |y| < pi/2.
+  *
+- * The method is to compute the integer (mod 8) and fraction parts of 
++ * The method is to compute the integer (mod 8) and fraction parts of
+  * (2/pi)*x without doing the full multiplication. In general we
+  * skip the part of the product that are known to be a huge integer (
+  * more accurately, = 0 mod 8 ). Thus the number of operations are
+@@ -119,102 +116,102 @@
+  * (2/pi) is represented by an array of 24-bit integers in ipio2[].
+  *
+  * Input parameters:
+- * 	x[]	The input value (must be positive) is broken into nx 
+- *		pieces of 24-bit integers in double precision format.
+- *		x[i] will be the i-th 24 bit of x. The scaled exponent 
+- *		of x[0] is given in input parameter e0 (i.e., x[0]*2^e0 
+- *		match x's up to 24 bits.
+- *
+- *		Example of breaking a double positive z into x[0]+x[1]+x[2]:
+- *			e0 = ilogb(z)-23
+- *			z  = scalbn(z,-e0)
+- *		for i = 0,1,2
+- *			x[i] = floor(z)
+- *			z    = (z-x[i])*2**24
+- *
+- *
+- *	y[]	ouput result in an array of double precision numbers.
+- *		The dimension of y[] is:
+- *			24-bit  precision	1
+- *			53-bit  precision	2
+- *			64-bit  precision	2
+- *			113-bit precision	3
+- *		The actual value is the sum of them. Thus for 113-bit
+- *		precsion, one may have to do something like:
+- *
+- *		long double t,w,r_head, r_tail;
+- *		t = (long double)y[2] + (long double)y[1];
+- *		w = (long double)y[0];
+- *		r_head = t+w;
+- *		r_tail = w - (r_head - t);
+- *
+- *	e0	The exponent of x[0]
+- *
+- *	nx	dimension of x[]
+- *
+- *  	prec	an interger indicating the precision:
+- *			0	24  bits (single)
+- *			1	53  bits (double)
+- *			2	64  bits (extended)
+- *			3	113 bits (quad)
+- *
+- *	ipio2[]
+- *		integer array, contains the (24*i)-th to (24*i+23)-th 
+- *		bit of 2/pi after binary point. The corresponding 
+- *		floating value is
++ *      x[]     The input value (must be positive) is broken into nx
++ *              pieces of 24-bit integers in double precision format.
++ *              x[i] will be the i-th 24 bit of x. The scaled exponent
++ *              of x[0] is given in input parameter e0 (i.e., x[0]*2^e0
++ *              match x's up to 24 bits.
++ *
++ *              Example of breaking a double positive z into x[0]+x[1]+x[2]:
++ *                      e0 = ilogb(z)-23
++ *                      z  = scalbn(z,-e0)
++ *              for i = 0,1,2
++ *                      x[i] = floor(z)
++ *                      z    = (z-x[i])*2**24
++ *
++ *
++ *      y[]     output result in an array of double precision numbers.
++ *              The dimension of y[] is:
++ *                      24-bit  precision       1
++ *                      53-bit  precision       2
++ *                      64-bit  precision       2
++ *                      113-bit precision       3
++ *              The actual value is the sum of them. Thus for 113-bit
++ *              precision, one may have to do something like:
++ *
++ *              long double t,w,r_head, r_tail;
++ *              t = (long double)y[2] + (long double)y[1];
++ *              w = (long double)y[0];
++ *              r_head = t+w;
++ *              r_tail = w - (r_head - t);
++ *
++ *      e0      The exponent of x[0]
++ *
++ *      nx      dimension of x[]
++ *
++ *      prec    an integer indicating the precision:
++ *                      0       24  bits (single)
++ *                      1       53  bits (double)
++ *                      2       64  bits (extended)
++ *                      3       113 bits (quad)
++ *
++ *      ipio2[]
++ *              integer array, contains the (24*i)-th to (24*i+23)-th
++ *              bit of 2/pi after binary point. The corresponding
++ *              floating value is
+  *
+- *			ipio2[i] * 2^(-24(i+1)).
++ *                      ipio2[i] * 2^(-24(i+1)).
+  *
+  * External function:
+- *	double scalbn(), floor();
++ *      double scalbn(), floor();
+  *
+  *
+  * Here is the description of some local variables:
+  *
+- * 	jk	jk+1 is the initial number of terms of ipio2[] needed
+- *		in the computation. The recommended value is 2,3,4,
+- *		6 for single, double, extended,and quad.
++ *      jk      jk+1 is the initial number of terms of ipio2[] needed
++ *              in the computation. The recommended value is 2,3,4,
++ *              6 for single, double, extended, and quad.
+  *
+- * 	jz	local integer variable indicating the number of 
+- *		terms of ipio2[] used. 
++ *      jz      local integer variable indicating the number of
++ *              terms of ipio2[] used.
+  *
+- *	jx	nx - 1
++ *      jx      nx - 1
+  *
+- *	jv	index for pointing to the suitable ipio2[] for the
+- *		computation. In general, we want
+- *			( 2^e0*x[0] * ipio2[jv-1]*2^(-24jv) )/8
+- *		is an integer. Thus
+- *			e0-3-24*jv >= 0 or (e0-3)/24 >= jv
+- *		Hence jv = max(0,(e0-3)/24).
++ *      jv      index for pointing to the suitable ipio2[] for the
++ *              computation. In general, we want
++ *                      ( 2^e0*x[0] * ipio2[jv-1]*2^(-24jv) )/8
++ *              is an integer. Thus
++ *                      e0-3-24*jv >= 0 or (e0-3)/24 >= jv
++ *              Hence jv = max(0,(e0-3)/24).
+  *
+- *	jp	jp+1 is the number of terms in PIo2[] needed, jp = jk.
++ *      jp      jp+1 is the number of terms in PIo2[] needed, jp = jk.
+  *
+- * 	q[]	double array with integral value, representing the
+- *		24-bits chunk of the product of x and 2/pi.
++ *      q[]     double array with integral value, representing the
++ *              24-bits chunk of the product of x and 2/pi.
+  *
+- *	q0	the corresponding exponent of q[0]. Note that the
+- *		exponent for q[i] would be q0-24*i.
++ *      q0      the corresponding exponent of q[0]. Note that the
++ *              exponent for q[i] would be q0-24*i.
+  *
+- *	PIo2[]	double precision array, obtained by cutting pi/2
+- *		into 24 bits chunks. 
++ *      PIo2[]  double precision array, obtained by cutting pi/2
++ *              into 24 bits chunks.
+  *
+- *	f[]	ipio2[] in floating point 
++ *      f[]     ipio2[] in floating point
+  *
+- *	iq[]	integer array by breaking up q[] in 24-bits chunk.
++ *      iq[]    integer array by breaking up q[] in 24-bits chunk.
+  *
+- *	fq[]	final product of x*(2/pi) in fq[0],..,fq[jk]
++ *      fq[]    final product of x*(2/pi) in fq[0],..,fq[jk]
+  *
+- *	ih	integer. If >0 it indicats q[] is >= 0.5, hence
+- *		it also indicates the *sign* of the result.
++ *      ih      integer. If >0 it indicates q[] is >= 0.5, hence
++ *              it also indicates the *sign* of the result.
+  *
+  */
+ 
+ 
+ /*
+  * Constants:
+- * The hexadecimal values are the intended ones for the following 
+- * constants. The decimal values may be used, provided that the 
+- * compiler will convert from decimal to binary accurately enough 
++ * The hexadecimal values are the intended ones for the following
++ * constants. The decimal values may be used, provided that the
++ * compiler will convert from decimal to binary accurately enough
+  * to produce the hexadecimal values shown.
+  */
+ 
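
The input convention documented above (x delivered as 24-bit chunks x[i] with a scaled exponent e0) can be transcribed directly from the recipe in the comment; a runnable version:

    #include <math.h>
    #include <stdio.h>

    int main(void) {
      double z = 12345678.9, x[3];
      int e0 = ilogb(z) - 23;            /* e0 = ilogb(z)-23, per above */
      z = scalbn(z, -e0);
      for (int i = 0; i < 3; i++) {      /* x[i] = floor(z); shift 24 bits */
        x[i] = floor(z);
        z = (z - x[i]) * 16777216.0;     /* 2**24 */
      }
      /* three 24-bit chunks hold all 53 significand bits, so this
         reassembly is exact */
      double back = scalbn(x[0], e0) + scalbn(x[1], e0 - 24)
                  + scalbn(x[2], e0 - 48);
      printf("%.17g\n", back);
      return 0;
    }
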
+@@ -232,7 +229,7 @@
+   2.16741683877804819444e-51, /* 0x3569F31D, 0x00000000 */
+ };
+ 
+-static const double			
++static const double
+ zeroB   = 0.0,
+ one     = 1.0,
+ two24B  = 1.67772160000000000000e+07, /* 0x41700000, 0x00000000 */
+@@ -241,25 +238,25 @@
+ static int __kernel_rem_pio2(double *x, double *y, int e0, int nx, int prec, const int *ipio2) {
+   int jz,jx,jv,jp,jk,carry,n,iq[20],i,j,k,m,q0,ih;
+   double z,fw,f[20],fq[20],q[20];
+-  
++
+   /* initialize jk*/
+   jk = init_jk[prec];
+   jp = jk;
+-  
++
+   /* determine jx,jv,q0, note that 3>q0 */
+   jx =  nx-1;
+   jv = (e0-3)/24; if(jv<0) jv=0;
+   q0 =  e0-24*(jv+1);
+-  
++
+   /* set up f[0] to f[jx+jk] where f[jx+jk] = ipio2[jv+jk] */
+   j = jv-jx; m = jx+jk;
+   for(i=0;i<=m;i++,j++) f[i] = (j<0)? zeroB : (double) ipio2[j];
+-  
++
+   /* compute q[0],q[1],...q[jk] */
+   for (i=0;i<=jk;i++) {
+     for(j=0,fw=0.0;j<=jx;j++) fw += x[j]*f[jx+i-j]; q[i] = fw;
+   }
+-  
++
+   jz = jk;
+ recompute:
+   /* distill q[] into iq[] reversingly */
+@@ -268,37 +265,37 @@
+     iq[i] =  (int)(z-two24B*fw);
+     z     =  q[j-1]+fw;
+   }
+-  
++
+   /* compute n */
+-  z  = scalbnA(z,q0);		/* actual value of z */
+-  z -= 8.0*floor(z*0.125);		/* trim off integer >= 8 */
++  z  = scalbnA(z,q0);           /* actual value of z */
++  z -= 8.0*floor(z*0.125);              /* trim off integer >= 8 */
+   n  = (int) z;
+   z -= (double)n;
+   ih = 0;
+-  if(q0>0) {	/* need iq[jz-1] to determine n */
++  if(q0>0) {    /* need iq[jz-1] to determine n */
+     i  = (iq[jz-1]>>(24-q0)); n += i;
+     iq[jz-1] -= i<<(24-q0);
+     ih = iq[jz-1]>>(23-q0);
+-  } 
++  }
+   else if(q0==0) ih = iq[jz-1]>>23;
+   else if(z>=0.5) ih=2;
+-  
+-  if(ih>0) {	/* q > 0.5 */
++
++  if(ih>0) {    /* q > 0.5 */
+     n += 1; carry = 0;
+-    for(i=0;i<jz ;i++) {	/* compute 1-q */
++    for(i=0;i<jz ;i++) {        /* compute 1-q */
+       j = iq[i];
+       if(carry==0) {
+-	if(j!=0) {
+-	  carry = 1; iq[i] = 0x1000000- j;
+-	}
++        if(j!=0) {
++          carry = 1; iq[i] = 0x1000000- j;
++        }
+       } else  iq[i] = 0xffffff - j;
+     }
+-    if(q0>0) {		/* rare case: chance is 1 in 12 */
++    if(q0>0) {          /* rare case: chance is 1 in 12 */
+       switch(q0) {
+       case 1:
+-	iq[jz-1] &= 0x7fffff; break;
++        iq[jz-1] &= 0x7fffff; break;
+       case 2:
+-	iq[jz-1] &= 0x3fffff; break;
++        iq[jz-1] &= 0x3fffff; break;
+       }
+     }
+     if(ih==2) {
+@@ -306,78 +303,78 @@
+       if(carry!=0) z -= scalbnA(one,q0);
+     }
+   }
+-  
++
+   /* check if recomputation is needed */
+   if(z==zeroB) {
+     j = 0;
+     for (i=jz-1;i>=jk;i--) j |= iq[i];
+     if(j==0) { /* need recomputation */
+       for(k=1;iq[jk-k]==0;k++);   /* k = no. of terms needed */
+-      
++
+       for(i=jz+1;i<=jz+k;i++) {   /* add q[jz+1] to q[jz+k] */
+-	f[jx+i] = (double) ipio2[jv+i];
+-	for(j=0,fw=0.0;j<=jx;j++) fw += x[j]*f[jx+i-j];
+-	q[i] = fw;
++        f[jx+i] = (double) ipio2[jv+i];
++        for(j=0,fw=0.0;j<=jx;j++) fw += x[j]*f[jx+i-j];
++        q[i] = fw;
+       }
+       jz += k;
+       goto recompute;
+     }
+   }
+-  
++
+   /* chop off zero terms */
+   if(z==0.0) {
+     jz -= 1; q0 -= 24;
+     while(iq[jz]==0) { jz--; q0-=24;}
+   } else { /* break z into 24-bit if neccessary */
+     z = scalbnA(z,-q0);
+-    if(z>=two24B) { 
++    if(z>=two24B) {
+       fw = (double)((int)(twon24*z));
+       iq[jz] = (int)(z-two24B*fw);
+       jz += 1; q0 += 24;
+       iq[jz] = (int) fw;
+     } else iq[jz] = (int) z ;
+   }
+-  
++
+   /* convert integer "bit" chunk to floating-point value */
+   fw = scalbnA(one,q0);
+   for(i=jz;i>=0;i--) {
+     q[i] = fw*(double)iq[i]; fw*=twon24;
+   }
+-  
++
+   /* compute PIo2[0,...,jp]*q[jz,...,0] */
+   for(i=jz;i>=0;i--) {
+     for(fw=0.0,k=0;k<=jp&&k<=jz-i;k++) fw += PIo2[k]*q[i+k];
+     fq[jz-i] = fw;
+   }
+-  
++
+   /* compress fq[] into y[] */
+   switch(prec) {
+   case 0:
+     fw = 0.0;
+     for (i=jz;i>=0;i--) fw += fq[i];
+-    y[0] = (ih==0)? fw: -fw; 
++    y[0] = (ih==0)? fw: -fw;
+     break;
+   case 1:
+   case 2:
+     fw = 0.0;
+-    for (i=jz;i>=0;i--) fw += fq[i]; 
+-    y[0] = (ih==0)? fw: -fw; 
++    for (i=jz;i>=0;i--) fw += fq[i];
++    y[0] = (ih==0)? fw: -fw;
+     fw = fq[0]-fw;
+     for (i=1;i<=jz;i++) fw += fq[i];
+-    y[1] = (ih==0)? fw: -fw; 
++    y[1] = (ih==0)? fw: -fw;
+     break;
+-  case 3:	/* painful */
++  case 3:       /* painful */
+     for (i=jz;i>0;i--) {
+-      fw      = fq[i-1]+fq[i]; 
++      fw      = fq[i-1]+fq[i];
+       fq[i]  += fq[i-1]-fw;
+       fq[i-1] = fw;
+     }
+     for (i=jz;i>1;i--) {
+-      fw      = fq[i-1]+fq[i]; 
++      fw      = fq[i-1]+fq[i];
+       fq[i]  += fq[i-1]-fw;
+       fq[i-1] = fw;
+     }
+-    for (fw=0.0,i=jz;i>=2;i--) fw += fq[i]; 
++    for (fw=0.0,i=jz;i>=2;i--) fw += fq[i];
+     if(ih==0) {
+       y[0] =  fq[0]; y[1] =  fq[1]; y[2] =  fw;
+     } else {
+@@ -394,33 +391,33 @@
+  *
+  * Developed at SunPro, a Sun Microsystems, Inc. business.
+  * Permission to use, copy, modify, and distribute this
+- * software is freely granted, provided that this notice 
++ * software is freely granted, provided that this notice
+  * is preserved.
+  * ====================================================
+  *
+  */
+ 
+ /* __ieee754_rem_pio2(x,y)
+- * 
+- * return the remainder of x rem pi/2 in y[0]+y[1] 
++ *
++ * return the remainder of x rem pi/2 in y[0]+y[1]
+  * use __kernel_rem_pio2()
+  */
+ 
+ /*
+- * Table of constants for 2/pi, 396 Hex digits (476 decimal) of 2/pi 
++ * Table of constants for 2/pi, 396 Hex digits (476 decimal) of 2/pi
+  */
+ static const int two_over_pi[] = {
+-  0xA2F983, 0x6E4E44, 0x1529FC, 0x2757D1, 0xF534DD, 0xC0DB62, 
+-  0x95993C, 0x439041, 0xFE5163, 0xABDEBB, 0xC561B7, 0x246E3A, 
+-  0x424DD2, 0xE00649, 0x2EEA09, 0xD1921C, 0xFE1DEB, 0x1CB129, 
+-  0xA73EE8, 0x8235F5, 0x2EBB44, 0x84E99C, 0x7026B4, 0x5F7E41, 
+-  0x3991D6, 0x398353, 0x39F49C, 0x845F8B, 0xBDF928, 0x3B1FF8, 
+-  0x97FFDE, 0x05980F, 0xEF2F11, 0x8B5A0A, 0x6D1F6D, 0x367ECF, 
+-  0x27CB09, 0xB74F46, 0x3F669E, 0x5FEA2D, 0x7527BA, 0xC7EBE5, 
+-  0xF17B3D, 0x0739F7, 0x8A5292, 0xEA6BFB, 0x5FB11F, 0x8D5D08, 
+-  0x560330, 0x46FC7B, 0x6BABF0, 0xCFBC20, 0x9AF436, 0x1DA9E3, 
+-  0x91615E, 0xE61B08, 0x659985, 0x5F14A0, 0x68408D, 0xFFD880, 
+-  0x4D7327, 0x310606, 0x1556CA, 0x73A8C9, 0x60E27B, 0xC08C6B, 
++  0xA2F983, 0x6E4E44, 0x1529FC, 0x2757D1, 0xF534DD, 0xC0DB62,
++  0x95993C, 0x439041, 0xFE5163, 0xABDEBB, 0xC561B7, 0x246E3A,
++  0x424DD2, 0xE00649, 0x2EEA09, 0xD1921C, 0xFE1DEB, 0x1CB129,
++  0xA73EE8, 0x8235F5, 0x2EBB44, 0x84E99C, 0x7026B4, 0x5F7E41,
++  0x3991D6, 0x398353, 0x39F49C, 0x845F8B, 0xBDF928, 0x3B1FF8,
++  0x97FFDE, 0x05980F, 0xEF2F11, 0x8B5A0A, 0x6D1F6D, 0x367ECF,
++  0x27CB09, 0xB74F46, 0x3F669E, 0x5FEA2D, 0x7527BA, 0xC7EBE5,
++  0xF17B3D, 0x0739F7, 0x8A5292, 0xEA6BFB, 0x5FB11F, 0x8D5D08,
++  0x560330, 0x46FC7B, 0x6BABF0, 0xCFBC20, 0x9AF436, 0x1DA9E3,
++  0x91615E, 0xE61B08, 0x659985, 0x5F14A0, 0x68408D, 0xFFD880,
++  0x4D7327, 0x310606, 0x1556CA, 0x73A8C9, 0x60E27B, 0xC08C6B,
+ };
+ 
+ static const int npio2_hw[] = {
+@@ -442,7 +439,7 @@
+  * pio2_3t:  pi/2 - (pio2_1+pio2_2+pio2_3)
+  */
+ 
+-static const double 
++static const double
+ zeroA =  0.00000000000000000000e+00, /* 0x00000000, 0x00000000 */
+ half =  5.00000000000000000000e-01, /* 0x3FE00000, 0x00000000 */
+ two24A =  1.67772160000000000000e+07, /* 0x41700000, 0x00000000 */
+@@ -458,9 +455,9 @@
+   double z,w,t,r,fn;
+   double tx[3];
+   int e0,i,j,nx,n,ix,hx,i0;
+-  
+-  i0 = ((*(int*)&two24A)>>30)^1;	/* high word index */
+-  hx = *(i0+(int*)&x);		/* high word of x */
++
++  i0 = ((*(int*)&two24A)>>30)^1;        /* high word index */
++  hx = *(i0+(int*)&x);          /* high word of x */
+   ix = hx&0x7fffffff;
+   if(ix<=0x3fe921fb)   /* |x| ~<= pi/4 , no need for reduction */
+     {y[0] = x; y[1] = 0; return 0;}
+@@ -468,68 +465,68 @@
+     if(hx>0) {
+       z = x - pio2_1;
+       if(ix!=0x3ff921fb) {    /* 33+53 bit pi is good enough */
+-	y[0] = z - pio2_1t;
+-	y[1] = (z-y[0])-pio2_1t;
++        y[0] = z - pio2_1t;
++        y[1] = (z-y[0])-pio2_1t;
+       } else {                /* near pi/2, use 33+33+53 bit pi */
+-	z -= pio2_2;
+-	y[0] = z - pio2_2t;
+-	y[1] = (z-y[0])-pio2_2t;
++        z -= pio2_2;
++        y[0] = z - pio2_2t;
++        y[1] = (z-y[0])-pio2_2t;
+       }
+       return 1;
+     } else {    /* negative x */
+       z = x + pio2_1;
+       if(ix!=0x3ff921fb) {    /* 33+53 bit pi is good enough */
+-	y[0] = z + pio2_1t;
+-	y[1] = (z-y[0])+pio2_1t;
++        y[0] = z + pio2_1t;
++        y[1] = (z-y[0])+pio2_1t;
+       } else {                /* near pi/2, use 33+33+53 bit pi */
+-	z += pio2_2;
+-	y[0] = z + pio2_2t;
+-	y[1] = (z-y[0])+pio2_2t;
++        z += pio2_2;
++        y[0] = z + pio2_2t;
++        y[1] = (z-y[0])+pio2_2t;
+       }
+       return -1;
+-    }    
++    }
+   }
+   if(ix<=0x413921fb) { /* |x| ~<= 2^19*(pi/2), medium size */
+     t  = fabsd(x);
+     n  = (int) (t*invpio2+half);
+     fn = (double)n;
+     r  = t-fn*pio2_1;
+-    w  = fn*pio2_1t;	/* 1st round good to 85 bit */
+-    if(n<32&&ix!=npio2_hw[n-1]) {	
+-      y[0] = r-w;	/* quick check no cancellation */
++    w  = fn*pio2_1t;    /* 1st round good to 85 bit */
++    if(n<32&&ix!=npio2_hw[n-1]) {
++      y[0] = r-w;       /* quick check no cancellation */
+     } else {
+       j  = ix>>20;
+-      y[0] = r-w; 
++      y[0] = r-w;
+       i = j-(((*(i0+(int*)&y[0]))>>20)&0x7ff);
+       if(i>16) {  /* 2nd iteration needed, good to 118 */
+-	t  = r;
+-	w  = fn*pio2_2;	
+-	r  = t-w;
+-	w  = fn*pio2_2t-((t-r)-w);	
+-	y[0] = r-w;
+-	i = j-(((*(i0+(int*)&y[0]))>>20)&0x7ff);
+-	if(i>49)  {	/* 3rd iteration need, 151 bits acc */
+-	  t  = r;	/* will cover all possible cases */
+-	  w  = fn*pio2_3;	
+-	  r  = t-w;
+-	  w  = fn*pio2_3t-((t-r)-w);	
+-	  y[0] = r-w;
+-	}
++        t  = r;
++        w  = fn*pio2_2;
++        r  = t-w;
++        w  = fn*pio2_2t-((t-r)-w);
++        y[0] = r-w;
++        i = j-(((*(i0+(int*)&y[0]))>>20)&0x7ff);
++        if(i>49)  {     /* 3rd iteration need, 151 bits acc */
++          t  = r;       /* will cover all possible cases */
++          w  = fn*pio2_3;
++          r  = t-w;
++          w  = fn*pio2_3t-((t-r)-w);
++          y[0] = r-w;
++        }
+       }
+     }
+     y[1] = (r-y[0])-w;
+-    if(hx<0) 	{y[0] = -y[0]; y[1] = -y[1]; return -n;}
+-    else	 return n;
++    if(hx<0)    {y[0] = -y[0]; y[1] = -y[1]; return -n;}
++    else         return n;
+   }
+-  /* 
++  /*
+    * all other (large) arguments
+    */
+-  if(ix>=0x7ff00000) {		/* x is inf or NaN */
++  if(ix>=0x7ff00000) {          /* x is inf or NaN */
+     y[0]=y[1]=x-x; return 0;
+   }
+   /* set z = scalbn(|x|,ilogb(x)-23) */
+   *(1-i0+(int*)&z) = *(1-i0+(int*)&x);
+-  e0 	= (ix>>20)-1046;	/* e0 = ilogb(z)-23; */
++  e0    = (ix>>20)-1046;        /* e0 = ilogb(z)-23; */
+   *(i0+(int*)&z) = ix - (e0<<20);
+   for(i=0;i<2;i++) {
+     tx[i] = (double)((int)(z));
+@@ -537,7 +534,7 @@
+   }
+   tx[2] = z;
+   nx = 3;
+-  while(tx[nx-1]==zeroA) nx--;	/* skip zero term */
++  while(tx[nx-1]==zeroA) nx--;  /* skip zero term */
+   n  =  __kernel_rem_pio2(tx,y,e0,nx,2,two_over_pi);
+   if(hx<0) {y[0] = -y[0]; y[1] = -y[1]; return -n;}
+   return n;
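
The line i0 = ((*(int*)&two24A)>>30)^1 above is a byte-order probe: the high word of 2^24 is 0x41700000, whose top two bits are 01, so the shift yields 1 when the first int in memory is the high word (big endian) and 0 when it is the low word (little endian); the xor turns that into the index of the high word. A standalone version, type-punning through an int pointer exactly as the original does (memcpy would be the strictly conforming spelling):

    #include <assert.h>

    int main(void) {
      double two24 = 16777216.0;              /* high word 0x41700000 */
      int i0 = ((*(int *)&two24) >> 30) ^ 1;  /* index of the high word */
      int hx = *(i0 + (int *)&two24);         /* same access pattern as
                                                 __HI(x) in the patch */
      assert(hx == 0x41700000);
      return 0;
    }
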
+@@ -551,25 +548,25 @@
+  * Input iy indicates whether y is 0. (if iy=0, y assume to be 0).
+  *
+  * Algorithm
+- *	1. Since sin(-x) = -sin(x), we need only to consider positive x.
+- *	2. if x < 2^-27 (hx<0x3e400000 0), return x with inexact if x!=0.
+- *	3. sin(x) is approximated by a polynomial of degree 13 on
+- *	   [0,pi/4]
+- *		  	         3            13
+- *	   	sin(x) ~ x + S1*x + ... + S6*x
+- *	   where
+- *
+- * 	|sin(x)         2     4     6     8     10     12  |     -58
+- * 	|----- - (1+S1*x +S2*x +S3*x +S4*x +S5*x  +S6*x   )| <= 2
+- * 	|  x 					           |
+- *
+- *	4. sin(x+y) = sin(x) + sin'(x')*y
+- *		    ~ sin(x) + (1-x*x/2)*y
+- *	   For better accuracy, let
+- *		     3      2      2      2      2
+- *		r = x *(S2+x *(S3+x *(S4+x *(S5+x *S6))))
+- *	   then                   3    2
+- *		sin(x) = x + (S1*x + (x *(r-y/2)+y))
++ *      1. Since sin(-x) = -sin(x), we need only to consider positive x.
++ *      2. if x < 2^-27 (hx<0x3e400000 0), return x with inexact if x!=0.
++ *      3. sin(x) is approximated by a polynomial of degree 13 on
++ *         [0,pi/4]
++ *                               3            13
++ *              sin(x) ~ x + S1*x + ... + S6*x
++ *         where
++ *
++ *      |sin(x)         2     4     6     8     10     12  |     -58
++ *      |----- - (1+S1*x +S2*x +S3*x +S4*x +S5*x  +S6*x   )| <= 2
++ *      |  x                                               |
++ *
++ *      4. sin(x+y) = sin(x) + sin'(x')*y
++ *                  ~ sin(x) + (1-x*x/2)*y
++ *         For better accuracy, let
++ *                   3      2      2      2      2
++ *              r = x *(S2+x *(S3+x *(S4+x *(S5+x *S6))))
++ *         then                   3    2
++ *              sin(x) = x + (S1*x + (x *(r-y/2)+y))
+  */
+ 
+ static const double
+@@ -582,16 +579,16 @@
+ 
+ static double __kernel_sin(double x, double y, int iy)
+ {
+-	double z,r,v;
+-	int ix;
+-	ix = __HI(x)&0x7fffffff;	/* high word of x */
+-	if(ix<0x3e400000)			/* |x| < 2**-27 */
+-	   {if((int)x==0) return x;}		/* generate inexact */
+-	z	=  x*x;
+-	v	=  z*x;
+-	r	=  S2+z*(S3+z*(S4+z*(S5+z*S6)));
+-	if(iy==0) return x+v*(S1+z*r);
+-	else      return x-((z*(half*y-v*r)-y)-v*S1);
++        double z,r,v;
++        int ix;
++        ix = __HI(x)&0x7fffffff;        /* high word of x */
++        if(ix<0x3e400000)                       /* |x| < 2**-27 */
++           {if((int)x==0) return x;}            /* generate inexact */
++        z       =  x*x;
++        v       =  z*x;
++        r       =  S2+z*(S3+z*(S4+z*(S5+z*S6)));
++        if(iy==0) return x+v*(S1+z*r);
++        else      return x-((z*(half*y-v*r)-y)-v*S1);
+ }
+ 
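
__kernel_sin above evaluates its degree-13 odd polynomial as one Horner chain in z = x*x, exactly the x + v*(S1 + z*r) shape of its return. A sketch of that shape using the first three Taylor coefficients as stand-ins for the minimax-fitted S1..S6 (so accuracy here is far below the documented 2^-58 bound):

    #include <math.h>
    #include <stdio.h>

    int main(void) {
      const double S1 = -1.0 / 6.0, S2 = 1.0 / 120.0, S3 = -1.0 / 5040.0;
      double x = 0.5;
      double z = x * x, v = z * x;              /* v = x^3 */
      double r = S2 + z * S3;                   /* Horner chain in z */
      double approx = x + v * (S1 + z * r);     /* x + S1*x^3 + ... */
      printf("%.12f\n%.12f\n", approx, sin(x)); /* agree to ~1e-8 */
      return 0;
    }
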
+ /*
+@@ -601,32 +598,32 @@
+  * Input y is the tail of x.
+  *
+  * Algorithm
+- *	1. Since cos(-x) = cos(x), we need only to consider positive x.
+- *	2. if x < 2^-27 (hx<0x3e400000 0), return 1 with inexact if x!=0.
+- *	3. cos(x) is approximated by a polynomial of degree 14 on
+- *	   [0,pi/4]
+- *		  	                 4            14
+- *	   	cos(x) ~ 1 - x*x/2 + C1*x + ... + C6*x
+- *	   where the remez error is
+- *
+- * 	|              2     4     6     8     10    12     14 |     -58
+- * 	|cos(x)-(1-.5*x +C1*x +C2*x +C3*x +C4*x +C5*x  +C6*x  )| <= 2
+- * 	|    					               |
+- *
+- * 	               4     6     8     10    12     14
+- *	4. let r = C1*x +C2*x +C3*x +C4*x +C5*x  +C6*x  , then
+- *	       cos(x) = 1 - x*x/2 + r
+- *	   since cos(x+y) ~ cos(x) - sin(x)*y
+- *			  ~ cos(x) - x*y,
+- *	   a correction term is necessary in cos(x) and hence
+- *		cos(x+y) = 1 - (x*x/2 - (r - x*y))
+- *	   For better accuracy when x > 0.3, let qx = |x|/4 with
+- *	   the last 32 bits mask off, and if x > 0.78125, let qx = 0.28125.
+- *	   Then
+- *		cos(x+y) = (1-qx) - ((x*x/2-qx) - (r-x*y)).
+- *	   Note that 1-qx and (x*x/2-qx) is EXACT here, and the
+- *	   magnitude of the latter is at least a quarter of x*x/2,
+- *	   thus, reducing the rounding error in the subtraction.
++ *      1. Since cos(-x) = cos(x), we need only to consider positive x.
++ *      2. if x < 2^-27 (hx<0x3e400000 0), return 1 with inexact if x!=0.
++ *      3. cos(x) is approximated by a polynomial of degree 14 on
++ *         [0,pi/4]
++ *                                       4            14
++ *              cos(x) ~ 1 - x*x/2 + C1*x + ... + C6*x
++ *         where the remez error is
++ *
++ *      |              2     4     6     8     10    12     14 |     -58
++ *      |cos(x)-(1-.5*x +C1*x +C2*x +C3*x +C4*x +C5*x  +C6*x  )| <= 2
++ *      |                                                      |
++ *
++ *                     4     6     8     10    12     14
++ *      4. let r = C1*x +C2*x +C3*x +C4*x +C5*x  +C6*x  , then
++ *             cos(x) = 1 - x*x/2 + r
++ *         since cos(x+y) ~ cos(x) - sin(x)*y
++ *                        ~ cos(x) - x*y,
++ *         a correction term is necessary in cos(x) and hence
++ *              cos(x+y) = 1 - (x*x/2 - (r - x*y))
++ *         For better accuracy when x > 0.3, let qx = |x|/4 with
++ *         the last 32 bits mask off, and if x > 0.78125, let qx = 0.28125.
++ *         Then
++ *              cos(x+y) = (1-qx) - ((x*x/2-qx) - (r-x*y)).
++ *         Note that 1-qx and (x*x/2-qx) is EXACT here, and the
++ *         magnitude of the latter is at least a quarter of x*x/2,
++ *         thus, reducing the rounding error in the subtraction.
+  */
+ 
+ static const double
+@@ -641,19 +638,19 @@
+ {
+   double a,hz,z,r,qx;
+   int ix;
+-  ix = __HI(x)&0x7fffffff;	/* ix = |x|'s high word*/
+-  if(ix<0x3e400000) {			/* if x < 2**27 */
+-    if(((int)x)==0) return one;		/* generate inexact */
++  ix = __HI(x)&0x7fffffff;      /* ix = |x|'s high word*/
++  if(ix<0x3e400000) {                   /* if x < 2**27 */
++    if(((int)x)==0) return one;         /* generate inexact */
+   }
+   z  = x*x;
+   r  = z*(C1+z*(C2+z*(C3+z*(C4+z*(C5+z*C6)))));
+-  if(ix < 0x3FD33333) 			/* if |x| < 0.3 */
++  if(ix < 0x3FD33333)                   /* if |x| < 0.3 */
+     return one - (0.5*z - (z*r - x*y));
+   else {
+-    if(ix > 0x3fe90000) {		/* x > 0.78125 */
++    if(ix > 0x3fe90000) {               /* x > 0.78125 */
+       qx = 0.28125;
+     } else {
+-      __HI(qx) = ix-0x00200000;	/* x/4 */
++      __HI(qx) = ix-0x00200000; /* x/4 */
+       __LO(qx) = 0;
+     }
+     hz = 0.5*z-qx;
+@@ -729,16 +726,16 @@
+         if (iy == 1)
+           return x;
+         else {    /* compute -1 / (x+y) carefully */
+-	  double a, t;
+-          
+-	  z = w = x + y;
+-	  __LO(z) = 0;
+-	  v = y - (z - x);
+-	  t = a = -one / w;
+-	  __LO(t) = 0;
+-	  s = one + t * z;
+-	  return t + a * (s + t * v);
+-	}
++          double a, t;
++
++          z = w = x + y;
++          __LO(z) = 0;
++          v = y - (z - x);
++          t = a = -one / w;
++          __LO(t) = 0;
++          s = one + t * z;
++          return t + a * (s + t * v);
++        }
+       }
+     }
+   }
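
The __LO(z) = 0 stores above are a double-double style split: zeroing the low word leaves a high part whose subsequent products and subtractions are exact, which is how this -1/(x+y) path wins back the division's rounding error. The splitting step on its own, as a sketch assuming 64-bit IEEE doubles:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Clear the low 32 bits of the double (the __LO(z) = 0 trick). */
    static double hi_part(double d) {
      uint64_t bits;
      memcpy(&bits, &d, sizeof bits);
      bits &= 0xffffffff00000000ULL;
      memcpy(&d, &bits, sizeof d);
      return d;
    }

    int main(void) {
      double w = 1.0 / 3.0;
      double z = hi_part(w);   /* ~21 significand bits of w */
      double tail = w - z;     /* exact; no rounding in this subtraction */
      printf("%a = %a + %a\n", w, z, tail);
      return 0;
    }
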
+@@ -766,7 +763,7 @@
+   }
+   if(iy==1) return w;
+   else {          /* if allow error up to 2 ulp,
+-		     simply return -1.0/(x+r) here */
++                     simply return -1.0/(x+r) here */
+     /*  compute -1.0/(x+r) accurately */
+     double a,t;
+     z  = w;
+@@ -790,22 +787,22 @@
+  * Return sine function of x.
+  *
+  * kernel function:
+- *	__kernel_sin		... sine function on [-pi/4,pi/4]
+- *	__kernel_cos		... cose function on [-pi/4,pi/4]
+- *	__ieee754_rem_pio2	... argument reduction routine
++ *      __kernel_sin            ... sine function on [-pi/4,pi/4]
++ *      __kernel_cos            ... cosine function on [-pi/4,pi/4]
++ *      __ieee754_rem_pio2      ... argument reduction routine
+  *
+  * Method.
+  *      Let S,C and T denote the sin, cos and tan respectively on
+- *	[-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
+- *	in [-pi/4 , +pi/4], and let n = k mod 4.
+- *	We have
++ *      [-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
++ *      in [-pi/4 , +pi/4], and let n = k mod 4.
++ *      We have
+  *
+  *          n        sin(x)      cos(x)        tan(x)
+  *     ----------------------------------------------------------
+- *	    0	       S	   C		 T
+- *	    1	       C	  -S		-1/T
+- *	    2	      -S	  -C		 T
+- *	    3	      -C	   S		-1/T
++ *          0          S           C             T
++ *          1          C          -S            -1/T
++ *          2         -S          -C             T
++ *          3         -C           S            -1/T
+  *     ----------------------------------------------------------
+  *
+  * Special cases:
+@@ -814,7 +811,7 @@
+  *      trig(NaN)    is that NaN;
+  *
+  * Accuracy:
+- *	TRIG(x) returns trig(x) nearly rounded
++ *      TRIG(x) returns trig(x) nearly rounded
+  */
+ 
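
The quadrant table above becomes a four-way dispatch on n = k mod 4 once the argument is reduced to [-pi/4, pi/4]. A sketch of the sin column, with libm's sin/cos standing in for __kernel_sin/__kernel_cos, POSIX's M_PI assumed, and a crude one-step reduction standing in for __ieee754_rem_pio2:

    #include <math.h>
    #include <stdio.h>

    static double sin_by_quadrant(double r, int n) {
      switch (n & 3) {          /* S, C, -S, -C per the table above */
        case 0:  return  sin(r);
        case 1:  return  cos(r);
        case 2:  return -sin(r);
        default: return -cos(r);
      }
    }

    int main(void) {
      double x = 10.0;
      int    k = (int)nearbyint(x / (M_PI / 2));
      double r = x - k * (M_PI / 2);   /* crude reduction */
      printf("%.12f\n%.12f\n", sin_by_quadrant(r, k), sin(x));
      return 0;
    }
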
+ JRT_LEAF(jdouble, SharedRuntime::dsin(jdouble x))
+@@ -848,22 +845,22 @@
+  * Return cosine function of x.
+  *
+  * kernel function:
+- *	__kernel_sin		... sine function on [-pi/4,pi/4]
+- *	__kernel_cos		... cosine function on [-pi/4,pi/4]
+- *	__ieee754_rem_pio2	... argument reduction routine
++ *      __kernel_sin            ... sine function on [-pi/4,pi/4]
++ *      __kernel_cos            ... cosine function on [-pi/4,pi/4]
++ *      __ieee754_rem_pio2      ... argument reduction routine
+  *
+  * Method.
+  *      Let S,C and T denote the sin, cos and tan respectively on
+- *	[-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
+- *	in [-pi/4 , +pi/4], and let n = k mod 4.
+- *	We have
++ *      [-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
++ *      in [-pi/4 , +pi/4], and let n = k mod 4.
++ *      We have
+  *
+  *          n        sin(x)      cos(x)        tan(x)
+  *     ----------------------------------------------------------
+- *	    0	       S	   C		 T
+- *	    1	       C	  -S		-1/T
+- *	    2	      -S	  -C		 T
+- *	    3	      -C	   S		-1/T
++ *          0          S           C             T
++ *          1          C          -S            -1/T
++ *          2         -S          -C             T
++ *          3         -C           S            -1/T
+  *     ----------------------------------------------------------
+  *
+  * Special cases:
+@@ -872,7 +869,7 @@
+  *      trig(NaN)    is that NaN;
+  *
+  * Accuracy:
+- *	TRIG(x) returns trig(x) nearly rounded
++ *      TRIG(x) returns trig(x) nearly rounded
+  */
+ 
+ JRT_LEAF(jdouble, SharedRuntime::dcos(jdouble x))
+@@ -950,7 +947,7 @@
+   else {
+     n = __ieee754_rem_pio2(x,y);
+     return __kernel_tan(y[0],y[1],1-((n&1)<<1)); /*   1 -- n even
+-	     					     -1 -- n odd */
++                                                     -1 -- n odd */
+   }
+ JRT_END
+ 
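
The 1-((n&1)<<1) argument passed to __kernel_tan above is a branch-free sign select: even n gives +1 (tan), odd n gives -1 (the -1/T rows of the quadrant table). A one-line check:

    #include <assert.h>

    int main(void) {
      for (int n = 0; n < 8; n++)
        assert(1 - ((n & 1) << 1) == (n % 2 == 0 ? 1 : -1));
      return 0;
    }
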
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/signature.cpp openjdk/hotspot/src/share/vm/runtime/signature.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/signature.cpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/signature.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)signature.cpp	1.43 07/05/05 17:06:56 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -82,21 +79,21 @@
+   switch(_signature->byte_at(_index)) {
+     case 'B': do_byte  (); if (_parameter_index < 0 ) _return_type = T_BYTE;
+               _index++; size = T_BYTE_size   ; break;
+-    case 'C': do_char  (); if (_parameter_index < 0 ) _return_type = T_CHAR;    
++    case 'C': do_char  (); if (_parameter_index < 0 ) _return_type = T_CHAR;
+               _index++; size = T_CHAR_size   ; break;
+-    case 'D': do_double(); if (_parameter_index < 0 ) _return_type = T_DOUBLE;  
++    case 'D': do_double(); if (_parameter_index < 0 ) _return_type = T_DOUBLE;
+               _index++; size = T_DOUBLE_size ; break;
+-    case 'F': do_float (); if (_parameter_index < 0 ) _return_type = T_FLOAT;   
++    case 'F': do_float (); if (_parameter_index < 0 ) _return_type = T_FLOAT;
+               _index++; size = T_FLOAT_size  ; break;
+-    case 'I': do_int   (); if (_parameter_index < 0 ) _return_type = T_INT;     
++    case 'I': do_int   (); if (_parameter_index < 0 ) _return_type = T_INT;
+               _index++; size = T_INT_size    ; break;
+-    case 'J': do_long  (); if (_parameter_index < 0 ) _return_type = T_LONG;    
++    case 'J': do_long  (); if (_parameter_index < 0 ) _return_type = T_LONG;
+               _index++; size = T_LONG_size   ; break;
+-    case 'S': do_short (); if (_parameter_index < 0 ) _return_type = T_SHORT;   
++    case 'S': do_short (); if (_parameter_index < 0 ) _return_type = T_SHORT;
+               _index++; size = T_SHORT_size  ; break;
+-    case 'Z': do_bool  (); if (_parameter_index < 0 ) _return_type = T_BOOLEAN; 
++    case 'Z': do_bool  (); if (_parameter_index < 0 ) _return_type = T_BOOLEAN;
+               _index++; size = T_BOOLEAN_size; break;
+-    case 'V': do_void  (); if (_parameter_index < 0 ) _return_type = T_VOID;    
++    case 'V': do_void  (); if (_parameter_index < 0 ) _return_type = T_VOID;
+               _index++; size = T_VOID_size;  ; break;
+     case 'L':
+       { int begin = ++_index;
+@@ -167,7 +164,7 @@
+   uint64_t saved_fingerprint = fingerprint;
+ 
+   // Check for too many arguments
+-  if ( fingerprint == UCONST64(-1) ) { 
++  if ( fingerprint == UCONST64(-1) ) {
+     SignatureIterator::iterate_parameters();
+     return;
+   }
+@@ -179,23 +176,23 @@
+   while ( 1 ) {
+     switch ( fingerprint & parameter_feature_mask ) {
+       case bool_parm:
+- 	do_bool();
++        do_bool();
+         _parameter_index += T_BOOLEAN_size;
+         break;
+       case byte_parm:
+- 	do_byte();
++        do_byte();
+         _parameter_index += T_BYTE_size;
+         break;
+       case char_parm:
+- 	do_char();
++        do_char();
+         _parameter_index += T_CHAR_size;
+         break;
+       case short_parm:
+- 	do_short();
++        do_short();
+         _parameter_index += T_SHORT_size;
+         break;
+       case int_parm:
+- 	do_int();
++        do_int();
+         _parameter_index += T_INT_size;
+         break;
+       case obj_parm:
+@@ -334,7 +331,7 @@
+ symbolOop SignatureStream::as_symbol_or_null() {
+   // Create a symbol from for string _begin _end
+   ResourceMark rm;
+- 
++
+   int begin = _begin;
+   int end   = _end;
+ 
+@@ -390,7 +387,7 @@
+ bool SignatureVerifier::is_valid_type_signature(symbolHandle sig) {
+   const char* type_sig = (const char*)sig->bytes();
+   ssize_t len = sig->utf8_length();
+-  return (type_sig != NULL && len >= 1 && 
++  return (type_sig != NULL && len >= 1 &&
+           (is_valid_type(type_sig, len) == len));
+ }
+ 
+@@ -406,10 +403,10 @@
+     return -1;
+   }
+   switch (type[index]) {
+-    case 'B': case 'C': case 'D': case 'F': case 'I': 
+-    case 'J': case 'S': case 'Z': case 'V': 
++    case 'B': case 'C': case 'D': case 'F': case 'I':
++    case 'J': case 'S': case 'Z': case 'V':
+       return index + 1;
+-    case 'L': 
++    case 'L':
+       for (index = index + 1; index < limit; ++index) {
+         char c = type[index];
+         if (c == ';') {
+@@ -421,13 +418,13 @@
+       }
+       // fall through
+     default: ; // fall through
+-  } 
++  }
+   return -1;
+ }
+ 
+ bool SignatureVerifier::invalid_name_char(char c) {
+   switch (c) {
+-    case '\0': case '.': case ';': case '[': 
++    case '\0': case '.': case ';': case '[':
+       return true;
+     default:
+       return false;
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/signature.hpp openjdk/hotspot/src/share/vm/runtime/signature.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/signature.hpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/signature.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)signature.hpp	1.50 07/05/05 17:06:56 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // SignatureIterators iterate over a Java signature (or parts of it).
+@@ -47,7 +44,7 @@
+   int          _index;                 // the current character index (only valid during iteration)
+   int          _parameter_index;       // the current parameter index (0 outside iteration phase)
+   BasicType    _return_type;
+- 
++
+   void expect(char c);
+   void skip_optional_size();
+   int  parse_type();                   // returns the parameter size in words (0 for void)
+@@ -58,7 +55,7 @@
+   // bit field form of the signature generated by the
+   // Fingerprinter.
+   enum {
+-    static_feature_size    = 1,  
++    static_feature_size    = 1,
+     result_feature_size    = 4,
+     result_feature_mask    = 0xF,
+     parameter_feature_size = 4,
+@@ -68,16 +65,16 @@
+       byte_parm            = 2,
+       char_parm            = 3,
+       short_parm           = 4,
+-      int_parm             = 5,  
+-      long_parm            = 6,	
++      int_parm             = 5,
++      long_parm            = 6,
+       float_parm           = 7,
+       double_parm          = 8,
+       obj_parm             = 9,
+-      done_parm            = 10,  // marker for end of parameters 
++      done_parm            = 10,  // marker for end of parameters
+ 
+     // max parameters is wordsize minus
+     //    The sign bit, termination field, the result and static bit fields
+-    max_size_of_parameters = (BitsPerLong-1 - 
++    max_size_of_parameters = (BitsPerLong-1 -
+                               result_feature_size - parameter_feature_size -
+                               static_feature_size) / parameter_feature_size
+   };
+@@ -90,10 +87,10 @@
+   // Iteration
+   void dispatch_field();               // dispatches once for field signatures
+   void iterate_parameters();           // iterates over parameters only
+-  void iterate_parameters( uint64_t fingerprint );           
++  void iterate_parameters( uint64_t fingerprint );
+   void iterate_returntype();           // iterates over returntype only
+   void iterate();                      // iterates over whole signature
+-  // Returns the word index of the current parameter; 
++  // Returns the word index of the current parameter;
+   int  parameter_index() const         { return _parameter_index; }
+   bool is_return_type() const          { return parameter_index() < 0; }
+   BasicType get_ret_type() const       { return _return_type; }
+@@ -141,7 +138,7 @@
+ class SignatureInfo: public SignatureIterator {
+  protected:
+   bool      _has_iterated;             // need this because iterate cannot be called in constructor (set is virtual!)
+-  bool      _has_iterated_return;      
++  bool      _has_iterated_return;
+   int       _size;
+ 
+   void lazy_iterate_parameters()       { if (!_has_iterated) { iterate_parameters(); _has_iterated = true; } }
+@@ -177,7 +174,7 @@
+  private:
+   void set(int size, BasicType type)   { _size += size; }
+  public:
+-  ArgumentSizeComputer(symbolHandle signature) : SignatureInfo(signature) {} 
++  ArgumentSizeComputer(symbolHandle signature) : SignatureInfo(signature) {}
+ 
+   int       size()                     { lazy_iterate_parameters(); return _size; }
+ };
+@@ -187,7 +184,7 @@
+  private:
+   void set(int size, BasicType type)   { _size ++; }
+  public:
+-  ArgumentCount(symbolHandle signature) : SignatureInfo(signature) {} 
++  ArgumentCount(symbolHandle signature) : SignatureInfo(signature) {}
+ 
+   int       size()                     { lazy_iterate_parameters(); return _size; }
+ };
+@@ -224,7 +221,7 @@
+   void do_float()   { _fingerprint |= (((uint64_t)float_parm) << _shift_count); _shift_count += parameter_feature_size; }
+   void do_double()  { _fingerprint |= (((uint64_t)double_parm) << _shift_count); _shift_count += parameter_feature_size; }
+ 
+-  void do_object(int begin, int end)  { _fingerprint |= (((uint64_t)obj_parm) << _shift_count); _shift_count += parameter_feature_size; } 
++  void do_object(int begin, int end)  { _fingerprint |= (((uint64_t)obj_parm) << _shift_count); _shift_count += parameter_feature_size; }
+   void do_array (int begin, int end)  { _fingerprint |= (((uint64_t)obj_parm) << _shift_count); _shift_count += parameter_feature_size; }
+ 
+   void do_void()    { ShouldNotReachHere(); }
+@@ -269,10 +266,10 @@
+ class NativeSignatureIterator: public SignatureIterator {
+  private:
+   methodHandle _method;
+-// We need seperate JNI and Java offset values because in 64 bit mode, 
+-// the argument offsets are not in sync with the Java stack.  
++// We need separate JNI and Java offset values because in 64 bit mode,
++// the argument offsets are not in sync with the Java stack.
+ // For example a long takes up 1 "C" stack entry but 2 Java stack entries.
+-  int          _offset;                // The java stack offset 
++  int          _offset;                // The java stack offset
+   int          _prepended;             // number of prepended JNI parameters (1 JNIEnv, plus 1 mirror if static)
+   int          _jni_offset;            // the current parameter offset, starting with 0
+ 
+@@ -293,7 +290,7 @@
+ #else
+   void do_long  ()                     { pass_long();   _jni_offset += 2; _offset += 2; }
+ #endif
+-  void do_void  ()                     { ShouldNotReachHere();        			     }
++  void do_void  ()                     { ShouldNotReachHere();                               }
+   void do_object(int begin, int end)   { pass_object(); _jni_offset++; _offset++;        }
+   void do_array (int begin, int end)   { pass_object(); _jni_offset++; _offset++;        }
+ 
+@@ -335,7 +332,7 @@
+   void iterate() { iterate(Fingerprinter(method()).fingerprint());
+   }
+ 
+-  
++
+   // Optimized path if we have the bitvector form of signature
+   void iterate( uint64_t fingerprint ) {
+ 
+@@ -370,7 +367,7 @@
+       _end = len + 1;
+       return;
+     }
+-  
++
+     _begin = _end;
+     int t = sig->byte_at(_begin);
+     switch (t) {
+@@ -396,11 +393,11 @@
+     next();
+   }
+ 
+-  bool is_object() const;	                 // True if this argument is an object
+-  bool is_array() const;	                 // True if this argument is an array
++  bool is_object() const;                        // True if this argument is an object
++  bool is_array() const;                         // True if this argument is an array
+   BasicType type() const                         { return _type; }
+   symbolOop as_symbol(TRAPS);
+-  
++
+   // return same as_symbol except allocation of new symbols is avoided.
+   symbolOop as_symbol_or_null();
+ };
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/stackValueCollection.cpp openjdk/hotspot/src/share/vm/runtime/stackValueCollection.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/stackValueCollection.cpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/stackValueCollection.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)stackValueCollection.cpp	1.18 07/05/05 17:06:53 JVM"
+-#endif
+ /*
+  * Copyright 2001-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -40,7 +37,7 @@
+ #else
+   union {
+     jlong jl;
+-    jint  array[2]; 
++    jint  array[2];
+   } value;
+   // Interpreter stack is reversed in memory:
+   // low memory location is in higher java local slot.
+@@ -66,7 +63,7 @@
+ #else
+   union {
+     jdouble jd;
+-    jint    array[2]; 
++    jint    array[2];
+   } value;
+   // Interpreter stack is reversed in memory:
+   // low memory location is in higher java local slot.
+@@ -88,7 +85,7 @@
+ #else
+   union {
+     jlong jl;
+-    jint  array[2]; 
++    jint  array[2];
+   } x;
+   // Interpreter stack is reversed in memory:
+   // low memory location is in higher java local slot.
+@@ -106,7 +103,7 @@
+ #ifdef _LP64
+   union {
+     intptr_t jd;
+-    jint    array[2]; 
++    jint    array[2];
+   } val;
+   // Interpreter stores 32 bit floats in first half of 64 bit word.
+   val.array[0] = *(jint*)(&value);
+@@ -123,7 +120,7 @@
+ #else
+   union {
+     jdouble jd;
+-    jint    array[2]; 
++    jint    array[2];
+   } x;
+   // Interpreter stack is reversed in memory:
+   // low memory location is in higher java local slot.
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/stackValueCollection.hpp openjdk/hotspot/src/share/vm/runtime/stackValueCollection.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/stackValueCollection.hpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/stackValueCollection.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)stackValueCollection.hpp	1.14 07/05/05 17:06:58 JVM"
+-#endif
+ /*
+  * Copyright 2001-2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class StackValueCollection : public ResourceObj {
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/stackValue.cpp openjdk/hotspot/src/share/vm/runtime/stackValue.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/stackValue.cpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/stackValue.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)stackValue.cpp	1.27 07/05/05 17:06:58 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,19 +19,149 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_stackValue.cpp.incl"
+ 
++StackValue* StackValue::create_stack_value(const frame* fr, const RegisterMap* reg_map, ScopeValue* sv) {
++  if (sv->is_location()) {
++    // Stack or register value
++    Location loc = ((LocationValue *)sv)->location();
++
++#ifdef SPARC
++    // %%%%% Callee-save floats will NOT be working on a Sparc until we
++    // handle the case of 2 floats in a single double register.
++    assert( !(loc.is_register() && loc.type() == Location::float_in_dbl), "Sparc does not handle callee-save floats yet" );
++#endif // SPARC
++
++    // First find address of value
++
++    address value_addr = loc.is_register()
++      // Value was in a callee-save register
++      ? reg_map->location(VMRegImpl::as_VMReg(loc.register_number()))
++      // Else value was directly saved on the stack. The frame's original stack pointer,
++      // before any extension by its callee (due to Compiler1 linkage on SPARC), must be used.
++      : ((address)fr->unextended_sp()) + loc.stack_offset();
++
++    // Then package it right depending on type
++    // Note: the transfer of the data is thru a union that contains
++    // an intptr_t. This is because an interpreter stack slot is
++    // really an intptr_t. The use of a union containing an intptr_t
++    // ensures that on a 64 bit platform we have proper alignment
++    // and that we store the value where the interpreter will expect
++    // to find it (i.e. proper endian). Similarly on a 32bit platform
++    // using the intptr_t ensures that when a value is larger than
++    // a stack slot (jlong/jdouble) that we capture the proper part
++    // of the value for the stack slot in question.
++    //
++    switch( loc.type() ) {
++    case Location::float_in_dbl: { // Holds a float in a double register?
++      // The callee has no clue whether the register holds a float,
++      // double or is unused.  He always saves a double.  Here we know
++      // a double was saved, but we only want a float back.  Narrow the
++      // saved double to the float that the JVM wants.
++      assert( loc.is_register(), "floats always saved to stack in 1 word" );
++      union { intptr_t p; jfloat jf; } value;
++      value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
++      value.jf = (jfloat) *(jdouble*) value_addr;
++      return new StackValue(value.p); // 64-bit high half is stack junk
++    }
++    case Location::int_in_long: { // Holds an int in a long register?
++      // The callee has no clue whether the register holds an int,
++      // long or is unused.  He always saves a long.  Here we know
++      // a long was saved, but we only want an int back.  Narrow the
++      // saved long to the int that the JVM wants.
++      assert( loc.is_register(), "ints always saved to stack in 1 word" );
++      union { intptr_t p; jint ji;} value;
++      value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
++      value.ji = (jint) *(jlong*) value_addr;
++      return new StackValue(value.p); // 64-bit high half is stack junk
++    }
++#ifdef _LP64
++    case Location::dbl:
++      // Double value in an aligned adjacent pair
++      return new StackValue(*(intptr_t*)value_addr);
++    case Location::lng:
++      // Long   value in an aligned adjacent pair
++      return new StackValue(*(intptr_t*)value_addr);
++#endif
++    case Location::oop: {
++      Handle h(*(oop *)value_addr); // Wrap a handle around the oop
++      return new StackValue(h);
++    }
++    case Location::addr: {
++      ShouldNotReachHere(); // both C1 and C2 now inline jsrs
++    }
++    case Location::normal: {
++      // Just copy all other bits straight through
++      union { intptr_t p; jint ji;} value;
++      value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
++      value.ji = *(jint*)value_addr;
++      return new StackValue(value.p);
++    }
++    case Location::invalid:
++      return new StackValue();
++    default:
++      ShouldNotReachHere();
++    }
++
++  } else if (sv->is_constant_int()) {
++    // Constant int: treat same as register int.
++    union { intptr_t p; jint ji;} value;
++    value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
++    value.ji = (jint)((ConstantIntValue*)sv)->value();
++    return new StackValue(value.p);
++  } else if (sv->is_constant_oop()) {
++    // constant oop
++    return new StackValue(((ConstantOopReadValue *)sv)->value());
++#ifdef _LP64
++  } else if (sv->is_constant_double()) {
++    // Constant double in a single stack slot
++    union { intptr_t p; double d; } value;
++    value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
++    value.d = ((ConstantDoubleValue *)sv)->value();
++    return new StackValue(value.p);
++  } else if (sv->is_constant_long()) {
++    // Constant long in a single stack slot
++    union { intptr_t p; jlong jl; } value;
++    value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
++    value.jl = ((ConstantLongValue *)sv)->value();
++    return new StackValue(value.p);
++#endif
++  } else if (sv->is_object()) {
++    return new StackValue(((ObjectValue *)sv)->value());
++  }
++
++  // Unknown ScopeValue type
++  ShouldNotReachHere();
++  return new StackValue((intptr_t) 0);   // dummy
++}
++
++
++BasicLock* StackValue::resolve_monitor_lock(const frame* fr, Location location) {
++  assert(location.is_stack(), "for now we only look at the stack");
++  int word_offset = location.stack_offset() / wordSize;
++  // (stack picture)
++  // high: [     ]  word_offset + 1
++  // low   [     ]  word_offset
++  //
++  // sp->  [     ]  0
++  // the word_offset is the distance from the stack pointer to the lowest address
++  // The frame's original stack pointer, before any extension by its callee
++  // (due to Compiler1 linkage on SPARC), must be used.
++  return (BasicLock*) (fr->unextended_sp() + word_offset);
++}
++
++
+ #ifndef PRODUCT
+ 
+ void StackValue::print_on(outputStream* st) const {
+   switch(_type) {
+     case T_INT:
+       st->print("%d (int) %f (float) %x (hex)",  *(int *)&_i, *(float *)&_i,  *(int *)&_i);
+-      break;    
++      break;
+ 
+     case T_OBJECT:
+      _o()->print_value_on(st);
+@@ -42,7 +169,7 @@
+      break;
+ 
+     case T_CONFLICT:
+-     st->print("conflict"); 
++     st->print("conflict");
+      break;
+ 
+     default:
+@@ -51,4 +178,3 @@
+ }
+ 
+ #endif
+-
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/stackValue.hpp openjdk/hotspot/src/share/vm/runtime/stackValue.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/stackValue.hpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/stackValue.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)stackValue.hpp	1.36 07/05/05 17:06:58 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class StackValue : public ResourceObj {
+@@ -84,19 +81,21 @@
+ 
+   bool equal(StackValue *value) {
+     if (_type != value->_type) return false;
+-    if (_type == T_OBJECT) 
++    if (_type == T_OBJECT)
+       return (_o == value->_o);
+     else {
+       assert(_type == T_INT, "sanity check");
+       // [phh] compare only low addressed portions of intptr_t slots
+-      return (*(int *)&_i == *(int *)&value->_i);    
++      return (*(int *)&_i == *(int *)&value->_i);
+     }
+   }
+ 
++  static StackValue* create_stack_value(const frame* fr, const RegisterMap* reg_map, ScopeValue* sv);
++  static BasicLock*  resolve_monitor_lock(const frame* fr, Location location);
++
+ #ifndef PRODUCT
+  public:
+   // Printing
+   void print_on(outputStream* st) const;
+ #endif
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/statSampler.cpp openjdk/hotspot/src/share/vm/runtime/statSampler.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/statSampler.cpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/statSampler.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)statSampler.cpp	1.24 07/05/05 17:06:58 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -77,7 +74,7 @@
+ 
+     // start up the periodic task
+     _task = new StatSamplerTask(PerfDataSamplingInterval);
+-    _task->enroll();    
++    _task->enroll();
+   }
+ }
+ 
+@@ -158,7 +155,7 @@
+   //   }
+   //   _sampled = PerfDataManager::sampled();
+   // }
+-  
++
+   assert(_sampled != NULL, "list not initialized");
+ 
+   sample_data(_sampled);
+@@ -247,7 +244,7 @@
+   { property_counters_uu, SUN_PROPERTY },
+   { NULL, SUN_PROPERTY }
+ };
+-  
++
+ 
+ /*
+  * Method to create PerfData string instruments that contain the values
+@@ -317,7 +314,7 @@
+ 
+   // the Java VM Internal version string
+   PerfDataManager::create_string_constant(SUN_RT, "internalVersion",
+-                                         VM_Version::internal_vm_info_string(), 
++                                         VM_Version::internal_vm_info_string(),
+                                          CHECK);
+ 
+   // create sampled instrumentation objects
+@@ -360,4 +357,3 @@
+ 
+   StatSampler::destroy();
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/statSampler.hpp openjdk/hotspot/src/share/vm/runtime/statSampler.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/statSampler.hpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/statSampler.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)statSampler.hpp	1.12 07/05/05 17:06:58 JVM"
+-#endif
+ /*
+  * Copyright 2001-2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class StatSamplerTask;
+@@ -48,7 +45,7 @@
+     static void create_misc_perfdata();
+     static void create_sampled_perfdata();
+     static void sample_data(PerfDataList* list);
+-    static const char* get_system_property(const char* name, TRAPS); 
++    static const char* get_system_property(const char* name, TRAPS);
+     static void create_system_property_instrumentation(TRAPS);
+ 
+   public:
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/stubCodeGenerator.cpp openjdk/hotspot/src/share/vm/runtime/stubCodeGenerator.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/stubCodeGenerator.cpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/stubCodeGenerator.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)stubCodeGenerator.cpp	1.30 07/05/17 16:06:31 JVM"
+-#endif
+ /*
+  * Copyright 1997-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -146,4 +143,3 @@
+     JvmtiExport::post_dynamic_code_generated(_cdesc->name(), _cdesc->begin(), _cdesc->end());
+   }
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/stubCodeGenerator.hpp openjdk/hotspot/src/share/vm/runtime/stubCodeGenerator.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/stubCodeGenerator.hpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/stubCodeGenerator.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)stubCodeGenerator.hpp	1.25 07/05/17 16:06:33 JVM"
+-#endif
+ /*
+  * Copyright 1997-2000 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // All the basic framework for stubcode generation/debugging/printing.
+@@ -69,10 +66,10 @@
+     _next           = _list;
+     _group          = group;
+     _name           = name;
+-    _index          = ++_count;	// (never zero)
++    _index          = ++_count; // (never zero)
+     _begin          = begin;
+     _end            = NULL;
+-    _list           = this;    
++    _list           = this;
+   };
+ 
+   const char* group() const                      { return _group; }
+@@ -82,7 +79,7 @@
+   address     end() const                        { return _end; }
+   int         size_in_bytes() const              { return _end - _begin; }
+   bool        contains(address pc) const         { return _begin <= pc && pc < _end; }
+-  void        print();  
++  void        print();
+ };
+ 
+ // The base class for all stub-generating code generators.
+@@ -115,10 +112,9 @@
+  protected:
+   StubCodeGenerator* _cgen;
+   StubCodeDesc*      _cdesc;
+-  
++
+  public:
+   StubCodeMark(StubCodeGenerator* cgen, const char* group, const char* name);
+   ~StubCodeMark();
+ 
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/stubRoutines.cpp openjdk/hotspot/src/share/vm/runtime/stubRoutines.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/stubRoutines.cpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/stubRoutines.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)stubRoutines.cpp	1.115 07/07/19 12:19:08 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -43,7 +40,6 @@
+ address StubRoutines::_catch_exception_entry                    = NULL;
+ address StubRoutines::_forward_exception_entry                  = NULL;
+ address StubRoutines::_throw_AbstractMethodError_entry          = NULL;
+-address StubRoutines::_throw_IncompatibleClassChangeError_entry = NULL;
+ address StubRoutines::_throw_ArithmeticException_entry          = NULL;
+ address StubRoutines::_throw_NullPointerException_entry         = NULL;
+ address StubRoutines::_throw_NullPointerException_at_call_entry = NULL;
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/stubRoutines.hpp openjdk/hotspot/src/share/vm/runtime/stubRoutines.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/stubRoutines.hpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/stubRoutines.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)stubRoutines.hpp	1.118 07/07/19 12:19:07 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // StubRoutines provides entry points to assembly routines used by
+@@ -83,11 +80,10 @@
+   static address _verify_oop_subroutine_entry;
+ 
+   static address _call_stub_return_address;                // the return PC, when returning to a call stub
+-  static address _call_stub_entry;  
++  static address _call_stub_entry;
+   static address _forward_exception_entry;
+   static address _catch_exception_entry;
+   static address _throw_AbstractMethodError_entry;
+-  static address _throw_IncompatibleClassChangeError_entry;
+   static address _throw_ArithmeticException_entry;
+   static address _throw_NullPointerException_entry;
+   static address _throw_NullPointerException_at_call_entry;
+@@ -163,8 +159,8 @@
+   }
+ 
+   // Debugging
+-  static jint    verify_oop_count()                        { return _verify_oop_count; }  
+-  static jint*   verify_oop_count_addr()                   { return &_verify_oop_count; }  
++  static jint    verify_oop_count()                        { return _verify_oop_count; }
++  static jint*   verify_oop_count_addr()                   { return &_verify_oop_count; }
+   // a subroutine for debugging the GC
+   static address verify_oop_subroutine_entry_address()    { return (address)&_verify_oop_subroutine_entry; }
+ 
+@@ -188,7 +184,6 @@
+   static address forward_exception_entry()                 { return _forward_exception_entry; }
+   // Implicit exceptions
+   static address throw_AbstractMethodError_entry()         { return _throw_AbstractMethodError_entry; }
+-  static address throw_IncompatibleClassChangeError_entry(){ return _throw_IncompatibleClassChangeError_entry; }
+   static address throw_ArithmeticException_entry()         { return _throw_ArithmeticException_entry; }
+   static address throw_NullPointerException_entry()        { return _throw_NullPointerException_entry; }
+   static address throw_NullPointerException_at_call_entry(){ return _throw_NullPointerException_at_call_entry; }
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/sweeper.cpp openjdk/hotspot/src/share/vm/runtime/sweeper.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/sweeper.cpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/sweeper.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)sweeper.cpp	1.39 07/05/05 17:06:50 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -37,9 +34,9 @@
+ jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
+ bool      NMethodSweeper::_rescan = false;
+ 
+-void NMethodSweeper::sweep() {   
++void NMethodSweeper::sweep() {
+   assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
+-  if (!MethodFlushing) return;  
++  if (!MethodFlushing) return;
+ 
+   // No need to synchronize access, since this is always executed at a
+   // safepoint.  If we aren't in the middle of scan and a rescan
+@@ -49,7 +46,7 @@
+   // Make sure CompiledIC_lock in unlocked, since we might update some
+   // inline caches. If it is, we just bail-out and try later.
+   if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;
+-  
++
+   // Check for restart
+   assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
+   if (_current == NULL) {
+@@ -72,22 +69,22 @@
+     tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
+   }
+ 
+-  // We want to visit all nmethods after NmethodSweepFraction invocations. 
++  // We want to visit all nmethods after NmethodSweepFraction invocations.
+   // If invocation is 1 we do the rest
+   int todo = CodeCache::nof_blobs();
+   if (_invocations != 1) {
+-    todo = (CodeCache::nof_blobs() - _seen) / _invocations;    
++    todo = (CodeCache::nof_blobs() - _seen) / _invocations;
+     _invocations--;
+   }
+-      
+-  for(int i = 0; i < todo && _current != NULL; i++) {      
++
++  for(int i = 0; i < todo && _current != NULL; i++) {
+     CodeBlob* next = CodeCache::next(_current); // Read next before we potentially delete current
+     if (_current->is_nmethod()) {
+-      process_nmethod((nmethod *)_current);      
+-    }    
++      process_nmethod((nmethod *)_current);
++    }
+     _seen++;
+     _current = next;
+-  }  
++  }
+   // Because we could stop on a codeBlob other than an nmethod we skip forward
+   // to the next nmethod (if any). codeBlobs other than nmethods can be freed
+   // async to us and make _current invalid while we sleep.
+@@ -108,8 +105,8 @@
+   }
+ }
+ 
+-   
+-void NMethodSweeper::process_nmethod(nmethod *nm) {  
++
++void NMethodSweeper::process_nmethod(nmethod *nm) {
+   // Skip methods that are currently referenced by the VM
+   if (nm->is_locked_by_vm()) {
+     // But still remember to clean-up inline caches for alive nmethods
+@@ -128,27 +125,27 @@
+     // there are no inline caches that referes to it.
+     if (nm->is_marked_for_reclamation()) {
+       assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
+-      nm->flush();      
++      nm->flush();
+     } else {
+       nm->mark_for_reclamation();
+       _rescan = true;
+-    }  
+-  } else if (nm->is_not_entrant()) {    
++    }
++  } else if (nm->is_not_entrant()) {
+     // If there is no current activations of this method on the
+     // stack we can safely convert it to a zombie method
+     if (nm->can_not_entrant_be_converted()) {
+       nm->make_zombie();
+       _rescan = true;
+     } else {
+-      // Still alive, clean up its inline caches 
+-      nm->cleanup_inline_caches();    
++      // Still alive, clean up its inline caches
++      nm->cleanup_inline_caches();
++      // we couldn't transition this nmethod so don't immediately
+       // request a rescan.  If this method stays on the stack for a
+       // long time we don't want to keep rescanning at every safepoint.
+       _not_entrant_seen_on_stack++;
+     }
+   } else if (nm->is_unloaded()) {
+-    // Unloaded code, just make it a zombie 
++    // Unloaded code, just make it a zombie
+     if (nm->is_osr_only_method()) {
+       // No inline caches will ever point to osr methods, so we can just remove it
+       nm->flush();
+@@ -159,6 +156,6 @@
+   } else {
+     assert(nm->is_alive(), "should be alive");
+     // Clean-up all inline caches that points to zombie/non-reentrant methods
+-    nm->cleanup_inline_caches();    
++    nm->cleanup_inline_caches();
+   }
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/sweeper.hpp openjdk/hotspot/src/share/vm/runtime/sweeper.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/sweeper.hpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/sweeper.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)sweeper.hpp	1.26 07/05/05 17:06:59 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // An NmethodSweeper is an incremental cleaner for:
+@@ -30,7 +27,7 @@
+ //    - reclamation of unreferences zombie nmethods
+ //
+ 
+-class NMethodSweeper : public AllStatic {  
++class NMethodSweeper : public AllStatic {
+   static long      _traversals;   // Stack traversal count
+   static CodeBlob* _current;      // Current nmethod
+   static int       _seen;         // Nof. nmethod we have currently processed in current pass of CodeCache
+@@ -42,9 +39,9 @@
+   static int       _not_entrant_seen_on_stack; // Number of not entrant nmethod were are still on stack
+ 
+ 
+-  static void process_nmethod(nmethod *nm);    
+- public:      
+-  static long traversal_count() { return _traversals; }  
++  static void process_nmethod(nmethod *nm);
++ public:
++  static long traversal_count() { return _traversals; }
+ 
+   static void sweep();  // Invoked at the end of each safepoint
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/synchronizer.cpp openjdk/hotspot/src/share/vm/runtime/synchronizer.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/synchronizer.cpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/synchronizer.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)synchronizer.cpp	1.110 07/05/26 16:19:45 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -41,9 +38,9 @@
+ // The interpreter and compilers contain specialized transliterated
+ // variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
+ // for instance.  If you make changes here, make sure to modify the
+-// interpreter, and both C1 and C2 fast-path inline locking code emission. 
++// interpreter, and both C1 and C2 fast-path inline locking code emission.
+ //
+-// TODO: merge the objectMonitor and synchronizer classes.  
++// TODO: merge the objectMonitor and synchronizer classes.
+ //
+ // -----------------------------------------------------------------------------
+ 
+@@ -52,19 +49,19 @@
+ // Only bother with this argument setup if dtrace is available
+ // TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.
+ 
+-HS_DTRACE_PROBE_DECL5(hotspot, monitor__wait, 
++HS_DTRACE_PROBE_DECL5(hotspot, monitor__wait,
+   jlong, uintptr_t, char*, int, long);
+-HS_DTRACE_PROBE_DECL4(hotspot, monitor__waited, 
++HS_DTRACE_PROBE_DECL4(hotspot, monitor__waited,
+   jlong, uintptr_t, char*, int);
+-HS_DTRACE_PROBE_DECL4(hotspot, monitor__notify, 
++HS_DTRACE_PROBE_DECL4(hotspot, monitor__notify,
+   jlong, uintptr_t, char*, int);
+-HS_DTRACE_PROBE_DECL4(hotspot, monitor__notifyAll, 
++HS_DTRACE_PROBE_DECL4(hotspot, monitor__notifyAll,
+   jlong, uintptr_t, char*, int);
+-HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__enter, 
++HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__enter,
+   jlong, uintptr_t, char*, int);
+-HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__entered, 
++HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__entered,
+   jlong, uintptr_t, char*, int);
+-HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__exit, 
++HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__exit,
+   jlong, uintptr_t, char*, int);
+ 
+ #define DTRACE_MONITOR_PROBE_COMMON(klassOop, thread)                      \
+@@ -75,7 +72,7 @@
+   if (klassname != NULL) {                                                 \
+     bytes = (char*)klassname->bytes();                                     \
+     len = klassname->utf8_length();                                        \
+-  }                                                                        
++  }
+ 
+ #define DTRACE_MONITOR_WAIT_PROBE(monitor, klassOop, thread, millis)       \
+   {                                                                        \
+@@ -106,35 +103,47 @@
+ // TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific
+ // ParkEvent instead.  Beware, however, that the JVMTI code
+ // knows about ObjectWaiters, so we'll have to reconcile that code.
+-// See next_waiter(), first_waiter(), etc. 
++// See next_waiter(), first_waiter(), etc.
+ 
+ class ObjectWaiter : public StackObj {
+  public:
+-  enum TStates { TS_UNDEF, TS_READY, TS_RUN, TS_WAIT, TS_ENTER, TS_CXQ } ; 
+-  enum Sorted  { PREPEND, APPEND, SORTED } ; 
++  enum TStates { TS_UNDEF, TS_READY, TS_RUN, TS_WAIT, TS_ENTER, TS_CXQ } ;
++  enum Sorted  { PREPEND, APPEND, SORTED } ;
+   ObjectWaiter * volatile _next;
+   ObjectWaiter * volatile _prev;
+   Thread*       _thread;
+   ParkEvent *   _event;
+-  volatile int  _notified ;    
+-  volatile TStates TState ; 
++  volatile int  _notified ;
++  volatile TStates TState ;
+   Sorted        _Sorted ;           // List placement disposition
++  bool          _active ;           // Contention monitoring is enabled
+  public:
+   ObjectWaiter(Thread* thread) {
+     _next     = NULL;
+     _prev     = NULL;
+     _notified = 0;
+-    TState    = TS_RUN ; 
++    TState    = TS_RUN ;
+     _thread   = thread;
+-    _event    = thread->_ParkEvent ; 
+-    assert (_event != NULL, "invariant") ; 
++    _event    = thread->_ParkEvent ;
++    _active   = false;
++    assert (_event != NULL, "invariant") ;
++  }
++
++  void wait_reenter_begin(ObjectMonitor *mon) {
++    JavaThread *jt = (JavaThread *)this->_thread;
++    _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon);
++  }
++
++  void wait_reenter_end(ObjectMonitor *mon) {
++    JavaThread *jt = (JavaThread *)this->_thread;
++    JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active);
+   }
+ };
+ 
+-enum ManifestConstants { 
++enum ManifestConstants {
+     ClearResponsibleAtSTW   = 0,
+     MaximumRecheckInterval  = 1000
+-} ; 
++} ;
+ 
+ 
+ #undef TEVENT
+@@ -146,74 +155,74 @@
+ #define TEVENT(nom) {;}
+ 
+ // Performance concern:
+-// OrderAccess::storestore() calls release() which STs 0 into the global volatile 
+-// OrderAccess::Dummy variable.  This store is unnecessary for correctness.  
++// OrderAccess::storestore() calls release() which STs 0 into the global volatile
++// OrderAccess::Dummy variable.  This store is unnecessary for correctness.
+ // Many threads STing into a common location causes considerable cache migration
+ // or "sloshing" on large SMP system.  As such, I avoid using OrderAccess::storestore()
+ // until it's repaired.  In some cases OrderAccess::fence() -- which incurs local
+ // latency on the executing processor -- is a better choice as it scales on SMP
+-// systems.  See http://blogs.sun.com/dave/entry/biased_locking_in_hotspot for a 
++// systems.  See http://blogs.sun.com/dave/entry/biased_locking_in_hotspot for a
+ // discussion of coherency costs.  Note that all our current reference platforms
+-// provide strong ST-ST order, so the issue is moot on IA32, x64, and SPARC. 
++// provide strong ST-ST order, so the issue is moot on IA32, x64, and SPARC.
+ //
+ // As a general policy we use "volatile" to control compiler-based reordering
+ // and explicit fences (barriers) to control for architectural reordering performed
+-// by the CPU(s) or platform.  
++// by the CPU(s) or platform.
+ 
+-static int  MBFence (int x) { OrderAccess::fence(); return x; } 
++static int  MBFence (int x) { OrderAccess::fence(); return x; }
+ 
+-struct SharedGlobals { 
+-    // These are highly shared mostly-read variables.  
++struct SharedGlobals {
++    // These are highly shared mostly-read variables.
+     // To avoid false-sharing they need to be the sole occupants of a $ line.
+     double padPrefix [8];
+-    volatile int stwRandom ; 
+-    volatile int stwCycle ; 
++    volatile int stwRandom ;
++    volatile int stwCycle ;
+ 
+     // Hot RW variables -- Sequester to avoid false-sharing
+-    double padSuffix [16]; 
+-    volatile int hcSequence ;   
+-    double padFinal [8] ; 
+-} ; 
++    double padSuffix [16];
++    volatile int hcSequence ;
++    double padFinal [8] ;
++} ;
+ 
+-static SharedGlobals GVars ; 
++static SharedGlobals GVars ;
+ 
+ 
+ // Tunables ...
+ // The knob* variables are effectively final.  Once set they should
+-// never be modified hence.
++// never be modified thereafter.  Consider using __read_mostly with GCC.
+ 
+ static int Knob_LogSpins           = 0 ;       // enable jvmstat tally for spins
+-static int Knob_HandOff            = 0 ;      
+-static int Knob_Verbose            = 0 ; 
++static int Knob_HandOff            = 0 ;
++static int Knob_Verbose            = 0 ;
+ static int Knob_ReportSettings     = 0 ;
+ 
+-static int Knob_SpinLimit          = 5000 ;    // derived by an external tool - 
++static int Knob_SpinLimit          = 5000 ;    // derived by an external tool -
+ static int Knob_SpinBase           = 0 ;       // Floor AKA SpinMin
+ static int Knob_SpinBackOff        = 0 ;       // spin-loop backoff
+ static int Knob_CASPenalty         = -1 ;      // Penalty for failed CAS
+ static int Knob_OXPenalty          = -1 ;      // Penalty for observed _owner change
+ static int Knob_SpinSetSucc        = 1 ;       // spinners set the _succ field
+-static int Knob_SpinEarly          = 1 ; 
++static int Knob_SpinEarly          = 1 ;
+ static int Knob_SuccEnabled        = 1 ;       // futile wake throttling
+ static int Knob_SuccRestrict       = 0 ;       // Limit successors + spinners to at-most-one
+ static int Knob_MaxSpinners        = -1 ;      // Should be a function of # CPUs
+ static int Knob_Bonus              = 100 ;     // spin success bonus
+ static int Knob_BonusB             = 100 ;     // spin success bonus
+-static int Knob_Penalty            = 200 ;     // spin failure penalty 
+-static int Knob_Poverty            = 1000 ;    
++static int Knob_Penalty            = 200 ;     // spin failure penalty
++static int Knob_Poverty            = 1000 ;
+ static int Knob_SpinAfterFutile    = 1 ;       // Spin after returning from park()
+-static int Knob_FixedSpin          = 0 ; 
++static int Knob_FixedSpin          = 0 ;
+ static int Knob_OState             = 3 ;       // Spinner checks thread state of _owner
+-static int Knob_UsePause           = 1 ; 
+-static int Knob_ExitPolicy         = 0 ; 
++static int Knob_UsePause           = 1 ;
++static int Knob_ExitPolicy         = 0 ;
+ static int Knob_PreSpin            = 10 ;      // 20-100 likely better
+-static int Knob_ResetEvent         = 0 ; 
+-static int BackOffMask             = 0 ; 
++static int Knob_ResetEvent         = 0 ;
++static int BackOffMask             = 0 ;
+ 
+-static int Knob_FastHSSEC          = 0 ; 
++static int Knob_FastHSSEC          = 0 ;
+ static int Knob_MoveNotifyee       = 2 ;       // notify() - disposition of notifyee
+ static int Knob_QMode              = 0 ;       // EntryList-cxq policy - queue discipline
+-static volatile int InitDone       = 0 ; 
++static volatile int InitDone       = 0 ;
+ 
+ 
+ // hashCode() generation :
+@@ -224,58 +233,58 @@
+ // * A DES- or AES-style SBox[] mechanism
+ // * One of the Phi-based schemes, such as:
+ //   2654435761 = 2^32 * Phi (golden ratio)
+-//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ; 
+-// * A variation of Marsaglia's shift-xor RNG scheme.  
++//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
++// * A variation of Marsaglia's shift-xor RNG scheme.
+ // * (obj ^ stwRandom) is appealing, but can result
+ //   in undesirable regularity in the hashCode values of adjacent objects
+ //   (objects allocated back-to-back, in particular).  This could potentially
+-//   result in hashtable collisions and reduced hashtable efficiency.  
+-//   There are simple ways to "diffuse" the middle address bits over the 
++//   result in hashtable collisions and reduced hashtable efficiency.
++//   There are simple ways to "diffuse" the middle address bits over the
+ //   generated hashCode values:
+ //
+ 
+ static inline intptr_t get_next_hash(Thread * Self, oop obj) {
+-  intptr_t value = 0 ; 
+-  if (hashCode == 0) { 
+-     // This form uses an unguarded global Park-Miller RNG, 
++  intptr_t value = 0 ;
++  if (hashCode == 0) {
++     // This form uses an unguarded global Park-Miller RNG,
+      // so it's possible for two threads to race and generate the same RNG.
+      // On MP system we'll have lots of RW access to a global, so the
+-     // mechanism induces lots of coherency traffic.  
+-     value = os::random() ; 
++     // mechanism induces lots of coherency traffic.
++     value = os::random() ;
+   } else
+-  if (hashCode == 1) { 
++  if (hashCode == 1) {
+      // This variation has the property of being stable (idempotent)
+      // between STW operations.  This can be useful in some of the 1-0
+-     // synchronization schemes.  
+-     intptr_t addrBits = intptr_t(obj) >> 3 ; 
+-     value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom ; 
+-  } else 
+-  if (hashCode == 2) { 
++     // synchronization schemes.
++     intptr_t addrBits = intptr_t(obj) >> 3 ;
++     value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom ;
++  } else
++  if (hashCode == 2) {
+      value = 1 ;            // for sensitivity testing
+   } else
+-  if (hashCode == 3) { 
+-     value = ++GVars.hcSequence ; 
++  if (hashCode == 3) {
++     value = ++GVars.hcSequence ;
+   } else
+-  if (hashCode == 4) { 
+-     value = intptr_t(obj) ; 
+-  } else { 
++  if (hashCode == 4) {
++     value = intptr_t(obj) ;
++  } else {
+      // Marsaglia's xor-shift scheme with thread-specific state
+      // This is probably the best overall implementation -- we'll
+      // likely make this the default in future releases.
+-     unsigned t = Self->_hashStateX ; 
+-     t ^= (t << 11) ; 
+-     Self->_hashStateX = Self->_hashStateY ; 
+-     Self->_hashStateY = Self->_hashStateZ ; 
+-     Self->_hashStateZ = Self->_hashStateW ; 
+-     unsigned v = Self->_hashStateW ; 
+-     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8)) ; 
+-     Self->_hashStateW = v ; 
+-     value = v ; 
+-  } 
++     unsigned t = Self->_hashStateX ;
++     t ^= (t << 11) ;
++     Self->_hashStateX = Self->_hashStateY ;
++     Self->_hashStateY = Self->_hashStateZ ;
++     Self->_hashStateZ = Self->_hashStateW ;
++     unsigned v = Self->_hashStateW ;
++     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8)) ;
++     Self->_hashStateW = v ;
++     value = v ;
++  }
+ 
+   value &= markOopDesc::hash_mask;
+-  if (value == 0) value = 0xBAD ; 
+-  assert (value != markOopDesc::no_hash, "invariant") ; 
++  if (value == 0) value = 0xBAD ;
++  assert (value != markOopDesc::no_hash, "invariant") ;
+   TEVENT (hashCode: GENERATE) ;
+   return value;
+ }
+@@ -287,42 +296,42 @@
+ void BasicLock::move_to(oop obj, BasicLock* dest) {
+   // Check to see if we need to inflate the lock. This is only needed
+   // if an object is locked using "this" lightweight monitor. In that
+-  // case, the displaced_header() is unlocked, because the 
++  // case, the displaced_header() is unlocked, because the
+   // displaced_header() contains the header for the originally unlocked
+-  // object. However the object could have already been inflated. But it 
++  // object. However the object could have already been inflated. But it
+   // does not matter, the inflation will just a no-op. For other cases,
+   // the displaced header will be either 0x0 or 0x3, which are location
+   // independent, therefore the BasicLock is free to move.
+   //
+-  // During OSR we may need to relocate a BasicLock (which contains a 
++  // During OSR we may need to relocate a BasicLock (which contains a
+   // displaced word) from a location in an interpreter frame to a
+   // new location in a compiled frame.  "this" refers to the source
+   // basiclock in the interpreter frame.  "dest" refers to the destination
+   // basiclock in the new compiled frame.  We *always* inflate in move_to().
+-  // The always-Inflate policy works properly, but in 1.5.0 it can sometimes 
+-  // cause performance problems in code that makes heavy use of a small # of 
+-  // uncontended locks.   (We'd inflate during OSR, and then sync performance 
+-  // would subsequently plummet because the thread would be forced thru the slow-path).  
+-  // This problem has been made largely moot on IA32 by inlining the inflated fast-path 
+-  // operations in Fast_Lock and Fast_Unlock in i486.ad. 
++  // The always-Inflate policy works properly, but in 1.5.0 it can sometimes
++  // cause performance problems in code that makes heavy use of a small # of
++  // uncontended locks.   (We'd inflate during OSR, and then sync performance
++  // would subsequently plummet because the thread would be forced through the slow-path).
++  // This problem has been made largely moot on IA32 by inlining the inflated fast-path
++  // operations in Fast_Lock and Fast_Unlock in i486.ad.
+   //
+-  // Note that there is a way to safely swing the object's markword from 
+-  // one stack location to another.  This avoids inflation.  Obviously, 
+-  // we need to ensure that both locations refer to the current thread's stack.  
++  // Note that there is a way to safely swing the object's markword from
++  // one stack location to another.  This avoids inflation.  Obviously,
++  // we need to ensure that both locations refer to the current thread's stack.
+   // There are some subtle concurrency issues, however, and since the benefit is
+   // is small (given the support for inflated fast-path locking in the fast_lock, etc)
+-  // we'll leave that optimization for another time.  
++  // we'll leave that optimization for another time.
+ 
+   if (displaced_header()->is_neutral()) {
+     ObjectSynchronizer::inflate_helper(obj);
+     // WARNING: We can not put check here, because the inflation
+-    // will not update the displaced header. Once BasicLock is inflated, 
++    // will not update the displaced header. Once BasicLock is inflated,
+     // no one should ever look at its content.
+   } else {
+-    // Typically the displaced header will be 0 (recursive stack lock) or 
++    // Typically the displaced header will be 0 (recursive stack lock) or
+     // unused_mark.  Naively we'd like to assert that the displaced mark
+     // value is either 0, neutral, or 3.  But with the advent of the
+-    // the store-before-CAS avoidance in fast_lock/compiler_lock_object
++    // store-before-CAS avoidance in fast_lock/compiler_lock_object
+     // we can find any flavor mark in the displaced mark.
+   }
+ // [RGV] The next line appears to do nothing!
+@@ -340,7 +349,7 @@
+   _obj = obj;
+ 
+   if (_dolock) {
+-    TEVENT (ObjectLocker) ; 
++    TEVENT (ObjectLocker) ;
+ 
+     ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
+   }
+@@ -367,31 +376,31 @@
+ PerfCounter * ObjectSynchronizer::_sync_SlowExit                    = NULL ;
+ PerfCounter * ObjectSynchronizer::_sync_SlowEnter                   = NULL ;
+ PerfCounter * ObjectSynchronizer::_sync_SlowNotify                  = NULL ;
+-PerfCounter * ObjectSynchronizer::_sync_SlowNotifyAll               = NULL ; 
++PerfCounter * ObjectSynchronizer::_sync_SlowNotifyAll               = NULL ;
+ PerfCounter * ObjectSynchronizer::_sync_FailedSpins                 = NULL ;
+ PerfCounter * ObjectSynchronizer::_sync_SuccessfulSpins             = NULL ;
+ PerfCounter * ObjectSynchronizer::_sync_MonInCirculation            = NULL ;
+ PerfCounter * ObjectSynchronizer::_sync_MonScavenged                = NULL ;
+ PerfLongVariable * ObjectSynchronizer::_sync_MonExtant              = NULL ;
+-  
+-// One-shot global initialization for the sync subsystem.  
++
++// One-shot global initialization for the sync subsystem.
+ // We could also defer initialization and initialize on-demand
+ // the first time we call inflate().  Initialization would
+ // be protected - like so many things - by the MonitorCache_lock.
+ 
+ void ObjectSynchronizer::Initialize () {
+   static int InitializationCompleted = 0 ;
+-  assert (InitializationCompleted == 0, "invariant") ; 
+-  InitializationCompleted = 1 ; 
+-  if (UsePerfData) { 
+-      EXCEPTION_MARK ; 
+-      #define NEWPERFCOUNTER(n)   {n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,CHECK); } 
+-      #define NEWPERFVARIABLE(n)  {n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,CHECK); } 
+-      NEWPERFCOUNTER(_sync_Inflations) ; 
+-      NEWPERFCOUNTER(_sync_Deflations) ; 
+-      NEWPERFCOUNTER(_sync_ContendedLockAttempts) ; 
+-      NEWPERFCOUNTER(_sync_FutileWakeups) ; 
+-      NEWPERFCOUNTER(_sync_Parks) ; 
++  assert (InitializationCompleted == 0, "invariant") ;
++  InitializationCompleted = 1 ;
++  if (UsePerfData) {
++      EXCEPTION_MARK ;
++      #define NEWPERFCOUNTER(n)   {n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,CHECK); }
++      #define NEWPERFVARIABLE(n)  {n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,CHECK); }
++      NEWPERFCOUNTER(_sync_Inflations) ;
++      NEWPERFCOUNTER(_sync_Deflations) ;
++      NEWPERFCOUNTER(_sync_ContendedLockAttempts) ;
++      NEWPERFCOUNTER(_sync_FutileWakeups) ;
++      NEWPERFCOUNTER(_sync_Parks) ;
+       NEWPERFCOUNTER(_sync_EmptyNotifications) ;
+       NEWPERFCOUNTER(_sync_Notifications) ;
+       NEWPERFCOUNTER(_sync_SlowEnter) ;
+@@ -404,87 +413,89 @@
+       NEWPERFCOUNTER(_sync_PrivateB) ;
+       NEWPERFCOUNTER(_sync_MonInCirculation) ;
+       NEWPERFCOUNTER(_sync_MonScavenged) ;
+-      NEWPERFVARIABLE(_sync_MonExtant) ; 
++      NEWPERFVARIABLE(_sync_MonExtant) ;
+       #undef NEWPERFCOUNTER
+   }
+ }
+ 
+-// Compile-time asserts 
++// Compile-time asserts
+ // When possible, it's better to catch errors deterministically at
+ // compile-time than at runtime.  The down-side to using compile-time
+ // asserts is that the error message -- often something about negative array
+-// indices -- is opaque. 
++// indices -- is opaque.
+ 
+-#define CTASSERT(x) { int tag[1-(2*!(x))]; printf ("Tag @%X\n", tag); } 
++#define CTASSERT(x) { int tag[1-(2*!(x))]; printf ("Tag @%X\n", tag); }
+ 
+-void ObjectMonitor::ctAsserts() { 
+-  CTASSERT(offset_of (ObjectMonitor, _header) == 0); 
++void ObjectMonitor::ctAsserts() {
++  CTASSERT(offset_of (ObjectMonitor, _header) == 0);
+ }
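The macro above encodes the assertion in an array bound: a failing condition produces a negative array size and the compiler rejects the translation unit. A minimal standalone sketch of the same trick, using hypothetical names outside the patch (MY_CTASSERT, Pair); C++11 static_assert now states the check directly:

    #include <cstdio>

    // A false condition yields int tag[-1], which fails to compile;
    // a true one yields int tag[1].  Printing tag keeps it from being
    // optimized away, mirroring the original macro.
    #define MY_CTASSERT(x) { int tag[1 - (2 * !(x))]; std::printf("Tag @%p\n", (void*) tag); }

    struct Pair { int a; int b; };

    int main() {
      MY_CTASSERT(sizeof(Pair) >= 2 * sizeof(int));   // holds: compiles
      // MY_CTASSERT(sizeof(Pair) < sizeof(int));     // would not compile
      // static_assert(sizeof(Pair) >= 2 * sizeof(int), "layout");  // C++11
      return 0;
    }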
+ 
+ static int Adjust (volatile int * adr, int dx) {
+-  int v ; 
+-  for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ; 
+-  return v ; 
++  int v ;
++  for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
++  return v ;
+ }
+ 
+ // Ad-hoc mutual exclusion primitives: SpinLock and Mux
+-// 
+-// We employ SpinLocks _only for low-contention, fixed-length 
+-// short-duration critical sections where we're concerned 
+-// about native mutex_t or HotSpot Mutex:: latency.  
++//
++// We employ SpinLocks _only for low-contention, fixed-length
++// short-duration critical sections where we're concerned
++// about native mutex_t or HotSpot Mutex:: latency.
+ // The mux construct provides a spin-then-block mutual exclusion
+-// mechanism.  
++// mechanism.
+ //
+-// Testing has shown that contention on the ListLock guarding gFreeList 
+-// is common.  If we implement ListLock as a simple SpinLock it's common 
++// Testing has shown that contention on the ListLock guarding gFreeList
++// is common.  If we implement ListLock as a simple SpinLock it's common
+ // for the JVM to devolve to yielding with little progress.  This is true
+ // despite the fact that the critical sections protected by ListLock are
+-// extremely short. 
++// extremely short.
+ //
+-// TODO-FIXME: ListLock should be of type SpinLock.  
++// TODO-FIXME: ListLock should be of type SpinLock.
+ // We should make this a 1st-class type, integrated into the lock
+-// hierarchy as leaf-locks.  Critically, the SpinLock structure 
+-// should have sufficient padding to avoid false-sharing and excessive 
+-// cache-coherency traffic.   
++// hierarchy as leaf-locks.  Critically, the SpinLock structure
++// should have sufficient padding to avoid false-sharing and excessive
++// cache-coherency traffic.
+ 
+ 
+-typedef volatile int SpinLockT ; 
++typedef volatile int SpinLockT ;
+ 
+-static void SpinAcquire (volatile int * adr, const char * LockName) {
++void Thread::SpinAcquire (volatile int * adr, const char * LockName) {
+   if (Atomic::cmpxchg (1, adr, 0) == 0) {
+      return ;   // normal fast-path return
+   }
+ 
+   // Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
+   TEVENT (SpinAcquire - ctx) ;
+-  int ctr = 0 ; 
+-  int Yields = 0 ; 
+-  for (;;) { 
++  int ctr = 0 ;
++  int Yields = 0 ;
++  for (;;) {
+      while (*adr != 0) {
+-        ++ctr ; 
+-        if ((ctr & 0xFFF) == 0 || !os::is_MP()) { 
+-           if (Yields > 5) { 
+-             Thread::current()->_ParkEvent->park(1) ; 
+-           } else { 
+-             os::NakedYield() ; 
+-             ++Yields ; 
++        ++ctr ;
++        if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
++           if (Yields > 5) {
++             // Consider using a simple NakedSleep() instead.
++             // Then SpinAcquire could be called by non-JVM threads
++             Thread::current()->_ParkEvent->park(1) ;
++           } else {
++             os::NakedYield() ;
++             ++Yields ;
+            }
+         } else {
+            SpinPause() ;
+         }
+      }
+-     if (Atomic::cmpxchg (1, adr, 0) == 0) return ; 
++     if (Atomic::cmpxchg (1, adr, 0) == 0) return ;
+   }
+ }
+ 
+-static void SpinRelease (volatile int * adr) {
+-  assert (*adr != 0, "invariant") ; 
+-  OrderAccess::fence() ;      // guarantee at least release consistency. 
++void Thread::SpinRelease (volatile int * adr) {
++  assert (*adr != 0, "invariant") ;
++  OrderAccess::fence() ;      // guarantee at least release consistency.
+   // Roach-motel semantics.
+   // It's safe if subsequent LDs and STs float "up" into the critical section,
+-  // but prior LDs and STs within the critical section can't be allowed 
+-  // to reorder or float past the ST that releases the lock. 
+-  *adr = 0 ; 
++  // but prior LDs and STs within the critical section can't be allowed
++  // to reorder or float past the ST that releases the lock.
++  *adr = 0 ;
+ }
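For orientation, here is a hedged standalone sketch of the spin/yield/park ladder SpinAcquire implements: spin briefly, yield a handful of times, then block in short timed waits. std::atomic and a timed sleep stand in for Atomic::cmpxchg and ParkEvent::park(1); spin_acquire and spin_release are illustrative names, not HotSpot APIs:

    #include <atomic>
    #include <chrono>
    #include <thread>

    static void spin_acquire(std::atomic<int>& lock) {
      int expected = 0;
      if (lock.compare_exchange_strong(expected, 1)) return;   // fast path

      int ctr = 0, yields = 0;
      for (;;) {
        // Test before CAS to avoid hammering the cache line.
        while (lock.load(std::memory_order_relaxed) != 0) {
          if ((++ctr & 0xFFF) == 0) {
            if (yields > 5) {
              // Stand-in for ParkEvent::park(1): a short timed block.
              std::this_thread::sleep_for(std::chrono::milliseconds(1));
            } else {
              std::this_thread::yield();
              ++yields;
            }
          }
          // else: pure spinning, as SpinPause() does on MP systems.
        }
        expected = 0;
        if (lock.compare_exchange_strong(expected, 1)) return;
      }
    }

    static void spin_release(std::atomic<int>& lock) {
      // Release ordering gives the "roach motel" semantics described
      // above: critical-section stores cannot float past this store.
      lock.store(0, std::memory_order_release);
    }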
+ 
+ // muxAcquire and muxRelease:
+@@ -492,144 +503,210 @@
+ // *  muxAcquire and muxRelease support a single-word lock-word construct.
+ //    The LSB of the word is set IFF the lock is held.
+ //    The remainder of the word points to the head of a singly-linked list
+-//    of threads blocked on the lock.  
++//    of threads blocked on the lock.
+ //
+-// *  users of muxAcquire and muxRelease must be careful with regards to
+-//    consuming unpark() "permits".  This is particularly true when 
+-//    muxAcquire-muxRelease are used as part of the implementation of 
+-//    the sync subsystem.  A safe rule of thumb is that a thread should never
+-//    call muxAcquire() if it's enqueued (cxq, EntryList, WaitList, etc) and 
+-//    will subsequently park().  The park() operation in muxAcquire() could
+-//    consume an unpark() permit intended for monitorenter, for instance. 
++// *  The current implementation of muxAcquire-muxRelease uses its own
++//    dedicated Thread._MuxEvent instance.  If we're interested in
++//    minimizing the peak number of extant ParkEvent instances then
++//    we could eliminate _MuxEvent and "borrow" _ParkEvent as long
++//    as certain invariants were satisfied.  Specifically, care would need
++//    to be taken with regards to consuming unpark() "permits".
++//    A safe rule of thumb is that a thread would never call muxAcquire()
++//    if it's enqueued (cxq, EntryList, WaitList, etc) and will subsequently
++//    park().  Otherwise the _ParkEvent park() operation in muxAcquire() could
++//    consume an unpark() permit intended for monitorenter, for instance.
+ //    One way around this would be to widen the restricted-range semaphore
+ //    implemented in park().  Another alternative would be to provide
+-//    multiple instances of the PlatformEvent() for each thread.  One 
++//    multiple instances of the PlatformEvent() for each thread.  One
+ //    instance would be dedicated to muxAcquire-muxRelease, for instance.
+ //
+ // *  Usage:
+-//    -- only as leaf locks
+-//    -- for short-term locks only, as blocking MuxAcquire() does _not perform
+-//       a state transition.  
+-// 
++//    -- Only as leaf locks
++//    -- for short-term locking only as muxAcquire does not perform
++//       thread state transitions.
++//
+ // Alternatives:
+-// *  We could implement muxAcquire and muxRelease with MCS or CLH locks 
+-//    but with parking or spin-then-park instead of pure spinning.  
+-// *  Use Taura-Oyama-Yonenzawa locks.  
+-// *  It's possible to construct a 1-0 lock if we encode the lockword as 
+-//    (List,LockByte).  Acquire will CAS the full lockword while Release 
++// *  We could implement muxAcquire and muxRelease with MCS or CLH locks
++//    but with parking or spin-then-park instead of pure spinning.
++// *  Use Taura-Oyama-Yonezawa locks.
++// *  It's possible to construct a 1-0 lock if we encode the lockword as
++//    (List,LockByte).  Acquire will CAS the full lockword while Release
+ //    will STB 0 into the LockByte.  The 1-0 scheme admits stranding, so
+-//    acquiring threads use timers (ParkTimed) to detect and recover from 
++//    acquiring threads use timers (ParkTimed) to detect and recover from
+ //    the stranding window.  Thread/Node structures must be aligned on 256-byte
+-//    boundaries by using placement-new.  
++//    boundaries by using placement-new.
+ // *  Augment MCS with advisory back-link fields maintained with CAS().
+ //    Pictorially:  LockWord -> T1 <-> T2 <-> T3 <-> ... <-> Tn <-> Owner.
+ //    The validity of the backlinks must be ratified before we trust the value.
+ //    If the backlinks are invalid the exiting thread must back-track through the
+-//    the forward links, which are always trustworthy.  
+-// *  Add a successor indication.  The LockWord is currently encoded as 
++//    forward links, which are always trustworthy.
++// *  Add a successor indication.  The LockWord is currently encoded as
+ //    (List, LOCKBIT:1).  We could also add a SUCCBIT or an explicit _succ variable
+-//    to provide the usual futile-wakeup optimization.  
+-//    See RTStt for details.  
++//    to provide the usual futile-wakeup optimization.
++//    See RTStt for details.
+ // *  Consider schedctl.sc_nopreempt to cover the critical section.
++//
+ 
+ 
+ typedef volatile intptr_t MutexT ;      // Mux Lock-word
+-enum MuxBits { LOCKBIT = 1 } ; 
+- 
+-static void ATTR muxAcquire (volatile intptr_t * Lock, const char * LockName) {
+-  intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ; 
+-  if (w == 0) return ; 
+-  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { 
+-     return ; 
++enum MuxBits { LOCKBIT = 1 } ;
++
++void Thread::muxAcquire (volatile intptr_t * Lock, const char * LockName) {
++  intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
++  if (w == 0) return ;
++  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
++     return ;
+   }
+ 
+-  TEVENT (muxAcquire - Contention) ; 
+-  ParkEvent * Self = Thread::current()->_ParkEvent ; 
+-  assert ((intptr_t(Self) & LOCKBIT) == 0, "invariant") ; 
+-  for (;;) { 
+-     intptr_t w ; 
+-     int its = (os::is_MP() ? 100 : 0) + 1 ; 
++  TEVENT (muxAcquire - Contention) ;
++  ParkEvent * const Self = Thread::current()->_MuxEvent ;
++  assert ((intptr_t(Self) & LOCKBIT) == 0, "invariant") ;
++  for (;;) {
++     int its = (os::is_MP() ? 100 : 0) + 1 ;
+ 
+      // Optional spin phase: spin-then-park strategy
+-     while (--its >= 0) { 
+-       w = *Lock ; 
+-       if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { 
+-          return ; 
++     while (--its >= 0) {
++       w = *Lock ;
++       if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
++          return ;
+        }
+      }
+ 
+-     Self->reset() ; 
+-     Self->OnList = intptr_t(Lock) ; 
+-     // The following fence() isn't _strictly necessary as the subsequent 
++     Self->reset() ;
++     Self->OnList = intptr_t(Lock) ;
++     // The following fence() isn't _strictly necessary as the subsequent
+      // CAS() both serializes execution and ratifies the fetched *Lock value.
+-     OrderAccess::fence(); 
+-     for (;;) { 
+-        w = *Lock ; 
+-        if ((w & LOCKBIT) == 0) { 
+-            if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { 
+-                return ; 
++     OrderAccess::fence();
++     for (;;) {
++        w = *Lock ;
++        if ((w & LOCKBIT) == 0) {
++            if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
++                Self->OnList = 0 ;   // hygiene - allows stronger asserts
++                return ;
+             }
+             continue ;      // Interference -- *Lock changed -- Just retry
+         }
+-        assert (w & LOCKBIT, "invariant") ; 
++        assert (w & LOCKBIT, "invariant") ;
+         Self->ListNext = (ParkEvent *) (w & ~LOCKBIT );
+-        if (Atomic::cmpxchg_ptr (intptr_t(Self)|LOCKBIT, Lock, w) == w) break ; 
++        if (Atomic::cmpxchg_ptr (intptr_t(Self)|LOCKBIT, Lock, w) == w) break ;
+      }
+ 
+-     while (Self->OnList != 0) { 
+-        Self->park() ; 
++     while (Self->OnList != 0) {
++        Self->park() ;
+      }
+   }
+ }
+ 
++void Thread::muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) {
++  intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
++  if (w == 0) return ;
++  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
++    return ;
++  }
++
++  TEVENT (muxAcquire - Contention) ;
++  ParkEvent * ReleaseAfter = NULL ;
++  if (ev == NULL) {
++    ev = ReleaseAfter = ParkEvent::Allocate (NULL) ;
++  }
++  assert ((intptr_t(ev) & LOCKBIT) == 0, "invariant") ;
++  for (;;) {
++    guarantee (ev->OnList == 0, "invariant") ;
++    int its = (os::is_MP() ? 100 : 0) + 1 ;
++
++    // Optional spin phase: spin-then-park strategy
++    while (--its >= 0) {
++      w = *Lock ;
++      if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
++        if (ReleaseAfter != NULL) {
++          ParkEvent::Release (ReleaseAfter) ;
++        }
++        return ;
++      }
++    }
++
++    ev->reset() ;
++    ev->OnList = intptr_t(Lock) ;
++    // The following fence() isn't _strictly necessary as the subsequent
++    // CAS() both serializes execution and ratifies the fetched *Lock value.
++    OrderAccess::fence();
++    for (;;) {
++      w = *Lock ;
++      if ((w & LOCKBIT) == 0) {
++        if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
++          ev->OnList = 0 ;
++          // We call ::Release while holding the outer lock, thus
++          // artificially lengthening the critical section.
++          // Consider deferring the ::Release() until the subsequent unlock(),
++          // after we've dropped the outer lock.
++          if (ReleaseAfter != NULL) {
++            ParkEvent::Release (ReleaseAfter) ;
++          }
++          return ;
++        }
++        continue ;      // Interference -- *Lock changed -- Just retry
++      }
++      assert (w & LOCKBIT, "invariant") ;
++      ev->ListNext = (ParkEvent *) (w & ~LOCKBIT );
++      if (Atomic::cmpxchg_ptr (intptr_t(ev)|LOCKBIT, Lock, w) == w) break ;
++    }
++
++    while (ev->OnList != 0) {
++      ev->park() ;
++    }
++  }
++}
++
+ // Release() must extract a successor from the list and then wake that thread.
+-// It can "pop" the front of the list or use a detach-modify-reattach (DMR) scheme 
++// It can "pop" the front of the list or use a detach-modify-reattach (DMR) scheme
+ // similar to that used by ParkEvent::Allocate() and ::Release().  DMR-based
+-// Release() would (A) CAS() or swap() null to *Lock, releasing the lock and 
+-// detaching the list.  (B) Extract a successor from the private list "in-hand" 
+-// (C) attempt to CAS() the residual back into *Lock over null.  If there were any 
+-// newly arrived threads and the CAS() would fail.  In that case Release() would 
+-// detach the RATs, re-merge the list in-hand with the RATs and repeat as needed.  
+-// Alternately, Release() can detach and extract a successor, put then pass the
+-// residual list to the wakee.  The wakee must reattach and remerge before it
+-// competes for the lock.  
++// Release() would :
++// (A) CAS() or swap() null to *Lock, releasing the lock and detaching the list.
++// (B) Extract a successor from the private list "in-hand"
++// (C) attempt to CAS() the residual back into *Lock over null.
++//     If there were any newly arrived threads then the CAS() would fail.
++//     In that case Release() would detach the RATs, re-merge the list in-hand
++//     with the RATs and repeat as needed.  Alternately, Release() might
++//     detach and extract a successor, but then pass the residual list to the wakee.
++//     The wakee would be responsible for reattaching and remerging before it
++//     competed for the lock.
+ //
+ // Both "pop" and DMR are immune from ABA corruption -- there can be
+-// multiple concurrent pushers, but only one popper or detacher.  
++// multiple concurrent pushers, but only one popper or detacher.
+ // This implementation pops from the head of the list.  This is unfair,
+-// but tends to provide excellent throughput as hot threads remain hot. 
+-// (We wake recently run threads first).  
++// but tends to provide excellent throughput as hot threads remain hot.
++// (We wake recently run threads first).
+ 
+-static void ATTR muxRelease (volatile intptr_t * Lock)  {
+-   for (;;) { 
+-     intptr_t w = Atomic::cmpxchg_ptr (0, Lock, LOCKBIT) ; 
+-     assert (w & LOCKBIT, "invariant") ; 
+-     if (w == LOCKBIT) return ; 
+-     ParkEvent * List = (ParkEvent *) (w & ~LOCKBIT) ; 
+-     assert (List != NULL, "invariant") ; 
+-     assert (List->OnList == intptr_t(Lock), "invariant") ; 
+-     ParkEvent * nxt = List->ListNext ; 
+-
+-     // The following CAS() releases the lock and pops the head element.  
+-     if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) { 
+-        continue ; 
+-     }
+-     List->OnList = 0 ; 
+-     OrderAccess::fence() ; 
+-     List->unpark () ;
+-     return ; 
++void Thread::muxRelease (volatile intptr_t * Lock)  {
++  for (;;) {
++    const intptr_t w = Atomic::cmpxchg_ptr (0, Lock, LOCKBIT) ;
++    assert (w & LOCKBIT, "invariant") ;
++    if (w == LOCKBIT) return ;
++    ParkEvent * List = (ParkEvent *) (w & ~LOCKBIT) ;
++    assert (List != NULL, "invariant") ;
++    assert (List->OnList == intptr_t(Lock), "invariant") ;
++    ParkEvent * nxt = List->ListNext ;
++
++    // The following CAS() releases the lock and pops the head element.
++    if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) {
++      continue ;
++    }
++    List->OnList = 0 ;
++    OrderAccess::fence() ;
++    List->unpark () ;
++    return ;
+   }
+ }
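The lock-word encoding that muxAcquire/muxRelease rely on is compact enough to model in a few lines. A hedged sketch with parking elided and illustrative names (WaitNode, push_waiter, unlock_and_pop are not HotSpot identifiers): bit 0 is the lock bit, the remaining bits hold the head of the waiter list, and release can therefore drop the lock and pop the head in a single CAS:

    #include <atomic>
    #include <cstdint>

    struct WaitNode { WaitNode* next = nullptr; /* park state elided */ };

    static const intptr_t LOCKBIT = 1;

    // Push the caller's node onto the waiter list, preserving the lock
    // bit; in the real code the thread then parks until popped.
    static void push_waiter(std::atomic<intptr_t>& word, WaitNode* self) {
      for (;;) {
        intptr_t w = word.load();
        if ((w & LOCKBIT) == 0) {
          if (word.compare_exchange_strong(w, w | LOCKBIT)) return;  // got lock
          continue;
        }
        self->next = reinterpret_cast<WaitNode*>(w & ~LOCKBIT);
        if (word.compare_exchange_strong(
                w, reinterpret_cast<intptr_t>(self) | LOCKBIT)) {
          return;  // enqueued; caller parks
        }
      }
    }

    // Release: one CAS both clears the lock bit and pops the head waiter.
    static WaitNode* unlock_and_pop(std::atomic<intptr_t>& word) {
      for (;;) {
        intptr_t w = LOCKBIT;
        if (word.compare_exchange_strong(w, 0)) return nullptr;  // no waiters
        WaitNode* head = reinterpret_cast<WaitNode*>(w & ~LOCKBIT);
        intptr_t rest = reinterpret_cast<intptr_t>(head->next);
        if (word.compare_exchange_strong(w, rest)) {
          return head;  // caller unparks this thread; it re-contends
        }
      }
    }

Because only the lock holder ever pops, the pop side needs no ABA defence, which is exactly the property the comment block above leans on.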
+ 
+ // ObjectMonitor Lifecycle
+ // -----------------------
+-// Inflation unlinks monitors from the global gFreeList and 
++// Inflation unlinks monitors from the global gFreeList and
+ // associates them with objects.  Deflation -- which occurs at
+ // STW-time -- disassociates idle monitors from objects.  Such
+ // scavenged monitors are returned to the gFreeList.
+ //
+ // The global list is protected by ListLock.  All the critical sections
+-// are short and operate in constant-time.  
++// are short and operate in constant-time.
+ //
+ // ObjectMonitors reside in type-stable memory (TSM) and are immortal.
+ //
+@@ -637,7 +714,7 @@
+ // --   unassigned and on the global free list
+ // --   unassigned and on a thread's private omFreeList
+ // --   assigned to an object.  The object is inflated and the mark refers
+-//      to the objectmonitor.  
++//      to the objectmonitor.
+ //
+ // TODO-FIXME:
+ //
+@@ -650,20 +727,20 @@
+ //    be installed at STW-time.
+ //
+ // *  For efficiency and to help reduce the store-before-CAS penalty
+-//    the objectmonitors on gFreeList or local free lists should be ready to install 
++//    the objectmonitors on gFreeList or local free lists should be ready to install
+ //    with the exception of _header and _object.  _object can be set after inflation.
+-//    In particular, keep all objectMonitors on a thread's private list in ready-to-install 
+-//    state with m.Owner set properly.  
++//    In particular, keep all objectMonitors on a thread's private list in ready-to-install
++//    state with m.Owner set properly.
+ //
+-// *  We could all diffuse contention by using multiple global (FreeList, Lock) 
+-//    pairs -- threads could use trylock() and a cyclic-scan strategy to search for 
+-//    an unlocked free list.  
++// *  We could also diffuse contention by using multiple global (FreeList, Lock)
++//    pairs -- threads could use trylock() and a cyclic-scan strategy to search for
++//    an unlocked free list.
+ //
+ // *  Add lifecycle tags and assert()s.
+ //
+ // *  Be more consistent about when we clear an objectmonitor's fields:
+ //    A.  After extracting the objectmonitor from a free list.
+-//    B.  After adding an objectmonitor to a free list.  
++//    B.  After adding an objectmonitor to a free list.
+ //
+ 
+ ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ;
+@@ -673,13 +750,13 @@
+ 
+ ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
+     // A large MAXPRIVATE value reduces both list lock contention
+-    // and list coherency traffic, but also tends to increase the  
++    // and list coherency traffic, but also tends to increase the
+     // number of objectMonitors in circulation as well as the STW
+     // scavenge costs.  As usual, we lean toward time in space-time
+-    // tradeoffs.  
+-    const int MAXPRIVATE = 1024 ; 
+-    for (;;) { 
+-        ObjectMonitor * m ; 
++    // tradeoffs.
++    const int MAXPRIVATE = 1024 ;
++    for (;;) {
++        ObjectMonitor * m ;
+ 
+         // 1: try to allocate from the thread's local omFreeList.
+         // Threads will attempt to allocate first from their local list, then
+@@ -687,13 +764,13 @@
+         // attempt to instantiate new monitors.   Thread-local free lists take
+         // heat off the ListLock and improve allocation latency, as well as reducing
+         // coherency traffic on the shared global list.
+-        m = Self->omFreeList ; 
+-        if (m != NULL) { 
+-           Self->omFreeList = m->FreeNext ; 
+-           Self->omFreeCount -- ; 
++        m = Self->omFreeList ;
++        if (m != NULL) {
++           Self->omFreeList = m->FreeNext ;
++           Self->omFreeCount -- ;
+            // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
+            guarantee (m->object() == NULL, "invariant") ;
+-           return m ; 
++           return m ;
+         }
+ 
+         // 2: try to allocate from the global gFreeList
+@@ -702,10 +779,10 @@
+         // If we're using thread-local free lists then try
+         // to reprovision the caller's free list.
+         if (gFreeList != NULL) {
+-            // Reprovision the thread's omFreeList.  
++            // Reprovision the thread's omFreeList.
+             // Use bulk transfers to reduce the allocation rate and heat
+             // on various locks.
+-            muxAcquire (&ListLock, "omAlloc") ;  
++            Thread::muxAcquire (&ListLock, "omAlloc") ;
+             for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL; ) {
+                 ObjectMonitor * take = gFreeList ;
+                 gFreeList = take->FreeNext ;
+@@ -714,33 +791,33 @@
+                 take->Recycle() ;
+                 omRelease (Self, take) ;
+             }
+-            muxRelease (&ListLock) ;  
++            Thread::muxRelease (&ListLock) ;
+             Self->omFreeProvision += 1 + (Self->omFreeProvision/2) ;
+             if (Self->omFreeProvision > MAXPRIVATE ) Self->omFreeProvision = MAXPRIVATE ;
+             TEVENT (omFirst - reprovision) ;
+-            continue ; 
++            continue ;
+         }
+ 
+         // 3: allocate a block of new ObjectMonitors
+-        // Both the local and global free lists are empty -- resort to malloc().  
++        // Both the local and global free lists are empty -- resort to malloc().
+         // In the current implementation objectMonitors are TSM - immortal.
+-        assert (_BLOCKSIZE > 1, "invariant") ; 
++        assert (_BLOCKSIZE > 1, "invariant") ;
+         ObjectMonitor * temp = new ObjectMonitor[_BLOCKSIZE];
+-        
++
+         // NOTE: (almost) no way to recover if allocation failed.
+         // We might be able to induce a STW safepoint and scavenge enough
+         // objectMonitors to permit progress.
+-        if (temp == NULL) { 
+-            vm_exit_out_of_memory (sizeof (ObjectMonitor[_BLOCKSIZE]), "Allocate ObjectMonitors") ; 
++        if (temp == NULL) {
++            vm_exit_out_of_memory (sizeof (ObjectMonitor[_BLOCKSIZE]), "Allocate ObjectMonitors") ;
+         }
+ 
+-        // Format the block.  
++        // Format the block.
+         // initialize the linked list, each monitor points to its next
+         // forming the single linked free list, the very first monitor
+-        // will points to next block, which forms the block list. 
++        // will point to the next block, which forms the block list.
+         // The trick of using the 1st element in the block as gBlockList
+         // linkage should be reconsidered.  A better implementation would
+-        // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; } 
++        // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
+ 
+         for (int i = 1; i < _BLOCKSIZE ; i++) {
+            temp[i].FreeNext = &temp[i+1];
+@@ -752,70 +829,70 @@
+         // Element [0] is reserved for global list linkage
+         temp[0].set_object(CHAINMARKER);
+ 
+-        // Consider carving out this thread's current request from the 
+-        // block in hand.  This avoids some lock traffic and redundant 
+-        // list activity.  
++        // Consider carving out this thread's current request from the
++        // block in hand.  This avoids some lock traffic and redundant
++        // list activity.
+ 
+         // Acquire the ListLock to manipulate BlockList and FreeList.
+-        // An Oyama-Taura-Yonezawa scheme might be more efficient. 
+-        muxAcquire (&ListLock, "omAlloc [2]") ; 
++        // An Oyama-Taura-Yonezawa scheme might be more efficient.
++        Thread::muxAcquire (&ListLock, "omAlloc [2]") ;
+ 
+-        // Add the new block to the list of extant blocks (gBlockList). 
++        // Add the new block to the list of extant blocks (gBlockList).
+         // The very first objectMonitor in a block is reserved and dedicated.
+-        // It serves as blocklist "next" linkage.  
++        // It serves as blocklist "next" linkage.
+         temp[0].FreeNext = gBlockList;
+         gBlockList = temp;
+ 
+         // Add the new string of objectMonitors to the global free list
+-        temp[_BLOCKSIZE - 1].FreeNext = gFreeList ; 
++        temp[_BLOCKSIZE - 1].FreeNext = gFreeList ;
+         gFreeList = temp + 1;
+-        muxRelease (&ListLock) ; 
+-        TEVENT (Allocate block of monitors) ; 
++        Thread::muxRelease (&ListLock) ;
++        TEVENT (Allocate block of monitors) ;
+     }
+ }
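A hedged sketch of the two-level allocation scheme omAlloc describes: take from a thread-local list first, refill it in bulk from a global list under a lock, and only then fall back to fresh allocation. std::mutex replaces the hand-rolled ListLock and the type-stable block carving is simplified to a plain new; Monitor, allocate_monitor, and the t_/g_ variables are all illustrative:

    #include <mutex>

    struct Monitor { Monitor* free_next = nullptr; };

    static std::mutex        g_list_lock;            // stands in for ListLock
    static Monitor*          g_free_list = nullptr;  // global free list
    static thread_local Monitor* t_free_list = nullptr;
    static thread_local int      t_provision = 32;   // grows toward a cap

    static Monitor* allocate_monitor() {
      for (;;) {
        // 1: thread-local list -- no locking, no coherency traffic.
        if (Monitor* m = t_free_list) {
          t_free_list = m->free_next;
          return m;
        }
        // 2: bulk-refill from the global list under the lock.
        {
          std::lock_guard<std::mutex> g(g_list_lock);
          for (int i = t_provision; --i >= 0 && g_free_list != nullptr; ) {
            Monitor* take = g_free_list;
            g_free_list = take->free_next;
            take->free_next = t_free_list;
            t_free_list = take;
          }
        }
        if (t_free_list != nullptr) {
          t_provision += 1 + t_provision / 2;         // refill more next time
          if (t_provision > 1024) t_provision = 1024;
          continue;
        }
        // 3: both lists empty.  The real code carves a whole
        // ObjectMonitor[] block and splices it onto the global list;
        // a plain new per monitor keeps the sketch short.
        return new Monitor();
      }
    }

Bulk transfer is the point of the design: one lock acquisition reprovisions many monitors, so the global lock is touched rarely even under heavy inflation.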
+ 
+ // Place "m" on the caller's private per-thread omFreeList.
+-// In practice there's no need to clamp or limit the number of 
++// In practice there's no need to clamp or limit the number of
+ // monitors on a thread's omFreeList as the only time we'll call
+ // omRelease is to return a monitor to the free list after a CAS
+ // attempt failed.  This doesn't allow unbounded #s of monitors to
+-// accumulate on a thread's free list.  
++// accumulate on a thread's free list.
+ //
+ // In the future the usage of omRelease() might change and monitors
+ // could migrate between free lists.  In that case to avoid excessive
+-// accumulation we could  limit omCount to (omProvision*2), otherwise return 
+-// the objectMonitor to the global list.  We should drain (return) in reasonable chunks.  
++// accumulation we could limit omCount to (omProvision*2), otherwise return
++// the objectMonitor to the global list.  We should drain (return) in reasonable chunks.
+ // That is, *not* one-at-a-time.
+ 
+ 
+-void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m) { 
++void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m) {
+     guarantee (m->object() == NULL, "invariant") ;
+     m->FreeNext = Self->omFreeList ;
+-    Self->omFreeList = m ; 
+-    Self->omFreeCount ++ ; 
++    Self->omFreeList = m ;
++    Self->omFreeCount ++ ;
+ }
+ 
+-// Return the monitors of a moribund thread's local free list to 
+-// the global free list.  Typically a thread calls omFlush() when 
+-// it's dying.  We could also consider having the VM thread steal 
++// Return the monitors of a moribund thread's local free list to
++// the global free list.  Typically a thread calls omFlush() when
++// it's dying.  We could also consider having the VM thread steal
+ // monitors from threads that have not run java code over a few
+-// consecutive STW safepoints.  Relatedly, we might decay 
+-// omFreeProvision at STW safepoints. 
++// consecutive STW safepoints.  Relatedly, we might decay
++// omFreeProvision at STW safepoints.
+ //
+ // We currently call omFlush() from the Thread:: dtor _after the thread
+ // has been excised from the thread list and is no longer a mutator.
+ // That means that omFlush() can run concurrently with a safepoint and
+-// the scavenge operator.  Calling omFlush() from JavaThread::exit() might 
++// the scavenge operator.  Calling omFlush() from JavaThread::exit() might
+ // be a better choice as we could safely reason that the JVM is
+ // not at a safepoint at the time of the call, and thus there could
+ // be no inopportune interleavings between omFlush() and the scavenge
+-// operator.  
++// operator.
+ 
+ void ObjectSynchronizer::omFlush (Thread * Self) {
+-    ObjectMonitor * List = Self->omFreeList ;  // Null-terminated SLL 
+-    Self->omFreeList = NULL ; 
+-    if (List == NULL) return ; 
+-    ObjectMonitor * Tail = NULL ; 
++    ObjectMonitor * List = Self->omFreeList ;  // Null-terminated SLL
++    Self->omFreeList = NULL ;
++    if (List == NULL) return ;
++    ObjectMonitor * Tail = NULL ;
+     ObjectMonitor * s ;
+     for (s = List ; s != NULL ; s = s->FreeNext) {
+         Tail = s ;
+@@ -825,19 +902,19 @@
+         TEVENT (omFlush - Move one) ;
+     }
+ 
+-    guarantee (Tail != NULL && List != NULL, "invariant") ; 
+-    muxAcquire (&ListLock, "omFlush") ; 
++    guarantee (Tail != NULL && List != NULL, "invariant") ;
++    Thread::muxAcquire (&ListLock, "omFlush") ;
+     Tail->FreeNext = gFreeList ;
+     gFreeList = List ;
+-    muxRelease (&ListLock) ; 
+-    TEVENT (omFlush) ; 
++    Thread::muxRelease (&ListLock) ;
++    TEVENT (omFlush) ;
+ }
+ 
+-    
++
+ // Get the next block in the block list.
+ static inline ObjectMonitor* next(ObjectMonitor* block) {
+   assert(block->object() == CHAINMARKER, "must be a block header");
+-  block = block->FreeNext ; 
++  block = block->FreeNext ;
+   assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
+   return block;
+ }
+@@ -853,21 +930,21 @@
+   return ObjectSynchronizer::inflate(Thread::current(), obj);
+ }
+ 
+-// Note that we could encounter some performance loss through false-sharing as 
+-// multiple locks occupy the same $ line.  Padding might be appropriate. 
++// Note that we could encounter some performance loss through false-sharing as
++// multiple locks occupy the same $ line.  Padding might be appropriate.
+ 
+-#define NINFLATIONLOCKS 256 
+-static volatile intptr_t InflationLocks [NINFLATIONLOCKS] ; 
++#define NINFLATIONLOCKS 256
++static volatile intptr_t InflationLocks [NINFLATIONLOCKS] ;
+ 
+-static markOop ReadStableMark (oop obj) { 
+-  markOop mark = obj->mark() ; 
++static markOop ReadStableMark (oop obj) {
++  markOop mark = obj->mark() ;
+   if (!mark->is_being_inflated()) {
+     return mark ;       // normal fast-path return
+   }
+ 
+-  int its = 0 ; 
++  int its = 0 ;
+   for (;;) {
+-    markOop mark = obj->mark() ; 
++    markOop mark = obj->mark() ;
+     if (!mark->is_being_inflated()) {
+       return mark ;    // normal fast-path return
+     }
+@@ -876,21 +953,21 @@
+     // The caller of ReadStableMark() must wait for inflation to complete.
+     // Avoid live-lock
+     // TODO: consider calling SafepointSynchronize::do_call_back() while
+-    // spinning to see if there's a safepoint pending.  If so, immediately 
++    // spinning to see if there's a safepoint pending.  If so, immediately
+     // yielding or blocking would be appropriate.  Avoid spinning while
+-    // there is a safepoint pending.  
+-    // TODO: add inflation contention performance counters. 
+-    // TODO: restrict the aggregate number of spinners.  
++    // there is a safepoint pending.
++    // TODO: add inflation contention performance counters.
++    // TODO: restrict the aggregate number of spinners.
+ 
+-    ++its ; 
++    ++its ;
+     if (its > 10000 || !os::is_MP()) {
+-       if (its & 1) { 
+-         os::NakedYield() ; 
+-         TEVENT (Inflate: INFLATING - yield) ; 
++       if (its & 1) {
++         os::NakedYield() ;
++         TEVENT (Inflate: INFLATING - yield) ;
+        } else {
+-         // Note that the following code attenuates the livelock problem but is not 
+-         // a complete remedy.  A more complete solution would require that the inflating 
+-         // thread hold the associated inflation lock.  The following code simply restricts 
++         // Note that the following code attenuates the livelock problem but is not
++         // a complete remedy.  A more complete solution would require that the inflating
++         // thread hold the associated inflation lock.  The following code simply restricts
+          // the number of spinners to at most one.  We'll have N-2 threads blocked
+          // on the inflationlock, 1 thread holding the inflation lock and using
+          // a yield/park strategy, and 1 thread in the midst of inflation.
+@@ -900,30 +977,30 @@
+          // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
+          // and calling park().  When inflation was complete the thread that accomplished inflation
+          // would detach the list and set the markword to inflated with a single CAS and
+-         // then for each thread on the list, set the flag and unpark() the thread.  
++         // then for each thread on the list, set the flag and unpark() the thread.
+          // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
+-         // wakes at most one thread whereas we need to wake the entire list.  
+-         int ix = (intptr_t(obj) >> 5) & (NINFLATIONLOCKS-1) ; 
+-         int YieldThenBlock = 0 ; 
+-         assert (ix >= 0 && ix < NINFLATIONLOCKS, "invariant") ; 
+-         assert ((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant") ; 
+-         muxAcquire (InflationLocks + ix, "InflationLock") ; 
++         // wakes at most one thread whereas we need to wake the entire list.
++         int ix = (intptr_t(obj) >> 5) & (NINFLATIONLOCKS-1) ;
++         int YieldThenBlock = 0 ;
++         assert (ix >= 0 && ix < NINFLATIONLOCKS, "invariant") ;
++         assert ((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant") ;
++         Thread::muxAcquire (InflationLocks + ix, "InflationLock") ;
+          while (obj->mark() == markOopDesc::INFLATING()) {
+            // Beware: NakedYield() is advisory and has almost no effect on some platforms
+            // so we periodically call Self->_ParkEvent->park(1).
+-           // We use a mixed spin/yield/block mechanism.  
+-           if ((YieldThenBlock++) >= 16) { 
+-              Thread::current()->_ParkEvent->park(1) ; 
+-           } else { 
+-              os::NakedYield() ; 
++           // We use a mixed spin/yield/block mechanism.
++           if ((YieldThenBlock++) >= 16) {
++              Thread::current()->_ParkEvent->park(1) ;
++           } else {
++              os::NakedYield() ;
+            }
+          }
+-         muxRelease (InflationLocks + ix ) ; 
+-         TEVENT (Inflate: INFLATING - yield/park) ; 
++         Thread::muxRelease (InflationLocks + ix ) ;
++         TEVENT (Inflate: INFLATING - yield/park) ;
+        }
+-    } else { 
++    } else {
+        SpinPause() ;       // SMP-polite spinning
+-    } 
++    }
+   }
+ }
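The InflationLocks scheme used above stripes waiters across a small power-of-two table keyed by object address, so at most one thread per stripe spins on an object that is being inflated. A minimal sketch of the striping arithmetic (kNumLocks, inflation_locks, and stripe_for are illustrative names; treating the low five address bits as uninteresting is an assumption of this sketch):

    #include <cassert>
    #include <cstdint>

    static const int kNumLocks = 256;              // must be a power of two
    static volatile intptr_t inflation_locks[kNumLocks];

    static volatile intptr_t* stripe_for(const void* obj) {
      assert((kNumLocks & (kNumLocks - 1)) == 0);  // mask needs a power of two
      intptr_t a = reinterpret_cast<intptr_t>(obj);
      // Shift off low-order bits (little variation there), then mask.
      int ix = static_cast<int>((a >> 5) & (kNumLocks - 1));
      return &inflation_locks[ix];
    }
    // A waiter would muxAcquire the returned word and poll the mark, so
    // N-2 threads block, one yields/parks, and one performs inflation.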
+ 
+@@ -931,12 +1008,12 @@
+   // Inflate mutates the heap ...
+   // Relaxing assertion for bug 6320749.
+   assert (Universe::verify_in_progress() ||
+-          !SafepointSynchronize::is_at_safepoint(), "invariant") ; 
++          !SafepointSynchronize::is_at_safepoint(), "invariant") ;
+ 
+   for (;;) {
+-      const markOop mark = object->mark() ; 
+-      assert (!mark->has_bias_pattern(), "invariant") ; 
+-         
++      const markOop mark = object->mark() ;
++      assert (!mark->has_bias_pattern(), "invariant") ;
++
+       // The mark can be in one of the following states:
+       // *  Inflated     - just return
+       // *  Stack-locked - coerce it to inflated
+@@ -946,97 +1023,97 @@
+ 
+       // CASE: inflated
+       if (mark->has_monitor()) {
+-          ObjectMonitor * inf = mark->monitor() ; 
++          ObjectMonitor * inf = mark->monitor() ;
+           assert (inf->header()->is_neutral(), "invariant");
+           assert (inf->object() == object, "invariant") ;
+           assert (ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
+-          return inf ; 
++          return inf ;
+       }
+ 
+       // CASE: inflation in progress - inflating over a stack-lock.
+       // Some other thread is converting from stack-locked to inflated.
+-      // Only that thread can complete inflation -- other threads must wait.  
+-      // The INFLATING value is transient. 
+-      // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.  
+-      // We could always eliminate polling by parking the thread on some auxilliary list.
++      // Only that thread can complete inflation -- other threads must wait.
++      // The INFLATING value is transient.
++      // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
++      // We could always eliminate polling by parking the thread on some auxiliary list.
+       if (mark == markOopDesc::INFLATING()) {
+-         TEVENT (Inflate: spin while INFLATING) ; 
+-         ReadStableMark(object) ; 
+-         continue ; 
++         TEVENT (Inflate: spin while INFLATING) ;
++         ReadStableMark(object) ;
++         continue ;
+       }
+ 
+-      // CASE: stack-locked 
++      // CASE: stack-locked
+       // Could be stack-locked either by this thread or by some other thread.
+-      // 
+-      // Note that we allocate the objectmonitor speculatively, _before_ attempting 
+-      // to install INFLATING into the mark word.  We orginally installed INFLATING, 
+-      // allocated the objectmonitor, and then finally STed the address of the 
++      //
++      // Note that we allocate the objectmonitor speculatively, _before_ attempting
++      // to install INFLATING into the mark word.  We originally installed INFLATING,
++      // allocated the objectmonitor, and then finally STed the address of the
+       // objectmonitor into the mark.  This was correct, but artificially lengthened
+-      // the interval in which INFLATED appeared in the mark, thus increasing 
+-      // the odds of inflation contention.  
+-      // 
+-      // We now use per-thread private objectmonitor free lists.  
+-      // These list are reprovisioned from the global free list outside the 
++      // the interval in which INFLATED appeared in the mark, thus increasing
++      // the odds of inflation contention.
++      //
++      // We now use per-thread private objectmonitor free lists.
++      // These lists are reprovisioned from the global free list outside the
+       // critical INFLATING...ST interval.  A thread can transfer
+       // multiple objectmonitors en masse from the global free list to its local free list.
+       // This reduces coherency traffic and lock contention on the global free list.
+       // Using such local free lists, it doesn't matter if the omAlloc() call appears
+       // before or after the CAS(INFLATING) operation.
+-      // See the comments in omAlloc().  
++      // See the comments in omAlloc().
+ 
+-      if (mark->has_locker()) { 
+-          ObjectMonitor * m = omAlloc (Self) ;  
++      if (mark->has_locker()) {
++          ObjectMonitor * m = omAlloc (Self) ;
+           // Optimistically prepare the objectmonitor - anticipate successful CAS
+           // We do this before the CAS in order to minimize the length of time
+-          // in which INFLATING appears in the mark.  
+-          m->Recycle(); 
+-          m->FreeNext      = NULL ; 
+-          m->_Responsible  = NULL ; 
+-          m->OwnerIsThread = 0 ; 
+-          m->_recursions   = 0 ; 
++          // in which INFLATING appears in the mark.
++          m->Recycle();
++          m->FreeNext      = NULL ;
++          m->_Responsible  = NULL ;
++          m->OwnerIsThread = 0 ;
++          m->_recursions   = 0 ;
+           m->_SpinDuration = Knob_SpinLimit ;   // Consider: maintain by type/class
+ 
+-          markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ; 
++          markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ;
+           if (cmp != mark) {
+-             omRelease (Self, m) ;  
++             omRelease (Self, m) ;
+              continue ;       // Interference -- just retry
+           }
+ 
+-          // We've successfully installed INFLATING (0) into the mark-word.  
+-          // This is the only case where 0 will appear in a mark-work.  
+-          // Only the singular thread that successfully swings the mark-word 
++          // We've successfully installed INFLATING (0) into the mark-word.
++          // This is the only case where 0 will appear in a mark-word.
++          // Only the singular thread that successfully swings the mark-word
+           // to 0 can perform (or more precisely, complete) inflation.
+           //
+           // Why do we CAS a 0 into the mark-word instead of just CASing the
+           // mark-word from the stack-locked value directly to the new inflated state?
+-          // Consider what happens when a thread unlocks a stack-locked object.  
+-          // It attempts to use CAS to swing the displaced header value from the 
++          // Consider what happens when a thread unlocks a stack-locked object.
++          // It attempts to use CAS to swing the displaced header value from the
+           // on-stack basiclock back into the object header.  Recall also that the
+-          // header value (hashcode, etc) can reside in (a) the object header, or 
++          // header value (hashcode, etc) can reside in (a) the object header, or
+           // (b) a displaced header associated with the stack-lock, or (c) a displaced
+-          // header in an objectMonitor.  The inflate() routine must copy the header 
+-          // value from the basiclock on the owner's stack to the objectMonitor, all 
+-          // the while preserving the hashCode stability invariants.  If the owner 
+-          // decides to release the lock while the value is 0, the unlock will fail 
+-          // and control will eventually pass from slow_exit() to inflate.  The owner 
+-          // will then spin, waiting for the 0 value to disappear.   Put another way, 
++          // header in an objectMonitor.  The inflate() routine must copy the header
++          // value from the basiclock on the owner's stack to the objectMonitor, all
++          // the while preserving the hashCode stability invariants.  If the owner
++          // decides to release the lock while the value is 0, the unlock will fail
++          // and control will eventually pass from slow_exit() to inflate.  The owner
++          // will then spin, waiting for the 0 value to disappear.   Put another way,
+           // the 0 causes the owner to stall if the owner happens to try to
+-          // drop the lock (restoring the header from the basiclock to the object) 
+-          // while inflation is in-progess.  This protocol avoids races that might 
+-          // would otherwise permit hashCode values to change or "flicker" for an object.  
++          // drop the lock (restoring the header from the basiclock to the object)
++          // while inflation is in-progress.  This protocol avoids races that
++          // would otherwise permit hashCode values to change or "flicker" for an object.
+           // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
+-          // 0 serves as a "BUSY" inflate-in-progress indicator.  
+-          
++          // 0 serves as a "BUSY" inflate-in-progress indicator.
+ 
+-          // fetch the displaced mark from the owner's stack. 
++
++          // fetch the displaced mark from the owner's stack.
+           // The owner can't die or unwind past the lock while our INFLATING
+           // object is in the mark.  Furthermore the owner can't complete
+-          // an unlock on the object, either.  
+-          markOop dmw = mark->displaced_mark_helper() ; 
+-          assert (dmw->is_neutral(), "invariant") ; 
++          // an unlock on the object, either.
++          markOop dmw = mark->displaced_mark_helper() ;
++          assert (dmw->is_neutral(), "invariant") ;
+ 
+           // Setup monitor fields to proper values -- prepare the monitor
+-          m->set_header(dmw) ; 
++          m->set_header(dmw) ;
+ 
+           // Optimization: if the mark->locker stack address is associated
+           // with this thread we could simply set m->_owner = Self and
+@@ -1049,73 +1126,73 @@
+ 
+           // Must preserve store ordering. The monitor state must
+           // be stable at the time of publishing the monitor address.
+-          guarantee (object->mark() == markOopDesc::INFLATING(), "invariant") ; 
++          guarantee (object->mark() == markOopDesc::INFLATING(), "invariant") ;
+           object->release_set_mark(markOopDesc::encode(m));
+ 
+           // Hopefully the performance counters are allocated on distinct cache lines
+           // to avoid false sharing on MP systems ...
+-          if (_sync_Inflations != NULL) _sync_Inflations->inc() ; 
+-          TEVENT(Inflate: overwrite stacklock) ; 
++          if (_sync_Inflations != NULL) _sync_Inflations->inc() ;
++          TEVENT(Inflate: overwrite stacklock) ;
+           if (TraceMonitorInflation) {
+             if (object->is_instance()) {
+               ResourceMark rm;
+               tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
+-                (intptr_t) object, (intptr_t) object->mark(), 
++                (intptr_t) object, (intptr_t) object->mark(),
+                 Klass::cast(object->klass())->external_name());
+             }
+           }
+-          return m ; 
++          return m ;
+       }
+ 
+-      // CASE: neutral     
++      // CASE: neutral
+       // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
+-      // If we know we're inflating for entry it's better to inflate by swinging a 
++      // If we know we're inflating for entry it's better to inflate by swinging a
+       // pre-locked objectMonitor pointer into the object header.   A successful
+       // CAS inflates the object *and* confers ownership to the inflating thread.
+-      // In the current implementation we use a 2-step mechanism where we CAS() 
+-      // to inflate and then CAS() again to try to swing _owner from NULL to Self.  
++      // In the current implementation we use a 2-step mechanism where we CAS()
++      // to inflate and then CAS() again to try to swing _owner from NULL to Self.
+       // An inflateTry() method that we could call from fast_enter() and slow_enter()
+       // would be useful.
+ 
+       assert (mark->is_neutral(), "invariant");
+-      ObjectMonitor * m = omAlloc (Self) ; 
+-      // prepare m for installation - set monitor to initial state 
++      ObjectMonitor * m = omAlloc (Self) ;
++      // prepare m for installation - set monitor to initial state
+       m->Recycle();
+       m->set_header(mark);
+       m->set_owner(NULL);
+       m->set_object(object);
+-      m->OwnerIsThread = 1 ; 
+-      m->_recursions   = 0 ; 
+-      m->FreeNext      = NULL ; 
+-      m->_Responsible  = NULL ; 
++      m->OwnerIsThread = 1 ;
++      m->_recursions   = 0 ;
++      m->FreeNext      = NULL ;
++      m->_Responsible  = NULL ;
+       m->_SpinDuration = Knob_SpinLimit ;       // consider: keep metastats by type/class
+ 
+       if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
+-          m->set_object (NULL) ; 
+-          m->set_owner  (NULL) ; 
+-          m->OwnerIsThread = 0 ; 
+-          m->Recycle() ; 
+-          omRelease (Self, m) ; 
+-          m = NULL ; 
+-          continue ;        
++          m->set_object (NULL) ;
++          m->set_owner  (NULL) ;
++          m->OwnerIsThread = 0 ;
++          m->Recycle() ;
++          omRelease (Self, m) ;
++          m = NULL ;
++          continue ;
+           // interference - the markword changed - just retry.
+-          // The state-transitions are one-way, so there's no chance of 
+-          // live-lock -- "Inflated" is an absorbing state. 
++          // The state-transitions are one-way, so there's no chance of
++          // live-lock -- "Inflated" is an absorbing state.
+       }
+ 
+-      // Hopefully the performance counters are allocated on distinct 
++      // Hopefully the performance counters are allocated on distinct
+       // cache lines to avoid false sharing on MP systems ...
+-      if (_sync_Inflations != NULL) _sync_Inflations->inc() ; 
+-      TEVENT(Inflate: overwrite neutral) ; 
++      if (_sync_Inflations != NULL) _sync_Inflations->inc() ;
++      TEVENT(Inflate: overwrite neutral) ;
+       if (TraceMonitorInflation) {
+         if (object->is_instance()) {
+           ResourceMark rm;
+           tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
+-            (intptr_t) object, (intptr_t) object->mark(), 
++            (intptr_t) object, (intptr_t) object->mark(),
+             Klass::cast(object->klass())->external_name());
+         }
+       }
+-      return m ; 
++      return m ;
+   }
+ }
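The INFLATING protocol the comments walk through condenses to a small model: CAS the mark from the stack-lock value to 0, copy the displaced header while the 0 holds other threads off, then publish the monitor with a releasing store. A hedged sketch in which mark encodings are simplified to bare words and every name (Monitor, inflate_model, the parameters) is illustrative:

    #include <atomic>
    #include <cstdint>

    struct Monitor { intptr_t header; void* owner; };
    static const intptr_t INFLATING = 0;   // transient "busy" mark value

    static Monitor* inflate_model(std::atomic<intptr_t>& mark_word,
                                  intptr_t stack_lock_mark,
                                  intptr_t displaced_header,
                                  Monitor* m) {
      // Phase 1: swing the mark from the stack-lock value to INFLATING (0).
      // Only the thread that wins this CAS may complete inflation; an owner
      // trying to unlock meanwhile sees 0, fails its CAS, and spins.
      intptr_t expected = stack_lock_mark;
      if (!mark_word.compare_exchange_strong(expected, INFLATING)) {
        return nullptr;  // interference -- caller retries with a fresh mark
      }
      // While the mark is 0 the displaced header is stable, so this copy
      // cannot race with an unlock.
      m->header = displaced_header;
      // Phase 2: publish the monitor.  The release store orders the field
      // initialization before the pointer becomes visible to readers.
      mark_word.store(reinterpret_cast<intptr_t>(m), std::memory_order_release);
      return m;
    }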
+ 
+@@ -1140,43 +1217,43 @@
+   }
+ 
+   THREAD->update_highest_lock((address)lock);
+-  slow_enter (obj, lock, THREAD) ; 
++  slow_enter (obj, lock, THREAD) ;
+ }
+ 
+ void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
+   assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
+   // if displaced header is null, the previous enter is recursive enter, no-op
+   markOop dhw = lock->displaced_header();
+-  markOop mark ; 
++  markOop mark ;
+   if (dhw == NULL) {
+-     // Recursive stack-lock.  
++     // Recursive stack-lock.
+      // Diagnostics -- Could be: stack-locked, inflating, inflated.
+-     mark = object->mark() ; 
+-     assert (!mark->is_neutral(), "invariant") ; 
+-     if (mark->has_locker() && mark != markOopDesc::INFLATING()) { 
++     mark = object->mark() ;
++     assert (!mark->is_neutral(), "invariant") ;
++     if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
+         assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ;
+      }
+-     if (mark->has_monitor()) { 
+-        ObjectMonitor * m = mark->monitor() ; 
+-        assert(((oop)(m->object()))->mark() == mark, "invariant") ; 
+-        assert(m->is_entered(THREAD), "invariant") ; 
++     if (mark->has_monitor()) {
++        ObjectMonitor * m = mark->monitor() ;
++        assert(((oop)(m->object()))->mark() == mark, "invariant") ;
++        assert(m->is_entered(THREAD), "invariant") ;
+      }
+-     return ; 
++     return ;
+   }
+ 
+-  mark = object->mark() ; 
++  mark = object->mark() ;
+ 
+   // If the object is stack-locked by the current thread, try to
+-  // swing the displaced header from the box back to the mark.  
+-  if (mark == (markOop) lock) { 
+-     assert (dhw->is_neutral(), "invariant") ; 
+-     if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) { 
+-        TEVENT (fast_exit: release stacklock) ; 
++  // swing the displaced header from the box back to the mark.
++  if (mark == (markOop) lock) {
++     assert (dhw->is_neutral(), "invariant") ;
++     if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
++        TEVENT (fast_exit: release stacklock) ;
+         return;
+      }
+   }
+ 
+-  ObjectSynchronizer::inflate(THREAD, object)->exit (THREAD) ; 
++  ObjectSynchronizer::inflate(THREAD, object)->exit (THREAD) ;
+ }
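The exit path just above has a similarly small core: a null displaced header means the enter was recursive and exit is a no-op; otherwise one CAS tries to swing the displaced header back over the mark, and failure routes to the inflated monitor. A hedged model with illustrative types (BasicLockModel, fast_exit_model) and a simplified mark encoding:

    #include <atomic>
    #include <cstdint>

    struct BasicLockModel { intptr_t displaced_header; };

    static bool fast_exit_model(std::atomic<intptr_t>& mark_word,
                                BasicLockModel* lock) {
      if (lock->displaced_header == 0) {
        return true;   // recursive stack-lock: nothing to restore
      }
      // If the mark still points at our BasicLock, restore the displaced
      // header in one CAS.  Failure means the lock was inflated (or is
      // inflating): the caller must exit through the ObjectMonitor.
      intptr_t expected = reinterpret_cast<intptr_t>(lock);
      return mark_word.compare_exchange_strong(expected, lock->displaced_header);
    }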
+ 
+ // This routine is used to handle interpreter/compiler slow case
+@@ -1185,14 +1262,14 @@
+ void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
+   markOop mark = obj->mark();
+   assert(!mark->has_bias_pattern(), "should not see bias pattern here");
+- 
+-  if (mark->is_neutral()) { 
++
++  if (mark->is_neutral()) {
+     // Anticipate successful CAS -- the ST of the displaced mark must
+-    // be visible <= the ST performed by the CAS. 
++    // be visible <= the ST performed by the CAS.
+     lock->set_displaced_header(mark);
+-    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) { 
+-      TEVENT (slow_enter: release stacklock) ; 
+-      return ; 
++    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
++      TEVENT (slow_enter: release stacklock) ;
++      return ;
+     }
+     // Fall through to inflate() ...
+   } else
+@@ -1204,10 +1281,10 @@
+   }
+ 
+ #if 0
+-  // The following optimization isn't particulary useful.  
+-  if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) { 
+-    lock->set_displaced_header (NULL) ; 
+-    return ; 
++  // The following optimization isn't particularly useful.
++  if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
++    lock->set_displaced_header (NULL) ;
++    return ;
+   }
+ #endif
+ 
+@@ -1220,17 +1297,17 @@
+ }
+ 
+ // This routine is used to handle interpreter/compiler slow case
+-// We don't need to use fast path here, because it must have 
++// We don't need to use fast path here, because it must have
+ // failed in the interpreter/compiler code. Simply using the heavy
+ // weight monitor should be ok, unless someone finds otherwise.
+ void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
+-  fast_exit (object, lock, THREAD) ; 
++  fast_exit (object, lock, THREAD) ;
+ }
+ 
+ // NOTE: must use heavy weight monitor to handle jni monitor enter
+ void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
+   // the current locking is from JNI instead of Java code
+-  TEVENT (jni_enter) ; 
++  TEVENT (jni_enter) ;
+   if (UseBiasedLocking) {
+     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+@@ -1254,7 +1331,7 @@
+ 
+ // NOTE: must use heavy weight monitor to handle jni monitor exit
+ void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
+-  TEVENT (jni_exit) ; 
++  TEVENT (jni_exit) ;
+   if (UseBiasedLocking) {
+     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+   }
+@@ -1269,7 +1346,7 @@
+ }
+ 
+ // complete_exit()/reenter() are used to wait on a nested lock
+-// i.e. to give up an outer lock completely and then re-enter 
++// i.e. to give up an outer lock completely and then re-enter
+ // Used when holding nested locks - lock acquisition order: lock1 then lock2
+ //  1) complete_exit lock1 - saving recursion count
+ //  2) wait on lock2
+@@ -1277,7 +1354,7 @@
+ //  4) reenter lock1 with original recursion count
+ //  5) lock lock2
+ // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
+-intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {  
++intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
+   TEVENT (complete_exit) ;
+   if (UseBiasedLocking) {
+     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+@@ -1290,7 +1367,7 @@
+ }
+ 
+ // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
+-void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {  
++void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
+   TEVENT (reenter) ;
+   if (UseBiasedLocking) {
+     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
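complete_exit()/reenter(), touched only for whitespace in the hunks above, exist solely to support the nested-lock wait protocol spelled out in the comment. A toy recursive monitor showing the same save-and-restore of the recursion count (illustrative only, not the HotSpot API):

    #include <mutex>

    class ToyMonitor {
        std::recursive_mutex m;
        int recursions = 0;
    public:
        void lock()   { m.lock(); ++recursions; }
        void unlock() { --recursions; m.unlock(); }
        int complete_exit() {              // 1) give up the lock entirely,
            int saved = recursions;        //    remembering the recursion depth
            for (int i = 0; i < saved; i++) unlock();
            return saved;
        }
        void reenter(int saved) {          // 4) reacquire at the original depth
            for (int i = 0; i < saved; i++) lock();
        }
    };
    // Between steps 1 and 4 the caller locks, waits on, and unlocks the
    // inner monitor (steps 2 and 3), per the ordering described above.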
+@@ -1309,13 +1386,13 @@
+ }
+ 
+ // NOTE: must use heavy weight monitor to handle wait()
+-void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {  
++void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
+   if (UseBiasedLocking) {
+     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+   }
+   if (millis < 0) {
+-    TEVENT (wait - throw IAX) ; 
++    TEVENT (wait - throw IAX) ;
+     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
+   }
+   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
+@@ -1328,16 +1405,16 @@
+   dtrace_waited_probe(monitor, obj, THREAD);
+ }
+ 
+-void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {  
++void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
+   if (UseBiasedLocking) {
+     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+   }
+   if (millis < 0) {
+-    TEVENT (wait - throw IAX) ; 
++    TEVENT (wait - throw IAX) ;
+     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
+   }
+-  ObjectSynchronizer::inflate(THREAD, obj()) -> wait(millis, false, THREAD) ; 
++  ObjectSynchronizer::inflate(THREAD, obj()) -> wait(millis, false, THREAD) ;
+ }
+ 
+ void ObjectSynchronizer::notify(Handle obj, TRAPS) {
+@@ -1354,7 +1431,7 @@
+ }
+ 
+ // NOTE: see comment of notify()
+-void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {  
++void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
+   if (UseBiasedLocking) {
+     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+@@ -1363,7 +1440,7 @@
+   markOop mark = obj->mark();
+   if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
+     return;
+-  } 
++  }
+   ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
+ }
+ 
+@@ -1378,13 +1455,13 @@
+     // thread-local storage.
+     if (obj->mark()->has_bias_pattern()) {
+       // Box and unbox the raw reference just in case we cause a STW safepoint.
+-      Handle hobj (Self, obj) ;         
++      Handle hobj (Self, obj) ;
+       // Relaxing assertion for bug 6320749.
+       assert (Universe::verify_in_progress() ||
+-	      !SafepointSynchronize::is_at_safepoint(),
+-	     "biases should not be seen by VM thread here");
++              !SafepointSynchronize::is_at_safepoint(),
++             "biases should not be seen by VM thread here");
+       BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
+-      obj = hobj() ; 
++      obj = hobj() ;
+       assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+     }
+   }
+@@ -1392,20 +1469,20 @@
+   // hashCode() is a heap mutator ...
+   // Relaxing assertion for bug 6320749.
+   assert (Universe::verify_in_progress() ||
+-	  !SafepointSynchronize::is_at_safepoint(), "invariant") ; 
++          !SafepointSynchronize::is_at_safepoint(), "invariant") ;
+   assert (Universe::verify_in_progress() ||
+-	  Self->is_Java_thread() , "invariant") ; 
++          Self->is_Java_thread() , "invariant") ;
+   assert (Universe::verify_in_progress() ||
+-	 ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
++         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
+ 
+   ObjectMonitor* monitor = NULL;
+   markOop temp, test;
+   intptr_t hash;
+   markOop mark = ReadStableMark (obj);
+ 
+-  // object should remain ineligible for biased locking 
+-  assert (!mark->has_bias_pattern(), "invariant") ; 
+- 
++  // object should remain ineligible for biased locking
++  assert (!mark->has_bias_pattern(), "invariant") ;
++
+   if (mark->is_neutral()) {
+     hash = mark->hash();              // this is a normal header
+     if (hash) {                       // if it has hash, just return it
+@@ -1424,7 +1501,7 @@
+   } else if (mark->has_monitor()) {
+     monitor = mark->monitor();
+     temp = monitor->header();
+-    assert (temp->is_neutral(), "invariant") ; 
++    assert (temp->is_neutral(), "invariant") ;
+     hash = temp->hash();
+     if (hash) {
+       return hash;
+@@ -1432,17 +1509,17 @@
+     // Skip to the following code to reduce code size
+   } else if (Self->is_lock_owned((address)mark->locker())) {
+     temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
+-    assert (temp->is_neutral(), "invariant") ; 
++    assert (temp->is_neutral(), "invariant") ;
+     hash = temp->hash();              // by current thread, check if the displaced
+     if (hash) {                       // header contains hash code
+       return hash;
+     }
+     // WARNING:
+     //   The displaced header is strictly immutable.
+-    // It can NOT be changed in ANY cases. So we have 
++    // It can NOT be changed in ANY cases. So we have
+     // to inflate the header into heavyweight monitor
+     // even the current thread owns the lock. The reason
+-    // is the BasicLock (stack slot) will be asynchronously 
++    // is the BasicLock (stack slot) will be asynchronously
+     // read by other threads during the inflate() function.
+     // Any change to stack may not propagate to other threads
+     // correctly.
+@@ -1452,19 +1529,19 @@
+   monitor = ObjectSynchronizer::inflate(Self, obj);
+   // Load displaced header and check it has hash code
+   mark = monitor->header();
+-  assert (mark->is_neutral(), "invariant") ; 
++  assert (mark->is_neutral(), "invariant") ;
+   hash = mark->hash();
+   if (hash == 0) {
+     hash = get_next_hash(Self, obj);
+     temp = mark->copy_set_hash(hash); // merge hash code into header
+-    assert (temp->is_neutral(), "invariant") ; 
++    assert (temp->is_neutral(), "invariant") ;
+     test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
+     if (test != mark) {
+       // The only update to the header in the monitor (outside GC)
+       // is install the hash code. If someone add new usage of
+       // displaced header, please update this code
+       hash = test->hash();
+-      assert (test->is_neutral(), "invariant") ; 
++      assert (test->is_neutral(), "invariant") ;
+       assert (hash != 0, "Trivial unexpected object/monitor header usage.");
+     }
+   }
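The FastHashCode() hunks above all funnel into one idiom: read the cached hash from a header, and if it is zero, generate one and CAS it into place, accepting a racing winner's value. Reduced to a stand-alone sketch (std::atomic standing in for the monitor header; the generator is a placeholder, not HotSpot's get_next_hash):

    #include <atomic>
    #include <cstdint>

    static std::uint32_t generate_hash() {            // placeholder generator
        static std::atomic<std::uint32_t> ctr{1};
        return ctr.fetch_add(0x9E3779B9u) | 1u;       // never yields 0
    }

    std::atomic<std::uint32_t> cached_hash{0};        // 0 means "not hashed yet"

    std::uint32_t fast_hash_code() {
        std::uint32_t h = cached_hash.load(std::memory_order_acquire);
        if (h != 0) return h;                         // already installed
        std::uint32_t fresh = generate_hash();
        std::uint32_t expected = 0;
        if (cached_hash.compare_exchange_strong(expected, fresh))
            return fresh;                             // we won the race
        return expected;                              // someone else installed one
    }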
+@@ -1475,7 +1552,7 @@
+ // Deprecated -- use FastHashCode() instead.
+ 
+ intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
+-  return FastHashCode (Thread::current(), obj()) ; 
++  return FastHashCode (Thread::current(), obj()) ;
+ }
+ 
+ bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
+@@ -1488,7 +1565,7 @@
+   assert(thread == JavaThread::current(), "Can only be called on current thread");
+   oop obj = h_obj();
+ 
+-  markOop mark = ReadStableMark (obj) ; 
++  markOop mark = ReadStableMark (obj) ;
+ 
+   // Uncontended case, header points to stack
+   if (mark->has_locker()) {
+@@ -1497,14 +1574,14 @@
+   // Contended case, header points to ObjectMonitor (tagged pointer)
+   if (mark->has_monitor()) {
+     ObjectMonitor* monitor = mark->monitor();
+-    return monitor->is_entered(thread) != 0 ; 
++    return monitor->is_entered(thread) != 0 ;
+   }
+   // Unlocked case, header in place
+   assert(mark->is_neutral(), "sanity check");
+   return false;
+ }
+ 
+-// Be aware of this method could revoke bias of the lock object. 
++// Be aware that this method could revoke the bias of the lock object.
+ // This method querys the ownership of the lock handle specified by 'h_obj'.
+ // If the current thread owns the lock, it returns owner_self. If no
+ // thread owns the lock, it returns owner_none. Otherwise, it will return
+@@ -1518,20 +1595,20 @@
+ 
+   // Possible mark states: neutral, biased, stack-locked, inflated
+ 
+-  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) { 
++  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
+     // CASE: biased
+     BiasedLocking::revoke_and_rebias(h_obj, false, self);
+-    assert(!h_obj->mark()->has_bias_pattern(), 
++    assert(!h_obj->mark()->has_bias_pattern(),
+            "biases should be revoked by now");
+   }
+ 
+   assert(self == JavaThread::current(), "Can only be called on current thread");
+   oop obj = h_obj();
+-  markOop mark = ReadStableMark (obj) ; 
++  markOop mark = ReadStableMark (obj) ;
+ 
+   // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
+   if (mark->has_locker()) {
+-    return self->is_lock_owned((address)mark->locker()) ? 
++    return self->is_lock_owned((address)mark->locker()) ?
+       owner_self : owner_other;
+   }
+ 
+@@ -1539,16 +1616,16 @@
+   // The Object:ObjectMonitor relationship is stable as long as we're
+   // not at a safepoint.
+   if (mark->has_monitor()) {
+-    void * owner = mark->monitor()->_owner ; 
++    void * owner = mark->monitor()->_owner ;
+     if (owner == NULL) return owner_none ;
+-    return (owner == self || 
++    return (owner == self ||
+             self->is_lock_owned((address)owner)) ? owner_self : owner_other;
+   }
+-  
++
+   // CASE: neutral
+   assert(mark->is_neutral(), "sanity check");
+-  return owner_none ; 		// it's unlocked
+-} 
++  return owner_none ;           // it's unlocked
++}
+ 
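query_lock_ownership() above dispatches on the possible mark states; after bias revocation only three remain. The decision tree, extracted into a compilable sketch over a simplified mark word (the layout is illustrative, not HotSpot's real header encoding):

    #include <cstdint>

    enum Ownership { owner_self, owner_other, owner_none };

    struct Mark {
        bool stack_locked; std::uintptr_t locker;   // frame address if stack-locked
        bool inflated;     const void*   owner;     // monitor owner if inflated
    };

    Ownership query(const Mark& m, const void* self,
                    bool owns_frame /* self->is_lock_owned(locker) */) {
        if (m.stack_locked)                         // header points into a stack
            return owns_frame ? owner_self : owner_other;
        if (m.inflated) {                           // header points to a monitor
            if (m.owner == nullptr) return owner_none;
            return m.owner == self ? owner_self : owner_other;
        }
        return owner_none;                          // neutral header: unlocked
    }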
+ // FIXME: jvmti should call this
+ JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
+@@ -1564,8 +1641,8 @@
+   oop obj = h_obj();
+   address owner = NULL;
+ 
+-  markOop mark = ReadStableMark (obj) ; 
+-  
++  markOop mark = ReadStableMark (obj) ;
++
+   // Uncontended case, header points to stack
+   if (mark->has_locker()) {
+     owner = (address) mark->locker();
+@@ -1590,7 +1667,7 @@
+   return NULL;
+ }
+ 
+-// Iterate through monitor cache and attempt to release thread's monitors 
++// Iterate through monitor cache and attempt to release thread's monitors
+ // Gives up on a particular monitor if an exception occurs, but continues
+ // the overall iteration, swallowing the exception.
+ class ReleaseJavaMonitorsClosure: public MonitorClosure {
+@@ -1603,7 +1680,7 @@
+     if (mid->owner() == THREAD) {
+       (void)mid->complete_exit(CHECK);
+     }
+-  } 
++  }
+ };
+ 
+ // Release all inflated monitors owned by THREAD.  Lightweight monitors are
+@@ -1615,19 +1692,19 @@
+ //
+ // Instead of No_Savepoint_Verifier it might be cheaper to
+ // use an idiom of the form:
+-//   auto int tmp = SafepointSynchronize::_safepoint_counter ; 
++//   auto int tmp = SafepointSynchronize::_safepoint_counter ;
+ //   <code that must not run at safepoint>
+-//   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ; 
++//   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
+ // Since the tests are extremely cheap we could leave them enabled
+-// for normal product builds.  
++// for normal product builds.
+ 
+ void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
+   assert(THREAD == JavaThread::current(), "must be current Java thread");
+-  No_Safepoint_Verifier nsv ;       
++  No_Safepoint_Verifier nsv ;
+   ReleaseJavaMonitorsClosure rjmc(THREAD);
+-  muxAcquire(&ListLock, "release_monitors_owned_by_thread");
++  Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread");
+   ObjectSynchronizer::monitors_iterate(&rjmc);
+-  muxRelease(&ListLock);
++  Thread::muxRelease(&ListLock);
+   THREAD->clear_pending_exception();
+ }
+ 
+@@ -1656,42 +1733,42 @@
+     for (int i = 1; i < _BLOCKSIZE; i++) {
+       ObjectMonitor* mid = &block[i];
+       if (mid->object() != NULL) {
+-	f->do_oop((oop*)mid->object_addr());
++        f->do_oop((oop*)mid->object_addr());
+       }
+     }
+   }
+ }
+ 
+ // Deflate_idle_monitors() is called at all safepoints, immediately
+-// after all mutators are stopped, but before any objects have moved.  
+-// It traverses the list of known monitors, deflating where possible.  
+-// The scavenged monitor are returned to the monitor free list.  
++// after all mutators are stopped, but before any objects have moved.
++// It traverses the list of known monitors, deflating where possible.
++// The scavenged monitors are returned to the monitor free list.
+ //
+ // Beware that we scavenge at *every* stop-the-world point.
+ // Having a large number of monitors in-circulation negatively
+-// impacts the performance of some applications (e.g., PointBase). 
+-// Broadly, we want to minimize the # of monitors in circulation. 
++// impacts the performance of some applications (e.g., PointBase).
++// Broadly, we want to minimize the # of monitors in circulation.
+ // Alternately, we could partition the active monitors into sub-lists
+-// of those that need scanning and those that do not.  
++// of those that need scanning and those that do not.
+ // Specifically, we would add a new sub-list of objectmonitors
+ // that are in-circulation and potentially active.  deflate_idle_monitors()
+ // would scan only that list.  Other monitors could reside on a quiescent
+-// list.  Such sequestered monitors wouldn't need to be scanned by 
+-// deflate_idle_monitors().  omAlloc() would first check the global free list, 
+-// then the quiescent list, and, failing those, would allocate a new block. 
++// list.  Such sequestered monitors wouldn't need to be scanned by
++// deflate_idle_monitors().  omAlloc() would first check the global free list,
++// then the quiescent list, and, failing those, would allocate a new block.
+ // Deflate_idle_monitors() would scavenge and move monitors to the
+-// quiescent list.  
++// quiescent list.
+ //
+-// Perversely, the heap size -- and thus the STW safepoint rate -- 
++// Perversely, the heap size -- and thus the STW safepoint rate --
+ // typically drives the scavenge rate.  Large heaps can mean infrequent GC,
+-// which in turn can mean large(r) numbers of objectmonitors in circulation.  
++// which in turn can mean large(r) numbers of objectmonitors in circulation.
+ // This is an unfortunate aspect of this design.
+ //
+ // Another refinement would be to refrain from calling deflate_idle_monitors()
+-// except at stop-the-world points associated with garbage collections.  
++// except at stop-the-world points associated with garbage collections.
+ //
+ // An even better solution would be to deflate on-the-fly, aggressively,
+-// at monitorexit-time as is done in EVM's metalock or Relaxed Locks.  
++// at monitorexit-time as is done in EVM's metalock or Relaxed Locks.
+ 
+ void ObjectSynchronizer::deflate_idle_monitors() {
+   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+@@ -1700,39 +1777,39 @@
+   int nScavenged = 0 ;          // reclaimed
+ 
+   ObjectMonitor * FreeHead = NULL ;  // Local SLL of scavenged monitors
+-  ObjectMonitor * FreeTail = NULL ; 
++  ObjectMonitor * FreeTail = NULL ;
+ 
+-  // Iterate over all extant monitors - Scavenge all idle monitors. 
+-  TEVENT (deflate_idle_monitors) ; 
++  // Iterate over all extant monitors - Scavenge all idle monitors.
++  TEVENT (deflate_idle_monitors) ;
+   for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
+     assert(block->object() == CHAINMARKER, "must be a block header");
+-    nInCirculation += _BLOCKSIZE ; 
++    nInCirculation += _BLOCKSIZE ;
+     for (int i = 1 ; i < _BLOCKSIZE; i++) {
+       ObjectMonitor* mid = &block[i];
+       oop obj = (oop) mid->object();
+ 
+-      if (obj == NULL) { 
++      if (obj == NULL) {
+         // The monitor is not associated with an object.
+         // The monitor should either be a thread-specific private
+         // free list or the global free list.
+-        // obj == NULL IMPLIES mid->is_busy() == 0  
+-        guarantee (!mid->is_busy(), "invariant") ; 
+-        continue ; 
++        // obj == NULL IMPLIES mid->is_busy() == 0
++        guarantee (!mid->is_busy(), "invariant") ;
++        continue ;
+       }
+ 
+-      // Normal case ... The monitor is associated with obj.  
+-      guarantee (obj->mark() == markOopDesc::encode(mid), "invariant") ; 
++      // Normal case ... The monitor is associated with obj.
++      guarantee (obj->mark() == markOopDesc::encode(mid), "invariant") ;
+       guarantee (mid == obj->mark()->monitor(), "invariant");
+       guarantee (mid->header()->is_neutral(), "invariant");
+ 
+       if (mid->is_busy()) {
+-         if (ClearResponsibleAtSTW) mid->_Responsible = NULL ; 
+-         nInuse ++ ; 
+-      } else { 
++         if (ClearResponsibleAtSTW) mid->_Responsible = NULL ;
++         nInuse ++ ;
++      } else {
+          // Deflate the monitor if it is no longer being used
+          // It's idle - scavenge and return to the global free list
+          // plain old deflation ...
+-         TEVENT (deflate_idle_monitors - scavenge1) ; 
++         TEVENT (deflate_idle_monitors - scavenge1) ;
+          if (TraceMonitorInflation) {
+            if (obj->is_instance()) {
+              ResourceMark rm;
+@@ -1748,40 +1825,40 @@
+          assert (mid->object() == NULL, "invariant") ;
+ 
+          // Move the object to the working free list defined by FreeHead,FreeTail.
+-         mid->FreeNext = NULL ; 
+-         if (FreeHead == NULL) FreeHead = mid ; 
+-         if (FreeTail != NULL) FreeTail->FreeNext = mid ; 
+-         FreeTail = mid ; 
+-         nScavenged ++ ; 
++         mid->FreeNext = NULL ;
++         if (FreeHead == NULL) FreeHead = mid ;
++         if (FreeTail != NULL) FreeTail->FreeNext = mid ;
++         FreeTail = mid ;
++         nScavenged ++ ;
+       }
+     }
+   }
+ 
+-  // Move the scavenged monitors back to the global free list.  
+-  // In theory we don't need the freelist lock as we're at a STW safepoint. 
++  // Move the scavenged monitors back to the global free list.
++  // In theory we don't need the freelist lock as we're at a STW safepoint.
+   // omAlloc() and omFree() can only be called while a thread is _not in safepoint state.
+   // But it's remotely possible that omFlush() or release_monitors_owned_by_thread()
+   // might be called while not at a global STW safepoint.  In the interest of
+-  // safety we protect the following access with ListLock.  
++  // safety we protect the following access with ListLock.
+   // An even more conservative and prudent approach would be to guard
+-  // the main loop in scavenge_idle_monitors() with ListLock.  
++  // the main loop in scavenge_idle_monitors() with ListLock.
+   if (FreeHead != NULL) {
+-     guarantee (FreeTail != NULL && nScavenged > 0, "invariant") ; 
+-     assert (FreeTail->FreeNext == NULL, "invariant") ; 
++     guarantee (FreeTail != NULL && nScavenged > 0, "invariant") ;
++     assert (FreeTail->FreeNext == NULL, "invariant") ;
+      // constant-time list splice - prepend scavenged segment to gFreeList
+-     muxAcquire (&ListLock, "scavenge - return") ;  
+-     FreeTail->FreeNext = gFreeList ; 
+-     gFreeList = FreeHead ; 
+-     muxRelease (&ListLock) ; 
+-  } 
++     Thread::muxAcquire (&ListLock, "scavenge - return") ;
++     FreeTail->FreeNext = gFreeList ;
++     gFreeList = FreeHead ;
++     Thread::muxRelease (&ListLock) ;
++  }
+ 
+-  if (_sync_Deflations != NULL) _sync_Deflations->inc(nScavenged) ; 
+-  if (_sync_MonExtant  != NULL) _sync_MonExtant ->set_value(nInCirculation);  
++  if (_sync_Deflations != NULL) _sync_Deflations->inc(nScavenged) ;
++  if (_sync_MonExtant  != NULL) _sync_MonExtant ->set_value(nInCirculation);
+ 
+-  // TODO: Add objectMonitor leak detection.  
++  // TODO: Add objectMonitor leak detection.
+   // Audit/inventory the objectMonitors -- make sure they're all accounted for.
+-  GVars.stwRandom = os::random() ; 
+-  GVars.stwCycle ++ ; 
++  GVars.stwRandom = os::random() ;
++  GVars.stwCycle ++ ;
+ }
+ 
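The deflation loop above boils down to: walk every monitor block, leave busy monitors alone, and splice idle ones onto a local singly-linked list that is later prepended to gFreeList under ListLock. A skeleton of that pass with simplified types (names illustrative):

    #include <cstddef>

    struct Mon { Mon* FreeNext; bool busy; void* object; };

    // Returns the head of the local free list; the tail is handed back so
    // the caller can splice the whole segment in constant time under the lock.
    Mon* deflate(Mon* all, std::size_t n, Mon*& tail_out) {
        Mon* head = nullptr; Mon* tail = nullptr;
        for (std::size_t i = 0; i < n; i++) {
            Mon* mid = &all[i];
            if (mid->object == nullptr || mid->busy) continue;  // free / in use
            mid->object = nullptr;                   // detach from its object
            mid->FreeNext = nullptr;                 // append to the local SLL
            if (head == nullptr) head = mid;
            if (tail != nullptr) tail->FreeNext = mid;
            tail = mid;
        }
        tail_out = tail;
        return head;
    }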
+ // A macro is used below because there may already be a pending
+@@ -1828,44 +1905,44 @@
+   _object       = NULL;
+   _owner        = NULL;
+   _WaitSet      = NULL;
+-  _WaitSetLock  = 0 ; 
+-  _Responsible  = NULL ; 
+-  _succ	        = NULL ; 
+-  _cxq          = NULL ; 
+-  FreeNext      = NULL ; 
+-  _EntryList    = NULL ; 
+-  _SpinFreq     = 0 ; 
+-  _SpinClock    = 0 ; 
+-  OwnerIsThread = 0 ; 
++  _WaitSetLock  = 0 ;
++  _Responsible  = NULL ;
++  _succ         = NULL ;
++  _cxq          = NULL ;
++  FreeNext      = NULL ;
++  _EntryList    = NULL ;
++  _SpinFreq     = 0 ;
++  _SpinClock    = 0 ;
++  OwnerIsThread = 0 ;
+ }
+ 
+ ObjectMonitor::~ObjectMonitor() {
+    // TODO: Add asserts ...
+-   // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0 
++   // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
+    // _count == 0 _EntryList  == NULL etc
+ }
+ 
+ intptr_t ObjectMonitor::is_busy() const {
+-  // TODO-FIXME: merge _count and _waiters.  
++  // TODO-FIXME: merge _count and _waiters.
+   // TODO-FIXME: assert _owner == null implies _recursions = 0
+   // TODO-FIXME: assert _WaitSet != null implies _count > 0
+-  return _count|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList ) ; 
++  return _count|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList ) ;
+ }
+ 
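is_busy() above relies on an OR-fold: every field that indicates activity is OR-ed into a single word, so one zero test answers "is anything using this monitor?". The same trick in isolation:

    #include <cstdint>

    struct MonState {
        std::intptr_t count, waiters;
        void *owner, *cxq, *entry_list;
    };

    // Nonzero iff any field is nonzero; a single comparison replaces five.
    std::intptr_t is_busy(const MonState& s) {
        return s.count | s.waiters
             | reinterpret_cast<std::intptr_t>(s.owner)
             | reinterpret_cast<std::intptr_t>(s.cxq)
             | reinterpret_cast<std::intptr_t>(s.entry_list);
    }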
+ void ObjectMonitor::Recycle () {
+   // TODO: add stronger asserts ...
+-  // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0 
++  // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
+   // _count == 0 EntryList  == NULL
+   // _recursions == 0 _WaitSet == NULL
+-  // TODO: assert (is_busy()|_recursions) == 0 
+-  _succ	         = NULL ; 
+-  _EntryList     = NULL ; 
+-  _cxq           = NULL ; 
+-  _WaitSet       = NULL ; 
+-  _recursions    = 0 ; 
+-  _SpinFreq      = 0 ; 
+-  _SpinClock     = 0 ; 
+-  OwnerIsThread  = 0 ; 
++  // TODO: assert (is_busy()|_recursions) == 0
++  _succ          = NULL ;
++  _EntryList     = NULL ;
++  _cxq           = NULL ;
++  _WaitSet       = NULL ;
++  _recursions    = 0 ;
++  _SpinFreq      = 0 ;
++  _SpinClock     = 0 ;
++  OwnerIsThread  = 0 ;
+ }
+ 
+ // WaitSet management ...
+@@ -1904,7 +1981,7 @@
+   assert(node->_prev != NULL, "node already removed from list");
+   assert(node->_next != NULL, "node already removed from list");
+   // when the waiter has woken up because of interrupt,
+-  // timeout or other spurious wake-up, dequeue the 
++  // timeout or other spurious wake-up, dequeue the
+   // waiter from waiting list
+   ObjectWaiter* next = node->_next;
+   if (next == node) {
+@@ -1925,125 +2002,125 @@
+ }
+ 
+ static char * kvGet (char * kvList, const char * Key) {
+-    if (kvList == NULL) return NULL ; 
+-    size_t n = strlen (Key) ; 
+-    char * Search ; 
+-    for (Search = kvList ; *Search ; Search += strlen(Search) + 1) { 
+-        if (strncmp (Search, Key, n) == 0) { 
+-            if (Search[n] == '=') return Search + n + 1 ; 
+-            if (Search[n] == 0)   return (char *) "1" ; 
++    if (kvList == NULL) return NULL ;
++    size_t n = strlen (Key) ;
++    char * Search ;
++    for (Search = kvList ; *Search ; Search += strlen(Search) + 1) {
++        if (strncmp (Search, Key, n) == 0) {
++            if (Search[n] == '=') return Search + n + 1 ;
++            if (Search[n] == 0)   return (char *) "1" ;
+         }
+     }
+-    return NULL ; 
++    return NULL ;
+ }
+ 
+ static int kvGetInt (char * kvList, const char * Key, int Default) {
+-    char * v = kvGet (kvList, Key) ; 
+-    int rslt = v ? ::strtol (v, NULL, 0) : Default ; 
++    char * v = kvGet (kvList, Key) ;
++    int rslt = v ? ::strtol (v, NULL, 0) : Default ;
+     if (Knob_ReportSettings && v != NULL) {
+-        ::printf ("  SyncKnob: %s %d(%d)\n", Key, rslt, Default) ; 
+-        ::fflush (stdout) ; 
++        ::printf ("  SyncKnob: %s %d(%d)\n", Key, rslt, Default) ;
++        ::fflush (stdout) ;
+     }
+-    return rslt ; 
++    return rslt ;
+ }
+ 
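kvGet()/kvGetInt() above parse a packed list of NUL-terminated "key=value" entries, terminated by an empty string, where a bare key reads as "1". A runnable demonstration of the layout (the keys here are made up for illustration):

    #include <cstring>
    #include <cstdio>

    int main() {
        // Two entries, "SpinLimit=5000" and bare "Verbose"; the string
        // literal's implicit trailing NUL terminates the list.
        char kv[] = "SpinLimit=5000\0Verbose\0";
        for (char* s = kv; *s; s += std::strlen(s) + 1)
            std::printf("entry: %s\n", s);
        return 0;
    }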
+ // By convention we unlink a contending thread from EntryList|cxq immediately
+ // after the thread acquires the lock in ::enter().  Equally, we could defer
+-// unlinking the thread until ::exit()-time.  
++// unlinking the thread until ::exit()-time.
+ 
+ void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode)
+ {
+-    assert (_owner == Self, "invariant") ; 
+-    assert (SelfNode->_thread == Self, "invariant") ; 
++    assert (_owner == Self, "invariant") ;
++    assert (SelfNode->_thread == Self, "invariant") ;
+ 
+     if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
+         // Normal case: remove Self from the DLL EntryList .
+         // This is a constant-time operation.
+-        ObjectWaiter * nxt = SelfNode->_next ; 
+-        ObjectWaiter * prv = SelfNode->_prev ; 
+-        if (nxt != NULL) nxt->_prev = prv ; 
+-        if (prv != NULL) prv->_next = nxt ; 
+-        if (SelfNode == _EntryList ) _EntryList = nxt ; 
+-        assert (nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant") ; 
+-        assert (prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant") ; 
+-        TEVENT (Unlink from EntryList) ; 
++        ObjectWaiter * nxt = SelfNode->_next ;
++        ObjectWaiter * prv = SelfNode->_prev ;
++        if (nxt != NULL) nxt->_prev = prv ;
++        if (prv != NULL) prv->_next = nxt ;
++        if (SelfNode == _EntryList ) _EntryList = nxt ;
++        assert (nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant") ;
++        assert (prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant") ;
++        TEVENT (Unlink from EntryList) ;
+     } else {
+-        guarantee (SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant") ; 
++        guarantee (SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant") ;
+         // Inopportune interleaving -- Self is still on the cxq.
+-        // This usually means the enqueue of self raced an exiting thread. 
++        // This usually means the enqueue of self raced an exiting thread.
+         // Normally we'll find Self near the front of the cxq, so
+         // dequeueing is typically fast.  If needbe we can accelerate
+-        // this with some MCS/CHL-like bidirectional list hints and advisory 
++        // this with some MCS/CHL-like bidirectional list hints and advisory
+         // back-links so dequeueing from the interior will normally operate
+         // in constant-time.
+         // Dequeue Self from either the head (with CAS) or from the interior
+         // with a linear-time scan and normal non-atomic memory operations.
+         // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
+         // and then unlink Self from EntryList.  We have to drain eventually,
+-        // so it might as well be now.  
++        // so it might as well be now.
+ 
+-        ObjectWaiter * v = _cxq ; 
+-        assert (v != NULL, "invariant") ; 
+-        if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) { 
++        ObjectWaiter * v = _cxq ;
++        assert (v != NULL, "invariant") ;
++        if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
+             // The CAS above can fail from interference IFF a "RAT" arrived.
+             // In that case Self must be in the interior and can no longer be
+             // at the head of cxq.
+-            if (v == SelfNode) { 
+-                assert (_cxq != v, "invariant") ; 
++            if (v == SelfNode) {
++                assert (_cxq != v, "invariant") ;
+                 v = _cxq ;          // CAS above failed - start scan at head of list
+             }
+-            ObjectWaiter * p ; 
+-            ObjectWaiter * q = NULL ; 
++            ObjectWaiter * p ;
++            ObjectWaiter * q = NULL ;
+             for (p = v ; p != NULL && p != SelfNode; p = p->_next) {
+-                q = p ; 
+-                assert (p->TState == ObjectWaiter::TS_CXQ, "invariant") ; 
++                q = p ;
++                assert (p->TState == ObjectWaiter::TS_CXQ, "invariant") ;
+             }
+-            assert (v != SelfNode,  "invariant") ; 
+-            assert (p == SelfNode,  "Node not found on cxq") ; 
+-            assert (p != _cxq,      "invariant") ; 
+-            assert (q != NULL,      "invariant") ; 
++            assert (v != SelfNode,  "invariant") ;
++            assert (p == SelfNode,  "Node not found on cxq") ;
++            assert (p != _cxq,      "invariant") ;
++            assert (q != NULL,      "invariant") ;
+             assert (q->_next == p,  "invariant") ;
+-            q->_next = p->_next ; 
++            q->_next = p->_next ;
+         }
+-        TEVENT (Unlink from cxq) ; 
+-    } 
++        TEVENT (Unlink from cxq) ;
++    }
+ 
+     // Diagnostic hygiene ...
+-    SelfNode->_prev  = (ObjectWaiter *) 0xBAD ; 
+-    SelfNode->_next  = (ObjectWaiter *) 0xBAD ; 
+-    SelfNode->TState = ObjectWaiter::TS_RUN ;   
++    SelfNode->_prev  = (ObjectWaiter *) 0xBAD ;
++    SelfNode->_next  = (ObjectWaiter *) 0xBAD ;
++    SelfNode->TState = ObjectWaiter::TS_RUN ;
+ }
+ 
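UnlinkAfterAcquire() above removes a node from the cxq either by CASing it off the head or, on interference, by a linear predecessor scan. The fallback path, reduced to a sketch (single list, no TState bookkeeping, simplified concurrency):

    #include <atomic>

    struct Waiter { Waiter* next; };

    void unlink(std::atomic<Waiter*>& cxq, Waiter* self) {
        Waiter* head = cxq.load();
        if (head == self &&
            cxq.compare_exchange_strong(head, self->next))
            return;                                  // removed from the head
        // Self is in the interior: linear scan for the predecessor.
        Waiter* q = cxq.load();
        while (q != nullptr && q->next != self) q = q->next;
        if (q != nullptr) q->next = self->next;      // splice out
    }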
+ // Caveat: TryLock() is not necessarily serializing if it returns failure.
+-// Callers must compensate as needed. 
++// Callers must compensate as needed.
+ 
+-int ObjectMonitor::TryLock (Thread * Self) { 
+-   for (;;) { 
+-      void * own = _owner ; 
+-      if (own != NULL) return 0 ; 
+-      if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) { 
+-         // Either guarantee _recursions == 0 or set _recursions = 0.  
+-         assert (_recursions == 0, "invariant") ; 
+-         assert (_owner == Self, "invariant") ; 
++int ObjectMonitor::TryLock (Thread * Self) {
++   for (;;) {
++      void * own = _owner ;
++      if (own != NULL) return 0 ;
++      if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
++         // Either guarantee _recursions == 0 or set _recursions = 0.
++         assert (_recursions == 0, "invariant") ;
++         assert (_owner == Self, "invariant") ;
+          // CONSIDER: set or assert that OwnerIsThread == 1
+-         return 1 ; 
++         return 1 ;
+       }
+       // The lock had been free momentarily, but we lost the race to the lock.
+       // Interference -- the CAS failed.
+-      // We can either return -1 or retry. 
++      // We can either return -1 or retry.
+       // Retry doesn't make as much sense because the lock was just acquired.
+-      if (true) return -1 ; 
++      if (true) return -1 ;
+    }
+ }
+ 
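TryLock() above encodes three outcomes: 1 on acquisition, 0 when the lock is visibly held, and -1 when the CAS loses a race on a momentarily free lock. An equivalent stand-alone version with std::atomic:

    #include <atomic>

    std::atomic<void*> owner{nullptr};

    int try_lock(void* self) {
        void* own = owner.load(std::memory_order_relaxed);
        if (own != nullptr) return 0;                // visibly held: don't bother
        void* expected = nullptr;
        if (owner.compare_exchange_strong(expected, self,
                                          std::memory_order_acquire))
            return 1;                                // we own the lock
        return -1;                                   // momentarily free, lost the CAS
    }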
+ // NotRunnable() -- informed spinning
+ //
+-// Don't bother spinning if the owner is not eligible to drop the lock.  
++// Don't bother spinning if the owner is not eligible to drop the lock.
+ // Peek at the owner's schedctl.sc_state and Thread._thread_values and
+-// spin only if the owner thread is _thread_in_Java or _thread_in_vm.  
++// spin only if the owner thread is _thread_in_Java or _thread_in_vm.
+ // The thread must be runnable in order to drop the lock in timely fashion.
+-// If the _owner is not runnable then spinning will not likely be 
+-// successful (profitable). 
++// If the _owner is not runnable then spinning will not likely be
++// successful (profitable).
+ //
+ // Beware -- the thread referenced by _owner could have died
+ // so a simply fetch from _owner->_thread_state might trap.
+@@ -2051,74 +2128,74 @@
+ // Because of the lifecycle issues the schedctl and _thread_state values
+ // observed by NotRunnable() might be garbage.  NotRunnable must
+ // tolerate this and consider the observed _thread_state value
+-// as advisory.  
++// as advisory.
+ //
+ // Beware too, that _owner is sometimes a BasicLock address and sometimes
+-// a thread pointer.  We differentiate the two cases with OwnerIsThread.  
++// a thread pointer.  We differentiate the two cases with OwnerIsThread.
+ // Alternately, we might tag the type (thread pointer vs basiclock pointer)
+ // with the LSB of _owner.  Another option would be to probablistically probe
+-// the putative _owner->TypeTag value.  
++// the putative _owner->TypeTag value.
+ //
+ // Checking _thread_state isn't perfect.  Even if the thread is
+ // in_java it might be blocked on a page-fault or have been preempted
+ // and sitting on a ready/dispatch queue.  _thread state in conjunction
+ // with schedctl.sc_state gives us a good picture of what the
+-// thread is doing, however.   
++// thread is doing, however.
+ //
+-// TODO: check schedctl.sc_state.   
+-// We'll need to use SafeFetch32() to read from the schedctl block. 
++// TODO: check schedctl.sc_state.
++// We'll need to use SafeFetch32() to read from the schedctl block.
+ // See RFE #5004247 and http://sac.sfbay.sun.com/Archives/CaseLog/arc/PSARC/2005/351/
+ //
+ // The return value from NotRunnable() is *advisory* -- the
+-// result is based on sampling and is not necessarily coherent. 
++// result is based on sampling and is not necessarily coherent.
+ // The caller must tolerate false-negative and false-positive errors.
+-// Spinning, in general, is probabilistic anyway.  
++// Spinning, in general, is probabilistic anyway.
+ 
+ 
+-int ObjectMonitor::NotRunnable (Thread * Self, Thread * ox) { 
++int ObjectMonitor::NotRunnable (Thread * Self, Thread * ox) {
+     // Check either OwnerIsThread or ox->TypeTag == 2BAD.
+-    if (!OwnerIsThread) return 0 ; 
++    if (!OwnerIsThread) return 0 ;
+ 
+-    if (ox == NULL) return 0 ; 
++    if (ox == NULL) return 0 ;
+ 
+     // Avoid transitive spinning ...
+     // Say T1 spins or blocks trying to acquire L.  T1._Stalled is set to L.
+     // Immediately after T1 acquires L it's possible that T2, also
+-    // spinning on L, will see L.Owner=T1 and T1._Stalled=L.  
++    // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
+     // This occurs transiently after T1 acquired L but before
+     // T1 managed to clear T1.Stalled.  T2 does not need to abort
+-    // its spin in this circumstance.  
+-    intptr_t BlockedOn = SafeFetchN ((intptr_t *) &ox->_Stalled, intptr_t(1)) ; 
++    // its spin in this circumstance.
++    intptr_t BlockedOn = SafeFetchN ((intptr_t *) &ox->_Stalled, intptr_t(1)) ;
+ 
+-    if (BlockedOn == 1) return 1 ; 
+-    if (BlockedOn != 0) { 
+-      return BlockedOn != intptr_t(this) && _owner == ox ; 
++    if (BlockedOn == 1) return 1 ;
++    if (BlockedOn != 0) {
++      return BlockedOn != intptr_t(this) && _owner == ox ;
+     }
+ 
+-    assert (sizeof(((JavaThread *)ox)->_thread_state == sizeof(int)), "invariant") ; 
+-    int jst = SafeFetch32 ((int *) &((JavaThread *) ox)->_thread_state, -1) ; ; 
++    assert (sizeof(((JavaThread *)ox)->_thread_state) == sizeof(int), "invariant") ;
++    int jst = SafeFetch32 ((int *) &((JavaThread *) ox)->_thread_state, -1) ;
+     // consider also: jst != _thread_in_Java -- but that's overspecific.
+-    return jst == _thread_blocked || jst == _thread_in_native ; 
++    return jst == _thread_blocked || jst == _thread_in_native ;
+ }
+ 
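NotRunnable() above is an advisory filter: it samples possibly stale owner state and tells the spinner whether the owner could plausibly release the lock soon. The core predicate, minus the SafeFetch machinery (states simplified for illustration):

    // The state read is racy by design; callers must tolerate stale answers
    // in both directions, as the surrounding commentary explains.
    enum ThreadState { in_java, in_vm, blocked, in_native };

    bool worth_spinning(ThreadState observed_owner_state) {
        // Only a runnable owner can drop the lock in a timely fashion.
        return observed_owner_state == in_java || observed_owner_state == in_vm;
    }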
+-      
++
+ // Adaptive spin-then-block - rational spinning
+ //
+-// Note that we spin "globally" on _owner with a classic SMP-polite TATAS 
+-// algorithm.  On high order SMP systems it would be better to start with 
++// Note that we spin "globally" on _owner with a classic SMP-polite TATAS
++// algorithm.  On high order SMP systems it would be better to start with
+ // a brief global spin and then revert to spinning locally.  In the spirit of MCS/CLH,
+ // a contending thread could enqueue itself on the cxq and then spin locally
+-// on a thread-specific variable such as its ParkEvent._Event flag.  
++// on a thread-specific variable such as its ParkEvent._Event flag.
+ // That's left as an exercise for the reader.  Note that global spinning is
+ // not problematic on Niagara, as the L2$ serves the interconnect and has both
+-// low latency and massive bandwidth.  
++// low latency and massive bandwidth.
+ //
+-// Broadly, we can fix the spin frequency -- that is, the % of contended lock 
++// Broadly, we can fix the spin frequency -- that is, the % of contended lock
+ // acquisition attempts where we opt to spin --  at 100% and vary the spin count
+-// (duration) or we can fix the count at approximately the duration of 
++// (duration) or we can fix the count at approximately the duration of
+ // a context switch and vary the frequency.   Of course we could also
+ // vary both satisfying K == Frequency * Duration, where K is adaptive by monitor.
+-// See http://j2se.east/~dice/PERSIST/040824-AdaptiveSpinning.html. 
++// See http://j2se.east/~dice/PERSIST/040824-AdaptiveSpinning.html.
+ //
+ // This implementation varies the duration "D", where D varies with
+ // the success rate of recent spin attempts. (D is capped at approximately
+@@ -2133,8 +2210,8 @@
+ // or atomics.  The code is designed so that _SpinDuration stays within
+ // a reasonable range even in the presence of races.  The arithmetic
+ // operations on _SpinDuration are closed over the domain of legal values,
+-// so at worst a race will install and older but still legal value.  
+-// At the very worst this introduces some apparent non-determinism.  
++// so at worst a race will install an older but still legal value.
++// At the very worst this introduces some apparent non-determinism.
+ // We might spin when we shouldn't or vice-versa, but since the spin
+ // count are relatively short, even in the worst case, the effect is harmless.
+ //
+@@ -2143,299 +2220,299 @@
+ // is overall profitable -- should not cause the system to converge
+ // on low "D" values.  We want spinning to be stable and predictable
+ // and fairly responsive to change and at the same time we don't want
+-// it to oscillate, become metastable, be "too" non-deterministic, 
+-// or converge on or enter undesirable stable absorbing states.  
++// it to oscillate, become metastable, be "too" non-deterministic,
++// or converge on or enter undesirable stable absorbing states.
+ //
+ // We implement a feedback-based control system -- using past behavior
+-// to predict future behavior.  We face two issues: (a) if the 
++// to predict future behavior.  We face two issues: (a) if the
+ // input signal is random then the spin predictor won't provide optimal
+-// results, and (b) if the signal frequency is too high then the control 
+-// system, which has some natural response lag, will "chase" the signal.  
++// results, and (b) if the signal frequency is too high then the control
++// system, which has some natural response lag, will "chase" the signal.
+ // (b) can arise from multimodal lock hold times.  Transient preemption
+ // can also result in apparent bimodal lock hold times.
+-// Although sub-optimal, neither condition is particularly harmful, as 
+-// in the worst-case we'll spin when we shouldn't or vice-versa.   
++// Although sub-optimal, neither condition is particularly harmful, as
++// in the worst-case we'll spin when we shouldn't or vice-versa.
+ // The maximum spin duration is rather short so the failure modes aren't bad.
+ // To be conservative, I've tuned the gain in system to bias toward
+ // _not spinning.  Relatedly, the system can sometimes enter a mode where it
+ // "rings" or oscillates between spinning and not spinning.  This happens
+-// when spinning is just on the cusp of profitability, however, so the 
+-// situation is not dire.  The state is benign -- there's no need to add 
+-// hysterisis control to damp the transition rate between spinning and 
++// when spinning is just on the cusp of profitability, however, so the
++// situation is not dire.  The state is benign -- there's no need to add
++// hysteresis control to damp the transition rate between spinning and
+ // not spinning.
+ //
+ // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+ //
+ // Spin-then-block strategies ...
+ //
+-// Thoughts on ways to improve spinning : 
+-// 
++// Thoughts on ways to improve spinning :
++//
+ // *  Periodically call {psr_}getloadavg() while spinning, and
+-//    permit unbounded spinning if the load average is < 
++//    permit unbounded spinning if the load average is <
+ //    the number of processors.  Beware, however, that getloadavg()
+ //    is exceptionally fast on solaris (about 1/10 the cost of a full
+ //    spin cycle, but quite expensive on linux.  Beware also, that
+ //    multiple JVMs could "ring" or oscillate in a feedback loop.
+-//    Sufficient damping would solve that problem.  
++//    Sufficient damping would solve that problem.
+ //
+-// *  We currently use spin loops with iteration counters to approximate 
+-//    spinning for some interval.  Given the availability of high-precision 
+-//    time sources such as gethrtime(), %TICK, %STICK, RDTSC, etc., we should 
+-//    someday reimplement the spin loops to duration-based instead of iteration-based.  
++// *  We currently use spin loops with iteration counters to approximate
++//    spinning for some interval.  Given the availability of high-precision
++//    time sources such as gethrtime(), %TICK, %STICK, RDTSC, etc., we should
++//    someday reimplement the spin loops to be duration-based instead of iteration-based.
+ //
+ // *  Don't spin if there are more than N = (CPUs/2) threads
+-//	  currently spinning on the monitor (or globally).  
+-//    That is, limit the number of concurrent spinners. 
++//        currently spinning on the monitor (or globally).
++//    That is, limit the number of concurrent spinners.
+ //    We might also limit the # of spinners in the JVM, globally.
+ //
+ // *  If a spinning thread observes _owner change hands it should
+ //    abort the spin (and park immediately) or at least debit
+-//    the spin counter by a large "penalty".  
+-// 
++//    the spin counter by a large "penalty".
++//
+ // *  Classically, the spin count is either K*(CPUs-1) or is a
+-//	  simple constant that approximates the length of a context switch.  
++//        simple constant that approximates the length of a context switch.
+ //    We currently use a value -- computed by a special utility -- that
+-//    approximates round-trip context switch times.  
++//    approximates round-trip context switch times.
+ //
+ // *  Normally schedctl_start()/_stop() is used to advise the kernel
+ //    to avoid preempting threads that are running in short, bounded
+ //    critical sections.  We could use the schedctl hooks in an inverted
+ //    sense -- spinners would set the nopreempt flag, but poll the preempt
+ //    pending flag.  If a spinner observed a pending preemption it'd immediately
+-//    abort the spin and park.   As such, the schedctl service acts as 
+-//    a preemption warning mechanism.  
++//    abort the spin and park.   As such, the schedctl service acts as
++//    a preemption warning mechanism.
+ //
+ // *  In lieu of spinning, if the system is running below saturation
+ //    (that is, loadavg() << #cpus), we can instead suppress futile
+ //    wakeup throttling, or even wake more than one successor at exit-time.
+ //    The net effect is largely equivalent to spinning.  In both cases,
+-//    contenting threads go ONPROC and opportunistically attempt to acquire
++//    contending threads go ONPROC and opportunistically attempt to acquire
+ //    the lock, decreasing lock handover latency at the expense of wasted
+-//    cycles and context switching. 
++//    cycles and context switching.
+ //
+ // *  We might to spin less after we've parked as the thread will
+ //    have less $ and TLB affinity with the processor.
+ //    Likewise, we might spin less if we come ONPROC on a different
+-//    processor or after a long period (>> rechose_interval).  
++//    processor or after a long period (>> rechose_interval).
+ //
+-// *  A table-driven state machine similar to Solaris' dispadmin scheduling 
++// *  A table-driven state machine similar to Solaris' dispadmin scheduling
+ //    tables might be a better design.  Instead of encoding information in
+-//    _SpinDuration, _SpinFreq and _SpinClock we'd just use explicit, 
+-//    discrete states.   Success or failure during a spin would drive 
+-//    state transitions, and each state node would contain a spin count.  
++//    _SpinDuration, _SpinFreq and _SpinClock we'd just use explicit,
++//    discrete states.   Success or failure during a spin would drive
++//    state transitions, and each state node would contain a spin count.
+ //
+-// *  If the processor is operating in a mode intended to conserve power 
+-//    (such as Intel's SpeedStep) or to reduce thermal output (thermal 
++// *  If the processor is operating in a mode intended to conserve power
++//    (such as Intel's SpeedStep) or to reduce thermal output (thermal
+ //    step-down mode) then the Java synchronization subsystem should
+ //    forgo spinning.
+ //
+ // *  The minimum spin duration should be approximately the worst-case
+-//    store propagation latency on the platform.  That is, the time 
++//    store propagation latency on the platform.  That is, the time
+ //    it takes a store on CPU A to become visible on CPU B, where A and
+-//    B are "distant".  
++//    B are "distant".
+ //
+ // *  We might want to factor a thread's priority in the spin policy.
+ //    Threads with a higher priority might spin for slightly longer.
+ //    Similarly, if we use back-off in the TATAS loop, lower priority
+ //    threads might back-off longer.  We don't currently use a
+ //    thread's priority when placing it on the entry queue.  We may
+-//    want to consider doing so in future releases. 
++//    want to consider doing so in future releases.
+ //
+ // *  We might transiently drop a thread's scheduling priority while it spins.
+-//    SCHED_BATCH on linux and FX scheduling class at priority=0 on Solaris 
+-//    would suffice.  We could even consider letting the thread spin indefinitely at 
+-//    a depressed or "idle" priority.  This brings up fairness issues, however -- 
+-//    in a saturated system a thread would with a reduced priority could languish 
+-//    for extended periods on the ready queue.  
++//    SCHED_BATCH on linux and FX scheduling class at priority=0 on Solaris
++//    would suffice.  We could even consider letting the thread spin indefinitely at
++//    a depressed or "idle" priority.  This brings up fairness issues, however --
++//    in a saturated system a thread with a reduced priority could languish
++//    for extended periods on the ready queue.
+ //
+ // *  While spinning try to use the otherwise wasted time to help the VM make
+-//    progess:
+-//  
++//    progress:
++//
+ //    -- YieldTo() the owner, if the owner is OFFPROC but ready
+ //       Done our remaining quantum directly to the ready thread.
+ //       This helps "push" the lock owner through the critical section.
+ //       It also tends to improve affinity/locality as the lock
+-//       "migrates" less frequently between CPUs.   
++//       "migrates" less frequently between CPUs.
+ //    -- Walk our own stack in anticipation of blocking.  Memoize the roots.
+ //    -- Perform strand checking for other thread.  Unpark potential strandees.
+-//    -- Help GC: trace or mark -- this would need to be a bounded unit of work.  
+-//       Unfortunately this will pollute our $ and TLBs.  Recall that we 
+-//       spin to avoid context switching -- context switching has an 
+-//       immediate cost in latency, a disruptive cost to other strands on a CMT 
+-//       processor, and an amortized cost because of the D$ and TLB cache 
+-//       reload transient when the thread comes back ONPROC and repopulates 
++//    -- Help GC: trace or mark -- this would need to be a bounded unit of work.
++//       Unfortunately this will pollute our $ and TLBs.  Recall that we
++//       spin to avoid context switching -- context switching has an
++//       immediate cost in latency, a disruptive cost to other strands on a CMT
++//       processor, and an amortized cost because of the D$ and TLB cache
++//       reload transient when the thread comes back ONPROC and repopulates
+ //       $s and TLBs.
+ //    -- call getloadavg() to see if the system is saturated.  It'd probably
+ //       make sense to call getloadavg() half way through the spin.
+ //       If the system isn't at full capacity the we'd simply reset
+ //       the spin counter to and extend the spin attempt.
+ //    -- Doug points out that we should use the same "helping" policy
+-//       in thread.yield().  
++//       in thread.yield().
++//
++// *  Try MONITOR-MWAIT on systems that support those instructions.
+ //
+-// *  Try MONITOR-MWAIT on systems that support those instructions.  
+-// 
+ // *  The spin statistics that drive spin decisions & frequency are
+ //    maintained in the objectmonitor structure so if we deflate and reinflate
+ //    we lose spin state.  In practice this is not usually a concern
+ //    as the default spin state after inflation is aggressive (optimistic)
+ //    and tends toward spinning.  So in the worst case for a lock where
+ //    spinning is not profitable we may spin unnecessarily for a brief
+-//    period.  But then again, if a lock is contented it'll tend not to deflate\
++//    period.  But then again, if a lock is contended it'll tend not to deflate
+ //    in the first place.
+ 
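The adaptive scheme described above varies spin duration by feedback: a profitable spin bumps the duration (jumping it to a "poverty line" first), a failed one debits it, with no locks or atomics around the update. A sketch of that rule; the knob values are placeholders chosen for illustration, not asserted HotSpot defaults:

    int spin_duration = 0;                          // shared, updated racily on purpose

    void on_spin_result(bool acquired) {
        const int limit = 5000, poverty = 1000, bonus = 100, penalty = 200;
        int x = spin_duration;                      // racy read: at worst we
        if (acquired) {                             // install a stale legal value
            if (x < limit) {
                if (x < poverty) x = poverty;       // raise to the poverty line
                spin_duration = x + bonus;
            }
        } else if (x > 0) {
            spin_duration = x - penalty;            // discourage future spinning
        }
    }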
+ 
+-intptr_t ObjectMonitor::SpinCallbackArgument = 0 ; 
+-int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL ; 
++intptr_t ObjectMonitor::SpinCallbackArgument = 0 ;
++int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL ;
+ 
+ // Spinning: Fixed frequency (100%), vary duration
+ 
+-int ObjectMonitor::TrySpin_VaryDuration (Thread * Self) { 
++int ObjectMonitor::TrySpin_VaryDuration (Thread * Self) {
+ 
+     // Dumb, brutal spin.  Good for comparative measurements against adaptive spinning.
+-    int ctr = Knob_FixedSpin ; 
+-    if (ctr != 0) { 
++    int ctr = Knob_FixedSpin ;
++    if (ctr != 0) {
+         while (--ctr >= 0) {
+-            if (TryLock (Self) > 0) return 1 ; 
+-            SpinPause () ; 
++            if (TryLock (Self) > 0) return 1 ;
++            SpinPause () ;
+         }
+-        return 0 ; 
++        return 0 ;
+     }
+ 
+-    for (ctr = Knob_PreSpin + 1; --ctr >= 0 ; ) { 
++    for (ctr = Knob_PreSpin + 1; --ctr >= 0 ; ) {
+       if (TryLock(Self) > 0) {
+         // Increase _SpinDuration ...
+-        // Note that we don't clamp SpinDuration precisely at SpinLimit.  
++        // Note that we don't clamp SpinDuration precisely at SpinLimit.
+         // Raising _SpurDuration to the poverty line is key.
+-        int x = _SpinDuration ; 
+-        if (x < Knob_SpinLimit) { 
+-           if (x < Knob_Poverty) x = Knob_Poverty ; 
+-           _SpinDuration = x + Knob_BonusB ; 
++        int x = _SpinDuration ;
++        if (x < Knob_SpinLimit) {
++           if (x < Knob_Poverty) x = Knob_Poverty ;
++           _SpinDuration = x + Knob_BonusB ;
+         }
+-        return 1 ; 
++        return 1 ;
+       }
+-      SpinPause () ; 
++      SpinPause () ;
+     }
+ 
+     // Admission control - verify preconditions for spinning
+     //
+     // We always spin a little bit, just to prevent _SpinDuration == 0 from
+     // becoming an absorbing state.  Put another way, we spin briefly to
+-    // sample, just in case the system load, parallelism, contention, or lock 
+-    // modality changed.  
+-    // 
++    // sample, just in case the system load, parallelism, contention, or lock
++    // modality changed.
++    //
+     // Consider the following alternative:
+     // Periodically set _SpinDuration = _SpinLimit and try a long/full
+-    // spin attempt.  "Periodically" might mean after a tally of 
++    // spin attempt.  "Periodically" might mean after a tally of
+     // the # of failed spin attempts (or iterations) reaches some threshold.
+-    // This takes us into the realm of 1-out-of-N spinning, where we 
+-    // hold the duration constant but vary the frequency.  
++    // This takes us into the realm of 1-out-of-N spinning, where we
++    // hold the duration constant but vary the frequency.
+ 
+-    ctr = _SpinDuration  ; 
+-    if (ctr < Knob_SpinBase) ctr = Knob_SpinBase ; 
+-    if (ctr <= 0) return 0 ; 
++    ctr = _SpinDuration  ;
++    if (ctr < Knob_SpinBase) ctr = Knob_SpinBase ;
++    if (ctr <= 0) return 0 ;
+ 
+-    if (Knob_SuccRestrict && _succ != NULL) return 0 ; 
++    if (Knob_SuccRestrict && _succ != NULL) return 0 ;
+     if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
+        TEVENT (Spin abort - notrunnable [TOP]);
+-       return 0 ; 
++       return 0 ;
+     }
+ 
+-    int MaxSpin = Knob_MaxSpinners ; 
+-    if (MaxSpin >= 0) { 
++    int MaxSpin = Knob_MaxSpinners ;
++    if (MaxSpin >= 0) {
+        if (_Spinner > MaxSpin) {
+-          TEVENT (Spin abort -- too many spinners) ; 
+-          return 0 ; 
++          TEVENT (Spin abort -- too many spinners) ;
++          return 0 ;
+        }
+        // Slighty racy, but benign ...
+-       Adjust (&_Spinner, 1) ; 
++       Adjust (&_Spinner, 1) ;
+     }
+ 
+-    // We're good to spin ... spin ingress.  
++    // We're good to spin ... spin ingress.
+     // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
+     // when preparing to LD...CAS _owner, etc and the CAS is likely
+-    // to succeed.  
+-    int hits    = 0 ; 
+-    int msk     = 0 ; 
+-    int caspty  = Knob_CASPenalty ; 
+-    int oxpty   = Knob_OXPenalty ; 
+-    int sss     = Knob_SpinSetSucc ; 
+-    if (sss && _succ == NULL ) _succ = Self ; 
+-    Thread * prv = NULL ; 
++    // to succeed.
++    int hits    = 0 ;
++    int msk     = 0 ;
++    int caspty  = Knob_CASPenalty ;
++    int oxpty   = Knob_OXPenalty ;
++    int sss     = Knob_SpinSetSucc ;
++    if (sss && _succ == NULL ) _succ = Self ;
++    Thread * prv = NULL ;
+ 
+     // There are three ways to exit the following loop:
+     // 1.  A successful spin where this thread has acquired the lock.
+-    // 2.  Spin failure with prejudice 
+-    // 3.  Spin failure without prejudice 
++    // 2.  Spin failure with prejudice
++    // 3.  Spin failure without prejudice
+ 
+     while (--ctr >= 0) {
+ 
+       // Periodic polling -- Check for pending GC
+-      // Threads may spin while they're unsafe.     
+-      // We don't want spinning threads to delay the JVM from reaching 
++      // Threads may spin while they're unsafe.
++      // We don't want spinning threads to delay the JVM from reaching
+       // a stop-the-world safepoint or to steal cycles from GC.
+-      // If we detect a pending safepoint we abort in order that 
++      // If we detect a pending safepoint we abort in order that
+       // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
+-      // this thread, if safe, doesn't steal cycles from GC. 
+-      // This is in keeping with the "no loitering in runtime" rule.  
++      // this thread, if safe, doesn't steal cycles from GC.
++      // This is in keeping with the "no loitering in runtime" rule.
+       // We periodically check to see if there's a safepoint pending.
+-      if ((ctr & 0xFF) == 0) { 
++      if ((ctr & 0xFF) == 0) {
+          if (SafepointSynchronize::do_call_back()) {
+-            TEVENT (Spin: safepoint) ; 
++            TEVENT (Spin: safepoint) ;
+             goto Abort ;           // abrupt spin egress
+          }
+-         if (Knob_UsePause & 1) SpinPause () ; 
++         if (Knob_UsePause & 1) SpinPause () ;
+ 
+-         int (*scb)(intptr_t,int) = SpinCallbackFunction ; 
+-         if (hits > 50 && scb != NULL) { 
+-            int abend = (*scb)(SpinCallbackArgument, 0) ; 
++         int (*scb)(intptr_t,int) = SpinCallbackFunction ;
++         if (hits > 50 && scb != NULL) {
++            int abend = (*scb)(SpinCallbackArgument, 0) ;
+          }
+       }
+ 
+-      if (Knob_UsePause & 2) SpinPause() ; 
++      if (Knob_UsePause & 2) SpinPause() ;
+ 
+       // Exponential back-off ...  Stay off the bus to reduce coherency traffic.
+-      // This is useful on classic SMP systems, but is of less utility on 
+-      // N1-style CMT platforms. 
+-      // 
+-      // Trade-off: lock acquistion latency vs coherency bandwidth.
++      // This is useful on classic SMP systems, but is of less utility on
++      // N1-style CMT platforms.
++      //
++      // Trade-off: lock acquisition latency vs coherency bandwidth.
+       // Lock hold times are typically short.  A histogram
+       // of successful spin attempts shows that we usually acquire
+-      // the lock early in the spin.  That suggests we want to 
++      // the lock early in the spin.  That suggests we want to
+       // sample _owner frequently in the early phase of the spin,
+       // but then back-off and sample less frequently as the spin
+       // progresses.  The back-off makes a good citizen on SMP big
+-      // SMP systems.  Oversampling _owner can consume excessive 
++      // SMP systems.  Oversampling _owner can consume excessive
+       // coherency bandwidth.  Relatedly, if we _oversample _owner we
+       // can inadvertently interfere with the the ST m->owner=null.
+-      // executed by the lock owner.  
+-      if (ctr & msk) continue ; 
+-      ++hits ; 
+-      if ((hits & 0xF) == 0) { 
++      // executed by the lock owner.
++      if (ctr & msk) continue ;
++      ++hits ;
++      if ((hits & 0xF) == 0) {
+         // The 0xF, above, corresponds to the exponent.
+         // Consider: (msk+1)|msk
+-        msk = ((msk << 2)|3) & BackOffMask ; 
++        msk = ((msk << 2)|3) & BackOffMask ;
+       }
+ 
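// A minimal standalone sketch (not HotSpot code) of the exponential back-off
// described above: every 16 "hits" the sampling mask widens by two bits, so
// the spinner probes _owner on only 1-in-(msk+1) iterations.  The cap here is
// an assumption; in HotSpot it is BackOffMask, which DeferredInitialize()
// derives from Knob_SpinBackOff (see below).
#include <cstdio>

int main() {
    const int kBackOffMask = (1 << 10) - 1;    // assumed cap for illustration
    int msk = 0;
    for (int round = 0; round < 8; ++round) {
        std::printf("mask=0x%X -> sample 1 in %d iterations\n", msk, msk + 1);
        msk = ((msk << 2) | 3) & kBackOffMask; // same recurrence as the loop above
    }
    return 0;
}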
+       // Probe _owner with TATAS
+       // If this thread observes the monitor transition or flicker
+       // from locked to unlocked to locked, then the odds that this
+-      // thread will acquire the lock in this spin attempt go down 
+-      // considerably.  The same argument applies if the CAS fails 
+-      // or if we observe _owner change from one non-null value to 
++      // thread will acquire the lock in this spin attempt go down
++      // considerably.  The same argument applies if the CAS fails
++      // or if we observe _owner change from one non-null value to
+       // another non-null value.   In such cases we might abort
+       // the spin without prejudice or apply a "penalty" to the
+       // spin count-down variable "ctr", reducing it by 100, say.
+ 
+-      Thread * ox = (Thread *) _owner ; 
+-      if (ox == NULL) { 
+-         ox = (Thread *) Atomic::cmpxchg_ptr (Self, &_owner, NULL) ; 
+-         if (ox == NULL) { 
++      Thread * ox = (Thread *) _owner ;
++      if (ox == NULL) {
++         ox = (Thread *) Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
++         if (ox == NULL) {
+             // The CAS succeeded -- this thread acquired ownership
+-            // Take care of some bookkeeping to exit spin state. 
++            // Take care of some bookkeeping to exit spin state.
+             if (sss && _succ == Self) {
+                _succ = NULL ;
+             }
+-            if (MaxSpin > 0) Adjust (&_Spinner, -1) ; 
++            if (MaxSpin > 0) Adjust (&_Spinner, -1) ;
+ 
+             // Increase _SpinDuration :
+             // The spin was successful (profitable) so we tend toward
+@@ -2443,184 +2520,184 @@
+             // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
+             // If we acquired the lock early in the spin cycle it
+             // makes sense to increase _SpinDuration proportionally.
+-            // Note that we don't clamp SpinDuration precisely at SpinLimit.  
+-            int x = _SpinDuration ; 
+-            if (x < Knob_SpinLimit) { 
+-                if (x < Knob_Poverty) x = Knob_Poverty ; 
+-                _SpinDuration = x + Knob_Bonus ; 
++            // Note that we don't clamp SpinDuration precisely at SpinLimit.
++            int x = _SpinDuration ;
++            if (x < Knob_SpinLimit) {
++                if (x < Knob_Poverty) x = Knob_Poverty ;
++                _SpinDuration = x + Knob_Bonus ;
+             }
+-            return 1 ; 
++            return 1 ;
+          }
+ 
+          // The CAS failed ... we can take any of the following actions:
+          // * penalize: ctr -= Knob_CASPenalty
+-         // * exit spin with prejudice -- goto Abort; 
+-         // * exit spin without prejudice. 
++         // * exit spin with prejudice -- goto Abort;
++         // * exit spin without prejudice.
+          // * Since CAS is high-latency, retry again immediately.
+-         prv = ox ; 
+-         TEVENT (Spin: cas failed) ; 
+-         if (caspty == -2) break ; 
+-         if (caspty == -1) goto Abort ; 
+-         ctr -= caspty ; 
+-         continue ; 
+-      } 
++         prv = ox ;
++         TEVENT (Spin: cas failed) ;
++         if (caspty == -2) break ;
++         if (caspty == -1) goto Abort ;
++         ctr -= caspty ;
++         continue ;
++      }
+ 
+-      // Did lock ownership change hands ? 
+-      if (ox != prv && prv != NULL ) { 
++      // Did lock ownership change hands ?
++      if (ox != prv && prv != NULL ) {
+           TEVENT (spin: Owner changed)
+-          if (oxpty == -2) break ; 
+-          if (oxpty == -1) goto Abort ; 
+-          ctr -= oxpty ; 
++          if (oxpty == -2) break ;
++          if (oxpty == -1) goto Abort ;
++          ctr -= oxpty ;
+       }
+-      prv = ox ; 
++      prv = ox ;
+ 
+       // Abort the spin if the owner is not executing.
+-      // The owner must be executing in order to drop the lock. 
+-      // Spinning while the owner is OFFPROC is idiocy.  
+-      // Consider: ctr -= RunnablePenalty ; 
++      // The owner must be executing in order to drop the lock.
++      // Spinning while the owner is OFFPROC is idiocy.
++      // Consider: ctr -= RunnablePenalty ;
+       if (Knob_OState && NotRunnable (Self, ox)) {
+          TEVENT (Spin abort - notrunnable);
+-         goto Abort ; 
++         goto Abort ;
+       }
+-      if (sss && _succ == NULL ) _succ = Self ; 
++      if (sss && _succ == NULL ) _succ = Self ;
+    }
+ 
+    // Spin failed with prejudice -- reduce _SpinDuration.
+-   // TODO: Use an AIMD-like policy to adjust _SpinDuration.  
+-   // AIMD is globally stable.  
+-   TEVENT (Spin failure) ; 
+-   { 
+-     int x = _SpinDuration ; 
+-     if (x > 0) { 
+-        // Consider an AIMD scheme like: x -= (x >> 3) + 100 
+-        // This is globally sample and tends to damp the response.  
+-        x -= Knob_Penalty ; 
+-        if (x < 0) x = 0 ; 
+-        _SpinDuration = x ; 
++   // TODO: Use an AIMD-like policy to adjust _SpinDuration.
++   // AIMD is globally stable.
++   TEVENT (Spin failure) ;
++   {
++     int x = _SpinDuration ;
++     if (x > 0) {
++        // Consider an AIMD scheme like: x -= (x >> 3) + 100
++        // This is globally stable and tends to damp the response.
++        x -= Knob_Penalty ;
++        if (x < 0) x = 0 ;
++        _SpinDuration = x ;
+      }
+    }
+ 
+  Abort:
+-   if (MaxSpin >= 0) Adjust (&_Spinner, -1) ; 
++   if (MaxSpin >= 0) Adjust (&_Spinner, -1) ;
+    if (sss && _succ == Self) {
+-      _succ = NULL ; 
+-      // Invariant: after setting succ=null a contending thread 
+-      // must recheck-retry _owner before parking.  This usually happens 
++      _succ = NULL ;
++      // Invariant: after setting succ=null a contending thread
++      // must recheck-retry _owner before parking.  This usually happens
+       // in the normal usage of TrySpin(), but it's safest
+-      // to make TrySpin() as foolproof as possible. 
+-      OrderAccess::fence() ; 
+-      if (TryLock(Self) > 0) return 1 ; 
++      // to make TrySpin() as foolproof as possible.
++      OrderAccess::fence() ;
++      if (TryLock(Self) > 0) return 1 ;
+    }
+-   return 0 ; 
++   return 0 ;
+ }
+ 
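// A minimal sketch (illustrative names and values, not HotSpot's) of the
// additive bonus/penalty policy TrySpin_VaryDuration applies to _SpinDuration
// above.  As the TODO notes, a true AIMD policy -- additive increase,
// multiplicative decrease -- would be globally stable; this one is purely additive.
#include <algorithm>

struct SpinPolicy {
    int duration = 0;
    static const int Limit = 5000, Poverty = 1000, Bonus = 100, Penalty = 200;

    void onSuccess() {                    // spin acquired the lock: reward
        if (duration < Limit)
            duration = std::max(duration, Poverty) + Bonus;
    }
    void onFailure() {                    // spin failed with prejudice: punish
        duration = std::max(0, duration - Penalty);
        // AIMD alternative per the comment above: duration -= (duration >> 3) + 100;
    }
};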
+ #define TrySpin TrySpin_VaryDuration
+ 
+ static void DeferredInitialize () {
+-  if (InitDone > 0) return ; 
+-  if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) { 
+-      while (InitDone != 1) ; 
+-      return ; 
++  if (InitDone > 0) return ;
++  if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) {
++      while (InitDone != 1) ;
++      return ;
+   }
+ 
+-  // One-shot global initialization ... 
+-  // The initialization is idempotent, so we don't need locks.  
++  // One-shot global initialization ...
++  // The initialization is idempotent, so we don't need locks.
+   // In the future consider doing this via os::init_2().
+   // SyncKnobs consist of <Key>=<Value> pairs in the style
+-  // of environment variables.  Start by converting ':' to NUL.  
++  // of environment variables.  Start by converting ':' to NUL.
+ 
+-  if (SyncKnobs == NULL) SyncKnobs = "" ;    
++  if (SyncKnobs == NULL) SyncKnobs = "" ;
+ 
+-  size_t sz = strlen (SyncKnobs) ; 
+-  char * knobs = (char *) malloc (sz + 2) ; 
+-  if (knobs == NULL) { 
+-     vm_exit_out_of_memory (sz + 2, "Parse SyncKnobs") ; 
+-     guarantee (0, "invariant") ; 
++  size_t sz = strlen (SyncKnobs) ;
++  char * knobs = (char *) malloc (sz + 2) ;
++  if (knobs == NULL) {
++     vm_exit_out_of_memory (sz + 2, "Parse SyncKnobs") ;
++     guarantee (0, "invariant") ;
+   }
+-  strcpy (knobs, SyncKnobs) ; 
+-  knobs[sz+1] = 0 ; 
++  strcpy (knobs, SyncKnobs) ;
++  knobs[sz+1] = 0 ;
+   for (char * p = knobs ; *p ; p++) {
+-     if (*p == ':') *p = 0 ; 
++     if (*p == ':') *p = 0 ;
+   }
+ 
+   #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); }
+-  SETKNOB(ReportSettings) ; 
+-  SETKNOB(Verbose) ; 
+-  SETKNOB(FixedSpin) ; 
+-  SETKNOB(SpinLimit) ; 
+-  SETKNOB(SpinBase) ; 
++  SETKNOB(ReportSettings) ;
++  SETKNOB(Verbose) ;
++  SETKNOB(FixedSpin) ;
++  SETKNOB(SpinLimit) ;
++  SETKNOB(SpinBase) ;
+   SETKNOB(SpinBackOff);
+-  SETKNOB(CASPenalty) ; 
+-  SETKNOB(OXPenalty) ; 
+-  SETKNOB(LogSpins) ; 
+-  SETKNOB(SpinSetSucc) ; 
+-  SETKNOB(SuccEnabled) ; 
+-  SETKNOB(SuccRestrict) ; 
+-  SETKNOB(Penalty) ; 
+-  SETKNOB(Bonus) ; 
+-  SETKNOB(BonusB) ; 
+-  SETKNOB(Poverty) ; 
+-  SETKNOB(SpinAfterFutile) ; 
+-  SETKNOB(UsePause) ; 
+-  SETKNOB(SpinEarly) ; 
+-  SETKNOB(OState) ; 
+-  SETKNOB(MaxSpinners) ; 
+-  SETKNOB(PreSpin) ; 
+-  SETKNOB(ExitPolicy) ; 
+-  SETKNOB(QMode); 
+-  SETKNOB(ResetEvent) ; 
+-  SETKNOB(MoveNotifyee) ; 
+-  SETKNOB(FastHSSEC) ; 
++  SETKNOB(CASPenalty) ;
++  SETKNOB(OXPenalty) ;
++  SETKNOB(LogSpins) ;
++  SETKNOB(SpinSetSucc) ;
++  SETKNOB(SuccEnabled) ;
++  SETKNOB(SuccRestrict) ;
++  SETKNOB(Penalty) ;
++  SETKNOB(Bonus) ;
++  SETKNOB(BonusB) ;
++  SETKNOB(Poverty) ;
++  SETKNOB(SpinAfterFutile) ;
++  SETKNOB(UsePause) ;
++  SETKNOB(SpinEarly) ;
++  SETKNOB(OState) ;
++  SETKNOB(MaxSpinners) ;
++  SETKNOB(PreSpin) ;
++  SETKNOB(ExitPolicy) ;
++  SETKNOB(QMode);
++  SETKNOB(ResetEvent) ;
++  SETKNOB(MoveNotifyee) ;
++  SETKNOB(FastHSSEC) ;
+   #undef SETKNOB
+-  
+-  if (os::is_MP()) { 
+-     BackOffMask = (1 << Knob_SpinBackOff) - 1 ; 
+-     if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ; 
++
++  if (os::is_MP()) {
++     BackOffMask = (1 << Knob_SpinBackOff) - 1 ;
++     if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ;
+      // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
+-  } else { 
+-     Knob_SpinLimit = 0 ; 
+-     Knob_SpinBase  = 0 ; 
+-     Knob_PreSpin   = 0 ; 
+-     Knob_FixedSpin = -1 ; 
++  } else {
++     Knob_SpinLimit = 0 ;
++     Knob_SpinBase  = 0 ;
++     Knob_PreSpin   = 0 ;
++     Knob_FixedSpin = -1 ;
+   }
+ 
+-  if (Knob_LogSpins == 0) { 
+-     ObjectSynchronizer::_sync_FailedSpins = NULL ; 
++  if (Knob_LogSpins == 0) {
++     ObjectSynchronizer::_sync_FailedSpins = NULL ;
+   }
+ 
+-  free (knobs) ; 
+-  OrderAccess::fence() ; 
+-  InitDone = 1 ; 
++  free (knobs) ;
++  OrderAccess::fence() ;
++  InitDone = 1 ;
+ }
+ 
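// A minimal sketch of a kvGetInt-style lookup (HotSpot's real helper lives
// elsewhere; only the buffer format is taken from the code above).  After the
// ':'-to-NUL loop, "SpinLimit=5000:Verbose=1" has become a sequence of
// adjacent NUL-terminated "Key=Value" strings ending in an empty string.
#include <cstdlib>
#include <cstring>

static int kvGetIntSketch(const char *knobs, const char *key, int deflt) {
    const size_t klen = std::strlen(key);
    for (const char *p = knobs; *p; p += std::strlen(p) + 1) {
        if (std::strncmp(p, key, klen) == 0 && p[klen] == '=')
            return std::atoi(p + klen + 1);   // value follows the '='
    }
    return deflt;                             // key absent: keep the default
}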
+ // Theory of operations -- Monitors lists, thread residency, etc:
+ //
+ // * A thread acquires ownership of a monitor by successfully
+-//   CAS()ing the _owner field from null to non-null.  
+-// 
++//   CAS()ing the _owner field from null to non-null.
++//
+ // * Invariant: A thread appears on at most one monitor list --
+ //   cxq, EntryList or WaitSet -- at any one time.
+ //
+-// * Contenting threads "push" themselves onto the cxq with CAS 
++// * Contending threads "push" themselves onto the cxq with CAS
+ //   and then spin/park.
+ //
+ // * After a contending thread eventually acquires the lock it must
+-//   dequeue itself from either the EntryList or the cxq. 
++//   dequeue itself from either the EntryList or the cxq.
+ //
+ // * The exiting thread identifies and unparks an "heir presumptive"
+ //   tentative successor thread on the EntryList.  Critically, the
+ //   exiting thread doesn't unlink the successor thread from the EntryList.
+-//   After having been unparked, the wakee will recontend for ownership of 
+-//   the monitor.   The successor (wakee) will either acquire the lock or 
+-//   re-park itself.  
+-//
+-//   Succession is provided for by a policy of competitive handoff.  
+-//   The exiting thread does _not_ grant or pass ownership to the 
+-//   successor thread.  (This is also referred to as "handoff" succession").  
++//   After having been unparked, the wakee will recontend for ownership of
++//   the monitor.   The successor (wakee) will either acquire the lock or
++//   re-park itself.
++//
++//   Succession is provided for by a policy of competitive handoff.
++//   The exiting thread does _not_ grant or pass ownership to the
++//   successor thread.  (This is also referred to as "handoff succession").
+ //   Instead the exiting thread releases ownership and possibly wakes
+-//   a successor, so the successor can (re)compete for ownership of the lock.  
+-//   If the EntryList is empty but the cxq is populated the exiting 
+-//   thread will drain the cxq into the EntryList.  It does so by 
++//   a successor, so the successor can (re)compete for ownership of the lock.
++//   If the EntryList is empty but the cxq is populated the exiting
++//   thread will drain the cxq into the EntryList.  It does so
+ //   by detaching the cxq (installing null with CAS) and folding
+ //   the threads from the cxq into the EntryList.  The EntryList is
+ //   doubly linked, while the cxq is singly linked because of the
+@@ -2629,47 +2706,47 @@
+ // * Concurrency invariants:
+ //
+ //   -- only the monitor owner may access or mutate the EntryList.
+-//      The mutex property of the monitor itself protects the EntryList 
++//      The mutex property of the monitor itself protects the EntryList
+ //      from concurrent interference.
+-//   -- Only the monitor owner may detach the cxq.  
++//   -- Only the monitor owner may detach the cxq.
+ //
+-// * The monitor entry list operations avoid locks, but strictly speaking 
+-//   they're not lock-free.  Enter is lock-free, exit is not.  
++// * The monitor entry list operations avoid locks, but strictly speaking
++//   they're not lock-free.  Enter is lock-free, exit is not.
+ //   See http://j2se.east/~dice/PERSIST/040825-LockFreeQueues.html
+ //
+ // * The cxq can have multiple concurrent "pushers" but only one concurrent
+-//   detaching thread.  This mechanism is immune from the ABA corruption.  
+-//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.  
++//   detaching thread.  This mechanism is immune from the ABA corruption.
++//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
+ //
+ // * Taken together, the cxq and the EntryList constitute or form a
+ //   single logical queue of threads stalled trying to acquire the lock.
+-//   We use two distinct lists to improve the odds of a constant-time 
+-//   dequeue operation after acquisition (in the ::enter() epilog) and 
+-//   to reduce heat on the list ends.  (c.f. Michael Scott's "2Q" algorithm).  
++//   We use two distinct lists to improve the odds of a constant-time
++//   dequeue operation after acquisition (in the ::enter() epilog) and
++//   to reduce heat on the list ends.  (c.f. Michael Scott's "2Q" algorithm).
+ //   A key desideratum is to minimize queue & monitor metadata manipulation
+-//   that occurs while holding the monitor lock -- that is, we want to 
++//   that occurs while holding the monitor lock -- that is, we want to
+ //   minimize monitor lock holds times.  Note that even a small amount of
+ //   fixed spinning will greatly reduce the # of enqueue-dequeue operations
+ //   on EntryList|cxq.  That is, spinning relieves contention on the "inner"
+-//   locks and monitor metadata.  
+-// 
++//   locks and monitor metadata.
++//
+ //   Cxq points to the the set of Recently Arrived Threads attempting entry.
+-//   Because we push threads onto _cxq with CAS, the RATs must take the form of 
++//   Because we push threads onto _cxq with CAS, the RATs must take the form of
+ //   a singly-linked LIFO.  We drain _cxq into EntryList  at unlock-time when
+ //   the unlocking thread notices that EntryList is null but _cxq is != null.
+ //
+ //   The EntryList is ordered by the prevailing queue discipline and
+ //   can be organized in any convenient fashion, such as a doubly-linked list or
+-//   a circular doubly-linked list.  Critically, we want insert and delete operations 
++//   a circular doubly-linked list.  Critically, we want insert and delete operations
+ //   to operate in constant-time.  If we need a priority queue then something akin
+ //   to Solaris' sleepq would work nicely.  Viz.,
+ //   http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
+ //   Queue discipline is enforced at ::exit() time, when the unlocking thread
+ //   drains the cxq into the EntryList, and orders or reorders the threads on the
+-//   EntryList accordingly.  
+-//   
++//   EntryList accordingly.
++//
+ //   Barring "lock barging", this mechanism provides fair cyclic ordering,
+-//   somewhat similar to an elevator-scan.  
++//   somewhat similar to an elevator-scan.
+ //
+ // * The monitor synchronization subsystem avoids the use of native
+ //   synchronization primitives except for the narrow platform-specific
+@@ -2677,301 +2754,301 @@
+ //   the semantics of park-unpark.  Put another way, this monitor implementation
+ //   depends only on atomic operations and park-unpark.  The monitor subsystem
+ //   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
+-//   underlying OS manages the READY<->RUN transitions.  
++//   underlying OS manages the READY<->RUN transitions.
+ //
+ // * Waiting threads reside on the WaitSet list -- wait() puts
+-//   the caller onto the WaitSet. 
++//   the caller onto the WaitSet.
+ //
+-// * notify() or notifyAll() simply transfers threads from the WaitSet to 
+-//   either the EntryList or cxq.  Subsequent exit() operations will 
++// * notify() or notifyAll() simply transfers threads from the WaitSet to
++//   either the EntryList or cxq.  Subsequent exit() operations will
+ //   unpark the notifyee.  Unparking a notifee in notify() is inefficient -
+ //   it's likely the notifyee would simply impale itself on the lock held
+-//   by the notifier.  
++//   by the notifier.
+ //
+ // * An interesting alternative is to encode cxq as (List,LockByte) where
+-//   the LockByte is 0 iff the monitor is owned.  _owner is simply an auxilliary
++//   the LockByte is 0 iff the monitor is owned.  _owner is simply an auxiliary
+ //   variable, like _recursions, in the scheme.  The threads or Events that form
+ //   the list would have to be aligned in 256-byte addresses.  A thread would
+ //   try to acquire the lock or enqueue itself with CAS, but exiting threads
+ //   could use a 1-0 protocol and simply STB to set the LockByte to 0.
+ //   Note that is is *not* word-tearing, but it does presume that full-word
+ //   CAS operations are coherent with intermix with STB operations.  That's true
+-//   on most common processors.  
++//   on most common processors.
+ //
+ // * See also http://blogs.sun.com/dave
+ 
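// A minimal sketch of TATAS ("test-and-test-and-set"), the probe used by
// TryLock/EnterI below: read the owner field cheaply first, and only attempt
// the expensive CAS when the lock appears free (std::atomic rendering;
// HotSpot uses Atomic::cmpxchg_ptr).
#include <atomic>

std::atomic<void*> owner{nullptr};

bool tryLockTATAS(void *self) {
    if (owner.load(std::memory_order_relaxed) != nullptr)
        return false;                        // test: looks held, skip the CAS
    void *expected = nullptr;
    return owner.compare_exchange_strong(expected, self);  // test-and-set
}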
+ 
+-void ATTR ObjectMonitor::EnterI (TRAPS) {  
+-    Thread * Self = THREAD ; 
+-    assert (Self->is_Java_thread(), "invariant") ; 
+-    assert (((JavaThread *) Self)->thread_state() == _thread_blocked   , "invariant") ; 
+-
+-    // Try the lock - TATAS  
+-    if (TryLock (Self) > 0) { 
+-        assert (_succ != Self              , "invariant") ;  
+-        assert (_owner == Self             , "invariant") ; 
+-        assert (_Responsible != Self       , "invariant") ; 
+-        return ; 
+-    }
+-
+-    DeferredInitialize () ; 
+-  
+-    // We try one round of spinning *before* enqueueing Self.  
++void ATTR ObjectMonitor::EnterI (TRAPS) {
++    Thread * Self = THREAD ;
++    assert (Self->is_Java_thread(), "invariant") ;
++    assert (((JavaThread *) Self)->thread_state() == _thread_blocked   , "invariant") ;
++
++    // Try the lock - TATAS
++    if (TryLock (Self) > 0) {
++        assert (_succ != Self              , "invariant") ;
++        assert (_owner == Self             , "invariant") ;
++        assert (_Responsible != Self       , "invariant") ;
++        return ;
++    }
++
++    DeferredInitialize () ;
++
++    // We try one round of spinning *before* enqueueing Self.
+     //
+     // If the _owner is ready but OFFPROC we could use a YieldTo()
+     // operation to donate the remainder of this thread's quantum
+     // to the owner.  This has subtle but beneficial affinity
+-    // effects.  
++    // effects.
+ 
+-    if (TrySpin (Self) > 0) { 
+-        assert (_owner == Self        , "invariant") ; 
+-        assert (_succ != Self         , "invariant") ; 
+-        assert (_Responsible != Self  , "invariant") ; 
+-        return ; 
++    if (TrySpin (Self) > 0) {
++        assert (_owner == Self        , "invariant") ;
++        assert (_succ != Self         , "invariant") ;
++        assert (_Responsible != Self  , "invariant") ;
++        return ;
+     }
+-  
++
+     // The Spin failed -- Enqueue and park the thread ...
+-    assert (_succ  != Self            , "invariant") ; 
+-    assert (_owner != Self            , "invariant") ; 
+-    assert (_Responsible != Self      , "invariant") ; 
+-    
++    assert (_succ  != Self            , "invariant") ;
++    assert (_owner != Self            , "invariant") ;
++    assert (_Responsible != Self      , "invariant") ;
++
+     // Enqueue "Self" on ObjectMonitor's _cxq.
+     //
+-    // Node acts as a proxy for Self.  
++    // Node acts as a proxy for Self.
+     // As an aside, if were to ever rewrite the synchronization code mostly
+-    // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class 
+-    // Java objects.  This would avoid awkward lifecycle and liveness issues, 
+-    // as well as eliminate a subset of ABA issues. 
++    // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
++    // Java objects.  This would avoid awkward lifecycle and liveness issues,
++    // as well as eliminate a subset of ABA issues.
+     // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
+     //
+-  
+-    ObjectWaiter node(Self) ;   
+-    Self->_ParkEvent->reset() ; 
+-    node._prev   = (ObjectWaiter *) 0xBAD ; 
+-    node.TState  = ObjectWaiter::TS_CXQ ; 
+-  
++
++    ObjectWaiter node(Self) ;
++    Self->_ParkEvent->reset() ;
++    node._prev   = (ObjectWaiter *) 0xBAD ;
++    node.TState  = ObjectWaiter::TS_CXQ ;
++
+     // Push "Self" onto the front of the _cxq.
+     // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
+     // Note that spinning tends to reduce the rate at which threads
+-    // enqueue and dequeue on EntryList|cxq.  
+-    ObjectWaiter * nxt ; 
++    // enqueue and dequeue on EntryList|cxq.
++    ObjectWaiter * nxt ;
+     for (;;) {
+-        node._next = nxt = _cxq ; 
+-        if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ; 
+-  
++        node._next = nxt = _cxq ;
++        if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ;
++
+         // Interference - the CAS failed because _cxq changed.  Just retry.
+         // As an optional optimization we retry the lock.
+-        if (TryLock (Self) > 0) { 
+-            assert (_succ != Self         , "invariant") ;  
+-            assert (_owner == Self        , "invariant") ; 
+-            assert (_Responsible != Self  , "invariant") ; 
+-            return ; 
++        if (TryLock (Self) > 0) {
++            assert (_succ != Self         , "invariant") ;
++            assert (_owner == Self        , "invariant") ;
++            assert (_Responsible != Self  , "invariant") ;
++            return ;
+         }
+     }
+ 
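// The push above rendered as a standalone sketch (hypothetical names): a
// lock-free LIFO push that simply retries when another pusher interferes.
// HotSpot additionally re-probes the lock between attempts as an optimization.
#include <atomic>

struct Node { Node *next = nullptr; };

std::atomic<Node*> cxqHead{nullptr};

void pushSelf(Node *node) {
    Node *nxt = cxqHead.load(std::memory_order_relaxed);
    do {
        node->next = nxt;   // link to the current head before publishing
    } while (!cxqHead.compare_exchange_weak(nxt, node));  // nxt reloaded on failure
}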
+     // Check for cxq|EntryList edge transition to non-null.  This indicates
+-    // the onset of contention.  While contention persists exiting threads 
++    // the onset of contention.  While contention persists exiting threads
+     // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
+-    // operations revert to the faster 1-0 mode.  This enter operation may interleave 
+-    // (race) a concurrent 1-0 exit operation, resulting in stranding, so we 
+-    // arrange for one of the contending thread to use a timed park() operations 
+-    // to detect and recover from the race.  (Stranding is form of progress failure 
+-    // where the monitor is unlocked but all the contending threads remain parked).  
++    // operations revert to the faster 1-0 mode.  This enter operation may interleave
++    // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
++    // arrange for one of the contending threads to use a timed park() operation
++    // to detect and recover from the race.  (Stranding is a form of progress failure
++    // where the monitor is unlocked but all the contending threads remain parked).
+     // That is, at least one of the contended threads will periodically poll _owner.
+-    // One of the contending threads will become the designated "Responsible" thread.  
+-    // The Responsible thread uses a timed park instead of a normal indefinite park 
+-    // operation -- it periodically wakes and checks for and recovers from potential 
+-    // strandings admitted by 1-0 exit operations.   We need at most one Responsible 
+-    // thread per-monitor at any given moment.  Only threads on cxq|EntryList may 
+-    // be responsible for a monitor.  
++    // One of the contending threads will become the designated "Responsible" thread.
++    // The Responsible thread uses a timed park instead of a normal indefinite park
++    // operation -- it periodically wakes and checks for and recovers from potential
++    // strandings admitted by 1-0 exit operations.   We need at most one Responsible
++    // thread per-monitor at any given moment.  Only threads on cxq|EntryList may
++    // be responsible for a monitor.
+     //
+     // Currently, one of the contended threads takes on the added role of "Responsible".
+     // A viable alternative would be to use a dedicated "stranding checker" thread
+     // that periodically iterated over all the threads (or active monitors) and unparked
+     // successors where there was risk of stranding.  This would help eliminate the
+     // timer scalability issues we see on some platforms as we'd only have one thread
+-    // -- the checker -- parked on a timer.   
++    // -- the checker -- parked on a timer.
+ 
+-    if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) { 
+-        // Try to assume the role of responsible thread for the monitor.  
+-        // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self } 
+-        Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ; 
+-    }
+-  
+-    // The lock have been released while this thread was occupied queueing 
+-    // itself onto _cxq.  To close the race and avoid "stranding" and 
+-    // progress-liveness failure we must resample-retry _owner before parking. 
++    if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
++        // Try to assume the role of responsible thread for the monitor.
++        // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
++        Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
++    }
++
++    // The lock may have been released while this thread was occupied queueing
++    // itself onto _cxq.  To close the race and avoid "stranding" and
++    // progress-liveness failure we must resample-retry _owner before parking.
+     // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
+-    // In this case the ST-MEMBAR is accomplished with CAS().  
++    // In this case the ST-MEMBAR is accomplished with CAS().
+     //
+-    // TODO: Defer all thread state transitions until park-time.  
++    // TODO: Defer all thread state transitions until park-time.
+     // Since state transitions are heavy and inefficient we'd like
+     // to defer the state transitions until absolutely necessary,
+     // and in doing so avoid some transitions ...
+-    
+-    TEVENT (Inflated enter - Contention) ; 
+-    int nWakeups = 0 ; 
+-    int RecheckInterval = 1 ; 
++
++    TEVENT (Inflated enter - Contention) ;
++    int nWakeups = 0 ;
++    int RecheckInterval = 1 ;
+ 
+     for (;;) {
+ 
+-        if (TryLock (Self) > 0) break ; 
+-        assert (_owner != Self, "invariant") ; 
++        if (TryLock (Self) > 0) break ;
++        assert (_owner != Self, "invariant") ;
+ 
+-        if ((SyncFlags & 2) && _Responsible == NULL) { 
+-           Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ; 
++        if ((SyncFlags & 2) && _Responsible == NULL) {
++           Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
+         }
+ 
+         // park self
+-        if (_Responsible == Self || (SyncFlags & 1)) { 
+-            TEVENT (Inflated enter - park TIMED) ; 
+-            Self->_ParkEvent->park ((jlong) RecheckInterval) ; 
+-            // Increase the RecheckInterval, but clamp the value.  
+-            RecheckInterval *= 8 ; 
+-            if (RecheckInterval > 1000) RecheckInterval = 1000 ; 
+-        } else { 
+-            TEVENT (Inflated enter - park UNTIMED) ; 
+-            Self->_ParkEvent->park() ; 
++        if (_Responsible == Self || (SyncFlags & 1)) {
++            TEVENT (Inflated enter - park TIMED) ;
++            Self->_ParkEvent->park ((jlong) RecheckInterval) ;
++            // Increase the RecheckInterval, but clamp the value.
++            RecheckInterval *= 8 ;
++            if (RecheckInterval > 1000) RecheckInterval = 1000 ;
++        } else {
++            TEVENT (Inflated enter - park UNTIMED) ;
++            Self->_ParkEvent->park() ;
+         }
+ 
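// A small standalone sketch of the timed-park schedule above: the Responsible
// thread's recheck interval grows geometrically and saturates, bounding the
// worst-case stranding-recovery latency at roughly one second.
#include <cstdio>

int main() {
    int interval = 1;                         // milliseconds, as above
    for (int i = 0; i < 6; ++i) {
        std::printf("park #%d: %d ms\n", i, interval);
        interval *= 8;
        if (interval > 1000) interval = 1000; // same clamp as above
    }
    return 0;                                 // prints 1, 8, 64, 512, 1000, 1000
}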
+-        if (TryLock(Self) > 0) break ; 
+- 
+-        // The lock is still contested.  
+-        // Keep a tally of the # of futile wakeups. 
++        if (TryLock(Self) > 0) break ;
++
++        // The lock is still contested.
++        // Keep a tally of the # of futile wakeups.
+         // Note that the counter is not protected by a lock or updated by atomics.
+         // That is by design - we trade "lossy" counters which are exposed to
+-        // races during updates for a lower probe effect.  
++        // races during updates for a lower probe effect.
+         TEVENT (Inflated enter - Futile wakeup) ;
+-        if (ObjectSynchronizer::_sync_FutileWakeups != NULL) { 
+-           ObjectSynchronizer::_sync_FutileWakeups->inc() ; 
+-        } 
+-        ++ nWakeups ; 
++        if (ObjectSynchronizer::_sync_FutileWakeups != NULL) {
++           ObjectSynchronizer::_sync_FutileWakeups->inc() ;
++        }
++        ++ nWakeups ;
+ 
+         // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
+         // We can defer clearing _succ until after the spin completes
+-        // TrySpin() must tolerate being called with _succ == Self.  
++        // TrySpin() must tolerate being called with _succ == Self.
+         // Try yet another round of adaptive spinning.
+-        if ((Knob_SpinAfterFutile & 1) && TrySpin (Self) > 0) break ; 
++        if ((Knob_SpinAfterFutile & 1) && TrySpin (Self) > 0) break ;
+ 
+         // We can find that we were unpark()ed and redesignated _succ while
+         // we were spinning.  That's harmless.  If we iterate and call park(),
+-        // park() will consume the event and return immediately and we'll 
+-        // just spin again.  This pattern can repeat, leaving _succ to simply 
+-        // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().  
+-        // Alternately, we can sample fired() here, and if set, forgo spinning 
+-        // in the next iteration.  
+- 
+-        if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) { 
+-           Self->_ParkEvent->reset() ; 
+-           OrderAccess::fence() ; 
++        // park() will consume the event and return immediately and we'll
++        // just spin again.  This pattern can repeat, leaving _succ to simply
++        // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
++        // Alternately, we can sample fired() here, and if set, forgo spinning
++        // in the next iteration.
++
++        if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
++           Self->_ParkEvent->reset() ;
++           OrderAccess::fence() ;
+         }
+-        if (_succ == Self) _succ = NULL ; 
++        if (_succ == Self) _succ = NULL ;
+ 
+         // Invariant: after clearing _succ a thread *must* retry _owner before parking.
+         OrderAccess::fence() ;
+     }
+ 
+     // Egress :
+-    // Self has acquired the lock -- Unlink Self from the cxq or EntryList. 
++    // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
+     // Normally we'll find Self on the EntryList .
+-    // From the perspective of the lock owner (this thread), the 
+-    // EntryList is stable and cxq is prepend-only.  
+-    // The head of cxq is volatile but the interior is stable.  
++    // From the perspective of the lock owner (this thread), the
++    // EntryList is stable and cxq is prepend-only.
++    // The head of cxq is volatile but the interior is stable.
+     // In addition, Self.TState is stable.
+ 
+-    assert (_owner == Self      , "invariant") ; 
+-    assert (object() != NULL    , "invariant") ; 
++    assert (_owner == Self      , "invariant") ;
++    assert (object() != NULL    , "invariant") ;
+     // I'd like to write:
+-    //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; 
++    //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
+     // but as we're at a safepoint that's not safe.
+ 
+-    UnlinkAfterAcquire (Self, &node) ; 
+-    if (_succ == Self) _succ = NULL ; 
++    UnlinkAfterAcquire (Self, &node) ;
++    if (_succ == Self) _succ = NULL ;
+ 
+-    assert (_succ != Self, "invariant") ; 
+-    if (_Responsible == Self) { 
+-        _Responsible = NULL ; 
+-        // Dekker pivot-point.  
++    assert (_succ != Self, "invariant") ;
++    if (_Responsible == Self) {
++        _Responsible = NULL ;
++        // Dekker pivot-point.
+         // Consider OrderAccess::storeload() here
+ 
+-        // We may leave threads on cxq|EntryList without a designated 
+-        // "Responsible" thread.  This is benign.  When this thread subsequently 
+-        // exits the monitor it can "see" such preexisting "old" threads -- 
+-        // threads that arrived on the cxq|EntryList before the fence, above --  
+-        // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads 
+-        // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible 
++        // We may leave threads on cxq|EntryList without a designated
++        // "Responsible" thread.  This is benign.  When this thread subsequently
++        // exits the monitor it can "see" such preexisting "old" threads --
++        // threads that arrived on the cxq|EntryList before the fence, above --
++        // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
++        // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
+         // non-null and elect a new "Responsible" timer thread.
+         //
+         // This thread executes:
+         //    ST Responsible=null; MEMBAR    (in enter epilog - here)
+         //    LD cxq|EntryList               (in subsequent exit)
+-        // 
++        //
+         // Entering threads in the slow/contended path execute:
+         //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
+         //    The (ST cxq; MEMBAR) is accomplished with CAS().
+         //
+         // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
+-        // exit operation from floating above the ST Responsible=null. 
++        // exit operation from floating above the ST Responsible=null.
+         //
+         // In *practice* however, EnterI() is always followed by some atomic
+         // operation such as the decrement of _count in ::enter().  Those atomics
+-        // obviate the need for the explicit MEMBAR, above. 
++        // obviate the need for the explicit MEMBAR, above.
+     }
+ 
+-    // We've acquired ownership with CAS().  
+-    // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.  
+-    // But since the CAS() this thread may have also stored into _succ, 
+-    // EntryList, cxq or Responsible.  These meta-data updates must be 
+-    // visible __before this thread subsequently drops the lock.  
+-    // Consider what could occur if we didn't enforce this constraint -- 
++    // We've acquired ownership with CAS().
++    // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
++    // But since the CAS() this thread may have also stored into _succ,
++    // EntryList, cxq or Responsible.  These meta-data updates must be
++    // visible __before this thread subsequently drops the lock.
++    // Consider what could occur if we didn't enforce this constraint --
+     // STs to monitor meta-data and user-data could reorder with (become
+-    // visible after) the ST in exit that drops ownership of the lock.  
++    // visible after) the ST in exit that drops ownership of the lock.
+     // Some other thread could then acquire the lock, but observe inconsistent
+-    // or old monitor meta-data and heap data.  That violates the JMM.  
+-    // To that end, the 1-0 exit() operation must have at least STST|LDST 
+-    // "release" barrier semantics.  Specifically, there must be at least a 
+-    // STST|LDST barrier in exit() before the ST of null into _owner that drops 
+-    // the lock.   The barrier ensures that changes to monitor meta-data and data 
+-    // protected by the lock will be visible before we release the lock, and 
+-    // therefore before some other thread (CPU) has a chance to acquire the lock.  
+-    // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.  
++    // or old monitor meta-data and heap data.  That violates the JMM.
++    // To that end, the 1-0 exit() operation must have at least STST|LDST
++    // "release" barrier semantics.  Specifically, there must be at least a
++    // STST|LDST barrier in exit() before the ST of null into _owner that drops
++    // the lock.   The barrier ensures that changes to monitor meta-data and data
++    // protected by the lock will be visible before we release the lock, and
++    // therefore before some other thread (CPU) has a chance to acquire the lock.
++    // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
+     //
+-    // Critically, any prior STs to _succ or EntryList must be visible before 
+-    // the ST of null into _owner in the *subsequent* (following) corresponding 
+-    // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily 
+-    // execute a serializing instruction.  
++    // Critically, any prior STs to _succ or EntryList must be visible before
++    // the ST of null into _owner in the *subsequent* (following) corresponding
++    // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
++    // execute a serializing instruction.
+ 
+     if (SyncFlags & 8) {
+-       OrderAccess::fence() ; 
++       OrderAccess::fence() ;
+     }
+-    return ; 
++    return ;
+ }
+ 
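// A minimal sketch (std::atomic rendering, not HotSpot's OrderAccess API) of
// the release constraint described above: in a 1-0 exit, the store that drops
// ownership needs at least STST|LDST ("release") semantics so that prior
// stores to monitor metadata and to lock-protected data become visible first.
#include <atomic>

std::atomic<void*> owner{nullptr};
int guardedData = 0;   // stands in for data protected by the monitor

void unlock10() {
    ++guardedData;                                    // update under the lock
    owner.store(nullptr, std::memory_order_release);  // drop the lock last
}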
+ // ExitSuspendEquivalent:
+ // A faster alternate to handle_special_suspend_equivalent_condition()
+ //
+-// handle_special_suspend_equivalent_condition() unconditionally 
++// handle_special_suspend_equivalent_condition() unconditionally
+ // acquires the SR_lock.  On some platforms uncontended MutexLocker()
+-// operations have high latency.  Note that in ::enter() we call HSSEC 
+-// while holding the monitor, so we effectively lengthen the critical sections.  
++// operations have high latency.  Note that in ::enter() we call HSSEC
++// while holding the monitor, so we effectively lengthen the critical sections.
+ //
+ // There are a number of possible solutions:
+ //
+-// A.  To ameliorate the problem we might also defer state transitions 
+-//     to as late as possible -- just prior to parking.  
+-//     Given that, we'd call HSSEC after having returned from park(), 
+-//     but before attempting to acquire the monitor.  This is only a 
+-//     partial solution.  It avoids calling HSSEC while holding the 
+-//     monitor (good), but it still increases successor reacquistion latency --
++// A.  To ameliorate the problem we might also defer state transitions
++//     to as late as possible -- just prior to parking.
++//     Given that, we'd call HSSEC after having returned from park(),
++//     but before attempting to acquire the monitor.  This is only a
++//     partial solution.  It avoids calling HSSEC while holding the
++//     monitor (good), but it still increases successor reacquisition latency --
+ //     the interval between unparking a successor and the time the successor
+-//     resumes and retries the lock.  See ReenterI(), which defers state transitions.  
++//     resumes and retries the lock.  See ReenterI(), which defers state transitions.
+ //     If we use this technique we can also avoid EnterI()-exit() loop
+-//     in ::enter() where we iteratively drop the lock and then attempt 
++//     in ::enter() where we iteratively drop the lock and then attempt
+ //     to reacquire it after suspending.
+ //
+-// B.  In the future we might fold all the suspend bits into a 
++// B.  In the future we might fold all the suspend bits into a
+ //     composite per-thread suspend flag and then update it with CAS().
+-//     Alternately, a Dekker-like mechanism with multiple variables 
++//     Alternately, a Dekker-like mechanism with multiple variables
+ //     would suffice:
+ //       ST Self->_suspend_equivalent = false
+ //       MEMBAR
+@@ -2979,114 +3056,114 @@
+ //
+ 
+ 
+-bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) { 
+-   int Mode = Knob_FastHSSEC ; 
+-   if (Mode && !jSelf->is_external_suspend()) { 
+-      assert (jSelf->is_suspend_equivalent(), "invariant") ; 
+-      jSelf->clear_suspend_equivalent() ; 
++bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) {
++   int Mode = Knob_FastHSSEC ;
++   if (Mode && !jSelf->is_external_suspend()) {
++      assert (jSelf->is_suspend_equivalent(), "invariant") ;
++      jSelf->clear_suspend_equivalent() ;
+       if (2 == Mode) OrderAccess::storeload() ;
+-      if (!jSelf->is_external_suspend()) return false ; 
++      if (!jSelf->is_external_suspend()) return false ;
+       // We raced a suspension -- fall thru into the slow path
+-      TEVENT (ExitSuspendEquivalent - raced) ; 
+-      jSelf->set_suspend_equivalent() ; 
++      TEVENT (ExitSuspendEquivalent - raced) ;
++      jSelf->set_suspend_equivalent() ;
+    }
+-   return jSelf->handle_special_suspend_equivalent_condition() ; 
++   return jSelf->handle_special_suspend_equivalent_condition() ;
+ }
+ 
+ 
+ // ReenterI() is a specialized inline form of the latter half of the
+-// contended slow-path from EnterI().  We use ReenterI() only for 
++// contended slow-path from EnterI().  We use ReenterI() only for
+ // monitor reentry in wait().
+ //
+-// In the future we should reconcile EnterI() and ReenterI(), adding 
++// In the future we should reconcile EnterI() and ReenterI(), adding
+ // Knob_Reset and Knob_SpinAfterFutile support and restructuring the
+ // loop accordingly.
+ 
+-void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) { 
+-    assert (Self != NULL                , "invariant") ; 
+-    assert (SelfNode != NULL            , "invariant") ; 
+-    assert (SelfNode->_thread == Self   , "invariant") ; 
+-    assert (_waiters > 0                , "invariant") ; 
+-    assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ; 
+-    assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ; 
+-    JavaThread * jt = (JavaThread *) Self ; 
++void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
++    assert (Self != NULL                , "invariant") ;
++    assert (SelfNode != NULL            , "invariant") ;
++    assert (SelfNode->_thread == Self   , "invariant") ;
++    assert (_waiters > 0                , "invariant") ;
++    assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ;
++    assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
++    JavaThread * jt = (JavaThread *) Self ;
+ 
+-    int nWakeups = 0 ; 
++    int nWakeups = 0 ;
+     for (;;) {
+-        ObjectWaiter::TStates v = SelfNode->TState ; 
+-        guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ; 
+-        assert    (_owner != Self, "invariant") ; 
++        ObjectWaiter::TStates v = SelfNode->TState ;
++        guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
++        assert    (_owner != Self, "invariant") ;
+ 
+-        if (TryLock (Self) > 0) break ; 
+-        if (TrySpin (Self) > 0) break ;  
++        if (TryLock (Self) > 0) break ;
++        if (TrySpin (Self) > 0) break ;
+ 
+-        TEVENT (Wait Reentry - parking) ; 
++        TEVENT (Wait Reentry - parking) ;
+ 
+         // State transition wrappers around park() ...
+         // ReenterI() wisely defers state transitions until
+         // it's clear we must park the thread.
+-        { 
++        {
+            OSThreadContendState osts(Self->osthread());
+            ThreadBlockInVM tbivm(jt);
+ 
+-           // cleared by handle_special_suspend_equivalent_condition() 
++           // cleared by handle_special_suspend_equivalent_condition()
+            // or java_suspend_self()
+            jt->set_suspend_equivalent();
+-           if (SyncFlags & 1) { 
++           if (SyncFlags & 1) {
+               Self->_ParkEvent->park ((jlong)1000) ;
+-           } else { 
++           } else {
+               Self->_ParkEvent->park () ;
+            }
+ 
+            // were we externally suspended while we were waiting?
+-           for (;;) { 
+-              if (!ExitSuspendEquivalent (jt)) break ; 
+-              if (_succ == Self) { _succ = NULL; OrderAccess::fence(); } 
++           for (;;) {
++              if (!ExitSuspendEquivalent (jt)) break ;
++              if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
+               jt->java_suspend_self();
+               jt->set_suspend_equivalent();
+            }
+         }
+ 
+-        // Try again, but just so we distinguish between futile wakeups and 
+-        // successful wakeups.  The following test isn't algorithmically 
++        // Try again, but just so we distinguish between futile wakeups and
++        // successful wakeups.  The following test isn't algorithmically
+         // necessary, but it helps us maintain sensible statistics.
+-        if (TryLock(Self) > 0) break ; 
++        if (TryLock(Self) > 0) break ;
+ 
+-        // The lock is still contested.  
+-        // Keep a tally of the # of futile wakeups. 
++        // The lock is still contested.
++        // Keep a tally of the # of futile wakeups.
+         // Note that the counter is not protected by a lock or updated by atomics.
+         // That is by design - we trade "lossy" counters which are exposed to
+-        // races during updates for a lower probe effect.  
++        // races during updates for a lower probe effect.
+         TEVENT (Wait Reentry - futile wakeup) ;
+-        ++ nWakeups ; 
++        ++ nWakeups ;
+ 
+-        // Assuming this is not a spurious wakeup we'll normally 
++        // Assuming this is not a spurious wakeup we'll normally
+         // find that _succ == Self.
+         if (_succ == Self) _succ = NULL ;
+ 
+-        // Invariant: after clearing _succ a contending thread 
++        // Invariant: after clearing _succ a contending thread
+         // *must* retry  _owner before parking.
+         OrderAccess::fence() ;
+ 
+         if (ObjectSynchronizer::_sync_FutileWakeups != NULL) {
+-          ObjectSynchronizer::_sync_FutileWakeups->inc() ; 
++          ObjectSynchronizer::_sync_FutileWakeups->inc() ;
+         }
+     }
+ 
+-    // Self has acquired the lock -- Unlink Self from the cxq or EntryList . 
++    // Self has acquired the lock -- Unlink Self from the cxq or EntryList .
+     // Normally we'll find Self on the EntryList.
+-    // Unlinking from the EntryList is constant-time and atomic-free.  
+-    // From the perspective of the lock owner (this thread), the 
+-    // EntryList is stable and cxq is prepend-only.  
+-    // The head of cxq is volatile but the interior is stable.  
++    // Unlinking from the EntryList is constant-time and atomic-free.
++    // From the perspective of the lock owner (this thread), the
++    // EntryList is stable and cxq is prepend-only.
++    // The head of cxq is volatile but the interior is stable.
+     // In addition, Self.TState is stable.
+ 
+-    assert (_owner == Self, "invariant") ; 
+-    assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; 
+-    UnlinkAfterAcquire (Self, SelfNode) ; 
+-    if (_succ == Self) _succ = NULL ; 
+-    assert (_succ != Self, "invariant") ; 
+-    SelfNode->TState = ObjectWaiter::TS_RUN ; 
++    assert (_owner == Self, "invariant") ;
++    assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
++    UnlinkAfterAcquire (Self, SelfNode) ;
++    if (_succ == Self) _succ = NULL ;
++    assert (_succ != Self, "invariant") ;
++    SelfNode->TState = ObjectWaiter::TS_RUN ;
+     OrderAccess::fence() ;      // see comments at the end of EnterI()
+ }
+ 
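// A minimal sketch of the "lossy counter" trade-off noted in both EnterI and
// ReenterI above: a plain, unsynchronized increment can drop updates when
// threads race, but avoids the cache-line contention (probe effect) that an
// atomic read-modify-write would add on the contended path.
static int futileWakeups = 0;   // statistics only: may undercount under races

static void noteFutileWakeup() {
    ++futileWakeups;            // racy by design -- cheap, approximate tally
}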
+@@ -3096,8 +3173,8 @@
+        assert(_recursions == 0, "internal state error");
+        _owner = THREAD ;
+        _recursions = 1 ;
+-       OwnerIsThread = 1 ; 
+-       return true; 
++       OwnerIsThread = 1 ;
++       return true;
+     }
+     if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
+       return false;
+@@ -3112,63 +3189,63 @@
+ void ATTR ObjectMonitor::enter(TRAPS) {
+   // The following code is ordered to check the most common cases first
+   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
+-  Thread * const Self = THREAD ; 
+-  void * cur ; 
++  Thread * const Self = THREAD ;
++  void * cur ;
+ 
+-  cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ; 
+-  if (cur == NULL) { 
+-     // Either ASSERT _recursions ==  0 or explicitly set _recursions = 0.
+-     assert (_recursions == 0   , "invariant") ; 
+-     assert (_owner      == Self, "invariant") ; 
++  cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
++  if (cur == NULL) {
++     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
++     assert (_recursions == 0   , "invariant") ;
++     assert (_owner      == Self, "invariant") ;
+      // CONSIDER: set or assert OwnerIsThread == 1
+-     return ; 
++     return ;
+   }
+ 
+-  if (cur == Self) { 
+-     // TODO-FIXME: check for integer overflow!  BUGID 6557169.  
+-     _recursions ++ ; 
+-     return ; 
++  if (cur == Self) {
++     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
++     _recursions ++ ;
++     return ;
+   }
+ 
+   if (Self->is_lock_owned ((address)cur)) {
+     assert (_recursions == 0, "internal state error");
+     _recursions = 1 ;
+     // Commute owner from a thread-specific on-stack BasicLockObject address to
+-    // a full-fledged "Thread *". 
+-    _owner = Self ;           
+-    OwnerIsThread = 1 ; 
+-    return ; 
++    // a full-fledged "Thread *".
++    _owner = Self ;
++    OwnerIsThread = 1 ;
++    return ;
+   }
+ 
+   // We've encountered genuine contention.
+-  assert (Self->_Stalled == 0, "invariant") ; 
+-  Self->_Stalled = intptr_t(this) ; 
++  assert (Self->_Stalled == 0, "invariant") ;
++  Self->_Stalled = intptr_t(this) ;
+ 
+   // Try one round of spinning *before* enqueueing Self
+   // and before going through the awkward and expensive state
+   // transitions.  The following spin is strictly optional ...
+   // Note that if we acquire the monitor from an initial spin
+   // we forgo posting JVMTI events and firing DTRACE probes.
+-  if (Knob_SpinEarly && TrySpin (Self) > 0) { 
+-     assert (_owner == Self      , "invariant") ; 
+-     assert (_recursions == 0    , "invariant") ; 
+-     assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; 
+-     Self->_Stalled = 0 ; 
+-     return ; 
++  if (Knob_SpinEarly && TrySpin (Self) > 0) {
++     assert (_owner == Self      , "invariant") ;
++     assert (_recursions == 0    , "invariant") ;
++     assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
++     Self->_Stalled = 0 ;
++     return ;
+   }
+ 
+   assert (_owner != Self          , "invariant") ;
+-  assert (_succ  != Self          , "invariant") ;  
+-  assert (Self->is_Java_thread()  , "invariant") ; 
+-  JavaThread * jt = (JavaThread *) Self ;  
+-  assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ; 
+-  assert (jt->thread_state() != _thread_blocked   , "invariant") ; 
++  assert (_succ  != Self          , "invariant") ;
++  assert (Self->is_Java_thread()  , "invariant") ;
++  JavaThread * jt = (JavaThread *) Self ;
++  assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
++  assert (jt->thread_state() != _thread_blocked   , "invariant") ;
+   assert (this->object() != NULL  , "invariant") ;
+-  assert (_count >= 0, "invariant") ; 
++  assert (_count >= 0, "invariant") ;
+ 
+-  // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().  
+-  // Ensure the object-monitor relationship remains stable while there's contention.  
+-  Atomic::inc_ptr(&_count);         
++  // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
++  // Ensure the object-monitor relationship remains stable while there's contention.
++  Atomic::inc_ptr(&_count);
+ 
+   { // Change java thread status to indicate blocked on monitor enter.
+     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
+@@ -3180,117 +3257,117 @@
+ 
+     OSThreadContendState osts(Self->osthread());
+     ThreadBlockInVM tbivm(jt);
+-    
++
+     Self->set_current_pending_monitor(this);
+ 
+     // TODO-FIXME: change the following for(;;) loop to straight-line code.
+-    for (;;) { 
++    for (;;) {
+       jt->set_suspend_equivalent();
+-      // cleared by handle_special_suspend_equivalent_condition() 
++      // cleared by handle_special_suspend_equivalent_condition()
+       // or java_suspend_self()
+ 
+-      EnterI (THREAD) ; 
++      EnterI (THREAD) ;
++
++      if (!ExitSuspendEquivalent(jt)) break ;
+ 
+-      if (!ExitSuspendEquivalent(jt)) break ; 
+-      
+       //
+       // We have acquired the contended monitor, but while we were
+       // waiting another thread suspended us. We don't want to enter
+       // the monitor while suspended because that would surprise the
+       // thread that suspended us.
+       //
+-   	  _recursions = 0 ; 
+-      _succ = NULL ; 
+-      exit (Self) ; 
++      _recursions = 0 ;
++      _succ = NULL ;
++      exit (Self) ;
+ 
+       jt->java_suspend_self();
+-    } 
++    }
+     Self->set_current_pending_monitor(NULL);
+   }
+ 
+   Atomic::dec_ptr(&_count);
+-  assert (_count >= 0, "invariant") ; 
+-  Self->_Stalled = 0 ; 
++  assert (_count >= 0, "invariant") ;
++  Self->_Stalled = 0 ;
+ 
+-  // Must either set _recursions = 0 or ASSERT _recursions == 0. 
+-  assert (_recursions == 0     , "invariant") ; 
+-  assert (_owner == Self       , "invariant") ; 
+-  assert (_succ  != Self       , "invariant") ; 
+-  assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; 
+-
+-  // The thread -- now the owner -- is back in vm mode. 
+-  // Report the glorious news via TI,DTrace and jvmstat.  
+-  // The probe effect is non-trivial.  All the reportage occurs 
+-  // while we hold the monitor, increasing the length of the critical 
+-  // section.  Amdahl's parallel speedup law comes vividly into play.  
++  // Must either set _recursions = 0 or ASSERT _recursions == 0.
++  assert (_recursions == 0     , "invariant") ;
++  assert (_owner == Self       , "invariant") ;
++  assert (_succ  != Self       , "invariant") ;
++  assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
++
++  // The thread -- now the owner -- is back in vm mode.
++  // Report the glorious news via TI,DTrace and jvmstat.
++  // The probe effect is non-trivial.  All the reportage occurs
++  // while we hold the monitor, increasing the length of the critical
++  // section.  Amdahl's parallel speedup law comes vividly into play.
+   //
+   // Another option might be to aggregate the events (thread local or
+   // per-monitor aggregation) and defer reporting until a more opportune
+-  // time -- such as next time some thread encounters contention but has 
+-  // yet to acquire the lock.  While spinning that thread could 
++  // time -- such as next time some thread encounters contention but has
++  // yet to acquire the lock.  While that thread is
+   // spinning we could increment JVMStat counters, etc.
+ 
+   DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
+   if (JvmtiExport::should_post_monitor_contended_entered()) {
+     JvmtiExport::post_monitor_contended_entered(jt, this);
+   }
+-  if (ObjectSynchronizer::_sync_ContendedLockAttempts != NULL) { 
+-     ObjectSynchronizer::_sync_ContendedLockAttempts->inc() ; 
++  if (ObjectSynchronizer::_sync_ContendedLockAttempts != NULL) {
++     ObjectSynchronizer::_sync_ContendedLockAttempts->inc() ;
+   }
+ }
+ 
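// A minimal standalone sketch of the acquire/suspend-retry loop in enter()
// above, using std::atomic rather than HotSpot's Atomic/OrderAccess wrappers.
// The toy_* names and the suspend_pending flag are illustrative assumptions,
// not HotSpot APIs:
#include <atomic>

static std::atomic<bool> toy_locked{false};
static std::atomic<bool> toy_suspend_pending{false};

static void toy_enter_i() {              // stands in for EnterI(THREAD)
    while (toy_locked.exchange(true, std::memory_order_acquire)) { /* park */ }
}
static void toy_exit() {                 // stands in for exit(Self)
    toy_locked.store(false, std::memory_order_release);
}
static void toy_self_suspend() {         // stands in for java_suspend_self()
    toy_suspend_pending.store(false, std::memory_order_relaxed);
}

void toy_contended_enter() {
    for (;;) {
        toy_enter_i();
        if (!toy_suspend_pending.load(std::memory_order_acquire)) break;
        // We hold the lock but a suspension arrived while we were parked:
        // back out, honor the suspension, then compete for the lock again,
        // so the suspender never observes us running inside the monitor.
        toy_exit();
        toy_self_suspend();
    }
}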
+-void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) { 
+-   assert (_owner == Self, "invariant") ; 
++void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) {
++   assert (_owner == Self, "invariant") ;
+ 
+-   // Exit protocol: 
++   // Exit protocol:
+    // 1. ST _succ = wakee
+-   // 2. membar #loadstore|#storestore; 
+-   // 2. ST _owner = NULL 
++   // 2. membar #loadstore|#storestore;
++   // 2. ST _owner = NULL
+    // 3. unpark(wakee)
+-      
+-   _succ = Knob_SuccEnabled ? Wakee->_thread : NULL ; 
+-   ParkEvent * Trigger = Wakee->_event ; 
+ 
+-   // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.    
+-   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be 
++   _succ = Knob_SuccEnabled ? Wakee->_thread : NULL ;
++   ParkEvent * Trigger = Wakee->_event ;
++
++   // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
++   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
+    // out-of-scope (non-extant).
+-   Wakee  = NULL ;      
+-                          
++   Wakee  = NULL ;
++
+    // Drop the lock
+-   OrderAccess::release_store_ptr (&_owner, NULL) ;  
+-   OrderAccess::fence() ;	                        // ST _owner vs LD in unpark()     
++   OrderAccess::release_store_ptr (&_owner, NULL) ;
++   OrderAccess::fence() ;                               // ST _owner vs LD in unpark()
+ 
+    // TODO-FIXME:
+    // If there's a safepoint pending the best policy would be to
+    // get _this thread to a safepoint and only wake the successor
+    // after the safepoint completed.  monitorexit uses a "leaf"
+    // state transition, however, so this thread can't become
+-   // safe at this point in time.  (Its stack isn't walkable). 
++   // safe at this point in time.  (Its stack isn't walkable).
+    // The next best thing is to defer waking the successor by
+-   // adding to a list of thread to be unparked after at the 
+-   // end of the forthcoming STW).  
+-   if (SafepointSynchronize::do_call_back()) { 
++   // adding it to a list of threads to be unparked at the
++   // end of the forthcoming STW.
++   if (SafepointSynchronize::do_call_back()) {
+       TEVENT (unpark before SAFEPOINT) ;
+    }
+ 
+    // Possible optimizations ...
+-   // 
++   //
+    // * Consider: set Wakee->UnparkTime = timeNow()
+    //   When the thread wakes up it'll compute (timeNow() - Self->UnparkTime()).
+    //   By measuring recent ONPROC latency we can approximate the
+-   //   the system load.  In turn, we can feed that information back
+-   //   into the spinning & succession policies.  
+-   //   (ONPROC latency correlates strongly with load).  
++   //   system load.  In turn, we can feed that information back
++   //   into the spinning & succession policies.
++   //   (ONPROC latency correlates strongly with load).
+    //
+    // * Pull affinity:
+    //   If the wakee is cold then transiently setting it's affinity
+-   //   to the current CPU is a good idea.  
++   //   to the current CPU is a good idea.
+    //   See http://j2se.east/~dice/PERSIST/050624-PullAffinity.txt
+-   Trigger->unpark() ;     
++   Trigger->unpark() ;
+ 
+    // Maintain stats and report events to JVMTI
+-   if (ObjectSynchronizer::_sync_Parks != NULL) { 
+-      ObjectSynchronizer::_sync_Parks->inc() ; 
++   if (ObjectSynchronizer::_sync_Parks != NULL) {
++      ObjectSynchronizer::_sync_Parks->inc() ;
+    }
+    DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
+ }
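// The ExitEpilog ordering above, restated as a standalone C++11 sketch.
// ToyWaiter and the permit flag are assumptions standing in for HotSpot's
// ObjectWaiter/ParkEvent; the point is the ST _succ / ST _owner / unpark
// order and the capture-before-drop hygiene rule:
#include <atomic>

struct ToyWaiter { std::atomic<int> permit{0}; };

static std::atomic<void*>      ee_owner{nullptr};
static std::atomic<ToyWaiter*> ee_succ{nullptr};

void toy_exit_epilog(ToyWaiter* wakee) {
    ee_succ.store(wakee, std::memory_order_relaxed);      // 1. ST _succ = wakee
    std::atomic<int>* trigger = &wakee->permit;           // capture before dropping
    wakee = nullptr;                                      //   the lock (hygiene rule)
    ee_owner.store(nullptr, std::memory_order_release);   // 2. drop the lock
    std::atomic_thread_fence(std::memory_order_seq_cst);  //    ST _owner vs LD in unpark
    trigger->store(1, std::memory_order_release);         // 3. unpark the successor
}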
+@@ -3300,103 +3377,103 @@
+ // ~~~~~~
+ // Note that the collector can't reclaim the objectMonitor or deflate
+ // the object out from underneath the thread calling ::exit() as the
+-// the thread calling ::exit() never transitions to a stable state.
+-// This inhibits GC, which in turn inhibits asynchronous (and 
+-// inopportune) reclamation of "this".  
++// thread calling ::exit() never transitions to a stable state.
++// This inhibits GC, which in turn inhibits asynchronous (and
++// inopportune) reclamation of "this".
+ //
+-// We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ; 
++// We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
+ // There's one exception to the claim above, however.  EnterI() can call
+ // exit() to drop a lock if the acquirer has been externally suspended.
+ // In that case exit() is called with _thread_state as _thread_blocked,
+-// but the monitor's _count field is > 0, which inhibits reclaimation.  
++// but the monitor's _count field is > 0, which inhibits reclamation.
+ //
+ // 1-0 exit
+ // ~~~~~~~~
+-// ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of 
+-// the fast-path operators have been optimized so the common ::exit() 
+-// operation is 1-0.  See i486.ad fast_unlock(), for instance.  
++// ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
++// the fast-path operators have been optimized so the common ::exit()
++// operation is 1-0.  See i486.ad fast_unlock(), for instance.
+ // The code emitted by fast_unlock() elides the usual MEMBAR.  This
+ // greatly improves latency -- MEMBAR and CAS having considerable local
+ // latency on modern processors -- but at the cost of "stranding".  Absent the
+ // MEMBAR, a thread in fast_unlock() can race a thread in the slow
+// ::enter() path, resulting in the entering thread being stranded
+ // and a progress-liveness failure.   Stranding is extremely rare.
+-// We use timers (timed park operations) & periodic polling to detect 
++// We use timers (timed park operations) & periodic polling to detect
+ // and recover from stranding.  Potentially stranded threads periodically
+ // wake up and poll the lock.  See the usage of the _Responsible variable.
+ //
+ // The CAS() in enter provides for safety and exclusion, while the CAS or
+ // MEMBAR in exit provides for progress and avoids stranding.  1-0 locking
+// eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
+-// We detect and recover from stranding with timers.  
++// We detect and recover from stranding with timers.
+ //
+-// If a thread transiently strands it'll park until (a) another 
+-// thread acquires the lock and then drops the lock, at which time the 
+-// exiting thread will notice and unpark the stranded thread, or, (b) 
+-// the timer expires.  If the lock is high traffic then the stranding latency 
+-// will be low due to (a).  If the lock is low traffic then the odds of 
++// If a thread transiently strands it'll park until (a) another
++// thread acquires the lock and then drops the lock, at which time the
++// exiting thread will notice and unpark the stranded thread, or, (b)
++// the timer expires.  If the lock is high traffic then the stranding latency
++// will be low due to (a).  If the lock is low traffic then the odds of
+ // stranding are lower, although the worst-case stranding latency
+ // is longer.  Critically, we don't want to put excessive load in the
+ // platform's timer subsystem.  We want to minimize both the timer injection
+-// rate (timers created/sec) as well as the number of timers active at 
++// rate (timers created/sec) as well as the number of timers active at
+ // any one time.  (more precisely, we want to minimize timer-seconds, which is
+-// the integral of the # of active timers at any instant over time). 
+-// Both impinge on OS scalability.  Given that, at most one thread parked on 
+-// a monitor will use a timer.  
++// the integral of the # of active timers at any instant over time).
++// Both impinge on OS scalability.  Given that, at most one thread parked on
++// a monitor will use a timer.
+ 
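// The Dekker/Lamport duality described above, as a standalone sketch with
// std::atomic (names are assumptions, not the monitor's real fields).  At
// least one of the two threads must observe the other's store, so either the
// exiter sees a waiter to wake or the enterer sees a free lock; the 1-0 fast
// path elides the exit-side fence, which is exactly what admits the rare,
// timer-recovered stranding discussed above:
#include <atomic>

static std::atomic<void*> dk_owner{nullptr};
static std::atomic<void*> dk_cxq{nullptr};     // contention queue head

bool exit_must_wake_successor() {
    dk_owner.store(nullptr, std::memory_order_release);        // ST Owner = null
    std::atomic_thread_fence(std::memory_order_seq_cst);       // MEMBAR
    return dk_cxq.load(std::memory_order_relaxed) != nullptr;  // LD cxq
}

bool enter_sees_free_lock(void* node) {
    dk_cxq.store(node, std::memory_order_release);             // ST cxq = nonnull
    std::atomic_thread_fence(std::memory_order_seq_cst);       // MEMBAR
    return dk_owner.load(std::memory_order_relaxed) == nullptr; // LD Owner
}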
+ void ATTR ObjectMonitor::exit(TRAPS) {
+-   Thread * Self = THREAD ; 
++   Thread * Self = THREAD ;
+    if (THREAD != _owner) {
+      if (THREAD->is_lock_owned((address) _owner)) {
+        // Transmute _owner from a BasicLock pointer to a Thread address.
+-       // We don't need to hold _mutex for this transition.  
++       // We don't need to hold _mutex for this transition.
+        // Non-null to Non-null is safe as long as all readers can
+-       // tolerate either flavor.  
+-       assert (_recursions == 0, "invariant") ; 
++       // tolerate either flavor.
++       assert (_recursions == 0, "invariant") ;
+        _owner = THREAD ;
+        _recursions = 0 ;
+-       OwnerIsThread = 1 ; 
++       OwnerIsThread = 1 ;
+      } else {
+        // NOTE: we need to handle unbalanced monitor enter/exit
+        // in native code by throwing an exception.
+        // TODO: Throw an IllegalMonitorStateException ?
+-       TEVENT (Exit - Throw IMSX) ; 
++       TEVENT (Exit - Throw IMSX) ;
+        assert(false, "Non-balanced monitor enter/exit!");
+-       if (false) { 
+-          THROW(vmSymbols::java_lang_IllegalMonitorStateException());           
++       if (false) {
++          THROW(vmSymbols::java_lang_IllegalMonitorStateException());
+        }
+        return;
+      }
+    }
+ 
+-   if (_recursions != 0) { 
++   if (_recursions != 0) {
+      _recursions--;        // this is simple recursive enter
+-     TEVENT (Inflated exit - recursive) ; 
+-     return ; 
++     TEVENT (Inflated exit - recursive) ;
++     return ;
+    }
+ 
+   // Invariant: after setting Responsible=null a thread must execute
+    // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
+    if ((SyncFlags & 4) == 0) {
+-      _Responsible = NULL ; 
++      _Responsible = NULL ;
+    }
+ 
+-   for (;;) { 
+-      assert (THREAD == _owner, "invariant") ; 
++   for (;;) {
++      assert (THREAD == _owner, "invariant") ;
+ 
+       // Fast-path monitor exit:
+       //
+-      // Observe the Dekker/Lamport duality: 
++      // Observe the Dekker/Lamport duality:
+       // A thread in ::exit() executes:
+       //   ST Owner=null; MEMBAR; LD EntryList|cxq.
+       // A thread in the contended ::enter() path executes the complementary:
+-      //   ST EntryList|cxq = nonnull; MEMBAR; LD Owner. 
+-      //  
++      //   ST EntryList|cxq = nonnull; MEMBAR; LD Owner.
++      //
+       // Note that there's a benign race in the exit path.  We can drop the
+       // lock, another thread can reacquire the lock immediately, and we can
+       // then wake a thread unnecessarily (yet another flavor of futile wakeup).
+-      // This is benign, and we've structured the code so the windows are short 
+-      // and the frequency of such futile wakeups is low.   
++      // This is benign, and we've structured the code so the windows are short
++      // and the frequency of such futile wakeups is low.
+       //
+       // We could eliminate the race by encoding both the "LOCKED" state and
+       // the queue head in a single word.  Exit would then use either CAS to
+@@ -3412,30 +3489,30 @@
+       // - Use _count instead of cxq|EntryList
+       //   We intend to eliminate _count, however, when we switch
+       //   to on-the-fly deflation in ::exit() as is used in
+-      //   Metalocks and RelaxedLocks.  
+-      // 
++      //   Metalocks and RelaxedLocks.
++      //
+       // - Establish the invariant that cxq == null implies EntryList == null.
+       //   set cxq == EMPTY (1) to encode the state where cxq is empty
+       //   by EntryList != null.  EMPTY is a distinguished value.
+       //   The fast-path exit() would fetch cxq but not EntryList.
+       //
+       // - Encode succ as follows:
+-      //   succ = t :  Thread t is the successor -- t is ready or is spinning.  
++      //   succ = t :  Thread t is the successor -- t is ready or is spinning.
+       //               Exiting thread does not need to wake a successor.
+-      //   succ = 0 :  No successor required -> (EntryList|cxq) == null 
++      //   succ = 0 :  No successor required -> (EntryList|cxq) == null
+       //               Exiting thread does not need to wake a successor
+-      //   succ = 1 :  Successor required    -> (EntryList|cxq) != null and 
+-      //               logically succ == null. 
++      //   succ = 1 :  Successor required    -> (EntryList|cxq) != null and
++      //               logically succ == null.
+       //               Exiting thread must wake a successor.
+-      // 
++      //
+       //   The 1-1 fast-exit path would appear as :
+       //     _owner = null ; membar ;
+       //     if (_succ == 1 && CAS (&_owner, null, Self) == null) goto SlowPath
+-      //     goto FastPathDone ; 
++      //     goto FastPathDone ;
+       //
+       //   and the 1-0 fast-exit path would appear as:
+       //      if (_succ == 1) goto SlowPath
+-      //      Owner = null ; 
++      //      Owner = null ;
+       //      goto FastPathDone
+       //
+       // - Encode the LSB of _owner as 1 to indicate that exit()
+@@ -3447,47 +3524,47 @@
+       //      _owner = null
+       //      goto FastPathDone
+ 
+-      if (Knob_ExitPolicy == 0) {  
++      if (Knob_ExitPolicy == 0) {
+          // release semantics: prior loads and stores from within the critical section
+          // must not float (reorder) past the following store that drops the lock.
+          // On SPARC that requires MEMBAR #loadstore|#storestore.
+          // But of course in TSO #loadstore|#storestore is not required.
+          // I'd like to write one of the following:
+-         // A.  OrderAccess::release() ; _owner = NULL 
++         // A.  OrderAccess::release() ; _owner = NULL
+          // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
+-         // Unfortunatey OrderAccess::release() and OrderAccess::loadstore() both
++         // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
+          // store into a _dummy variable.  That store is not needed, but can result
+-         // in massive wasteful coherency traffic on classic SMP systems.  
++         // in massive wasteful coherency traffic on classic SMP systems.
+          // Instead, I use release_store(), which is implemented as just a simple
+-         // ST on x64, x86 and SPARC.  
++         // ST on x64, x86 and SPARC.
+          OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
+          OrderAccess::storeload() ;                         // See if we need to wake a successor
+          if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
+-            TEVENT (Inflated exit - simple egress) ; 
+-            return ; 
++            TEVENT (Inflated exit - simple egress) ;
++            return ;
+          }
+-         TEVENT (Inflated exit - complex egress) ; 
++         TEVENT (Inflated exit - complex egress) ;
+ 
+          // Normally the exiting thread is responsible for ensuring succession,
+          // but if other successors are ready or other entering threads are spinning
+          // then this thread can simply store NULL into _owner and exit without
+-         // waking a successor.  The existence of spinners or ready successors 
++         // waking a successor.  The existence of spinners or ready successors
+          // guarantees proper succession (liveness).  Responsibility passes to the
+          // ready or running successors.  The exiting thread delegates the duty.
+          // More precisely, if a successor already exists this thread is absolved
+-         // of the responsibility of waking (unparking) one.  
+-         // 
++         // of the responsibility of waking (unparking) one.
++         //
+          // The _succ variable is critical to reducing futile wakeup frequency.
+          // _succ identifies the "heir presumptive" thread that has been made
+          // ready (unparked) but that has not yet run.  We need only one such
+-         // successor thread to guarantee progress.  
++         // successor thread to guarantee progress.
+          // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
+-         // section 3.3 "Futile Wakeup Throttling" for details.  
+-         // 
+-         // Note that spinners in Enter() also set _succ non-null. 
+-         // In the current implementation spinners opportunistically set 
++         // section 3.3 "Futile Wakeup Throttling" for details.
++         //
++         // Note that spinners in Enter() also set _succ non-null.
++         // In the current implementation spinners opportunistically set
+          // _succ so that exiting threads might avoid waking a successor.
+-         // Another less appealing alternative would be for the exiting thread 
++         // Another less appealing alternative would be for the exiting thread
+          // to drop the lock and then spin briefly to see if a spinner managed
+          // to acquire the lock.  If so, the exiting thread could exit
+          // immediately without waking a successor, otherwise the exiting
+@@ -3495,35 +3572,35 @@
+          // (Note that we'd need to make the post-drop spin short, but no
+          // shorter than the worst-case round-trip cache-line migration time.
+          // The dropped lock needs to become visible to the spinner, and then
+-         // the acquistion of the lock by the spinner must become visible to
+-         // the exiting thread).  
+-         // 
++         // the acquisition of the lock by the spinner must become visible to
++         // the exiting thread).
++         //
+ 
+          // It appears that an heir-presumptive (successor) must be made ready.
+          // Only the current lock owner can manipulate the EntryList or
+          // drain _cxq, so we need to reacquire the lock.  If we fail
+-         // to reaquire the lock the responsibility for ensuring succession
+-         // falls to the new owner.  
+-         // 
++         // to reacquire the lock the responsibility for ensuring succession
++         // falls to the new owner.
++         //
+          if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
+-            return ; 
++            return ;
+          }
+-         TEVENT (Exit - Reacquired) ; 
+-      } else { 
+-         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) { 
++         TEVENT (Exit - Reacquired) ;
++      } else {
++         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
+             OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
+-            OrderAccess::storeload() ; 
+-            // Ratify the previously observed values. 
+-            if (_cxq == NULL || _succ != NULL) { 
+-                TEVENT (Inflated exit - simple egress) ; 
+-                return ; 
++            OrderAccess::storeload() ;
++            // Ratify the previously observed values.
++            if (_cxq == NULL || _succ != NULL) {
++                TEVENT (Inflated exit - simple egress) ;
++                return ;
+             }
+ 
+             // inopportune interleaving -- the exiting thread (this thread)
+             // in the fast-exit path raced an entering thread in the slow-enter
+-            // path.  
++            // path.
+             // We have two choices:
+-            // A.  Try to reaquire the lock. 
++            // A.  Try to reacquire the lock.
+             //     If the CAS() fails return immediately, otherwise
+             //     we either restart/rerun the exit operation, or simply
+             //     fall-through into the code below which wakes a successor.
+@@ -3531,125 +3608,125 @@
+             //     we could simply unpark() the lead thread and return
+             //     without having set _succ.
+             if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
+-               TEVENT (Inflated exit - reacquired succeeded) ; 
+-               return ; 
++               TEVENT (Inflated exit - reacquired succeeded) ;
++               return ;
+             }
+-            TEVENT (Inflated exit - reacquired failed) ; 
+-         } else { 
+-            TEVENT (Inflated exit - complex egress) ; 
++            TEVENT (Inflated exit - reacquired failed) ;
++         } else {
++            TEVENT (Inflated exit - complex egress) ;
+          }
+       }
+ 
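// A sketch of the reacquire-for-succession step used by both exit policies
// above: only the owner may drain cxq/EntryList, so the exiting thread CASes
// itself back in.  If the CAS fails, some entering thread already owns the
// lock and inherits the duty of ensuring succession (names are assumptions):
#include <atomic>

static std::atomic<void*> rs_owner{nullptr};

bool must_wake_successor_myself(void* self) {
    void* expected = nullptr;
    // Mirrors Atomic::cmpxchg_ptr(THREAD, &_owner, NULL) != NULL above,
    // inverted: true means we retook the lock and must pick an heir.
    return rs_owner.compare_exchange_strong(expected, self,
                                            std::memory_order_acq_rel);
}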
+-      guarantee (_owner == THREAD, "invariant") ; 
++      guarantee (_owner == THREAD, "invariant") ;
+ 
+-      // Select an appropriate successor ("heir presumptive") from the EntryList 
+-      // and make it ready.  Generally we just wake the head of EntryList . 
+-      // There's no algorithmic constraint that we use the head - it's just 
+-      // a policy decision.   Note that the thread at head of the EntryList  
+-      // remains at the head until it acquires the lock.  This means we'll 
+-      // repeatedly wake the same thread until it manages to grab the lock.  
+-      // This is generally a good policy - if we're seeing lots of futile wakeups 
+-      // at least we're waking/rewaking a thread that's like to be hot or warm 
+-      // (have residual D$ and TLB affinity).  
+-      // 
++      // Select an appropriate successor ("heir presumptive") from the EntryList
++      // and make it ready.  Generally we just wake the head of EntryList.
++      // There's no algorithmic constraint that we use the head - it's just
++      // a policy decision.   Note that the thread at the head of the EntryList
++      // remains at the head until it acquires the lock.  This means we'll
++      // repeatedly wake the same thread until it manages to grab the lock.
++      // This is generally a good policy - if we're seeing lots of futile wakeups
++      // at least we're waking/rewaking a thread that's likely to be hot or warm
++      // (have residual D$ and TLB affinity).
++      //
+       // "Wakeup locality" optimization:
+       // http://j2se.east/~dice/PERSIST/040825-WakeLocality.txt
+       // In the future we'll try to bias the selection mechanism
+-      // to preferentially pick a thread that recently ran on 
++      // to preferentially pick a thread that recently ran on
+       // a processor element that shares cache with the CPU on which
+       // the exiting thread is running.   We need access to Solaris'
+-      // schedctl.sc_cpu to make that work.  
++      // schedctl.sc_cpu to make that work.
+       //
+-      ObjectWaiter * w = NULL ; 
+-      int QMode = Knob_QMode ; 
++      ObjectWaiter * w = NULL ;
++      int QMode = Knob_QMode ;
+ 
+       if (QMode == 2 && _cxq != NULL) {
+           // QMode == 2 : cxq has precedence over EntryList.
+-          // Try to directly wake a successor from the cxq.  
+-          // If successful, the successor will need to unlink itself from cxq.  
+-          w = _cxq ; 
+-          assert (w != NULL, "invariant") ; 
+-          assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ; 
+-          ExitEpilog (Self, w) ; 
+-          return ; 
++          // Try to directly wake a successor from the cxq.
++          // If successful, the successor will need to unlink itself from cxq.
++          w = _cxq ;
++          assert (w != NULL, "invariant") ;
++          assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
++          ExitEpilog (Self, w) ;
++          return ;
+       }
+ 
+-      if (QMode == 3 && _cxq != NULL) { 
++      if (QMode == 3 && _cxq != NULL) {
+           // Aggressively drain cxq into EntryList at the first opportunity.
+          // This policy ensures that recently-run threads live at the head of EntryList.
+           // Drain _cxq into EntryList - bulk transfer.
+-          // First, detach _cxq.  
++          // First, detach _cxq.
+           // The following loop is tantamount to: w = swap (&cxq, NULL)
+-          w = _cxq ; 
++          w = _cxq ;
+           for (;;) {
+-             assert (w != NULL, "Invariant") ; 
+-             ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ; 
+-             if (u == w) break ; 
+-             w = u ; 
++             assert (w != NULL, "Invariant") ;
++             ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
++             if (u == w) break ;
++             w = u ;
+           }
+-          assert (w != NULL              , "invariant") ; 
++          assert (w != NULL              , "invariant") ;
+ 
+-          ObjectWaiter * q = NULL ; 
+-          ObjectWaiter * p ; 
+-          for (p = w ; p != NULL ; p = p->_next) { 
+-              guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ; 
+-              p->TState = ObjectWaiter::TS_ENTER ; 
+-              p->_prev = q ; 
+-              q = p ; 
++          ObjectWaiter * q = NULL ;
++          ObjectWaiter * p ;
++          for (p = w ; p != NULL ; p = p->_next) {
++              guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
++              p->TState = ObjectWaiter::TS_ENTER ;
++              p->_prev = q ;
++              q = p ;
+           }
+ 
+           // Append the RATs to the EntryList
+           // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
+-          ObjectWaiter * Tail ; 
+-          for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ; 
+-          if (Tail == NULL) { 
+-              _EntryList = w ; 
+-          } else { 
+-              Tail->_next = w ; 
+-              w->_prev = Tail ; 
++          ObjectWaiter * Tail ;
++          for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ;
++          if (Tail == NULL) {
++              _EntryList = w ;
++          } else {
++              Tail->_next = w ;
++              w->_prev = Tail ;
+           }
+ 
+           // Fall thru into code that tries to wake a successor from EntryList
+       }
+ 
+-      if (QMode == 4 && _cxq != NULL) { 
++      if (QMode == 4 && _cxq != NULL) {
+           // Aggressively drain cxq into EntryList at the first opportunity.
+          // This policy ensures that recently-run threads live at the head of EntryList.
+-          
++
+           // Drain _cxq into EntryList - bulk transfer.
+-          // First, detach _cxq.  
++          // First, detach _cxq.
+           // The following loop is tantamount to: w = swap (&cxq, NULL)
+-          w = _cxq ; 
++          w = _cxq ;
+           for (;;) {
+-             assert (w != NULL, "Invariant") ; 
+-             ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ; 
+-             if (u == w) break ; 
+-             w = u ; 
++             assert (w != NULL, "Invariant") ;
++             ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
++             if (u == w) break ;
++             w = u ;
+           }
+-          assert (w != NULL              , "invariant") ; 
++          assert (w != NULL              , "invariant") ;
+ 
+-          ObjectWaiter * q = NULL ; 
+-          ObjectWaiter * p ; 
+-          for (p = w ; p != NULL ; p = p->_next) { 
+-              guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ; 
+-              p->TState = ObjectWaiter::TS_ENTER ; 
+-              p->_prev = q ; 
+-              q = p ; 
++          ObjectWaiter * q = NULL ;
++          ObjectWaiter * p ;
++          for (p = w ; p != NULL ; p = p->_next) {
++              guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
++              p->TState = ObjectWaiter::TS_ENTER ;
++              p->_prev = q ;
++              q = p ;
+           }
+ 
+           // Prepend the RATs to the EntryList
+-          if (_EntryList != NULL) { 
+-              q->_next = _EntryList ; 
+-              _EntryList->_prev = q ; 
++          if (_EntryList != NULL) {
++              q->_next = _EntryList ;
++              _EntryList->_prev = q ;
+           }
+-          _EntryList = w ; 
++          _EntryList = w ;
+ 
+           // Fall thru into code that tries to wake a successor from EntryList
+       }
+ 
+-      w = _EntryList  ; 
+-      if (w != NULL) { 
+-          // I'd like to write: guarantee (w->_thread != Self). 
++      w = _EntryList  ;
++      if (w != NULL) {
++          // I'd like to write: guarantee (w->_thread != Self).
+           // But in practice an exiting thread may find itself on the EntryList.
+          // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
+          // then calls exit().  Exit releases the lock by setting O._owner to NULL.
+@@ -3657,85 +3734,85 @@
+           // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
+          // releases the lock "O".  T2 resumes immediately after the ST of null into
+           // _owner, above.  T2 notices that the EntryList is populated, so it
+-          // reaquires the lock and then finds itself on the EntryList.  
+-          // Given all that, we have to tolerate the circumstance where "w" is 
+-          // associated with Self.        
+-          assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ; 
+-          ExitEpilog (Self, w) ; 
+-          return ; 
++          // reacquires the lock and then finds itself on the EntryList.
++          // Given all that, we have to tolerate the circumstance where "w" is
++          // associated with Self.
++          assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
++          ExitEpilog (Self, w) ;
++          return ;
+       }
+ 
+       // If we find that both _cxq and EntryList are null then just
+-      // re-run the exit protocol from the top.  
+-      w = _cxq ; 
+-      if (w == NULL) continue ; 
++      // re-run the exit protocol from the top.
++      w = _cxq ;
++      if (w == NULL) continue ;
+ 
+       // Drain _cxq into EntryList - bulk transfer.
+-      // First, detach _cxq.  
++      // First, detach _cxq.
+       // The following loop is tantamount to: w = swap (&cxq, NULL)
+       for (;;) {
+-          assert (w != NULL, "Invariant") ; 
+-          ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ; 
+-          if (u == w) break ; 
+-          w = u ; 
++          assert (w != NULL, "Invariant") ;
++          ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
++          if (u == w) break ;
++          w = u ;
+       }
+-      TEVENT (Inflated exit - drain cxq into EntryList) ; 
++      TEVENT (Inflated exit - drain cxq into EntryList) ;
+ 
+-      assert (w != NULL              , "invariant") ; 
+-      assert (_EntryList  == NULL    , "invariant") ; 
++      assert (w != NULL              , "invariant") ;
++      assert (_EntryList  == NULL    , "invariant") ;
+ 
+-      // Convert the LIFO SLL anchored by _cxq into a DLL. 
++      // Convert the LIFO SLL anchored by _cxq into a DLL.
+       // The list reorganization step operates in O(LENGTH(w)) time.
+-      // It's critical that this step operate quickly as 
++      // It's critical that this step operate quickly as
+       // "Self" still holds the outer-lock, restricting parallelism
+       // and effectively lengthening the critical section.
+-      // Invariant: s chases t chases u.  
++      // Invariant: s chases t chases u.
+       // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
+-      // we have faster access to the tail. 
+-      
+-      if (QMode == 1) { 
++      // we have faster access to the tail.
++
++      if (QMode == 1) {
+          // QMode == 1 : drain cxq to EntryList, reversing order
+-         // We also reverse the order of the list.  
+-         ObjectWaiter * s = NULL ; 
+-         ObjectWaiter * t = w ; 
+-         ObjectWaiter * u = NULL ; 
++         // We also reverse the order of the list.
++         ObjectWaiter * s = NULL ;
++         ObjectWaiter * t = w ;
++         ObjectWaiter * u = NULL ;
+          while (t != NULL) {
+-             guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ; 
+-             t->TState = ObjectWaiter::TS_ENTER ; 
+-             u = t->_next ; 
+-             t->_prev = u ; 
+-             t->_next = s ; 
+-             s = t; 
+-             t = u ; 
++             guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ;
++             t->TState = ObjectWaiter::TS_ENTER ;
++             u = t->_next ;
++             t->_prev = u ;
++             t->_next = s ;
++             s = t;
++             t = u ;
+          }
+-         _EntryList  = s ; 
+-         assert (s != NULL, "invariant") ; 
+-      } else { 
+-         // QMode == 0 or QMode == 2 
++         _EntryList  = s ;
++         assert (s != NULL, "invariant") ;
++      } else {
++         // QMode == 0 or QMode == 2
+          _EntryList = w ;
+-         ObjectWaiter * q = NULL ; 
+-         ObjectWaiter * p ; 
+-         for (p = w ; p != NULL ; p = p->_next) { 
+-             guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ; 
+-             p->TState = ObjectWaiter::TS_ENTER ; 
+-             p->_prev = q ; 
+-             q = p ; 
++         ObjectWaiter * q = NULL ;
++         ObjectWaiter * p ;
++         for (p = w ; p != NULL ; p = p->_next) {
++             guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
++             p->TState = ObjectWaiter::TS_ENTER ;
++             p->_prev = q ;
++             q = p ;
+          }
+       }
+ 
+       // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
+-      // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().  
+-    
++      // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
++
+       // See if we can abdicate to a spinner instead of waking a thread.
+       // A primary goal of the implementation is to reduce the
+       // context-switch rate.
+-      if (_succ != NULL) continue; 
++      if (_succ != NULL) continue;
+ 
+-      w = _EntryList  ; 
+-      if (w != NULL) { 
+-          guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ; 
+-          ExitEpilog (Self, w) ; 
+-          return ; 
++      w = _EntryList  ;
++      if (w != NULL) {
++          guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
++          ExitEpilog (Self, w) ;
++          return ;
+       }
+    }
+ }
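// Standalone sketches of the two list manipulations exit() repeats above
// (std::atomic stand-ins; Node and the toy names are assumptions).  First,
// the detach loop that is "tantamount to w = swap(&cxq, NULL)":
#include <atomic>

struct Node { Node* next = nullptr; Node* prev = nullptr; };
static std::atomic<Node*> toy_cxq{nullptr};

Node* detach_cxq() {
    Node* w = toy_cxq.load(std::memory_order_acquire);
    while (w != nullptr &&
           !toy_cxq.compare_exchange_weak(w, nullptr,
                                          std::memory_order_acq_rel)) {
        // w is refreshed by each failed CAS, exactly like the u/w dance above
    }
    return w;
}

// Second, the QMode == 1 reversal: the LIFO singly linked chain becomes a
// doubly linked EntryList in FIFO arrival order.  Setting prev to the old
// next pointer is correct because the chain is being reversed as we walk it:
Node* reverse_into_dll(Node* t) {
    Node* s = nullptr;
    while (t != nullptr) {
        Node* u = t->next;
        t->prev = u;
        t->next = s;
        s = t;
        t = u;
    }
    return s;   // new EntryList head; head->prev == nullptr as required
}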
+@@ -3757,7 +3834,7 @@
+        assert(_recursions == 0, "internal state error");
+        _owner = THREAD ;   /* Convert from basiclock addr to Thread addr */
+        _recursions = 0 ;
+-       OwnerIsThread = 1 ; 
++       OwnerIsThread = 1 ;
+     }
+    }
+ 
+@@ -3775,7 +3852,7 @@
+    Thread * const Self = THREAD;
+    assert(Self->is_Java_thread(), "Must be Java thread!");
+    JavaThread *jt = (JavaThread *)THREAD;
+- 
++
+    guarantee(_owner != Self, "reenter already owner");
+    enter (THREAD);       // enter the monitor
+    guarantee (_recursions == 0, "reenter recursion");
+@@ -3786,13 +3863,13 @@
+ // Note: a subset of changes to ObjectMonitor::wait()
+ // will need to be replicated in complete_exit above
+ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
+-   Thread * const Self = THREAD ; 
++   Thread * const Self = THREAD ;
+    assert(Self->is_Java_thread(), "Must be Java thread!");
+    JavaThread *jt = (JavaThread *)THREAD;
+ 
+-   DeferredInitialize () ; 
++   DeferredInitialize () ;
+ 
+-   // Throw IMSX or IEX.  
++   // Throw IMSX or IEX.
+    CHECK_OWNER();
+ 
+    // check for a pending interrupt
+@@ -3800,46 +3877,46 @@
+      // post monitor waited event.  Note that this is past-tense, we are done waiting.
+      if (JvmtiExport::should_post_monitor_waited()) {
+         // Note: 'false' parameter is passed here because the
+-	// wait was not timed out due to thread interrupt.
++        // wait was not timed out due to thread interrupt.
+         JvmtiExport::post_monitor_waited(jt, this, false);
+      }
+-     TEVENT (Wait - Throw IEX) ; 
++     TEVENT (Wait - Throw IEX) ;
+      THROW(vmSymbols::java_lang_InterruptedException());
+-     return ; 
++     return ;
+    }
+-   TEVENT (Wait) ; 
++   TEVENT (Wait) ;
+ 
+-   assert (Self->_Stalled == 0, "invariant") ; 
+-   Self->_Stalled = intptr_t(this) ; 
++   assert (Self->_Stalled == 0, "invariant") ;
++   Self->_Stalled = intptr_t(this) ;
+    jt->set_current_waiting_monitor(this);
+- 
++
+    // create a node to be put into the queue
+    // Critically, after we reset() the event but prior to park(), we must check
+-   // for a pending interrupt.  
++   // for a pending interrupt.
+    ObjectWaiter node(Self);
+-   node.TState = ObjectWaiter::TS_WAIT ; 
+-   Self->_ParkEvent->reset() ; 
+-   OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag 
+- 
++   node.TState = ObjectWaiter::TS_WAIT ;
++   Self->_ParkEvent->reset() ;
++   OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag
++
+    // Enter the waiting queue, which is a circular doubly linked list in this case
+    // but it could be a priority queue or any data structure.
+    // _WaitSetLock protects the wait queue.  Normally the wait queue is accessed only
+    // by the the owner of the monitor *except* in the case where park()
+    // returns because of a timeout of interrupt.  Contention is exceptionally rare
+-   // so we use a simple spin-lock instead of a heavier-weight blocking lock.  
++   // so we use a simple spin-lock instead of a heavier-weight blocking lock.
+ 
+-   SpinAcquire (&_WaitSetLock, "WaitSet - add") ; 
+-   AddWaiter (&node) ; 
+-   SpinRelease (&_WaitSetLock) ; 
++   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ;
++   AddWaiter (&node) ;
++   Thread::SpinRelease (&_WaitSetLock) ;
+ 
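// A sketch of the _WaitSetLock discipline above: contention is rare, so a
// bare test-and-set spin lock guards a circular doubly linked wait set.
// This std::atomic_flag version and its names are assumptions, not the
// Thread::SpinAcquire/AddWaiter internals:
#include <atomic>

static std::atomic_flag ws_lock = ATOMIC_FLAG_INIT;
struct WNode { WNode* next; WNode* prev; };
static WNode* ws_head = nullptr;          // guarded by ws_lock

static void ws_acquire() { while (ws_lock.test_and_set(std::memory_order_acquire)) {} }
static void ws_release() { ws_lock.clear(std::memory_order_release); }

void add_waiter(WNode* n) {               // AddWaiter analog: append at tail
    ws_acquire();
    if (ws_head == nullptr) {
        ws_head = n; n->next = n; n->prev = n;
    } else {
        WNode* tail = ws_head->prev;      // tail is O(1) in a circular DLL
        tail->next = n;    n->prev = tail;
        n->next = ws_head; ws_head->prev = n;
    }
    ws_release();
}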
+-   if ((SyncFlags & 4) == 0) { 
+-      _Responsible = NULL ; 
++   if ((SyncFlags & 4) == 0) {
++      _Responsible = NULL ;
+    }
+    intptr_t save = _recursions; // record the old recursion count
+    _waiters++;                  // increment the number of waiters
+    _recursions = 0;             // set the recursion level to be 1
+-   exit (Self) ; 	            // exit the monitor
+-   guarantee (_owner != Self, "invariant") ; 
++   exit (Self) ;                    // exit the monitor
++   guarantee (_owner != Self, "invariant") ;
+ 
+    // As soon as the ObjectMonitor's ownership is dropped in the exit()
+    // call above, another thread can enter() the ObjectMonitor, do the
+@@ -3856,37 +3933,37 @@
+       node._event->unpark();
+    }
+ 
+-   // The thread is on the WaitSet list - now park() it.  
+-   // On MP systems it's conceivable that a brief spin before we park 
++   // The thread is on the WaitSet list - now park() it.
++   // On MP systems it's conceivable that a brief spin before we park
+    // could be profitable.
+    //
+    // TODO-FIXME: change the following logic to a loop of the form
+    //   while (!timeout && !interrupted && _notified == 0) park()
+ 
+-   int ret = OS_OK ; 
+-   int WasNotified = 0 ; 
+-   { // State transition wrappers 
++   int ret = OS_OK ;
++   int WasNotified = 0 ;
++   { // State transition wrappers
+      OSThread* osthread = Self->osthread();
+-     OSThreadWaitState osts(osthread, true); 
++     OSThreadWaitState osts(osthread, true);
+      {
+        ThreadBlockInVM tbivm(jt);
+        // Thread is in thread_blocked state and oop access is unsafe.
+        jt->set_suspend_equivalent();
+-  
++
+        if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
+-           // Intentionally empty 
+-       } else 
+-       if (node._notified == 0) { 
+-         if (millis <= 0) { 
+-            Self->_ParkEvent->park () ; 
+-         } else { 
+-            ret = Self->_ParkEvent->park (millis) ; 
++           // Intentionally empty
++       } else
++       if (node._notified == 0) {
++         if (millis <= 0) {
++            Self->_ParkEvent->park () ;
++         } else {
++            ret = Self->_ParkEvent->park (millis) ;
+          }
+        }
+ 
+        // were we externally suspended while we were waiting?
+        if (ExitSuspendEquivalent (jt)) {
+-          // TODO-FIXME: add -- if succ == Self then succ = null.  
++          // TODO-FIXME: add -- if succ == Self then succ = null.
+           jt->java_suspend_self();
+        }
+ 
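// A portable sketch of the ParkEvent protocol used above (reset, park,
// park(millis), unpark).  HotSpot's ParkEvent is a lower-level per-thread
// primitive; this condition-variable version only models the semantics and
// is an assumption for illustration:
#include <chrono>
#include <condition_variable>
#include <mutex>

class ToyParkEvent {
    std::mutex              m;
    std::condition_variable cv;
    bool                    permit = false;
public:
    void reset()  { std::lock_guard<std::mutex> g(m); permit = false; }
    void unpark() {
        { std::lock_guard<std::mutex> g(m); permit = true; }
        cv.notify_one();
    }
    void park() {                         // untimed park
        std::unique_lock<std::mutex> g(m);
        cv.wait(g, [this] { return permit; });
    }
    bool park(long millis) {              // false on timeout (~ ret == OS_TIMEOUT)
        std::unique_lock<std::mutex> g(m);
        return cv.wait_for(g, std::chrono::milliseconds(millis),
                           [this] { return permit; });
    }
};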
+@@ -3894,89 +3971,87 @@
+ 
+ 
+      // Node may be on the WaitSet, the EntryList (or cxq), or in transition
+-     // from the WaitSet to the EntryList.  
++     // from the WaitSet to the EntryList.
+      // See if we need to remove Node from the WaitSet.
+      // We use double-checked locking to avoid grabbing _WaitSetLock
+-     // if the thread is not on the wait queue.  
++     // if the thread is not on the wait queue.
+      //
+      // Note that we don't need a fence before the fetch of TState.
+      // In the worst case we'll fetch an old-stale value of TS_WAIT previously
+-     // written by the is thread. (perhaps the fetch might even be satisfied 
+-     // by a look-aside into the processor's own store buffer, although given 
+-     // the length of the code path between the prior ST and this load that's 
+-     // highly unlikely).  If the following LD fetches a stale TS_WAIT value 
++     // written by this thread. (perhaps the fetch might even be satisfied
++     // by a look-aside into the processor's own store buffer, although given
++     // the length of the code path between the prior ST and this load that's
++     // highly unlikely).  If the following LD fetches a stale TS_WAIT value
+      // then we'll acquire the lock and then re-fetch a fresh TState value.
+      // That is, we fail toward safety.
+ 
+-     if (node.TState == ObjectWaiter::TS_WAIT) { 
+-         SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ; 
+-         if (node.TState == ObjectWaiter::TS_WAIT) { 
++     if (node.TState == ObjectWaiter::TS_WAIT) {
++         Thread::SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ;
++         if (node.TState == ObjectWaiter::TS_WAIT) {
+             DequeueSpecificWaiter (&node) ;       // unlink from WaitSet
+             assert(node._notified == 0, "invariant");
+-            node.TState = ObjectWaiter::TS_RUN ; 
++            node.TState = ObjectWaiter::TS_RUN ;
+          }
+-         SpinRelease (&_WaitSetLock) ; 
++         Thread::SpinRelease (&_WaitSetLock) ;
+      }
+ 
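// The double-checked unlink above, as a standalone sketch: the cheap first
// read may be stale, but a stale TS_WAIT only sends us into the locked
// re-check, so we fail toward safety, never toward skipping a needed
// dequeue.  The enum and the dequeue stub are assumptions for illustration:
#include <atomic>

enum class TS { WAIT, ENTER, CXQ, RUN };
struct DNode { std::atomic<TS> tstate{TS::WAIT}; };

static std::atomic_flag dcl_lock = ATOMIC_FLAG_INIT;
static void dequeue_specific(DNode*) { /* unlink from the wait set (stub) */ }

void unlink_if_still_waiting(DNode* node) {
    if (node->tstate.load(std::memory_order_relaxed) == TS::WAIT) {
        while (dcl_lock.test_and_set(std::memory_order_acquire)) {}
        if (node->tstate.load(std::memory_order_relaxed) == TS::WAIT) {
            dequeue_specific(node);
            node->tstate.store(TS::RUN, std::memory_order_relaxed);
        }
        dcl_lock.clear(std::memory_order_release);
    }
}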
+-     // The thread is now either on off-list (TS_RUN), 
+-     // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).  
+-     // The Node's TState variable is stable from the perspective of this thread. 
++     // The thread is now either off-list (TS_RUN),
++     // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
++     // The Node's TState variable is stable from the perspective of this thread.
+      // No other threads will asynchronously modify TState.
+-     guarantee (node.TState != ObjectWaiter::TS_WAIT, "invariant") ; 
+-     OrderAccess::loadload() ; 
+-     if (_succ == Self) _succ = NULL ; 
+-     WasNotified = node._notified ; 
+- 
+-     // Reentry phase -- reacquire the monitor.   
++     guarantee (node.TState != ObjectWaiter::TS_WAIT, "invariant") ;
++     OrderAccess::loadload() ;
++     if (_succ == Self) _succ = NULL ;
++     WasNotified = node._notified ;
++
++     // Reentry phase -- reacquire the monitor.
+      // re-enter contended monitor after object.wait().
+      // retain OBJECT_WAIT state until re-enter successfully completes
+      // Thread state is thread_in_vm and oop access is again safe,
+      // although the raw address of the object may have changed.
+-     // (Don't cache naked oops over safepoints, of course).  
++     // (Don't cache naked oops over safepoints, of course).
+ 
+-     // post monitor waited event.  Note that this is past-tense, we are done waiting.
++     // post monitor waited event. Note that this is past-tense, we are done waiting.
+      if (JvmtiExport::should_post_monitor_waited()) {
+-        JvmtiExport::post_monitor_waited(jt, this, (ret == OS_TIMEOUT) ? true : false);
+-        // Just to err on the conservative side ...
+-        if (_succ == Self) _succ = NULL ; 
++       JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
+      }
+-     OrderAccess::fence() ; 
++     OrderAccess::fence() ;
+ 
+-     assert (Self->_Stalled != 0, "invariant") ; 
+-     Self->_Stalled = 0 ; 
++     assert (Self->_Stalled != 0, "invariant") ;
++     Self->_Stalled = 0 ;
+ 
+-     assert (_owner != Self, "invariant") ; 
+-     ObjectWaiter::TStates v = node.TState ; 
++     assert (_owner != Self, "invariant") ;
++     ObjectWaiter::TStates v = node.TState ;
+      if (v == ObjectWaiter::TS_RUN) {
+-         enter (Self) ; 
++         enter (Self) ;
+      } else {
+-         guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ; 
+-         ReenterI (Self, &node) ; 
+-         node.TState = ObjectWaiter::TS_RUN ; 
++         guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
++         ReenterI (Self, &node) ;
++         node.wait_reenter_end(this);
+      }
+ 
+-     // Self has reacquired the lock.  
++     // Self has reacquired the lock.
+      // Lifecycle - the node representing Self must not appear on any queues.
+      // Node is about to go out-of-scope, but even if it were immortal we wouldn't
+      // want residual elements associated with this thread left on any lists.
+-     guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ; 
+-     assert    (_owner == Self, "invariant") ; 
+-     assert    (_succ != Self , "invariant") ; 
++     guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ;
++     assert    (_owner == Self, "invariant") ;
++     assert    (_succ != Self , "invariant") ;
+    } // OSThreadWaitState()
+ 
+    jt->set_current_waiting_monitor(NULL);
+- 
+-   guarantee (_recursions == 0, "invariant") ; 
++
++   guarantee (_recursions == 0, "invariant") ;
+    _recursions = save;     // restore the old recursion count
+    _waiters--;             // decrement the number of waiters
+ 
+    // Verify a few postconditions
+-   assert (_owner == Self       , "invariant") ; 
+-   assert (_succ  != Self       , "invariant") ; 
+-   assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; 
++   assert (_owner == Self       , "invariant") ;
++   assert (_succ  != Self       , "invariant") ;
++   assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
+ 
+    if (SyncFlags & 32) {
+-      OrderAccess::fence() ; 
++      OrderAccess::fence() ;
+    }
+ 
+    // check if the notification happened
+@@ -3984,7 +4059,7 @@
+      // no, it could be timeout or Thread.interrupt() or both
+      // check for interrupt event, otherwise it is timeout
+      if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
+-       TEVENT (Wait - throw IEX from epilog) ; 
++       TEVENT (Wait - throw IEX from epilog) ;
+        THROW(vmSymbols::java_lang_InterruptedException());
+      }
+    }
+@@ -3994,249 +4069,257 @@
+ }
+ 
+ 
+-// Consider: 
++// Consider:
+ // If the lock is cool (cxq == null && succ == null) and we're on an MP system
+ // then instead of transferring a thread from the WaitSet to the EntryList
+-// we might just dequeue a thread from the WaitSet and directly unpark() it.  
++// we might just dequeue a thread from the WaitSet and directly unpark() it.
+ 
+ void ObjectMonitor::notify(TRAPS) {
+   CHECK_OWNER();
+-  if (_WaitSet == NULL) { 
+-     TEVENT (Empty-Notify) ; 
+-     return ; 
++  if (_WaitSet == NULL) {
++     TEVENT (Empty-Notify) ;
++     return ;
+   }
+   DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
+ 
+-  int Policy = Knob_MoveNotifyee ; 
++  int Policy = Knob_MoveNotifyee ;
+ 
+-  SpinAcquire (&_WaitSetLock, "WaitSet - notify") ; 
+-  ObjectWaiter * iterator = DequeueWaiter() ; 
+-  if (iterator != NULL) { 
+-     TEVENT (Notify1 - Transfer) ; 
++  Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
++  ObjectWaiter * iterator = DequeueWaiter() ;
++  if (iterator != NULL) {
++     TEVENT (Notify1 - Transfer) ;
+      guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
+-     guarantee (iterator->_notified == 0, "invariant") ; 
++     guarantee (iterator->_notified == 0, "invariant") ;
+      // Disposition - what might we do with iterator ?
+-     // a.  add it directly to the EntryList - either tail or head.  
++     // a.  add it directly to the EntryList - either tail or head.
+      // b.  push it onto the front of the _cxq.
+-     // For now we use (a).  
+-     if (Policy != 4) { 
+-        iterator->TState = ObjectWaiter::TS_ENTER ; 
++     // For now we use (a).
++     if (Policy != 4) {
++        iterator->TState = ObjectWaiter::TS_ENTER ;
+      }
+-     iterator->_notified = 1 ; 
++     iterator->_notified = 1 ;
+ 
+      ObjectWaiter * List = _EntryList ;
+-     if (List != NULL) { 
+-        assert (List->_prev == NULL, "invariant") ; 
+-        assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ; 
+-        assert (List != iterator, "invariant") ; 
++     if (List != NULL) {
++        assert (List->_prev == NULL, "invariant") ;
++        assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
++        assert (List != iterator, "invariant") ;
+      }
+ 
+      if (Policy == 0) {       // prepend to EntryList
+-         if (List == NULL) { 
+-             iterator->_next = iterator->_prev = NULL ; 
+-             _EntryList = iterator ; 
+-         } else { 
+-             List->_prev = iterator ; 
+-             iterator->_next = List ; 
+-             iterator->_prev = NULL ; 
+-             _EntryList = iterator ; 
++         if (List == NULL) {
++             iterator->_next = iterator->_prev = NULL ;
++             _EntryList = iterator ;
++         } else {
++             List->_prev = iterator ;
++             iterator->_next = List ;
++             iterator->_prev = NULL ;
++             _EntryList = iterator ;
+         }
+-     } else 
++     } else
+      if (Policy == 1) {      // append to EntryList
+-         if (List == NULL) { 
+-             iterator->_next = iterator->_prev = NULL ; 
+-             _EntryList = iterator ; 
+-         } else { 
++         if (List == NULL) {
++             iterator->_next = iterator->_prev = NULL ;
++             _EntryList = iterator ;
++         } else {
+             // CONSIDER:  finding the tail currently requires a linear-time walk of
+             // the EntryList.  We can make tail access constant-time by converting to
+-            // a CDLL instead of using our current DLL. 
++            // a CDLL instead of using our current DLL.
+             ObjectWaiter * Tail ;
+-            for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ; 
+-            assert (Tail != NULL && Tail->_next == NULL, "invariant") ; 
+-            Tail->_next = iterator ; 
+-            iterator->_prev = Tail ; 
+-            iterator->_next = NULL ; 
++            for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
++            assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
++            Tail->_next = iterator ;
++            iterator->_prev = Tail ;
++            iterator->_next = NULL ;
+         }
+-     } else 
++     } else
+      if (Policy == 2) {      // prepend to cxq
+-         if (List == NULL) { 
+-             iterator->_next = iterator->_prev = NULL ; 
+-             _EntryList = iterator ; 
+-         } else { 
+-            iterator->TState = ObjectWaiter::TS_CXQ ; 
+-            for (;;) { 
+-                ObjectWaiter * Front = _cxq ; 
+-                iterator->_next = Front ; 
++         if (List == NULL) {
++             iterator->_next = iterator->_prev = NULL ;
++             _EntryList = iterator ;
++         } else {
++            iterator->TState = ObjectWaiter::TS_CXQ ;
++            for (;;) {
++                ObjectWaiter * Front = _cxq ;
++                iterator->_next = Front ;
+                 if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
+-                    break ; 
++                    break ;
+                 }
+             }
+          }
+      } else
+      if (Policy == 3) {      // append to cxq
+-        iterator->TState = ObjectWaiter::TS_CXQ ; 
+-        for (;;) { 
+-            ObjectWaiter * Tail ; 
+-            Tail = _cxq ; 
+-            if (Tail == NULL) { 
+-                iterator->_next = NULL ; 
++        iterator->TState = ObjectWaiter::TS_CXQ ;
++        for (;;) {
++            ObjectWaiter * Tail ;
++            Tail = _cxq ;
++            if (Tail == NULL) {
++                iterator->_next = NULL ;
+                 if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
+-                   break ; 
++                   break ;
+                 }
+-            } else { 
+-                while (Tail->_next != NULL) Tail = Tail->_next ; 
+-                Tail->_next = iterator ; 
+-                iterator->_prev = Tail ; 
+-                iterator->_next = NULL ; 
+-                break ; 
++            } else {
++                while (Tail->_next != NULL) Tail = Tail->_next ;
++                Tail->_next = iterator ;
++                iterator->_prev = Tail ;
++                iterator->_next = NULL ;
++                break ;
+             }
+         }
+      } else {
+-        ParkEvent * ev = iterator->_event ; 
+-        iterator->TState = ObjectWaiter::TS_RUN ; 
+-        OrderAccess::fence() ; 
+-        ev->unpark() ; 
++        ParkEvent * ev = iterator->_event ;
++        iterator->TState = ObjectWaiter::TS_RUN ;
++        OrderAccess::fence() ;
++        ev->unpark() ;
++     }
++
++     if (Policy < 4) {
++       iterator->wait_reenter_begin(this);
+      }
+ 
+      // _WaitSetLock protects the wait queue, not the EntryList.  We could
+      // move the add-to-EntryList operation, above, outside the critical section
+-     // protected by _WaitSetLock.  In practice that's not useful.  With the 
+-     // exception of  wait() timeouts and interrupts the monitor owner 
+-     // is the only thread that grabs _WaitSetLock.  There's almost no contention 
+-     // on _WaitSetLock so it's not profitable to reduce the length of the 
++     // protected by _WaitSetLock.  In practice that's not useful.  With the
++     // exception of  wait() timeouts and interrupts the monitor owner
++     // is the only thread that grabs _WaitSetLock.  There's almost no contention
++     // on _WaitSetLock so it's not profitable to reduce the length of the
+      // critical section.
+   }
+ 
+-  SpinRelease (&_WaitSetLock) ; 
++  Thread::SpinRelease (&_WaitSetLock) ;
+ 
+-  if (iterator != NULL && ObjectSynchronizer::_sync_Notifications != NULL) { 
+-     ObjectSynchronizer::_sync_Notifications->inc() ; 
++  if (iterator != NULL && ObjectSynchronizer::_sync_Notifications != NULL) {
++     ObjectSynchronizer::_sync_Notifications->inc() ;
+   }
+ }
+ 
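// The "prepend to cxq" idiom used by Policy == 2 above (and by contended
// enter), as a standalone sketch: a classic lock-free LIFO push whose CAS
// retries until no other enter()ing or notify()ing thread races the head
// update.  QNode and the nq_ name are assumptions:
#include <atomic>

struct QNode { QNode* next = nullptr; };
static std::atomic<QNode*> nq_cxq{nullptr};

void cxq_push(QNode* n) {
    QNode* front = nq_cxq.load(std::memory_order_relaxed);
    do {
        n->next = front;       // front is refreshed by each failed CAS
    } while (!nq_cxq.compare_exchange_weak(front, n,
                                           std::memory_order_release,
                                           std::memory_order_relaxed));
}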
++
+ void ObjectMonitor::notifyAll(TRAPS) {
+   CHECK_OWNER();
+   ObjectWaiter* iterator;
+-  if (_WaitSet == NULL) { 
+-      TEVENT (Empty-NotifyAll) ; 
+-      return ; 
++  if (_WaitSet == NULL) {
++      TEVENT (Empty-NotifyAll) ;
++      return ;
+   }
+   DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
+ 
+-  int Policy = Knob_MoveNotifyee ; 
+-  int Tally = 0 ; 
+-  SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ; 
+-  
++  int Policy = Knob_MoveNotifyee ;
++  int Tally = 0 ;
++  Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;
++
+   for (;;) {
+-     iterator = DequeueWaiter () ; 
+-     if (iterator == NULL) break ; 
+-     TEVENT (NotifyAll - Transfer1) ; 
+-     ++Tally ; 
++     iterator = DequeueWaiter () ;
++     if (iterator == NULL) break ;
++     TEVENT (NotifyAll - Transfer1) ;
++     ++Tally ;
+ 
+      // Disposition - what might we do with iterator ?
+-     // a.  add it directly to the EntryList - either tail or head.  
++     // a.  add it directly to the EntryList - either tail or head.
+      // b.  push it onto the front of the _cxq.
+-     // For now we use (a). 
+-     //  
++     // For now we use (a).
++     //
+      // TODO-FIXME: currently notifyAll() transfers the waiters one-at-a-time from the waitset
+      // to the EntryList.  This could be done more efficiently with a single bulk transfer,
+-     // but in practice it's not time-critical.  Beware too, that in prepend-mode we invert the 
+-     // order of the waiters.  Lets say that the waitset is "ABCD" and the EntryList is "XYZ".  
+-     // After a notifyAll() in prepend mode the waitset will be empty and the EntryList will 
+-     // be "DCBAXYZ".  
+-     
++     // but in practice it's not time-critical.  Beware too, that in prepend-mode we invert the
++     // order of the waiters.  Let's say that the waitset is "ABCD" and the EntryList is "XYZ".
++     // After a notifyAll() in prepend mode the waitset will be empty and the EntryList will
++     // be "DCBAXYZ".
++
+      guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
+-     guarantee (iterator->_notified == 0, "invariant") ; 
+-     iterator->_notified = 1 ; 
+-     if (Policy != 4) { 
+-        iterator->TState = ObjectWaiter::TS_ENTER ; 
++     guarantee (iterator->_notified == 0, "invariant") ;
++     iterator->_notified = 1 ;
++     if (Policy != 4) {
++        iterator->TState = ObjectWaiter::TS_ENTER ;
+      }
+ 
+      ObjectWaiter * List = _EntryList ;
+-     if (List != NULL) { 
+-        assert (List->_prev == NULL, "invariant") ; 
+-        assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ; 
+-        assert (List != iterator, "invariant") ; 
++     if (List != NULL) {
++        assert (List->_prev == NULL, "invariant") ;
++        assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
++        assert (List != iterator, "invariant") ;
+      }
+ 
+      if (Policy == 0) {       // prepend to EntryList
+-         if (List == NULL) { 
+-             iterator->_next = iterator->_prev = NULL ; 
+-             _EntryList = iterator ; 
+-         } else { 
+-             List->_prev = iterator ; 
+-             iterator->_next = List ; 
+-             iterator->_prev = NULL ; 
+-             _EntryList = iterator ; 
++         if (List == NULL) {
++             iterator->_next = iterator->_prev = NULL ;
++             _EntryList = iterator ;
++         } else {
++             List->_prev = iterator ;
++             iterator->_next = List ;
++             iterator->_prev = NULL ;
++             _EntryList = iterator ;
+         }
+-     } else 
++     } else
+      if (Policy == 1) {      // append to EntryList
+-         if (List == NULL) { 
+-             iterator->_next = iterator->_prev = NULL ; 
+-             _EntryList = iterator ; 
+-         } else { 
++         if (List == NULL) {
++             iterator->_next = iterator->_prev = NULL ;
++             _EntryList = iterator ;
++         } else {
+             // CONSIDER:  finding the tail currently requires a linear-time walk of
+             // the EntryList.  We can make tail access constant-time by converting to
+-            // a CDLL instead of using our current DLL. 
++            // a CDLL instead of using our current DLL.
+             ObjectWaiter * Tail ;
+-            for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ; 
+-            assert (Tail != NULL && Tail->_next == NULL, "invariant") ; 
+-            Tail->_next = iterator ; 
+-            iterator->_prev = Tail ; 
+-            iterator->_next = NULL ; 
++            for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
++            assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
++            Tail->_next = iterator ;
++            iterator->_prev = Tail ;
++            iterator->_next = NULL ;
+         }
+-     } else 
++     } else
+      if (Policy == 2) {      // prepend to cxq
+          // prepend to cxq
+-         iterator->TState = ObjectWaiter::TS_CXQ ; 
+-         for (;;) { 
+-             ObjectWaiter * Front = _cxq ; 
+-             iterator->_next = Front ; 
++         iterator->TState = ObjectWaiter::TS_CXQ ;
++         for (;;) {
++             ObjectWaiter * Front = _cxq ;
++             iterator->_next = Front ;
+              if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
+-                 break ; 
++                 break ;
+              }
+          }
+      } else
+      if (Policy == 3) {      // append to cxq
+-        iterator->TState = ObjectWaiter::TS_CXQ ; 
+-        for (;;) { 
+-            ObjectWaiter * Tail ; 
+-            Tail = _cxq ; 
+-            if (Tail == NULL) { 
+-                iterator->_next = NULL ; 
++        iterator->TState = ObjectWaiter::TS_CXQ ;
++        for (;;) {
++            ObjectWaiter * Tail ;
++            Tail = _cxq ;
++            if (Tail == NULL) {
++                iterator->_next = NULL ;
+                 if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
+-                   break ; 
++                   break ;
+                 }
+-            } else { 
+-                while (Tail->_next != NULL) Tail = Tail->_next ; 
+-                Tail->_next = iterator ; 
+-                iterator->_prev = Tail ; 
+-                iterator->_next = NULL ; 
+-                break ; 
++            } else {
++                while (Tail->_next != NULL) Tail = Tail->_next ;
++                Tail->_next = iterator ;
++                iterator->_prev = Tail ;
++                iterator->_next = NULL ;
++                break ;
+             }
+         }
+      } else {
+-        ParkEvent * ev = iterator->_event ; 
+-        iterator->TState = ObjectWaiter::TS_RUN ; 
+-        OrderAccess::fence() ; 
+-        ev->unpark() ; 
++        ParkEvent * ev = iterator->_event ;
++        iterator->TState = ObjectWaiter::TS_RUN ;
++        OrderAccess::fence() ;
++        ev->unpark() ;
+      }
+ 
++     if (Policy < 4) {
++       iterator->wait_reenter_begin(this);
++     }
+ 
+      // _WaitSetLock protects the wait queue, not the EntryList.  We could
+      // move the add-to-EntryList operation, above, outside the critical section
+-     // protected by _WaitSetLock.  In practice that's not useful.  With the 
+-     // exception of  wait() timeouts and interrupts the monitor owner 
+-     // is the only thread that grabs _WaitSetLock.  There's almost no contention 
+-     // on _WaitSetLock so it's not profitable to reduce the length of the 
++     // protected by _WaitSetLock.  In practice that's not useful.  With the
++     // exception of  wait() timeouts and interrupts the monitor owner
++     // is the only thread that grabs _WaitSetLock.  There's almost no contention
++     // on _WaitSetLock so it's not profitable to reduce the length of the
+      // critical section.
+   }
+ 
+-  SpinRelease (&_WaitSetLock) ; 
++  Thread::SpinRelease (&_WaitSetLock) ;
+ 
+-  if (Tally != 0 && ObjectSynchronizer::_sync_Notifications != NULL) { 
+-     ObjectSynchronizer::_sync_Notifications->inc(Tally) ; 
++  if (Tally != 0 && ObjectSynchronizer::_sync_Notifications != NULL) {
++     ObjectSynchronizer::_sync_Notifications->inc(Tally) ;
+   }
+ }
+ 
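
The notifyAll hunk above dispatches on Knob_MoveNotifyee: policy 0 prepends the notified waiter to the EntryList, policy 1 appends via a linear tail walk, policies 2 and 3 push onto the cxq, and anything else unparks the waiter directly. A standalone sketch of the two EntryList placements, illustrating the order inversion the comment warns about ("ABCD" prepended becomes "DCBA"); types and names are illustrative, not HotSpot's.

#include <cstdio>

// Stand-ins for ObjectWaiter and _EntryList.
struct Waiter {
    int id;
    Waiter* prev;
    Waiter* next;
};

// Policy 0 prepends to the entry list; policy 1 appends with the same
// linear tail walk the CONSIDER comment complains about.
static void place_on_entry_list(Waiter*& entry_list, Waiter* w, int policy) {
    if (entry_list == nullptr) {
        w->prev = w->next = nullptr;
        entry_list = w;
    } else if (policy == 0) {              // prepend
        w->prev = nullptr;
        w->next = entry_list;
        entry_list->prev = w;
        entry_list = w;
    } else {                               // append: O(n) tail walk
        Waiter* tail = entry_list;
        while (tail->next != nullptr) tail = tail->next;
        tail->next = w;
        w->prev = tail;
        w->next = nullptr;
    }
}

int main() {
    Waiter a{1, nullptr, nullptr}, b{2, nullptr, nullptr}, c{3, nullptr, nullptr};
    Waiter* entry_list = nullptr;
    place_on_entry_list(entry_list, &a, 0);
    place_on_entry_list(entry_list, &b, 0);
    place_on_entry_list(entry_list, &c, 0);     // prepend mode inverts order
    for (Waiter* p = entry_list; p != nullptr; p = p->next)
        printf("%d ", p->id);                   // prints: 3 2 1
    printf("\n");
    return 0;
}

Append mode preserves arrival order at the cost of the linear walk; a circular doubly linked list would make the tail reachable in O(1), as the CONSIDER comment suggests.
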
+@@ -4244,31 +4327,31 @@
+ // TODO-FIXME: remove check_slow() -- it's likely dead.
+ 
+ void ObjectMonitor::check_slow(TRAPS) {
+-  TEVENT (check_slow - throw IMSX) ; 
++  TEVENT (check_slow - throw IMSX) ;
+   assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
+   THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
+ }
+ 
+- 
++
+ // -------------------------------------------------------------------------
+-// The raw monitor subsystem is entirely distinct from normal 
+-// java-synchronization or jni-synchronization.  raw monitors are not 
+-// associated with objects.  They can be implemented in any manner 
+-// that makes sense.  The original implementors decided to piggy-back 
+-// the raw-monitor implementation on the existing Java objectMonitor mechanism.  
++// The raw monitor subsystem is entirely distinct from normal
++// java-synchronization or jni-synchronization.  raw monitors are not
++// associated with objects.  They can be implemented in any manner
++// that makes sense.  The original implementors decided to piggy-back
++// the raw-monitor implementation on the existing Java objectMonitor mechanism.
+ // This flaw needs to fixed.  We should reimplement raw monitors as sui-generis.
+-// Specifically, we should not implement raw monitors via java monitors. 
+-// Time permitting, we should disentangle and deconvolve the two implementations 
+-// and move the resulting raw monitor implementation over to the JVMTI directories.  
+-// Ideally, the raw monitor implementation would be built on top of 
++// Specifically, we should not implement raw monitors via java monitors.
++// Time permitting, we should disentangle and deconvolve the two implementations
++// and move the resulting raw monitor implementation over to the JVMTI directories.
++// Ideally, the raw monitor implementation would be built on top of
+ // park-unpark and nothing else.
+ //
+ // raw monitors are used mainly by JVMTI
+ // The raw monitor implementation borrows the ObjectMonitor structure,
+-// but the operators are degenerate and extremely simple.  
++// but the operators are degenerate and extremely simple.
+ //
+ // Mixed use of a single objectMonitor instance -- as both a raw monitor
+-// and a normal java monitor -- is not permissible.  
++// and a normal java monitor -- is not permissible.
+ //
+ // Note that we use the single RawMonitor_lock to protect queue operations for
+ // _all_ raw monitors.  This is a scalability impediment, but since raw monitor usage
+@@ -4278,115 +4361,115 @@
+ // -------------------------------------------------------------------------
+ 
+ int ObjectMonitor::SimpleEnter (Thread * Self) {
+-  for (;;) { 
++  for (;;) {
+     if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
+-       return OS_OK ; 
++       return OS_OK ;
+     }
+ 
+-    ObjectWaiter Node (Self) ; 
++    ObjectWaiter Node (Self) ;
+     Self->_ParkEvent->reset() ;     // strictly optional
+-    Node.TState = ObjectWaiter::TS_ENTER ; 
++    Node.TState = ObjectWaiter::TS_ENTER ;
+ 
+-    RawMonitor_lock->lock_without_safepoint_check() ; 
+-    Node._next  = _EntryList ; 
+-    _EntryList  = &Node ; 
+-    OrderAccess::fence() ; 
++    RawMonitor_lock->lock_without_safepoint_check() ;
++    Node._next  = _EntryList ;
++    _EntryList  = &Node ;
++    OrderAccess::fence() ;
+     if (_owner == NULL && Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
+-        _EntryList = Node._next ; 
+-        RawMonitor_lock->unlock() ; 
+-        return OS_OK ; 
++        _EntryList = Node._next ;
++        RawMonitor_lock->unlock() ;
++        return OS_OK ;
+     }
+-    RawMonitor_lock->unlock() ; 
++    RawMonitor_lock->unlock() ;
+     while (Node.TState == ObjectWaiter::TS_ENTER) {
+-       Self->_ParkEvent->park() ; 
++       Self->_ParkEvent->park() ;
+     }
+   }
+ }
+ 
+-int ObjectMonitor::SimpleExit (Thread * Self) { 
+-  guarantee (_owner == Self, "invariant") ; 
+-  OrderAccess::release_store_ptr (&_owner, NULL) ; 
+-  OrderAccess::fence() ; 
+-  if (_EntryList == NULL) return OS_OK ; 
+-  ObjectWaiter * w ; 
++int ObjectMonitor::SimpleExit (Thread * Self) {
++  guarantee (_owner == Self, "invariant") ;
++  OrderAccess::release_store_ptr (&_owner, NULL) ;
++  OrderAccess::fence() ;
++  if (_EntryList == NULL) return OS_OK ;
++  ObjectWaiter * w ;
+ 
+-  RawMonitor_lock->lock_without_safepoint_check() ; 
+-  w = _EntryList ; 
+-  if (w != NULL) { 
+-      _EntryList = w->_next ; 
++  RawMonitor_lock->lock_without_safepoint_check() ;
++  w = _EntryList ;
++  if (w != NULL) {
++      _EntryList = w->_next ;
+   }
+-  RawMonitor_lock->unlock() ; 
+-  if (w != NULL) { 
+-      guarantee (w ->TState == ObjectWaiter::TS_ENTER, "invariant") ; 
+-      ParkEvent * ev = w->_event ; 
+-      w->TState = ObjectWaiter::TS_RUN ; 
+-      OrderAccess::fence() ; 
+-      ev->unpark() ; 
++  RawMonitor_lock->unlock() ;
++  if (w != NULL) {
++      guarantee (w ->TState == ObjectWaiter::TS_ENTER, "invariant") ;
++      ParkEvent * ev = w->_event ;
++      w->TState = ObjectWaiter::TS_RUN ;
++      OrderAccess::fence() ;
++      ev->unpark() ;
+   }
+-  return OS_OK ; 
++  return OS_OK ;
+ }
+ 
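
SimpleEnter and SimpleExit above form a classic enqueue-then-recheck handoff: a thread that loses the ownership CAS enqueues itself under RawMonitor_lock and re-checks the owner before parking, so an exit that races with the enqueue cannot strand it. A self-contained sketch of that shape, with std::mutex standing in for RawMonitor_lock and a yield loop standing in for ParkEvent::park/unpark; all names are illustrative, not HotSpot's.

#include <atomic>
#include <mutex>
#include <thread>

struct Node {
    std::atomic<bool> granted{false};
    Node* next = nullptr;
};

struct SimpleMonitor {
    std::atomic<void*> owner{nullptr};
    std::mutex queue_lock;          // stands in for RawMonitor_lock
    Node* entry_list = nullptr;

    void enter(void* self) {
        for (;;) {
            void* expected = nullptr;
            if (owner.compare_exchange_strong(expected, self)) return;

            Node node;
            {
                std::lock_guard<std::mutex> g(queue_lock);
                node.next = entry_list;
                entry_list = &node;
                // Re-check under the lock: the holder may have exited
                // between our failed CAS and the enqueue.
                expected = nullptr;
                if (owner.compare_exchange_strong(expected, self)) {
                    entry_list = node.next;   // undo the enqueue
                    return;
                }
            }
            while (!node.granted.load(std::memory_order_acquire))
                std::this_thread::yield();    // stand-in for ParkEvent::park()
        }
    }

    void exit() {
        owner.store(nullptr, std::memory_order_release);
        Node* w = nullptr;
        {
            std::lock_guard<std::mutex> g(queue_lock);
            w = entry_list;
            if (w != nullptr) entry_list = w->next;
        }
        if (w != nullptr)
            w->granted.store(true, std::memory_order_release);  // "unpark"
    }
};

Note the woken thread loops back and competes for the CAS again rather than inheriting ownership, exactly as SimpleEnter does after park() returns.
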
+-int ObjectMonitor::SimpleWait (Thread * Self, jlong millis) { 
+-  guarantee (_owner == Self  , "invariant") ; 
+-  guarantee (_recursions == 0, "invariant") ; 
++int ObjectMonitor::SimpleWait (Thread * Self, jlong millis) {
++  guarantee (_owner == Self  , "invariant") ;
++  guarantee (_recursions == 0, "invariant") ;
+ 
+-  ObjectWaiter Node (Self) ; 
+-  Node._notified = 0 ; 
+-  Node.TState    = ObjectWaiter::TS_WAIT ; 
++  ObjectWaiter Node (Self) ;
++  Node._notified = 0 ;
++  Node.TState    = ObjectWaiter::TS_WAIT ;
+ 
+-  RawMonitor_lock->lock_without_safepoint_check() ; 
+-  Node._next     = _WaitSet ; 
+-  _WaitSet       = &Node ; 
+-  RawMonitor_lock->unlock() ; 
++  RawMonitor_lock->lock_without_safepoint_check() ;
++  Node._next     = _WaitSet ;
++  _WaitSet       = &Node ;
++  RawMonitor_lock->unlock() ;
+ 
+-  SimpleExit (Self) ; 
+-  guarantee (_owner != Self, "invariant") ; 
++  SimpleExit (Self) ;
++  guarantee (_owner != Self, "invariant") ;
+ 
+-  int ret = OS_OK ; 
++  int ret = OS_OK ;
+   if (millis <= 0) {
+     Self->_ParkEvent->park();
+   } else {
+     ret = Self->_ParkEvent->park(millis);
+   }
+ 
+-  // If thread still resides on the waitset then unlink it. 
++  // If thread still resides on the waitset then unlink it.
+   // Double-checked locking -- the usage is safe in this context
+   // as we TState is volatile and the lock-unlock operators are
+-  // serializing (barrier-equivalent).  
++  // serializing (barrier-equivalent).
+ 
+-  if (Node.TState == ObjectWaiter::TS_WAIT) { 
+-    RawMonitor_lock->lock_without_safepoint_check() ; 
+-    if (Node.TState == ObjectWaiter::TS_WAIT) { 
++  if (Node.TState == ObjectWaiter::TS_WAIT) {
++    RawMonitor_lock->lock_without_safepoint_check() ;
++    if (Node.TState == ObjectWaiter::TS_WAIT) {
+       // Simple O(n) unlink, but performance isn't critical here.
+-      ObjectWaiter * p ; 
+-      ObjectWaiter * q = NULL ; 
++      ObjectWaiter * p ;
++      ObjectWaiter * q = NULL ;
+       for (p = _WaitSet ; p != &Node; p = p->_next) {
+-         q = p ; 
++         q = p ;
+       }
+-      guarantee (p == &Node, "invariant") ; 
+-      if (q == NULL) { 
+-        guarantee (p == _WaitSet, "invariant") ; 
+-        _WaitSet = p->_next ; 
+-      } else { 
+-        guarantee (p == q->_next, "invariant") ; 
+-        q->_next = p->_next ; 
++      guarantee (p == &Node, "invariant") ;
++      if (q == NULL) {
++        guarantee (p == _WaitSet, "invariant") ;
++        _WaitSet = p->_next ;
++      } else {
++        guarantee (p == q->_next, "invariant") ;
++        q->_next = p->_next ;
+       }
+-      Node.TState = ObjectWaiter::TS_RUN ; 
++      Node.TState = ObjectWaiter::TS_RUN ;
+     }
+-    RawMonitor_lock->unlock() ; 
++    RawMonitor_lock->unlock() ;
+   }
+ 
+-  guarantee (Node.TState == ObjectWaiter::TS_RUN, "invariant") ; 
+-  SimpleEnter (Self) ; 
++  guarantee (Node.TState == ObjectWaiter::TS_RUN, "invariant") ;
++  SimpleEnter (Self) ;
+ 
+-  guarantee (_owner == Self, "invariant") ; 
+-  guarantee (_recursions == 0, "invariant") ; 
+-  return ret ; 
++  guarantee (_owner == Self, "invariant") ;
++  guarantee (_recursions == 0, "invariant") ;
++  return ret ;
+ }
+ 
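
SimpleWait above parks with an optional timeout and then unlinks itself from the wait set if the notifier has not already removed it; the comment notes the double-checked test of TState is safe because the lock acquire/release pair is serializing. A reduced sketch of that self-unlink, using an explicit atomic for the unlocked pre-check; names are illustrative, not HotSpot's.

#include <atomic>
#include <mutex>

enum class WState { WAITING, RUNNING };

struct WNode {
    std::atomic<WState> state{WState::WAITING};
    WNode* next = nullptr;
};

std::mutex wait_lock;        // stand-in for RawMonitor_lock
WNode* wait_set = nullptr;   // singly linked, like _WaitSet here

// After a timed (possibly spurious) wakeup, take ourselves off the wait
// set unless a notifier already did.  The unlocked pre-check is cheap;
// the locked re-check makes the decision authoritative.
void unlink_if_still_waiting(WNode* node) {
    if (node->state.load(std::memory_order_acquire) != WState::WAITING)
        return;
    std::lock_guard<std::mutex> g(wait_lock);
    if (node->state.load(std::memory_order_relaxed) != WState::WAITING)
        return;                               // notifier won the race
    WNode* prev = nullptr;
    for (WNode* p = wait_set; p != node; p = p->next)  // O(n), uncontended
        prev = p;
    if (prev == nullptr) wait_set = node->next;
    else                 prev->next = node->next;
    node->state.store(WState::RUNNING, std::memory_order_release);
}
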
+-int ObjectMonitor::SimpleNotify (Thread * Self, bool All) { 
+-  guarantee (_owner == Self, "invariant") ; 
+-  if (_WaitSet == NULL) return OS_OK ; 
++int ObjectMonitor::SimpleNotify (Thread * Self, bool All) {
++  guarantee (_owner == Self, "invariant") ;
++  if (_WaitSet == NULL) return OS_OK ;
+ 
+   // We have two options:
+   // A. Transfer the threads from the WaitSet to the EntryList
+@@ -4396,27 +4479,27 @@
+   // context switching.  In particular (B) induces lots of contention.
+ 
+   ParkEvent * ev = NULL ;       // consider using a small auto array ...
+-  RawMonitor_lock->lock_without_safepoint_check() ; 
+-  for (;;) { 
+-      ObjectWaiter * w = _WaitSet ; 
+-      if (w == NULL) break ; 
+-      _WaitSet = w->_next ; 
+-      if (ev != NULL) { ev->unpark(); ev = NULL; } 
+-      ev = w->_event ; 
+-      OrderAccess::loadstore() ; 
+-      w->TState = ObjectWaiter::TS_RUN ; 
+-      OrderAccess::storeload(); 
+-      if (!All) break ; 
+-  }
+-  RawMonitor_lock->unlock() ; 
+-  if (ev != NULL) ev->unpark(); 
+-  return OS_OK ; 
++  RawMonitor_lock->lock_without_safepoint_check() ;
++  for (;;) {
++      ObjectWaiter * w = _WaitSet ;
++      if (w == NULL) break ;
++      _WaitSet = w->_next ;
++      if (ev != NULL) { ev->unpark(); ev = NULL; }
++      ev = w->_event ;
++      OrderAccess::loadstore() ;
++      w->TState = ObjectWaiter::TS_RUN ;
++      OrderAccess::storeload();
++      if (!All) break ;
++  }
++  RawMonitor_lock->unlock() ;
++  if (ev != NULL) ev->unpark();
++  return OS_OK ;
+ }
+ 
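
SimpleNotify above pops waiters off the wait set under RawMonitor_lock but defers each unpark by one iteration, so the final wakeup -- the only one, in the notify case -- fires after the lock is released, keeping the critical section short. A sketch of that drain; the Event/QNode types are illustrative stand-ins.

#include <functional>
#include <mutex>

struct Event { std::function<void()> unpark; };   // stand-in for ParkEvent
struct QNode { Event* ev; QNode* next; };

std::mutex qlock;            // stand-in for RawMonitor_lock
QNode* waitset = nullptr;

// Drain one (notify) or all (notifyAll) waiters, deferring each wakeup
// by one iteration so the last unpark happens outside the lock.
void simple_notify(bool all) {
    Event* pending = nullptr;
    {
        std::lock_guard<std::mutex> g(qlock);
        for (;;) {
            QNode* w = waitset;
            if (w == nullptr) break;
            waitset = w->next;
            if (pending != nullptr) pending->unpark();
            pending = w->ev;
            if (!all) break;
        }
    }
    if (pending != nullptr) pending->unpark();   // outside the lock
}
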
+ // Any JavaThread will enter here with state _thread_blocked
+ int ObjectMonitor::raw_enter(TRAPS) {
+-  TEVENT (raw_enter) ; 
+-  void * Contended ; 
++  TEVENT (raw_enter) ;
++  void * Contended ;
+ 
+   // don't enter raw monitor if thread is being externally suspended, it will
+   // surprise the suspender if a "suspended" thread can still enter monitor
+@@ -4429,80 +4512,80 @@
+       jt->SR_lock()->lock_without_safepoint_check();
+     }
+     // guarded by SR_lock to avoid racing with new external suspend requests.
+-    Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ; 
++    Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
+     jt->SR_lock()->unlock();
+-  } else { 
+-    Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ; 
++  } else {
++    Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
+   }
+ 
+   if (Contended == THREAD) {
+-     _recursions ++ ; 
+-     return OM_OK ; 
++     _recursions ++ ;
++     return OM_OK ;
+   }
+ 
+-  if (Contended == NULL) { 
+-     guarantee (_owner == THREAD, "invariant") ; 
++  if (Contended == NULL) {
++     guarantee (_owner == THREAD, "invariant") ;
+      guarantee (_recursions == 0, "invariant") ;
+-     return OM_OK ; 
++     return OM_OK ;
+   }
+ 
+   THREAD->set_current_pending_monitor(this);
+ 
+   if (!THREAD->is_Java_thread()) {
+-     // No other non-Java threads besides VM thread would acquire 
++     // No other non-Java threads besides VM thread would acquire
+      // a raw monitor.
+      assert(THREAD->is_VM_thread(), "must be VM thread");
+-     SimpleEnter (THREAD) ; 
++     SimpleEnter (THREAD) ;
+    } else {
+-     guarantee (jt->thread_state() == _thread_blocked, "invariant") ; 
+-     for (;;) { 
++     guarantee (jt->thread_state() == _thread_blocked, "invariant") ;
++     for (;;) {
+        jt->set_suspend_equivalent();
+        // cleared by handle_special_suspend_equivalent_condition() or
+        // java_suspend_self()
+-       SimpleEnter (THREAD) ; 
++       SimpleEnter (THREAD) ;
+ 
+        // were we externally suspended while we were waiting?
+-       if (!jt->handle_special_suspend_equivalent_condition()) break ; 
++       if (!jt->handle_special_suspend_equivalent_condition()) break ;
+ 
+        // This thread was externally suspended
+        //
+        // This logic isn't needed for JVMTI raw monitors,
+        // but doesn't hurt just in case the suspend rules change. This
+-	   // logic is needed for the ObjectMonitor.wait() reentry phase.
+-	   // We have reentered the contended monitor, but while we were
+-	   // waiting another thread suspended us. We don't want to reenter
+-	   // the monitor while suspended because that would surprise the
+-	   // thread that suspended us.
+-	   //
+-	   // Drop the lock - 
+-       SimpleExit (THREAD) ; 
++           // logic is needed for the ObjectMonitor.wait() reentry phase.
++           // We have reentered the contended monitor, but while we were
++           // waiting another thread suspended us. We don't want to reenter
++           // the monitor while suspended because that would surprise the
++           // thread that suspended us.
++           //
++           // Drop the lock -
++       SimpleExit (THREAD) ;
+ 
+-	   jt->java_suspend_self();
+-	 }
++           jt->java_suspend_self();
++         }
+ 
+      assert(_owner == THREAD, "Fatal error with monitor owner!");
+      assert(_recursions == 0, "Fatal error with monitor recursions!");
+   }
+ 
+   THREAD->set_current_pending_monitor(NULL);
+-  guarantee (_recursions == 0, "invariant") ; 
++  guarantee (_recursions == 0, "invariant") ;
+   return OM_OK;
+ }
+ 
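
raw_enter above loops on the suspend-equivalent protocol: after acquiring, it checks whether an external suspend arrived while it was blocked and, if so, releases the monitor, self-suspends, and retries, so a "suspended" thread is never seen holding a freshly acquired lock. A reduced sketch of that loop; the predicate, suspend hook, and monitor type are hypothetical stand-ins, not the JavaThread API.

#include <atomic>

std::atomic<bool> suspend_requested{false};

// Stand-ins for handle_special_suspend_equivalent_condition() and
// java_suspend_self(); both bodies are elided in this sketch.
bool was_suspended_while_blocked() { return suspend_requested.exchange(false); }
void honour_suspension() { /* block here until resumed */ }

struct Mon {
    void enter() { /* acquire; elided */ }
    void exit()  { /* release; elided */ }
};

// Never keep the freshly acquired monitor if we were suspended while
// blocked on it: drop it, honour the suspension, then try again.
void enter_honouring_suspension(Mon& m) {
    for (;;) {
        m.enter();
        if (!was_suspended_while_blocked()) return;   // common case
        m.exit();
        honour_suspension();
    }
}
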
+ // Used mainly for JVMTI raw monitor implementation
+ // Also used for ObjectMonitor::wait().
+ int ObjectMonitor::raw_exit(TRAPS) {
+-  TEVENT (raw_exit) ; 
++  TEVENT (raw_exit) ;
+   if (THREAD != _owner) {
+     return OM_ILLEGAL_MONITOR_STATE;
+   }
+-  if (_recursions > 0) { 
+-    --_recursions ; 
+-    return OM_OK ; 
++  if (_recursions > 0) {
++    --_recursions ;
++    return OM_OK ;
+   }
+ 
+-  void * List = _EntryList ; 
+-  SimpleExit (THREAD) ; 
++  void * List = _EntryList ;
++  SimpleExit (THREAD) ;
+ 
+   return OM_OK;
+ }
+@@ -4511,66 +4594,66 @@
+ // All JavaThreads will enter here with state _thread_blocked
+ 
+ int ObjectMonitor::raw_wait(jlong millis, bool interruptible, TRAPS) {
+-  TEVENT (raw_wait) ; 
++  TEVENT (raw_wait) ;
+   if (THREAD != _owner) {
+     return OM_ILLEGAL_MONITOR_STATE;
+   }
+ 
+   // To avoid spurious wakeups we reset the parkevent -- This is strictly optional.
+-  // The caller must be able to tolerate spurious returns from raw_wait().  
+-  THREAD->_ParkEvent->reset() ; 
+-  OrderAccess::fence() ; 
++  // The caller must be able to tolerate spurious returns from raw_wait().
++  THREAD->_ParkEvent->reset() ;
++  OrderAccess::fence() ;
+ 
+   // check interrupt event
+   if (interruptible && Thread::is_interrupted(THREAD, true)) {
+     return OM_INTERRUPTED;
+   }
+ 
+-  intptr_t save = _recursions ; 
+-  _recursions = 0 ; 
+-  _waiters ++ ; 
++  intptr_t save = _recursions ;
++  _recursions = 0 ;
++  _waiters ++ ;
+   if (THREAD->is_Java_thread()) {
+-    guarantee (((JavaThread *) THREAD)->thread_state() == _thread_blocked, "invariant") ; 
++    guarantee (((JavaThread *) THREAD)->thread_state() == _thread_blocked, "invariant") ;
+     ((JavaThread *)THREAD)->set_suspend_equivalent();
+   }
+-  int rv = SimpleWait (THREAD, millis) ; 
+-  _recursions = save ; 
+-  _waiters -- ; 
++  int rv = SimpleWait (THREAD, millis) ;
++  _recursions = save ;
++  _waiters -- ;
+ 
+-  guarantee (THREAD == _owner, "invariant") ; 
++  guarantee (THREAD == _owner, "invariant") ;
+   if (THREAD->is_Java_thread()) {
+-     JavaThread * jSelf = (JavaThread *) THREAD ; 
+-     for (;;) { 
+-        if (!jSelf->handle_special_suspend_equivalent_condition()) break ; 
+-        SimpleExit (THREAD) ; 
++     JavaThread * jSelf = (JavaThread *) THREAD ;
++     for (;;) {
++        if (!jSelf->handle_special_suspend_equivalent_condition()) break ;
++        SimpleExit (THREAD) ;
+         jSelf->java_suspend_self();
+-        SimpleEnter (THREAD) ; 
+-        jSelf->set_suspend_equivalent() ; 
++        SimpleEnter (THREAD) ;
++        jSelf->set_suspend_equivalent() ;
+      }
+   }
+-  guarantee (THREAD == _owner, "invariant") ; 
++  guarantee (THREAD == _owner, "invariant") ;
+ 
+   if (interruptible && Thread::is_interrupted(THREAD, true)) {
+     return OM_INTERRUPTED;
+-  } 
+-  return OM_OK ; 
++  }
++  return OM_OK ;
+ }
+ 
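
raw_wait above stashes and restores _recursions around SimpleWait, since a recursively held monitor must be released in full for the duration of the wait. The pattern in miniature; the type and elided bodies are illustrative only.

struct RecursiveMon {
    int recursions = 0;
    void release_all() { /* full exit; elided */ }
    void reacquire()   { /* full enter; elided */ }

    void wait_fully_released() {
        int save = recursions;   // may be > 0 for a recursive hold
        recursions = 0;
        release_all();
        // ... park here until notified or timed out ...
        reacquire();
        recursions = save;       // restore the caller's nesting depth
    }
};
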
+ int ObjectMonitor::raw_notify(TRAPS) {
+-  TEVENT (raw_notify) ; 
++  TEVENT (raw_notify) ;
+   if (THREAD != _owner) {
+     return OM_ILLEGAL_MONITOR_STATE;
+   }
+-  SimpleNotify (THREAD, false) ; 
++  SimpleNotify (THREAD, false) ;
+   return OM_OK;
+ }
+ 
+ int ObjectMonitor::raw_notifyAll(TRAPS) {
+-  TEVENT (raw_notifyAll) ; 
++  TEVENT (raw_notifyAll) ;
+   if (THREAD != _owner) {
+     return OM_ILLEGAL_MONITOR_STATE;
+   }
+-  SimpleNotify (THREAD, true) ; 
++  SimpleNotify (THREAD, true) ;
+   return OM_OK;
+ }
+ 
+@@ -4611,7 +4694,7 @@
+ 
+ // Check if monitor belongs to the monitor cache
+ // The list is grow-only so it's *relatively* safe to traverse
+-// the list of extant blocks without taking a lock.  
++// the list of extant blocks without taking a lock.
+ 
+ int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
+   ObjectMonitor* block = gBlockList;
+@@ -4631,6 +4714,3 @@
+ }
+ 
+ #endif
+-
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/synchronizer.hpp openjdk/hotspot/src/share/vm/runtime/synchronizer.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/synchronizer.hpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/synchronizer.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)synchronizer.hpp	1.71 07/05/26 16:04:34 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,23 +19,23 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class BasicLock VALUE_OBJ_CLASS_SPEC {
+   friend class VMStructs;
+- private:  
++ private:
+   volatile markOop _displaced_header;
+  public:
+   markOop      displaced_header() const               { return _displaced_header; }
+   void         set_displaced_header(markOop header)   { _displaced_header = header; }
+-  
++
+   void print_on(outputStream* st) const;
+ 
+   // move a basic lock (used during deoptimization
+   void move_to(oop obj, BasicLock* dest);
+ 
+-  static int displaced_header_offset_in_bytes()       { return offset_of(BasicLock, _displaced_header); }  
++  static int displaced_header_offset_in_bytes()       { return offset_of(BasicLock, _displaced_header); }
+ };
+ 
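
BasicLock above stores the object's displaced header in the owning frame and publishes its byte offset so compiled and interpreted fast paths can address the slot directly. A simplified sketch of that layout contract; MarkWord and the struct are stand-ins, not the real markOop.

#include <cstddef>
#include <cstdint>

struct MarkWord { uintptr_t bits; };      // stand-in for the header word

struct BasicLockSketch {
    volatile MarkWord displaced_header;   // header copied into the frame
};

// Generated code addresses the slot by a baked-in byte offset, which is
// why the class exports it as a constant.
static_assert(offsetof(BasicLockSketch, displaced_header) == 0,
              "assembly fast paths rely on this layout");
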
+ // A BasicObjectLock associates a specific Java object with a BasicLock.
+@@ -59,12 +56,12 @@
+  public:
+   // Manipulation
+   oop      obj() const                                { return _obj;  }
+-  void set_obj(oop obj)                               { _obj = obj; } 
++  void set_obj(oop obj)                               { _obj = obj; }
+   BasicLock* lock()                                   { return &_lock; }
+ 
+   // Note: Use frame::interpreter_frame_monitor_size() for the size of BasicObjectLocks
+   //       in interpreter activation frames since it includes machine-specific padding.
+-  static int size()                                   { return sizeof(BasicObjectLock)/wordSize; }          
++  static int size()                                   { return sizeof(BasicObjectLock)/wordSize; }
+ 
+   // GC support
+   void oops_do(OopClosure* f) { f->do_oop(&_obj); }
+@@ -75,7 +72,7 @@
+ 
+ class ObjectMonitor;
+ 
+-class ObjectSynchronizer : AllStatic { 
++class ObjectSynchronizer : AllStatic {
+   friend class VMStructs;
+  public:
+   typedef enum {
+@@ -83,21 +80,21 @@
+     owner_none,
+     owner_other
+   } LockOwnership;
+-  // exit must be implemented non-blocking, since the compiler cannot easily handle 
++  // exit must be implemented non-blocking, since the compiler cannot easily handle
+   // deoptimization at monitor exit. Hence, it does not take a Handle argument.
+ 
+   // This is full version of monitor enter and exit. I choose not
+   // to use enter() and exit() in order to make sure user be ware
+   // of the performance and semantics difference. They are normally
+-  // used by ObjectLocker etc. The interpreter and compiler use 
++  // used by ObjectLocker etc. The interpreter and compiler use
+   // assembly copies of these routines. Please keep them synchornized.
+   //
+   // attempt_rebias flag is used by UseBiasedLocking implementation
+   static void fast_enter  (Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS);
+   static void fast_exit   (oop obj,    BasicLock* lock, Thread* THREAD);
+ 
+-  // WARNING: They are ONLY used to handle the slow cases. They should 
+-  // only be used when the fast cases failed. Use of these functions 
++  // WARNING: They are ONLY used to handle the slow cases. They should
++  // only be used when the fast cases failed. Use of these functions
+   // without previous fast case check may cause fatal error.
+   static void slow_enter  (Handle obj, BasicLock* lock, TRAPS);
+   static void slow_exit   (oop obj,    BasicLock* lock, Thread* THREAD);
+@@ -115,8 +112,8 @@
+ 
+   // Special internal-use-only method for use by JVM infrastructure
+   // that needs to wait() on a java-level object but that can't risk
+-  // throwing unexpected InterruptedExecutionExceptions.  
+-  static void waitUninterruptibly (Handle obj, jlong Millis, Thread * THREAD) ; 
++  // throwing unexpected InterruptedExceptions.
++  static void waitUninterruptibly (Handle obj, jlong Millis, Thread * THREAD) ;
+ 
+   // used by classloading to free classloader object lock,
+   // wait on an internal lock, and reclaim original lock
+@@ -126,8 +123,8 @@
+ 
+   // thread-specific and global objectMonitor free list accessors
+   static ObjectMonitor * omAlloc (Thread * Self) ;
+-  static void omRelease (Thread * Self, ObjectMonitor * m) ; 
+-  static void omFlush   (Thread * Self) ; 
++  static void omRelease (Thread * Self, ObjectMonitor * m) ;
++  static void omFlush   (Thread * Self) ;
+ 
+   // Inflate light weight monitor to heavy weight monitor
+   static ObjectMonitor* inflate(Thread * Self, oop obj);
+@@ -136,9 +133,9 @@
+ 
+   // Returns the identity hash value for an oop
+   // NOTE: It may cause monitor inflation
+-  static intptr_t identity_hash_value_for(Handle obj);  
++  static intptr_t identity_hash_value_for(Handle obj);
+   static intptr_t FastHashCode (Thread * Self, oop obj) ;
+-  
++
+   // java.lang.Thread support
+   static bool current_thread_holds_lock(JavaThread* thread, Handle h_obj);
+   static LockOwnership query_lock_ownership(JavaThread * self, Handle h_obj);
+@@ -148,7 +145,7 @@
+   // JNI detach support
+   static void release_monitors_owned_by_thread(TRAPS);
+   static void monitors_iterate(MonitorClosure* m);
+-  
++
+   // GC: we current use aggressive monitor deflation policy
+   // Basically we deflate all monitors that are not busy.
+   // An adaptive profile-based deflation policy could be used if needed
+@@ -161,19 +158,19 @@
+   static int  verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;
+ 
+  private:
+-  enum { _BLOCKSIZE = 128 };     
++  enum { _BLOCKSIZE = 128 };
+   static ObjectMonitor* gBlockList;
+   static ObjectMonitor * volatile gFreeList;
+-  
++
+  public:
+   static void Initialize () ;
+-  static PerfCounter * _sync_ContendedLockAttempts ; 
+-  static PerfCounter * _sync_FutileWakeups ; 
+-  static PerfCounter * _sync_Parks ; 
+-  static PerfCounter * _sync_EmptyNotifications ; 
+-  static PerfCounter * _sync_Notifications ; 
+-  static PerfCounter * _sync_SlowEnter ; 
+-  static PerfCounter * _sync_SlowExit ; 
++  static PerfCounter * _sync_ContendedLockAttempts ;
++  static PerfCounter * _sync_FutileWakeups ;
++  static PerfCounter * _sync_Parks ;
++  static PerfCounter * _sync_EmptyNotifications ;
++  static PerfCounter * _sync_Notifications ;
++  static PerfCounter * _sync_SlowEnter ;
++  static PerfCounter * _sync_SlowExit ;
+   static PerfCounter * _sync_SlowNotify ;
+   static PerfCounter * _sync_SlowNotifyAll ;
+   static PerfCounter * _sync_FailedSpins ;
+@@ -183,12 +180,12 @@
+   static PerfCounter * _sync_MonInCirculation ;
+   static PerfCounter * _sync_MonScavenged ;
+   static PerfCounter * _sync_Inflations ;
+-  static PerfCounter * _sync_Deflations ; 
++  static PerfCounter * _sync_Deflations ;
+   static PerfLongVariable * _sync_MonExtant ;
+ 
+  public:
+   static void RegisterSpinCallback (int (*)(intptr_t, int), intptr_t) ;
+-  
++
+ };
+ 
+ // ObjectLocker enforced balanced locking and can never thrown an
+@@ -206,12 +203,12 @@
+   bool      _dolock;   // default true
+  public:
+   ObjectLocker(Handle obj, Thread* thread, bool doLock = true);
+-  ~ObjectLocker();                                
+-  
++  ~ObjectLocker();
++
+   // Monitor behavior
+   void wait      (TRAPS)      { ObjectSynchronizer::wait     (_obj, 0, CHECK); } // wait forever
+-  void notify_all(TRAPS)      { ObjectSynchronizer::notifyall(_obj,    CHECK); } 
+-  void waitUninterruptibly (TRAPS) { ObjectSynchronizer::waitUninterruptibly (_obj, 0, CHECK);} 
++  void notify_all(TRAPS)      { ObjectSynchronizer::notifyall(_obj,    CHECK); }
++  void waitUninterruptibly (TRAPS) { ObjectSynchronizer::waitUninterruptibly (_obj, 0, CHECK);}
+   // complete_exit gives up lock completely, returning recursion count
+   // reenter reclaims lock with original recursion count
+   intptr_t complete_exit(TRAPS) { return  ObjectSynchronizer::complete_exit(_obj, CHECK_0); }
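
ObjectLocker above is the RAII guard the trailing comment describes: the constructor enters the monitor and the destructor exits it, so locking stays balanced across early returns and exceptions. A generic sketch of the same shape over std::mutex; illustrative, not the VM type.

#include <mutex>

class ScopedLocker {
    std::mutex& m_;
    bool locked_;
public:
    explicit ScopedLocker(std::mutex& m, bool do_lock = true)
        : m_(m), locked_(do_lock) {
        if (do_lock) m_.lock();
    }
    ~ScopedLocker() { if (locked_) m_.unlock(); }
    ScopedLocker(const ScopedLocker&) = delete;
    ScopedLocker& operator=(const ScopedLocker&) = delete;
};

void guarded_section(std::mutex& class_lock) {
    ScopedLocker ol(class_lock);   // like ObjectLocker(obj, thread, doLock)
    // ... any early return or exception still releases the lock ...
}
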
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/task.cpp openjdk/hotspot/src/share/vm/runtime/task.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/task.cpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/task.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)task.cpp	1.27 07/05/05 17:06:59 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -84,7 +81,7 @@
+ }
+ 
+ bool PeriodicTask::is_enrolled() const {
+-  for(int index = 0; index < _num_tasks; index++) 
++  for(int index = 0; index < _num_tasks; index++)
+     if (_tasks[index] == this) return true;
+   return false;
+ }
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/task.hpp openjdk/hotspot/src/share/vm/runtime/task.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/task.hpp	2008-02-28 05:02:43.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/task.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)task.hpp	1.23 07/05/05 17:06:59 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A PeriodicTask has the sole purpose of executing its task
+@@ -40,8 +37,8 @@
+   // is appropriate;  it must be between min_interval and max_interval,
+   // and have a granularity of interval_gran (all in millis).
+   enum { max_tasks     = 10,       // Max number of periodic tasks in system
+-         interval_gran = 10,       
+-         min_interval  = 10, 
++         interval_gran = 10,
++         min_interval  = 10,
+          max_interval  = 10000 };
+ 
+   static int num_tasks()   { return _num_tasks; }
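
The PeriodicTask bounds above (all in milliseconds) imply that a requested interval must lie in [min_interval, max_interval] and sit on the interval_gran grid. A tiny sketch of that validation; the helper is hypothetical.

#include <cassert>

enum { interval_gran = 10, min_interval = 10, max_interval = 10000 };

int checked_interval(int ms) {
    assert(ms >= min_interval && ms <= max_interval);
    assert(ms % interval_gran == 0);   // must sit on the 10 ms grid
    return ms;
}
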
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/thread.cpp openjdk/hotspot/src/share/vm/runtime/thread.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/thread.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/thread.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)thread.cpp	1.809 07/06/01 01:36:09 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -34,9 +31,9 @@
+ 
+ HS_DTRACE_PROBE_DECL(hotspot, vm__init__begin);
+ HS_DTRACE_PROBE_DECL(hotspot, vm__init__end);
+-HS_DTRACE_PROBE_DECL5(hotspot, thread__start, char*, intptr_t, 
++HS_DTRACE_PROBE_DECL5(hotspot, thread__start, char*, intptr_t,
+   intptr_t, intptr_t, bool);
+-HS_DTRACE_PROBE_DECL5(hotspot, thread__stop, char*, intptr_t, 
++HS_DTRACE_PROBE_DECL5(hotspot, thread__stop, char*, intptr_t,
+   intptr_t, intptr_t, bool);
+ 
+ #define DTRACE_THREAD_PROBE(probe, javathread)                             \
+@@ -54,7 +51,7 @@
+ 
+ #else //  ndef DTRACE_ENABLED
+ 
+-#define DTRACE_THREAD_PROBE(probe, javathread) 
++#define DTRACE_THREAD_PROBE(probe, javathread)
+ 
+ #endif // ndef DTRACE_ENABLED
+ 
+@@ -105,7 +102,7 @@
+ 
+ 
+ Thread::Thread() {
+-  // stack  
++  // stack
+   _stack_base   = NULL;
+   _stack_size   = 0;
+   _self_raw_id  = 0;
+@@ -115,7 +112,7 @@
+   // allocated data structures
+   set_resource_area(new ResourceArea());
+   set_handle_area(new HandleArea(NULL));
+-  set_active_handles(NULL); 
++  set_active_handles(NULL);
+   set_free_handle_block(NULL);
+   set_last_handle_mark(NULL);
+   set_osthread(NULL);
+@@ -126,10 +123,10 @@
+   // the handle mark links itself to last_handle_mark
+   new HandleMark(this);
+ 
+-  // plain initialization  
+-  debug_only(_owned_locks = NULL;) 
++  // plain initialization
++  debug_only(_owned_locks = NULL;)
+   debug_only(_allow_allocation_count = 0;)
+-  NOT_PRODUCT(_allow_safepoint_count = 0;)  
++  NOT_PRODUCT(_allow_safepoint_count = 0;)
+   CHECK_UNHANDLED_OOPS_ONLY(_gc_locked_out_count = 0;)
+   _highest_lock = NULL;
+   _jvmti_env_iteration_count = 0;
+@@ -139,24 +136,35 @@
+   _current_pending_monitor_is_from_java = true;
+   _current_waiting_monitor = NULL;
+   _num_nested_signal = 0;
+-  omFreeList = NULL ; 
+-  omFreeCount = 0 ; 
+-  omFreeProvision = 32 ; 
++  omFreeList = NULL ;
++  omFreeCount = 0 ;
++  omFreeProvision = 32 ;
+ 
+   _SR_lock = new Monitor(Mutex::suspend_resume, "SR_lock", true);
+   _suspend_flags = 0;
+ 
+-  // thread-specific hashCode stream generator state. 
+-  _hashStateX = os::random() ; 
+-  _hashStateY = 842502087 ; 
+-  _hashStateZ = 0x8767 ;    // (int)(3579807591LL & 0xffff) ; 
+-  _hashStateW = 273326509 ; 
+-
+-  _OnTrap   = 0 ; 
+-  _schedctl = NULL ; 
+-  _Stalled  = 0 ; 
+-  _TypeTag  = 0x2BAD ; 
+-  _ParkEvent = ParkEvent::Allocate (this) ; 
++  // thread-specific hashCode stream generator state - Marsaglia shift-xor form
++  _hashStateX = os::random() ;
++  _hashStateY = 842502087 ;
++  _hashStateZ = 0x8767 ;    // (int)(3579807591LL & 0xffff) ;
++  _hashStateW = 273326509 ;
++
++  _OnTrap   = 0 ;
++  _schedctl = NULL ;
++  _Stalled  = 0 ;
++  _TypeTag  = 0x2BAD ;
++
++  // Many of the following fields are effectively final - immutable
++  // Note that nascent threads can't use the Native Monitor-Mutex
++  // construct until the _MutexEvent is initialized ...
++  // CONSIDER: instead of using a fixed set of purpose-dedicated ParkEvents
++  // we might instead use a stack of ParkEvents that we could provision on-demand.
++  // The stack would act as a cache to avoid calls to ParkEvent::Allocate()
++  // and ::Release()
++  _ParkEvent   = ParkEvent::Allocate (this) ;
++  _SleepEvent  = ParkEvent::Allocate (this) ;
++  _MutexEvent  = ParkEvent::Allocate (this) ;
++  _MuxEvent    = ParkEvent::Allocate (this) ;
+ 
+ #ifdef CHECK_UNHANDLED_OOPS
+   if (CheckUnhandledOops) {
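
The four _hashState words seeded above drive a Marsaglia shift-xor generator used for identity hash codes elsewhere in this patch. A self-contained sketch of that generator shape; the fixed y/z/w seeds are the constants from the constructor, while x uses an arbitrary stand-in for os::random().

#include <cstdio>

struct XorShift128 {
    unsigned x = 123456789;    // stand-in for os::random()
    unsigned y = 842502087;
    unsigned z = 0x8767;
    unsigned w = 273326509;

    unsigned next() {
        unsigned t = x ^ (x << 11);
        x = y; y = z; z = w;                   // rotate the state words
        w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));  // mix in the shifted-out word
        return w;
    }
};

int main() {
    XorShift128 rng;                            // per-thread state in the VM
    for (int i = 0; i < 4; i++) printf("%u\n", rng.next());
    return 0;
}
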
+@@ -174,7 +182,7 @@
+ }
+ 
+ void Thread::initialize_thread_local_storage() {
+-  // Note: Make sure this method only calls 
++  // Note: Make sure this method only calls
+   // non-blocking operations. Otherwise, it might not work
+   // with the thread-startup/safepoint interaction.
+ 
+@@ -184,7 +192,7 @@
+ 
+   // initialize structure dependent on thread local storage
+   ThreadLocalStorage::set_thread(this);
+-  
++
+   // set up any platform-specific state.
+   os::initialize_thread();
+ 
+@@ -198,7 +206,7 @@
+ 
+ Thread::~Thread() {
+   // Reclaim the objectmonitors from the omFreeList of the moribund thread.
+-  ObjectSynchronizer::omFlush (this) ; 
++  ObjectSynchronizer::omFlush (this) ;
+ 
+   // deallocate data structures
+   delete resource_area();
+@@ -208,11 +216,12 @@
+   delete last_handle_mark();
+   assert(last_handle_mark() == NULL, "check we have reached the end");
+ 
+-  // It's possible we can encounter a null _ParkEvent in stillborn threads.
+-  if (_ParkEvent != NULL) {
+-     ParkEvent::Release (_ParkEvent) ; 
+-     _ParkEvent = NULL ; 
+-  }
++  // It's possible we can encounter a null _ParkEvent, etc., in stillborn threads.
++  // We NULL out the fields for good hygiene.
++  ParkEvent::Release (_ParkEvent)   ; _ParkEvent   = NULL ;
++  ParkEvent::Release (_SleepEvent)  ; _SleepEvent  = NULL ;
++  ParkEvent::Release (_MutexEvent)  ; _MutexEvent  = NULL ;
++  ParkEvent::Release (_MuxEvent)    ; _MuxEvent    = NULL ;
+ 
+   delete handle_area();
+ 
+@@ -220,7 +229,7 @@
+   if (osthread() != NULL) os::free_thread(osthread());
+ 
+   delete _SR_lock;
+-  
++
+   // clear thread local storage if the Thread is deleting itself
+   if (this == Thread::current()) {
+     ThreadLocalStorage::set_thread(NULL);
+@@ -249,13 +258,13 @@
+ 
+ #ifndef PRODUCT
+ // Tracing method for basic thread operations
+-void Thread::trace(const char* msg, const Thread* const thread) {    
++void Thread::trace(const char* msg, const Thread* const thread) {
+   if (!TraceThreadEvents) return;
+   ResourceMark rm;
+   ThreadCritical tc;
+   const char *name = "non-Java thread";
+   int prio = -1;
+-  if (thread->is_Java_thread() 
++  if (thread->is_Java_thread()
+       && !thread->is_Compiler_thread()) {
+     // The Threads_lock must be held to get information about
+     // this thread but may not be in some situations when
+@@ -269,22 +278,22 @@
+     name = (char *)jt->get_thread_name();
+     oop thread_oop = jt->threadObj();
+     if (thread_oop != NULL) {
+-      prio = java_lang_Thread::priority(thread_oop); 
++      prio = java_lang_Thread::priority(thread_oop);
+     }
+     if (release_Threads_lock) {
+       Threads_lock->unlock();
+     }
+-  }    
++  }
+   tty->print_cr("Thread::%s " INTPTR_FORMAT " [%lx] %s (prio: %d)", msg, thread, thread->osthread()->thread_id(), name, prio);
+ }
+ #endif
+ 
+ 
+-ThreadPriority Thread::get_priority(const Thread* const thread) {  
+-  trace("get priority", thread);  
++ThreadPriority Thread::get_priority(const Thread* const thread) {
++  trace("get priority", thread);
+   ThreadPriority priority;
+   // Can return an error!
+-  (void)os::get_priority(thread, priority);  
++  (void)os::get_priority(thread, priority);
+   assert(MinPriority <= priority && priority <= MaxPriority, "non-Java priority found");
+   return priority;
+ }
+@@ -307,15 +316,15 @@
+       // Can not set it after the thread started because we do not know the
+       // exact thread state at that time. It could be in MONITOR_WAIT or
+       // in SLEEPING or some other state.
+-      java_lang_Thread::set_thread_status(((JavaThread*)thread)->threadObj(), 
+- 					  java_lang_Thread::RUNNABLE);
++      java_lang_Thread::set_thread_status(((JavaThread*)thread)->threadObj(),
++                                          java_lang_Thread::RUNNABLE);
+     }
+     os::start_thread(thread);
+   }
+ }
+ 
+ // Enqueue a VM_Operation to do the job for us - sometime later
+-void Thread::send_async_exception(oop java_thread, oop java_throwable) {    
++void Thread::send_async_exception(oop java_thread, oop java_throwable) {
+   VM_ThreadStop* vm_stop = new VM_ThreadStop(java_thread, java_throwable);
+   VMThread::execute(vm_stop);
+ }
+@@ -421,7 +430,7 @@
+       *bits |= 0x00000100;
+       return false;
+     }
+-  
++
+     if (!is_external_suspend()) {
+       // Suspend request is cancelled. This is always checked before
+       // is_ext_suspended() to reduce the risk of a rogue resume
+@@ -429,13 +438,13 @@
+       *bits |= 0x00000200;
+       return false;
+     }
+-  
++
+     if (is_ext_suspended()) {
+       // thread is suspended
+       *bits |= 0x00000400;
+       return true;
+     }
+-  
++
+     // Now that we no longer do hard suspends of threads running
+     // native code, the target thread can be changing thread state
+     // while we are in this routine:
+@@ -463,9 +472,9 @@
+       *bits |= 0x00001000;
+       return true;
+     } else if (save_state == _thread_in_native && frame_anchor()->walkable()) {
+-      // Threads running native code will self-suspend on native==>VM/Java 
+-      // transitions. If its stack is walkable (should always be the case 
+-      // unless this function is called before the actual java_suspend() 
++      // Threads running native code will self-suspend on native==>VM/Java
++      // transitions. If its stack is walkable (should always be the case
++      // unless this function is called before the actual java_suspend()
+       // call), then the wait is done.
+       *bits |= 0x00002000;
+       return true;
+@@ -496,17 +505,17 @@
+ 
+       // We wait for the thread to transition to a more usable state.
+       for (int i = 1; i <= SuspendRetryCount; i++) {
+-	// We used to do an "os::yield_all(i)" call here with the intention
+-	// that yielding would increase on each retry. However, the parameter
+-	// is ignored on Linux which means the yield didn't scale up. Waiting
+-	// on the SR_lock below provides a much more predictable scale up for
+-	// the delay. It also provides a simple/direct point to check for any
+-	// safepoint requests from the VMThread
+-
+-	// temporarily drops SR_lock while doing wait with safepoint check 
+-	// (if we're a JavaThread - the WatcherThread can also call this) 
+-	// and increase delay with each retry
+-	SR_lock()->wait(!Thread::current()->is_Java_thread(), i * delay);
++        // We used to do an "os::yield_all(i)" call here with the intention
++        // that yielding would increase on each retry. However, the parameter
++        // is ignored on Linux which means the yield didn't scale up. Waiting
++        // on the SR_lock below provides a much more predictable scale up for
++        // the delay. It also provides a simple/direct point to check for any
++        // safepoint requests from the VMThread
++
++        // temporarily drops SR_lock while doing wait with safepoint check
++        // (if we're a JavaThread - the WatcherThread can also call this)
++        // and increase delay with each retry
++        SR_lock()->wait(!Thread::current()->is_Java_thread(), i * delay);
+ 
+         // check the actual thread state instead of what we saved above
+         if (thread_state() != _thread_in_native_trans) {
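
The reindented retry loop above waits on SR_lock with a timeout of i * delay, replacing the non-scaling os::yield_all with a predictable, linearly growing backoff that also provides a safepoint-check point. A generic sketch of that waiting discipline; names and parameters are illustrative.

#include <chrono>
#include <condition_variable>
#include <functional>
#include <mutex>

// Wait for 'done' to become true, sleeping i * base_delay_ms on the i-th
// retry; spurious wakeups simply fall through to the next re-check.
bool wait_with_growing_delay(std::mutex& m, std::condition_variable& cv,
                             const std::function<bool()>& done,
                             int retries, int base_delay_ms) {
    std::unique_lock<std::mutex> lk(m);
    for (int i = 1; i <= retries; i++) {
        if (done()) return true;
        cv.wait_for(lk, std::chrono::milliseconds(i * base_delay_ms));
    }
    return done();
}
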
+@@ -549,8 +558,8 @@
+ 
+   {
+     MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
+-    is_suspended = is_ext_suspend_completed(true /* called_by_wait */, 
+-					    delay, bits);
++    is_suspended = is_ext_suspend_completed(true /* called_by_wait */,
++                                            delay, bits);
+     pending = is_external_suspend();
+   }
+   // must release SR_lock to allow suspension to complete
+@@ -584,8 +593,8 @@
+       // can also call this)  and increase delay with each retry
+       SR_lock()->wait(!Thread::current()->is_Java_thread(), i * delay);
+ 
+-      is_suspended = is_ext_suspend_completed(true /* called_by_wait */, 
+-					      delay, bits);
++      is_suspended = is_ext_suspend_completed(true /* called_by_wait */,
++                                              delay, bits);
+ 
+       // It is possible for the external suspend request to be cancelled
+       // (by a resume) before the actual suspend operation is completed.
+@@ -628,7 +637,7 @@
+ }
+ #endif /* PRODUCT */
+ 
+-// Called by flat profiler 
++// Called by flat profiler
+ // Callers have already called wait_for_ext_suspend_completion
+ // The assertion for that is currently too complex to put here:
+ bool JavaThread::profile_last_Java_frame(frame* _fr) {
+@@ -648,7 +657,7 @@
+ }
+ 
+ bool Thread::is_interrupted(Thread* thread, bool clear_interrupted) {
+-  trace("is_interrupted", thread);  
++  trace("is_interrupted", thread);
+   debug_only(check_for_dangling_thread_pointer(thread);)
+   // Note:  If clear_interrupted==false, this simply fetches and
+   // returns the value of the field osthread()->interrupted().
+@@ -665,12 +674,12 @@
+     else {
+       guarantee(res == strong_roots_parity, "Or else what?");
+       assert(SharedHeap::heap()->n_par_threads() > 0,
+-	     "Should only fail when parallel.");
++             "Should only fail when parallel.");
+       return false;
+-    }      
++    }
+   }
+   assert(SharedHeap::heap()->n_par_threads() > 0,
+-	 "Should only fail when parallel.");
++         "Should only fail when parallel.");
+   return false;
+ }
+ 
+@@ -713,8 +722,8 @@
+ }
+ 
+ #ifdef ASSERT
+-void Thread::print_owned_locks_on(outputStream* st) const {  
+-  Mutex *cur = _owned_locks;
++void Thread::print_owned_locks_on(outputStream* st) const {
++  Monitor *cur = _owned_locks;
+   if (cur == NULL) {
+     st->print(" (no locks) ");
+   } else {
+@@ -729,7 +738,7 @@
+ static int ref_use_count  = 0;
+ 
+ bool Thread::owns_locks_but_compiled_lock() const {
+-  for(Mutex *cur = _owned_locks; cur; cur = cur->next()) {
++  for(Monitor *cur = _owned_locks; cur; cur = cur->next()) {
+     if (cur != Compile_lock) return true;
+   }
+   return false;
+@@ -743,7 +752,7 @@
+ // The flag: potential_vm_operation notifies if this particular safepoint state could potential
+ // invoke the vm-thread (i.e., and oop allocation). In that case, we also have to make sure that
+ // no threads which allow_vm_block's are held
+-void Thread::check_for_valid_safepoint_state(bool potential_vm_operation) {  
++void Thread::check_for_valid_safepoint_state(bool potential_vm_operation) {
+     // Check if current thread is allowed to block at a safepoint
+     if (!(_allow_safepoint_count == 0))
+       fatal("Possible safepoint reached by thread that does not allow it");
+@@ -751,23 +760,23 @@
+       fatal("LEAF method calling lock?");
+     }
+ 
+-#ifdef ASSERT    
+-    if (potential_vm_operation && is_Java_thread() 
++#ifdef ASSERT
++    if (potential_vm_operation && is_Java_thread()
+         && !Universe::is_bootstrapping()) {
+       // Make sure we do not hold any locks that the VM thread also uses.
+-      // This could potentially lead to deadlocks      
+-      for(Mutex *cur = _owned_locks; cur; cur = cur->next()) {
++      // This could potentially lead to deadlocks
++      for(Monitor *cur = _owned_locks; cur; cur = cur->next()) {
+         // Threads_lock is special, since the safepoint synchronization will not start before this is
+         // acquired. Hence, a JavaThread cannot be holding it at a safepoint. So is VMOperationRequest_lock,
+         // since it is used to transfer control between JavaThreads and the VMThread
+         // Do not *exclude* any locks unless you are absolutly sure it is correct. Ask someone else first!
+-        if ( (cur->allow_vm_block() && 
+-              cur != Threads_lock && 
++        if ( (cur->allow_vm_block() &&
++              cur != Threads_lock &&
+               cur != Compile_lock &&               // Temporary: should not be necessary when we get spearate compilation
+-              cur != VMOperationRequest_lock &&               
++              cur != VMOperationRequest_lock &&
+               cur != VMOperationQueue_lock) ||
+-              cur->rank() == Mutex::special) { 
+-          warning("Thread holding lock at safepoint that vm can block on: %s", cur->name());          
++              cur->rank() == Mutex::special) {
++          warning("Thread holding lock at safepoint that vm can block on: %s", cur->name());
+         }
+       }
+     }
+@@ -789,7 +798,7 @@
+   // frame has been popped already.  Correct as long as stacks are at least 4K long and aligned.)
+   address end = os::current_stack_pointer();
+   if (_highest_lock >= adr && adr >= end) return true;
+-  
++
+   return false;
+ }
+ 
+@@ -828,15 +837,15 @@
+ static Handle create_initial_thread_group(TRAPS) {
+   klassOop k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_ThreadGroup(), true, CHECK_NH);
+   instanceKlassHandle klass (THREAD, k);
+-  
++
+   Handle system_instance = klass->allocate_instance_handle(CHECK_NH);
+   {
+     JavaValue result(T_VOID);
+-    JavaCalls::call_special(&result, 
+-                            system_instance, 
+-                            klass, 
+-                            vmSymbolHandles::object_initializer_name(), 
+-                            vmSymbolHandles::void_method_signature(), 
++    JavaCalls::call_special(&result,
++                            system_instance,
++                            klass,
++                            vmSymbolHandles::object_initializer_name(),
++                            vmSymbolHandles::void_method_signature(),
+                             CHECK_NH);
+   }
+   Universe::set_system_thread_group(system_instance());
+@@ -858,34 +867,34 @@
+ }
+ 
+ // Creates the initial Thread
+-static oop create_initial_thread(Handle thread_group, JavaThread* thread, TRAPS) {  
++static oop create_initial_thread(Handle thread_group, JavaThread* thread, TRAPS) {
+   klassOop k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_Thread(), true, CHECK_NULL);
+   instanceKlassHandle klass (THREAD, k);
+   instanceHandle thread_oop = klass->allocate_instance_handle(CHECK_NULL);
+-  
++
+   java_lang_Thread::set_thread(thread_oop(), thread);
+   java_lang_Thread::set_priority(thread_oop(), NormPriority);
+   thread->set_threadObj(thread_oop());
+-    
++
+   Handle string = java_lang_String::create_from_str("main", CHECK_NULL);
+-  
++
+   JavaValue result(T_VOID);
+-  JavaCalls::call_special(&result, thread_oop, 
+-                                   klass, 
+-                                   vmSymbolHandles::object_initializer_name(), 
+-                                   vmSymbolHandles::threadgroup_string_void_signature(), 
+-                                   thread_group, 
+-                                   string, 
+-                                   CHECK_NULL);  
++  JavaCalls::call_special(&result, thread_oop,
++                                   klass,
++                                   vmSymbolHandles::object_initializer_name(),
++                                   vmSymbolHandles::threadgroup_string_void_signature(),
++                                   thread_group,
++                                   string,
++                                   CHECK_NULL);
+   return thread_oop();
+ }
+ 
+ static void call_initializeSystemClass(TRAPS) {
+   klassOop k =  SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_System(), true, CHECK);
+   instanceKlassHandle klass (THREAD, k);
+-  
++
+   JavaValue result(T_VOID);
+-  JavaCalls::call_static(&result, klass, vmSymbolHandles::initializeSystemClass_name(), 
++  JavaCalls::call_static(&result, klass, vmSymbolHandles::initializeSystemClass_name(),
+                                          vmSymbolHandles::void_method_signature(), CHECK);
+ }
+ 
+@@ -893,26 +902,26 @@
+   // the vm info string
+   ResourceMark rm(THREAD);
+   const char *vm_info = VM_Version::vm_info_string();
+-  
++
+   // java.lang.System class
+   klassOop k =  SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_System(), true, CHECK);
+   instanceKlassHandle klass (THREAD, k);
+-  
++
+   // setProperty arguments
+   Handle key_str    = java_lang_String::create_from_str("java.vm.info", CHECK);
+   Handle value_str  = java_lang_String::create_from_str(vm_info, CHECK);
+-  
++
+   // return value
+-  JavaValue r(T_OBJECT); 
++  JavaValue r(T_OBJECT);
+ 
+   // public static String setProperty(String key, String value);
+   JavaCalls::call_static(&r,
+                          klass,
+-                         vmSymbolHandles::setProperty_name(), 
+-                         vmSymbolHandles::string_string_string_signature(), 
+-                         key_str, 
+-                         value_str, 
+-                         CHECK);  
++                         vmSymbolHandles::setProperty_name(),
++                         vmSymbolHandles::string_string_string_signature(),
++                         key_str,
++                         value_str,
++                         CHECK);
+ }
+ 
+ 
+@@ -934,10 +943,10 @@
+     // Thread gets assigned specified name and null target
+     JavaCalls::call_special(&result,
+                             thread_oop,
+-                            klass, 
+-                            vmSymbolHandles::object_initializer_name(), 
++                            klass,
++                            vmSymbolHandles::object_initializer_name(),
+                             vmSymbolHandles::threadgroup_string_void_signature(),
+-                            thread_group, // Argument 1                        
++                            thread_group, // Argument 1
+                             name,         // Argument 2
+                             THREAD);
+   } else {
+@@ -945,10 +954,10 @@
+     // (java.lang.Thread doesn't have a constructor taking only a ThreadGroup argument)
+     JavaCalls::call_special(&result,
+                             thread_oop,
+-                            klass, 
+-                            vmSymbolHandles::object_initializer_name(), 
++                            klass,
++                            vmSymbolHandles::object_initializer_name(),
+                             vmSymbolHandles::threadgroup_runnable_void_signature(),
+-                            thread_group, // Argument 1                        
++                            thread_group, // Argument 1
+                             Handle(),     // Argument 2
+                             THREAD);
+   }
+@@ -961,15 +970,15 @@
+   if (HAS_PENDING_EXCEPTION) {
+     return;
+   }
+-  
++
+   KlassHandle group(this, SystemDictionary::threadGroup_klass());
+   Handle threadObj(this, this->threadObj());
+ 
+-  JavaCalls::call_special(&result, 
+-                         thread_group, 
++  JavaCalls::call_special(&result,
++                         thread_group,
+                          group,
+-                         vmSymbolHandles::add_method_name(), 
+-                         vmSymbolHandles::thread_void_signature(), 
++                         vmSymbolHandles::add_method_name(),
++                         vmSymbolHandles::thread_void_signature(),
+                          threadObj,          // Arg 1
+                          THREAD);
+ 
+@@ -994,7 +1003,7 @@
+   _name = NEW_C_HEAP_ARRAY(char, max_name_len);
+   guarantee(_name != NULL, "alloc failure");
+   va_list ap;
+-  va_start(ap, format);  
++  va_start(ap, format);
+   jio_vsnprintf(_name, max_name_len, format, ap);
+   va_end(ap);
+ }
+@@ -1012,10 +1021,10 @@
+   assert(watcher_thread() == NULL, "we can only allocate one WatcherThread");
+   if (os::create_thread(this, os::watcher_thread)) {
+     _watcher_thread = this;
+-    
+-    // Set the watcher thread to the highest OS priority which should not be 
+-    // used, unless a Java thread with priority java.lang.Thread.MAX_PRIORITY 
+-    // is created. The only normal thread using this priority is the reference 
++
++    // Set the watcher thread to the highest OS priority which should not be
++    // used, unless a Java thread with priority java.lang.Thread.MAX_PRIORITY
++    // is created. The only normal thread using this priority is the reference
+     // handler thread, which runs for very short intervals only.
+     // If the VMThread's priority is not lower than the WatcherThread profiling
+     // will be inaccurate.
+@@ -1032,7 +1041,7 @@
+   this->record_stack_base_and_size();
+   this->initialize_thread_local_storage();
+   this->set_active_handles(JNIHandleBlock::allocate_block());
+-  while(!_should_terminate) {    
++  while(!_should_terminate) {
+     assert(watcher_thread() == Thread::current(),  "thread consistency check");
+     assert(watcher_thread() == this,  "thread consistency check");
+ 
+@@ -1047,14 +1056,14 @@
+       // rare cases, the error handler itself might deadlock. Here we try to
+       // kill JVM if the fatal error handler fails to abort in 2 minutes.
+       //
+-      // This code is in WatcherThread because WatcherThread wakes up 
+-      // periodically so the fatal error handler doesn't need to do anything; 
++      // This code is in WatcherThread because WatcherThread wakes up
++      // periodically so the fatal error handler doesn't need to do anything;
+       // also because the WatcherThread is less likely to crash than other
+       // threads.
+ 
+       for (;;) {
+-        if (!ShowMessageBoxOnError 
+-         && (OnError == NULL || OnError[0] == '\0') 
++        if (!ShowMessageBoxOnError
++         && (OnError == NULL || OnError[0] == '\0')
+          && Arguments::abort_hook() == NULL) {
+              os::sleep(this, 2 * 60 * 1000, false);
+              fdStream err(defaultStream::output_fd());
+@@ -1062,7 +1071,7 @@
+              // skip atexit/vm_exit/vm_abort hooks
+              os::die();
+         }
+- 
++
+         // Wake up 5 seconds later, the fatal handler may reset OnError or
+         // ShowMessageBoxOnError when it is ready to abort.
+         os::sleep(this, 5 * 1000, false);
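The loop above is a watchdog: if the fatal error handler wedges, the WatcherThread force-kills the process after a grace period, and otherwise re-polls every five seconds in case the handler's options change. The shape of the pattern, reduced to portable C++ (illustrative only; a single flag stands in for the ShowMessageBoxOnError/OnError/abort_hook tests, and std::_Exit for os::die):

    #include <atomic>
    #include <chrono>
    #include <cstdlib>
    #include <thread>

    std::atomic<bool> interactive_abort_possible{false};

    void watchdog_loop() {
      for (;;) {
        if (!interactive_abort_possible.load()) {
          // Grace period for the error handler, then die without running
          // atexit hooks -- the analogue of os::sleep() then os::die().
          std::this_thread::sleep_for(std::chrono::minutes(2));
          std::_Exit(1);
        }
        // The handler may reset its options when ready to abort; poll
        // again in five seconds, as the original loop does.
        std::this_thread::sleep_for(std::chrono::seconds(5));
      }
    }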
+@@ -1077,14 +1086,14 @@
+       _should_terminate = true;
+     }
+   }
+-  
++
+   // Signal that it is terminated
+   {
+     MutexLockerEx mu(Terminator_lock, Mutex::_no_safepoint_check_flag);
+     _watcher_thread = NULL;
+     Terminator_lock->notify();
+   }
+-  
++
+   // Thread destructor usually does this..
+   ThreadLocalStorage::set_thread(NULL);
+ }
+@@ -1093,14 +1102,14 @@
+   if (watcher_thread() == NULL) {
+     _should_terminate = false;
+     // Create the single instance of WatcherThread
+-    new WatcherThread();   
++    new WatcherThread();
+   }
+ }
+ 
+ void WatcherThread::stop() {
+   // it is ok to take late safepoints here, if needed
+   MutexLocker mu(Terminator_lock);
+-  _should_terminate = true;  
++  _should_terminate = true;
+   while(watcher_thread() != NULL) {
+     // This wait should make safepoint checks, wait without a timeout,
+     // and wait as a suspend-equivalent condition.
+@@ -1114,7 +1123,7 @@
+     //
+     Terminator_lock->wait(!Mutex::_no_safepoint_check_flag, 0,
+                           Mutex::_as_suspend_equivalent_flag);
+-  }  
++  }
+ }
+ 
+ void WatcherThread::print_on(outputStream* st) const {
+@@ -1130,7 +1139,7 @@
+ void JavaThread::initialize() {
+   // Initialize fields
+   set_saved_exception_pc(NULL);
+-  set_threadObj(NULL);  
++  set_threadObj(NULL);
+   _anchor.clear();
+   set_entry_point(NULL);
+   set_jni_functions(jni_functions());
+@@ -1143,8 +1152,8 @@
+   set_deopt_mark(NULL);
+   clear_must_deopt_id();
+   set_monitor_chunks(NULL);
+-  set_next(NULL);  
+-  set_thread_state(_thread_new);  
++  set_next(NULL);
++  set_thread_state(_thread_new);
+   _terminated = _not_terminated;
+   _privileged_stack_top = NULL;
+   _array_for_gc = NULL;
+@@ -1168,7 +1177,7 @@
+   _jni_active_critical = 0;
+   _do_not_unlock_if_synchronized = false;
+   _cached_monitor_info = NULL;
+-  _parker = Parker::Allocate(this) ; 
++  _parker = Parker::Allocate(this) ;
+ 
+ #ifndef PRODUCT
+   _jmp_ring_index = 0;
+@@ -1188,7 +1197,7 @@
+   }
+ 
+   // Setup safepoint state info for this thread
+-  ThreadSafepointState::create(this);  
++  ThreadSafepointState::create(this);
+ 
+   debug_only(_java_call_counter = 0);
+ 
+@@ -1212,7 +1221,7 @@
+ 
+   if (register_stack_overflow()) {
+     // For those architectures which have separate register and
+-    // memory stacks, we must check the register stack to see if 
++    // memory stacks, we must check the register stack to see if
+     // it has overflowed.
+     return false;
+   }
+@@ -1268,8 +1277,8 @@
+   //
+   // The thread is still suspended when we reach here. The thread must be explicitly
+   // started by its creator! Furthermore, the thread must also explicitly be added to the Threads list
+-  // by calling Threads:add. The reason why this is not done here, is because the thread 
+-  // object must be fully initialized (take a look at JVM_Start)  
++  // by calling Threads::add. The reason this is not done here is that the thread
++  // object must be fully initialized (take a look at JVM_Start)
+ }
+ 
+ JavaThread::~JavaThread() {
+@@ -1279,7 +1288,7 @@
+ 
+   // JSR166 -- return the parker to the free list
+   Parker::Release(_parker);
+-  _parker = NULL ; 
++  _parker = NULL ;
+ 
+   // Free any remaining  previous UnrollBlock
+   vframeArray* old_array = vframe_array_last();
+@@ -1303,8 +1312,8 @@
+     } while (deferred->length() != 0);
+     delete deferred;
+   }
+-  
+-  // All Java related clean up happens in exit  
++
++  // All Java related clean up happens in exit
+   ThreadSafepointState::destroy(this);
+   if (_thread_profiler != NULL) delete _thread_profiler;
+   if (_thread_stat != NULL) delete _thread_stat;
+@@ -1322,28 +1331,28 @@
+ 
+   // used to test validity of stack trace backs
+   this->record_base_of_stack_pointer();
+-  
++
+   // Record real stack base and size.
+   this->record_stack_base_and_size();
+ 
+   // Initialize thread local storage; set before calling MutexLocker
+-  this->initialize_thread_local_storage();  
++  this->initialize_thread_local_storage();
+ 
+   this->create_stack_guard_pages();
+-  
++
+   // Thread is now sufficiently initialized to be handled by the safepoint code as being
+-  // in the VM. Change thread state from _thread_new to _thread_in_vm  
+-  ThreadStateTransition::transition_and_fence(this, _thread_new, _thread_in_vm);  
+-  
++  // in the VM. Change thread state from _thread_new to _thread_in_vm
++  ThreadStateTransition::transition_and_fence(this, _thread_new, _thread_in_vm);
++
+   assert(JavaThread::current() == this, "sanity check");
+   assert(!Thread::current()->owns_locks(), "sanity check");
+ 
+   DTRACE_THREAD_PROBE(start, this);
+-  
++
+   // This operation might block. We call it after all safepoint checks for a new thread have
+   // been completed.
+   this->set_active_handles(JNIHandleBlock::allocate_block());
+-  
++
+   if (JvmtiExport::should_post_thread_life()) {
+     JvmtiExport::post_thread_start(this);
+   }
+@@ -1356,26 +1365,26 @@
+ }
+ 
+ 
+-void JavaThread::thread_main_inner() {  
++void JavaThread::thread_main_inner() {
+   assert(JavaThread::current() == this, "sanity check");
+   assert(this->threadObj() != NULL, "just checking");
+ 
+-  // Execute thread entry point. If this thread is being asked to restart, 
++  // Execute thread entry point. If this thread is being asked to restart,
+   // or has been stopped before starting, do not reexecute entry point.
+   // Note: Due to JVM_StopThread we can have pending exceptions already!
+   if (!this->has_pending_exception() && !java_lang_Thread::is_stillborn(this->threadObj())) {
+     // enter the thread's entry point only if we have no pending exceptions
+-    HandleMark hm(this);    
++    HandleMark hm(this);
+     this->entry_point()(this, this);
+   }
+ 
+   DTRACE_THREAD_PROBE(stop, this);
+-  
++
+   this->exit(false);
+   delete this;
+ }
+ 
+-  
++
+ static void ensure_join(JavaThread* thread) {
+   // We do not need to grab the Threads_lock, since we are operating on ourselves.
+   Handle threadObj(thread, thread->threadObj());
+@@ -1383,9 +1392,9 @@
+   ObjectLocker lock(threadObj, thread);
+   // Ignore pending exception (ThreadDeath), since we are exiting anyway
+   thread->clear_pending_exception();
+-  // It is of profound importance that we set the stillborn bit and reset the thread object, 
++  // It is of profound importance that we set the stillborn bit and reset the thread object,
+   // before we do the notify, since changing these two variables will make JVM_IsAlive return
+-  // false. So in case another thread is doing a join on this thread , it will detect that the thread 
++  // false. So if another thread is doing a join on this thread, it will detect that the thread
+   // is dead when it gets notified.
+   java_lang_Thread::set_stillborn(threadObj());
+   // Thread is exiting. So set thread_status field in  java.lang.Thread class to TERMINATED.
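ensure_join encodes an ordering rule: the liveness-visible state (stillborn bit, thread oop) must change before the notify, so a joiner that wakes observes the thread as already dead. A minimal sketch of that handshake with standard primitives (simplified, hypothetical names):

    #include <condition_variable>
    #include <mutex>

    std::mutex join_lock;
    std::condition_variable join_cv;
    bool terminated = false;

    void on_thread_exit() {
      std::lock_guard<std::mutex> g(join_lock);
      terminated = true;     // publish "dead" first, like set_stillborn()
      join_cv.notify_all();  // only then wake joiners, like lock.notify_all()
    }

    void join_thread() {
      std::unique_lock<std::mutex> g(join_lock);
      join_cv.wait(g, [] { return terminated; });  // wakes with isAlive()==false
    }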
+@@ -1407,7 +1416,7 @@
+   this->clear_pending_exception();
+   Handle threadObj(this, this->threadObj());
+   assert(threadObj.not_null(), "Java thread object should be created");
+-  
++
+   if (get_thread_profiler() != NULL) {
+     get_thread_profiler()->disengage();
+     ResourceMark rm;
+@@ -1418,19 +1427,19 @@
+   // FIXIT: This code should be moved into else part, when reliable 1.2/1.3 check is in place
+   {
+     EXCEPTION_MARK;
+-    
++
+     CLEAR_PENDING_EXCEPTION;
+   }
+   // FIXIT: The is_null check is only so it works better on JDK1.2 VM's. This
+   // has to be fixed by a runtime query method
+   if (!destroy_vm || JDK_Version::is_jdk12x_version()) {
+     // JSR-166: change call from ThreadGroup.uncaughtException to
+-    // java.lang.Thread.dispatchUncaughtException 
++    // java.lang.Thread.dispatchUncaughtException
+     if (uncaught_exception.not_null()) {
+       Handle group(this, java_lang_Thread::threadGroup(threadObj()));
+-      Events::log("uncaught exception INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT", 
++      Events::log("uncaught exception INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT",
+         (address)uncaught_exception(), (address)threadObj(), (address)group());
+-      { 
++      {
+         EXCEPTION_MARK;
+         // Check if the method Thread.dispatchUncaughtException() exists. If so
+         // call it.  Otherwise we have an older library without the JSR-166 changes,
+@@ -1446,19 +1455,19 @@
+         methodHandle method = callinfo.selected_method();
+         if (method.not_null()) {
+           JavaValue result(T_VOID);
+-          JavaCalls::call_virtual(&result, 
++          JavaCalls::call_virtual(&result,
+                                   threadObj, thread_klass,
+-                                  vmSymbolHandles::dispatchUncaughtException_name(), 
+-                                  vmSymbolHandles::throwable_void_signature(), 
++                                  vmSymbolHandles::dispatchUncaughtException_name(),
++                                  vmSymbolHandles::throwable_void_signature(),
+                                   uncaught_exception,
+                                   THREAD);
+         } else {
+           KlassHandle thread_group(THREAD, SystemDictionary::threadGroup_klass());
+           JavaValue result(T_VOID);
+-          JavaCalls::call_virtual(&result, 
++          JavaCalls::call_virtual(&result,
+                                   group, thread_group,
+-                                  vmSymbolHandles::uncaughtException_name(), 
+-                                  vmSymbolHandles::thread_throwable_void_signature(), 
++                                  vmSymbolHandles::uncaughtException_name(),
++                                  vmSymbolHandles::thread_throwable_void_signature(),
+                                   threadObj,           // Arg 1
+                                   uncaught_exception,  // Arg 2
+                                   THREAD);
+@@ -1466,7 +1475,7 @@
+         CLEAR_PENDING_EXCEPTION;
+       }
+     }
+- 
++
+     // Call Thread.exit(). We try 3 times in case we got another Thread.stop during
+     // the execution of the method. If that is not enough, then we don't really care. Thread.stop
+     // is deprecated anyhow.
+@@ -1475,14 +1484,14 @@
+         EXCEPTION_MARK;
+         JavaValue result(T_VOID);
+         KlassHandle thread_klass(THREAD, SystemDictionary::thread_klass());
+-        JavaCalls::call_virtual(&result, 
++        JavaCalls::call_virtual(&result,
+                               threadObj, thread_klass,
+-                              vmSymbolHandles::exit_method_name(), 
+-                              vmSymbolHandles::void_method_signature(), 
+-                              THREAD);  
++                              vmSymbolHandles::exit_method_name(),
++                              vmSymbolHandles::void_method_signature(),
++                              THREAD);
+         CLEAR_PENDING_EXCEPTION;
+       }
+-    }      
++    }
+ 
+     // notify JVMTI
+     if (JvmtiExport::should_post_thread_life()) {
+@@ -1519,7 +1528,7 @@
+   } else {
+     // before_exit() has already posted JVMTI THREAD_END events
+   }
+-  
++
+   // Notify waiters on thread object. This has to be done after exit() is called
+   // on the thread (if the thread is the last thread in a daemon ThreadGroup the
+   // group should have the destroyed bit set before waiters are notified).
+@@ -1562,8 +1571,8 @@
+     tlab().make_parsable(true);  // retire TLAB
+   }
+ 
+-  // Remove from list of active threads list, and notify VM thread if we are the last non-daemon thread 
+-  Threads::remove(this);  
++  // Remove from list of active threads list, and notify VM thread if we are the last non-daemon thread
++  Threads::remove(this);
+ }
+ 
+ void JavaThread::cleanup_failed_attach_current_thread() {
+@@ -1573,23 +1582,23 @@
+        ResourceMark rm;
+        get_thread_profiler()->print(get_thread_name());
+      }
+-    
++
+      if (active_handles() != NULL) {
+       JNIHandleBlock* block = active_handles();
+       set_active_handles(NULL);
+       JNIHandleBlock::release_block(block);
+      }
+-     
++
+      if (free_handle_block() != NULL) {
+        JNIHandleBlock* block = free_handle_block();
+        set_free_handle_block(NULL);
+        JNIHandleBlock::release_block(block);
+      }
+-    
++
+      if (UseTLAB) {
+        tlab().make_parsable(true);  // retire TLAB, if any
+      }
+- 
++
+      Threads::remove(this);
+      delete this;
+ }
+@@ -1609,9 +1618,9 @@
+   }
+ }
+ 
+-bool JavaThread::is_lock_owned(address adr) const {  
++bool JavaThread::is_lock_owned(address adr) const {
+   if (lock_is_in_stack(adr)) return true;
+-    
++
+   for (MonitorChunk* chunk = monitor_chunks(); chunk != NULL; chunk = chunk->next()) {
+     if (chunk->contains(adr)) return true;
+   }
+@@ -1632,7 +1641,7 @@
+   } else {
+     MonitorChunk* prev = monitor_chunks();
+     while (prev->next() != chunk) prev = prev->next();
+-    prev->set_next(chunk->next()); 
++    prev->set_next(chunk->next());
+   }
+ }
+ 
+@@ -1646,7 +1655,7 @@
+   if (has_last_Java_frame() && has_async_condition()) {
+     // If we are at a polling page safepoint (not a poll return)
+     // then we must defer async exception because live registers
+-    // will be clobbered by the exception path. Poll return is 
++    // will be clobbered by the exception path. Poll return is
+     // ok because the call we are returning from already collides
+     // with exception handling registers and so there is no issue.
+     // (The exception handling path kills call result registers but
+@@ -1661,11 +1670,11 @@
+       frame caller_fr = last_frame().sender(&map);
+       assert(caller_fr.is_compiled_frame(), "what?");
+       if (caller_fr.is_deoptimized_frame()) {
+-	if (TraceExceptions) {
+-	  ResourceMark rm;
+-	  tty->print_cr("deferred async exception at compiled safepoint");
+-	}
+-	return;
++        if (TraceExceptions) {
++          ResourceMark rm;
++          tty->print_cr("deferred async exception at compiled safepoint");
++        }
++        return;
+       }
+     }
+   }
+@@ -1687,21 +1696,21 @@
+     // Only overwrite an already pending exception, if it is not a threadDeath.
+     if (!has_pending_exception() || !pending_exception()->is_a(SystemDictionary::threaddeath_klass())) {
+ 
+-      // We cannot call Exceptions::_throw(...) here because we cannot block      
++      // We cannot call Exceptions::_throw(...) here because we cannot block
+       set_pending_exception(_pending_async_exception, __FILE__, __LINE__);
+-      
++
+       if (TraceExceptions) {
+-	ResourceMark rm;
+-	tty->print("Async. exception installed at runtime exit (" INTPTR_FORMAT ")", this);
+-	if (has_last_Java_frame() ) {
+-	  frame f = last_frame();
+-	  tty->print(" (pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " )", f.pc(), f.sp());
+-	}
+-	tty->print_cr(" of type: %s", instanceKlass::cast(_pending_async_exception->klass())->external_name());
++        ResourceMark rm;
++        tty->print("Async. exception installed at runtime exit (" INTPTR_FORMAT ")", this);
++        if (has_last_Java_frame() ) {
++          frame f = last_frame();
++          tty->print(" (pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " )", f.pc(), f.sp());
++        }
++        tty->print_cr(" of type: %s", instanceKlass::cast(_pending_async_exception->klass())->external_name());
+       }
+       _pending_async_exception = NULL;
+       clear_has_async_exception();
+-    }        
++    }
+   }
+ 
+   if (check_unsafe_error &&
+@@ -1710,20 +1719,20 @@
+     switch (thread_state()) {
+     case _thread_in_vm:
+       {
+-	JavaThread* THREAD = this;
+-	THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in an unsafe memory access operation");
++        JavaThread* THREAD = this;
++        THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in an unsafe memory access operation");
+       }
+     case _thread_in_native:
+       {
+-	ThreadInVMfromNative tiv(this);
+-	JavaThread* THREAD = this;
+-	THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in an unsafe memory access operation");
++        ThreadInVMfromNative tiv(this);
++        JavaThread* THREAD = this;
++        THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in an unsafe memory access operation");
+       }
+     case _thread_in_Java:
+       {
+-	ThreadInVMfromJava tiv(this);
+-	JavaThread* THREAD = this;
+-	THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in a recent unsafe memory access operation in compiled Java code");
++        ThreadInVMfromJava tiv(this);
++        JavaThread* THREAD = this;
++        THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in a recent unsafe memory access operation in compiled Java code");
+       }
+     default:
+       ShouldNotReachHere();
+@@ -1732,7 +1741,7 @@
+ 
+   assert(condition == _no_async_condition || has_pending_exception() ||
+          (!check_unsafe_error && condition == _async_unsafe_access_error),
+-	 "must have handled the async condition, if no exception");
++         "must have handled the async condition, if no exception");
+ }
+ 
+ void JavaThread::handle_special_runtime_exit_condition(bool check_asyncs) {
+@@ -1756,7 +1765,7 @@
+     // here as _thread_in_native_trans so we can't use a normal transition
+     // constructor/destructor pair because they assert on that type of
+     // transition. We could do something like:
+-    // 
++    //
+     // JavaThreadState state = thread_state();
+     // set_thread_state(_thread_in_vm);
+     // {
+@@ -1777,7 +1786,7 @@
+ 
+     // We might be here for reasons in addition to the self-suspend request
+     // so check for other async requests.
+-  }   
++  }
+ 
+   if (check_asyncs) {
+     check_and_handle_async_exceptions();
+@@ -1787,7 +1796,7 @@
+ void JavaThread::send_thread_stop(oop java_throwable)  {
+   assert(Thread::current()->is_VM_thread(), "should be in the vm thread");
+   assert(Threads_lock->is_locked(), "Threads_lock should be locked by safepoint code");
+-  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");  
++  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
+ 
+   // Do not throw asynchronous exceptions against the compiler thread
+   // (the compiler thread should not be a Java thread -- fix in 1.4.2)
+@@ -1808,18 +1817,18 @@
+       // may not be valid
+       if (has_last_Java_frame()) {
+         frame f = last_frame();
+-        if (f.is_runtime_frame() || f.is_safepoint_blob_frame()) {          
++        if (f.is_runtime_frame() || f.is_safepoint_blob_frame()) {
+           // BiasedLocking needs an updated RegisterMap for the revoke monitors pass
+           RegisterMap reg_map(this, UseBiasedLocking);
+           frame compiled_frame = f.sender(&reg_map);
+           if (compiled_frame.can_be_deoptimized()) {
+             Deoptimization::deoptimize(this, compiled_frame, &reg_map);
+-          }          
++          }
+         }
+       }
+-    
++
+       // Set async. pending exception in thread.
+-      set_pending_async_exception(java_throwable);    
++      set_pending_async_exception(java_throwable);
+ 
+       if (TraceExceptions) {
+        ResourceMark rm;
+@@ -1830,20 +1839,20 @@
+     }
+   }
+ 
+-  
++
+   // Interrupt thread so it will wake up from a potential wait()
+-  Thread::interrupt(this);  
++  Thread::interrupt(this);
+ }
+ 
+ // External suspension mechanism.
+ //
+-// Tell the VM to suspend a thread when ever it knows that it does not hold on 
++// Tell the VM to suspend a thread whenever it knows that it does not hold on
+ // to any VM_locks and it is at a transition
+ // Self-suspension will happen on the transition out of the vm.
+ // Catch "this" coming in from JNIEnv pointers when the thread has been freed
+ //
+ // Guarantees on return:
+-//   + Target thread will not execute any new bytecode (that's why we need to 
++//   + Target thread will not execute any new bytecode (that's why we need to
+ //     force a safepoint)
+ //   + Target thread will not enter any new monitors
+ //
+@@ -1865,8 +1874,8 @@
+     // Warning: is_ext_suspend_completed() may temporarily drop the
+     // SR_lock to allow the thread to reach a stable thread state if
+     // it is currently in a transient thread state.
+-    if (is_ext_suspend_completed(false /* !called_by_wait */, 
+-				 SuspendRetryDelay, &debug_bits) ) {
++    if (is_ext_suspend_completed(false /* !called_by_wait */,
++                                 SuspendRetryDelay, &debug_bits) ) {
+       return;
+     }
+   }
+@@ -1900,7 +1909,7 @@
+   assert(_anchor.walkable() ||
+     (is_Java_thread() && !((JavaThread*)this)->has_last_Java_frame()),
+     "must have walkable stack");
+-  
++
+   MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
+ 
+   assert(!this->is_any_suspended(),
+@@ -1942,12 +1951,12 @@
+ void JavaThread::verify_not_published() {
+   if (!Threads_lock->owned_by_self()) {
+    MutexLockerEx ml(Threads_lock,  Mutex::_no_safepoint_check_flag);
+-   assert( !Threads::includes(this), 
+-	   "java thread shouldn't have been published yet!");
++   assert( !Threads::includes(this),
++           "java thread shouldn't have been published yet!");
+   }
+   else {
+-   assert( !Threads::includes(this), 
+-	   "java thread shouldn't have been published yet!");
++   assert( !Threads::includes(this),
++           "java thread shouldn't have been published yet!");
+   }
+ }
+ #endif
+@@ -1983,11 +1992,11 @@
+     // wait until the thread changes to other thread state. There is no
+     // guarantee on how soon we can obtain the SR_lock and complete the
+     // self-suspend request. It would be a bad idea to let safepoint wait for
+-    // too long. Temporarily change the state to _thread_blocked to 
++    // too long. Temporarily change the state to _thread_blocked to
+     // let the VM thread know that this thread is ready for GC. The problem
+     // of changing thread state is that safepoint could happen just after
+-    // java_suspend_self() returns after being resumed, and VM thread will 
+-    // see the _thread_blocked state. We must check for safepoint 
++    // java_suspend_self() returns after being resumed, and VM thread will
++    // see the _thread_blocked state. We must check for safepoint
+     // after restoring the state and make sure we won't leave while a safepoint
+     // is in progress.
+     thread->set_thread_state(_thread_blocked);
+@@ -2000,7 +2009,7 @@
+         OrderAccess::fence();
+       } else {
+         // Must use this rather than serialization page in particular on Windows
+-        InterfaceSupport::serialize_memory(thread);      
++        InterfaceSupport::serialize_memory(thread);
+       }
+     }
+   }
+@@ -2045,12 +2054,12 @@
+   }
+ }
+ 
+-// We need to guarantee the Threads_lock here, since resumes are not 
++// We need to guarantee the Threads_lock here, since resumes are not
+ // allowed during safepoint synchronization
+ // Can only resume from an external suspension
+ void JavaThread::java_resume() {
+-  assert_locked_or_safepoint(Threads_lock);  
+-  
++  assert_locked_or_safepoint(Threads_lock);
++
+   // Sanity check: thread is gone, has started exiting or the thread
+   // was not externally suspended.
+   if (!Threads::includes(this) || is_exiting() || !is_external_suspend()) {
+@@ -2106,7 +2115,7 @@
+     if (os::unguard_memory((char *) low_addr, len)) {
+       _stack_guard_state = stack_guard_unused;
+     } else {
+-	warning("Attempt to unprotect stack guard pages failed.");
++        warning("Attempt to unprotect stack guard pages failed.");
+     }
+   }
+ }
+@@ -2118,7 +2127,7 @@
+   // The base notation is from the stacks point of view, growing downward.
+   // We need to adjust it to work correctly with guard_memory()
+   address base = stack_yellow_zone_base() - stack_yellow_zone_size();
+-  
++
+   guarantee(base < stack_base(),"Error calculating stack yellow zone");
+   guarantee(base < os::current_stack_pointer(),"Error calculating stack yellow zone");
+ 
+@@ -2140,7 +2149,7 @@
+   // The base notation is from the stacks point of view, growing downward.
+   // We need to adjust it to work correctly with guard_memory()
+   address base = stack_yellow_zone_base() - stack_yellow_zone_size();
+-  
++
+   if (os::unguard_memory((char *)base, stack_yellow_zone_size())) {
+     _stack_guard_state = stack_guard_yellow_disabled;
+   } else {
+@@ -2154,7 +2163,7 @@
+   // We need to adjust it to work correctly with guard_memory()
+   assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
+   address base = stack_red_zone_base() - stack_red_zone_size();
+-  
++
+   guarantee(base < stack_base(),"Error calculating stack red zone");
+   guarantee(base < os::current_stack_pointer(),"Error calculating stack red zone");
+ 
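Both zones are ordinary stack pages re-protected so that a touch traps; os::guard_memory and os::unguard_memory wrap the platform call. On POSIX the underlying primitive looks like this (a sketch of the idea only, not HotSpot's os:: layer):

    #include <cstddef>
    #include <sys/mman.h>

    // base must be page-aligned, as the zone bases above are.
    bool guard_zone(void* base, size_t len) {
      // PROT_NONE makes any access into the zone fault; the VM's signal
      // handler can then classify the fault as a stack overflow.
      return mprotect(base, len, PROT_NONE) == 0;
    }

    bool unguard_zone(void* base, size_t len) {
      return mprotect(base, len, PROT_READ | PROT_WRITE) == 0;
    }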
+@@ -2176,11 +2185,11 @@
+ void JavaThread::frames_do(void f(frame*, const RegisterMap* map)) {
+   // ignore if there is no stack
+   if (!has_last_Java_frame()) return;
+-  // traverse the stack frames. Starts from top frame.  
++  // traverse the stack frames. Starts from top frame.
+   for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
+-    frame* fr = fst.current();    
+-    f(fr, fst.register_map());    
+-  }  
++    frame* fr = fst.current();
++    f(fr, fst.register_map());
++  }
+ }
+ 
+ 
+@@ -2277,7 +2286,7 @@
+ }
+ 
+ 
+-void JavaThread::oops_do(OopClosure* f) {  
++void JavaThread::oops_do(OopClosure* f) {
+   // The ThreadProfiler oops_do is done from FlatProfiler::oops_do
+   // since there may be more than one thread using each ThreadProfiler.
+ 
+@@ -2306,11 +2315,11 @@
+       chunk->oops_do(f);
+     }
+ 
+-    // Traverse the execution stack    
++    // Traverse the execution stack
+     for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
+       fst.current()->oops_do(f, fst.register_map());
+     }
+-  } 
++  }
+ 
+   // callee_target is never live across a gc point so NULL it here should
+   // it still contain a methodOop.
+@@ -2340,7 +2349,7 @@
+   }
+ }
+ 
+-void JavaThread::nmethods_do() {  
++void JavaThread::nmethods_do() {
+   // Traverse the GCHandles
+   Thread::nmethods_do();
+ 
+@@ -2348,11 +2357,11 @@
+           (has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
+ 
+   if (has_last_Java_frame()) {
+-    // Traverse the execution stack    
++    // Traverse the execution stack
+     for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
+       fst.current()->nmethods_do();
+     }
+-  } 
++  }
+ }
+ 
+ // Printing
+@@ -2461,10 +2470,10 @@
+     typeArrayOop name = java_lang_Thread::name(thread_obj);
+     if (name != NULL) {
+       if (buf == NULL) {
+-	name_str = UNICODE::as_utf8((jchar*) name->base(T_CHAR), name->length());
++        name_str = UNICODE::as_utf8((jchar*) name->base(T_CHAR), name->length());
+       }
+       else {
+-	name_str = UNICODE::as_utf8((jchar*) name->base(T_CHAR), name->length(), buf, buflen);
++        name_str = UNICODE::as_utf8((jchar*) name->base(T_CHAR), name->length(), buf, buflen);
+       }
+     }
+     else if (is_attaching()) { // workaround for 6412693 - see 6404306
+@@ -2508,11 +2517,11 @@
+       oop parent = java_lang_ThreadGroup::parent(thread_group);
+       if (parent != NULL) {
+         typeArrayOop name = java_lang_ThreadGroup::name(parent);
+-	// ThreadGroup.name can be null
+-	if (name != NULL) {
++        // ThreadGroup.name can be null
++        if (name != NULL) {
+           const char* str = UNICODE::as_utf8((jchar*) name->base(T_CHAR), name->length());
+           return str;
+-	}
++        }
+       }
+     }
+   }
+@@ -2539,13 +2548,13 @@
+   // Set the Java level thread object (jthread) field of the
+   // new thread (a JavaThread *) to C++ thread object using the
+   // "thread_oop" handle.
+-  
++
+   // Set the thread field (a JavaThread *) of the
+   // oop representing the java_lang_Thread to the new thread (a JavaThread *).
+ 
+   Handle thread_oop(Thread::current(),
+                     JNIHandles::resolve_non_null(jni_thread));
+-  assert(instanceKlass::cast(thread_oop->klass())->is_linked(), 
++  assert(instanceKlass::cast(thread_oop->klass())->is_linked(),
+     "must be initialized");
+   set_threadObj(thread_oop());
+   java_lang_Thread::set_thread(thread_oop(), this);
+@@ -2558,9 +2567,9 @@
+   // Push the Java priority down to the native thread; needs Threads_lock
+   Thread::set_priority(this, prio);
+ 
+-  // Add the new thread to the Threads list and set it in motion. 
+-  // We must have threads lock in order to call Threads::add. 
+-  // It is crucial that we do not block before the thread is 
++  // Add the new thread to the Threads list and set it in motion.
++  // We must have threads lock in order to call Threads::add.
++  // It is crucial that we do not block before the thread is
+   // added to the Threads list for if a GC happens, then the java_thread oop
+   // will not be visited by GC.
+   Threads::add(this);
+@@ -2576,7 +2585,7 @@
+ }
+ 
+ 
+-void JavaThread::print_stack_on(outputStream* st) {  
++void JavaThread::print_stack_on(outputStream* st) {
+   if (!has_last_Java_frame()) return;
+   ResourceMark rm;
+   HandleMark   hm;
+@@ -2588,7 +2597,7 @@
+     if (f->is_java_frame()) {
+       javaVFrame* jvf = javaVFrame::cast(f);
+       java_lang_Throwable::print_stack_element(st, jvf->method(), jvf->bci());
+-      
++
+       // Print out lock information
+       if (JavaMonitorsInStackTrace) {
+         jvf->print_lock_info_on(st, count);
+@@ -2599,7 +2608,7 @@
+ 
+     // Bail-out case for too deep stacks
+     count++;
+-    if (MaxJavaStackTraceDepth == count) return;    
++    if (MaxJavaStackTraceDepth == count) return;
+   }
+ }
+ 
+@@ -2687,12 +2696,12 @@
+ }
+ 
+ 
+-klassOop JavaThread::security_get_caller_class(int depth) {  
++klassOop JavaThread::security_get_caller_class(int depth) {
+   vframeStream vfst(this);
+   vfst.security_get_caller_frame(depth);
+   if (!vfst.at_end()) {
+     return vfst.method()->method_holder();
+-  }    
++  }
+   return NULL;
+ }
+ 
+@@ -2709,6 +2718,10 @@
+   _task  = NULL;
+   _queue = queue;
+   _counters = counters;
++
++#ifndef PRODUCT
++  _ideal_graph_printer = NULL;
++#endif
+ }
+ 
+ 
+@@ -2747,7 +2760,7 @@
+ }
+ 
+ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
+-  
++
+   // Check version
+   if (!is_supported_jni_version(args->version)) return JNI_EVERSION;
+ 
+@@ -2814,10 +2827,10 @@
+   main_thread->set_thread_state(_thread_in_vm);
+   // must do this before set_active_handles and initialize_thread_local_storage
+   // Note: on solaris initialize_thread_local_storage() will (indirectly)
+-  // change the stack size recorded here to one based on the java thread 
++  // change the stack size recorded here to one based on the java thread
+   // stacksize. This adjusted size is what is used to figure the placement
+   // of the guard pages.
+-  main_thread->record_stack_base_and_size(); 
++  main_thread->record_stack_base_and_size();
+   main_thread->initialize_thread_local_storage();
+ 
+   main_thread->set_active_handles(JNIHandleBlock::allocate_block());
+@@ -2834,10 +2847,8 @@
+   // crash Linux VM, see notes in os_linux.cpp.
+   main_thread->create_stack_guard_pages();
+ 
+-  HandleMark hm;
+-
+   // Initialize Java-level synchronization subsystem
+-  ObjectSynchronizer::Initialize() ; 
++  ObjectSynchronizer::Initialize() ;
+ 
+   // Initialize global modules
+   jint status = init_globals();
+@@ -2847,6 +2858,8 @@
+     return status;
+   }
+ 
++  HandleMark hm;
++
+   { MutexLocker mu(Threads_lock);
+     Threads::add(main_thread);
+   }
+@@ -2861,11 +2874,11 @@
+     Universe::verify();   // make sure we're starting with a clean slate
+   }
+ 
+-  // Create the VMThread  
++  // Create the VMThread
+   { TraceTime timer("Start VMThread", TraceStartupTime);
+     VMThread::create();
+     Thread* vmthread = VMThread::vm_thread();
+-    
++
+     if (!os::create_thread(vmthread, os::vm_thread))
+       vm_exit_during_initialization("Cannot create VM thread. Out of system resources.");
+ 
+@@ -2879,7 +2892,7 @@
+       }
+     }
+   }
+-  
++
+   assert (Universe::is_fully_initialized(), "not initialized");
+   EXCEPTION_MARK;
+ 
+@@ -2913,7 +2926,7 @@
+     }
+ 
+     // Initialize java_lang.System (needed before creating the thread)
+-    if (InitializeJavaLangSystem) {      
++    if (InitializeJavaLangSystem) {
+       initialize_class(vmSymbolHandles::java_lang_System(), CHECK_0);
+       initialize_class(vmSymbolHandles::java_lang_ThreadGroup(), CHECK_0);
+       Handle thread_group = create_initial_thread_group(CHECK_0);
+@@ -2921,11 +2934,11 @@
+       initialize_class(vmSymbolHandles::java_lang_Thread(), CHECK_0);
+       oop thread_object = create_initial_thread(thread_group, main_thread, CHECK_0);
+       main_thread->set_threadObj(thread_object);
+-      // Set thread status to running since main thread has 
++      // Set thread status to running since main thread has
+       // been started and running.
+-      java_lang_Thread::set_thread_status(thread_object, 
+-					  java_lang_Thread::RUNNABLE);
+-  
++      java_lang_Thread::set_thread_status(thread_object,
++                                          java_lang_Thread::RUNNABLE);
++
+       // The VM preresolves methods of these classes. Make sure they get initialized
+       initialize_class(vmSymbolHandles::java_lang_reflect_Method(), CHECK_0);
+       initialize_class(vmSymbolHandles::java_lang_ref_Finalizer(),  CHECK_0);
+@@ -2943,8 +2956,8 @@
+       initialize_class(vmSymbolHandles::java_lang_ClassCastException(), CHECK_0);
+       initialize_class(vmSymbolHandles::java_lang_ArrayStoreException(), CHECK_0);
+       initialize_class(vmSymbolHandles::java_lang_ArithmeticException(), CHECK_0);
+-      initialize_class(vmSymbolHandles::java_lang_StackOverflowError(), CHECK_0);      
+-      initialize_class(vmSymbolHandles::java_lang_IllegalMonitorStateException(), CHECK_0);      
++      initialize_class(vmSymbolHandles::java_lang_StackOverflowError(), CHECK_0);
++      initialize_class(vmSymbolHandles::java_lang_IllegalMonitorStateException(), CHECK_0);
+     } else {
+       warning("java.lang.OutOfMemoryError has not been initialized");
+       warning("java.lang.NullPointerException has not been initialized");
+@@ -2954,34 +2967,34 @@
+       warning("java.lang.StackOverflowError has not been initialized");
+     }
+   }
+-  
+-  // See        : bugid 4211085.  
+-  // Background : the static initializer of java.lang.Compiler tries to read 
++
++  // See        : bugid 4211085.
++  // Background : the static initializer of java.lang.Compiler tries to read
+   //              property"java.compiler" and read & write property "java.vm.info".
+   //              When a security manager is installed through the command line
+   //              option "-Djava.security.manager", the above properties are not
+   //              readable and the static initializer for java.lang.Compiler fails
+   //              resulting in a NoClassDefFoundError.  This can happen in any
+-  //              user code which calls methods in java.lang.Compiler.  
++  //              user code which calls methods in java.lang.Compiler.
+   // Hack :       the hack is to pre-load and initialize this class, so that only
+   //              system domains are on the stack when the properties are read.
+   //              Currently even the AWT code has calls to methods in java.lang.Compiler.
+   //              On the classic VM, java.lang.Compiler is loaded very early to load the JIT.
+-  // Future Fix : the best fix is to grant everyone permissions to read "java.compiler" and 
++  // Future Fix : the best fix is to grant everyone permissions to read "java.compiler" and
+   //              read and write"java.vm.info" in the default policy file. See bugid 4211383
+   //              Once that is done, we should remove this hack.
+   initialize_class(vmSymbolHandles::java_lang_Compiler(), CHECK_0);
+-  
++
+   // More hackery - the static initializer of java.lang.Compiler adds the string "nojit" to
+   // the java.vm.info property if no jit gets loaded through java.lang.Compiler (the hotspot
+-  // compiler does not get loaded through java.lang.Compiler).  "java -version" with the 
++  // compiler does not get loaded through java.lang.Compiler).  "java -version" with the
+   // hotspot vm says "nojit" all the time which is confusing.  So, we reset it here.
+   // This should also be taken out as soon as 4211383 gets fixed.
+   reset_vm_info_property(CHECK_0);
+-  
++
+   quicken_jni_functions();
+ 
+-  // Set flag that basic initialization has completed. Used by exceptions and various 
++  // Set flag that basic initialization has completed. Used by exceptions and various
+   // debug stuff, that does not work until all basic classes have been initialized.
+   set_init_completed();
+ 
+@@ -2991,15 +3004,15 @@
+   Management::record_vm_init_completed();
+ 
+   // Compute system loader. Note that this has to occur after set_init_completed, since
+-  // valid exceptions may be thrown in the process. 
+-  // Note that we do not use CHECK_0 here since we are inside an EXCEPTION_MARK and 
++  // valid exceptions may be thrown in the process.
++  // Note that we do not use CHECK_0 here since we are inside an EXCEPTION_MARK and
+   // set_init_completed has just been called, causing exceptions not to be shortcut
+   // anymore. We call vm_exit_during_initialization directly instead.
+   SystemDictionary::compute_java_system_loader(THREAD);
+   if (HAS_PENDING_EXCEPTION) {
+     vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
+   }
+-  
++
+ #ifndef SERIALGC
+   // Support for ConcurrentMarkSweep. This should be cleaned up
+   // and better encapsulated. XXX YSR
+@@ -3026,7 +3039,7 @@
+   }
+ 
+   // Launch -Xrun agents
+-  // Must be done in the JVMTI live phase so that for backward compatibility the JDWP 
++  // Must be done in the JVMTI live phase so that for backward compatibility the JDWP
+   // back-end can launch with -Xdebug -Xrunjdwp.
+   if (!EagerXrunInit && Arguments::init_libraries_at_startup()) {
+     create_vm_init_libraries();
+@@ -3042,7 +3055,7 @@
+ 
+   Management::initialize(THREAD);
+   if (HAS_PENDING_EXCEPTION) {
+-    // management agent fails to start possibly due to 
++    // management agent fails to start possibly due to
+     // configuration problem and is responsible for printing
+     // stack trace if appropriate. Simply exit VM.
+     vm_exit(1);
+@@ -3050,8 +3063,8 @@
+ 
+   if (Arguments::has_profile())       FlatProfiler::engage(main_thread, true);
+   if (Arguments::has_alloc_profile()) AllocationProfiler::engage();
+-  if (MemProfiling)                   MemProfiler::engage();    
+-  StatSampler::engage();    
++  if (MemProfiling)                   MemProfiler::engage();
++  StatSampler::engage();
+   if (CheckJNICalls)                  JniPeriodicChecker::engage();
+   if (CacheTimeMillis)                TimeMillisUpdateTask::engage();
+ 
+@@ -3096,6 +3109,28 @@
+       // Try to load the agent from the standard dll directory
+       hpi::dll_build_name(buffer, sizeof(buffer), Arguments::get_dll_dir(), name);
+       library = hpi::dll_load(buffer, ebuf, sizeof ebuf);
++#ifdef KERNEL
++      // Download instrument dll
++      if (library == NULL && strcmp(name, "instrument") == 0) {
++        char *props = Arguments::get_kernel_properties();
++        char *home  = Arguments::get_java_home();
++        const char *fmt   = "%s/bin/java %s -Dkernel.background.download=false"
++                      " sun.jkernel.DownloadManager -download client_jvm";
++        int length = strlen(props) + strlen(home) + strlen(fmt) + 1;
++        char *cmd = AllocateHeap(length);
++        jio_snprintf(cmd, length, fmt, home, props);
++        int status = os::fork_and_exec(cmd);
++        FreeHeap(props);
++        FreeHeap(cmd);
++        if (status == -1) {
++          warning(cmd);
++          vm_exit_during_initialization("fork_and_exec failed: %s",
++                                         strerror(errno));
++        }
++        // when this comes back the instrument.dll should be where it belongs.
++        library = hpi::dll_load(buffer, ebuf, sizeof ebuf);
++      }
++#endif // KERNEL
+       if (library == NULL) { // Try the local directory
+         char ns[1] = {0};
+         hpi::dll_build_name(buffer, sizeof(buffer), ns, name);
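The KERNEL hunk above assembles a java command line into a heap buffer, shells out with os::fork_and_exec, and then retries the dll load once the download completes. Stripped of the os::/hpi:: layers, the shape is plain C string formatting plus system() (a sketch with a hypothetical helper and abbreviated flags):

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    bool download_and_retry(const char* home, const char* props) {
      const char* fmt = "%s/bin/java %s sun.jkernel.DownloadManager"
                        " -download client_jvm";
      // strlen-based sizing overestimates slightly (the %s holes are
      // counted too), exactly as in the patch; +1 covers the NUL.
      size_t len = strlen(home) + strlen(props) + strlen(fmt) + 1;
      char* cmd = (char*) malloc(len);
      if (cmd == NULL) return false;
      snprintf(cmd, len, fmt, home, props);
      int status = system(cmd);  // stands in for os::fork_and_exec(cmd)
      free(cmd);
      return status != -1;       // on success, retry the dll load
    }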
+@@ -3160,7 +3195,7 @@
+ // Invokes Agent_OnLoad
+ // Called very early -- before JavaThreads exist
+ void Threads::create_vm_init_agents() {
+-  extern struct JavaVM_ main_vm; 
++  extern struct JavaVM_ main_vm;
+   AgentLibrary* agent;
+ 
+   JvmtiExport::enter_onload_phase();
+@@ -3180,6 +3215,33 @@
+   JvmtiExport::enter_primordial_phase();
+ }
+ 
++extern "C" {
++  typedef void (JNICALL *Agent_OnUnload_t)(JavaVM *);
++}
++
++void Threads::shutdown_vm_agents() {
++  // Send any Agent_OnUnload notifications
++  const char *on_unload_symbols[] = AGENT_ONUNLOAD_SYMBOLS;
++  extern struct JavaVM_ main_vm;
++  for (AgentLibrary* agent = Arguments::agents(); agent != NULL; agent = agent->next()) {
++
++    // Find the Agent_OnUnload function.
++    for (uint symbol_index = 0; symbol_index < ARRAY_SIZE(on_unload_symbols); symbol_index++) {
++      Agent_OnUnload_t unload_entry = CAST_TO_FN_PTR(Agent_OnUnload_t,
++               hpi::dll_lookup(agent->os_lib(), on_unload_symbols[symbol_index]));
++
++      // Invoke the Agent_OnUnload function
++      if (unload_entry != NULL) {
++        JavaThread* thread = JavaThread::current();
++        ThreadToNativeFromVM ttn(thread);
++        HandleMark hm(thread);
++        (*unload_entry)(&main_vm);
++        break;
++      }
++    }
++  }
++}
++
+ // Called after the VM is initialized, for -Xrun libraries which have not been converted to agent libraries
+ // Invokes JVM_OnLoad
+ void Threads::create_vm_init_libraries() {
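shutdown_vm_agents probes a table of candidate symbol names (AGENT_ONUNLOAD_SYMBOLS) because the exported name can differ by platform decoration, and the first hit wins. With POSIX loading the same probe reads as below (a sketch; hpi::dll_lookup stands behind dlsym here, and the decorated "_Agent_OnUnload" variant is an assumption for illustration):

    #include <cstddef>
    #include <dlfcn.h>

    typedef void (*Agent_OnUnload_t)(void* vm);

    void call_unload(void* lib_handle, void* vm) {
      // Candidate export names; decoration varies by platform ABI.
      const char* names[] = { "Agent_OnUnload", "_Agent_OnUnload" };
      for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
        if (Agent_OnUnload_t fn =
                (Agent_OnUnload_t) dlsym(lib_handle, names[i])) {
          fn(vm);  // notify the agent once, then stop probing
          break;
        }
      }
    }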
+@@ -3236,9 +3298,9 @@
+   CLEAR_PENDING_EXCEPTION;
+ }
+ 
+-// Threads::destroy_vm() is normally called from jni_DestroyJavaVM() when 
+-// the program falls off the end of main(). Another VM exit path is through 
+-// vm_exit() when the program calls System.exit() to return a value or when 
++// Threads::destroy_vm() is normally called from jni_DestroyJavaVM() when
++// the program falls off the end of main(). Another VM exit path is through
++// vm_exit() when the program calls System.exit() to return a value or when
+ // there is a serious error in VM. The two shutdown paths are not exactly
+ // the same, but they share Shutdown.shutdown() at Java level and before_exit()
+ // and VM_Exit op at VM level.
+@@ -3322,10 +3384,15 @@
+     VMThread::destroy();
+   }
+ 
++  // clean up ideal graph printers
++#if defined(COMPILER2) && !defined(PRODUCT)
++  IdealGraphPrinter::clean_up();
++#endif
++
+   // Now, all Java threads are gone except daemon threads. Daemon threads
+   // running Java code or in VM are stopped by the Safepoint. However,
+   // daemon threads executing native code are still running.  But they
+-  // will be stopped at native=>Java/VM barriers. Note that we can't 
++  // will be stopped at native=>Java/VM barriers. Note that we can't
+   // simply kill or suspend them, as it is inherently deadlock-prone.
+ 
+ #ifndef PRODUCT
+@@ -3366,7 +3433,7 @@
+ void Threads::add(JavaThread* p, bool force_daemon) {
+   // The threads lock must be owned at this point
+   assert_locked_or_safepoint(Threads_lock);
+-  p->set_next(_thread_list);  
++  p->set_next(_thread_list);
+   _thread_list = p;
+   _number_of_threads++;
+   oop threadObj = p->threadObj();
+@@ -3384,14 +3451,14 @@
+   Events::log("Thread added: " INTPTR_FORMAT, p);
+ }
+ 
+-void Threads::remove(JavaThread* p) {  
++void Threads::remove(JavaThread* p) {
+   // Extra scope needed for Thread_lock, so we can check
+   // that we do not remove a thread without the safepoint code noticing
+   { MutexLocker ml(Threads_lock);
+-  
++
+     assert(includes(p), "p must be present");
+ 
+-    JavaThread* current = _thread_list; 
++    JavaThread* current = _thread_list;
+     JavaThread* prev    = NULL;
+ 
+     while (current != p) {
+@@ -3403,7 +3470,7 @@
+       prev->set_next(current->next());
+     } else {
+       _thread_list = p->next();
+-    }  
++    }
+     _number_of_threads--;
+     oop threadObj = p->threadObj();
+     bool daemon = true;
+@@ -3413,24 +3480,24 @@
+ 
+       // Only one thread left, do a notify on the Threads_lock so a thread waiting
+       // on destroy_vm will wake up.
+-      if (number_of_non_daemon_threads() == 1) 
++      if (number_of_non_daemon_threads() == 1)
+         Threads_lock->notify_all();
+-    }    
++    }
+     ThreadService::remove_thread(p, daemon);
+ 
+     // Make sure that safepoint code disregards this thread. This is needed since
+     // the thread might mess around with locks after this point. This can cause it
+     // to do callbacks into the safepoint code. However, the safepoint code is not aware
+     // of this thread since it is removed from the queue.
+-    p->set_terminated_value();        
+-  } // unlock Threads_lock 
++    p->set_terminated_value();
++  } // unlock Threads_lock
+ 
+   // Since Events::log uses a lock, we grab it outside the Threads_lock
+   Events::log("Thread exited: " INTPTR_FORMAT, p);
+ }
+ 
+ // Threads_lock must be held when this is called (or must be called during a safepoint)
+-bool Threads::includes(JavaThread* p) {  
++bool Threads::includes(JavaThread* p) {
+   assert(Threads_lock->is_locked(), "sanity check");
+   ALL_JAVA_THREADS(q) {
+     if (q == p ) {
+@@ -3444,7 +3511,7 @@
+ // but the garbage collector must provide a safe context for them to run.
+ // In particular, these things should never be called when the Threads_lock
+ // is held by some other thread. (Note: the Safepoint abstraction also
+-// uses the Threads_lock to gurantee this property. It also makes sure that  
++// uses the Threads_lock to guarantee this property. It also makes sure that
+// all threads get blocked when exiting or starting).
+ 
+ void Threads::oops_do(OopClosure* f) {
+@@ -3503,7 +3570,7 @@
+ }
+ 
+ void Threads::gc_prologue() {
+-  ALL_JAVA_THREADS(p) {    
++  ALL_JAVA_THREADS(p) {
+     p->gc_prologue();
+   }
+ }
+@@ -3540,8 +3607,8 @@
+ 
+ 
+ JavaThread *Threads::owning_thread_from_monitor_owner(address owner, bool doLock) {
+-  assert(doLock || 
+-         Threads_lock->owned_by_self() || 
++  assert(doLock ||
++         Threads_lock->owned_by_self() ||
+          SafepointSynchronize::is_at_safepoint(),
+          "must grab Threads_lock or be at safepoint");
+ 
+@@ -3589,7 +3656,7 @@
+   return the_owner;
+ }
+ 
+-// Threads::print_on() is called at safepoint by VM_PrintThreads operation. 
++// Threads::print_on() is called at safepoint by VM_PrintThreads operation.
+ void Threads::print_on(outputStream* st, bool print_stacks, bool internal_format, bool print_concurrent_locks) {
+   char buf[32];
+   st->print_cr(os::local_time_string(buf, sizeof(buf)));
+@@ -3639,7 +3706,7 @@
+ 
+ // Threads::print_on_error() is called by fatal error handler. It's possible
+ // that VM is not at safepoint and/or current thread is inside signal handler.
+-// Don't print stack trace, as the stack may not be walkable. Don't allocate 
++// Don't print stack trace, as the stack may not be walkable. Don't allocate
+ // memory (even in resource area), it might deadlock the error handler.
+ void Threads::print_on_error(outputStream* st, Thread* current, char* buf, int buflen) {
+   bool found_current = false;
+@@ -3689,17 +3756,18 @@
+ 
+ 
+ // Lifecycle management for TSM ParkEvents.
+-// ParkEvents are type-stable (TSM). 
+-// 
++// ParkEvents are type-stable (TSM).
++// In our particular implementation they happen to be immortal.
++//
+ // We manage concurrency on the FreeList with a CAS-based
+ // detach-modify-reattach idiom that avoids the ABA problems
+ // that would otherwise be present in a simple CAS-based
+-// push-pop implementation. 
++// push-pop implementation.   (push-one and pop-all)
+ //
+ // Caveat: Allocate() and Release() may be called from threads
+ // other than the thread associated with the Event!
+-// If we need to call Allocate() when running as the thread in 
+-// question then look for the PD calls to initialize native TLS.  
++// If we need to call Allocate() when running as the thread in
++// question then look for the PD calls to initialize native TLS.
+ // Native TLS (Win32/Linux/Solaris) can only be initialized or
+ // accessed by the associated thread.
+ // See also pd_initialize().
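The comments above name the idiom exactly: never CAS-pop a single node, which is ABA-prone; instead detach the whole list in one atomic step, mutate it privately, and CAS the remainder back, merging any new arrivals. A compact sketch with std::atomic (simplified Node type, illustrative only):

    #include <atomic>

    struct Node { Node* next; };
    std::atomic<Node*> free_list{nullptr};

    Node* allocate() {
      // 1: Detach -- privatize the whole list in one atomic step.
      Node* head = free_list.exchange(nullptr);
      if (head == nullptr) return new Node();  // materialize, as Allocate() does
      // 2: Extract -- pop the first element; the rest stays private.
      Node* rest = head->next;
      // 3: Reattach the residue; 4: merge any nodes that arrived meanwhile.
      while (rest != nullptr) {
        Node* expected = nullptr;
        if (free_list.compare_exchange_strong(expected, rest)) break;
        Node* arrivals = free_list.exchange(nullptr);
        Node* tail = rest;
        while (tail->next != nullptr) tail = tail->next;
        tail->next = arrivals;
      }
      head->next = nullptr;
      return head;
    }

    void release(Node* n) {
      // Push-one is safe here: the list only sees pop-all and push-one,
      // never pop-one, so the ABA hazard of a naive pop never arises.
      Node* head = free_list.load();
      do { n->next = head; } while (!free_list.compare_exchange_weak(head, n));
    }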
+@@ -3708,94 +3776,112 @@
+ // until the 1st time the thread calls park().  unpark() calls to
+ // an unprovisioned thread would be ignored.  The first park() call
+ // for a thread would allocate and associate a ParkEvent and return
+-// immediately.  
++// immediately.
+ 
+-volatile int ParkEvent::ListLock = 0 ; 
+-ParkEvent * volatile ParkEvent::FreeList = NULL ; 
++volatile int ParkEvent::ListLock = 0 ;
++ParkEvent * volatile ParkEvent::FreeList = NULL ;
+ 
+-ParkEvent * ParkEvent::Allocate (Thread * t) { 
+-  guarantee (t != NULL, "invariant") ; 
+-  ParkEvent * ev ; 
++ParkEvent * ParkEvent::Allocate (Thread * t) {
++  // In rare cases -- JVM_RawMonitor* operations -- we can find t == null.
++  ParkEvent * ev ;
+ 
+   // Start by trying to recycle an existing but unassociated
+   // ParkEvent from the global free list.
+-  for (;;) { 
+-    ev = FreeList ; 
+-    if (ev == NULL) break ; 
+-    // 1: Detach
++  for (;;) {
++    ev = FreeList ;
++    if (ev == NULL) break ;
++    // 1: Detach - sequester or privatize the list
+     // Tantamount to ev = Swap (&FreeList, NULL)
+     if (Atomic::cmpxchg_ptr (NULL, &FreeList, ev) != ev) {
+-       continue ; 
++       continue ;
+     }
+ 
+     // We've detached the list.  The list in-hand is now
+     // local to this thread.   This thread can operate on the
+     // list without risk of interference from other threads.
+     // 2: Extract -- pop the 1st element from the list.
+-    ParkEvent * List = ev->FreeNext ; 
+-    if (List == NULL) break ; 
+-    for (;;) { 
++    ParkEvent * List = ev->FreeNext ;
++    if (List == NULL) break ;
++    for (;;) {
+         // 3: Try to reattach the residual list
+-        guarantee (List != NULL, "invariant") ; 
+-        ParkEvent * Arv =  (ParkEvent *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ; 
+-        if (Arv == NULL) break ; 
++        guarantee (List != NULL, "invariant") ;
++        ParkEvent * Arv =  (ParkEvent *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
++        if (Arv == NULL) break ;
+ 
+         // New nodes arrived.  Try to detach the recent arrivals.
+-        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) { 
+-            continue ; 
++        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
++            continue ;
+         }
+-        guarantee (Arv != NULL, "invariant") ; 
++        guarantee (Arv != NULL, "invariant") ;
+         // 4: Merge Arv into List
+-        ParkEvent * Tail = List ; 
+-        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ; 
+-        Tail->FreeNext = Arv ; 
++        ParkEvent * Tail = List ;
++        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
++        Tail->FreeNext = Arv ;
+     }
+-    break ; 
++    break ;
+   }
+ 
+-  if (ev != NULL) { 
+-    guarantee (ev->AssociatedWith == NULL, "invariant") ; 
++  if (ev != NULL) {
++    guarantee (ev->AssociatedWith == NULL, "invariant") ;
+   } else {
+     // Do this the hard way -- materialize a new ParkEvent.
+-    // In rare cases an allocating thread might detach
+-    // a long list -- installing null into FreeList --and 
+-    // then stall.  Another thread calling Allocate() would see 
+-    // FreeList == null and then invoke the ctor.  In this case we 
+-    // end up with more ParkEvents in circulation than we need, but 
+-    // the race is rare and the outcome is benign.  
+-    // Ideally, the # of extant ParkEvents is equal to the
+-    // maximum # of threads that existed at any one time.
+-    // Because of the race mentioned above, segments of the
+-    // freelist can be transiently inaccessible.  At worst
+-    // we may end up with the # of ParkEvents in circulation
+-    // slightly above the ideal.  
+-    ev = new ParkEvent () ; 
++    // In rare cases an allocating thread might detach a long list --
++    // installing null into FreeList -- and then stall or be obstructed.
++    // A 2nd thread calling Allocate() would see FreeList == null.
++    // The list held privately by the 1st thread is unavailable to the 2nd thread.
++    // In that case the 2nd thread would have to materialize a new ParkEvent,
++    // even though free ParkEvents existed in the system.  In this case we end up
++    // with more ParkEvents in circulation than we need, but the race is
++    // rare and the outcome is benign.  Ideally, the # of extant ParkEvents
++    // is equal to the maximum # of threads that existed at any one time.
++    // Because of the race mentioned above, segments of the freelist
++    // can be transiently inaccessible.  At worst we may end up with the
++    // # of ParkEvents in circulation slightly above the ideal.
++    // Note that if we didn't have the TSM/immortal constraint, then
++    // when reattaching, above, we could trim the list.
++    ev = new ParkEvent () ;
++    guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ;
+   }
++  ev->reset() ;                     // courtesy to caller
+   ev->AssociatedWith = t ;          // Associate ev with t
+-  ev->FreeNext       = NULL ; 
+-  return ev ; 
++  ev->FreeNext       = NULL ;
++  return ev ;
+ }
+ 
+-
+-
+ void ParkEvent::Release (ParkEvent * ev) {
+-  if (ev == NULL) return ; 
+-  guarantee (ev->AssociatedWith != NULL, "invariant") ; 
+-  guarantee (ev->FreeNext == NULL      , "invariant") ; 
+-  ev->AssociatedWith = NULL ; 
+-  for (;;) { 
++  if (ev == NULL) return ;
++  guarantee (ev->FreeNext == NULL      , "invariant") ;
++  ev->AssociatedWith = NULL ;
++  for (;;) {
+     // Push ev onto FreeList
+-    ParkEvent * List = FreeList ; 
+-    ev->FreeNext = List ; 
+-    if (Atomic::cmpxchg_ptr (ev, &FreeList, List) == List) break ; 
++    // The mechanism is "half" lock-free.
++    ParkEvent * List = FreeList ;
++    ev->FreeNext = List ;
++    if (Atomic::cmpxchg_ptr (ev, &FreeList, List) == List) break ;
+   }
+ }
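For orientation, a minimal stand-alone sketch of the detach / extract / reattach protocol used by Allocate() above, recast with C++11 std::atomic. This is illustrative only, not HotSpot code: Node, g_free and pop_one are hypothetical names, and nodes are assumed type-stable and immortal (as ParkEvents are), which is what lets the pattern ignore ABA and reclamation hazards.

#include <atomic>

struct Node { Node* next; };
static std::atomic<Node*> g_free{nullptr};

Node* pop_one() {
  for (;;) {
    Node* head = g_free.load();
    if (head == nullptr) return nullptr;              // nothing to recycle
    // 1: Detach -- privatize the whole list, tantamount to Swap(&g_free, 0).
    if (!g_free.compare_exchange_strong(head, nullptr)) continue;
    // 2: Extract -- pop the first element; the remainder is now thread-local.
    Node* rest = head->next;
    while (rest != nullptr) {
      // 3: Try to reinstall the residual list as the free list.
      Node* expected = nullptr;
      if (g_free.compare_exchange_strong(expected, rest)) break;
      // New nodes arrived meanwhile; try to detach the arrivals.
      Node* arrivals = expected;
      if (!g_free.compare_exchange_strong(arrivals, nullptr)) continue;
      // 4: Merge the arrivals onto the tail of the residual list.
      Node* tail = rest;
      while (tail->next != nullptr) tail = tail->next;
      tail->next = expected;
    }
    return head;
  }
}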
+ 
++// Override operator new and delete so we can ensure that the
++// least significant byte of ParkEvent addresses is 0.
++// Beware that excessive address alignment is undesirable
++// as it can result in D$ index usage imbalance as
++// well as bank access imbalance on Niagara-like platforms,
++// although Niagara's hash function should help.
++
++void * ParkEvent::operator new (size_t sz) {
++  return (void *) ((intptr_t (CHeapObj::operator new (sz + 256)) + 256) & -256) ;
++}
++
++void ParkEvent::operator delete (void * a) {
++  // ParkEvents are type-stable and immortal ...
++  ShouldNotReachHere();
++}
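The operator new above over-allocates by 256 bytes and rounds the result up so the least significant byte of every ParkEvent address is zero. A hedged illustration of just the rounding arithmetic (aligned256 and the malloc call are hypothetical; the real code goes through CHeapObj::operator new, and losing the raw pointer is harmless only because ParkEvents are immortal and never freed):

#include <cassert>
#include <cstdint>
#include <cstdlib>

void* aligned256(std::size_t sz) {
  void* raw = std::malloc(sz + 256);                 // over-allocate one stride
  // -256 is ~255 in two's complement: adding 256 then masking rounds up to
  // the next 256-byte boundary, e.g. 0x1003 -> (0x1003+0x100) & ~0xFF == 0x1100.
  std::intptr_t p = (std::intptr_t(raw) + 256) & -256;
  assert((p & 0xFF) == 0);                           // low byte is now zero
  return (void*)p;
}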
++
+ 
+ // 6399321 As a temporary measure we copied & modified the ParkEvent::
+ // allocate() and release() code for use by Parkers.  The Parker:: forms
+// will eventually be removed as we consolidate and shift over to ParkEvents
+-// for both builtin synchronization and JSR166 operations. 
++// for both builtin synchronization and JSR166 operations.
+ 
+ volatile int Parker::ListLock = 0 ;
+ Parker * volatile Parker::FreeList = NULL ;
+@@ -3877,7 +3963,7 @@
+   }
+ }
+ 
+-void Threads::verify() {  
++void Threads::verify() {
+   ALL_JAVA_THREADS(p) {
+     p->verify();
+   }
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/threadCritical.hpp openjdk/hotspot/src/share/vm/runtime/threadCritical.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/threadCritical.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/threadCritical.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)threadCritical.hpp	1.13 07/05/05 17:07:00 JVM"
+-#endif
+ /*
+  * Copyright 2001-2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ThreadCritical is used to protect short non-blocking critical sections.
+@@ -54,5 +51,3 @@
+   ThreadCritical();
+   ~ThreadCritical();
+ };
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/thread.hpp openjdk/hotspot/src/share/vm/runtime/thread.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/thread.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/thread.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)thread.hpp	1.453 07/07/05 17:14:54 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class ThreadSafepointState;
+@@ -32,6 +29,7 @@
+ class JvmtiGetLoadedClassesClosure;
+ class ThreadStatistics;
+ class ConcurrentLocksDump;
++class ParkEvent ;
+ 
+ class ciEnv;
+ class CompileThread;
+@@ -39,7 +37,6 @@
+ class CompileTask;
+ class CompileQueue;
+ class CompilerCounters;
+-class RawMonitor;
+ class vframeArray;
+ 
+ class DeoptResourceMark;
+@@ -47,6 +44,7 @@
+ 
+ class GCTaskQueue;
+ class ThreadClosure;
++class IdealGraphPrinter;
+ 
+ // Class hierarchy
+ // - Thread
+@@ -89,8 +87,8 @@
+   // suspend requests. The higher level APIs reject suspend requests
+   // for already suspended threads.
+   //
+-  // The external_suspend 
+-  // flag is checked by has_special_runtime_exit_condition() and java thread 
++  // The external_suspend
++  // flag is checked by has_special_runtime_exit_condition() and java thread
+   // will self-suspend when handle_special_runtime_exit_condition() is
+   // called. Most uses of the _thread_blocked state in JavaThreads are
+   // considered the same as being externally suspended; if the blocking
+@@ -101,7 +99,7 @@
+   //
+   // In general, java_suspend() does not wait for an external suspend
+   // request to complete. When it returns, the only guarantee is that
+-  // the _external_suspend field is true. 
++  // the _external_suspend field is true.
+   //
+   // wait_for_ext_suspend_completion() is used to wait for an external
+   // suspend request to complete. External suspend requests are usually
+@@ -121,12 +119,12 @@
+   // that a single check indicates whether any special action is needed
+   // eg. for async exceptions.
+   // -------------------------------------------------------------------
+-  // Notes: 
++  // Notes:
+   // 1. The suspend/resume logic no longer uses ThreadState in OSThread
+-  // but we still update its value to keep other part of the system (mainly 
+-  // JVMTI) happy. ThreadState is legacy code (see notes in 
++  // but we still update its value to keep other parts of the system (mainly
++  // JVMTI) happy. ThreadState is legacy code (see notes in
+   // osThread.hpp).
+-  // 
++  //
+   // 2. It would be more natural if set_external_suspend() is private and
+   // part of java_suspend(), but that probably would affect the suspend/query
+   // performance. Need more investigation on this.
+@@ -172,7 +170,7 @@
+   // Point to the last handle mark
+   HandleMark* _last_handle_mark;
+ 
+-  // The parity of the last strong_roots iteration in which this thread was 
++  // The parity of the last strong_roots iteration in which this thread was
+   // claimed as a task.
+   jint _oops_do_parity;
+ 
+@@ -182,7 +180,7 @@
+   private:
+ 
+   // debug support for checking if code does allow safepoints or not
+-  // GC points in the VM can happen because of allocation, invoking a VM operation, or blocking on 
++  // GC points in the VM can happen because of allocation, invoking a VM operation, or blocking on
+   // mutex, or blocking on an object synchronizer (Java locking).
+   // If !allow_safepoint(), then an assertion failure will happen in any of the above cases
+   // If !allow_allocation(), then an assertion failure will happen during allocation
+@@ -191,7 +189,7 @@
+   // The two classes No_Safepoint_Verifier and No_Allocation_Verifier are used to set these counters.
+   //
+  NOT_PRODUCT(int _allow_safepoint_count;)       // If 0, the thread allows a safepoint to happen
+-  debug_only (int _allow_allocation_count;)      // If 0, the thread is allowed to allocate oops.  
++  debug_only (int _allow_allocation_count;)      // If 0, the thread is allowed to allocate oops.
+ 
+   // Record when GC is locked out via the GC_locker mechanism
+   CHECK_UNHANDLED_OOPS_ONLY(int _gc_locked_out_count;)
+@@ -207,9 +205,9 @@
+   // variable is initially set to NULL, indicating no locks are used by the thread. During the thread's
+   // execution, it will be set whenever locking can happen, i.e., when we call out to Java code or use
+   // an ObjectLocker. The value is never decreased; hence, over the lifetime of a thread it will
+-  // approximate the real stackbase. 
++  // approximate the real stackbase.
+   address _highest_lock;                         // Highest stack address where a JavaLock exists
+-  
++
+   ThreadLocalAllocBuffer _tlab;                  // Thread-local eden
+ 
+   int   _vm_operation_started_count;             // VM_Operation support
+@@ -222,16 +220,16 @@
+   // ObjectMonitor on which this thread called Object.wait()
+   ObjectMonitor* _current_waiting_monitor;
+ 
+-  // Private thread-local objectmonitor list - a simple cache organized as a SLL.  
++  // Private thread-local objectmonitor list - a simple cache organized as an SLL.
+  public:
+-  ObjectMonitor * omFreeList ; 
++  ObjectMonitor * omFreeList ;
+   int omFreeCount ;                             // length of omFreeList
+   int omFreeProvision ;                         // reload chunk size
+-  
+- public: 
++
++ public:
+   enum {
+     is_definitely_current_thread = true
+-  };    
++  };
+ 
+   // Constructor
+   Thread();
+@@ -248,8 +246,8 @@
+   virtual bool is_Java_thread()     const            { return false; }
+   // Remove this ifdef when C1 is ported to the compiler interface.
+   virtual bool is_Compiler_thread() const            { return false; }
+-  virtual bool is_hidden_from_external_view() const  { return false; }  
+-  virtual bool is_jvmti_agent_thread() const 	     { return false; }
++  virtual bool is_hidden_from_external_view() const  { return false; }
++  virtual bool is_jvmti_agent_thread() const         { return false; }
+   // True iff the thread can perform GC operations at a safepoint.
+   // Generally will be true only of VM thread and parallel GC WorkGang
+   // threads.
+@@ -260,28 +258,28 @@
+   virtual char* name() const { return (char*)"Unknown thread"; }
+ 
+   // Returns the current thread
+-  static inline Thread* current();  
++  static inline Thread* current();
+ 
+   // Common thread operations
+   static void set_priority(Thread* thread, ThreadPriority priority);
+   static ThreadPriority get_priority(const Thread* const thread);
+   static void start(Thread* thread);
+   static void interrupt(Thread* thr);
+-  static bool is_interrupted(Thread* thr, bool clear_interrupted);  
++  static bool is_interrupted(Thread* thr, bool clear_interrupted);
+ 
+   Monitor* SR_lock() const                       { return _SR_lock; }
+ 
+   bool has_async_exception() const { return (_suspend_flags & _has_async_exception) != 0; }
+ 
+-  void set_suspend_flag(SuspendFlags f) { 
++  void set_suspend_flag(SuspendFlags f) {
+     assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch");
+     uint32_t flags;
+     do {
+       flags = _suspend_flags;
+     }
+-    while (Atomic::cmpxchg((jint)(flags | f), 
+-			   (volatile jint*)&_suspend_flags, 
+-			   (jint)flags) != (jint)flags);
++    while (Atomic::cmpxchg((jint)(flags | f),
++                           (volatile jint*)&_suspend_flags,
++                           (jint)flags) != (jint)flags);
+   }
+   void clear_suspend_flag(SuspendFlags f) {
+     assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch");
+@@ -289,9 +287,9 @@
+     do {
+       flags = _suspend_flags;
+     }
+-    while (Atomic::cmpxchg((jint)(flags & ~f), 
+-			   (volatile jint*)&_suspend_flags, 
+-			   (jint)flags) != (jint)flags);
++    while (Atomic::cmpxchg((jint)(flags & ~f),
++                           (volatile jint*)&_suspend_flags,
++                           (jint)flags) != (jint)flags);
+   }
+ 
+   void set_has_async_exception() {
+@@ -376,7 +374,7 @@
+ 
+   // GC support
+   // Apply "f->do_oop" to all root oops in "this".
+-  void oops_do(OopClosure* f);  
++  void oops_do(OopClosure* f);
+ 
+   // Handles the parallel case for the method below.
+ private:
+@@ -387,9 +385,9 @@
+   // "collection_parity", and returns "true".  If "is_par" is true,
+   // uses an atomic instruction to set the current threads parity to
+   // "collection_parity", if it is not already.  Returns "true" iff the
+-  // calling thread does the update, this indicates that the calling thread 
++  // calling thread does the update, this indicates that the calling thread
+  // has claimed the thread's stack as a root group in the current
+-  // collection. 
++  // collection.
+   bool claim_oops_do(bool is_par, int collection_parity) {
+     if (!is_par) {
+       _oops_do_parity = collection_parity;
+@@ -400,8 +398,8 @@
+   }
+ 
+   // Sweeper support
+-  void nmethods_do();  
+-  
++  void nmethods_do();
++
+   // Fast-locking support
+   address highest_lock() const                   { return _highest_lock; }
+   void update_highest_lock(address base)         { if (base > _highest_lock) _highest_lock = base; }
+@@ -411,7 +409,7 @@
+   // Warning: the method can only be used on the running thread
+   // Fast lock support uses these methods
+   virtual bool lock_is_in_stack(address adr) const;
+-  virtual bool is_lock_owned(address adr) const;  
++  virtual bool is_lock_owned(address adr) const;
+ 
+   // Check if address is in the stack of the thread (not just for locks).
+   bool is_in_stack(address adr) const;
+@@ -422,7 +420,7 @@
+ 
+  protected:
+   // OS data associated with the thread
+-  OSThread* _osthread;  // Platform-specific thread information 
++  OSThread* _osthread;  // Platform-specific thread information
+ 
+   // Thread local resource area for temporary allocation within the VM
+   ResourceArea* _resource_area;
+@@ -430,7 +428,7 @@
+   // Thread local handle area for allocation of handles within the VM
+   HandleArea* _handle_area;
+ 
+-  // Support for stack overflow handling, get_thread, etc.  
++  // Support for stack overflow handling, get_thread, etc.
+   address          _stack_base;
+   size_t           _stack_size;
+   uintptr_t        _self_raw_id;      // used by get_thread (mutable)
+@@ -439,7 +437,7 @@
+  public:
+   // Stack overflow support
+   address stack_base() const           { assert(_stack_base != NULL,"Sanity check"); return _stack_base; }
+-  
++
+   void    set_stack_base(address base) { _stack_base = base; }
+   size_t  stack_size() const           { return _stack_size; }
+   void    set_stack_size(size_t size)  { _stack_size = size; }
+@@ -458,16 +456,17 @@
+ #ifdef ASSERT
+  private:
+   // Deadlock detection support for Mutex locks. List of locks own by thread.
+-  Mutex *_owned_locks;
++  Monitor *_owned_locks;
+   // Mutex::set_owner_implementation is the only place where _owned_locks is modified,
+   // thus the friendship
+   friend class Mutex;
++  friend class Monitor;
+ 
+- public:  
++ public:
+   void print_owned_locks_on(outputStream* st) const;
+-  void print_owned_locks() const		 { print_owned_locks_on(tty);	 }
+-  Mutex* owned_locks() const			 { return _owned_locks;          }
+-  bool owns_locks() const                        { return owned_locks() != NULL; }  
++  void print_owned_locks() const                 { print_owned_locks_on(tty);    }
++  Monitor * owned_locks() const                  { return _owned_locks;          }
++  bool owns_locks() const                        { return owned_locks() != NULL; }
+   bool owns_locks_but_compiled_lock() const;
+ 
+   // Deadlock detection
+@@ -479,7 +478,7 @@
+  private:
+   volatile int _jvmti_env_iteration_count;
+ 
+- public:  
++ public:
+   void entering_jvmti_env_iteration()            { ++_jvmti_env_iteration_count; }
+   void leaving_jvmti_env_iteration()             { --_jvmti_env_iteration_count; }
+   bool is_inside_jvmti_env_iteration()           { return _jvmti_env_iteration_count > 0; }
+@@ -491,7 +490,7 @@
+ 
+   static ByteSize stack_base_offset()            { return byte_offset_of(Thread, _stack_base ); }
+   static ByteSize stack_size_offset()            { return byte_offset_of(Thread, _stack_size ); }
+-  static ByteSize omFreeList_offset()            { return byte_offset_of(Thread, omFreeList); } 
++  static ByteSize omFreeList_offset()            { return byte_offset_of(Thread, omFreeList); }
+ 
+ #define TLAB_FIELD_OFFSET(name) \
+   static ByteSize tlab_##name##_offset()            { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }
+@@ -509,19 +508,36 @@
+ #undef TLAB_FIELD_OFFSET
+ 
+  public:
+-  volatile intptr_t _Stalled ; 
+-  volatile int _TypeTag ; 
+-  class ParkEvent * _ParkEvent ; 
++  volatile intptr_t _Stalled ;
++  volatile int _TypeTag ;
++  ParkEvent * _ParkEvent ;                     // for synchronized()
++  ParkEvent * _SleepEvent ;                    // for Thread.sleep
++  ParkEvent * _MutexEvent ;                    // for native internal Mutex/Monitor
++  ParkEvent * _MuxEvent ;                      // for low-level muxAcquire-muxRelease
++  int NativeSyncRecursion ;                    // diagnostic
++
+   volatile int _OnTrap ;                       // Resume-at IP delta
+-  int _hashStateW ; 
+-  int _hashStateX ;                            // thread-specific hashCode generator state
+-  int _hashStateY ; 
+-  int _hashStateZ ; 
+-  void * _schedctl ; 
+-  intptr_t _ScratchA, _ScratchB ;              // Scratch locations for fast-path sync code
++  jint _hashStateW ;                           // Marsaglia Shift-XOR thread-local RNG
++  jint _hashStateX ;                           // thread-specific hashCode generator state
++  jint _hashStateY ;
++  jint _hashStateZ ;
++  void * _schedctl ;
+ 
++  intptr_t _ScratchA, _ScratchB ;              // Scratch locations for fast-path sync code
+   static ByteSize ScratchA_offset()            { return byte_offset_of(Thread, _ScratchA ); }
+   static ByteSize ScratchB_offset()            { return byte_offset_of(Thread, _ScratchB ); }
++
++  volatile jint rng [4] ;                      // RNG for spin loop
++
++  // Low-level leaf-lock primitives used to implement synchronization
++  // and native monitor-mutex infrastructure.
++  // Not for general synchronization use.
++  static void SpinAcquire (volatile int * Lock, const char * Name) ;
++  static void SpinRelease (volatile int * Lock) ;
++  static void muxAcquire  (volatile intptr_t * Lock, const char * Name) ;
++  static void muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) ;
++  static void muxRelease  (volatile intptr_t * Lock) ;
++
+ };
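SpinAcquire/SpinRelease and the mux primitives declared above are leaf locks for the native monitor-mutex layer. As rough orientation only, a minimal test-and-set spin lock in C++11 atomics (the real versions add bounded spinning, backoff, and ParkEvent-based blocking; spin_acquire and spin_release are hypothetical names):

#include <atomic>

void spin_acquire(std::atomic<int>& lock) {        // 0 = free, 1 = held
  int expected = 0;
  while (!lock.compare_exchange_weak(expected, 1)) {
    expected = 0;                                  // reset for the next attempt
    while (lock.load(std::memory_order_relaxed) != 0) { /* spin */ }
  }
}

void spin_release(std::atomic<int>& lock) {
  lock.store(0, std::memory_order_release);
}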
+ 
+ // Inline implementation of Thread::current()
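The _hashStateX.._hashStateW words added above hold per-thread state for a Marsaglia shift-XOR generator (used, for example, in identity hashCode generation and spin randomization). A sketch of one plausible step function over that state, with constants from Marsaglia's xorshift paper; the exact shifts HotSpot uses may differ:

#include <cstdint>

struct HashState { std::uint32_t x, y, z, w; };    // cf. _hashStateX.._hashStateW

std::uint32_t next_hash(HashState& s) {
  std::uint32_t t = s.x ^ (s.x << 11);
  s.x = s.y; s.y = s.z; s.z = s.w;
  s.w = (s.w ^ (s.w >> 19)) ^ (t ^ (t >> 8));
  return s.w;
}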
+@@ -532,12 +548,12 @@
+ 
+ inline Thread* Thread::current() {
+ #ifdef ASSERT
+-// This function is very high traffic. Define PARANOID to enable expensive 
++// This function is very high traffic. Define PARANOID to enable expensive
+ // asserts.
+ #ifdef PARANOID
+   // Signal handler should call ThreadLocalStorage::get_thread_slow()
+   Thread* t = ThreadLocalStorage::get_thread_slow();
+-  assert(t != NULL && !t->is_inside_signal_handler(), 
++  assert(t != NULL && !t->is_inside_signal_handler(),
+          "Don't use Thread::current() inside signal handler");
+ #endif
+ #endif
+@@ -582,7 +598,7 @@
+  private:
+   static WatcherThread* _watcher_thread;
+ 
+-  static bool _should_terminate;  
++  static bool _should_terminate;
+  public:
+   enum SomeConstants {
+     delay_interval = 10                          // interrupt delay in milliseconds
+@@ -612,9 +628,9 @@
+ 
+ typedef void (*ThreadFunction)(JavaThread*, TRAPS);
+ 
+-class JavaThread: public Thread {  
++class JavaThread: public Thread {
+   friend class VMStructs;
+- private:  
++ private:
+   JavaThread*    _next;                          // The next thread in the Threads list
+   oop            _threadObj;                     // The Java level thread object
+ 
+@@ -641,18 +657,18 @@
+ #endif
+ 
+   JavaFrameAnchor _anchor;                       // Encapsulation of current java frame and its state
+- 
++
+   ThreadFunction _entry_point;
+ 
+   JNIEnv        _jni_environment;
+-  
++
+   // Deopt support
+   DeoptResourceMark*  _deopt_mark;               // Holds special ResourceMark for deoptimization
+ 
+   intptr_t*      _must_deopt_id;                 // id of frame that needs to be deopted once we
+                                                  // transition out of native
+ 
+-  vframeArray*  _vframe_array_head;              // Holds the heap of the active vframeArrays  
++  vframeArray*  _vframe_array_head;              // Holds the head of the active vframeArrays
+   vframeArray*  _vframe_array_last;              // Holds last vFrameArray we popped
+   // Because deoptimization is lazy we must save jvmti requests to set locals
+   // in compiled frames until we deoptimize and we have an interpreter frame.
+@@ -669,12 +685,12 @@
+   // code in i2c adapters and handle_wrong_method.
+ 
+   methodOop     _callee_target;
+-  
+-  // Oop results of VM runtime calls  
++
++  // Oop results of VM runtime calls
+   oop           _vm_result;                      // Used to pass back an oop result into Java code, GC-preserved
+   oop           _vm_result_2;                    // Used to pass back an oop result into Java code, GC-preserved
+ 
+-  MonitorChunk* _monitor_chunks;                 // Contains the off stack monitors 
++  MonitorChunk* _monitor_chunks;                 // Contains the off stack monitors
+                                                  // allocated during deoptimization
+                                                  // and by JNI_MonitorEnter/Exit
+ 
+@@ -716,7 +732,7 @@
+                                                  // handlers thread is in
+   volatile bool         _doing_unsafe_access;    // Thread may fault due to unsafe access
+   bool                  _do_not_unlock_if_synchronized; // Do not unlock the receiver of a synchronized method (since it was
+-						 // never locked) when throwing an exception. Used by interpreter only.
++                                                 // never locked) when throwing an exception. Used by interpreter only.
+ 
+   //  Flag to mark a JNI thread in the process of attaching - See CR 6404306
+   //  This flag is never set true other than at construction, and in that case
+@@ -726,9 +742,9 @@
+  public:
+   // State of the stack guard pages for this thread.
+   enum StackGuardState {
+-    stack_guard_unused,		// not needed
++    stack_guard_unused,         // not needed
+     stack_guard_yellow_disabled,// disabled (temporarily) after stack overflow
+-    stack_guard_enabled		// enabled
++    stack_guard_enabled         // enabled
+   };
+ 
+  private:
+@@ -776,7 +792,7 @@
+  public:
+   // Constructor
+   JavaThread(bool is_attaching = false); // for main thread and JNI attached threads
+-  JavaThread(ThreadFunction entry_point, size_t stack_size = 0); 
++  JavaThread(ThreadFunction entry_point, size_t stack_size = 0);
+   ~JavaThread();
+ 
+ #ifdef ASSERT
+@@ -814,7 +830,7 @@
+   // Thread chain operations
+   JavaThread* next() const                       { return _next; }
+   void set_next(JavaThread* p)                   { _next = p; }
+-  
++
+   // Thread oop. threadObj() can be NULL for initial JavaThread
+   // (or for threads attached via JNI)
+   oop threadObj() const                          { return _threadObj; }
+@@ -829,8 +845,8 @@
+ 
+   void set_saved_exception_pc(address pc)        { _saved_exception_pc = pc; }
+   address saved_exception_pc()                   { return _saved_exception_pc; }
+-  
+-    
++
++
+   ThreadFunction entry_point() const             { return _entry_point; }
+ 
+   // Allocates a new Java level thread object for this thread. thread_name may be NULL.
+@@ -840,7 +856,7 @@
+ 
+   JavaFrameAnchor* frame_anchor(void)                { return &_anchor; }
+ 
+-  // last_Java_sp 
++  // last_Java_sp
+   bool has_last_Java_frame() const                   { return _anchor.has_last_Java_frame(); }
+   intptr_t* last_Java_sp() const                     { return _anchor.last_Java_sp(); }
+ 
+@@ -868,11 +884,11 @@
+ 
+   bool doing_unsafe_access()                     { return _doing_unsafe_access; }
+   void set_doing_unsafe_access(bool val)         { _doing_unsafe_access = val; }
+-    
++
+   bool do_not_unlock_if_synchronized()             { return _do_not_unlock_if_synchronized; }
+   void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }
+ 
+-  
++
+   // Suspend/resume support for JavaThread
+ 
+  private:
+@@ -881,7 +897,7 @@
+ 
+  public:
+   void java_suspend();
+-  void java_resume();  
++  void java_resume();
+   int  java_suspend_self();
+ 
+   void check_and_wait_while_suspended() {
+@@ -909,7 +925,7 @@
+     // SR_lock to allow the thread to reach a stable thread state if
+     // it is currently in a transient thread state.
+     return is_ext_suspend_completed(false /*!called_by_wait */,
+-				    SuspendRetryDelay, bits);
++                                    SuspendRetryDelay, bits);
+   }
+ 
+   // We cannot allow wait_for_ext_suspend_completion() to run forever or
+@@ -927,17 +943,17 @@
+   void clear_deopt_suspend()      { clear_suspend_flag(_deopt_suspend); }
+   bool is_deopt_suspend()         { return (_suspend_flags & _deopt_suspend) != 0; }
+ 
+-  bool is_external_suspend() const { 
+-    return (_suspend_flags & _external_suspend) != 0; 
++  bool is_external_suspend() const {
++    return (_suspend_flags & _external_suspend) != 0;
+   }
+   // Whenever a thread transitions from native to vm/java it must suspend
+   // if external|deopt suspend is present.
+-  bool is_suspend_after_native() const { 
++  bool is_suspend_after_native() const {
+     return (_suspend_flags & (_external_suspend | _deopt_suspend) ) != 0;
+   }
+ 
+   // external suspend request is completed
+-  bool is_ext_suspended() const { 
++  bool is_ext_suspended() const {
+     return (_suspend_flags & _ext_suspended) != 0;
+   }
+ 
+@@ -994,7 +1010,7 @@
+     return x;
+   }
+ 
+-  // Are any async conditions present? 
++  // Are any async conditions present?
+   bool has_async_condition() { return (_special_runtime_exit_condition != _no_async_condition); }
+ 
+   void check_and_handle_async_exceptions(bool check_unsafe_error = true);
+@@ -1012,7 +1028,7 @@
+     // _external_suspend field change either not made yet or not visible
+     // yet. However, this is okay because the request is asynchronous and
+     // we will see the new flag value the next time through. It's also
+-    // possible that the external suspend request is dropped after 
++    // possible that the external suspend request is dropped after
+     // we have checked is_external_suspend(), we will recheck its value
+     // under SR_lock in java_suspend_self().
+     return (_special_runtime_exit_condition != _no_async_condition) ||
+@@ -1026,9 +1042,9 @@
+     _special_runtime_exit_condition = _async_exception;
+     set_has_async_exception();
+   }
+-  
++
+   // Fast-locking support
+-  bool is_lock_owned(address adr) const;  
++  bool is_lock_owned(address adr) const;
+ 
+   // Accessors for vframe array top
+   // The linked list of vframe arrays is sorted on sp. This means when we
+@@ -1069,7 +1085,7 @@
+   int      exception_stack_size() const          { return _exception_stack_size; }
+   address  exception_pc() const                  { return _exception_pc; }
+   address  exception_handler_pc() const          { return _exception_handler_pc; }
+-  
++
+   void set_exception_oop(oop o)                  { _exception_oop = o; }
+   void set_exception_pc(address a)               { _exception_pc = a; }
+   void set_exception_handler_pc(address a)       { _exception_handler_pc = a; }
+@@ -1140,7 +1156,7 @@
+   static ByteSize callee_target_offset()         { return byte_offset_of(JavaThread, _callee_target       ); }
+   static ByteSize vm_result_offset()             { return byte_offset_of(JavaThread, _vm_result           ); }
+   static ByteSize vm_result_2_offset()           { return byte_offset_of(JavaThread, _vm_result_2         ); }
+-  static ByteSize thread_state_offset()          { return byte_offset_of(JavaThread, _thread_state        ); }  
++  static ByteSize thread_state_offset()          { return byte_offset_of(JavaThread, _thread_state        ); }
+   static ByteSize saved_exception_pc_offset()    { return byte_offset_of(JavaThread, _saved_exception_pc  ); }
+   static ByteSize osthread_offset()              { return byte_offset_of(JavaThread, _osthread            ); }
+   static ByteSize exception_oop_offset()         { return byte_offset_of(JavaThread, _exception_oop       ); }
+@@ -1174,16 +1190,16 @@
+                           _jni_active_critical++; }
+   void exit_critical()  { assert(Thread::current() == this,
+                                  "this must be current thread");
+-                          _jni_active_critical--; 
++                          _jni_active_critical--;
+                           assert(_jni_active_critical >= 0,
+                                  "JNI critical nesting problem?"); }
+ 
+   // For deadlock detection
+   int depth_first_number() { return _depth_first_number; }
+   void set_depth_first_number(int dfn) { _depth_first_number = dfn; }
+-  
++
+  private:
+-  void set_monitor_chunks(MonitorChunk* monitor_chunks) { _monitor_chunks = monitor_chunks; } 
++  void set_monitor_chunks(MonitorChunk* monitor_chunks) { _monitor_chunks = monitor_chunks; }
+ 
+  public:
+   MonitorChunk* monitor_chunks() const           { return _monitor_chunks; }
+@@ -1202,10 +1218,10 @@
+   void set_entry_point(ThreadFunction entry_point) { _entry_point = entry_point; }
+ 
+  public:
+-     
+-  // Frame iteration; calls the function f for all frames on the stack 
++
++  // Frame iteration; calls the function f for all frames on the stack
+   void frames_do(void f(frame*, const RegisterMap*));
+-  
++
+   // Memory operations
+   void oops_do(OopClosure* f);
+ 
+@@ -1221,8 +1237,8 @@
+   void print_on(outputStream* st) const;
+   void print() const { print_on(tty); }
+   void print_value();
+-  void print_thread_state_on(outputStream* ) const	PRODUCT_RETURN;
+-  void print_thread_state() const			PRODUCT_RETURN;
++  void print_thread_state_on(outputStream* ) const      PRODUCT_RETURN;
++  void print_thread_state() const                       PRODUCT_RETURN;
+   void print_on_error(outputStream* st, char* buf, int buflen) const;
+   void verify();
+   const char* get_thread_name() const;
+@@ -1234,15 +1250,15 @@
+   const char* get_parent_name() const;
+ 
+   // Accessing frames
+-  frame last_frame() { 
++  frame last_frame() {
+     _anchor.make_walkable(this);
+     return pd_last_frame();
+   }
+   javaVFrame* last_java_vframe(RegisterMap* reg_map);
+ 
+-  // Returns method at 'depth' java or native frames down the stack  
++  // Returns method at 'depth' java or native frames down the stack
+   // Used for security checks
+-  klassOop security_get_caller_class(int depth);  
++  klassOop security_get_caller_class(int depth);
+ 
+   // Print stack trace in external format
+   void print_stack_on(outputStream* st);
+@@ -1299,16 +1315,16 @@
+  private:
+   // PRIVILEGED STACK
+   PrivilegedElement*  _privileged_stack_top;
+-  GrowableArray<oop>* _array_for_gc; 
++  GrowableArray<oop>* _array_for_gc;
+  public:
+-    
++
+   // Returns the privileged_stack information.
+   PrivilegedElement* privileged_stack_top() const       { return _privileged_stack_top; }
+   void set_privileged_stack_top(PrivilegedElement *e)   { _privileged_stack_top = e; }
+   void register_array_for_gc(GrowableArray<oop>* array) { _array_for_gc = array; }
+ 
+  public:
+-  // Thread local information maintained by JVMTI. 
++  // Thread local information maintained by JVMTI.
+   void set_jvmti_thread_state(JvmtiThreadState *value)                           { _jvmti_thread_state = value; }
+   JvmtiThreadState *jvmti_thread_state() const                                   { return _jvmti_thread_state; }
+   static ByteSize jvmti_thread_state_offset()                                    { return byte_offset_of(JavaThread, _jvmti_thread_state); }
+@@ -1384,20 +1400,20 @@
+  public:
+   ThreadStatistics* get_thread_stat() const    { return _thread_stat; }
+ 
+-  // Return a blocker object for which this thread is blocked parking. 
++  // Return a blocker object for which this thread is blocked parking.
+   oop current_park_blocker();
+ 
+  private:
+   static size_t _stack_size_at_create;
+- 
++
+  public:
+-  static inline size_t stack_size_at_create(void) { 
+-    return _stack_size_at_create; 
++  static inline size_t stack_size_at_create(void) {
++    return _stack_size_at_create;
+   }
+-  static inline void set_stack_size_at_create(size_t value) { 
++  static inline void set_stack_size_at_create(size_t value) {
+     _stack_size_at_create = value;
+   }
+-  
++
+   // Machine dependent stuff
+   #include "incls/_thread_pd.hpp.incl"
+ 
+@@ -1495,11 +1511,11 @@
+   CompilerThread(CompileQueue* queue, CompilerCounters* counters);
+ 
+   bool is_Compiler_thread() const                { return true; }
+-  // Hide this compiler thread from external view.  
+-  bool is_hidden_from_external_view() const	 { return true; }
++  // Hide this compiler thread from external view.
++  bool is_hidden_from_external_view() const      { return true; }
+ 
+-  CompileQueue* queue()                          { return _queue; }        
+-  CompilerCounters* counters()                   { return _counters; }        
++  CompileQueue* queue()                          { return _queue; }
++  CompilerCounters* counters()                   { return _counters; }
+ 
+   // Get/set the thread's compilation environment.
+   ciEnv*        env()                            { return _env; }
+@@ -1513,6 +1529,14 @@
+     _log = log;
+   }
+ 
++#ifndef PRODUCT
++private:
++  IdealGraphPrinter *_ideal_graph_printer;
++public:
++  IdealGraphPrinter *ideal_graph_printer()                       { return _ideal_graph_printer; }
++  void set_ideal_graph_printer(IdealGraphPrinter *n)             { _ideal_graph_printer = n; }
++#endif
++
+   // Get/set the thread's current task
+   CompileTask*  task()                           { return _task; }
+   void          set_task(CompileTask* task)      { _task = task; }
+@@ -1542,12 +1566,13 @@
+   static bool includes(JavaThread* p);
+   static JavaThread* first()                     { return _thread_list; }
+   static void threads_do(ThreadClosure* tc);
+-  
++
+   // Initializes the vm and creates the vm thread
+   static jint create_vm(JavaVMInitArgs* args, bool* canTryAgain);
+   static void convert_vm_init_libraries_to_agents();
+   static void create_vm_init_libraries();
+   static void create_vm_init_agents();
++  static void shutdown_vm_agents();
+   static bool destroy_vm();
+   // Supported VM versions via JNI
+   // Includes JNI_VERSION_1_1
+@@ -1562,7 +1587,7 @@
+   // This version may only be called by sequential code.
+   static void oops_do(OopClosure* f);
+   // This version may be called by sequential or parallel code.
+-  static void possibly_parallel_oops_do(OopClosure* f);  
++  static void possibly_parallel_oops_do(OopClosure* f);
+   // This creates a list of GCTasks, one per thread.
+   static void create_thread_roots_tasks(GCTaskQueue* q);
+   // This creates a list of GCTasks, one per thread, for marking objects.
+@@ -1634,89 +1659,99 @@
+ };
+ 
+ // ParkEvents are type-stable and immortal.
+-// 
+-// Lifecycle: Once a ParkEvent is associated with a thread that ParkEvent remains 
++//
++// Lifecycle: Once a ParkEvent is associated with a thread, that ParkEvent remains
+ // associated with the thread for the thread's entire lifetime - the relationship is
+-// stable. A thread will be associated at most one ParkEvent.  When the thread 
+-// expires, the ParkEvent moves to the EventFreeList.  New threads attempt to allocate from 
+-// the EventFreeList before creating a new Event.  Type-stability frees us from 
++// stable. A thread will be associated with at most one ParkEvent.  When the thread
++// expires, the ParkEvent moves to the EventFreeList.  New threads attempt to allocate from
++// the EventFreeList before creating a new Event.  Type-stability frees us from
+ // worrying about stale Event or Thread references in the objectMonitor subsystem.
+-// (A reference to ParkEvent is always valid, even though the event may no longer be associated 
+-// with the desired or expected thread.  A key aspect of this design is that the callers of 
++// (A reference to ParkEvent is always valid, even though the event may no longer be associated
++// with the desired or expected thread.  A key aspect of this design is that the callers of
+ // park, unpark, etc must tolerate stale references and spurious wakeups).
+ //
+ // Only the "associated" thread can block (park) on the ParkEvent, although
+// any other thread can unpark a reachable ParkEvent.  Park() is allowed to
+// return spuriously.  In fact park-unpark is really just an optimization to
+ // avoid unbounded spinning and surrender the CPU to be a polite system citizen.
+-// A degenerate albeit "impolite" park-unpark implementation could simply return. 
+-// See http://blogs.sun.com/dave for more details.  
++// A degenerate albeit "impolite" park-unpark implementation could simply return.
++// See http://blogs.sun.com/dave for more details.
+ //
+ // Eventually I'd like to eliminate Events and ObjectWaiters, both of which serve as
+ // thread proxies, and simply make the THREAD structure type-stable and persistent.
+-// Currently, we unpark events associated with threads, but ideally we'd just 
+-// unpark threads. 
++// Currently, we unpark events associated with threads, but ideally we'd just
++// unpark threads.
+ //
+ // The base-class, PlatformEvent, is platform-specific while the ParkEvent is
+-// platform-independent.  PlatformEvent provides park(), unpark(), etc.  
+-// Equivalently we could have defined a platform-independent base-class that 
+-// exported Allocate(), Release(), etc.  The platform-specific class would extend 
+-// that base-class, adding park(), unpark(), etc.  
++// platform-independent.  PlatformEvent provides park(), unpark(), etc., and
++// is abstract -- that is, a PlatformEvent should never be instantiated except
++// as part of a ParkEvent.
++// Equivalently we could have defined a platform-independent base-class that
++// exported Allocate(), Release(), etc.  The platform-specific class would extend
++// that base-class, adding park(), unpark(), etc.
+ //
+-// A word of caution: The JVM uses 3 very similar constructs:
++// A word of caution: The JVM uses 2 very similar constructs:
+ // 1. ParkEvent are used for Java-level "monitor" synchronization.
+-// 2. Parkers are used by JSR166-JUC park-unpark. 
+-// 3. interrupt_event() is used for threads blocked in Thread.sleep().
++// 2. Parkers are used by JSR166-JUC park-unpark.
+ //
+ // We'll want to eventually merge these redundant facilities and use ParkEvent.
+ 
+ 
+-class ParkEvent : public os::PlatformEvent { 
++class ParkEvent : public os::PlatformEvent {
+   private:
+-    ParkEvent * FreeNext ; 
++    ParkEvent * FreeNext ;
+ 
+     // Current association
+-    Thread * AssociatedWith ; 
++    Thread * AssociatedWith ;
+     intptr_t RawThreadIdentity ;        // LWPID etc
+     volatile int Incarnation ;
+ 
+-  public:
+-    // MCS-CLH list linkage
+-    ParkEvent * volatile ListNext ; 
+-    ParkEvent * volatile ListPrev ; 
+-    volatile intptr_t OnList ; 
+-    volatile int TState ; 
+-
+     // diagnostic : keep track of last thread to wake this thread.
+     // this is useful for construction of dependency graphs.
+-    void * LastWaker ; 
+-   
++    void * LastWaker ;
++
++  public:
++    // MCS-CLH list linkage and Native Mutex/Monitor
++    ParkEvent * volatile ListNext ;
++    ParkEvent * volatile ListPrev ;
++    volatile intptr_t OnList ;
++    volatile int TState ;
++    volatile int Notified ;             // for native monitor construct
++    volatile int IsWaiting ;            // Enqueued on WaitSet
++
+ 
+   private:
+-    static ParkEvent * volatile FreeList ; 
+-    static volatile int ListLock ; 
++    static ParkEvent * volatile FreeList ;
++    static volatile int ListLock ;
+ 
+     // It's prudent to mark the dtor as "private"
+     // ensuring that it's not visible outside the package.
+     // Unfortunately gcc warns about such usage, so
+     // we revert to the less desirable "protected" visibility.
+-    // The other compilers accept private dtors.  
++    // The other compilers accept private dtors.
+ 
+   protected:        // Ensure dtor is never invoked
+     ~ParkEvent() { guarantee (0, "invariant") ; }
+ 
+-    ParkEvent() : PlatformEvent() { 
+-       AssociatedWith = NULL ; 
+-       FreeNext       = NULL ; 
+-       ListNext       = NULL ; 
+-       ListPrev       = NULL ; 
+-       OnList         = 0 ; 
+-       TState         = 0 ; 
++    ParkEvent() : PlatformEvent() {
++       AssociatedWith = NULL ;
++       FreeNext       = NULL ;
++       ListNext       = NULL ;
++       ListPrev       = NULL ;
++       OnList         = 0 ;
++       TState         = 0 ;
++       Notified       = 0 ;
++       IsWaiting      = 0 ;
+     }
+ 
+-  public:
+-    static ParkEvent * Allocate (Thread * t) ; 
+-    static void Release (ParkEvent * e) ; 
+-} ; 
++    // We use placement-new to force ParkEvent instances to be
++    // aligned on 256-byte address boundaries.  This ensures that the least
++    // significant byte of a ParkEvent address is always 0.
+ 
++    void * operator new (size_t sz) ;
++    void operator delete (void * a) ;
+ 
++  public:
++    static ParkEvent * Allocate (Thread * t) ;
++    static void Release (ParkEvent * e) ;
++} ;
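To make the Allocate/Release protocol concrete, a hypothetical call pattern (illustrative only; on_first_park and on_thread_exit are invented names, and the real call sites live elsewhere in the runtime):

void on_first_park(Thread* t) {
  if (t->_ParkEvent == NULL) {
    t->_ParkEvent = ParkEvent::Allocate(t);   // recycles from FreeList if possible
  }
  t->_ParkEvent->park();                      // park() comes from os::PlatformEvent
}

void on_thread_exit(Thread* t) {
  ParkEvent::Release(t->_ParkEvent);          // pushed back onto FreeList
  t->_ParkEvent = NULL;
}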
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/threadLocalStorage.cpp openjdk/hotspot/src/share/vm/runtime/threadLocalStorage.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/threadLocalStorage.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/threadLocalStorage.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)threadLocalStorage.cpp	1.46 07/05/05 17:07:00 JVM"
+-#endif
+ /*
+  * Copyright 1997-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -50,4 +47,3 @@
+   set_thread_index(os::allocate_thread_local_storage());
+   generate_code_for_get_thread();
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/threadLocalStorage.hpp openjdk/hotspot/src/share/vm/runtime/threadLocalStorage.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/threadLocalStorage.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/threadLocalStorage.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)threadLocalStorage.hpp	1.45 07/05/05 17:07:00 JVM"
+-#endif
+ /*
+  * Copyright 1997-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Interface for thread local storage
+@@ -53,14 +50,13 @@
+ 
+  private:
+   static int     _thread_index;
+- 
++
+   static void    generate_code_for_get_thread();
+- 
++
+   // Processor dependent parts of set_thread and initialization
+   static void pd_set_thread(Thread* thread);
+   static void pd_init();
+   // Invalidate any thread caching or optimization schemes.
+   static void pd_invalidate_all();
+-  
+-};
+ 
++};
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/timer.cpp openjdk/hotspot/src/share/vm/runtime/timer.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/timer.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/timer.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)timer.cpp	1.33 07/05/05 17:06:59 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -62,7 +59,7 @@
+   if (!_active) {
+     return ticks();
+   }
+-  jlong counter = _counter + os::elapsed_counter() - _start_counter; 
++  jlong counter = _counter + os::elapsed_counter() - _start_counter;
+   return counter;
+ }
+ 
+@@ -72,7 +69,7 @@
+   assert(is_updated(), "must not look clear");
+ }
+ 
+-void TimeStamp::update() { 
++void TimeStamp::update() {
+   update_to(os::elapsed_counter());
+ }
+ 
+@@ -99,9 +96,9 @@
+ }
+ 
+ TraceTime::TraceTime(const char* title,
+-		     bool doit,
+-		     bool print_cr,
+-		     outputStream* logfile) {
++                     bool doit,
++                     bool print_cr,
++                     outputStream* logfile) {
+   _active   = doit;
+   _verbose  = true;
+   _print_cr = print_cr;
+@@ -120,10 +117,10 @@
+ }
+ 
+ TraceTime::TraceTime(const char* title,
+-		     elapsedTimer* accumulator,
+-		     bool doit,
+-		     bool verbose,
+-		     outputStream* logfile) {
++                     elapsedTimer* accumulator,
++                     bool doit,
++                     bool verbose,
++                     outputStream* logfile) {
+   _active = doit;
+   _verbose = verbose;
+   _print_cr = true;
+@@ -131,8 +128,8 @@
+   if (_active) {
+     if (_verbose) {
+       if (PrintGCTimeStamps) {
+-	_logfile->stamp();
+-	_logfile->print(": ");
++        _logfile->stamp();
++        _logfile->print(": ");
+       }
+       _logfile->print("[%s", title);
+       _logfile->flush();
+@@ -157,14 +154,14 @@
+   }
+ }
+ 
+-TraceCPUTime::TraceCPUTime(bool doit, 
+-	       bool print_cr, 
+-	       outputStream *logfile) :
++TraceCPUTime::TraceCPUTime(bool doit,
++               bool print_cr,
++               outputStream *logfile) :
+   _active(doit),
+   _print_cr(print_cr),
+-  _starting_user_time(0.0), 
+-  _starting_system_time(0.0), 
+-  _starting_real_time(0.0), 
++  _starting_user_time(0.0),
++  _starting_system_time(0.0),
++  _starting_real_time(0.0),
+   _logfile(logfile),
+   _error(false) {
+   if (_active) {
+@@ -174,9 +171,9 @@
+       _logfile = tty;
+     }
+ 
+-    _error = !os::getTimesSecs(&_starting_real_time, 
+-			       &_starting_user_time, 
+-			       &_starting_system_time);    
++    _error = !os::getTimesSecs(&_starting_real_time,
++                               &_starting_user_time,
++                               &_starting_system_time);
+   }
+ }
+ 
+@@ -184,21 +181,21 @@
+   if (_active) {
+     bool valid = false;
+     if (!_error) {
+-      double real_secs;  		// walk clock time
+-      double system_secs;		// system time
+-      double user_secs;			// user time for all threads
++      double real_secs;                 // wall clock time
++      double system_secs;               // system time
++      double user_secs;                 // user time for all threads
+ 
+       double real_time, user_time, system_time;
+       valid = os::getTimesSecs(&real_time, &user_time, &system_time);
+       if (valid) {
+ 
+-	user_secs = user_time - _starting_user_time;
+-	system_secs = system_time - _starting_system_time;
+-	real_secs = real_time - _starting_real_time;
+-
+-	_logfile->print(" [Times: user=%3.2f sys=%3.2f, real=%3.2f secs] ",
+-	  user_secs, system_secs, real_secs);
+-	  
++        user_secs = user_time - _starting_user_time;
++        system_secs = system_time - _starting_system_time;
++        real_secs = real_time - _starting_real_time;
++
++        _logfile->print(" [Times: user=%3.2f sys=%3.2f, real=%3.2f secs] ",
++          user_secs, system_secs, real_secs);
++
+       } else {
+         _logfile->print("[Invalid result in TraceCPUTime]");
+       }
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/timer.hpp openjdk/hotspot/src/share/vm/runtime/timer.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/timer.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/timer.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)timer.hpp	1.36 07/05/05 17:06:59 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Timers for simple measurement.
+@@ -87,14 +84,14 @@
+  public:
+   // Constructors
+   TraceTime(const char* title,
+-	    bool doit = true,
+-	    bool print_cr = true,
+-	    outputStream *logfile = NULL);
++            bool doit = true,
++            bool print_cr = true,
++            outputStream *logfile = NULL);
+   TraceTime(const char* title,
+-	    elapsedTimer* accumulator,
+-	    bool doit = true,
++            elapsedTimer* accumulator,
++            bool doit = true,
+             bool verbose = false,
+-	    outputStream *logfile = NULL );
++            outputStream *logfile = NULL );
+   ~TraceTime();
+ 
+   // Accessors
+@@ -108,17 +105,17 @@
+ 
+ class TraceCPUTime: public StackObj {
+  private:
+-  bool _active;			// true if times will be measured and printed
+-  bool _print_cr;		// if true print carriage return at end
+-  double _starting_user_time;	// user time at start of measurement
++  bool _active;                 // true if times will be measured and printed
++  bool _print_cr;               // if true print carriage return at end
++  double _starting_user_time;   // user time at start of measurement
+   double _starting_system_time; // system time at start of measurement
+-  double _starting_real_time;	// real time at start of measurement
+-  outputStream* _logfile;	// output is printed to this stream
+-  bool _error;			// true if an error occurred, turns off output
++  double _starting_real_time;   // real time at start of measurement
++  outputStream* _logfile;       // output is printed to this stream
++  bool _error;                  // true if an error occurred, turns off output
+ 
+  public:
+-  TraceCPUTime(bool doit = true, 
+-	       bool print_cr = true, 
+-	       outputStream *logfile = NULL);
++  TraceCPUTime(bool doit = true,
++               bool print_cr = true,
++               outputStream *logfile = NULL);
+   ~TraceCPUTime();
+ };
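TraceTime and TraceCPUTime are RAII scopes: construction samples the clocks, destruction prints. A hypothetical usage sketch (do_phase is a placeholder for the work being timed; PrintGCDetails and gclog_or_tty are the usual HotSpot globals):

{
  TraceTime    tt("gc-phase", PrintGCDetails /* doit */);
  TraceCPUTime tc(PrintGCDetails, true /* print_cr */, gclog_or_tty);
  do_phase();
} // destructors run here, printing elapsed time and user/sys/real CPU times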
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/unhandledOops.cpp openjdk/hotspot/src/share/vm/runtime/unhandledOops.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/unhandledOops.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/unhandledOops.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)unhandledOops.cpp	1.11 07/05/05 17:07:00 JVM"
+-#endif
+ /*
+  * Copyright 2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -120,7 +117,7 @@
+     // in the unhandled oop generator.
+     if(!_thread->is_in_stack((address)entry._oop_ptr)) {
+       tty->print_cr("oop_ptr is " INTPTR_FORMAT, (address)entry._oop_ptr);
+-      tty->print_cr("thread is " INTPTR_FORMAT " from pc " INTPTR_FORMAT, 
++      tty->print_cr("thread is " INTPTR_FORMAT " from pc " INTPTR_FORMAT,
+                      (address)_thread, (address)entry._pc);
+       assert(false, "heap is corrupted by the unhandled oop detector");
+     }
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/unhandledOops.hpp openjdk/hotspot/src/share/vm/runtime/unhandledOops.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/unhandledOops.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/unhandledOops.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)unhandledOops.hpp	1.7 07/05/05 17:07:01 JVM"
+-#endif
+ /*
+  * Copyright 2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ #ifdef CHECK_UNHANDLED_OOPS
+ 
+@@ -56,7 +53,7 @@
+  public:
+   oop* oop_ptr() { return _oop_ptr; }
+   UnhandledOopEntry() : _oop_ptr(NULL), _ok_for_gc(false), _pc(NULL) {}
+-  UnhandledOopEntry(oop* op, address pc) : 
++  UnhandledOopEntry(oop* op, address pc) :
+                         _oop_ptr(op),   _ok_for_gc(false), _pc(pc) {}
+ };
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/vframeArray.cpp openjdk/hotspot/src/share/vm/runtime/vframeArray.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/vframeArray.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/vframeArray.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)vframeArray.cpp	1.144 07/06/08 15:21:45 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -91,16 +88,16 @@
+   for(index = 0; index < locs->size(); index++) {
+     StackValue* value = locs->at(index);
+     switch(value->type()) {
+-      case T_OBJECT: 
++      case T_OBJECT:
+         // preserve object type
+-	_locals->add( new StackValue((intptr_t) (value->get_obj()()), T_OBJECT ));
++        _locals->add( new StackValue((intptr_t) (value->get_obj()()), T_OBJECT ));
+         break;
+       case T_CONFLICT:
+-	// A dead local.  Will be initialized to null/zero.
+-	_locals->add( new StackValue());
++        // A dead local.  Will be initialized to null/zero.
++        _locals->add( new StackValue());
+         break;
+       case T_INT:
+-	_locals->add( new StackValue(value->get_int()));
++        _locals->add( new StackValue(value->get_int()));
+         break;
+       default:
+         ShouldNotReachHere();
+@@ -110,23 +107,23 @@
+   // Now the expressions off-stack
+   // Same silliness as above
+ 
+-  StackValueCollection *exprs = vf->expressions();  
++  StackValueCollection *exprs = vf->expressions();
+   _expressions = new StackValueCollection(exprs->size());
+   for(index = 0; index < exprs->size(); index++) {
+     StackValue* value = exprs->at(index);
+     switch(value->type()) {
+-      case T_OBJECT: 
++      case T_OBJECT:
+         // preserve object type
+-	_expressions->add( new StackValue((intptr_t) (value->get_obj()()), T_OBJECT ));
++        _expressions->add( new StackValue((intptr_t) (value->get_obj()()), T_OBJECT ));
+         break;
+       case T_CONFLICT:
+-	// A dead stack element.  Will be initialized to null/zero.
++        // A dead stack element.  Will be initialized to null/zero.
+         // This can occur when the compiler emits a state in which stack
+         // elements are known to be dead (because of an imminent exception).
+-	_expressions->add( new StackValue());
++        _expressions->add( new StackValue());
+         break;
+       case T_INT:
+-	_expressions->add( new StackValue(value->get_int()));
++        _expressions->add( new StackValue(value->get_int()));
+         break;
+       default:
+         ShouldNotReachHere();
+@@ -137,13 +134,10 @@
+ int unpack_counter = 0;
+ 
+ void vframeArrayElement::unpack_on_stack(int callee_parameters,
+-					 int callee_locals,
+-					 frame* caller,
+-					 bool is_top_frame,
+-					 int exec_mode) {
+-#if  defined(CC_INTERP) && !defined(IA64)
+-  ShouldNotReachHere();
+-#else
++                                         int callee_locals,
++                                         frame* caller,
++                                         bool is_top_frame,
++                                         int exec_mode) {
+   JavaThread* thread = (JavaThread*) Thread::current();
+ 
+   // Look at bci and decide on bcp and continuation pc
+@@ -169,7 +163,7 @@
+   // For Compiler2, there should be no pending exception when deoptimizing at monitorenter
+   // because there is no safepoint at the null pointer check (it is either handled explicitly
+   // or prior to the monitorenter) and asynchronous exceptions are not made "pending" by the
+-  // runtime interface for the slow case (see JRT_ENTRY_NO_ASYNC).  If an asynchronous 
++  // runtime interface for the slow case (see JRT_ENTRY_FOR_MONITORENTER).  If an asynchronous
+   // exception was processed, the bytecode pointer would have to be extended one bytecode beyond
+   // the monitorenter to place it in the proper exception range.
+   //
+@@ -194,7 +188,7 @@
+ #else
+         // Do an uncommon trap type entry. c++ interpreter will know
+         // to pop frame and preserve the args
+-	pc = Interpreter::deopt_entry(vtos, 0);
++        pc = Interpreter::deopt_entry(vtos, 0);
+         use_next_mdp = false;
+ #endif
+       } else {
+@@ -248,12 +242,12 @@
+   Interpreter::layout_activation(method(),
+                                  temps + callee_parameters,
+                                  popframe_preserved_args_size_in_words,
+-				 locks,
+-				 callee_parameters,
+-				 callee_locals,
+-				 caller,
+-				 iframe(),
+-				 is_top_frame);
++                                 locks,
++                                 callee_parameters,
++                                 callee_locals,
++                                 caller,
++                                 iframe(),
++                                 is_top_frame);
+ 
+   // Update the pc in the frame object and overwrite the temporary pc
+   // we placed in the skeletal frame now that we finally know the
+@@ -284,7 +278,7 @@
+     }
+   }
+ 
+-  // Unpack expression stack  
++  // Unpack expression stack
+   // If this is an intermediate frame (i.e. not top frame) then this
+   // only unpacks the part of the expression stack not used by callee
+   // as parameters. The callee parameters are unpacked as part of the
+@@ -301,7 +295,7 @@
+         *addr = value->get_int(T_OBJECT);
+         break;
+       case T_CONFLICT:
+-	// A dead stack slot.  Initialize to null in case it is an oop.
++        // A dead stack slot.  Initialize to null in case it is an oop.
+         *addr = NULL_WORD;
+         break;
+       default:
+@@ -309,7 +303,7 @@
+     }
+     if (TaggedStackInterpreter) {
+       // Write tag to the stack
+-      iframe()->interpreter_frame_set_expression_stack_tag(i, 
++      iframe()->interpreter_frame_set_expression_stack_tag(i,
+                                   frame::tag_for_basic_type(value->type()));
+     }
+   }
+@@ -319,7 +313,7 @@
+   for(i = 0; i < locals()->size(); i++) {
+     StackValue *value = locals()->at(i);
+     intptr_t* addr  = iframe()->interpreter_frame_local_at(i);
+-    switch(value->type()) {      
++    switch(value->type()) {
+       case T_INT:
+         *addr = value->get_int();
+         break;
+@@ -327,7 +321,7 @@
+         *addr = value->get_int(T_OBJECT);
+         break;
+       case T_CONFLICT:
+-	// A dead location. If it is an oop then we need a NULL to prevent GC from following it
++        // A dead location. If it is an oop then we need a NULL to prevent GC from following it
+         *addr = NULL_WORD;
+         break;
+       default:
+@@ -335,7 +329,7 @@
+     }
+     if (TaggedStackInterpreter) {
+       // Write tag to stack
+-      iframe()->interpreter_frame_set_local_tag(i, 
++      iframe()->interpreter_frame_set_local_tag(i,
+                                   frame::tag_for_basic_type(value->type()));
+     }
+   }
+@@ -373,16 +367,17 @@
+ 
+ #ifndef PRODUCT
+   if (TraceDeoptimization && Verbose) {
++    ttyLocker ttyl;
+     tty->print_cr("[%d Interpreted Frame]", ++unpack_counter);
+     iframe()->print_on(tty);
+     RegisterMap map(thread);
+     vframe* f = vframe::new_vframe(iframe(), &map, thread);
+     f->print();
+     iframe()->interpreter_frame_print_on(tty);
+-   
++
+     tty->print_cr("locals size     %d", locals()->size());
+     tty->print_cr("expression size %d", expressions()->size());
+-    
++
+     method()->print_value();
+     tty->cr();
+     // method()->print_codes();
+@@ -403,17 +398,16 @@
+ 
+   _locals = _expressions = NULL;
+ 
+-#endif /* !CC_INTERP */
+ }
+ 
+ int vframeArrayElement::on_stack_size(int callee_parameters,
+-				      int callee_locals,
+-				      bool is_top_frame,
+-				      int popframe_extra_stack_expression_els) const {
++                                      int callee_locals,
++                                      bool is_top_frame,
++                                      int popframe_extra_stack_expression_els) const {
+   assert(method()->max_locals() == locals()->size(), "just checking");
+   int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors();
+   int temps = expressions()->size();
+-  return Interpreter::size_activation(method(), 
++  return Interpreter::size_activation(method(),
+                                       temps + callee_parameters,
+                                       popframe_extra_stack_expression_els,
+                                       locks,
+@@ -429,8 +423,8 @@
+ 
+   // Allocate the vframeArray
+   vframeArray * result = (vframeArray*) AllocateHeap(sizeof(vframeArray) + // fixed part
+-						     sizeof(vframeArrayElement) * (chunk->length() - 1), // variable part
+-						     "vframeArray::allocate");
++                                                     sizeof(vframeArrayElement) * (chunk->length() - 1), // variable part
++                                                     "vframeArray::allocate");
+   result->_frames = chunk->length();
+   result->_owner_thread = thread;
+   result->_sender = sender;
+@@ -441,10 +435,10 @@
+   return result;
+ }
+ 
+-void vframeArray::fill_in(JavaThread* thread, 
+-			  int frame_size, 
+-			  GrowableArray<compiledVFrame*>* chunk, 
+-			  const RegisterMap *reg_map) {
++void vframeArray::fill_in(JavaThread* thread,
++                          int frame_size,
++                          GrowableArray<compiledVFrame*>* chunk,
++                          const RegisterMap *reg_map) {
+   // Set owner first, it is used when adding monitor chunks
+ 
+   _frame_size = frame_size;
+@@ -466,12 +460,12 @@
+       // in frame_amd64.cpp and the values of the phantom high half registers
+       // in amd64.ad.
+       //      if (VMReg::Name(i) < SharedInfo::stack0 && is_even(i)) {
+-	intptr_t* src = (intptr_t*) reg_map->location(VMRegImpl::as_VMReg(i));
+-	_callee_registers[i] = src != NULL ? *src : NULL_WORD;
+-	//      } else {
+-	//	jint* src = (jint*) reg_map->location(VMReg::Name(i));
+-	//      _callee_registers[i] = src != NULL ? *src : NULL_WORD;
+-	//      }
++        intptr_t* src = (intptr_t*) reg_map->location(VMRegImpl::as_VMReg(i));
++        _callee_registers[i] = src != NULL ? *src : NULL_WORD;
++        //      } else {
++        //      jint* src = (jint*) reg_map->location(VMReg::Name(i));
++        //      _callee_registers[i] = src != NULL ? *src : NULL_WORD;
++        //      }
+ #else
+       jint* src = (jint*) reg_map->location(VMRegImpl::as_VMReg(i));
+       _callee_registers[i] = src != NULL ? *src : NULL_WORD;
+@@ -491,7 +485,7 @@
+   // stack picture
+   //   unpack_frame
+   //   [new interpreter frames ] (frames are skeletal but walkable)
+-  //   caller_frame 
++  //   caller_frame
+   //
+   //  This routine fills in the missing data for the skeletal interpreter frames
+   //  in the above picture.
+@@ -516,22 +510,17 @@
+   for (index = frames() - 1; index >= 0 ; index--) {
+     int callee_parameters = index == 0 ? 0 : element(index-1)->method()->size_of_parameters();
+     int callee_locals     = index == 0 ? 0 : element(index-1)->method()->max_locals();
+-    element(index)->unpack_on_stack(callee_parameters, 
+-				    callee_locals,
+-				    &caller_frame,
+-				    index == 0,
+-				    exec_mode);
++    element(index)->unpack_on_stack(callee_parameters,
++                                    callee_locals,
++                                    &caller_frame,
++                                    index == 0,
++                                    exec_mode);
+     if (index == frames() - 1) {
+       Deoptimization::unwind_callee_save_values(element(index)->iframe(), this);
+     }
+     caller_frame = *element(index)->iframe();
+   }
+ 
+-#ifdef CC_INTERP
+-#ifndef IA64
+-  ShouldNotReachHere();
+-#endif
+-#endif
+ 
+   deallocate_monitor_chunks();
+ }
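The recurring T_CONFLICT case in the hunks above deserves a condensed note: a dead slot is written as NULL_WORD so that if GC later treats the slot as an oop it finds a safe null rather than stale bits that might look like a pointer. A minimal paraphrase of the per-slot unpack step (not a verbatim excerpt; names as in the patch context):

    switch (value->type()) {
      case T_INT:      *addr = value->get_int();         break;
      case T_OBJECT:   *addr = value->get_int(T_OBJECT); break;  // raw oop bits
      case T_CONFLICT: *addr = NULL_WORD;                break;  // dead slot, keep GC safe
      default:         ShouldNotReachHere();
    }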
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/vframeArray.hpp openjdk/hotspot/src/share/vm/runtime/vframeArray.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/vframeArray.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/vframeArray.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)vframeArray.hpp	1.77 07/05/05 17:07:01 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A vframeArray is an array used for momentarily storing off stack Java method activations
+@@ -75,17 +72,17 @@
+ 
+   // Returns the on stack word size for this frame
+   // callee_parameters is the number of callee locals residing inside this frame
+-  int on_stack_size(int callee_parameters, 
+-		    int callee_locals, 
+-		    bool is_top_frame, 
+-		    int popframe_extra_stack_expression_els) const;
++  int on_stack_size(int callee_parameters,
++                    int callee_locals,
++                    bool is_top_frame,
++                    int popframe_extra_stack_expression_els) const;
+ 
+   // Unpacks the element to skeletal interpreter frame
+-  void unpack_on_stack(int callee_parameters, 
+-		       int callee_locals,
+-		       frame* caller,
+-		       bool is_top_frame,
+-		       int exec_mode);
++  void unpack_on_stack(int callee_parameters,
++                       int callee_locals,
++                       frame* caller,
++                       bool is_top_frame,
++                       int exec_mode);
+ 
+ #ifndef PRODUCT
+   void print(outputStream* st);
+@@ -103,16 +100,16 @@
+   // Here is what a vframeArray looks like in memory
+ 
+   /*
+-      fixed part 
+-	description of the original frame
+-	_frames - number of vframes in this array
+-	adapter info
+-	callee register save area
++      fixed part
++        description of the original frame
++        _frames - number of vframes in this array
++        adapter info
++        callee register save area
+       variable part
+-	vframeArrayElement   [ 0 ]
+-	...
+-	vframeArrayElement   [_frames - 1]
+-       
++        vframeArrayElement   [ 0 ]
++        ...
++        vframeArrayElement   [_frames - 1]
++
+   */
+ 
+   JavaThread*                  _owner_thread;
+@@ -129,7 +126,7 @@
+   intptr_t                     _callee_registers[RegisterMap::reg_count];
+   unsigned char                _valid[RegisterMap::reg_count];
+ 
+-  vframeArrayElement           _elements[1];   // First variable section. 
++  vframeArrayElement           _elements[1];   // First variable section.
+ 
+   void fill_in_element(int index, compiledVFrame* vf);
+ 
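The layout comment above describes the classic variable-length tail idiom: _elements is declared with a single slot and vframeArray::allocate (in the .cpp hunk earlier) over-allocates sizeof(vframeArrayElement) * (chunk->length() - 1) extra bytes. A self-contained sketch of the idiom with hypothetical names; indexing past the declared bound is formally undefined in ISO C++ but is exactly what this code relies on and is universally supported:

    #include <cstdlib>

    struct Element { long payload; };

    struct Array {
      int     _frames;       // number of elements actually present
      Element _elements[1];  // first slot of the variable-length tail
      Element* element(int i) { return &_elements[i]; }
    };

    Array* allocate_array(int n) {
      // Fixed part plus (n - 1) additional tail elements, mirroring AllocateHeap above.
      Array* a = (Array*) std::malloc(sizeof(Array) + sizeof(Element) * (n - 1));
      a->_frames = n;
      return a;
    }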
+@@ -200,6 +197,5 @@
+   // Comparing
+   bool structural_compare(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk);
+ #endif
+-  
+-};
+ 
++};
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/vframe.cpp openjdk/hotspot/src/share/vm/runtime/vframe.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/vframe.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/vframe.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)vframe.cpp	1.162 07/05/17 16:07:02 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,19 +19,19 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_vframe.cpp.incl"
+ 
+-vframe::vframe(const frame* fr, const RegisterMap* reg_map, JavaThread* thread) 
++vframe::vframe(const frame* fr, const RegisterMap* reg_map, JavaThread* thread)
+ : _reg_map(reg_map), _thread(thread) {
+   assert(fr != NULL, "must have frame");
+   _fr = *fr;
+ }
+ 
+-vframe::vframe(const frame* fr, JavaThread* thread) 
++vframe::vframe(const frame* fr, JavaThread* thread)
+ : _reg_map(thread), _thread(thread) {
+   assert(fr != NULL, "must have frame");
+   _fr = *fr;
+@@ -44,18 +41,18 @@
+   // Interpreter frame
+   if (f->is_interpreted_frame()) {
+     return new interpretedVFrame(f, reg_map, thread);
+-  }  
++  }
+ 
+   // Compiled frame
+   CodeBlob* cb = f->cb();
+-  if (cb != NULL) {  
+-    if (cb->is_nmethod()) {      
+-      nmethod* nm = (nmethod*)cb;            
++  if (cb != NULL) {
++    if (cb->is_nmethod()) {
++      nmethod* nm = (nmethod*)cb;
+       return new compiledVFrame(f, reg_map, thread, nm);
+     }
+ 
+     if (f->is_runtime_frame()) {
+-      // Skip this frame and try again.      
++      // Skip this frame and try again.
+       RegisterMap temp_map = *reg_map;
+       frame s = f->sender(&temp_map);
+       return new_vframe(&s, &temp_map, thread);
+@@ -68,7 +65,7 @@
+ 
+ vframe* vframe::sender() const {
+   RegisterMap temp_map = *register_map();
+-  assert(is_top(), "just checking"); 
++  assert(is_top(), "just checking");
+   if (_fr.is_entry_frame() && _fr.is_first_frame()) return NULL;
+   frame s = _fr.real_sender(&temp_map);
+   if (s.is_first_frame()) return NULL;
+@@ -96,23 +93,23 @@
+ GrowableArray<MonitorInfo*>* javaVFrame::locked_monitors() {
+   assert(SafepointSynchronize::is_at_safepoint() || JavaThread::current() == thread(),
+          "must be at safepoint or it's a java frame of the current thread");
+-                                                                                
++
+   GrowableArray<MonitorInfo*>* mons = monitors();
+   GrowableArray<MonitorInfo*>* result = new GrowableArray<MonitorInfo*>(mons->length());
+   if (mons->is_empty()) return result;
+-                                                                                
++
+   bool found_first_monitor = false;
+   ObjectMonitor *pending_monitor = thread()->current_pending_monitor();
+   ObjectMonitor *waiting_monitor = thread()->current_waiting_monitor();
+   oop pending_obj = (pending_monitor != NULL ? (oop) pending_monitor->object() : NULL);
+   oop waiting_obj = (waiting_monitor != NULL ? (oop) waiting_monitor->object() : NULL);
+-                                                                                
++
+   for (int index = (mons->length()-1); index >= 0; index--) {
+     MonitorInfo* monitor = mons->at(index);
+     oop obj = monitor->owner();
+     if (obj == NULL) continue; // skip unowned monitor
+     //
+-    // Skip the monitor that the thread is blocked to enter or waiting on 
++    // Skip the monitor that the thread is blocked to enter or waiting on
+     //
+     if (!found_first_monitor && (obj == pending_obj || obj == waiting_obj)) {
+       continue;
+@@ -136,21 +133,21 @@
+   }
+ }
+ 
+-void javaVFrame::print_lock_info_on(outputStream* st, int frame_count) {        
++void javaVFrame::print_lock_info_on(outputStream* st, int frame_count) {
+   ResourceMark rm;
+ 
+   // If this is the first frame, and java.lang.Object.wait(...) then print out the receiver.
+   if (frame_count == 0) {
+-    if (method()->name() == vmSymbols::wait_name() && 
++    if (method()->name() == vmSymbols::wait_name() &&
+         instanceKlass::cast(method()->method_holder())->name() == vmSymbols::java_lang_Object()) {
+-      StackValueCollection* locs = locals();      
++      StackValueCollection* locs = locals();
+       if (!locs->is_empty()) {
+         StackValue* sv = locs->at(0);
+         if (sv->type() == T_OBJECT) {
+-          Handle o = locs->at(0)->get_obj();    
++          Handle o = locs->at(0)->get_obj();
+           print_locked_object_class_name(st, o, "waiting on");
+         }
+-      }        
++      }
+     } else if (thread()->current_park_blocker() != NULL) {
+       oop obj = thread()->current_park_blocker();
+       Klass* k = Klass::cast(obj->klass());
+@@ -158,7 +155,7 @@
+     }
+   }
+ 
+-  
++
+   // Print out all monitors that we have locked or are trying to lock
+   GrowableArray<MonitorInfo*>* mons = monitors();
+   if (!mons->is_empty()) {
+@@ -174,17 +171,17 @@
+ 
+         const char *lock_state = "locked"; // assume we have the monitor locked
+         if (!found_first_monitor && frame_count == 0) {
+-	 markOop mark = monitor->owner()->mark();
+-	 if (mark->has_monitor() && 
+-	     mark->monitor() == thread()->current_pending_monitor()) {
++         markOop mark = monitor->owner()->mark();
++         if (mark->has_monitor() &&
++             mark->monitor() == thread()->current_pending_monitor()) {
+             lock_state = "waiting to lock";
+-	 }
++         }
+         }
+ 
+         found_first_monitor = true;
+-        print_locked_object_class_name(st, monitor->owner(), lock_state); 
++        print_locked_object_class_name(st, monitor->owner(), lock_state);
+       }
+-    }  
++    }
+   }
+ }
+ 
+@@ -224,10 +221,10 @@
+ 
+ StackValueCollection* interpretedVFrame::locals() const {
+   int length = method()->max_locals();
+- 
++
+   if (method()->is_native()) {
+     // If the method is native, max_locals is not telling the truth.
+-    // maxlocals then equals the size of parameters 
++    // max_locals then equals the size of parameters
+     length = method()->size_of_parameters();
+   }
+ 
+@@ -237,10 +234,10 @@
+   if (TaggedStackInterpreter) {
+     for(int i=0; i < length; i++) {
+       // Find stack location
+-      intptr_t *addr = locals_addr_at(i); 
++      intptr_t *addr = locals_addr_at(i);
+ 
+       // Depending on oop/int put it in the right package
+-      StackValue *sv;    
++      StackValue *sv;
+       frame::Tag tag = fr().interpreter_frame_local_tag(i);
+       if (tag == frame::TagReference) {
+         // oop value
+@@ -264,10 +261,10 @@
+     // handle locals
+     for(int i=0; i < length; i++) {
+       // Find stack location
+-      intptr_t *addr = locals_addr_at(i); 
++      intptr_t *addr = locals_addr_at(i);
+ 
+       // Depending on oop/int put it in the right package
+-      StackValue *sv;    
++      StackValue *sv;
+       if (oop_mask.is_oop(i)) {
+         // oop value
+         Handle h(*(oop *)addr);
+@@ -289,7 +286,7 @@
+   int length = method()->max_locals();
+   if (method()->is_native()) {
+     // If the method is native, max_locals is not telling the truth.
+-    // maxlocals then equals the size of parameters 
++    // max_locals then equals the size of parameters
+     length = method()->size_of_parameters();
+   }
+ 
+@@ -298,7 +295,7 @@
+   // handle locals
+   for (int i = 0; i < length; i++) {
+     // Find stack location
+-    intptr_t *addr = locals_addr_at(i); 
++    intptr_t *addr = locals_addr_at(i);
+ 
+     // Depending on oop/int put it in the right package
+     StackValue *sv = values->at(i);
+@@ -329,7 +326,7 @@
+       frame::Tag tag = fr().interpreter_frame_expression_stack_tag(i);
+ 
+       // Depending on oop/int put it in the right package
+-      StackValue *sv;    
++      StackValue *sv;
+       if (tag == frame::TagReference) {
+         // oop value
+         Handle h(*(oop *)addr);
+@@ -356,7 +353,7 @@
+       intptr_t *addr = fr().interpreter_frame_expression_stack_at(i);
+ 
+       // Depending on oop/int put it in the right package
+-      StackValue *sv;    
++      StackValue *sv;
+       if (oop_mask.is_oop(i + nof_locals)) {
+         // oop value
+         Handle h(*(oop *)addr);
+@@ -364,7 +361,7 @@
+       } else {
+         // integer
+         sv = new StackValue(*addr);
+-      }    
++      }
+       assert(sv != NULL, "sanity check");
+       result->add(sv);
+     }
+@@ -401,18 +398,18 @@
+ 
+ 
+ // Step back n frames, skip any pseudo frames in between.
+-// This function is used in Class.forName, Class.newInstance, Method.Invoke, 
++// This function is used in Class.forName, Class.newInstance, Method.Invoke,
+ // AccessController.doPrivileged.
+ //
+ // NOTE that in JDK 1.4 this has been exposed to Java as
+ // sun.reflect.Reflection.getCallerClass(), which can be inlined.
+-// Inlined versions must match this routine's logic. 
++// Inlined versions must match this routine's logic.
+ // Native method prefixing logic does not need to match since
+ // the method names don't match and inlining will not occur.
+ // See, for example,
+ // Parse::inline_native_Reflection_getCallerClass in
+ // opto/library_call.cpp.
+-void vframeStreamCommon::security_get_caller_frame(int depth) { 
++void vframeStreamCommon::security_get_caller_frame(int depth) {
+   bool use_new_reflection = JDK_Version::is_gte_jdk14x_version() && UseNewReflection;
+ 
+   while (!at_end()) {
+@@ -436,7 +433,7 @@
+     }
+   }
+ }
+-  
++
+ 
+ void vframeStreamCommon::skip_prefixed_method_and_wrappers() {
+   ResourceMark rm;
+@@ -463,7 +460,7 @@
+     for (; prefix_index >= 0; --prefix_index) {
+       const char* possible_prefix = method_prefixes[prefix_index];
+       size_t possible_prefix_len = strlen(possible_prefix);
+-      if (possible_prefix_len == prefix_len && 
++      if (possible_prefix_len == prefix_len &&
+           strncmp(possible_prefix, prefixed_name, prefix_len) == 0) {
+         break; // matching prefix found
+       }
+@@ -497,8 +494,8 @@
+   ((vframe*)this)->print();
+ }
+ 
+- 
+-void entryVFrame::print_value() const { 
++
++void entryVFrame::print_value() const {
+   ((entryVFrame*)this)->print();
+ }
+ 
+@@ -521,7 +518,7 @@
+ void javaVFrame::print() {
+   ResourceMark rm;
+   vframe::print();
+-  tty->print("\t"); 
++  tty->print("\t");
+   method()->print_value();
+   tty->cr();
+   tty->print_cr("\tbci:    %d", bci());
+@@ -534,12 +531,12 @@
+   tty->print_cr("\tmonitor list:");
+   for (int index = (list->length()-1); index >= 0; index--) {
+     MonitorInfo* monitor = list->at(index);
+-    tty->print("\t  obj\t"); monitor->owner()->print_value(); 
++    tty->print("\t  obj\t"); monitor->owner()->print_value();
+     tty->print("(" INTPTR_FORMAT ")", (address)monitor->owner());
+     tty->cr();
+     tty->print("\t  ");
+     monitor->lock()->print_on(tty);
+-    tty->cr(); 
++    tty->cr();
+   }
+ }
+ 
+@@ -547,7 +544,8 @@
+ void javaVFrame::print_value() const {
+   methodOop  m = method();
+   klassOop   k = m->method_holder();
+-  tty->print_cr("frame( sp=" INTPTR_FORMAT ", fp=" INTPTR_FORMAT ", pc=" INTPTR_FORMAT ")", _fr.sp(), _fr.fp(), _fr.pc());
++  tty->print_cr("frame( sp=" INTPTR_FORMAT ", unextended_sp=" INTPTR_FORMAT ", fp=" INTPTR_FORMAT ", pc=" INTPTR_FORMAT ")",
++                _fr.sp(),  _fr.unextended_sp(), _fr.fp(), _fr.pc());
+   tty->print("%s.%s", Klass::cast(k)->internal_name(), m->name()->as_C_string());
+ 
+   if (!m->is_native()) {
+@@ -609,7 +607,7 @@
+   tty->print("%2d - ", index);
+   ((vframe*)this)->print_value();
+   tty->cr();
+-    
++
+   if (WizardMode) {
+     ((vframe*)this)->print();
+     tty->cr();
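The nested test reindented in the print_lock_info_on hunk above packs its logic rather densely; a condensed restatement of the "waiting to lock" determination, using the same names:

    // The first monitor of the top frame is only pending (not yet owned) when
    // the owner's mark word carries a monitor and that monitor is the one this
    // thread is currently blocked trying to enter.
    markOop mark = monitor->owner()->mark();
    bool pending = mark->has_monitor()
                && mark->monitor() == thread()->current_pending_monitor();
    const char* lock_state = pending ? "waiting to lock" : "locked";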
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/vframe_hp.cpp openjdk/hotspot/src/share/vm/runtime/vframe_hp.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/vframe_hp.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/vframe_hp.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)vframe_hp.cpp	1.158 07/05/05 17:07:02 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -31,7 +28,7 @@
+ 
+ // ------------- compiledVFrame --------------
+ 
+-StackValueCollection* compiledVFrame::locals() const {  
++StackValueCollection* compiledVFrame::locals() const {
+   // Natives has no scope
+   if (scope() == NULL) return new StackValueCollection(0);
+   GrowableArray<ScopeValue*>*  scv_list = scope()->locals();
+@@ -49,8 +46,8 @@
+     // In real life this never happens or is typically a single element search
+     for (int i = 0; i < list->length(); i++) {
+       if (list->at(i)->matches((vframe*)this)) {
+-	deferred = list->at(i)->locals();
+-	break;
++        deferred = list->at(i)->locals();
++        break;
+       }
+     }
+   }
+@@ -65,37 +62,37 @@
+       jvmtiDeferredLocalVariable* val = deferred->at(l);
+       switch (val->type()) {
+       case T_BOOLEAN:
+-	result->set_int_at(val->index(), val->value().z);
+-	break;
++        result->set_int_at(val->index(), val->value().z);
++        break;
+       case T_CHAR:
+-	result->set_int_at(val->index(), val->value().c);
+-	break;
++        result->set_int_at(val->index(), val->value().c);
++        break;
+       case T_FLOAT:
+-	result->set_float_at(val->index(), val->value().f);
+-	break;
++        result->set_float_at(val->index(), val->value().f);
++        break;
+       case T_DOUBLE:
+-	result->set_double_at(val->index(), val->value().d);
+-	break;
++        result->set_double_at(val->index(), val->value().d);
++        break;
+       case T_BYTE:
+-	result->set_int_at(val->index(), val->value().b);
+-	break;
++        result->set_int_at(val->index(), val->value().b);
++        break;
+       case T_SHORT:
+-	result->set_int_at(val->index(), val->value().s);
+-	break;
++        result->set_int_at(val->index(), val->value().s);
++        break;
+       case T_INT:
+-	result->set_int_at(val->index(), val->value().i);
+-	break;
++        result->set_int_at(val->index(), val->value().i);
++        break;
+       case T_LONG:
+-	result->set_long_at(val->index(), val->value().j);
+-	break;
+-      case T_OBJECT: 
+-	{
+-	  Handle obj((oop)val->value().l);
+-	  result->set_obj_at(val->index(), obj);
+-	}
+-	break;
++        result->set_long_at(val->index(), val->value().j);
++        break;
++      case T_OBJECT:
++        {
++          Handle obj((oop)val->value().l);
++          result->set_obj_at(val->index(), obj);
++        }
++        break;
+       default:
+-	ShouldNotReachHere();
++        ShouldNotReachHere();
+       }
+     }
+   }
+@@ -121,18 +118,18 @@
+     int f;
+     for ( f = 0 ; f < deferred->length() ; f++ ) {
+       if (deferred->at(f)->matches(this)) {
+-	// Matching, vframe now see if the local already had deferred write
+-	GrowableArray<jvmtiDeferredLocalVariable*>* locals = deferred->at(f)->locals();
+-	int l;
+-	for (l = 0 ; l < locals->length() ; l++ ) {
+-	  if (locals->at(l)->index() == index) {
+-	    locals->at(l)->set_value(value);
+-	    return;
+-	  }
+-	}
+-	// No matching local already present. Push a new value onto the deferred collection
+-	locals->push(new jvmtiDeferredLocalVariable(index, type, value));
+-	return;
++        // Matching vframe found; now see if the local already had a deferred write
++        GrowableArray<jvmtiDeferredLocalVariable*>* locals = deferred->at(f)->locals();
++        int l;
++        for (l = 0 ; l < locals->length() ; l++ ) {
++          if (locals->at(l)->index() == index) {
++            locals->at(l)->set_value(value);
++            return;
++          }
++        }
++        // No matching local already present. Push a new value onto the deferred collection
++        locals->push(new jvmtiDeferredLocalVariable(index, type, value));
++        return;
+       }
+     }
+     // No matching vframe must push a new vframe
+@@ -163,130 +160,17 @@
+   return result;
+ }
+ 
+-StackValue *compiledVFrame::create_stack_value(ScopeValue *sv) const {
+-  if (sv->is_location()) {
+-    // Stack or register value
+-    Location loc = ((LocationValue *)sv)->location();
+-
+-#ifdef SPARC
+-    // %%%%% Callee-save floats will NOT be working on a Sparc until we
+-    // handle the case of a 2 floats in a single double register.
+-    assert( !(loc.is_register() && loc.type() == Location::float_in_dbl), "Sparc does not handle callee-save floats yet" );
+-#endif // SPARC
+-
+-    // First find address of value
+-
+-    address value_addr = loc.is_register()
+-      // Value was in a callee-save register
+-      ? register_map()->location(VMRegImpl::as_VMReg(loc.register_number()))
+-      // Else value was directly saved on the stack. The frame's original stack pointer,
+-      // before any extension by its callee (due to Compiler1 linkage on SPARC), must be used.
+-      : ((address)_fr.unextended_sp()) + loc.stack_offset();
+-
+-    // Then package it right depending on type
+-    // Note: the transfer of the data is thru a union that contains
+-    // an intptr_t. This is because an interpreter stack slot is
+-    // really an intptr_t. The use of a union containing an intptr_t
+-    // ensures that on a 64 bit platform we have proper alignment
+-    // and that we store the value where the interpreter will expect
+-    // to find it (i.e. proper endian). Similarly on a 32bit platform
+-    // using the intptr_t ensures that when a value is larger than
+-    // a stack slot (jlong/jdouble) that we capture the proper part
+-    // of the value for the stack slot in question.
+-    //
+-    switch( loc.type() ) {
+-    case Location::float_in_dbl: { // Holds a float in a double register?
+-      // The callee has no clue whether the register holds a float,
+-      // double or is unused.  He always saves a double.  Here we know
+-      // a double was saved, but we only want a float back.  Narrow the
+-      // saved double to the float that the JVM wants.
+-      assert( loc.is_register(), "floats always saved to stack in 1 word" );
+-      union { intptr_t p; jfloat jf; } value;
+-      value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
+-      value.jf = (jfloat) *(jdouble*) value_addr;
+-      return new StackValue(value.p); // 64-bit high half is stack junk
+-    }
+-    case Location::int_in_long: { // Holds an int in a long register?
+-      // The callee has no clue whether the register holds an int,
+-      // long or is unused.  He always saves a long.  Here we know
+-      // a long was saved, but we only want an int back.  Narrow the
+-      // saved long to the int that the JVM wants.
+-      assert( loc.is_register(), "ints always saved to stack in 1 word" );
+-      union { intptr_t p; jint ji;} value;
+-      value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
+-      value.ji = (jint) *(jlong*) value_addr;
+-      return new StackValue(value.p); // 64-bit high half is stack junk
+-    }
+-#ifdef _LP64
+-    case Location::dbl:
+-      // Double value in an aligned adjacent pair
+-      return new StackValue(*(intptr_t*)value_addr);
+-    case Location::lng:
+-      // Long   value in an aligned adjacent pair
+-      return new StackValue(*(intptr_t*)value_addr);
+-#endif
+-    case Location::oop: {
+-      Handle h(*(oop *)value_addr); // Wrap a handle around the oop
+-      return new StackValue(h);
+-    }
+-    case Location::addr: {
+-      ShouldNotReachHere(); // both C1 and C2 now inline jsrs
+-    }
+-    case Location::normal: {
+-      // Just copy all other bits straight through
+-      union { intptr_t p; jint ji;} value;
+-      value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
+-      value.ji = *(jint*)value_addr;
+-      return new StackValue(value.p);
+-    }
+-    case Location::invalid:
+-      return new StackValue();
+-    default:
+-      ShouldNotReachHere();
+-    }
+ 
+-  } else if (sv->is_constant_int()) {
+-    // Constant int: treat same as register int.
+-    union { intptr_t p; jint ji;} value;
+-    value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
+-    value.ji = (jint)((ConstantIntValue*)sv)->value();
+-    return new StackValue(value.p); 
+-  } else if (sv->is_constant_oop()) {
+-    // constant oop        
+-    return new StackValue(((ConstantOopReadValue *)sv)->value());
+-#ifdef _LP64
+-  } else if (sv->is_constant_double()) {
+-    // Constant double in a single stack slot
+-    union { intptr_t p; double d; } value;
+-    value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
+-    value.d = ((ConstantDoubleValue *)sv)->value();
+-    return new StackValue(value.p);
+-  } else if (sv->is_constant_long()) {
+-    // Constant long in a single stack slot
+-    union { intptr_t p; jlong jl; } value;
+-    value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
+-    value.jl = ((ConstantLongValue *)sv)->value();
+-    return new StackValue(value.p);
+-#endif
+-  }
+-
+-  // Unknown ScopeValue type
+-  ShouldNotReachHere();    
+-  return new StackValue((intptr_t) 0);   // dummy  
++// The implementation of the following two methods was factored into the
++// class StackValue because it is also used from within deoptimization.cpp for
++// rematerialization and relocking of non-escaping objects.
++
++StackValue *compiledVFrame::create_stack_value(ScopeValue *sv) const {
++  return StackValue::create_stack_value(&_fr, register_map(), sv);
+ }
+ 
+ BasicLock* compiledVFrame::resolve_monitor_lock(Location location) const {
+-  assert(location.is_stack(), "for now we only look at the stack");
+-  int word_offset = location.stack_offset() / wordSize;
+-  // (stack picture)
+-  // high: [     ]  word_offset + 1
+-  // low   [     ]  word_offset
+-  //       
+-  // sp->  [     ]  0
+-  // the word_offset is the distance from the stack pointer to the lowest address
+-  // The frame's original stack pointer, before any extension by its callee
+-  // (due to Compiler1 linkage on SPARC), must be used.
+-  return (BasicLock*) (fr().unextended_sp() + word_offset);
++  return StackValue::resolve_monitor_lock(&_fr, location);
+ }
+ 
+ 
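The block removed above (now delegated to StackValue::create_stack_value) leaned on one idiom worth keeping in view: values narrower than a stack slot travel through a union with an intptr_t member, so the interesting bits land where the interpreter expects them regardless of endianness and word size. A standalone illustration in plain C++, nothing HotSpot-specific; the sentinel constant is copied from the hunk:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // A callee-save register always holds a saved double, but the JVM may
      // only want a float back: poison the slot, then write the narrowed
      // value through the union so it occupies the correct bytes.
      union { intptr_t p; float jf; } value;
      value.p  = (intptr_t) 0xDEADDEAFDEADDEAFULL;  // unused half stays stack junk
      double saved_double = 2.5;
      value.jf = (float) saved_double;
      std::printf("narrowed float: %f\n", (double) value.jf);
      return 0;
    }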
+@@ -389,7 +273,7 @@
+     assert(nm->is_native_method(), "must be native");
+     return vframe::sender();
+   } else {
+-    return scope()->is_top() 
++    return scope()->is_top()
+       ? vframe::sender()
+       : new compiledVFrame(&f, register_map(), thread(), scope()->sender());
+   }
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/vframe_hp.hpp openjdk/hotspot/src/share/vm/runtime/vframe_hp.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/vframe_hp.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/vframe_hp.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)vframe_hp.hpp	1.54 07/05/05 17:07:01 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class compiledVFrame: public javaVFrame {
+@@ -48,7 +45,7 @@
+   }
+ 
+  public:
+-  // Constructors  
++  // Constructors
+   compiledVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread, nmethod* nm);
+ 
+   // Update a local in a compiled frame. Update happens when deopt occurs
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/vframe.hpp openjdk/hotspot/src/share/vm/runtime/vframe.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/vframe.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/vframe.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)vframe.hpp	1.89 07/05/17 16:07:04 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,11 +19,11 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // vframes are virtual stack frames representing source level activations.
+-// A single frame may hold several source level activations in the case of 
++// A single frame may hold several source level activations in the case of
+ // optimized code. The debugging stored with the optimized code enables
+ // us to unfold a frame as a stack of vframes.
+ // A cVFrame represents an activation of a non-java method.
+@@ -52,13 +49,13 @@
+  public:
+   // Factory method for creating vframes
+   static vframe* new_vframe(const frame* f, const RegisterMap *reg_map, JavaThread* thread);
+-  
++
+   // Accessors
+   frame              fr()           const { return _fr;       }
+   CodeBlob*          cb()         const { return _fr.cb();  }
+-  nmethod*           nm()         const { 
+-      assert( cb() != NULL && cb()->is_nmethod(), "usage"); 
+-      return (nmethod*) cb(); 
++  nmethod*           nm()         const {
++      assert( cb() != NULL && cb()->is_nmethod(), "usage");
++      return (nmethod*) cb();
+   }
+ 
+ // ???? Does this need to be a copy?
+@@ -72,15 +69,15 @@
+   // Returns the next javaVFrame on the stack (skipping all other kinds of frame)
+   javaVFrame *java_sender() const;
+ 
+-  // Answers if the this is the top vframe in the frame, i.e., if the sender vframe 
++  // Answers whether this is the top vframe in the frame, i.e., whether the sender vframe
+   // is in the caller frame
+   virtual bool is_top() const { return true; }
+ 
+-  // Returns top vframe within same frame (see is_top())	
++  // Returns top vframe within same frame (see is_top())
+   virtual vframe* top() const;
+ 
+   // Type testing operations
+-  virtual bool is_entry_frame()       const { return false; }  
++  virtual bool is_entry_frame()       const { return false; }
+   virtual bool is_java_frame()        const { return false; }
+   virtual bool is_interpreted_frame() const { return false; }
+   virtual bool is_compiled_frame()    const { return false; }
+@@ -99,7 +96,7 @@
+   virtual methodOop                    method()         const = 0;
+   virtual int                          bci()            const = 0;
+   virtual StackValueCollection*        locals()         const = 0;
+-  virtual StackValueCollection*        expressions()    const = 0;  
++  virtual StackValueCollection*        expressions()    const = 0;
+   // the order returned by monitors() is from oldest -> youngest#4418568
+   virtual GrowableArray<MonitorInfo*>* monitors()       const = 0;
+ 
+@@ -108,7 +105,7 @@
+   // Deoptimize first if necessary.
+   virtual void set_locals(StackValueCollection* values) const = 0;
+ 
+-  // Test operation 
++  // Test operation
+   bool is_java_frame() const { return true; }
+ 
+  protected:
+@@ -127,7 +124,7 @@
+ 
+   // printing used during stack dumps
+   void print_lock_info_on(outputStream* st, int frame_count);
+-  void print_lock_info(int frame_count)	{ print_lock_info_on(tty, frame_count); }
++  void print_lock_info(int frame_count) { print_lock_info_on(tty, frame_count); }
+ 
+ #ifndef PRODUCT
+  public:
+@@ -179,7 +176,7 @@
+ 
+   // returns where the parameters starts relative to the frame pointer
+   int start_of_parameters() const;
+-  
++
+ #ifndef PRODUCT
+  public:
+   // verify operations
+@@ -218,7 +215,7 @@
+ 
+ #ifndef PRODUCT
+  public:
+-  // printing 
++  // printing
+   void print_value() const;
+   void print();
+ #endif
+@@ -246,7 +243,7 @@
+ 
+ class vframeStreamCommon : StackObj {
+  protected:
+-  // common 
++  // common
+   frame        _frame;
+   JavaThread*  _thread;
+   RegisterMap  _reg_map;
+@@ -286,9 +283,9 @@
+   address frame_pc() const { return _frame.pc(); }
+ 
+   CodeBlob*          cb()         const { return _frame.cb();  }
+-  nmethod*           nm()         const { 
+-      assert( cb() != NULL && cb()->is_nmethod(), "usage"); 
+-      return (nmethod*) cb(); 
++  nmethod*           nm()         const {
++      assert( cb() != NULL && cb()->is_nmethod(), "usage");
++      return (nmethod*) cb();
+   }
+ 
+   // Frame type
+@@ -302,7 +299,7 @@
+ 
+     // handle general case
+     do {
+-      _frame = _frame.sender(&_reg_map);    
++      _frame = _frame.sender(&_reg_map);
+     } while (!fill_from_frame());
+   }
+ 
+@@ -386,32 +383,32 @@
+   _method               = methodOop(buffer.read_oop());
+   _bci                  = buffer.read_bci();
+ 
+-  assert(_method->is_method(), "checking type of decoded method");  
++  assert(_method->is_method(), "checking type of decoded method");
+ }
+ 
+ // The native frames are handled specially. We do not rely on ScopeDesc info
+ // since the pc might not be exact due to the _last_native_pc trick.
+ inline void vframeStreamCommon::fill_from_compiled_native_frame() {
+   _mode = compiled_mode;
+-  _sender_decode_offset = DebugInformationRecorder::serialized_null; 
++  _sender_decode_offset = DebugInformationRecorder::serialized_null;
+   _method = nm()->method();
+   _bci = 0;
+ }
+ 
+-inline bool vframeStreamCommon::fill_from_frame() {  
++inline bool vframeStreamCommon::fill_from_frame() {
+   // Interpreted frame
+   if (_frame.is_interpreted_frame()) {
+     fill_from_interpreter_frame();
+     return true;
+   }
+ 
+-  // Compiled frame  
++  // Compiled frame
+ 
+   if (cb() != NULL && cb()->is_nmethod()) {
+     if (nm()->is_native_method()) {
+      // Do not rely on scopeDesc since the pc might be imprecise due to the _last_native_pc trick.
+       fill_from_compiled_native_frame();
+-    } else {    
++    } else {
+       PcDesc* pc_desc = nm()->pc_desc_at(_frame.pc());
+       int decode_offset;
+       if (pc_desc == NULL) {
+@@ -429,7 +426,7 @@
+   if (_frame.is_first_frame() || (_stop_at_java_call_stub && _frame.is_entry_frame())) {
+     _mode = at_end_mode;
+     return true;
+-  }    
++  }
+ 
+   return false;
+ }
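fill_from_frame() above dispatches on the frame type each time the stream advances. A typical walk, following the usual HotSpot pattern; the accessor names are assumed from the fields and fragments visible in these hunks rather than verified against the full header:

    // Hypothetical stack walk over the Java frames of a thread.
    for (vframeStream vfst(thread); !vfst.at_end(); vfst.next()) {
      methodOop m   = vfst.method();  // filled in by fill_from_*_frame()
      int       bci = vfst.bci();
      // ... inspect m / bci; next() skips non-Java frames internally ...
    }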
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/virtualspace.cpp openjdk/hotspot/src/share/vm/runtime/virtualspace.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/virtualspace.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/virtualspace.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)virtualspace.cpp	1.62 07/05/05 17:07:03 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -34,43 +31,178 @@
+   initialize(size, 0, false, NULL);
+ }
+ 
+-ReservedSpace::ReservedSpace(size_t size, size_t forced_base_alignment,
++ReservedSpace::ReservedSpace(size_t size, size_t alignment,
+                              bool large, char* requested_address) {
+-  initialize(size, forced_base_alignment, large, requested_address);
++  initialize(size, alignment, large, requested_address);
+ }
+ 
+-void ReservedSpace::initialize(size_t size, size_t forced_base_alignment,
+-                               bool large, char* requested_address) {
++char *
++ReservedSpace::align_reserved_region(char* addr, const size_t len,
++                                     const size_t prefix_size,
++                                     const size_t prefix_align,
++                                     const size_t suffix_size,
++                                     const size_t suffix_align)
++{
++  assert(addr != NULL, "sanity");
++  const size_t required_size = prefix_size + suffix_size;
++  assert(len >= required_size, "len too small");
++
++  const size_t s = size_t(addr);
++  const size_t beg_ofs = s + prefix_size & suffix_align - 1;
++  const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;
++
++  if (len < beg_delta + required_size) {
++     return NULL; // Cannot do proper alignment.
++  }
++  const size_t end_delta = len - (beg_delta + required_size);
++
++  if (beg_delta != 0) {
++    os::release_memory(addr, beg_delta);
++  }
++
++  if (end_delta != 0) {
++    char* release_addr = (char*) (s + beg_delta + required_size);
++    os::release_memory(release_addr, end_delta);
++  }
++
++  return (char*) (s + beg_delta);
++}
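The arithmetic above depends on C++ precedence: & binds more loosely than + and -, so s + prefix_size & suffix_align - 1 parses as (s + prefix_size) & (suffix_align - 1). A worked example with hypothetical numbers:

    // addr = 0x10006000, prefix_size = 0x2000, suffix_align = 0x10000 (64 KB)
    // beg_ofs   = (0x10006000 + 0x2000) & 0xFFFF = 0x8000   -> suffix misaligned
    // beg_delta = 0x10000 - 0x8000               = 0x8000
    // 0x8000 bytes are released from the front, the region now starts at
    // 0x1000E000, and the suffix begins at 0x1000E000 + 0x2000 = 0x10010000,
    // a suffix_align boundary. The same masking test reappears below in the
    // two-part ReservedSpace constructor.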
++
++char* ReservedSpace::reserve_and_align(const size_t reserve_size,
++                                       const size_t prefix_size,
++                                       const size_t prefix_align,
++                                       const size_t suffix_size,
++                                       const size_t suffix_align)
++{
++  assert(reserve_size > prefix_size + suffix_size, "should not be here");
++
++  char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
++  if (raw_addr == NULL) return NULL;
++
++  char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
++                                       prefix_align, suffix_size,
++                                       suffix_align);
++  if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
++    fatal("os::release_memory failed");
++  }
++
++#ifdef ASSERT
++  if (result != NULL) {
++    const size_t raw = size_t(raw_addr);
++    const size_t res = size_t(result);
++    assert(res >= raw, "alignment decreased start addr");
++    assert(res + prefix_size + suffix_size <= raw + reserve_size,
++           "alignment increased end addr");
++    assert((res & prefix_align - 1) == 0, "bad alignment of prefix");
++    assert((res + prefix_size & suffix_align - 1) == 0,
++           "bad alignment of suffix");
++  }
++#endif
++
++  return result;
++}
++
++ReservedSpace::ReservedSpace(const size_t prefix_size,
++                             const size_t prefix_align,
++                             const size_t suffix_size,
++                             const size_t suffix_align)
++{
++  assert(prefix_size != 0, "sanity");
++  assert(prefix_align != 0, "sanity");
++  assert(suffix_size != 0, "sanity");
++  assert(suffix_align != 0, "sanity");
++  assert((prefix_size & prefix_align - 1) == 0,
++    "prefix_size not divisible by prefix_align");
++  assert((suffix_size & suffix_align - 1) == 0,
++    "suffix_size not divisible by suffix_align");
++  assert((suffix_align & prefix_align - 1) == 0,
++    "suffix_align not divisible by prefix_align");
++
++  // On systems where the entire region has to be reserved and committed up
++  // front, the compound alignment normally done by this method is unnecessary.
++  const bool try_reserve_special = UseLargePages &&
++    prefix_align == os::large_page_size();
++  if (!os::can_commit_large_page_memory() && try_reserve_special) {
++    initialize(prefix_size + suffix_size, prefix_align, true);
++    return;
++  }
+ 
+-  assert(size % os::vm_allocation_granularity() == 0,
+-         "size not allocation aligned");
+-  assert((forced_base_alignment % os::vm_allocation_granularity()) == 0,
+-         "size not allocation aligned");
+   _base = NULL;
+   _size = 0;
++  _alignment = 0;
+   _special = false;
++
++  // Optimistically try to reserve the exact size needed.
++  const size_t size = prefix_size + suffix_size;
++  char* addr = os::reserve_memory(size, NULL, prefix_align);
++  if (addr == NULL) return;
++
++  // Check whether the result has the needed alignment (unlikely unless
++  // prefix_align == suffix_align).
++  const size_t ofs = size_t(addr) + prefix_size & suffix_align - 1;
++  if (ofs != 0) {
++    // Wrong alignment.  Release, allocate more space and do manual alignment.
++    //
++    // On most operating systems, another allocation with a somewhat larger size
++    // will return an address "close to" that of the previous allocation.  The
++    // result is often the same address (if the kernel hands out virtual
++    // addresses from low to high), or an address that is offset by the increase
++    // in size.  Exploit that to minimize the amount of extra space requested.
++    if (!os::release_memory(addr, size)) {
++      fatal("os::release_memory failed");
++    }
++
++    const size_t extra = MAX2(ofs, suffix_align - ofs);
++    addr = reserve_and_align(size + extra, prefix_size, prefix_align,
++                             suffix_size, suffix_align);
++    if (addr == NULL) {
++      // Try an even larger region.  If this fails, address space is exhausted.
++      addr = reserve_and_align(size + suffix_align, prefix_size,
++                               prefix_align, suffix_size, suffix_align);
++    }
++  }
++
++  _base = addr;
++  _size = size;
++  _alignment = prefix_align;
++}
++
++void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
++                               char* requested_address) {
++  const size_t granularity = os::vm_allocation_granularity();
++  assert((size & granularity - 1) == 0,
++         "size not aligned to os::vm_allocation_granularity()");
++  assert((alignment & granularity - 1) == 0,
++         "alignment not aligned to os::vm_allocation_granularity()");
++  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
++         "not a power of 2");
++
++  _base = NULL;
++  _size = 0;
++  _special = false;
++  _alignment = 0;
+   if (size == 0) {
+     return;
+   }
+ 
+-  // If OS doesn't support demand paging for large page memory, we need 
++  // If OS doesn't support demand paging for large page memory, we need
+   // to use reserve_memory_special() to reserve and pin the entire region.
+   bool special = large && !os::can_commit_large_page_memory();
+   char* base = NULL;
+ 
+   if (special) {
+     // It's not hard to implement reserve_memory_special() such that it can
+-    // allocate at fixed address, but there seems no use of this feature 
++    // allocate at fixed address, but there seems no use of this feature
+     // for now, so it's not implemented.
+     assert(requested_address == NULL, "not implemented");
+ 
+     base = os::reserve_memory_special(size);
+-    
++
+     if (base != NULL) {
+       // Check alignment constraints
+-      if (forced_base_alignment > 0) {
+-        assert((uintptr_t) base % forced_base_alignment == 0, 
+-               "Large pages returned a non-aligned address"); 
++      if (alignment > 0) {
++        assert((uintptr_t) base % alignment == 0,
++               "Large pages returned a non-aligned address");
+       }
+       _special = true;
+     } else {
+@@ -80,7 +212,7 @@
+ 
+   if (base == NULL) {
+     // Optimistically assume that the OSes returns an aligned base pointer.
+-    // When reserving a large address range, most OSes seem to align to at 
++    // When reserving a large address range, most OSes seem to align to at
+     // least 64K.
+ 
+     // If the memory was requested at a particular address, use
+@@ -90,23 +222,23 @@
+     if (requested_address != 0) {
+       base = os::attempt_reserve_memory_at(size, requested_address);
+     } else {
+-      base = os::reserve_memory(size, NULL);
++      base = os::reserve_memory(size, NULL, alignment);
+     }
+ 
+     if (base == NULL) return;
+ 
+     // Check alignment constraints
+-    if (forced_base_alignment > 0 && ((uintptr_t) base % forced_base_alignment != 0)) {
++    if (alignment > 0 && ((size_t)base & alignment - 1) != 0) {
+       // Base not aligned, retry
+       if (!os::release_memory(base, size)) fatal("os::release_memory failed");
+       // Reserve size large enough to do manual alignment and
+       // increase size to a multiple of the desired alignment
+-      size = align_size_up(size, forced_base_alignment);
+-      size_t extra_size = size + forced_base_alignment;
+-      char* extra_base = os::reserve_memory(extra_size);
++      size = align_size_up(size, alignment);
++      size_t extra_size = size + alignment;
++      char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
+       if (extra_base == NULL) return;
+      // Do manual alignment
+-      base = (char*) align_size_up((uintptr_t) extra_base, forced_base_alignment);
++      base = (char*) align_size_up((uintptr_t) extra_base, alignment);
+       assert(base >= extra_base, "just checking");
+       // Release unused areas
+       size_t unused_bottom_size = base - extra_base;
+@@ -116,7 +248,7 @@
+       assert(unused_top_size % os::vm_allocation_granularity() == 0,
+              "size not allocation aligned");
+       if (unused_bottom_size > 0) {
+-	os::release_memory(extra_base, unused_bottom_size);
++        os::release_memory(extra_base, unused_bottom_size);
+       }
+       if (unused_top_size > 0) {
+         os::release_memory(base + size, unused_top_size);
+@@ -126,36 +258,42 @@
+   // Done
+   _base = base;
+   _size = size;
++  _alignment = MAX2(alignment, (size_t) os::vm_page_size());
++
+   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
+-	 "area must be distinguisable from marks for mark-sweep");
++         "area must be distinguisable from marks for mark-sweep");
+   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
+-	 "area must be distinguisable from marks for mark-sweep");
++         "area must be distinguisable from marks for mark-sweep");
+ }
+ 
+ 
+-ReservedSpace::ReservedSpace(char* base, size_t size, bool special) {
++ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
++                             bool special) {
+   assert((size % os::vm_allocation_granularity()) == 0,
+          "size not allocation aligned");
+   _base = base;
+   _size = size;
++  _alignment = alignment;
+   _special = special;
+ }
+ 
+ 
+-ReservedSpace ReservedSpace::first_part(size_t partition_size,
++ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
+                                         bool split, bool realloc) {
+   assert(partition_size <= size(), "partition failed");
+   if (split) {
+     os::split_reserved_memory(_base, _size, partition_size, realloc);
+   }
+-  ReservedSpace result(base(), partition_size, special());
++  ReservedSpace result(base(), partition_size, alignment, special());
+   return result;
+ }
+ 
+ 
+-ReservedSpace ReservedSpace::last_part(size_t partition_size) {
++ReservedSpace
++ReservedSpace::last_part(size_t partition_size, size_t alignment) {
+   assert(partition_size <= size(), "partition failed");
+-  ReservedSpace result(base() + partition_size, size() - partition_size, special());
++  ReservedSpace result(base() + partition_size, size() - partition_size,
++                       alignment, special());
+   return result;
+ }
+ 
+@@ -223,23 +361,19 @@
+   _high = low();
+ 
+   _special = rs.special();
+-  
++
+   // When a VirtualSpace begins life at a large size, make all future expansion
+   // and shrinking occur aligned to a granularity of large pages.  This avoids
+   // fragmentation of physical addresses that inhibits the use of large pages
+-  // by the OS virtual memory system.  Empirically,  we see that with a 4MB 
+-  // page size, the only spaces that get handled this way are codecache and 
+-  // the heap itself, both of which provide a substantial performance 
++  // by the OS virtual memory system.  Empirically, we see that with a 4MB
++  // page size, the only spaces that get handled this way are codecache and
++  // the heap itself, both of which provide a substantial performance
+   // boost in many benchmarks when covered by large pages.
+   //
+-  // No attempt is made to force large page alignment at the very top and 
+-  // bottom of the space if they are not aligned so already. 
++  // No attempt is made to force large page alignment at the very top and
++  // bottom of the space if they are not aligned so already.
+   _lower_alignment  = os::vm_page_size();
+-  if (UseLargePages && rs.size() >= os::large_page_size()) {
+-    _middle_alignment = os::large_page_size();
+-  } else {
+-    _middle_alignment = os::vm_page_size();
+-  }
++  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
+   _upper_alignment  = os::vm_page_size();
+ 
+   // End of each region
+@@ -262,7 +396,7 @@
+ }
+ 
+ 
+-VirtualSpace::~VirtualSpace() { 
++VirtualSpace::~VirtualSpace() {
+   release();
+ }
+ 
+@@ -286,7 +420,7 @@
+ }
+ 
+ 
+-size_t VirtualSpace::committed_size() const { 
++size_t VirtualSpace::committed_size() const {
+   return pointer_delta(high(), low(), sizeof(char));
+ }
+ 
+@@ -305,12 +439,12 @@
+   return low() <= (const char*) p && (const char*) p < high();
+ }
+ 
+-/* 
++/*
+    First we need to determine if a particular virtual space is using large
+    pages.  This is done at the initialize function and only virtual spaces
+    that are larger than LargePageSizeInBytes use large pages.  Once we
+    have determined this, all expand_by and shrink_by calls must grow and
+-   shrink by large page size chunks.  If a particular request 
++   shrink by large page size chunks.  If a particular request
+    is within the current large page, the call to commit and uncommit memory
+    can be ignored.  In the case that the low and high boundaries of this
+    space are not large page aligned, the pages leading to the first large
+@@ -328,25 +462,25 @@
+ 
+   char* previous_high = high();
+   char* unaligned_new_high = high() + bytes;
+-  assert(unaligned_new_high <= high_boundary(), 
++  assert(unaligned_new_high <= high_boundary(),
+          "cannot expand by more than upper boundary");
+ 
+   // Calculate where the new high for each of the regions should be.  If
+   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
+-  // then the unaligned lower and upper new highs would be the 
++  // then the unaligned lower and upper new highs would be the
+   // lower_high() and upper_high() respectively.
+-  char* unaligned_lower_new_high = 
++  char* unaligned_lower_new_high =
+     MIN2(unaligned_new_high, lower_high_boundary());
+-  char* unaligned_middle_new_high = 
++  char* unaligned_middle_new_high =
+     MIN2(unaligned_new_high, middle_high_boundary());
+-  char* unaligned_upper_new_high = 
++  char* unaligned_upper_new_high =
+     MIN2(unaligned_new_high, upper_high_boundary());
+ 
+-  // Align the new highs based on the regions alignment.  lower and upper 
++  // Align the new highs based on each region's alignment.  Lower and upper
+   // alignment will always be the default page size.  Middle alignment will be
+   // LargePageSizeInBytes if the actual size of the virtual space is in
+   // fact larger than LargePageSizeInBytes.
+-  char* aligned_lower_new_high = 
++  char* aligned_lower_new_high =
+     (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
+   char* aligned_middle_new_high =
+     (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
+@@ -356,8 +490,8 @@
+   // Determine which regions need to grow in this expand_by call.
+   // If you are growing in the lower region, high() must be in that
+   // region so calculate the size based on high().  For the middle and
+-  // upper regions, determine the starting point of growth based on the 
+-  // location of high().  By getting the MAX of the region's low address 
++  // upper regions, determine the starting point of growth based on the
++  // location of high().  By getting the MAX of the region's low address
+   // (or the previous region's high address) and high(), we can tell if it
+   // is an intra or inter region growth.
+   size_t lower_needs = 0;
+@@ -377,21 +511,21 @@
+   }
+ 
+   // Check contiguity.
+-  assert(low_boundary() <= lower_high() && 
++  assert(low_boundary() <= lower_high() &&
+          lower_high() <= lower_high_boundary(),
+-	 "high address must be contained within the region");
+-  assert(lower_high_boundary() <= middle_high() && 
+-	 middle_high() <= middle_high_boundary(),
+-	 "high address must be contained within the region");
++         "high address must be contained within the region");
++  assert(lower_high_boundary() <= middle_high() &&
++         middle_high() <= middle_high_boundary(),
++         "high address must be contained within the region");
+   assert(middle_high_boundary() <= upper_high() &&
+-	 upper_high() <= upper_high_boundary(), 
+-	 "high address must be contained within the region");
++         upper_high() <= upper_high_boundary(),
++         "high address must be contained within the region");
+ 
+   // Commit regions
+   if (lower_needs > 0) {
+     assert(low_boundary() <= lower_high() &&
+-	   lower_high() + lower_needs <= lower_high_boundary(), 
+-	   "must not expand beyond region");
++           lower_high() + lower_needs <= lower_high_boundary(),
++           "must not expand beyond region");
+     if (!os::commit_memory(lower_high(), lower_needs)) {
+       debug_only(warning("os::commit_memory failed"));
+       return false;
+@@ -401,18 +535,18 @@
+   }
+   if (middle_needs > 0) {
+     assert(lower_high_boundary() <= middle_high() &&
+-	   middle_high() + middle_needs <= middle_high_boundary(), 
+-	   "must not expand beyond region");
++           middle_high() + middle_needs <= middle_high_boundary(),
++           "must not expand beyond region");
+     if (!os::commit_memory(middle_high(), middle_needs, middle_alignment())) {
+       debug_only(warning("os::commit_memory failed"));
+       return false;
+-    } 
++    }
+     _middle_high += middle_needs;
+   }
+   if (upper_needs > 0) {
+     assert(middle_high_boundary() <= upper_high() &&
+-	   upper_high() + upper_needs <= upper_high_boundary(), 
+-	   "must not expand beyond region");
++           upper_high() + upper_needs <= upper_high_boundary(),
++           "must not expand beyond region");
+     if (!os::commit_memory(upper_high(), upper_needs)) {
+       debug_only(warning("os::commit_memory failed"));
+       return false;
+@@ -424,8 +558,8 @@
+   if (pre_touch || AlwaysPreTouch) {
+     int vm_ps = os::vm_page_size();
+     for (char* curr = previous_high;
+- 	 curr < unaligned_new_high;
+- 	 curr += vm_ps) {
++         curr < unaligned_new_high;
++         curr += vm_ps) {
+       // Note the use of a write here; originally we tried just a read, but
+       // since the value read was unused, the optimizer removed the read.
+       // If we ever have a concurrent touchahead thread, we'll want to use
+@@ -436,7 +570,7 @@
+       *curr = 0;
+     }
+   }
+-  
++
+   _high += bytes;
+   return true;
+ }
+@@ -445,7 +579,7 @@
+ // Continue to decrement the high() pointer until it reaches a page boundary
+ // in which case that particular page can now be uncommitted.
+ void VirtualSpace::shrink_by(size_t size) {
+-  if (committed_size() < size) 
++  if (committed_size() < size)
+     fatal("Cannot shrink virtual space to negative size");
+ 
+   if (special()) {
+@@ -458,65 +592,65 @@
+   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
+ 
+   // Calculate new unaligned address
+-  char* unaligned_upper_new_high = 
++  char* unaligned_upper_new_high =
+     MAX2(unaligned_new_high, middle_high_boundary());
+-  char* unaligned_middle_new_high = 
++  char* unaligned_middle_new_high =
+     MAX2(unaligned_new_high, lower_high_boundary());
+-  char* unaligned_lower_new_high = 
++  char* unaligned_lower_new_high =
+     MAX2(unaligned_new_high, low_boundary());
+ 
+   // Align address to region's alignment
+-  char* aligned_upper_new_high = 
++  char* aligned_upper_new_high =
+     (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
+-  char* aligned_middle_new_high = 
++  char* aligned_middle_new_high =
+     (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
+-  char* aligned_lower_new_high = 
++  char* aligned_lower_new_high =
+     (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
+-  
++
+   // Determine which regions need to shrink
+   size_t upper_needs = 0;
+   if (aligned_upper_new_high < upper_high()) {
+-    upper_needs = 
++    upper_needs =
+       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
+   }
+   size_t middle_needs = 0;
+   if (aligned_middle_new_high < middle_high()) {
+-    middle_needs = 
++    middle_needs =
+       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
+   }
+   size_t lower_needs = 0;
+   if (aligned_lower_new_high < lower_high()) {
+-    lower_needs = 
++    lower_needs =
+       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
+   }
+-  
++
+   // Check contiguity.
+   assert(middle_high_boundary() <= upper_high() &&
+-	 upper_high() <= upper_high_boundary(), 
+-	 "high address must be contained within the region");
+-  assert(lower_high_boundary() <= middle_high() && 
+-	 middle_high() <= middle_high_boundary(),
+-	 "high address must be contained within the region");
+-  assert(low_boundary() <= lower_high() && 
+-	 lower_high() <= lower_high_boundary(),
+-	 "high address must be contained within the region");
++         upper_high() <= upper_high_boundary(),
++         "high address must be contained within the region");
++  assert(lower_high_boundary() <= middle_high() &&
++         middle_high() <= middle_high_boundary(),
++         "high address must be contained within the region");
++  assert(low_boundary() <= lower_high() &&
++         lower_high() <= lower_high_boundary(),
++         "high address must be contained within the region");
+ 
+   // Uncommit
+   if (upper_needs > 0) {
+     assert(middle_high_boundary() <= aligned_upper_new_high &&
+-	   aligned_upper_new_high + upper_needs <= upper_high_boundary(), 
+-	   "must not shrink beyond region");
++           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
++           "must not shrink beyond region");
+     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
+       debug_only(warning("os::uncommit_memory failed"));
+       return;
+     } else {
+       _upper_high -= upper_needs;
+     }
+-  } 
++  }
+   if (middle_needs > 0) {
+     assert(lower_high_boundary() <= aligned_middle_new_high &&
+-	   aligned_middle_new_high + middle_needs <= middle_high_boundary(), 
+-	   "must not shrink beyond region");
++           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
++           "must not shrink beyond region");
+     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
+       debug_only(warning("os::uncommit_memory failed"));
+       return;
+@@ -526,8 +660,8 @@
+   }
+   if (lower_needs > 0) {
+     assert(low_boundary() <= aligned_lower_new_high &&
+-	   aligned_lower_new_high + lower_needs <= lower_high_boundary(), 
+-	   "must not shrink beyond region");
++           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
++           "must not shrink beyond region");
+     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
+       debug_only(warning("os::uncommit_memory failed"));
+       return;
+@@ -535,17 +669,17 @@
+       _lower_high -= lower_needs;
+     }
+   }
+-  
++
+   _high -= size;
+ }
+ 
+ #ifndef PRODUCT
+ void VirtualSpace::check_for_contiguity() {
+   // Check contiguity.
+-  assert(low_boundary() <= lower_high() && 
++  assert(low_boundary() <= lower_high() &&
+          lower_high() <= lower_high_boundary(),
+          "high address must be contained within the region");
+-  assert(lower_high_boundary() <= middle_high() && 
++  assert(lower_high_boundary() <= middle_high() &&
+          middle_high() <= middle_high_boundary(),
+          "high address must be contained within the region");
+   assert(middle_high_boundary() <= upper_high() &&
+@@ -557,7 +691,7 @@
+   assert(high() <= upper_high(), "upper high");
+ }
+ 
+-void VirtualSpace::print() {  
++void VirtualSpace::print() {
+   tty->print   ("Virtual space:");
+   if (special()) tty->print(" (pinned in memory)");
+   tty->cr();
+@@ -568,4 +702,3 @@
+ }
+ 
+ #endif
+-
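
Every expand_by()/shrink_by() hunk above repeats one pattern: clamp the requested new high into the lower, middle, and upper regions, round each candidate to that region's alignment (default pages at the ends, large pages in the middle), and commit or uncommit only the delta. A compressed sketch of that clamp-then-round arithmetic, with made-up boundaries and page sizes rather than HotSpot's accessors:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

// Round p up to a power-of-two alignment, as round_to() does.
static uintptr_t round_to_align(uintptr_t p, uintptr_t align) {
  return (p + align - 1) & ~(align - 1);
}

// The per-region candidate high: the requested high clamped to the
// region's upper boundary, then rounded to the region's page size.
static uintptr_t region_new_high(uintptr_t unaligned_new_high,
                                 uintptr_t region_high_boundary,
                                 uintptr_t region_alignment) {
  uintptr_t h = unaligned_new_high < region_high_boundary
                    ? unaligned_new_high : region_high_boundary;  // MIN2
  return round_to_align(h, region_alignment);
}

int main() {
  // Made-up layout: lower/upper regions use 4 KB pages, middle uses 2 MB.
  uintptr_t lower_high_boundary  = 0x200000;
  uintptr_t middle_high_boundary = 0xA00000;
  uintptr_t new_high = 0x356789;   // some requested expansion target
  printf("lower high:  %#lx\n", (unsigned long)
         region_new_high(new_high, lower_high_boundary, 0x1000));
  printf("middle high: %#lx\n", (unsigned long)
         region_new_high(new_high, middle_high_boundary, 0x200000));
  return 0;
}

Rounding the middle candidate up to the large-page size is what lets commits proceed in whole large-page chunks, at the cost of committing slightly past the requested high.
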
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/virtualspace.hpp openjdk/hotspot/src/share/vm/runtime/virtualspace.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/virtualspace.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/virtualspace.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)virtualspace.hpp	1.41 07/05/05 17:07:02 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // ReservedSpace is a data structure for reserving a contiguous address range.
+@@ -32,31 +29,62 @@
+  private:
+   char*  _base;
+   size_t _size;
++  size_t _alignment;
+   bool   _special;
+ 
+   // ReservedSpace
+-  ReservedSpace(char* base, size_t size, bool large);
+-  void initialize(size_t size, size_t forced_base_alignment,
+-                  bool large,  char* requested_address = NULL);
++  ReservedSpace(char* base, size_t size, size_t alignment, bool special);
++  void initialize(size_t size, size_t alignment, bool large,
++                  char* requested_address = NULL);
++
++  // Release parts of an already-reserved memory region [addr, addr + len) to
++  // get a new region that has "compound alignment."  Return the start of the
++  // resulting region, or NULL on failure.
++  //
++  // The region is logically divided into a prefix and a suffix.  The prefix
++  // starts at the result address, which is aligned to prefix_align.  The suffix
++  // starts at result address + prefix_size, which is aligned to suffix_align.
++  // The total size of the result region is prefix_size + suffix_size.
++  char* align_reserved_region(char* addr, const size_t len,
++                              const size_t prefix_size,
++                              const size_t prefix_align,
++                              const size_t suffix_size,
++                              const size_t suffix_align);
++
++  // Reserve memory, call align_reserved_region() to align it and return the
++  // result.
++  char* reserve_and_align(const size_t reserve_size,
++                          const size_t prefix_size,
++                          const size_t prefix_align,
++                          const size_t suffix_size,
++                          const size_t suffix_align);
+ 
+  public:
+   // Constructor
+   ReservedSpace(size_t size);
+-  ReservedSpace(size_t size, size_t forced_base_alignment,
+-                bool large, char* requested_address = NULL);
++  ReservedSpace(size_t size, size_t alignment, bool large,
++                char* requested_address = NULL);
++  ReservedSpace(const size_t prefix_size, const size_t prefix_align,
++                const size_t suffix_size, const size_t suffix_align);
+ 
+   // Accessors
+-  char*  base()   { return _base;   }
+-  size_t size()   { return _size;   }
+-  bool   special(){ return _special;}
++  char*  base()      const { return _base;      }
++  size_t size()      const { return _size;      }
++  size_t alignment() const { return _alignment; }
++  bool   special()   const { return _special;   }
+ 
+-  bool is_reserved() { return _base != NULL; }
++  bool is_reserved() const { return _base != NULL; }
+   void release();
+ 
+   // Splitting
+-  ReservedSpace first_part(size_t partition_size,
++  ReservedSpace first_part(size_t partition_size, size_t alignment,
+                            bool split = false, bool realloc = true);
+-  ReservedSpace last_part (size_t partition_size);
++  ReservedSpace last_part (size_t partition_size, size_t alignment);
++
++  // These simply call the above using the default alignment.
++  inline ReservedSpace first_part(size_t partition_size,
++                                  bool split = false, bool realloc = true);
++  inline ReservedSpace last_part (size_t partition_size);
+ 
+   // Alignment
+   static size_t page_align_size_up(size_t size);
+@@ -65,6 +93,16 @@
+   static size_t allocation_align_size_down(size_t size);
+ };
+ 
++ReservedSpace
++ReservedSpace::first_part(size_t partition_size, bool split, bool realloc)
++{
++  return first_part(partition_size, alignment(), split, realloc);
++}
++
++ReservedSpace ReservedSpace::last_part(size_t partition_size)
++{
++  return last_part(partition_size, alignment());
++}
+ 
+ // VirtualSpace is data structure for committing a previously reserved address range in smaller chunks.
+ 
+@@ -85,11 +123,11 @@
+ 
+   // MPSS Support
+   // Each virtualspace region has a lower, middle, and upper region.
+-  // Each region has an end boundary and a high pointer which is the 
++  // Each region has an end boundary and a high pointer which is the
+   // high water mark for the last allocated byte.
+   // The lower and upper regions, when not aligned to LargePageSizeInBytes, use
+   // the default page size.  The middle region uses the large page size.
+-  char* _lower_high;  
++  char* _lower_high;
+   char* _middle_high;
+   char* _upper_high;
+ 
+@@ -109,7 +147,7 @@
+   char* lower_high_boundary() const { return _lower_high_boundary; }
+   char* middle_high_boundary() const { return _middle_high_boundary; }
+   char* upper_high_boundary() const { return _upper_high_boundary; }
+-  
++
+   size_t lower_alignment() const { return _lower_alignment; }
+   size_t middle_alignment() const { return _middle_alignment; }
+   size_t upper_alignment() const { return _upper_alignment; }
+@@ -129,7 +167,7 @@
+   // Initialization
+   VirtualSpace();
+   bool initialize(ReservedSpace rs, size_t committed_byte_size);
+-  
++
+   // Destruction
+   ~VirtualSpace();
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/vm_operations.cpp openjdk/hotspot/src/share/vm/runtime/vm_operations.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/vm_operations.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/vm_operations.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)vm_operations.cpp	1.192 07/05/23 10:54:18 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,33 +19,33 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_vm_operations.cpp.incl"
+ 
+-#define VM_OP_NAME_INITIALIZE(name) #name,  
++#define VM_OP_NAME_INITIALIZE(name) #name,
+ 
+ const char* VM_Operation::_names[VM_Operation::VMOp_Terminating] = \
+   { VM_OPS_DO(VM_OP_NAME_INITIALIZE) };
+ 
+-void VM_Operation::set_calling_thread(Thread* thread, ThreadPriority priority) { 
+-  _calling_thread = thread; 
++void VM_Operation::set_calling_thread(Thread* thread, ThreadPriority priority) {
++  _calling_thread = thread;
+   assert(MinPriority <= priority && priority <= MaxPriority, "sanity check");
+   _priority = priority;
+-}  
++}
+ 
+ 
+ void VM_Operation::evaluate() {
+   ResourceMark rm;
+-  if (TraceVMOperation) { 
+-    tty->print("["); 
++  if (TraceVMOperation) {
++    tty->print("[");
+     NOT_PRODUCT(print();)
+   }
+   doit();
+-  if (TraceVMOperation) { 
+-    tty->print_cr("]"); 
++  if (TraceVMOperation) {
++    tty->print_cr("]");
+   }
+ }
+ 
+@@ -74,13 +71,13 @@
+ 
+ void VM_ThreadStop::doit() {
+   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
+-  JavaThread* target = java_lang_Thread::thread(target_thread());  
++  JavaThread* target = java_lang_Thread::thread(target_thread());
+   // Note that this now allows multiple ThreadDeath exceptions to be
+   // thrown at a thread.
+   if (target != NULL) {
+     // the thread has run and is not already in the process of exiting
+     target->send_thread_stop(throwable());
+-  }  
++  }
+ }
+ 
+ void VM_Deoptimize::doit() {
+@@ -88,7 +85,7 @@
+   ResourceMark rm;
+   DeoptimizationMarker dm;
+ 
+-  // Deoptimize all activations depending on marked nmethods  
++  // Deoptimize all activations depending on marked nmethods
+   Deoptimization::deoptimize_dependents();
+ 
+   // Make the dependent methods zombies
+@@ -115,7 +112,7 @@
+   if (DeoptimizeALot) {
+     for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
+       if (thread->has_last_Java_frame()) {
+-	thread->deoptimize();
++        thread->deoptimize();
+       }
+     }
+   } else if (DeoptimizeRandom) {
+@@ -126,20 +123,20 @@
+     int tcount = 0;
+     for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
+       if (thread->has_last_Java_frame()) {
+-	if (tcount++ == tnum)  {
+-	tcount = 0;
+-	  int fcount = 0;
+-	  // Deoptimize some selected frames.
++        if (tcount++ == tnum)  {
++        tcount = 0;
++          int fcount = 0;
++          // Deoptimize some selected frames.
+           // Biased locking wants an updated register map
+-	  for(StackFrameStream fst(thread, UseBiasedLocking); !fst.is_done(); fst.next()) {
+-	    if (fst.current()->can_be_deoptimized()) {
+-	      if (fcount++ == fnum) {
+-		fcount = 0;
+-		Deoptimization::deoptimize(thread, *fst.current(), fst.register_map());
+-	      }
+-	    }
+-	  }
+-	}
++          for(StackFrameStream fst(thread, UseBiasedLocking); !fst.is_done(); fst.next()) {
++            if (fst.current()->can_be_deoptimized()) {
++              if (fcount++ == fnum) {
++                fcount = 0;
++                Deoptimization::deoptimize(thread, *fst.current(), fst.register_map());
++              }
++            }
++          }
++        }
+       }
+     }
+   }
+@@ -160,7 +157,7 @@
+ 
+ bool VM_PrintThreads::doit_prologue() {
+   assert(Thread::current()->is_Java_thread(), "just checking");
+-  
++
+   // Make sure AbstractOwnableSynchronizer is loaded
+   if (JDK_Version::is_gte_jdk16x_version()) {
+     java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(JavaThread::current());
+@@ -179,7 +176,7 @@
+ 
+ void VM_PrintThreads::doit_epilogue() {
+   if (_print_concurrent_locks) {
+-    // Release Heap_lock 
++    // Release Heap_lock
+     Heap_lock->unlock();
+   }
+ }
+@@ -201,8 +198,8 @@
+ 
+ bool VM_FindDeadlocks::doit_prologue() {
+   assert(Thread::current()->is_Java_thread(), "just checking");
+-  
+-  // Load AbstractOwnableSynchronizer class 
++
++  // Load AbstractOwnableSynchronizer class
+   if (_concurrent_locks && JDK_Version::is_gte_jdk16x_version()) {
+     java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(JavaThread::current());
+   }
+@@ -231,7 +228,7 @@
+ 
+ VM_ThreadDump::VM_ThreadDump(ThreadDumpResult* result,
+                              int max_depth,
+-                             bool with_locked_monitors, 
++                             bool with_locked_monitors,
+                              bool with_locked_synchronizers) {
+   _result = result;
+   _num_threads = 0; // 0 indicates all threads
+@@ -246,7 +243,7 @@
+                              GrowableArray<instanceHandle>* threads,
+                              int num_threads,
+                              int max_depth,
+-                             bool with_locked_monitors, 
++                             bool with_locked_monitors,
+                              bool with_locked_synchronizers) {
+   _result = result;
+   _num_threads = num_threads;
+@@ -259,7 +256,7 @@
+ 
+ bool VM_ThreadDump::doit_prologue() {
+   assert(Thread::current()->is_Java_thread(), "just checking");
+-  
++
+   // Load AbstractOwnableSynchronizer class before taking thread snapshots
+   if (JDK_Version::is_gte_jdk16x_version()) {
+     java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(JavaThread::current());
+@@ -275,7 +272,7 @@
+ 
+ void VM_ThreadDump::doit_epilogue() {
+   if (_with_locked_synchronizers) {
+-    // Release Heap_lock 
++    // Release Heap_lock
+     Heap_lock->unlock();
+   }
+ }
+@@ -291,11 +288,11 @@
+   if (_num_threads == 0) {
+     // Snapshot all live threads
+     for (JavaThread* jt = Threads::first(); jt != NULL; jt = jt->next()) {
+-      if (jt->is_exiting() || 
++      if (jt->is_exiting() ||
+           jt->is_hidden_from_external_view())  {
+         // skip terminating threads and hidden threads
+         continue;
+-      } 
++      }
+       ThreadConcurrentLocks* tcl = NULL;
+       if (_with_locked_synchronizers) {
+         tcl = concurrent_locks.thread_concurrent_locks(jt);
+@@ -303,13 +300,13 @@
+       ThreadSnapshot* ts = snapshot_thread(jt, tcl);
+       _result->add_thread_snapshot(ts);
+     }
+-  } else { 
++  } else {
+     // Snapshot threads in the given _threads array
+     // A dummy snapshot is created if a thread doesn't exist
+     for (int i = 0; i < _num_threads; i++) {
+       instanceHandle th = _threads->at(i);
+       if (th() == NULL) {
+-        // skip if the thread doesn't exist 
++        // skip if the thread doesn't exist
+         // Add a dummy snapshot
+         _result->add_thread_snapshot(new ThreadSnapshot());
+         continue;
+@@ -318,14 +315,14 @@
+       // Dump thread stack only if the thread is alive and not exiting
+       // and not VM internal thread.
+       JavaThread* jt = java_lang_Thread::thread(th());
+-      if (jt == NULL || /* thread not alive */ 
++      if (jt == NULL || /* thread not alive */
+           jt->is_exiting() ||
+           jt->is_hidden_from_external_view())  {
+         // add a NULL snapshot if skipped
+         _result->add_thread_snapshot(new ThreadSnapshot());
+         continue;
+       }
+-      ThreadConcurrentLocks* tcl = NULL;      
++      ThreadConcurrentLocks* tcl = NULL;
+       if (_with_locked_synchronizers) {
+         tcl = concurrent_locks.thread_concurrent_locks(jt);
+       }
+@@ -373,7 +370,7 @@
+ 
+   // Compiler threads need longer wait because they can access VM data directly
+   // while in native. If they are active and some structures being used are
+-  // deleted by the shutdown sequence, they will crash. On the other hand, user 
++  // deleted by the shutdown sequence, they will crash. On the other hand, user
+   // threads must go through native=>Java/VM transitions first to access VM
+   // data, and they will be stopped during state transition. In theory, we
+   // don't have to wait for user threads to be quiescent, but it's always
+@@ -422,7 +419,7 @@
+   // Among 16276 JCK tests, 94% of them come here without any threads still
+   // running in native; the other 6% are quiescent within 250ms (Ultra 80).
+   wait_for_threads_in_native_to_block();
+-  
++
+   set_vm_exited();
+ 
+   // cleanup globals resources before exiting. exit_globals() currently
+@@ -432,7 +429,7 @@
+   // Check for exit hook
+   exit_hook_t exit_hook = Arguments::exit_hook();
+   if (exit_hook != NULL) {
+-    // exit hook should exit. 
++    // exit hook should exit.
+     exit_hook(_exit_code);
+     // ... but if it didn't, we must do it here
+     vm_direct_exit(_exit_code);
+@@ -443,7 +440,7 @@
+ 
+ 
+ void VM_Exit::wait_if_vm_exited() {
+-  if (_vm_exited && 
++  if (_vm_exited &&
+       ThreadLocalStorage::get_thread_slow() != _shutdown_thread) {
+     // _vm_exited is set at safepoint, and the Threads_lock is never released
+     // we will block here until the process dies
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/vm_operations.hpp openjdk/hotspot/src/share/vm/runtime/vm_operations.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/vm_operations.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/vm_operations.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)vm_operations.hpp	1.130 07/05/23 10:54:21 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The following classes are used for operations
+@@ -31,7 +28,7 @@
+ 
+ #define VM_OP_ENUM(type)   VMOp_##type,
+ 
+-// Note: When new VM_XXX comes up, add 'XXX' to the template table. 
++// Note: When new VM_XXX comes up, add 'XXX' to the template table.
+ #define VM_OPS_DO(template)                       \
+   template(Dummy)                                 \
+   template(ThreadStop)                            \
+@@ -88,21 +85,21 @@
+     _safepoint,       // blocking,        safepoint, vm_op C-heap allocated
+     _no_safepoint,    // blocking,     no safepoint, vm_op C-Heap allocated
+     _concurrent,      // non-blocking, no safepoint, vm_op C-Heap allocated
+-    _async_safepoint  // non-blocking,    safepoint, vm_op C-Heap allocated 
++    _async_safepoint  // non-blocking,    safepoint, vm_op C-Heap allocated
+   };
+ 
+   enum VMOp_Type {
+     VM_OPS_DO(VM_OP_ENUM)
+     VMOp_Terminating
+   };
+-                
++
+  private:
+-  Thread*	  _calling_thread;
++  Thread*         _calling_thread;
+   ThreadPriority  _priority;
+   long            _timestamp;
+-  VM_Operation*	  _next;  
++  VM_Operation*   _next;
+   VM_Operation*   _prev;
+-  
++
+   // The VM operation name array
+   static const char* _names[];
+ 
+@@ -110,41 +107,41 @@
+   VM_Operation()  { _calling_thread = NULL; _next = NULL; _prev = NULL; }
+   virtual ~VM_Operation() {}
+ 
+-  // VM operation support (used by VM thread)  
+-  Thread* calling_thread() const                 { return _calling_thread; }  
++  // VM operation support (used by VM thread)
++  Thread* calling_thread() const                 { return _calling_thread; }
+   ThreadPriority priority()                      { return _priority; }
+   void set_calling_thread(Thread* thread, ThreadPriority priority);
+-  
++
+   long timestamp() const              { return _timestamp; }
+-  void set_timestamp(long timestamp)  { _timestamp = timestamp; } 
+-    
++  void set_timestamp(long timestamp)  { _timestamp = timestamp; }
++
+   // Called by VM thread - does in turn invoke doit(). Do not override this
+-  void evaluate();  
+-    
+-  // evaluate() is called by the VMThread and in turn calls doit(). 
+-  // If the thread invoking VMThread::execute((VM_Operation*) is a JavaThread, 
+-  // doit_prologue() is called in that thread before transferring control to 
++  void evaluate();
++
++  // evaluate() is called by the VMThread and in turn calls doit().
++  // If the thread invoking VMThread::execute((VM_Operation*) is a JavaThread,
++  // doit_prologue() is called in that thread before transferring control to
+   // the VMThread.
+-  // If doit_prologue() returns true the VM operation will proceed, and 
+-  // doit_epilogue() will be called by the JavaThread once the VM operation 
+-  // completes. If doit_prologue() returns false the VM operation is cancelled.    
++  // If doit_prologue() returns true the VM operation will proceed, and
++  // doit_epilogue() will be called by the JavaThread once the VM operation
++  // completes. If doit_prologue() returns false the VM operation is cancelled.
+   virtual void doit()                            = 0;
+   virtual bool doit_prologue()                   { return true; };
+   virtual void doit_epilogue()                   {}; // Note: Not called if mode is: _concurrent
+ 
+   // Type test
+   virtual bool is_methodCompiler() const         { return false; }
+-  
++
+   // Linking
+-  VM_Operation *next() const			 { return _next; }
++  VM_Operation *next() const                     { return _next; }
+   VM_Operation *prev() const                     { return _prev; }
+-  void set_next(VM_Operation *next)		 { _next = next; }
+-  void set_prev(VM_Operation *prev)		 { _prev = prev; }
+-  
+-  // Configuration. Override these appropriatly in subclasses.             
++  void set_next(VM_Operation *next)              { _next = next; }
++  void set_prev(VM_Operation *prev)              { _prev = prev; }
++
++  // Configuration. Override these appropriately in subclasses.
+   virtual VMOp_Type type() const = 0;
+-  virtual Mode evaluation_mode() const            { return _safepoint; }  
+-  virtual bool allow_nested_vm_operations() const { return false; }    
++  virtual Mode evaluation_mode() const            { return _safepoint; }
++  virtual bool allow_nested_vm_operations() const { return false; }
+   virtual bool is_cheap_allocated() const         { return false; }
+   virtual void oops_do(OopClosure* f)              { /* do nothing */ };
+ 
+@@ -158,18 +155,18 @@
+   virtual bool evaluate_at_safepoint() const {
+     return evaluation_mode() == _safepoint  ||
+            evaluation_mode() == _async_safepoint;
+-  }   
++  }
+   virtual bool evaluate_concurrently() const {
+     return evaluation_mode() == _concurrent ||
+            evaluation_mode() == _async_safepoint;
+-  }       
++  }
+ 
+   // Debugging
+   void print_on_error(outputStream* st) const;
+   const char* name() const { return _names[type()]; }
+-  static const char* name(int type) { 
+-    assert(type >= 0 && type < VMOp_Terminating, "invalid VM operation type"); 
+-    return _names[type]; 
++  static const char* name(int type) {
++    assert(type >= 0 && type < VMOp_Terminating, "invalid VM operation type");
++    return _names[type];
+   }
+ #ifndef PRODUCT
+   void print_on(outputStream* st) const { print_on_error(st); }
+@@ -177,14 +174,14 @@
+ };
+ 
+ class VM_ThreadStop: public VM_Operation {
+- private:  
++ private:
+   oop     _thread;        // The Thread that the Throwable is thrown against
+-  oop     _throwable;     // The Throwable thrown at the target Thread  
++  oop     _throwable;     // The Throwable thrown at the target Thread
+  public:
+   // All oops are passed as JNI handles, since a GC might happen before the
+   // VM operation is executed.
+-  VM_ThreadStop(oop thread, oop throwable) {    
+-    _thread    = thread;  
++  VM_ThreadStop(oop thread, oop throwable) {
++    _thread    = thread;
+     _throwable = throwable;
+   }
+   VMOp_Type type() const                         { return VMOp_ThreadStop; }
+@@ -205,7 +202,7 @@
+ // dummy vm op, evaluated just to force a safepoint
+ class VM_ForceSafepoint: public VM_Operation {
+  public:
+-  VM_ForceSafepoint() {}  
++  VM_ForceSafepoint() {}
+   void doit()         {}
+   VMOp_Type type() const { return VMOp_ForceSafepoint; }
+ };
+@@ -213,7 +210,7 @@
+ // dummy vm op, evaluated just to force a safepoint
+ class VM_ForceAsyncSafepoint: public VM_Operation {
+  public:
+-  VM_ForceAsyncSafepoint() {}  
++  VM_ForceAsyncSafepoint() {}
+   void doit()              {}
+   VMOp_Type type() const                         { return VMOp_ForceAsyncSafepoint; }
+   Mode evaluation_mode() const                   { return _async_safepoint; }
+@@ -287,8 +284,8 @@
+  private:
+   outputStream* _out;
+  public:
+-  VM_PrintJNI() 			{ _out = tty; }
+-  VM_PrintJNI(outputStream* out)  	{ _out = out; }
++  VM_PrintJNI()                         { _out = tty; }
++  VM_PrintJNI(outputStream* out)        { _out = out; }
+   VMOp_Type type() const                { return VMOp_PrintJNI; }
+   void doit();
+ };
+@@ -329,14 +326,14 @@
+  public:
+   VM_ThreadDump(ThreadDumpResult* result,
+                 int max_depth,  // -1 indicates entire stack
+-                bool with_locked_monitors, 
++                bool with_locked_monitors,
+                 bool with_locked_synchronizers);
+ 
+   VM_ThreadDump(ThreadDumpResult* result,
+                 GrowableArray<instanceHandle>* threads,
+                 int num_threads, // -1 indicates entire stack
+                 int max_depth,
+-                bool with_locked_monitors, 
++                bool with_locked_monitors,
+                 bool with_locked_synchronizers);
+ 
+   VMOp_Type type() const { return VMOp_ThreadDump; }
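
The reflowed comments in vm_operations.hpp describe a three-phase protocol: doit_prologue() runs on the requesting JavaThread (and may cancel the operation), evaluate() runs on the VMThread and calls doit(), and doit_epilogue() runs back on the requester. A stripped-down, single-threaded model of that handshake, with hypothetical Toy* names, just to make the control flow concrete:

#include <stdio.h>

// Minimal model of the VM_Operation protocol described above.  Real
// HotSpot hands doit() to the VMThread, usually at a safepoint; here
// the three phases simply run in order on one thread.
class ToyVMOperation {
 public:
  virtual ~ToyVMOperation() {}
  virtual const char* name() const = 0;
  virtual bool doit_prologue() { return true; }  // on the requesting thread
  virtual void doit() = 0;                       // on the VM thread
  virtual void doit_epilogue() {}                // back on the requester
  void evaluate() {                              // what the VMThread runs
    printf("[");                                 // tracing bracket, as in
    doit();                                      // VM_Operation::evaluate()
    printf("]\n");
  }
};

class ToyForceSafepoint : public ToyVMOperation {
 public:
  const char* name() const { return "ToyForceSafepoint"; }
  void doit() { printf("%s", name()); }          // a real op does work here
};

static void toy_execute(ToyVMOperation* op) {
  if (!op->doit_prologue()) return;              // prologue may cancel the op
  op->evaluate();                                // the VMThread's half
  op->doit_epilogue();
}

int main() {
  ToyForceSafepoint op;
  toy_execute(&op);
  return 0;
}
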
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/vmStructs.cpp openjdk/hotspot/src/share/vm/runtime/vmStructs.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/vmStructs.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/vmStructs.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)vmStructs.cpp	1.186 07/08/20 18:05:45 JVM"
+-#endif
+ /*
+  * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -244,6 +241,7 @@
+      static_field(Universe,                    _constantPoolCacheKlassObj,                    klassOop)                              \
+      static_field(Universe,                    _compiledICHolderKlassObj,                     klassOop)                              \
+      static_field(Universe,                    _systemObjArrayKlassObj,                       klassOop)                              \
++     static_field(Universe,                    _mirrors[0],                                   oop)                                  \
+      static_field(Universe,                    _main_thread_group,                            oop)                                   \
+      static_field(Universe,                    _system_thread_group,                          oop)                                   \
+      static_field(Universe,                    _the_empty_byte_array,                         typeArrayOop)                          \
+@@ -289,11 +287,12 @@
+   nonstatic_field(CardGeneration,              _rs,                                           GenRemSet*)                            \
+   nonstatic_field(CardGeneration,              _bts,                                          BlockOffsetSharedArray*)               \
+                                                                                                                                      \
+-  nonstatic_field(CardTableModRefBS,           _whole_heap,                                   MemRegion)                             \
++  nonstatic_field(CardTableModRefBS,           _whole_heap,                                   const MemRegion)                       \
++  nonstatic_field(CardTableModRefBS,           _guard_index,                                  const size_t)                          \
++  nonstatic_field(CardTableModRefBS,           _last_valid_index,                             const size_t)                          \
++  nonstatic_field(CardTableModRefBS,           _page_size,                                    const size_t)                          \
++  nonstatic_field(CardTableModRefBS,           _byte_map_size,                                const size_t)                          \
+   nonstatic_field(CardTableModRefBS,           _byte_map,                                     jbyte*)                                \
+-  nonstatic_field(CardTableModRefBS,           _byte_map_size,                                size_t)                                \
+-  nonstatic_field(CardTableModRefBS,           _last_valid_index,                             size_t)                                \
+-  nonstatic_field(CardTableModRefBS,           _guard_index,                                  size_t)                                \
+   nonstatic_field(CardTableModRefBS,           _cur_covered_regions,                          int)                                   \
+   nonstatic_field(CardTableModRefBS,           _covered,                                      MemRegion*)                            \
+   nonstatic_field(CardTableModRefBS,           _committed,                                    MemRegion*)                            \
+@@ -488,7 +487,6 @@
+       static_field(SystemDictionary,            _vector_klass,                                 klassOop)                             \
+       static_field(SystemDictionary,            _hashtable_klass,                              klassOop)                             \
+       static_field(SystemDictionary,            _box_klasses[0],                               klassOop)                             \
+-      static_field(SystemDictionary,            _mirrors[0],                                   oop)                                  \
+       static_field(SystemDictionary,            _java_system_loader,                           oop)                                  \
+                                                                                                                                      \
+   /*******************/                                                                                                              \
+@@ -1007,6 +1005,7 @@
+   declare_toplevel_type(GenerationSpec)                                   \
+   declare_toplevel_type(HeapWord)                                         \
+   declare_toplevel_type(MemRegion)                                        \
++  declare_toplevel_type(const MemRegion)                                  \
+   declare_toplevel_type(PermanentGenerationSpec)                          \
+   declare_toplevel_type(ThreadLocalAllocBuffer)                           \
+   declare_toplevel_type(VirtualSpace)                                     \
+@@ -1271,7 +1270,7 @@
+    declare_integer_type(ReferenceType)                                    \
+   declare_toplevel_type(StubQueue*)                                       \
+   declare_toplevel_type(Thread*)                                          \
+-  declare_toplevel_type(Universe)  
++  declare_toplevel_type(Universe)
+ 
+   /* NOTE that we do not use the last_entry() macro here; it is used  */
+   /* in vmStructs_<os>_<cpu>.hpp's VM_TYPES_OS_CPU macro (and must be */
+@@ -1735,15 +1734,15 @@
+ // This macro checks the type of a VMStructEntry by comparing pointer types
+ #define CHECK_NONSTATIC_VM_STRUCT_ENTRY(typeName, fieldName, type)                 \
+  {typeName *dummyObj = NULL; type* dummy = &dummyObj->fieldName; }
+- 
++
+ // This macro checks the type of a volatile VMStructEntry by comparing pointer types
+ #define CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY(typeName, fieldName, type)        \
+  {typedef type dummyvtype; typeName *dummyObj = NULL; volatile dummyvtype* dummy = &dummyObj->fieldName; }
+- 
++
+ // This macro checks the type of a VMStructEntry by comparing pointer types
+ #define CHECK_STATIC_VM_STRUCT_ENTRY(typeName, fieldName, type)                    \
+  {type* dummy = &typeName::fieldName; }
+- 
++
+ // This macro ensures the type of a field and its containing type are
+ // present in the type table. The assertion string is shorter than
+ // preferable because (incredibly) of a bug in Solstice NFS client
+@@ -1932,7 +1931,7 @@
+ 
+ //
+ // Instantiation of VMStructEntries, VMTypeEntries and VMIntConstantEntries
+-// 
++//
+ 
+ // These initializers are allowed to access private fields in classes
+ // as long as class VMStructs is a friend
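
The CHECK_*_VM_STRUCT_ENTRY macros whose trailing whitespace is trimmed above hinge on a compile-time trick: taking a field's address through a typed null pointer yields a pointer of the field's declared type, so initializing a 'type*' from it only compiles when the table entry names the right type. A self-contained sketch of the trick, using a hypothetical struct rather than a HotSpot one:

#include <cstddef>

struct Example { long counter; };

// Mirrors the vmStructs check: &dummyObj->fieldName has the field's
// declared type, so a wrong 'type' in a table entry is a hard compile
// error.  Nothing is dereferenced, so there is no runtime cost.
#define CHECK_FIELD_TYPE(typeName, fieldName, type) \
  { typeName* dummyObj = NULL; type* dummy = &dummyObj->fieldName; (void) dummy; }

int main() {
  CHECK_FIELD_TYPE(Example, counter, long);    // matches: compiles
  // CHECK_FIELD_TYPE(Example, counter, int);  // mismatch: compile error
  return 0;
}
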
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/vmStructs.hpp openjdk/hotspot/src/share/vm/runtime/vmStructs.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/vmStructs.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/vmStructs.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)vmStructs.hpp	1.13 07/05/05 17:07:02 JVM"
+-#endif
+ /*
+  * Copyright 2000-2001 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This table encapsulates the debugging information required by the
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/vmThread.cpp openjdk/hotspot/src/share/vm/runtime/vmThread.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/vmThread.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/vmThread.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)vmThread.cpp	1.91 07/05/23 10:54:15 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -40,7 +37,7 @@
+   for(int i = 0; i < nof_priorities; i++) {
+     _queue_length[i] = 0;
+     _queue_counter = 0;
+-    _queue[i] = new VM_Dummy(); 
++    _queue[i] = new VM_Dummy();
+     _queue[i]->set_next(_queue[i]);
+     _queue[i]->set_prev(_queue[i]);
+   }
+@@ -53,7 +50,7 @@
+   bool empty = (_queue[prio] == _queue[prio]->next());
+   assert( (_queue_length[prio] == 0 && empty) ||
+           (_queue_length[prio] > 0  && !empty), "sanity check");
+-  return _queue_length[prio] == 0;  
++  return _queue_length[prio] == 0;
+ }
+ 
+ // Inserts an element to the right of the q element
+@@ -62,33 +59,33 @@
+   n->set_prev(q);
+   n->set_next(q->next());
+   q->next()->set_prev(n);
+-  q->set_next(n);  
++  q->set_next(n);
+ }
+ 
+-void VMOperationQueue::queue_add_front(int prio, VM_Operation *op) {   
+-  _queue_length[prio]++; 
+-  insert(_queue[prio]->next(), op);   
++void VMOperationQueue::queue_add_front(int prio, VM_Operation *op) {
++  _queue_length[prio]++;
++  insert(_queue[prio]->next(), op);
+ }
+ 
+-void VMOperationQueue::queue_add_back(int prio, VM_Operation *op) {   
+-  _queue_length[prio]++; 
+-  insert(_queue[prio]->prev(), op); 
++void VMOperationQueue::queue_add_back(int prio, VM_Operation *op) {
++  _queue_length[prio]++;
++  insert(_queue[prio]->prev(), op);
+ }
+- 
+ 
+-void VMOperationQueue::unlink(VM_Operation* q) {  
++
++void VMOperationQueue::unlink(VM_Operation* q) {
+   assert(q->next()->prev() == q && q->prev()->next() == q, "sanity check");
+-  q->prev()->set_next(q->next());  
++  q->prev()->set_next(q->next());
+   q->next()->set_prev(q->prev());
+ }
+ 
+-VM_Operation* VMOperationQueue::queue_remove_front(int prio) {  
++VM_Operation* VMOperationQueue::queue_remove_front(int prio) {
+   if (queue_empty(prio)) return NULL;
+   assert(_queue_length[prio] >= 0, "sanity check");
+   _queue_length[prio]--;
+   VM_Operation* r = _queue[prio]->next();
+   assert(r != _queue[prio], "cannot remove base element");
+-  unlink(r);    
++  unlink(r);
+   return r;
+ }
+ 
+@@ -101,53 +98,53 @@
+   assert(r != _queue[prio], "cannot remove base element");
+   // remove links to base element from head and tail
+   r->set_prev(NULL);
+-  _queue[prio]->prev()->set_next(NULL);    
++  _queue[prio]->prev()->set_next(NULL);
+   // restore queue to empty state
+   _queue[prio]->set_next(_queue[prio]);
+   _queue[prio]->set_prev(_queue[prio]);
+   assert(queue_empty(prio), "drain corrupted queue")
+ #ifdef DEBUG
+-  int len = 0; 
+-  VM_Operation* cur; 
+-  for(cur = r; cur != NULL; cur=cur->next()) len++; 
++  int len = 0;
++  VM_Operation* cur;
++  for(cur = r; cur != NULL; cur=cur->next()) len++;
+   assert(len == length, "drain lost some ops");
+ #endif
+   return r;
+ }
+ 
+-void VMOperationQueue::queue_oops_do(int queue, OopClosure* f) {  
++void VMOperationQueue::queue_oops_do(int queue, OopClosure* f) {
+   VM_Operation* cur = _queue[queue];
+   cur = cur->next();
+   while (cur != _queue[queue]) {
+     cur->oops_do(f);
+     cur = cur->next();
+-  }  
++  }
+ }
+ 
+-void VMOperationQueue::drain_list_oops_do(OopClosure* f) {  
++void VMOperationQueue::drain_list_oops_do(OopClosure* f) {
+   VM_Operation* cur = _drain_list;
+   while (cur != NULL) {
+     cur->oops_do(f);
+     cur = cur->next();
+-  }  
++  }
+ }
+ 
+ //-----------------------------------------------------------------
+ // High-level interface
+-bool VMOperationQueue::add(VM_Operation *op) {  
++bool VMOperationQueue::add(VM_Operation *op) {
+   // Encapsulates VM queue policy. Currently, that
+   // only involves putting them on the right list
+   if (op->evaluate_at_safepoint()) {
+     queue_add_back(SafepointPriority, op);
+     return true;
+-  } 
+-  
++  }
++
+   queue_add_back(MediumPriority, op);
+   return true;
+ }
+ 
+-VM_Operation* VMOperationQueue::remove_next() {   
+-  // Assuming VMOperation queue is two-level priority queue. If there are 
++VM_Operation* VMOperationQueue::remove_next() {
++  // Assuming the VMOperation queue is a two-level priority queue. If there are
+   // more than two priorities, we need a different scheduling algorithm.
+   assert(SafepointPriority == 0 && MediumPriority == 1 && nof_priorities == 2,
+          "current algorithm does not work");
+@@ -159,7 +156,7 @@
+       high_prio = SafepointPriority;
+       low_prio  = MediumPriority;
+   } else {
+-      _queue_counter = 0; 
++      _queue_counter = 0;
+       high_prio = MediumPriority;
+       low_prio  = SafepointPriority;
+   }
+@@ -178,10 +175,10 @@
+ //------------------------------------------------------------------------------------------------------------------
+ // Implementation of VMThread stuff
+ 
+-bool	            VMThread::_should_terminate   = false;
++bool                VMThread::_should_terminate   = false;
+ bool              VMThread::_terminated         = false;
+ Monitor*          VMThread::_terminate_lock     = NULL;
+-VMThread*         VMThread::_vm_thread	        = NULL;
++VMThread*         VMThread::_vm_thread          = NULL;
+ VM_Operation*     VMThread::_cur_vm_operation   = NULL;
+ VMOperationQueue* VMThread::_vm_queue           = NULL;
+ PerfCounter*      VMThread::_perf_accumulated_vm_operation_time = NULL;
+@@ -196,7 +193,7 @@
+   guarantee(_vm_queue != NULL, "just checking");
+ 
+   _terminate_lock = new Monitor(Mutex::safepoint, "VMThread::_terminate_lock", true);
+-  
++
+   if (UsePerfData) {
+     // jvmstat performance counters
+     Thread* THREAD = Thread::current();
+@@ -204,16 +201,11 @@
+                  PerfDataManager::create_counter(SUN_THREADS, "vmOperationTime",
+                                                  PerfData::U_Ticks, CHECK);
+   }
+-
+-  // Initialize safepoint intrumentation buffer etc.
+-  if (PrintSafepointStatistics || PrintSafepointStatisticsTimeout > 0){
+-    SafepointSynchronize::initialize_stat();
+-  }
+ }
+ 
+ 
+ VMThread::VMThread() : Thread() {
+-  // nothing to do  
++  // nothing to do
+ }
+ 
+ void VMThread::destroy() {
+@@ -232,15 +224,15 @@
+   // case of spurious wakeup, it should wait on the last
+   // value set prior to the notify
+   this->set_active_handles(JNIHandleBlock::allocate_block());
+-  
++
+   {
+     MutexLocker ml(Notify_lock);
+     Notify_lock->notify();
+   }
+-  // Notify_lock is destroyed by Threads::create_vm()  
++  // Notify_lock is destroyed by Threads::create_vm()
+ 
+-  int prio = (VMThreadPriority == -1) 
+-    ? os::java_to_os_priority[NearMaxPriority] 
++  int prio = (VMThreadPriority == -1)
++    ? os::java_to_os_priority[NearMaxPriority]
+     : VMThreadPriority;
+   // Note that I cannot call os::set_priority because it expects Java
+   // priorities and I am *explicitly* using OS priorities so that it's
+@@ -248,7 +240,7 @@
+   os::set_native_priority( this, prio );
+ 
+   // Wait for VM_Operations until termination
+-  this->loop();      
++  this->loop();
+ 
+   // Note the intention to exit before safepointing.
+   // 6295565  This has the effect of waiting for any large tty
+@@ -260,7 +252,7 @@
+     xtty->end_elem();
+     assert(should_terminate(), "termination flag must be set");
+   }
+-  
++
+   // 4526887 let VM thread exit at Safepoint
+   SafepointSynchronize::begin();
+ 
+@@ -271,10 +263,10 @@
+     os::check_heap();
+     Universe::verify(true, true); // Silent verification to not pollute normal output
+   }
+-  
++
+   CompileBroker::set_should_block();
+ 
+-  // wait for threads (compiler threads or daemon threads) in the 
++  // wait for threads (compiler threads or daemon threads) in the
+   // _thread_in_native state to block.
+   VM_Exit::wait_for_threads_in_native_to_block();
+ 
+@@ -298,28 +290,28 @@
+ }
+ 
+ 
+-// Notify the VMThread that the last non-daemon JavaThread has terminated, 
++// Notify the VMThread that the last non-daemon JavaThread has terminated,
+ // and wait until operation is performed.
+-void VMThread::wait_for_vm_thread_exit() { 
+-  { MutexLocker mu(VMOperationQueue_lock);    
++void VMThread::wait_for_vm_thread_exit() {
++  { MutexLocker mu(VMOperationQueue_lock);
+     _should_terminate = true;
+     VMOperationQueue_lock->notify();
+   }
+-  
+-  // Note: VM thread leaves at Safepoint. We are not stopped by Safepoint 
+-  // because this thread has been removed from the threads list. But anything 
+-  // that could get blocked by Safepoint should not be used after this point, 
++
++  // Note: VM thread leaves at Safepoint. We are not stopped by Safepoint
++  // because this thread has been removed from the threads list. But anything
++  // that could get blocked by Safepoint should not be used after this point,
+   // otherwise we will hang, since there is no one who can end the safepoint.
+ 
+   // Wait until VM thread is terminated
+-  // Note: it should be OK to use Terminator_lock here. But this is called 
+-  // at a very delicate time (VM shutdown) and we are operating in non- VM 
++  // Note: it should be OK to use Terminator_lock here. But this is called
++  // at a very delicate time (VM shutdown) and we are operating in non- VM
+   // thread at Safepoint. It's safer to not share lock with other threads.
+   { MutexLockerEx ml(_terminate_lock, Mutex::_no_safepoint_check_flag);
+     while(!VMThread::is_terminated()) {
+         _terminate_lock->wait(Mutex::_no_safepoint_check_flag);
+     }
+-  }  
++  }
+ }
+ 
+ void VMThread::print_on(outputStream* st) const {
+@@ -341,88 +333,88 @@
+ 
+   // Mark as completed
+   if (!op->evaluate_concurrently()) {
+-    op->calling_thread()->increment_vm_operation_completed_count();    
+-  }                
++    op->calling_thread()->increment_vm_operation_completed_count();
++  }
+   // It is unsafe to access the _cur_vm_operation after the 'increment_vm_operation_completed_count' call,
+-  // since if it is stack allocated the calling thread might have deallocated        
++  // since if it is stack allocated the calling thread might have deallocated
+   if (c_heap_allocated) {
+     delete _cur_vm_operation;
+   }
+ }
+- 
+ 
+-void VMThread::loop() {    
++
++void VMThread::loop() {
+   assert(_cur_vm_operation == NULL, "no current one should be executing");
+ 
+-  while(true) { 
++  while(true) {
+     VM_Operation* safepoint_ops = NULL;
+     //
+     // Wait for VM operation
+     //
+     // use no_safepoint_check to get lock without attempting to "sneak"
+-    { MutexLockerEx mu_queue(VMOperationQueue_lock, 
+-			     Mutex::_no_safepoint_check_flag);
++    { MutexLockerEx mu_queue(VMOperationQueue_lock,
++                             Mutex::_no_safepoint_check_flag);
+ 
+       // Look for new operation
+       assert(_cur_vm_operation == NULL, "no current one should be executing");
+       _cur_vm_operation = _vm_queue->remove_next();
+ 
+       // Stall time tracking code
+-      if (PrintVMQWaitTime && _cur_vm_operation != NULL && 
++      if (PrintVMQWaitTime && _cur_vm_operation != NULL &&
+           !_cur_vm_operation->evaluate_concurrently()) {
+         long stall = os::javaTimeMillis() - _cur_vm_operation->timestamp();
+-        if (stall > 0) 
+-	  tty->print_cr("%s stall: %Ld",  _cur_vm_operation->name(), stall);
++        if (stall > 0)
++          tty->print_cr("%s stall: %Ld",  _cur_vm_operation->name(), stall);
+       }
+-  
++
+       while (!should_terminate() && _cur_vm_operation == NULL) {
+         // wait with a timeout to guarantee safepoints at regular intervals
+-        bool timedout = 
+-	  VMOperationQueue_lock->wait(Mutex::_no_safepoint_check_flag, 
+-				      GuaranteedSafepointInterval); 
++        bool timedout =
++          VMOperationQueue_lock->wait(Mutex::_no_safepoint_check_flag,
++                                      GuaranteedSafepointInterval);
+ 
+         // Support for self destruction
+-        if ((SelfDestructTimer != 0) && !is_error_reported() && 
+-	    (os::elapsedTime() > SelfDestructTimer * 60)) {
++        if ((SelfDestructTimer != 0) && !is_error_reported() &&
++            (os::elapsedTime() > SelfDestructTimer * 60)) {
+           tty->print_cr("VM self-destructed");
+           exit(-1);
+         }
+ 
+-        if (timedout && (SafepointALot || 
+-			 SafepointSynchronize::is_cleanup_needed())) {
++        if (timedout && (SafepointALot ||
++                         SafepointSynchronize::is_cleanup_needed())) {
+           MutexUnlockerEx mul(VMOperationQueue_lock,
+-			      Mutex::_no_safepoint_check_flag);
++                              Mutex::_no_safepoint_check_flag);
+           // Force a safepoint since we have not had one for at least
+           // 'GuaranteedSafepointInterval' milliseconds.  This will run all
+           // the clean-up processing that needs to be done regularly at a
+           // safepoint
+-          SafepointSynchronize::begin();	          
+-	  #ifdef ASSERT
+-	    if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
+-	  #endif
+-          SafepointSynchronize::end();            
++          SafepointSynchronize::begin();
++          #ifdef ASSERT
++            if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
++          #endif
++          SafepointSynchronize::end();
+         }
+-        _cur_vm_operation = _vm_queue->remove_next();   
++        _cur_vm_operation = _vm_queue->remove_next();
+ 
+-	// If we are at a safepoint we will evaluate all the operations that
+-	// follow that also require a safepoint
+-	if (_cur_vm_operation != NULL && 
+-	    _cur_vm_operation->evaluate_at_safepoint()) {
+-	  safepoint_ops = _vm_queue->drain_at_safepoint_priority();
+-	}
+-      }     
+-      
+-      if (should_terminate()) break;    
++        // If we are at a safepoint we will evaluate all the operations that
++        // follow that also require a safepoint
++        if (_cur_vm_operation != NULL &&
++            _cur_vm_operation->evaluate_at_safepoint()) {
++          safepoint_ops = _vm_queue->drain_at_safepoint_priority();
++        }
++      }
++
++      if (should_terminate()) break;
+     } // Release mu_queue_lock
+-        
+-    // 
++
++    //
+     // Execute VM operation
+     //
+-    { HandleMark hm(VMThread::vm_thread());      
+-          
++    { HandleMark hm(VMThread::vm_thread());
++
+       EventMark em("Executing VM operation: %s", vm_operation()->name());
+       assert(_cur_vm_operation != NULL, "we should have found an operation to execute");
+-  
++
+       // Give the VM thread an extra quantum.  Jobs tend to be bursty and this
+       // helps the VM thread to finish up the job.
+       // FIXME: When this is enabled and there are many threads, this can degrade
+@@ -439,87 +431,87 @@
+                                   RuntimeService::last_application_time_sec());
+         }
+ 
+-	_vm_queue->set_drain_list(safepoint_ops); // ensure ops can be scanned
++        _vm_queue->set_drain_list(safepoint_ops); // ensure ops can be scanned
+ 
+-        SafepointSynchronize::begin(); 
+-	evaluate_operation(_cur_vm_operation);
+-	// now process all queued safepoint ops, iteratively draining
+-	// the queue until there are none left
++        SafepointSynchronize::begin();
++        evaluate_operation(_cur_vm_operation);
++        // now process all queued safepoint ops, iteratively draining
++        // the queue until there are none left
+         do {
+-	  _cur_vm_operation = safepoint_ops;
+-	  if (_cur_vm_operation != NULL) {
+-	    do {
+-	      // evaluate_operation deletes the op object so we have
+-	      // to grab the next op now
+-	      VM_Operation* next = _cur_vm_operation->next();
+-	      _vm_queue->set_drain_list(next); 
+-	      evaluate_operation(_cur_vm_operation);
+-	      _cur_vm_operation = next;
++          _cur_vm_operation = safepoint_ops;
++          if (_cur_vm_operation != NULL) {
++            do {
++              // evaluate_operation deletes the op object so we have
++              // to grab the next op now
++              VM_Operation* next = _cur_vm_operation->next();
++              _vm_queue->set_drain_list(next);
++              evaluate_operation(_cur_vm_operation);
++              _cur_vm_operation = next;
+               if (PrintSafepointStatistics) {
+                 SafepointSynchronize::inc_vmop_coalesced_count();
+               }
+-	    } while (_cur_vm_operation != NULL);
+-	  }
+-	  // There is a chance that a thread enqueued a safepoint op
+-	  // since we released the op-queue lock and initiated the safepoint.
+-	  // So we drain the queue again if there is anything there, as an
+-	  // optimization to try and reduce the number of safepoints.
++            } while (_cur_vm_operation != NULL);
++          }
++          // There is a chance that a thread enqueued a safepoint op
++          // since we released the op-queue lock and initiated the safepoint.
++          // So we drain the queue again if there is anything there, as an
++          // optimization to try and reduce the number of safepoints.
+           // As the safepoint synchronizes us with JavaThreads we will see
+-	  // any enqueue made by a JavaThread, but the peek will not 
+-	  // necessarily detect a concurrent enqueue by a GC thread, but
+-	  // that simply means the op will wait for the next major cycle of the
+-	  // VMThread - just as it would if the GC thread lost the race for
+-	  // the lock.
+-	  if (_vm_queue->peek_at_safepoint_priority()) {
+-	    // must hold lock while draining queue
+-	    MutexLockerEx mu_queue(VMOperationQueue_lock, 
+-				     Mutex::_no_safepoint_check_flag);
+-	    safepoint_ops = _vm_queue->drain_at_safepoint_priority();
+-	  } else {
+-	    safepoint_ops = NULL;
+-	  }
+-        } while(safepoint_ops != NULL);      
++          // any enqueue made by a JavaThread, but the peek will not
++          // necessarily detect a concurrent enqueue by a GC thread; that
++          // simply means the op will wait for the next major cycle of the
++          // VMThread - just as it would if the GC thread lost the race for
++          // the lock.
++          if (_vm_queue->peek_at_safepoint_priority()) {
++            // must hold lock while draining queue
++            MutexLockerEx mu_queue(VMOperationQueue_lock,
++                                     Mutex::_no_safepoint_check_flag);
++            safepoint_ops = _vm_queue->drain_at_safepoint_priority();
++          } else {
++            safepoint_ops = NULL;
++          }
++        } while(safepoint_ops != NULL);
+ 
+-	_vm_queue->set_drain_list(NULL);
++        _vm_queue->set_drain_list(NULL);
+ 
+         // Complete safepoint synchronization
+         SafepointSynchronize::end();
+ 
+         if (PrintGCApplicationStoppedTime) {
+-          gclog_or_tty->print_cr("Total time for which application threads " 
+-                                 "were stopped: %3.7f seconds", 
++          gclog_or_tty->print_cr("Total time for which application threads "
++                                 "were stopped: %3.7f seconds",
+                                  RuntimeService::last_safepoint_time_sec());
+         }
+ 
+-      } else {  // not a safepoint operation      
++      } else {  // not a safepoint operation
+         if (TraceLongCompiles) {
+           elapsedTimer t;
+-          t.start();  
+-          evaluate_operation(_cur_vm_operation);          
++          t.start();
++          evaluate_operation(_cur_vm_operation);
+           t.stop();
+           double secs = t.seconds();
+           if (secs * 1e3 > LongCompileThreshold) {
+-	    // XXX - _cur_vm_operation should not be accessed after
+-	    // the completed count has been incremented; the waiting
+-	    // thread may have already freed this memory.
++            // XXX - _cur_vm_operation should not be accessed after
++            // the completed count has been incremented; the waiting
++            // thread may have already freed this memory.
+             tty->print_cr("vm %s: %3.7f secs]", _cur_vm_operation->name(), secs);
+           }
+         } else {
+-          evaluate_operation(_cur_vm_operation);                    
++          evaluate_operation(_cur_vm_operation);
+         }
+-        
+-        _cur_vm_operation = NULL;        
+-      }              
+-    }    
++
++        _cur_vm_operation = NULL;
++      }
++    }
+ 
+     //
+-    //  Notify (potential) waiting Java thread(s) - lock without safepoint 
++    //  Notify (potential) waiting Java thread(s) - lock without safepoint
+     //  check so that sneaking is not possible
+-    { MutexLockerEx mu(VMOperationRequest_lock, 
+-		       Mutex::_no_safepoint_check_flag);
++    { MutexLockerEx mu(VMOperationRequest_lock,
++                       Mutex::_no_safepoint_check_flag);
+       VMOperationRequest_lock->notify_all();
+     }
+-        
++
+     //
+     // We want to make sure that we get to a safepoint regularly.
+     //
+@@ -535,9 +527,9 @@
+   }
+ }
+ 
+-void VMThread::execute(VM_Operation* op) {     
+-  Thread* t = Thread::current();  
+-  
++void VMThread::execute(VM_Operation* op) {
++  Thread* t = Thread::current();
++
+   if (!t->is_VM_thread()) {
+     // JavaThread or WatcherThread
+     t->check_for_valid_safepoint_state(true);
+@@ -547,11 +539,11 @@
+       return;   // op was cancelled
+     }
+ 
+-    // Setup VM_operations for execution    
++    // Setup VM_operations for execution
+     op->set_calling_thread(t, Thread::get_priority(t));
+-    
+-    // It does not make sense to execute the epilogue, if the VM operation object is getting 
+-    // deallocated by the VM thread.        
++
++    // It does not make sense to execute the epilogue, if the VM operation object is getting
++    // deallocated by the VM thread.
+     bool concurrent     = op->evaluate_concurrently();
+     bool execute_epilog = !op->is_cheap_allocated();
+     assert(!concurrent || op->is_cheap_allocated(), "concurrent => cheap_allocated");
+@@ -565,8 +557,8 @@
+     // Add VM operation to list of waiting threads. We are guaranteed not to block while holding the
+     // VMOperationQueue_lock, so we can block without a safepoint check. This allows vm operation requests
+     // to be queued up during a safepoint synchronization.
+-    { 
+-      VMOperationQueue_lock->lock_without_safepoint_check();      
++    {
++      VMOperationQueue_lock->lock_without_safepoint_check();
+       bool ok = _vm_queue->add(op);
+       op->set_timestamp(os::javaTimeMillis());
+       VMOperationQueue_lock->notify();
+@@ -579,22 +571,22 @@
+       }
+     }
+ 
+-    if (!concurrent) {      
++    if (!concurrent) {
+       // Wait for completion of request (non-concurrent)
+       // Note: only a JavaThread triggers the safepoint check when locking
+       MutexLocker mu(VMOperationRequest_lock);
+-      while(t->vm_operation_completed_count() < ticket) {        
+-        VMOperationRequest_lock->wait(!t->is_Java_thread());     
++      while(t->vm_operation_completed_count() < ticket) {
++        VMOperationRequest_lock->wait(!t->is_Java_thread());
+       }
+     }
+-          
++
+     if (execute_epilog) {
+       op->doit_epilogue();
+-    }    
++    }
+   } else {
+     // invoked by VM thread; usually nested VM operation
+     assert(t->is_VM_thread(), "must be a VM thread");
+-    VM_Operation* prev_vm_operation = vm_operation();    
++    VM_Operation* prev_vm_operation = vm_operation();
+     if (prev_vm_operation != NULL) {
+       // Check the VM operation allows nested VM operation. This normally not the case, e.g., the compiler
+       // does not allow nested scavenges or compiles.
+@@ -605,24 +597,24 @@
+     }
+ 
+     EventMark em("Executing %s VM operation: %s", prev_vm_operation ? "nested" : "", op->name());
+-    
++
+     // Release all internal handles after operation is evaluated
+-    HandleMark hm(t);    
++    HandleMark hm(t);
+     _cur_vm_operation = op;
+-    
+-    if (op->evaluate_at_safepoint() && !SafepointSynchronize::is_at_safepoint()) {      
+-      SafepointSynchronize::begin();      
+-      op->evaluate();                 
+-      SafepointSynchronize::end();      
++
++    if (op->evaluate_at_safepoint() && !SafepointSynchronize::is_at_safepoint()) {
++      SafepointSynchronize::begin();
++      op->evaluate();
++      SafepointSynchronize::end();
+     } else {
+-      op->evaluate();           
++      op->evaluate();
+     }
+-    
++
+     // Free memory if needed
+     if (op->is_cheap_allocated()) delete op;
+-      
++
+     _cur_vm_operation = prev_vm_operation;
+-  }  
++  }
+ }
+ 
+ 
+@@ -642,7 +634,7 @@
+ 
+   // Check forward links
+   for(i = 0; i < length; i++) {
+-    cur = cur->next();  
++    cur = cur->next();
+     assert(cur != _queue[prio], "list to short (forward)");
+   }
+   assert(cur->next() == _queue[prio], "list to long (forward)");
+@@ -650,7 +642,7 @@
+   // Check backwards links
+   cur = _queue[prio];
+   for(i = 0; i < length; i++) {
+-    cur = cur->prev();  
++    cur = cur->prev();
+     assert(cur != _queue[prio], "list to short (backwards)");
+   }
+   assert(cur->prev() == _queue[prio], "list to long (backwards)");
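The vmThread.cpp hunks above are whitespace and indentation cleanup around HotSpot's VM-operation machinery: a requesting thread enqueues a VM_Operation and, unless the operation is concurrent, blocks on a per-thread completion ticket while the VMThread dequeues and evaluates it (bracketed in a safepoint when required). A minimal sketch of that ticket hand-off follows; Op, OpQueue and the counters are illustrative stand-ins, and HotSpot's separate queue/request locks and safepoint brackets are collapsed into one mutex here.

    // Sketch only: simplified from the VMThread::execute()/loop() protocol
    // shown above. Names are hypothetical, not HotSpot's API.
    #include <condition_variable>
    #include <deque>
    #include <mutex>

    struct Op { virtual void evaluate() = 0; virtual ~Op() {} };

    class OpQueue {
      std::mutex mu_;
      std::condition_variable cv_;
      std::deque<Op*> ops_;
      long enqueued_ = 0, completed_ = 0;   // FIFO completion tickets
     public:
      // Requesting thread: enqueue, then wait for our ticket (cf. the
      // vm_operation_completed_count wait in VMThread::execute()).
      void execute(Op* op) {
        long ticket;
        { std::lock_guard<std::mutex> lk(mu_);
          ops_.push_back(op);
          ticket = ++enqueued_;
          cv_.notify_all();
        }
        std::unique_lock<std::mutex> lk(mu_);
        cv_.wait(lk, [&]{ return completed_ >= ticket; });
      }
      // Service thread: the analogue of VMThread::loop().
      void loop() {
        for (;;) {
          Op* op;
          { std::unique_lock<std::mutex> lk(mu_);
            cv_.wait(lk, [&]{ return !ops_.empty(); });
            op = ops_.front(); ops_.pop_front();
          }
          op->evaluate();                   // HotSpot runs this at a safepoint
          { std::lock_guard<std::mutex> lk(mu_);
            ++completed_;                   // release the waiting requester
            cv_.notify_all();
          }
        }
      }
    };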
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/vmThread.hpp openjdk/hotspot/src/share/vm/runtime/vmThread.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/vmThread.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/vmThread.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)vmThread.hpp	1.40 07/05/05 17:07:03 JVM"
+-#endif
+ /*
+  * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+@@ -36,13 +33,13 @@
+   enum Priorities {
+      SafepointPriority, // Highest priority (operation executed at a safepoint)
+      MediumPriority,    // Medium priority
+-     nof_priorities 
++     nof_priorities
+   };
+ 
+   // We maintain a doubled linked list, with explicit count.
+   int           _queue_length[nof_priorities];
+   int           _queue_counter;
+-  VM_Operation* _queue       [nof_priorities];  
++  VM_Operation* _queue       [nof_priorities];
+   // we also allow the vmThread to register the ops it has drained so we
+   // can scan them from oops_do
+   VM_Operation* _drain_list;
+@@ -55,7 +52,7 @@
+   bool queue_empty                (int prio);
+   void queue_add_front            (int prio, VM_Operation *op);
+   void queue_add_back             (int prio, VM_Operation *op);
+-  VM_Operation* queue_remove_front(int prio);  
++  VM_Operation* queue_remove_front(int prio);
+   void queue_oops_do(int queue, OopClosure* f);
+   void drain_list_oops_do(OopClosure* f);
+   VM_Operation* queue_drain(int prio);
+@@ -105,7 +102,7 @@
+   bool is_GC_thread() const                      { return true; }
+ 
+   char* name() const { return (char*)"VM Thread"; }
+-  
++
+   // The ever running loop for the VMThread
+   void loop();
+ 
+@@ -118,7 +115,7 @@
+   static void execute(VM_Operation* op);
+ 
+   // Returns the current vm operation if any.
+-  static VM_Operation* vm_operation()             { return _cur_vm_operation;   }  
++  static VM_Operation* vm_operation()             { return _cur_vm_operation;   }
+ 
+   // Returns the single instance of VMThread.
+   static VMThread* vm_thread()                    { return _vm_thread; }
+@@ -128,7 +125,7 @@
+ 
+   // Debugging
+   void print_on(outputStream* st) const;
+-  void print() const				  { print_on(tty); }
++  void print() const                              { print_on(tty); }
+   void verify();
+ 
+   // Performance measurement
+@@ -141,13 +138,11 @@
+   static void create();
+   static void destroy();
+ 
+- private:   
+-  // VM_Operation support  
++ private:
++  // VM_Operation support
+   static VM_Operation*     _cur_vm_operation;   // Current VM operation
+   static VMOperationQueue* _vm_queue;           // Queue (w/ policy) of VM operations
+-  
++
+   // Pointer to single-instance of VM thread
+   static VMThread*     _vm_thread;
+ };
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/vm_version.cpp openjdk/hotspot/src/share/vm/runtime/vm_version.cpp
+--- openjdk6/hotspot/src/share/vm/runtime/vm_version.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/vm_version.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)vm_version.cpp	1.57 07/08/20 18:10:11 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -49,7 +46,7 @@
+   #define VM_RELEASE HOTSPOT_RELEASE_VERSION "-" HOTSPOT_BUILD_TARGET
+ #endif
+ 
+-// HOTSPOT_RELEASE_VERSION must follow the release version naming convention 
++// HOTSPOT_RELEASE_VERSION must follow the release version naming convention
+ // <major_ver>.<minor_ver>-b<nn>[-<identifier>][-<debug_target>]
+ int Abstract_VM_Version::_vm_major_version = 0;
+ int Abstract_VM_Version::_vm_minor_version = 0;
+@@ -62,7 +59,7 @@
+   }
+   char* vm_version = os::strdup(HOTSPOT_RELEASE_VERSION);
+ 
+-  // Expecting the next vm_version format: 
++  // Expecting the next vm_version format:
+   // <major_ver>.<minor_ver>-b<nn>[-<identifier>]
+   char* vm_major_ver = vm_version;
+   assert(isdigit(vm_major_ver[0]),"wrong vm major version number");
+@@ -75,10 +72,10 @@
+   vm_build_num[0] = '\0'; // terminate vm_minor_ver
+   vm_build_num += 2;
+ 
+-  _vm_major_version = atoi(vm_major_ver); 
+-  _vm_minor_version = atoi(vm_minor_ver); 
++  _vm_major_version = atoi(vm_major_ver);
++  _vm_minor_version = atoi(vm_minor_ver);
+   _vm_build_number  = atoi(vm_build_num);
+- 
++
+   os::free(vm_version);
+   _initialized = true;
+ }
+@@ -89,12 +86,16 @@
+   #define VMLP ""
+ #endif
+ 
++#ifdef KERNEL
++  #define VMTYPE "Kernel"
++#else // KERNEL
+ #ifdef TIERED
+   #define VMTYPE "Server"
+ #else
+   #define VMTYPE COMPILER1_PRESENT("Client")   \
+-                 COMPILER2_PRESENT("Server")   
++                 COMPILER2_PRESENT("Server")
+ #endif // TIERED
++#endif // KERNEL
+ 
+ #ifndef HOTSPOT_VM_DISTRO
+   #error HOTSPOT_VM_DISTRO must be defined
+@@ -128,7 +129,7 @@
+   return "";
+ }
+ 
+-// NOTE: do *not* use stringStream. this function is called by 
++// NOTE: do *not* use stringStream. this function is called by
+ //       fatal error handler. if the crash is in native thread,
+ //       stringStream cannot get resource allocated and will SEGV.
+ const char* Abstract_VM_Version::vm_release() {
+@@ -189,7 +190,7 @@
+ 
+ 
+   return VMNAME " (" VM_RELEASE ") for " OS "-" CPU
+-         " JRE (" JRE_RELEASE_VERSION "), built on " __DATE__ " " __TIME__ 
++         " JRE (" JRE_RELEASE_VERSION "), built on " __DATE__ " " __TIME__
+          " by " XSTR(HOTSPOT_BUILD_USER) " with " HOTSPOT_BUILD_COMPILER;
+ }
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/vm_version.hpp openjdk/hotspot/src/share/vm/runtime/vm_version.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/vm_version.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/vm_version.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)vm_version.hpp	1.27 07/08/08 19:44:04 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // VM_Version provides information about the VM.
+@@ -66,5 +63,10 @@
+   static unsigned int logical_processors_per_package() {
+     return _logical_processors_per_package;
+   }
+-};
+ 
++  // Number of page sizes efficiently supported by the hardware.  Most chips now
++  // support two sizes, thus this default implementation.  Processor-specific
++  // subclasses should define new versions to hide this one as needed.  Note
++  // that the O/S may support more sizes, but at most this many are used.
++  static uint page_size_count() { return 2; }
++};
+diff -ruN openjdk6/hotspot/src/share/vm/runtime/vtune.hpp openjdk/hotspot/src/share/vm/runtime/vtune.hpp
+--- openjdk6/hotspot/src/share/vm/runtime/vtune.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/vtune.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)vtune.hpp	1.20 07/05/05 17:07:01 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,23 +19,23 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+-// Interface to Intel's VTune profiler. 
++// Interface to Intel's VTune profiler.
+ 
+ class VTune : AllStatic {
+  public:
+    static void create_nmethod(nmethod* nm);      // register newly created nmethod
+    static void delete_nmethod(nmethod* nm);      // unregister nmethod before discarding it
+ 
+-   static void register_stub(const char* name, address start, address end);    
++   static void register_stub(const char* name, address start, address end);
+                                                  // register internal VM stub
+    static void start_GC();                       // start/end of GC or scavenge
+    static void end_GC();
+ 
+    static void start_class_load();               // start/end of class loading
+-   static void end_class_load();  
++   static void end_class_load();
+ 
+    static void exit();                           // VM exit
+ };
+@@ -56,4 +53,3 @@
+    VTuneClassLoadMarker() { VTune::start_class_load(); }
+   ~VTuneClassLoadMarker() { VTune::end_class_load(); }
+ };
+-
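vtune.hpp above exposes paired start/end hooks and wraps them in tiny RAII marker classes (VTuneClassLoadMarker and friends) so the end call can never be missed on an early return. A generic sketch of that marker idiom, with a hypothetical Profiler standing in for the VTune interface:

    // Sketch of the RAII start/end marker idiom used by VTuneClassLoadMarker.
    #include <cstdio>

    struct Profiler {                       // stand-in for the VTune hooks
      static void start_class_load() { std::puts("class load: start"); }
      static void end_class_load()   { std::puts("class load: end"); }
    };

    class ClassLoadMarker {
     public:
      ClassLoadMarker()  { Profiler::start_class_load(); }
      ~ClassLoadMarker() { Profiler::end_class_load(); }
    };

    void load_class() {
      ClassLoadMarker marker;   // constructor signals "start"
      // ... class loading work; any return path still runs the destructor ...
    }                           // destructor signals "end" here

    int main() { load_class(); return 0; }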
+diff -ruN openjdk6/hotspot/src/share/vm/services/attachListener.cpp openjdk/hotspot/src/share/vm/services/attachListener.cpp
+--- openjdk6/hotspot/src/share/vm/services/attachListener.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/attachListener.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)attachListener.cpp	1.22 07/05/05 17:07:04 JVM"
+-#endif
+ /*
+  * Copyright 2005-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -33,7 +30,7 @@
+ // Implementation of "properties" command.
+ //
+ // Invokes sun.misc.VMSupport.serializePropertiesToByteArray to serialize
+-// the system properties into a byte array. 
++// the system properties into a byte array.
+ 
+ static klassOop load_and_initialize_klass(symbolHandle sh, TRAPS) {
+   klassOop k = SystemDictionary::resolve_or_fail(sh, true, CHECK_NULL);
+@@ -140,8 +137,9 @@
+   return JNI_OK;
+ }
+ 
++#ifndef SERVICES_KERNEL   // Heap dumping not supported
+ // Implementation of "dumpheap" command.
+-// 
++//
+ // Input arguments :-
+ //   arg0: Name of the dump file
+ //   arg1: "-live" or "-all"
+@@ -168,7 +166,7 @@
+     if (res == 0) {
+       out->print_cr("Heap dump file created");
+     } else {
+-      // heap dump failed 
++      // heap dump failed
+       ResourceMark rm;
+       char* error = dumper.error_as_C_string();
+       if (error == NULL) {
+@@ -180,6 +178,7 @@
+   }
+   return JNI_OK;
+ }
++#endif // SERVICES_KERNEL
+ 
+ // Implementation of "inspectheap" command
+ //
+@@ -210,9 +209,9 @@
+     if (n != 1) {
+       out->print_cr("flag value has to be boolean (1 or 0)");
+       return JNI_ERR;
+-    } 
++    }
+     value = (tmp != 0);
+-  }  
++  }
+   bool res = CommandLineFlags::boolAtPut((char*)name, &value, ATTACH_ON_DEMAND);
+   if (! res) {
+     out->print_cr("setting flag %s failed", name);
+@@ -229,8 +228,8 @@
+     if (n != 1) {
+       out->print_cr("flag value has to be integer");
+       return JNI_ERR;
+-    } 
+-  }  
++    }
++  }
+   bool res = CommandLineFlags::intxAtPut((char*)name,  &value, ATTACH_ON_DEMAND);
+   if (! res) {
+     out->print_cr("setting flag %s failed", name);
+@@ -249,7 +248,7 @@
+       out->print_cr("flag value has to be integer");
+       return JNI_ERR;
+     }
+-  }  
++  }
+   bool res = CommandLineFlags::uintxAtPut((char*)name,  &value, ATTACH_ON_DEMAND);
+   if (! res) {
+     out->print_cr("setting flag %s failed", name);
+@@ -264,7 +263,7 @@
+   if ((value = op->arg(1)) == NULL) {
+     out->print_cr("flag value has to be a string");
+     return JNI_ERR;
+-  }  
++  }
+   bool res = CommandLineFlags::ccstrAtPut((char*)name,  &value, ATTACH_ON_DEMAND);
+   if (res) {
+     FREE_C_HEAP_ARRAY(char, value);
+@@ -277,7 +276,7 @@
+ 
+ // Implementation of "setflag" command
+ static jint set_flag(AttachOperation* op, outputStream* out) {
+-   
++
+   const char* name = NULL;
+   if ((name = op->arg(0)) == NULL) {
+     out->print_cr("flag name is missing");
+@@ -287,13 +286,13 @@
+   Flag* f = Flag::find_flag((char*)name, strlen(name));
+   if (f && f->is_external() && f->is_writeable()) {
+     if (f->is_bool()) {
+-      return set_bool_flag(name, op, out); 
++      return set_bool_flag(name, op, out);
+     } else if (f->is_intx()) {
+-      return set_intx_flag(name, op, out); 
++      return set_intx_flag(name, op, out);
+     } else if (f->is_uintx()) {
+-      return set_uintx_flag(name, op, out); 
++      return set_uintx_flag(name, op, out);
+     } else if (f->is_ccstr()) {
+-      return set_ccstr_flag(name, op, out); 
++      return set_ccstr_flag(name, op, out);
+     } else {
+       ShouldNotReachHere();
+       return JNI_ERR;
+@@ -324,36 +323,38 @@
+ 
+ // names must be of length <= AttachOperation::name_length_max
+ static AttachOperationFunctionInfo funcs[] = {
+-  { "agentProperties", 	get_agent_properties },
++  { "agentProperties",  get_agent_properties },
+   { "datadump",         data_dump },
++#ifndef SERVICES_KERNEL
+   { "dumpheap",         dump_heap },
++#endif  // SERVICES_KERNEL
+   { "load",             JvmtiExport::load_agent_library },
+   { "properties",       get_system_properties },
+-  { "threaddump",	thread_dump },
++  { "threaddump",       thread_dump },
+   { "inspectheap",      heap_inspection },
+   { "setflag",          set_flag },
+   { "printflag",        print_flag },
+-  { NULL, 		NULL }
++  { NULL,               NULL }
+ };
+ 
+ 
+ 
+ // The Attach Listener threads services a queue. It dequeues an operation
+ // from the queue, examines the operation name (command), and dispatches
+-// to the corresponding function to perform the operation. 
++// to the corresponding function to perform the operation.
+ 
+ static void attach_listener_thread_entry(JavaThread* thread, TRAPS) {
+   os::set_priority(thread, NearMaxPriority);
+ 
+   if (AttachListener::pd_init() != 0) {
+     return;
+-  } 
++  }
+   AttachListener::set_initialized();
+ 
+   for (;;) {
+     AttachOperation* op = AttachListener::dequeue();
+     if (op == NULL) {
+-      return;	// dequeue failed or shutdown
++      return;   // dequeue failed or shutdown
+     }
+ 
+     ResourceMark rm;
+@@ -370,7 +371,7 @@
+         const char* name = funcs[i].name;
+         assert(strlen(name) <= AttachOperation::name_length_max, "operation <= name_length_max");
+         if (strcmp(op->name(), name) == 0) {
+-  	  info = &(funcs[i]);
++          info = &(funcs[i]);
+           break;
+         }
+       }
+@@ -402,18 +403,18 @@
+   instanceHandle thread_oop = klass->allocate_instance_handle(CHECK);
+ 
+   const char thread_name[] = "Attach Listener";
+-  Handle string = java_lang_String::create_from_str(thread_name, CHECK);    
++  Handle string = java_lang_String::create_from_str(thread_name, CHECK);
+ 
+   // Initialize thread_oop to put it into the system threadGroup
+   Handle thread_group (THREAD, Universe::system_thread_group());
+   JavaValue result(T_VOID);
+-  JavaCalls::call_special(&result, thread_oop, 
+-                       klass, 
+-                       vmSymbolHandles::object_initializer_name(), 
+-                       vmSymbolHandles::threadgroup_string_void_signature(), 
+-                       thread_group, 
+-                       string, 
+-                       CHECK);  
++  JavaCalls::call_special(&result, thread_oop,
++                       klass,
++                       vmSymbolHandles::object_initializer_name(),
++                       vmSymbolHandles::threadgroup_string_void_signature(),
++                       thread_group,
++                       string,
++                       CHECK);
+ 
+   KlassHandle group(THREAD, SystemDictionary::threadGroup_klass());
+   JavaCalls::call_special(&result,
+@@ -421,12 +422,12 @@
+                         group,
+                         vmSymbolHandles::add_method_name(),
+                         vmSymbolHandles::thread_void_signature(),
+-			thread_oop,		// ARG 1
++                        thread_oop,             // ARG 1
+                         CHECK);
+ 
+   { MutexLocker mu(Threads_lock);
+     JavaThread* listener_thread = new JavaThread(&attach_listener_thread_entry);
+-                                                                                                                              
++
+     // Check that thread and osthread were created
+     if (listener_thread == NULL || listener_thread->osthread() == NULL) {
+       vm_exit_during_initialization("java.lang.OutOfMemoryError",
+@@ -435,7 +436,7 @@
+ 
+     java_lang_Thread::set_thread(thread_oop(), listener_thread);
+     java_lang_Thread::set_daemon(thread_oop());
+-         
++
+     listener_thread->set_threadObj(thread_oop());
+     Threads::add(listener_thread);
+     Thread::start(listener_thread);
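attachListener.cpp above dispatches each dequeued operation through the funcs[] table: a NULL-terminated array of {name, handler} pairs scanned with strcmp. A minimal sketch of that dispatch-table pattern; AttachOp and the handlers here are illustrative, not the HotSpot types:

    // Sketch of the {name, handler} table the attach listener uses.
    #include <cstdio>
    #include <cstring>

    struct AttachOp { const char* name; };
    typedef int (*Handler)(const AttachOp*);

    static int data_dump(const AttachOp*)   { std::puts("datadump");   return 0; }
    static int thread_dump(const AttachOp*) { std::puts("threaddump"); return 0; }

    static const struct { const char* name; Handler func; } funcs[] = {
      { "datadump",   data_dump },
      { "threaddump", thread_dump },
      { NULL,         NULL }              // sentinel, as in the patch above
    };

    static int dispatch(const AttachOp* op) {
      for (int i = 0; funcs[i].name != NULL; i++) {
        if (std::strcmp(op->name, funcs[i].name) == 0)
          return funcs[i].func(op);       // run the matching handler
      }
      return -1;                          // unknown command
    }

    int main() {
      AttachOp op = { "threaddump" };
      return dispatch(&op);               // prints "threaddump", returns 0
    }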
+diff -ruN openjdk6/hotspot/src/share/vm/services/attachListener.hpp openjdk/hotspot/src/share/vm/services/attachListener.hpp
+--- openjdk6/hotspot/src/share/vm/services/attachListener.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/attachListener.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)attachListener.hpp	1.11 07/05/05 17:07:04 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,18 +19,19 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The AttachListener thread services a queue of operations that are enqueued
+ // by client tools. Each operation is identified by a name and has up to 3
+ // arguments. The operation name is mapped to a function which performs the
+ // operation. The function is called with an outputStream which is can use to
+-// write any result data (for examples the properties command serializes 
+-// properties names and values to the output stream). When the function 
++// write any result data (for example, the properties command serializes
++// property names and values to the output stream). When the function
+ // complets the result value and any result data is returned to the client
+ // tool.
+ 
++#ifndef SERVICES_KERNEL
+ 
+ class AttachOperation;
+ 
+@@ -43,25 +41,31 @@
+   const char* name;
+   AttachOperationFunction func;
+ };
++#endif // SERVICES_KERNEL
+ 
+ class AttachListener: AllStatic {
+- private:
+-  static volatile bool _initialized;
+-
+  public:
+-  static void init();
+-  static bool is_initialized()			{ return _initialized; }
+-  static void set_initialized()                 { _initialized = true; }
+-  static void abort();
++  static void init()  KERNEL_RETURN;
++  static void abort() KERNEL_RETURN;
+ 
+   // invoke to perform clean-up tasks when all clients detach
+-  static void detachall();
++  static void detachall() KERNEL_RETURN;
+ 
+   // indicates if the Attach Listener needs to be created at startup
+-  static bool init_at_startup();
++  static bool init_at_startup() KERNEL_RETURN_(return false;);
+ 
+   // indicates if we have a trigger to start the Attach Listener
+-  static bool is_init_trigger();
++  static bool is_init_trigger() KERNEL_RETURN_(return false;);
++
++#ifdef SERVICES_KERNEL
++  static bool is_attach_supported()             { return false; }
++#else // SERVICES_KERNEL
++ private:
++  static volatile bool _initialized;
++
++ public:
++  static bool is_initialized()                  { return _initialized; }
++  static void set_initialized()                 { _initialized = true; }
+ 
+   // indicates if this VM supports attach-on-demand
+   static bool is_attach_supported()             { return !DisableAttachMechanism; }
+@@ -83,14 +87,16 @@
+ 
+   // dequeue the next operation
+   static AttachOperation* dequeue();
++#endif // SERVICES_KERNEL
+ };
+ 
++#ifndef SERVICES_KERNEL
+ class AttachOperation: public CHeapObj {
+  public:
+   enum {
+-    name_length_max = 16,	// maximum length of  name
+-    arg_length_max = 1024,	// maximum length of argument
+-    arg_count_max = 3		// maximum number of arguments   
++    name_length_max = 16,       // maximum length of name
++    arg_length_max = 1024,      // maximum length of argument
++    arg_count_max = 3           // maximum number of arguments
+   };
+ 
+   // name of special operation that can be enqueued when all
+@@ -102,7 +108,7 @@
+   char _arg[arg_count_max][arg_length_max+1];
+ 
+  public:
+-  const char* name() const			{ return _name; }
++  const char* name() const                      { return _name; }
+ 
+   // set the operation name
+   void set_name(char* name) {
+@@ -138,6 +144,4 @@
+   // complete operation by sending result code and any result data to the client
+   virtual void complete(jint result, bufferedStream* result_stream) = 0;
+ };
+-
+-
+-
++#endif // SERVICES_KERNEL
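The attachListener.hpp hunks gate the full attach machinery on SERVICES_KERNEL and turn the public entry points into stubs via KERNEL_RETURN macros. A sketch of that conditional-stub pattern follows; the macro bodies mirror the usage above rather than HotSpot's exact definitions, and SERVICES_KERNEL is force-defined so the sketch links on its own:

    // Sketch of the KERNEL_RETURN stub pattern: in a reduced ("kernel")
    // build the entry points collapse to inline stubs; in a full build the
    // macro vanishes and the real definitions live in the .cpp file.
    #define SERVICES_KERNEL           // force the reduced flavor for this sketch

    #ifdef SERVICES_KERNEL
      #define KERNEL_RETURN          {}
      #define KERNEL_RETURN_(code)   { code }
    #else
      #define KERNEL_RETURN          // expands to nothing: plain declaration
      #define KERNEL_RETURN_(code)
    #endif

    class Listener {
     public:
      static void init()            KERNEL_RETURN;
      static bool init_at_startup() KERNEL_RETURN_(return false;);
    };

    int main() { return Listener::init_at_startup() ? 1 : 0; }   // returns 0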
+diff -ruN openjdk6/hotspot/src/share/vm/services/classLoadingService.cpp openjdk/hotspot/src/share/vm/services/classLoadingService.cpp
+--- openjdk6/hotspot/src/share/vm/services/classLoadingService.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/classLoadingService.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)classLoadingService.cpp	1.15 07/05/05 17:07:04 JVM"
+-#endif
+ /*
+  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -50,7 +47,7 @@
+ 
+ #else //  ndef DTRACE_ENABLED
+ 
+-#define DTRACE_CLASSLOAD_PROBE(type, clss, shared) 
++#define DTRACE_CLASSLOAD_PROBE(type, clss, shared)
+ 
+ #endif
+ 
+@@ -137,7 +134,7 @@
+ 
+ void ClassLoadingService::notify_class_loaded(instanceKlass* k, bool shared_class) {
+   DTRACE_CLASSLOAD_PROBE(loaded, k, shared_class);
+-  PerfCounter* classes_counter = (shared_class ? _shared_classes_loaded_count 
++  PerfCounter* classes_counter = (shared_class ? _shared_classes_loaded_count
+                                                : _classes_loaded_count);
+   // increment the count
+   classes_counter->inc();
+@@ -200,14 +197,14 @@
+ 
+   // For consistency of the loaded classes, grab the SystemDictionary lock
+   MutexLocker sd_mutex(SystemDictionary_lock);
+-  
++
+   // Set _loaded_classes and _current_thread and begin enumerating all classes.
+   // Only one thread will do the enumeration at a time.
+   // These static variables are needed and they are used by the static method
+   // add_loaded_class called from classes_do().
+   _loaded_classes = _klass_handle_array;
+   _current_thread = cur_thread;
+-  
++
+   SystemDictionary::classes_do(&add_loaded_class);
+ 
+   // FIXME: Exclude array klasses for now
+diff -ruN openjdk6/hotspot/src/share/vm/services/classLoadingService.hpp openjdk/hotspot/src/share/vm/services/classLoadingService.hpp
+--- openjdk6/hotspot/src/share/vm/services/classLoadingService.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/classLoadingService.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)classLoadingService.hpp	1.10 07/05/05 17:07:05 JVM"
+-#endif
+ /*
+  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class instanceKlass;
+@@ -31,7 +28,7 @@
+ class ClassLoadingService : public AllStatic {
+ private:
+   // Counters for classes loaded from class files
+-  static PerfCounter*  _classes_loaded_count;  
++  static PerfCounter*  _classes_loaded_count;
+   static PerfCounter*  _classes_unloaded_count;
+   static PerfCounter*  _classbytes_loaded;
+   static PerfCounter*  _classbytes_unloaded;
+@@ -43,7 +40,7 @@
+   static PerfCounter*  _shared_classbytes_unloaded;
+ 
+   static PerfVariable* _class_methods_size;
+- 
++
+   static size_t compute_class_size(instanceKlass* k);
+ 
+ public:
+@@ -136,4 +133,3 @@
+     _loaded_classes->append(h);
+   }
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/services/dtraceAttacher.cpp openjdk/hotspot/src/share/vm/services/dtraceAttacher.cpp
+--- openjdk6/hotspot/src/share/vm/services/dtraceAttacher.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/dtraceAttacher.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)dtraceAttacher.cpp	1.8 07/05/23 10:54:23 JVM" 
+-#endif
+ /*
+  * Copyright 2006-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -48,10 +45,10 @@
+ };
+ 
+ static void set_bool_flag(const char* flag, bool value) {
+-  CommandLineFlags::boolAtPut((char*)flag, strlen(flag), &value, 
++  CommandLineFlags::boolAtPut((char*)flag, strlen(flag), &value,
+                               ATTACH_ON_DEMAND);
+ }
+-  
++
+ // Enable only the "fine grained" flags. Do *not* touch
+ // the overall "ExtendedDTraceProbes" flag.
+ void DTrace::enable_dprobes(int probes) {
+@@ -77,7 +74,7 @@
+ }
+ 
+ // Disable only the "fine grained" flags. Do *not* touch
+-// the overall "ExtendedDTraceProbes" flag. 
++// the overall "ExtendedDTraceProbes" flag.
+ void DTrace::disable_dprobes(int probes) {
+   bool changed = false;
+   if (DTraceAllocProbes && (probes & DTRACE_ALLOC_PROBES)) {
+@@ -123,15 +120,15 @@
+   if (flag) {
+     enable_dprobes(DTRACE_ALL_PROBES);
+   } else {
+-    /* 
++    /*
+      * FIXME: Revisit this: currently all-client-detach detection
+      * does not work and hence disabled. The following scheme does
+      * not work. So, we have to disable fine-grained flags here.
+      *
+      * disable_dprobes call has to be delayed till next "detach all "event.
+      * This is to be  done so that concurrent DTrace clients that may
+-     * have enabled one or more fine grained dprobes and may be running 
+-     * still. On "detach all" clients event, we would sync ExtendedDTraceProbes 
++     * have enabled one or more fine grained dprobes and may be running
++     * still. On "detach all" clients event, we would sync ExtendedDTraceProbes
+      * with  fine grained flags which would take care of disabling fine grained flags.
+      */
+     disable_dprobes(DTRACE_ALL_PROBES);
+diff -ruN openjdk6/hotspot/src/share/vm/services/dtraceAttacher.hpp openjdk/hotspot/src/share/vm/services/dtraceAttacher.hpp
+--- openjdk6/hotspot/src/share/vm/services/dtraceAttacher.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/dtraceAttacher.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)dtraceAttacher.hpp	1.5 07/05/05 17:07:05 JVM"
+-#endif
+ /*
+  * Copyright 2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #define DTRACE_ALLOC_PROBES    0x1
+@@ -45,4 +42,3 @@
+   // set ExtendedDTraceProbes flag
+   static void set_extended_dprobes(bool value);
+ };
+-
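dtraceAttacher above groups the fine-grained probe flags behind bit masks (DTRACE_ALLOC_PROBES = 0x1 and so on) so clients can enable or disable several groups in one call. A small sketch of that bitmask pattern; the two boolean globals are stand-ins for the VM's command-line flags, and only two of the groups are shown:

    // Sketch: toggle probe groups by bitmask, as enable_dprobes() does above.
    #include <cstdio>

    enum {
      DTRACE_ALLOC_PROBES  = 0x1,
      DTRACE_METHOD_PROBES = 0x2,
      DTRACE_ALL_PROBES    = DTRACE_ALLOC_PROBES | DTRACE_METHOD_PROBES
    };

    static bool alloc_probes  = false;   // stand-in for DTraceAllocProbes
    static bool method_probes = false;   // stand-in for DTraceMethodProbes

    static void enable_dprobes(int probes) {
      if (probes & DTRACE_ALLOC_PROBES)  alloc_probes  = true;
      if (probes & DTRACE_METHOD_PROBES) method_probes = true;
    }

    int main() {
      enable_dprobes(DTRACE_ALL_PROBES);
      std::printf("alloc=%d method=%d\n", alloc_probes, method_probes);
      return 0;   // prints: alloc=1 method=1
    }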
+diff -ruN openjdk6/hotspot/src/share/vm/services/heapDumper.cpp openjdk/hotspot/src/share/vm/services/heapDumper.cpp
+--- openjdk6/hotspot/src/share/vm/services/heapDumper.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/heapDumper.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)heapDumper.cpp	1.20 07/05/29 09:44:30 JVM"
+-#endif
+ /*
+  * Copyright 2005-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -40,7 +37,7 @@
+  *            UTF8 strings, objects, stack traces, etc. They usually
+  *            have the same size as host pointers. For example, on
+  *            Solaris and Win32, the size is 4.
+- * u4         high word 
++ * u4         high word
+  * u4         low word    number of milliseconds since 0:00 GMT, 1/1/70
+  * [record]*  a sequence of records.
+  *
+@@ -59,7 +56,7 @@
+  *
+  * TAG           BODY       notes
+  *----------------------------------------------------------
+- * HPROF_UTF8               a UTF8-encoded name  
++ * HPROF_UTF8               a UTF8-encoded name
+  *
+  *               id         name ID
+  *               [u1]*      UTF8 characters (no trailing zero)
+@@ -132,7 +129,7 @@
+  *               id         thread group name ID
+  *               id         thread group parent name ID
+  *
+- * HPROF_END_THREAD         a terminating thread. 
++ * HPROF_END_THREAD         a terminating thread.
+  *
+  *               u4         thread serial number
+  *
+@@ -212,7 +209,7 @@
+  *
+  *                          u2         size of constant pool
+  *                          [u2,       constant pool index,
+- *                           ty,       type 
++ *                           ty,       type
+  *                                     2:  object
+  *                                     4:  boolean
+  *                                     5:  char
+@@ -279,7 +276,7 @@
+  *                          0x00000002: cpu sampling on/off
+  *                u2        stack trace depth
+  *
+- * 
++ *
+  * When the header is "JAVA PROFILE 1.0.2" a heap dump can optionally
+  * be generated as a sequence of heap dump segments. This sequence is
+  * terminated by an end record. The additional tags allowed by format
+@@ -317,36 +314,36 @@
+   HPROF_HEAP_DUMP_END           = 0x2C,
+ 
+   // field types
+-  HPROF_ARRAY_OBJECT 		= 0x01,
+-  HPROF_NORMAL_OBJECT 		= 0x02,
+-  HPROF_BOOLEAN 		= 0x04,
+-  HPROF_CHAR 			= 0x05,
+-  HPROF_FLOAT 			= 0x06,
+-  HPROF_DOUBLE 			= 0x07,
+-  HPROF_BYTE 			= 0x08,
+-  HPROF_SHORT 			= 0x09,
+-  HPROF_INT 			= 0x0A,
+-  HPROF_LONG 			= 0x0B,
++  HPROF_ARRAY_OBJECT            = 0x01,
++  HPROF_NORMAL_OBJECT           = 0x02,
++  HPROF_BOOLEAN                 = 0x04,
++  HPROF_CHAR                    = 0x05,
++  HPROF_FLOAT                   = 0x06,
++  HPROF_DOUBLE                  = 0x07,
++  HPROF_BYTE                    = 0x08,
++  HPROF_SHORT                   = 0x09,
++  HPROF_INT                     = 0x0A,
++  HPROF_LONG                    = 0x0B,
+ 
+   // data-dump sub-records
+-  HPROF_GC_ROOT_UNKNOWN	    	= 0xFF,
+-  HPROF_GC_ROOT_JNI_GLOBAL	= 0x01,
+-  HPROF_GC_ROOT_JNI_LOCAL	= 0x02,
+-  HPROF_GC_ROOT_JAVA_FRAME	= 0x03,
+-  HPROF_GC_ROOT_NATIVE_STACK	= 0x04,
+-  HPROF_GC_ROOT_STICKY_CLASS	= 0x05,
+-  HPROF_GC_ROOT_THREAD_BLOCK	= 0x06,
+-  HPROF_GC_ROOT_MONITOR_USED	= 0x07,
+-  HPROF_GC_ROOT_THREAD_OBJ	= 0x08,
+-  HPROF_GC_CLASS_DUMP		= 0x20,
+-  HPROF_GC_INSTANCE_DUMP    	= 0x21,
+-  HPROF_GC_OBJ_ARRAY_DUMP	= 0x22,
+-  HPROF_GC_PRIM_ARRAY_DUMP	= 0x23
++  HPROF_GC_ROOT_UNKNOWN         = 0xFF,
++  HPROF_GC_ROOT_JNI_GLOBAL      = 0x01,
++  HPROF_GC_ROOT_JNI_LOCAL       = 0x02,
++  HPROF_GC_ROOT_JAVA_FRAME      = 0x03,
++  HPROF_GC_ROOT_NATIVE_STACK    = 0x04,
++  HPROF_GC_ROOT_STICKY_CLASS    = 0x05,
++  HPROF_GC_ROOT_THREAD_BLOCK    = 0x06,
++  HPROF_GC_ROOT_MONITOR_USED    = 0x07,
++  HPROF_GC_ROOT_THREAD_OBJ      = 0x08,
++  HPROF_GC_CLASS_DUMP           = 0x20,
++  HPROF_GC_INSTANCE_DUMP        = 0x21,
++  HPROF_GC_OBJ_ARRAY_DUMP       = 0x22,
++  HPROF_GC_PRIM_ARRAY_DUMP      = 0x23
+ } hprofTag;
+ 
+ // Default stack trace ID (used for dummy HPROF_TRACE record)
+ enum {
+-  STACK_TRACE_ID = 1 
++  STACK_TRACE_ID = 1
+ };
+ 
+ 
+@@ -358,24 +355,24 @@
+     io_buffer_size  = 8*M
+   };
+ 
+-  int _fd;	        // file descriptor (-1 if dump file not open)
+-  jlong _bytes_written;	// number of byte written to dump file
+-	
+-  char* _buffer;    // internal buffer 
++  int _fd;              // file descriptor (-1 if dump file not open)
++  jlong _bytes_written; // number of bytes written to dump file
++
++  char* _buffer;    // internal buffer
+   int _size;
+   int _pos;
+ 
+-  char* _error;	  // error message when I/O fails
++  char* _error;   // error message when I/O fails
+ 
+   void set_file_descriptor(int fd)              { _fd = fd; }
+-  int file_descriptor()	const			{ return _fd; }
+-  
++  int file_descriptor() const                   { return _fd; }
++
+   char* buffer() const                          { return _buffer; }
+   int buffer_size() const                       { return _size; }
+   int position() const                          { return _pos; }
+   void set_position(int pos)                    { _pos = pos; }
+ 
+-  void set_error(const char* error)		{ _error = (char*)os::strdup(error); }
++  void set_error(const char* error)             { _error = (char*)os::strdup(error); }
+ 
+   // all I/O go through this function
+   void write_internal(void* s, int len);
+@@ -385,11 +382,11 @@
+   ~DumpWriter();
+ 
+   void close();
+-  bool is_open() const			{ return file_descriptor() >= 0; }
++  bool is_open() const                  { return file_descriptor() >= 0; }
+   void flush();
+ 
+   // total number of bytes written to the disk
+-  jlong bytes_written() const		{ return _bytes_written; }
++  jlong bytes_written() const           { return _bytes_written; }
+ 
+   // adjust the number of bytes written to disk (used to keep the count
+   // of the number of bytes written in case of rewrites)
+@@ -405,11 +402,11 @@
+ 
+   // writer functions
+   void write_raw(void* s, int len);
+-  void write_u1(u1 x)			{ write_raw((void*)&x, 1); }
++  void write_u1(u1 x)                   { write_raw((void*)&x, 1); }
+   void write_u2(u2 x);
+   void write_u4(u4 x);
+-  void write_u8(u8 x); 
+-  void write_objectID(oop o); 
++  void write_u8(u8 x);
++  void write_objectID(oop o);
+   void write_classID(Klass* k);
+ };
+ 
+@@ -418,7 +415,7 @@
+   // sufficient memory then reduce size until we can allocate something.
+   _size = io_buffer_size;
+   do {
+-    _buffer = (char*)os::malloc(_size);	
++    _buffer = (char*)os::malloc(_size);
+     if (_buffer == NULL) {
+       _size = _size >> 1;
+     }
+@@ -427,7 +424,7 @@
+   _pos = 0;
+   _error = NULL;
+   _bytes_written = 0L;
+-  _fd = os::create_binary_file(path, false);	// don't replace existing file
++  _fd = os::create_binary_file(path, false);    // don't replace existing file
+ 
+   // if the open failed we record the error
+   if (_fd < 0) {
+@@ -454,7 +451,7 @@
+ }
+ 
+ // write directly to the file
+-void DumpWriter::write_internal(void* s, int len) { 
++void DumpWriter::write_internal(void* s, int len) {
+   if (is_open()) {
+     int n = ::write(file_descriptor(), s, len);
+     if (n > 0) {
+@@ -473,7 +470,7 @@
+ }
+ 
+ // write raw bytes
+-void DumpWriter::write_raw(void* s, int len) { 
++void DumpWriter::write_raw(void* s, int len) {
+   if (is_open()) {
+     // flush buffer to make toom
+     if ((position()+ len) >= buffer_size()) {
+@@ -492,10 +489,10 @@
+ }
+ 
+ // flush any buffered bytes to the file
+-void DumpWriter::flush() {  
+-  if (is_open() && position() > 0) {    
++void DumpWriter::flush() {
++  if (is_open() && position() > 0) {
+     write_internal(buffer(), position());
+-    set_position(0);    
++    set_position(0);
+   }
+ }
+ 
+@@ -506,7 +503,7 @@
+     jlong offset = os::current_file_offset(file_descriptor());
+     assert(offset >= 0, "lseek failed");
+     return offset + (jlong)position();
+-  } else {  
++  } else {
+     return (jlong)-1;
+   }
+ }
+@@ -520,26 +517,26 @@
+   // may be closed due to I/O error
+   if (is_open()) {
+     jlong n = os::seek_to_file_offset(file_descriptor(), off);
+-    assert(n >= 0, "lseek failed");  
++    assert(n >= 0, "lseek failed");
+   }
+ }
+ 
+-void DumpWriter::write_u2(u2 x) { 
++void DumpWriter::write_u2(u2 x) {
+   u2 v;
+   Bytes::put_Java_u2((address)&v, x);
+   write_raw((void*)&v, 2);
+ }
+ 
+-void DumpWriter::write_u4(u4 x) { 
++void DumpWriter::write_u4(u4 x) {
+   u4 v;
+   Bytes::put_Java_u4((address)&v, x);
+-  write_raw((void*)&v, 4); 
++  write_raw((void*)&v, 4);
+ }
+ 
+-void DumpWriter::write_u8(u8 x) { 
++void DumpWriter::write_u8(u8 x) {
+   u8 v;
+   Bytes::put_Java_u8((address)&v, x);
+-  write_raw((void*)&v, 8); 
++  write_raw((void*)&v, 8);
+ }
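The write_u2/write_u4/write_u8 helpers above emit HPROF fields in big-endian ("Java") byte order via Bytes::put_Java_*, independent of host endianness. A stand-alone sketch of the u4 case:

    // Sketch: big-endian field encoding, as Bytes::put_Java_u4 provides.
    #include <cstdint>
    #include <cstdio>

    static void put_java_u4(unsigned char* dst, std::uint32_t x) {
      dst[0] = (unsigned char)(x >> 24);   // most significant byte first
      dst[1] = (unsigned char)(x >> 16);
      dst[2] = (unsigned char)(x >> 8);
      dst[3] = (unsigned char)(x);
    }

    int main() {
      unsigned char buf[4];
      put_java_u4(buf, 0x2C000001u);
      std::printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
      return 0;   // prints: 2c 00 00 01 on any host
    }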
+ 
+ void DumpWriter::write_objectID(oop o) {
+@@ -580,7 +577,7 @@
+   static void dump_double(DumpWriter* writer, jdouble d);
+   // dumps the raw value of the given field
+   static void dump_field_value(DumpWriter* writer, char type, address addr);
+-  // dumps static fields of the given class 
++  // dumps static fields of the given class
+   static void dump_static_fields(DumpWriter* writer, klassOop k);
+   // dump the raw values of the instance fields of the given object
+   static void dump_instance_fields(DumpWriter* writer, oop o);
+@@ -591,7 +588,7 @@
+   // creates HPROF_GC_CLASS_DUMP record for the given class and each of its
+   // array classes
+   static void dump_class_and_array_classes(DumpWriter* writer, klassOop k);
+-  // creates HPROF_GC_CLASS_DUMP record for a given primitive array 
++  // creates HPROF_GC_CLASS_DUMP record for a given primitive array
+   // class (and each multi-dimensional array class too)
+   static void dump_basic_type_array_class(DumpWriter* writer, klassOop k);
+ 
+@@ -604,35 +601,35 @@
+ // write a header of the given type
+ void DumperSupport:: write_header(DumpWriter* writer, hprofTag tag, u4 len) {
+   writer->write_u1((u1)tag);
+-  writer->write_u4(0);			// current ticks
++  writer->write_u4(0);                  // current ticks
+   writer->write_u4(len);
+ }
+ 
+ // returns hprof tag for the given type signature
+ hprofTag DumperSupport::sig2tag(symbolOop sig) {
+   switch (sig->byte_at(0)) {
+-    case JVM_SIGNATURE_CLASS	: return HPROF_NORMAL_OBJECT;
+-    case JVM_SIGNATURE_ARRAY	: return HPROF_NORMAL_OBJECT;
+-    case JVM_SIGNATURE_BYTE	: return HPROF_BYTE;
+-    case JVM_SIGNATURE_CHAR	: return HPROF_CHAR;
+-    case JVM_SIGNATURE_FLOAT	: return HPROF_FLOAT;
+-    case JVM_SIGNATURE_DOUBLE	: return HPROF_DOUBLE;
+-    case JVM_SIGNATURE_INT	: return HPROF_INT;
+-    case JVM_SIGNATURE_LONG	: return HPROF_LONG;
+-    case JVM_SIGNATURE_SHORT	: return HPROF_SHORT;
+-    case JVM_SIGNATURE_BOOLEAN	: return HPROF_BOOLEAN;
++    case JVM_SIGNATURE_CLASS    : return HPROF_NORMAL_OBJECT;
++    case JVM_SIGNATURE_ARRAY    : return HPROF_NORMAL_OBJECT;
++    case JVM_SIGNATURE_BYTE     : return HPROF_BYTE;
++    case JVM_SIGNATURE_CHAR     : return HPROF_CHAR;
++    case JVM_SIGNATURE_FLOAT    : return HPROF_FLOAT;
++    case JVM_SIGNATURE_DOUBLE   : return HPROF_DOUBLE;
++    case JVM_SIGNATURE_INT      : return HPROF_INT;
++    case JVM_SIGNATURE_LONG     : return HPROF_LONG;
++    case JVM_SIGNATURE_SHORT    : return HPROF_SHORT;
++    case JVM_SIGNATURE_BOOLEAN  : return HPROF_BOOLEAN;
+     default : ShouldNotReachHere(); /* to shut up compiler */ return HPROF_BYTE;
+   }
+ }
+ 
+ hprofTag DumperSupport::type2tag(BasicType type) {
+   switch (type) {
+-    case T_BYTE	    : return HPROF_BYTE;
+-    case T_CHAR	    : return HPROF_CHAR;
++    case T_BYTE     : return HPROF_BYTE;
++    case T_CHAR     : return HPROF_CHAR;
+     case T_FLOAT    : return HPROF_FLOAT;
+     case T_DOUBLE   : return HPROF_DOUBLE;
+-    case T_INT	    : return HPROF_INT;
+-    case T_LONG	    : return HPROF_LONG;
++    case T_INT      : return HPROF_INT;
++    case T_LONG     : return HPROF_LONG;
+     case T_SHORT    : return HPROF_SHORT;
+     case T_BOOLEAN  : return HPROF_BOOLEAN;
+     default : ShouldNotReachHere(); /* to shut up compiler */ return HPROF_BYTE;
+@@ -641,7 +638,7 @@
+ 
+ // dump a jfloat
+ void DumperSupport::dump_float(DumpWriter* writer, jfloat f) {
+-  if (g_isnan(f)) {		    
++  if (g_isnan(f)) {
+     writer->write_u4(0x7fc00000);    // collapsing NaNs
+   } else {
+     union {
+@@ -659,11 +656,11 @@
+     jlong l;
+     double d;
+   } u;
+-  if (g_isnan(d)) {		    // collapsing NaNs
++  if (g_isnan(d)) {                 // collapsing NaNs
+     u.l = (jlong)(0x7ff80000);
+     u.l = (u.l << 32);
+-  } else {	  
+-    u.d = (double)d;          
++  } else {
++    u.d = (double)d;
+   }
+   writer->write_u8((u8)u.l);
+ }
+@@ -672,17 +669,17 @@
+ void DumperSupport::dump_field_value(DumpWriter* writer, char type, address addr) {
+   switch (type) {
+     case JVM_SIGNATURE_CLASS :
+-    case JVM_SIGNATURE_ARRAY : { 
++    case JVM_SIGNATURE_ARRAY : {
+       oop* f = (oop*)addr;
+       oop o = *f;
+ 
+-      // reflection and sun.misc.Unsafe classes may have a reference to a 
++      // reflection and sun.misc.Unsafe classes may have a reference to a
+       // klassOop so filter it out.
+       if (o != NULL && o->is_klass()) {
+         o = NULL;
+       }
+ 
+-      // FIXME: When sharing is enabled we don't emit field references to objects 
++      // FIXME: When sharing is enabled we don't emit field references to objects
+       // in shared spaces. We can remove this once we write records for the classes
+       // and strings that are shared.
+       if (o != NULL && o->is_shared()) {
+@@ -691,12 +688,12 @@
+       writer->write_objectID(o);
+       break;
+     }
+-    case JVM_SIGNATURE_BYTE	: {
++    case JVM_SIGNATURE_BYTE     : {
+       jbyte* b = (jbyte*)addr;
+       writer->write_u1((u1)*b);
+       break;
+     }
+-    case JVM_SIGNATURE_CHAR	: {
++    case JVM_SIGNATURE_CHAR     : {
+       jchar* c = (jchar*)addr;
+       writer->write_u2((u2)*c);
+       break;
+@@ -721,7 +718,7 @@
+       writer->write_u4((u4)*i);
+       break;
+     }
+-    case JVM_SIGNATURE_LONG	: {
++    case JVM_SIGNATURE_LONG     : {
+       jlong* l = (jlong*)addr;
+       writer->write_u8((u8)*l);
+       break;
+@@ -742,17 +739,17 @@
+ 
+   int size = 0;
+ 
+-  for (FieldStream fld(ikh, false, false); !fld.eos(); fld.next()) {	
++  for (FieldStream fld(ikh, false, false); !fld.eos(); fld.next()) {
+     if (!fld.access_flags().is_static()) {
+       symbolOop sig = fld.signature();
+       switch (sig->byte_at(0)) {
+-	case JVM_SIGNATURE_CLASS   : 
+-	case JVM_SIGNATURE_ARRAY   : size += oopSize; break;
++        case JVM_SIGNATURE_CLASS   :
++        case JVM_SIGNATURE_ARRAY   : size += oopSize; break;
+ 
+-	case JVM_SIGNATURE_BYTE    : 
++        case JVM_SIGNATURE_BYTE    :
+         case JVM_SIGNATURE_BOOLEAN : size += 1; break;
+ 
+-	case JVM_SIGNATURE_CHAR    : 
++        case JVM_SIGNATURE_CHAR    :
+         case JVM_SIGNATURE_SHORT   : size += 2; break;
+ 
+         case JVM_SIGNATURE_INT     :
+@@ -761,33 +758,33 @@
+         case JVM_SIGNATURE_LONG    :
+         case JVM_SIGNATURE_DOUBLE  : size += 8; break;
+ 
+-	default : ShouldNotReachHere();
++        default : ShouldNotReachHere();
+       }
+     }
+   }
+   return (u4)size;
+ }
+ 
+-// dumps static fields of the given class 
++// dumps static fields of the given class
+ void DumperSupport::dump_static_fields(DumpWriter* writer, klassOop k) {
+   HandleMark hm;
+   instanceKlassHandle ikh = instanceKlassHandle(Thread::current(), k);
+ 
+   // pass 1 - count the static fields
+   u2 field_count = 0;
+-  for (FieldStream fldc(ikh, true, true); !fldc.eos(); fldc.next()) {	
++  for (FieldStream fldc(ikh, true, true); !fldc.eos(); fldc.next()) {
+     if (fldc.access_flags().is_static()) field_count++;
+   }
+ 
+   writer->write_u2(field_count);
+ 
+   // pass 2 - dump the field descriptors and raw values
+-  for (FieldStream fld(ikh, true, true); !fld.eos(); fld.next()) {	
++  for (FieldStream fld(ikh, true, true); !fld.eos(); fld.next()) {
+     if (fld.access_flags().is_static()) {
+       symbolOop sig = fld.signature();
+ 
+       writer->write_objectID(fld.name());   // name
+-      writer->write_u1(sig2tag(sig));	    // type
++      writer->write_u1(sig2tag(sig));       // type
+ 
+       // value
+       int offset = fld.offset();
+@@ -803,7 +800,7 @@
+   HandleMark hm;
+   instanceKlassHandle ikh = instanceKlassHandle(Thread::current(), o->klass());
+ 
+-  for (FieldStream fld(ikh, false, false); !fld.eos(); fld.next()) {	
++  for (FieldStream fld(ikh, false, false); !fld.eos(); fld.next()) {
+     if (!fld.access_flags().is_static()) {
+       symbolOop sig = fld.signature();
+       address addr = (address)o + fld.offset();
+@@ -820,19 +817,19 @@
+ 
+   // pass 1 - count the instance fields
+   u2 field_count = 0;
+-  for (FieldStream fldc(ikh, true, true); !fldc.eos(); fldc.next()) {	
++  for (FieldStream fldc(ikh, true, true); !fldc.eos(); fldc.next()) {
+     if (!fldc.access_flags().is_static()) field_count++;
+   }
+ 
+   writer->write_u2(field_count);
+ 
+   // pass 2 - dump the field descriptors
+-  for (FieldStream fld(ikh, true, true); !fld.eos(); fld.next()) {	
++  for (FieldStream fld(ikh, true, true); !fld.eos(); fld.next()) {
+     if (!fld.access_flags().is_static()) {
+       symbolOop sig = fld.signature();
+ 
+-      writer->write_objectID(fld.name());		    // name
+-      writer->write_u1(sig2tag(sig));	    // type
++      writer->write_objectID(fld.name());                   // name
++      writer->write_u1(sig2tag(sig));       // type
+     }
+   }
+ }
+@@ -843,7 +840,7 @@
+ 
+   writer->write_u1(HPROF_GC_INSTANCE_DUMP);
+   writer->write_objectID(o);
+-  writer->write_u4(STACK_TRACE_ID);  
++  writer->write_u4(STACK_TRACE_ID);
+ 
+   // class ID
+   writer->write_classID(Klass::cast(k));
+@@ -861,7 +858,7 @@
+   Klass* klass = Klass::cast(k);
+   assert(klass->oop_is_instance(), "not an instanceKlass");
+   instanceKlass* ik = (instanceKlass*)klass;
+- 
++
+   writer->write_u1(HPROF_GC_CLASS_DUMP);
+ 
+   // class ID
+@@ -903,7 +900,7 @@
+     assert(klass->oop_is_objArray(), "not an objArrayKlass");
+ 
+     writer->write_u1(HPROF_GC_CLASS_DUMP);
+-    writer->write_classID(klass);  
++    writer->write_classID(klass);
+     writer->write_u4(STACK_TRACE_ID);
+ 
+     // super class of array classes is java.lang.Object
+@@ -919,7 +916,7 @@
+     writer->write_objectID(NULL);
+     writer->write_u4(0);             // instance size
+     writer->write_u2(0);             // constant pool
+-    writer->write_u2(0);             // static fields  
++    writer->write_u2(0);             // static fields
+     writer->write_u2(0);             // instance fields
+ 
+     // get the array class for the next rank
+@@ -927,17 +924,17 @@
+   }
+ }
+ 
+-// creates HPROF_GC_CLASS_DUMP record for a given primitive array 
++// creates HPROF_GC_CLASS_DUMP record for a given primitive array
+ // class (and each multi-dimensional array class too)
+-void DumperSupport::dump_basic_type_array_class(DumpWriter* writer, klassOop k) { 
++void DumperSupport::dump_basic_type_array_class(DumpWriter* writer, klassOop k) {
+  // array classes
+  while (k != NULL) {
+     Klass* klass = Klass::cast(k);
+ 
+     writer->write_u1(HPROF_GC_CLASS_DUMP);
+-    writer->write_classID(klass);  
++    writer->write_classID(klass);
+     writer->write_u4(STACK_TRACE_ID);
+-    
++
+     // super class of array classes is java.lang.Object
+     klassOop java_super = klass->java_super();
+     assert(java_super != NULL, "checking");
+@@ -951,7 +948,7 @@
+     writer->write_objectID(NULL);
+     writer->write_u4(0);             // instance size
+     writer->write_u2(0);             // constant pool
+-    writer->write_u2(0);             // static fields  
++    writer->write_u2(0);             // static fields
+     writer->write_u2(0);             // instance fields
+ 
+     // get the array class for the next rank
+@@ -1012,7 +1009,7 @@
+       }
+       break;
+     }
+-    case T_BYTE : {      
++    case T_BYTE : {
+       writer->write_raw((void*)(array->byte_at_addr(0)), length_in_bytes);
+       break;
+     }
+@@ -1049,7 +1046,7 @@
+       break;
+     }
+ 
+-    // handle float/doubles in a special value to ensure than NaNs are 
++    // handle float/doubles in a special way to ensure that NaNs are
+     // written correctly. TO DO: Check if we can avoid this on processors that
+     // use IEEE 754.
+ 
+@@ -1070,15 +1067,15 @@
+ }
+ 
+ 
+-// Support class used to generate HPROF_UTF8 records from the entries in the 
++// Support class used to generate HPROF_UTF8 records from the entries in the
+ // SymbolTable.
+ 
+ class SymbolTableDumper : public OopClosure {
+  private:
+   DumpWriter* _writer;
+-  DumpWriter* writer() const		    { return _writer; }
++  DumpWriter* writer() const                { return _writer; }
+  public:
+-  SymbolTableDumper(DumpWriter* writer)	    { _writer = writer; }
++  SymbolTableDumper(DumpWriter* writer)     { _writer = writer; }
+   void do_oop(oop* obj_p);
+ };
+ 
+@@ -1096,23 +1093,23 @@
+ }
+ 
+ 
+-// Support class used to generate HPROF_GC_ROOT_JNI_LOCAL records 
++// Support class used to generate HPROF_GC_ROOT_JNI_LOCAL records
+ 
+ class JNILocalsDumper : public OopClosure {
+  private:
+   DumpWriter* _writer;
+   u4 _thread_serial_num;
+-  DumpWriter* writer() const		    { return _writer; }
++  DumpWriter* writer() const                { return _writer; }
+  public:
+-  JNILocalsDumper(DumpWriter* writer, u4 thread_serial_num) { 
+-    _writer = writer; 
++  JNILocalsDumper(DumpWriter* writer, u4 thread_serial_num) {
++    _writer = writer;
+     _thread_serial_num = thread_serial_num;
+   }
+   void do_oop(oop* obj_p);
+ };
+ 
+ 
+-void JNILocalsDumper::do_oop(oop* obj_p) {  
++void JNILocalsDumper::do_oop(oop* obj_p) {
+   // ignore null or deleted handles
+   oop o = *obj_p;
+   if (o != NULL && o != JNIHandles::deleted_handle()) {
+@@ -1124,16 +1121,16 @@
+ }
+ 
+ 
+-// Support class used to generate HPROF_GC_ROOT_JNI_GLOBAL records 
++// Support class used to generate HPROF_GC_ROOT_JNI_GLOBAL records
+ 
+ class JNIGlobalsDumper : public OopClosure {
+  private:
+   DumpWriter* _writer;
+-  DumpWriter* writer() const		    { return _writer; }
++  DumpWriter* writer() const                { return _writer; }
+ 
+  public:
+   JNIGlobalsDumper(DumpWriter* writer) {
+-    _writer = writer; 
++    _writer = writer;
+   }
+   void do_oop(oop* obj_p);
+ };
+@@ -1148,42 +1145,42 @@
+   if (o->is_instance() || o->is_objArray() || o->is_typeArray()) {
+     writer()->write_u1(HPROF_GC_ROOT_JNI_GLOBAL);
+     writer()->write_objectID(o);
+-    writer()->write_objectID((oopDesc*)obj_p);	    // global ref ID
++    writer()->write_objectID((oopDesc*)obj_p);      // global ref ID
+   }
+ };
+ 
+ 
+-// Support class used to generate HPROF_GC_ROOT_MONITOR_USED records 
++// Support class used to generate HPROF_GC_ROOT_MONITOR_USED records
+ 
+ class MonitorUsedDumper : public OopClosure {
+  private:
+   DumpWriter* _writer;
+-  DumpWriter* writer() const		    { return _writer; }
++  DumpWriter* writer() const                { return _writer; }
+  public:
+-  MonitorUsedDumper(DumpWriter* writer) { 
+-    _writer = writer; 
++  MonitorUsedDumper(DumpWriter* writer) {
++    _writer = writer;
+   }
+-  void do_oop(oop* obj_p) {  
++  void do_oop(oop* obj_p) {
+     writer()->write_u1(HPROF_GC_ROOT_MONITOR_USED);
+     writer()->write_objectID(*obj_p);
+   }
+ };
+ 
+ 
+-// Support class used to generate HPROF_GC_ROOT_STICKY_CLASS records 
++// Support class used to generate HPROF_GC_ROOT_STICKY_CLASS records
+ 
+ class StickyClassDumper : public OopClosure {
+  private:
+   DumpWriter* _writer;
+-  DumpWriter* writer() const		    { return _writer; }
++  DumpWriter* writer() const                { return _writer; }
+  public:
+-  StickyClassDumper(DumpWriter* writer) { 
+-    _writer = writer; 
++  StickyClassDumper(DumpWriter* writer) {
++    _writer = writer;
+   }
+   void do_oop(oop* obj_p);
+ };
+ 
+-void StickyClassDumper::do_oop(oop* obj_p) {  
++void StickyClassDumper::do_oop(oop* obj_p) {
+   if (*obj_p != NULL) {
+     oop o = *obj_p;
+     if (o->is_klass()) {
+@@ -1191,7 +1188,7 @@
+       if (Klass::cast(k)->oop_is_instance()) {
+         instanceKlass* ik = instanceKlass::cast(k);
+         writer()->write_u1(HPROF_GC_ROOT_STICKY_CLASS);
+-        writer()->write_classID(ik); 
++        writer()->write_classID(ik);
+       }
+     }
+   }
+@@ -1200,7 +1197,7 @@
+ 
+ class VM_HeapDumper;
+ 
+-// Support class using when iterating over the heap. 
++// Support class used when iterating over the heap.
+ 
+ class HeapObjectDumper : public ObjectClosure {
+  private:
+@@ -1208,15 +1205,15 @@
+   DumpWriter* _writer;
+ 
+   VM_HeapDumper* dumper()               { return _dumper; }
+-  DumpWriter* writer()			{ return _writer; }
++  DumpWriter* writer()                  { return _writer; }
+ 
+   // used to indicate that a record has been written
+   void mark_end_of_record();
+ 
+  public:
+-  HeapObjectDumper(VM_HeapDumper* dumper, DumpWriter* writer) { 
++  HeapObjectDumper(VM_HeapDumper* dumper, DumpWriter* writer) {
+     _dumper = dumper;
+-    _writer = writer; 
++    _writer = writer;
+   }
+ 
+   // called for each object in the heap
+@@ -1224,7 +1221,7 @@
+ };
+ 
+ void HeapObjectDumper::do_object(oop o) {
+-  // hide the sentinel for deleted handles 
++  // hide the sentinel for deleted handles
+   if (o == JNIHandles::deleted_handle()) return;
+ 
+   // ignore KlassKlass objects
+@@ -1238,38 +1235,41 @@
+   }
+ 
+   // create a HPROF_GC_INSTANCE record for each object
+-  if (o->is_instance()) {  
++  if (o->is_instance()) {
+     DumperSupport::dump_instance(writer(), o);
+     mark_end_of_record();
+-  } else { 
++  } else {
+     // create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
+-    if (o->is_objArray()) {  
++    if (o->is_objArray()) {
+       DumperSupport::dump_object_array(writer(), objArrayOop(o));
+       mark_end_of_record();
+     } else {
+       // create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
+       if (o->is_typeArray()) {
+         DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
+-	mark_end_of_record();
++        mark_end_of_record();
+       }
+     }
+-  }     
++  }
+ }
+ 
+ // The VM operation that performs the heap dump
+-class VM_HeapDumper : public VM_Operation {
++class VM_HeapDumper : public VM_GC_Operation {
+  private:
+   DumpWriter* _writer;
+-  bool _is_segmented_dump; 
++  bool _gc_before_heap_dump;
++  bool _is_segmented_dump;
+   jlong _dump_start;
+ 
+   // accessors
+-  DumpWriter* writer() const			{ return _writer; }
++  DumpWriter* writer() const                    { return _writer; }
+   bool is_segmented_dump() const                { return _is_segmented_dump; }
+   void set_segmented_dump()                     { _is_segmented_dump = true; }
+   jlong dump_start() const                      { return _dump_start; }
+   void set_dump_start(jlong pos);
+ 
++  bool skip_operation() const;
++
+   // writes a HPROF_LOAD_CLASS record
+   static void do_load_class(klassOop k);
+ 
+@@ -1294,20 +1294,28 @@
+   // fixes up the current dump record (and writes HPROF_HEAP_DUMP_END
+   // record in the case of a segmented heap dump)
+   void end_of_dump();
+-  
++
+  public:
+-  VM_HeapDumper(DumpWriter* writer){
++  VM_HeapDumper(DumpWriter* writer, bool gc_before_heap_dump) :
++    VM_GC_Operation(0 /* total collections,      dummy, ignored */,
++                    0 /* total full collections, dummy, ignored */,
++                    gc_before_heap_dump) {
+     _writer = writer;
+-    _is_segmented_dump = false; 
++    _gc_before_heap_dump = gc_before_heap_dump;
++    _is_segmented_dump = false;
+     _dump_start = (jlong)-1;
+   }
+ 
+   VMOp_Type type() const { return VMOp_HeapDumper; }
+-  // used to mark sub-record boundary 
++  // used to mark sub-record boundary
+   void check_segment_length();
+   void doit();
+ };
+ 
++bool VM_HeapDumper::skip_operation() const {
++  return false;
++}
++
+ // sets the dump starting position
+ void VM_HeapDumper::set_dump_start(jlong pos) {
+   _dump_start = pos;
+@@ -1325,7 +1333,7 @@
+ 
+     // record the starting position for the dump (its length will be fixed up later)
+     set_dump_start(writer()->current_offset());
+-    writer()->write_u4(0);  
++    writer()->write_u4(0);
+   }
+ }
+ 
+@@ -1334,7 +1342,7 @@
+   if (writer()->is_open()) {
+     assert(dump_start() >= 0, "no dump start recorded");
+ 
+-    // calculate the size of the dump record 
++    // calculate the size of the dump record
+     jlong dump_end = writer()->current_offset();
+     jlong dump_len = (dump_end - dump_start() - 4);
+ 
+@@ -1350,7 +1358,7 @@
+     // adjust the total size written to keep the bytes written correct.
+     writer()->adjust_bytes_written(-((long) sizeof(u4)));
+ 
+-    // seek to dump end so we can continue 
++    // seek to dump end so we can continue
+     writer()->seek_to_offset(dump_end);
+ 
+     // no current dump record
+@@ -1358,12 +1366,12 @@
+   }
+ }
+ 
+-// used on a sub-record boundary to check if we need to start a 
+-// new segment. 
++// used on a sub-record boundary to check if we need to start a
++// new segment.
+ void VM_HeapDumper::check_segment_length() {
+   if (writer()->is_open()) {
+-    if (is_segmented_dump()) {   
+-      // don't use current_offset that would be too expensive on a per record basis    
++    if (is_segmented_dump()) {
++      // don't use current_offset as that would be too expensive on a per-record basis
+       jlong dump_end = writer()->bytes_written() + writer()->bytes_unwritten();
+       assert(dump_end == writer()->current_offset(), "checking");
+       jlong dump_len = (dump_end - dump_start() - 4);
+@@ -1385,9 +1393,9 @@
+ 
+     // for segmented dump we write the end record
+     if (is_segmented_dump()) {
+-      writer()->write_u1(HPROF_HEAP_DUMP_END); 
++      writer()->write_u1(HPROF_HEAP_DUMP_END);
++      writer()->write_u4(0);
+       writer()->write_u4(0);
+-      writer()->write_u4(0); 
+     }
+   }
+ }
+@@ -1403,17 +1411,17 @@
+   static u4 class_serial_num = 0;
+ 
+   VM_HeapDumper* dumper = ((VM_HeapDumper*)VMThread::vm_operation());
+-  DumpWriter* writer = dumper->writer();  
++  DumpWriter* writer = dumper->writer();
+ 
+   // len of HPROF_LOAD_CLASS record
+   u4 remaining = 2*oopSize + 2*sizeof(u4);
+- 
++
+   // write a HPROF_LOAD_CLASS for the class and each array class
+   do {
+     DumperSupport::write_header(writer, HPROF_LOAD_CLASS, remaining);
+ 
+     // class serial number is just a number
+-    writer->write_u4(++class_serial_num); 
++    writer->write_u4(++class_serial_num);
+ 
+     // class ID
+     Klass* klass = Klass::cast(k);
+@@ -1433,7 +1441,7 @@
+ // writes a HPROF_GC_CLASS_DUMP record for the given class
+ void VM_HeapDumper::do_class_dump(klassOop k) {
+   VM_HeapDumper* dumper = ((VM_HeapDumper*)VMThread::vm_operation());
+-  DumpWriter* writer = dumper->writer(); 
++  DumpWriter* writer = dumper->writer();
+   DumperSupport::dump_class_and_array_classes(writer, k);
+ }
+ 
+@@ -1441,11 +1449,11 @@
+ // array (and each multi-dimensional array too)
+ void VM_HeapDumper::do_basic_type_array_class_dump(klassOop k) {
+   VM_HeapDumper* dumper = ((VM_HeapDumper*)VMThread::vm_operation());
+-  DumpWriter* writer = dumper->writer(); 
+-  DumperSupport::dump_basic_type_array_class(writer, k); 
++  DumpWriter* writer = dumper->writer();
++  DumperSupport::dump_basic_type_array_class(writer, k);
+ }
+ 
+-// Walk the stack of the given thread. 
++// Walk the stack of the given thread.
+ // Dumps a HPROF_GC_ROOT_JAVA_FRAME record for each local
+ // Dumps a HPROF_GC_ROOT_JNI_LOCAL record for each JNI local
+ void VM_HeapDumper::do_thread(JavaThread* java_thread, u4 thread_serial_num) {
+@@ -1453,55 +1461,55 @@
+ 
+   oop threadObj = java_thread->threadObj();
+   assert(threadObj != NULL, "sanity check");
+-  
++
+   // JNI locals for the top frame
+   java_thread->active_handles()->oops_do(&blk);
+ 
+   if (java_thread->has_last_Java_frame()) {
+ 
+     // vframes are resource allocated
+-    Thread* current_thread = Thread::current(); 
++    Thread* current_thread = Thread::current();
+     ResourceMark rm(current_thread);
+     HandleMark hm(current_thread);
+ 
+-    RegisterMap reg_map(java_thread);   
++    RegisterMap reg_map(java_thread);
+     frame f = java_thread->last_frame();
+     vframe* vf = vframe::new_vframe(&f, &reg_map, java_thread);
+-   
++
+     while (vf != NULL) {
+       if (vf->is_java_frame()) {
+ 
+-	// java frame (interpreted, compiled, ...)
+-	javaVFrame *jvf = javaVFrame::cast(vf);
++        // java frame (interpreted, compiled, ...)
++        javaVFrame *jvf = javaVFrame::cast(vf);
+ 
+-	if (!(jvf->method()->is_native())) {         	
+-	  StackValueCollection* locals = jvf->locals();
+-	  for (int slot=0; slot<locals->size(); slot++) {
+-	    if (locals->at(slot)->type() == T_OBJECT) {
+-	      oop o = locals->obj_at(slot)();
+-
+-	      if (o != NULL) {	     
+-	        writer()->write_u1(HPROF_GC_ROOT_JAVA_FRAME);
+-	        writer()->write_objectID(o);
++        if (!(jvf->method()->is_native())) {
++          StackValueCollection* locals = jvf->locals();
++          for (int slot=0; slot<locals->size(); slot++) {
++            if (locals->at(slot)->type() == T_OBJECT) {
++              oop o = locals->obj_at(slot)();
++
++              if (o != NULL) {
++                writer()->write_u1(HPROF_GC_ROOT_JAVA_FRAME);
++                writer()->write_objectID(o);
+                 writer()->write_u4(thread_serial_num);
+                 writer()->write_u4((u4)-1); // empty
+-	      }
+-	    }
+-	  }
+-	}
++              }
++            }
++          }
++        }
+       } else {
+ 
+-	// externalVFrame - if it's an entry frame then report any JNI locals
+-	// as roots
+-	frame* fr = vf->frame_pointer();
++        // externalVFrame - if it's an entry frame then report any JNI locals
++        // as roots
++        frame* fr = vf->frame_pointer();
+         assert(fr != NULL, "sanity check");
+         if (fr->is_entry_frame()) {
+           fr->entry_frame_call_wrapper()->handles()->oops_do(&blk);
+-	}
++        }
+       }
+ 
+       vf = vf->sender();
+-    }  
++    }
+   }
+ }
+ 
+@@ -1544,14 +1552,21 @@
+ // The HPROF_HEAP_DUMP record has a length followed by sub-records. To allow
+ // the heap dump to be generated in a single pass we remember the position of
+ // the dump length and fix it up after all sub-records have been written.
+-// To generate the sub-records we iterate over the heap, writing 
++// To generate the sub-records we iterate over the heap, writing
+ // HPROF_GC_INSTANCE_DUMP, HPROF_GC_OBJ_ARRAY_DUMP, and HPROF_GC_PRIM_ARRAY_DUMP
+ // records as we go. Once that is done we write records for some of the GC
+ // roots.
+ 
+ void VM_HeapDumper::doit() {
+-  // need to ensure that we can iterate over the heap
+-  Universe::heap()->ensure_parsability(false);  // no need to retire TLABs
++
++  HandleMark hm;
++  CollectedHeap* ch = Universe::heap();
++  if (_gc_before_heap_dump) {
++    ch->collect_as_vm_thread(GCCause::_heap_dump);
++  } else {
++    // make the heap parsable (no need to retire TLABs)
++    ch->ensure_parsability(false);
++  }
+ 
+   // Write the file header - use 1.0.2 for large heaps, otherwise 1.0.1
+   size_t used;
+@@ -1580,8 +1595,8 @@
+   // HPROF_TRACE record without any frames
+   DumperSupport::write_header(writer(), HPROF_TRACE, 3*sizeof(u4));
+   writer()->write_u4(STACK_TRACE_ID);
+-  writer()->write_u4(0);		    // thread number
+-  writer()->write_u4(0);		    // frame count
++  writer()->write_u4(0);                    // thread number
++  writer()->write_u4(0);                    // frame count
+ 
+   // HPROF_UTF8 records
+   SymbolTableDumper sym_dumper(writer());
+@@ -1632,7 +1647,7 @@
+ }
+ 
+ 
+-// dump the heap to given path. 
++// dump the heap to given path.
+ int HeapDumper::dump(const char* path) {
+   assert(path != NULL && strlen(path) > 0, "path missing");
+ 
+@@ -1642,24 +1657,19 @@
+     timer()->start();
+   }
+ 
+-  // create the dump writer. If the file can be opened then bail 
++  // create the dump writer. If the file cannot be opened then bail
+   DumpWriter writer(path);
+   if (!writer.is_open()) {
+     set_error(writer.error());
+     if (print_to_tty()) {
+-      tty->print_cr("Unable to create %s: %s", path, 
++      tty->print_cr("Unable to create %s: %s", path,
+         (error() != NULL) ? error() : "reason unknown");
+     }
+-    return -1; 
++    return -1;
+   }
+ 
+-  // Do a full GC before heap dump
+-  if (_gc_before_heap_dump) {
+-    Universe::heap()->collect(GCCause::_heap_dump);
+-  }
+   // generate the dump
+-  MutexLocker ml(Heap_lock);
+-  VM_HeapDumper dumper(&writer);
++  VM_HeapDumper dumper(&writer, _gc_before_heap_dump);
+   VMThread::execute(&dumper);
+ 
+   // close dump file and record any error that the writer may have encountered
+@@ -1667,11 +1677,11 @@
+   set_error(writer.error());
+ 
+   // print message in interactive case
+-  if (print_to_tty()) {  
++  if (print_to_tty()) {
+     timer()->stop();
+-    if (error() == NULL) {    
++    if (error() == NULL) {
+       char msg[256];
+-      sprintf(msg, "Heap dump file created [%s bytes in %3.3f secs]", 
++      sprintf(msg, "Heap dump file created [%s bytes in %3.3f secs]",
+         os::jlong_format_specifier(), timer()->seconds());
+       tty->print_cr(msg, writer.bytes_written());
+     } else {
+@@ -1702,7 +1712,7 @@
+   }
+ }
+ 
+-// set the error string 
++// set the error string
+ void HeapDumper::set_error(char* error) {
+   if (_error != NULL) {
+     os::free(_error);
+@@ -1710,7 +1720,54 @@
+   if (error == NULL) {
+     _error = NULL;
+   } else {
+-    _error = os::strdup(error); 
++    _error = os::strdup(error);
+     assert(_error != NULL, "allocation failure");
+   }
+ }
++
++
++// Called by error reporting
++void HeapDumper::dump_heap() {
++  static char path[JVM_MAXPATHLEN];
++
++  // The dump file defaults to java_pid<pid>.hprof in the current working
++  // directory. HeapDumpPath=<file> can be used to specify an alternative
++  // dump file name or a directory where the dump file is created.
++  bool use_default_filename = true;
++  if (HeapDumpPath == NULL || HeapDumpPath[0] == '\0') {
++    path[0] = '\0'; // HeapDumpPath=<file> not specified
++  } else {
++    assert(strlen(HeapDumpPath) < sizeof(path), "HeapDumpPath too long");
++    strcpy(path, HeapDumpPath);
++    // check if the path is a directory (must exist)
++    DIR* dir = os::opendir(path);
++    if (dir == NULL) {
++      use_default_filename = false;
++    } else {
++      // HeapDumpPath specified a directory. We append a file separator
++      // (if needed).
++      os::closedir(dir);
++      size_t fs_len = strlen(os::file_separator());
++      if (strlen(path) >= fs_len) {
++        char* end = path;
++        end += (strlen(path) - fs_len);
++        if (strcmp(end, os::file_separator()) != 0) {
++          assert(strlen(path) + strlen(os::file_separator()) < sizeof(path),
++            "HeapDumpPath too long");
++          strcat(path, os::file_separator());
++        }
++      }
++    }
++  }
++  // If HeapDumpPath wasn't a file name then we append the default name
++  if (use_default_filename) {
++    char fn[32];
++    sprintf(fn, "java_pid%d.hprof", os::current_process_id());
++    assert(strlen(path) + strlen(fn) < sizeof(path), "HeapDumpPath too long");
++    strcat(path, fn);
++  }
++
++  HeapDumper dumper(false /* no GC before heap dump */,
++                    true  /* send to tty */);
++  dumper.dump(path);
++}
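
The dump_float/dump_double hunks above collapse every NaN encoding to one canonical bit pattern, so HPROF readers always see the same value. A minimal standalone sketch of that collapsing, assuming only the C++ standard library (hprof_float_bits/hprof_double_bits are illustrative names, not VM functions):

#include <cmath>    // std::isnan, std::nanf, std::nan
#include <cstdint>
#include <cstdio>
#include <cstring>  // std::memcpy

// Collapse any float NaN to the quiet-NaN pattern written above (0x7fc00000).
static uint32_t hprof_float_bits(float f) {
  if (std::isnan(f)) return 0x7fc00000u;                 // collapsing NaNs
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof bits);
  return bits;
}

// Same for doubles: 0x7ff80000 shifted into the high 32 bits.
static uint64_t hprof_double_bits(double d) {
  if (std::isnan(d)) return (uint64_t)0x7ff80000u << 32;
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  return bits;
}

int main() {
  std::printf("%08x\n", (unsigned)hprof_float_bits(std::nanf("")));   // 7fc00000
  std::printf("%016llx\n",
              (unsigned long long)hprof_double_bits(std::nan("")));   // 7ff8000000000000
  return 0;
}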
+diff -ruN openjdk6/hotspot/src/share/vm/services/heapDumper.hpp openjdk/hotspot/src/share/vm/services/heapDumper.hpp
+--- openjdk6/hotspot/src/share/vm/services/heapDumper.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/heapDumper.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,8 +1,5 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)heapDumper.hpp	1.8 07/05/05 17:07:05 JVM"
+-#endif
+ /*
+- * Copyright 2005 Sun Microsystems, Inc.  All Rights Reserved.
++ * Copyright 2005-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+  *
+  * This code is free software; you can redistribute it and/or modify it
+@@ -22,10 +19,9 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+-
+ // HeapDumper is used to dump the java heap to file in HPROF binary format:
+ //
+ //  { HeapDumper dumper(true /* full GC before heap dump */);
+@@ -46,19 +42,19 @@
+   elapsedTimer _t;
+ 
+   // string representation of error
+-  char* error() const			{ return _error; }
++  char* error() const                   { return _error; }
+   void set_error(char* error);
+ 
+   // indicates if progress messages can be sent to tty
+-  bool print_to_tty() const		{ return _print_to_tty; }
++  bool print_to_tty() const             { return _print_to_tty; }
+ 
+   // internal timer.
+-  elapsedTimer* timer()                 { return &_t; }  
++  elapsedTimer* timer()                 { return &_t; }
+ 
+  public:
+-  HeapDumper(bool gc_before_heap_dump) : 
++  HeapDumper(bool gc_before_heap_dump) :
+     _gc_before_heap_dump(gc_before_heap_dump), _error(NULL), _print_to_tty(false)  { }
+-  HeapDumper(bool gc_before_heap_dump, bool print_to_tty) : 
++  HeapDumper(bool gc_before_heap_dump, bool print_to_tty) :
+     _gc_before_heap_dump(gc_before_heap_dump), _error(NULL), _print_to_tty(print_to_tty) { }
+ 
+   ~HeapDumper();
+@@ -68,5 +64,6 @@
+ 
+   // returns error message (resource allocated), or NULL if no error
+   char* error_as_C_string() const;
+-};
+ 
++  static void dump_heap()    KERNEL_RETURN;
++};
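
The usage comment at the top of this header (only its first line appears in the hunk above) pairs with the error_as_C_string() accessor kept here. A hedged in-VM sketch of that calling sequence, reconstructed from the API shown rather than quoted from the file; the path is made up:

// In-VM sketch only: HeapDumper is HotSpot-internal, so this compiles
// against the VM sources, not as a standalone program.
HeapDumper dumper(true /* full GC before heap dump */);
if (dumper.dump("/tmp/heap.bin") != 0) {
  // error message is resource allocated, or NULL if none was recorded
  char* errmsg = dumper.error_as_C_string();
  tty->print_cr("heap dump failed: %s",
                errmsg != NULL ? errmsg : "reason unknown");
}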
+diff -ruN openjdk6/hotspot/src/share/vm/services/jmm.h openjdk/hotspot/src/share/vm/services/jmm.h
+--- openjdk6/hotspot/src/share/vm/services/jmm.h	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/jmm.h	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jmm.h	1.36 07/05/05 17:07:04 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -51,7 +48,7 @@
+   JMM_VERSION_1_0 = 0x20010000,
+   JMM_VERSION_1_1 = 0x20010100, // JDK 6
+   JMM_VERSION_1_2 = 0x20010200, // JDK 7
+-  JMM_VERSION     = 0x20010200 
++  JMM_VERSION     = 0x20010200
+ };
+ 
+ typedef struct {
+@@ -113,8 +110,8 @@
+ 
+ 
+ enum {
+-  JMM_THREAD_STATE_FLAG_SUSPENDED = 0x00100000, 
+-  JMM_THREAD_STATE_FLAG_NATIVE    = 0x00400000 
++  JMM_THREAD_STATE_FLAG_SUSPENDED = 0x00100000,
++  JMM_THREAD_STATE_FLAG_NATIVE    = 0x00400000
+ };
+ 
+ #define JMM_THREAD_STATE_FLAG_MASK  0xFFF00000
+@@ -129,8 +126,8 @@
+ } jmmStatisticType;
+ 
+ typedef enum {
+-  JMM_USAGE_THRESHOLD_HIGH	      = 901,
+-  JMM_USAGE_THRESHOLD_LOW	      = 902,
++  JMM_USAGE_THRESHOLD_HIGH            = 901,
++  JMM_USAGE_THRESHOLD_LOW             = 902,
+   JMM_COLLECTION_USAGE_THRESHOLD_HIGH = 903,
+   JMM_COLLECTION_USAGE_THRESHOLD_LOW  = 904
+ } jmmThresholdType;
+@@ -160,7 +157,7 @@
+   jmmVMGlobalOrigin origin;         /* Default or non-default value */
+   unsigned int      writeable : 1;  /* dynamically writeable */
+   unsigned int      external  : 1;  /* external supported interface */
+-  unsigned int      reserved  : 30; 
++  unsigned int      reserved  : 30;
+   void *reserved1;
+   void *reserved2;
+ } jmmVMGlobal;
+@@ -195,7 +192,7 @@
+ 
+   jint         (JNICALL *GetVersion)             (JNIEnv *env);
+ 
+-  jint         (JNICALL *GetOptionalSupport)     (JNIEnv *env, 
++  jint         (JNICALL *GetOptionalSupport)     (JNIEnv *env,
+                                                   jmmOptionalSupport* support_ptr);
+ 
+   /* This is used by JDK 6 and earlier.
+@@ -203,8 +200,8 @@
+    */
+   jobject      (JNICALL *GetInputArguments)      (JNIEnv *env);
+ 
+-  jint         (JNICALL *GetThreadInfo)          (JNIEnv *env, 
+-                                                  jlongArray ids, 
++  jint         (JNICALL *GetThreadInfo)          (JNIEnv *env,
++                                                  jlongArray ids,
+                                                   jint maxDepth,
+                                                   jobjectArray infoArray);
+   jobjectArray (JNICALL *GetInputArgumentArray)  (JNIEnv *env);
+@@ -224,61 +221,61 @@
+   jboolean     (JNICALL *GetBoolAttribute)       (JNIEnv *env, jmmBoolAttribute att);
+   jboolean     (JNICALL *SetBoolAttribute)       (JNIEnv *env, jmmBoolAttribute att, jboolean flag);
+ 
+-  jint         (JNICALL *GetLongAttributes)      (JNIEnv *env, 
+-                                                  jobject obj, 
+-                                                  jmmLongAttribute* atts, 
+-                                                  jint count, 
++  jint         (JNICALL *GetLongAttributes)      (JNIEnv *env,
++                                                  jobject obj,
++                                                  jmmLongAttribute* atts,
++                                                  jint count,
+                                                   jlong* result);
+ 
+   jobjectArray (JNICALL *FindCircularBlockedThreads) (JNIEnv *env);
+   jlong        (JNICALL *GetThreadCpuTime)       (JNIEnv *env, jlong thread_id);
+ 
+   jobjectArray (JNICALL *GetVMGlobalNames)       (JNIEnv *env);
+-  jint         (JNICALL *GetVMGlobals)           (JNIEnv *env, 
++  jint         (JNICALL *GetVMGlobals)           (JNIEnv *env,
+                                                   jobjectArray names,
+                                                   jmmVMGlobal *globals,
+                                                   jint count);
+ 
+   jint         (JNICALL *GetInternalThreadTimes) (JNIEnv *env,
+-						  jobjectArray names,
+-						  jlongArray times);
++                                                  jobjectArray names,
++                                                  jlongArray times);
+ 
+   jboolean     (JNICALL *ResetStatistic)         (JNIEnv *env,
+                                                   jvalue obj,
+                                                   jmmStatisticType type);
+ 
+-  void         (JNICALL *SetPoolSensor)          (JNIEnv *env, 
+-                                                  jobject pool, 
+-						  jmmThresholdType type,
++  void         (JNICALL *SetPoolSensor)          (JNIEnv *env,
++                                                  jobject pool,
++                                                  jmmThresholdType type,
+                                                   jobject sensor);
+ 
+-  jlong        (JNICALL *SetPoolThreshold)       (JNIEnv *env, 
+-                                                  jobject pool, 
+-						  jmmThresholdType type,
++  jlong        (JNICALL *SetPoolThreshold)       (JNIEnv *env,
++                                                  jobject pool,
++                                                  jmmThresholdType type,
+                                                   jlong threshold);
+   jobject      (JNICALL *GetPoolCollectionUsage) (JNIEnv* env, jobject pool);
+ 
+-  jint         (JNICALL *GetGCExtAttributeInfo)  (JNIEnv *env, 
+-                                                  jobject mgr, 
++  jint         (JNICALL *GetGCExtAttributeInfo)  (JNIEnv *env,
++                                                  jobject mgr,
+                                                   jmmExtAttributeInfo *ext_info,
+                                                   jint count);
+-  void         (JNICALL *GetLastGCStat)          (JNIEnv *env, 
++  void         (JNICALL *GetLastGCStat)          (JNIEnv *env,
+                                                   jobject mgr,
+                                                   jmmGCStat *gc_stat);
+-  jlong        (JNICALL *GetThreadCpuTimeWithKind) (JNIEnv *env, 
+-                                                    jlong thread_id, 
++  jlong        (JNICALL *GetThreadCpuTimeWithKind) (JNIEnv *env,
++                                                    jlong thread_id,
+                                                     jboolean user_sys_cpu_time);
+   void*        reserved5;
+   jint         (JNICALL *DumpHeap0)              (JNIEnv *env,
+                                                   jstring outputfile,
+                                                   jboolean live);
+   jobjectArray (JNICALL *FindDeadlocks)             (JNIEnv *env, jboolean object_monitors_only);
+-  void         (JNICALL *SetVMGlobal)            (JNIEnv *env, 
++  void         (JNICALL *SetVMGlobal)            (JNIEnv *env,
+                                                   jstring flag_name,
+                                                   jvalue  new_value);
+   void*        reserved6;
+   jobjectArray (JNICALL *DumpThreads)            (JNIEnv *env,
+-                                                  jlongArray ids, 
++                                                  jlongArray ids,
+                                                   jboolean lockedMonitors,
+                                                   jboolean lockedSynchronizers);
+ } JmmInterface;
+@@ -288,4 +285,3 @@
+ #endif /* __cplusplus */
+ 
+ #endif /* !_JAVA_JMM_H_ */
+-
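
The jmm.h hunks are whitespace-only, but the constants they pass over split a jmm thread-state word into flag bits and a base state via JMM_THREAD_STATE_FLAG_MASK. A standalone illustration using only the values visible in this diff (the composite state value is made up for the example):

#include <cstdio>

enum {
  JMM_THREAD_STATE_FLAG_SUSPENDED = 0x00100000,
  JMM_THREAD_STATE_FLAG_NATIVE    = 0x00400000
};
#define JMM_THREAD_STATE_FLAG_MASK 0xFFF00000

int main() {
  // Hypothetical composite value: base state 0x5 plus both flag bits.
  unsigned state = 0x00000005u
                 | JMM_THREAD_STATE_FLAG_SUSPENDED
                 | JMM_THREAD_STATE_FLAG_NATIVE;
  unsigned flags = state &  JMM_THREAD_STATE_FLAG_MASK;   // flag bits only
  unsigned base  = state & ~JMM_THREAD_STATE_FLAG_MASK;   // base state only
  std::printf("base=0x%x suspended=%u native=%u\n", base,
              (unsigned)((flags & JMM_THREAD_STATE_FLAG_SUSPENDED) != 0),
              (unsigned)((flags & JMM_THREAD_STATE_FLAG_NATIVE) != 0));
  return 0;
}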
+diff -ruN openjdk6/hotspot/src/share/vm/services/lowMemoryDetector.cpp openjdk/hotspot/src/share/vm/services/lowMemoryDetector.cpp
+--- openjdk6/hotspot/src/share/vm/services/lowMemoryDetector.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/lowMemoryDetector.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)lowMemoryDetector.cpp	1.28 07/05/05 17:07:04 JVM"
+-#endif
+ /*
+  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -99,7 +96,7 @@
+     bool   sensors_changed = false;
+ 
+     {
+-      // _no_safepoint_check_flag is used here as LowMemory_lock is a 
++      // _no_safepoint_check_flag is used here as LowMemory_lock is a
+       // special lock and the VMThread may acquire this lock at safepoint.
+       // Need state transition ThreadBlockInVM so that this thread
+       // will be handled by safepoint correctly when this thread is
+@@ -149,12 +146,12 @@
+   for (int i = 0; i < num_memory_pools; i++) {
+     MemoryPool* pool = MemoryService::get_memory_pool(i);
+     SensorInfo* sensor = pool->usage_sensor();
+-    if (sensor != NULL && 
++    if (sensor != NULL &&
+         pool->usage_threshold()->is_high_threshold_supported() &&
+         pool->usage_threshold()->high_threshold() != 0) {
+       MemoryUsage usage = pool->get_memory_usage();
+       sensor->set_gauge_sensor_level(usage,
+-                                     pool->usage_threshold()); 
++                                     pool->usage_threshold());
+       has_pending_requests = has_pending_requests || sensor->has_pending_requests();
+     }
+   }
+@@ -168,18 +165,18 @@
+ // and also VMThread.
+ void LowMemoryDetector::detect_low_memory(MemoryPool* pool) {
+   SensorInfo* sensor = pool->usage_sensor();
+-  if (sensor == NULL || 
++  if (sensor == NULL ||
+       !pool->usage_threshold()->is_high_threshold_supported() ||
+       pool->usage_threshold()->high_threshold() == 0) {
+     return;
+   }
+-  
++
+   {
+     MutexLockerEx ml(LowMemory_lock, Mutex::_no_safepoint_check_flag);
+-  
++
+     MemoryUsage usage = pool->get_memory_usage();
+     sensor->set_gauge_sensor_level(usage,
+-                                   pool->usage_threshold()); 
++                                   pool->usage_threshold());
+     if (sensor->has_pending_requests()) {
+       // notify sensor state update
+       LowMemory_lock->notify_all();
+@@ -190,15 +187,15 @@
+ // Only called by VMThread at GC time
+ void LowMemoryDetector::detect_after_gc_memory(MemoryPool* pool) {
+   SensorInfo* sensor = pool->gc_usage_sensor();
+-  if (sensor == NULL || 
+-      !pool->gc_usage_threshold()->is_high_threshold_supported() || 
++  if (sensor == NULL ||
++      !pool->gc_usage_threshold()->is_high_threshold_supported() ||
+       pool->gc_usage_threshold()->high_threshold() == 0) {
+     return;
+   }
+-  
++
+   {
+     MutexLockerEx ml(LowMemory_lock, Mutex::_no_safepoint_check_flag);
+-  
++
+     MemoryUsage usage = pool->get_last_collection_usage();
+     sensor->set_counter_sensor_level(usage, pool->gc_usage_threshold());
+ 
+@@ -206,7 +203,7 @@
+       // notify sensor state update
+       LowMemory_lock->notify_all();
+     }
+-  } 
++  }
+ }
+ 
+ // recompute enabled flag
+@@ -235,8 +232,8 @@
+ // as a gauge attribute.  Sensor notifications (trigger or
+ // clear) are only emitted the first time it crosses
+ // a threshold.
+-// 
+-// High and low thresholds are designed to provide a 
++//
++// High and low thresholds are designed to provide a
+ // hysteresis mechanism to avoid repeated triggering
+ // of notifications when the attribute value makes small oscillations
+ // around the high or low threshold value.
+@@ -273,7 +270,7 @@
+   bool is_below_low = high_low_threshold->is_low_threshold_crossed(usage);
+ 
+   assert(!(is_over_high && is_below_low), "Can't be both true");
+-  
++
+   if (is_over_high &&
+         ((!_sensor_on && _pending_trigger_count == 0) ||
+          _pending_clear_count > 0)) {
+@@ -288,12 +285,12 @@
+       // pending requests to clear this sensor.
+       // This trigger request needs to clear this clear count
+       // since the resulting sensor flag should be on.
+-      _pending_clear_count = 0;  
++      _pending_clear_count = 0;
+     }
+-  } else if (is_below_low && 
++  } else if (is_below_low &&
+                ((_sensor_on && _pending_clear_count == 0) ||
+                 (_pending_trigger_count > 0 && _pending_clear_count == 0))) {
+-    // memory usage returns below the threshold 
++    // memory usage returns below the threshold
+     // Request to clear the sensor if the sensor is on or will be on due to
+     // _pending_trigger_count > 0 and also no clear request
+     _pending_clear_count++;
+@@ -301,11 +298,11 @@
+ }
+ 
+ // When this method is used, the memory usage is monitored as a
+-// simple counter attribute.  The sensor will be triggered 
++// simple counter attribute.  The sensor will be triggered
+ // whenever the usage is crossing the threshold to keep track
+ // of the number of times the VM detects such a condition occurs.
+ //
+-// High and low thresholds are designed to provide a 
++// High and low thresholds are designed to provide a
+ // hysteresis mechanism to avoid repeated triggering
+ // of notifications when the attribute value makes small oscillations
+ // around the high or low threshold value.
+@@ -331,9 +328,9 @@
+   if (is_over_high) {
+     _pending_trigger_count++;
+     _usage = usage;
+-    _pending_clear_count = 0;  
++    _pending_clear_count = 0;
+   } else if (is_below_low && (_sensor_on || _pending_trigger_count > 0)) {
+-    _pending_clear_count++;  
++    _pending_clear_count++;
+   }
+ }
+ 
+@@ -375,7 +372,7 @@
+                             vmSymbolHandles::trigger_method_signature(),
+                             &args,
+                             CHECK);
+-  } 
++  }
+ 
+   {
+     // Holds LowMemory_lock and update the sensor state
+@@ -384,14 +381,14 @@
+     _sensor_count += count;
+     _pending_trigger_count = _pending_trigger_count - count;
+   }
+-} 
++}
+ 
+ void SensorInfo::clear(int count, TRAPS) {
+   if (_sensor_obj != NULL) {
+     klassOop k = Management::sun_management_Sensor_klass(CHECK);
+     instanceKlassHandle sensorKlass (THREAD, k);
+     Handle sensor(THREAD, _sensor_obj);
+-  
++
+     JavaValue result(T_VOID);
+     JavaCallArguments args(sensor);
+     args.push_int((int) count);
+@@ -420,6 +417,6 @@
+   tty->print_cr("%s count = %ld pending_triggers = %ld pending_clears = %ld",
+                 (_sensor_on ? "on" : "off"),
+                 _sensor_count, _pending_trigger_count, _pending_clear_count);
+-} 
++}
+ 
+ #endif // PRODUCT
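
The gauge-sensor comments above describe a hysteresis band: trigger once when usage crosses the high threshold, clear once when it falls below the low threshold, and ignore oscillation in between. A standalone sketch of just that band; the pending trigger/clear counts in SensorInfo (needed because the Java notification runs on a separate service thread) are collapsed into a single flag here, and the thresholds are illustrative numbers:

#include <cstddef>
#include <cstdio>

struct GaugeSensor {
  bool on;
  size_t high, low;
  GaugeSensor(size_t h, size_t l) : on(false), high(h), low(l) {}
  void update(size_t used) {
    if (!on && used >= high) {
      on = true;                        // crossed above high: trigger once
      std::printf("trigger at %zu\n", used);
    } else if (on && used < low) {
      on = false;                       // crossed below low: clear once
      std::printf("clear at %zu\n", used);
    }
    // values between low and high leave the sensor unchanged
  }
};

int main() {
  GaugeSensor s(80, 60);
  const size_t samples[] = {50, 85, 90, 70, 65, 55, 75, 95};
  for (size_t u : samples) s.update(u);  // triggers at 85, clears at 55, triggers at 95
  return 0;
}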
+diff -ruN openjdk6/hotspot/src/share/vm/services/lowMemoryDetector.hpp openjdk/hotspot/src/share/vm/services/lowMemoryDetector.hpp
+--- openjdk6/hotspot/src/share/vm/services/lowMemoryDetector.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/lowMemoryDetector.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)lowMemoryDetector.hpp	1.22 07/05/05 17:07:05 JVM"
+-#endif
+ /*
+  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,10 +19,10 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+-// Low Memory Detection Support 
++// Low Memory Detection Support
+ // Two memory alarms in the JDK (we called them sensors).
+ //   - Heap memory sensor
+ //   - Non-heap memory sensor
+@@ -63,7 +60,7 @@
+  private:
+   bool            _support_high_threshold;
+   bool            _support_low_threshold;
+-  size_t          _high_threshold; 
++  size_t          _high_threshold;
+   size_t          _low_threshold;
+  public:
+   ThresholdSupport(bool support_high, bool support_low) {
+@@ -81,13 +78,13 @@
+   bool        is_high_threshold_crossed(MemoryUsage usage) {
+     if (_support_high_threshold && _high_threshold > 0) {
+       return (usage.used() >= _high_threshold);
+-    }  
++    }
+     return false;
+   }
+   bool        is_low_threshold_crossed(MemoryUsage usage) {
+     if (_support_low_threshold && _low_threshold > 0) {
+       return (usage.used() < _low_threshold);
+-    }  
++    }
+     return false;
+   }
+ 
+@@ -96,7 +93,7 @@
+     assert(new_threshold >= _low_threshold, "new_threshold must be >= _low_threshold");
+     size_t prev = _high_threshold;
+     _high_threshold = new_threshold;
+-    return prev; 
++    return prev;
+   }
+ 
+   size_t      set_low_threshold(size_t new_threshold) {
+@@ -104,14 +101,14 @@
+     assert(new_threshold <= _high_threshold, "new_threshold must be <= _high_threshold");
+     size_t prev = _low_threshold;
+     _low_threshold = new_threshold;
+-    return prev; 
++    return prev;
+   }
+ };
+ 
+ class SensorInfo : public CHeapObj {
+ private:
+   instanceOop     _sensor_obj;
+-  bool            _sensor_on;  
++  bool            _sensor_on;
+   size_t          _sensor_count;
+ 
+   // before the actual sensor on flag and sensor count are set
+@@ -122,14 +119,14 @@
+   int             _pending_trigger_count;
+ 
+   // _pending_clear_count takes precedence if it's > 0 which
+-  // indicates the resulting sensor will be off 
++  // indicates the resulting sensor will be off
+   // Sensor trigger requests will reset this clear count to
+   // indicate the resulting flag should be on.
+ 
+   int             _pending_clear_count;
+ 
+   MemoryUsage     _usage;
+-    
++
+   void clear(int count, TRAPS);
+   void trigger(int count, TRAPS);
+ public:
+@@ -147,11 +144,11 @@
+   int pending_clear_count()        { return _pending_clear_count; }
+ 
+   // When this method is used, the memory usage is monitored
+-  // as a gauge attribute.  High and low thresholds are designed 
++  // as a gauge attribute.  High and low thresholds are designed
+   // to provide a hysteresis mechanism to avoid repeated triggering
+-  // of notifications when the attribute value makes small oscillations 
++  // of notifications when the attribute value makes small oscillations
+   // around the high or low threshold value.
+-  // 
++  //
+   // The sensor will be triggered if:
+   //  (1) the usage is crossing above the high threshold and
+   //      the sensor is currently off and no pending
+@@ -168,7 +165,7 @@
+   //      the sensor is currently on and no pending
+   //      clear requests; or
+   //  (2) the usage is crossing below the low threshold and
+-  //      the sensor will be on (i.e. sensor is currently off 
++  //      the sensor will be on (i.e. sensor is currently off
+   //      and has pending trigger requests).
+   //
+   // Subsequent crossings of the low threshold value do not cause
+@@ -176,10 +173,10 @@
+   // to the high threshold.
+   //
+   // If the current level is between the high and low thresholds, no change.
+-  // 
++  //
+   void set_gauge_sensor_level(MemoryUsage usage, ThresholdSupport* high_low_threshold);
+ 
+-  // When this method is used, the memory usage is monitored as a 
++  // When this method is used, the memory usage is monitored as a
+   // simple counter attribute.  The sensor will be triggered
+   // whenever the usage is crossing the threshold to keep track
+   // of the number of times the VM detects such a condition occurs.
+@@ -187,12 +184,12 @@
+   // The sensor will be triggered if:
+   //   - the usage is crossing above the high threshold regardless
+   //     of the current sensor state.
+-  //   
++  //
+   // The sensor will be cleared if:
+   //  (1) the usage is crossing below the low threshold and
+   //      the sensor is currently on; or
+   //  (2) the usage is crossing below the low threshold and
+-  //      the sensor will be on (i.e. sensor is currently off 
++  //      the sensor will be on (i.e. sensor is currently off
+   //      and has pending trigger requests).
+   //
+   void set_counter_sensor_level(MemoryUsage usage, ThresholdSupport* counter_threshold);
+@@ -208,7 +205,7 @@
+ 
+ class LowMemoryDetector : public AllStatic {
+ friend class LowMemoryDetectorDisabler;
+-private:  
++private:
+   // true if any collected heap has low memory detection enabled
+   static volatile bool _enabled_for_collected_pools;
+   // > 0 if temporarily disabled
+@@ -221,7 +218,7 @@
+   static bool temporary_disabled() { return _disabled_count > 0; }
+   static void disable() { Atomic::inc(&_disabled_count); }
+   static void enable() { Atomic::dec(&_disabled_count); }
+-  
++
+ public:
+   static void initialize();
+   static void detect_low_memory();
+@@ -230,18 +227,18 @@
+ 
+   static bool is_enabled(MemoryPool* pool) {
+     // low memory detection is enabled for collected memory pools
+-    // iff one of the collected memory pool has a sensor and the 
++    // iff one of the collected memory pools has a sensor and the
+     // threshold set non-zero
+     if (pool->usage_sensor() == NULL) {
+       return false;
+     } else {
+       ThresholdSupport* threshold_support = pool->usage_threshold();
+-      return (threshold_support->is_high_threshold_supported() ? 
++      return (threshold_support->is_high_threshold_supported() ?
+                (threshold_support->high_threshold() > 0) : false);
+     }
+   }
+ 
+-  // indicates if low memory detection is enabled for any collected 
++  // indicates if low memory detection is enabled for any collected
+   // memory pools
+   static inline bool is_enabled_for_collected_pools() {
+     return !temporary_disabled() && _enabled_for_collected_pools;
+@@ -260,14 +257,14 @@
+     for (int i=0; i<num_memory_pools; i++) {
+       MemoryPool* pool = MemoryService::get_memory_pool(i);
+ 
+-      // if low memory detection is enabled then check if the 
++      // if low memory detection is enabled then check if the
+       // current used exceeds the high threshold
+       if (pool->is_collected_pool() && is_enabled(pool)) {
+-	size_t used = pool->used_in_bytes();
+-	size_t high = pool->usage_threshold()->high_threshold();
+-	if (used > high) {
+-          detect_low_memory(pool);      
+-	}
++        size_t used = pool->used_in_bytes();
++        size_t high = pool->usage_threshold()->high_threshold();
++        if (used > high) {
++          detect_low_memory(pool);
++        }
+       }
+     }
+   }
+@@ -276,11 +273,11 @@
+ 
+ class LowMemoryDetectorDisabler: public StackObj {
+ public:
+-  LowMemoryDetectorDisabler() 
++  LowMemoryDetectorDisabler()
+   {
+     LowMemoryDetector::disable();
+   }
+-  ~LowMemoryDetectorDisabler() 
++  ~LowMemoryDetectorDisabler()
+   {
+     assert(LowMemoryDetector::temporary_disabled(), "should be disabled!");
+     LowMemoryDetector::enable();
+diff -ruN openjdk6/hotspot/src/share/vm/services/management.cpp openjdk/hotspot/src/share/vm/services/management.cpp
+--- openjdk6/hotspot/src/share/vm/services/management.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/management.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)management.cpp	1.82 07/05/05 17:07:05 JVM"
+-#endif
+ /*
+  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -40,7 +37,7 @@
+ klassOop Management::_garbageCollectorMXBean_klass = NULL;
+ klassOop Management::_managementFactory_klass = NULL;
+ 
+-jmmOptionalSupport Management::_optional_support;
++jmmOptionalSupport Management::_optional_support = {0};
+ TimeStamp Management::_stamp;
+ 
+ void management_init() {
+@@ -83,7 +80,10 @@
+   }
+   _optional_support.isBootClassPathSupported = 1;
+   _optional_support.isObjectMonitorUsageSupported = 1;
++#ifndef SERVICES_KERNEL
++  // This depends on the heap inspector
+   _optional_support.isSynchronizerUsageSupported = 1;
++#endif // SERVICES_KERNEL
+ }
+ 
+ void Management::initialize(TRAPS) {
+@@ -94,7 +94,7 @@
+     ResourceMark rm(THREAD);
+     HandleMark hm(THREAD);
+ 
+-    // Load and initialize the sun.management.Agent class 
++    // Load and initialize the sun.management.Agent class
+     // invoke startAgent method to start the management server
+     Handle loader = Handle(THREAD, SystemDictionary::java_system_loader());
+     klassOop k = SystemDictionary::resolve_or_fail(vmSymbolHandles::sun_management_Agent(),
+@@ -253,8 +253,8 @@
+ 
+   JavaValue result(T_VOID);
+   JavaCallArguments args(14);
+-  
+-  // First allocate a ThreadObj object and 
++
++  // First allocate a ThreadObj object and
+   // push the receiver as the first argument
+   Handle element = ik->allocate_instance_handle(CHECK_NULL);
+   args.push_oop(element);
+@@ -284,7 +284,7 @@
+   JavaValue result(T_VOID);
+   JavaCallArguments args(17);
+ 
+-  // First allocate a ThreadObj object and 
++  // First allocate a ThreadObj object and
+   // push the receiver as the first argument
+   Handle element = ik->allocate_instance_handle(CHECK_NULL);
+   args.push_oop(element);
+@@ -316,7 +316,7 @@
+   // Sequential search for now.  Need to do better optimization later.
+   for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
+     oop tobj = thread->threadObj();
+-    if (!thread->is_exiting() && 
++    if (!thread->is_exiting() &&
+         tobj != NULL &&
+         thread_id == java_lang_Thread::thread_id(tobj)) {
+       java_thread = thread;
+@@ -335,14 +335,14 @@
+ 
+   klassOop k = Management::java_lang_management_GarbageCollectorMXBean_klass(CHECK_NULL);
+   if (!h->is_a(k)) {
+-    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), 
++    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
+                "the object is not an instance of java.lang.management.GarbageCollectorMXBean class",
+                NULL);
+   }
+ 
+   MemoryManager* gc = MemoryService::get_memory_manager(h);
+   if (gc == NULL || !gc->is_gc_memory_manager()) {
+-    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), 
++    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
+                "Invalid GC memory manager",
+                NULL);
+   }
+@@ -365,9 +365,9 @@
+   int num_threads = ids_ah->length();
+   // should be non-empty array
+   if (num_threads == 0) {
+-    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), 
++    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+               "Empty array of thread IDs");
+-  } 
++  }
+ 
+   // Validate input thread IDs
+   int i = 0;
+@@ -375,7 +375,7 @@
+     jlong tid = ids_ah->long_at(i);
+     if (tid <= 0) {
+       // throw exception if invalid thread id.
+-      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), 
++      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+                 "Invalid thread ID entry");
+     }
+   }
+@@ -388,7 +388,7 @@
+   klassOop threadinfo_klass = Management::java_lang_management_ThreadInfo_klass(CHECK);
+   klassOop element_klass = objArrayKlass::cast(infoArray_h->klass())->element_klass();
+   if (element_klass != threadinfo_klass) {
+-    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), 
++    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+               "infoArray element type is not ThreadInfo class");
+   }
+ 
+@@ -415,7 +415,7 @@
+ 
+ // Gets the list of VM monitoring and management optional supports
+ // Returns 0 if succeeded; otherwise returns non-zero.
+-JVM_LEAF(jint, jmm_GetOptionalSupport(JNIEnv *env, jmmOptionalSupport* support)) 
++JVM_LEAF(jint, jmm_GetOptionalSupport(JNIEnv *env, jmmOptionalSupport* support))
+   if (support == NULL) {
+     return -1;
+   }
+@@ -424,7 +424,7 @@
+ JVM_END
+ 
+ // Returns a java.lang.String object containing the input arguments to the VM.
+-JVM_ENTRY(jobject, jmm_GetInputArguments(JNIEnv *env)) 
++JVM_ENTRY(jobject, jmm_GetInputArguments(JNIEnv *env))
+   ResourceMark rm(THREAD);
+ 
+   if (Arguments::num_jvm_args() == 0 && Arguments::num_jvm_flags() == 0) {
+@@ -444,7 +444,7 @@
+   for (i = 0; i < num_args; i++) {
+     length += strlen(vm_args[i]);
+   }
+-  // add a space between each argument 
++  // add a space between each argument
+   length += num_flags + num_args - 1;
+ 
+   // Return the list of input arguments passed to the VM
+@@ -458,7 +458,7 @@
+       strcat(args, " ");
+       strcat(args, vm_flags[i]);
+     }
+-  } 
++  }
+ 
+   if (num_args > 0 && num_flags > 0) {
+     // append a space if args already contains one or more jvm_flags
+@@ -479,7 +479,7 @@
+ JVM_END
+ 
+ // Returns an array of java.lang.String object containing the input arguments to the VM.
+-JVM_ENTRY(jobjectArray, jmm_GetInputArgumentArray(JNIEnv *env)) 
++JVM_ENTRY(jobjectArray, jmm_GetInputArgumentArray(JNIEnv *env))
+   ResourceMark rm(THREAD);
+ 
+   if (Arguments::num_jvm_args() == 0 && Arguments::num_jvm_flags() == 0) {
+@@ -654,7 +654,7 @@
+   assert(s->is_instance(), "Sensor should be an instanceOop");
+   instanceHandle sensor_h(THREAD, (instanceOop) s);
+   if (!sensor_h->is_a(sensor_klass)) {
+-    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), 
++    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+               "Sensor is not an instance of sun.management.Sensor class");
+   }
+ 
+@@ -679,7 +679,7 @@
+ JVM_END
+ 
+ 
+-// Sets the threshold of a given memory pool. 
++// Sets the threshold of a given memory pool.
+ // Returns the previous threshold.
+ //
+ // Input parameters:
+@@ -689,13 +689,13 @@
+ //
+ JVM_ENTRY(jlong, jmm_SetPoolThreshold(JNIEnv* env, jobject obj, jmmThresholdType type, jlong threshold))
+   if (threshold < 0) {
+-    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), 
++    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
+                "Invalid threshold value",
+                -1);
+   }
+ 
+   if (threshold > max_intx) {
+-    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), 
++    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
+                "Invalid threshold value > max value of size_t",
+                -1);
+   }
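
jmm_SetPoolThreshold's guards above reject a negative threshold and one too large to narrow to size_t. Restated standalone, with kMaxIntx as an illustrative stand-in for HotSpot's max_intx:

    #include <cstdint>
    #include <stdexcept>

    const int64_t kMaxIntx = INT32_MAX;  // stand-in value for a 32-bit VM

    // Validate a caller-supplied threshold before narrowing it to size_t.
    size_t checked_threshold(int64_t threshold) {
        if (threshold < 0)
            throw std::invalid_argument("Invalid threshold value");
        if (threshold > kMaxIntx)
            throw std::invalid_argument("Invalid threshold value > max value of size_t");
        return static_cast<size_t>(threshold);  // safe after both range checks
    }
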
+@@ -723,14 +723,14 @@
+       if (!pool->gc_usage_threshold()->is_high_threshold_supported()) {
+         return -1;
+       }
+-      // return and the new threshold is effective for the next GC  
++      // return and the new threshold is effective for the next GC
+       return pool->gc_usage_threshold()->set_high_threshold((size_t) threshold);
+ 
+     case JMM_COLLECTION_USAGE_THRESHOLD_LOW:
+       if (!pool->gc_usage_threshold()->is_low_threshold_supported()) {
+         return -1;
+       }
+-      // return and the new threshold is effective for the next GC  
++      // return and the new threshold is effective for the next GC
+       return pool->gc_usage_threshold()->set_low_threshold((size_t) threshold);
+ 
+     default:
+@@ -759,7 +759,7 @@
+   size_t total_max = 0;
+   bool   has_undefined_init_size = false;
+   bool   has_undefined_max_size = false;
+-  
++
+   for (int i = 0; i < MemoryService::num_memory_pools(); i++) {
+     MemoryPool* pool = MemoryService::get_memory_pool(i);
+     if ((heap && pool->is_heap()) || (!heap && pool->is_non_heap())) {
+@@ -785,16 +785,16 @@
+     }
+   }
+ 
+-  // In our current implementation, all pools should have 
++  // In our current implementation, all pools should have
+   // defined init and max size
+   assert(!has_undefined_init_size, "Undefined init size");
+   assert(!has_undefined_max_size, "Undefined max size");
+ 
+   MemoryUsage usage((heap ? Arguments::initial_heap_size() : total_init),
+-                    total_used, 
+-                    total_committed, 
++                    total_used,
++                    total_committed,
+                     (heap ? Universe::heap()->max_capacity() : total_max));
+-  
++
+   Handle obj = MemoryService::create_MemoryUsage_obj(usage, CHECK_NULL);
+   return JNIHandles::make_local(env, obj());
+ JVM_END
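
The function above sums init, used, committed and max across every pool on one side of the heap/non-heap split; for the heap side, the initial and maximum values are then taken from the heap itself rather than from the sums. A compact sketch of the summation step, with PoolUsage standing in for MemoryUsage:

    #include <cstddef>
    #include <vector>

    struct PoolUsage {
        bool   heap;
        size_t init, used, committed, max;
    };

    // Aggregate usage over all pools belonging to the requested side.
    PoolUsage total_usage(const std::vector<PoolUsage>& pools, bool heap) {
        PoolUsage total{heap, 0, 0, 0, 0};
        for (const PoolUsage& p : pools) {
            if (p.heap != heap) continue;   // keep the two sides separate
            total.init      += p.init;
            total.used      += p.used;
            total.committed += p.committed;
            total.max       += p.max;
        }
        return total;
    }
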
+@@ -912,7 +912,7 @@
+     return ThreadService::get_peak_thread_count();
+ 
+   case JMM_THREAD_DAEMON_COUNT:
+-    return ThreadService::get_daemon_thread_count();  
++    return ThreadService::get_daemon_thread_count();
+ 
+   case JMM_JVM_INIT_DONE_TIME_MS:
+     return Management::vm_init_done_time();
+@@ -986,7 +986,7 @@
+ 
+ 
+ // Returns the long value of a given attribute.
+-JVM_ENTRY(jlong, jmm_GetLongAttribute(JNIEnv *env, jobject obj, jmmLongAttribute att)) 
++JVM_ENTRY(jlong, jmm_GetLongAttribute(JNIEnv *env, jobject obj, jmmLongAttribute att))
+   if (obj == NULL) {
+     return get_long_attribute(att);
+   } else {
+@@ -1001,9 +1001,9 @@
+ // Gets the value of all attributes specified in the given array
+ // and sets the value in the result array.
+ // Returns the number of attributes found.
+-JVM_ENTRY(jint, jmm_GetLongAttributes(JNIEnv *env, 
+-                                      jobject obj, 
+-                                      jmmLongAttribute* atts, 
++JVM_ENTRY(jint, jmm_GetLongAttributes(JNIEnv *env,
++                                      jobject obj,
++                                      jmmLongAttribute* atts,
+                                       jint count,
+                                       jlong* result))
+ 
+@@ -1037,8 +1037,8 @@
+                            TRAPS) {
+ 
+   // First get an array of threadObj handles.
+-  // A JavaThread may terminate before we get the stack trace. 
+-  GrowableArray<instanceHandle>* thread_handle_array = new GrowableArray<instanceHandle>(num_threads); 
++  // A JavaThread may terminate before we get the stack trace.
++  GrowableArray<instanceHandle>* thread_handle_array = new GrowableArray<instanceHandle>(num_threads);
+   {
+     MutexLockerEx ml(Threads_lock);
+     for (int i = 0; i < num_threads; i++) {
+@@ -1050,7 +1050,7 @@
+     }
+   }
+ 
+-  // Obtain thread dumps and thread snapshot information 
++  // Obtain thread dumps and thread snapshot information
+   VM_ThreadDump op(dump_result,
+                    thread_handle_array,
+                    num_threads,
+@@ -1079,7 +1079,7 @@
+   }
+ 
+   if (maxDepth < -1) {
+-    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), 
++    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
+                "Invalid maxDepth", -1);
+   }
+ 
+@@ -1112,20 +1112,20 @@
+   // Must use ThreadDumpResult to store the ThreadSnapshot.
+   // GC may occur after the thread snapshots are taken but before
+   // this function returns. The threadObj and other oops kept
+-  // in the ThreadSnapshot are marked and adjusted during GC. 
++  // in the ThreadSnapshot are marked and adjusted during GC.
+   ThreadDumpResult dump_result(num_threads);
+ 
+   if (maxDepth == 0) {
+-    // no stack trace dumped - do not need to stop the world 
+-    { 
++    // no stack trace dumped - do not need to stop the world
++    {
+       MutexLockerEx ml(Threads_lock);
+       for (int i = 0; i < num_threads; i++) {
+         jlong tid = ids_ah->long_at(i);
+         JavaThread* jt = find_java_thread_from_id(tid);
+         ThreadSnapshot* ts;
+         if (jt == NULL) {
+-          // if the thread does not exist or now it is terminated, 
+-          // create dummy snapshot 
+-          // if the thread does not exist or now it is terminated, 
+-          // create dummy snapshot 
++          // if the thread does not exist or has now terminated,
++          // create a dummy snapshot
+           ts = new ThreadSnapshot();
+         } else {
+           ts = new ThreadSnapshot(jt);
+@@ -1135,7 +1135,7 @@
+     }
+   } else {
+     // obtain thread dump with the specific list of threads with stack trace
+-    
++
+     do_thread_dump(&dump_result,
+                    ids_ah,
+                    num_threads,
+@@ -1170,11 +1170,11 @@
+ // for the thread ID specified in the corresponding entry in
+ // the given array of thread IDs; or NULL if the thread does not exist
+ // or has terminated.
+-// 
++//
+ // Input parameter:
+ //    ids - array of thread IDs; NULL indicates all live threads
+ //    locked_monitors - if true, dump locked object monitors
+-//    locked_synchronizers - if true, dump locked JSR-166 synchronizers 
++//    locked_synchronizers - if true, dump locked JSR-166 synchronizers
+ //
+ JVM_ENTRY(jobjectArray, jmm_DumpThreads(JNIEnv *env, jlongArray thread_ids, jboolean locked_monitors, jboolean locked_synchronizers))
+   ResourceMark rm(THREAD);
+@@ -1186,10 +1186,11 @@
+ 
+   typeArrayOop ta = typeArrayOop(JNIHandles::resolve(thread_ids));
+   int num_threads = (ta != NULL ? ta->length() : 0);
+-  ThreadDumpResult dump_result(num_threads);
++  typeArrayHandle ids_ah(THREAD, ta);
+ 
+-  if (ta != NULL) {
+-    typeArrayHandle ids_ah(THREAD, ta);
++  ThreadDumpResult dump_result(num_threads);  // can safepoint
++
++  if (ids_ah() != NULL) {
+ 
+     // validate the thread id array
+     validate_thread_id_array(ids_ah, CHECK_NULL);
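
This hunk is a behavioural fix, not whitespace: as the new comment notes, constructing ThreadDumpResult can reach a safepoint, and a raw typeArrayOop may be relocated by a GC at that safepoint, so the oop is wrapped in a typeArrayHandle first. An illustrative sketch of the idiom, not HotSpot code, where Oop, Handle and may_safepoint() are simplified stand-ins:

    struct Oop { void* raw; };          // a raw, movable object pointer

    struct Handle {                     // stand-in: a slot the GC tracks
        Oop value;
        explicit Handle(Oop o) : value(o) { /* register &value as a root */ }
    };

    inline void may_safepoint() { /* GC may move objects here */ }

    void* dump(Oop raw_ids) {
        Handle ids(raw_ids);            // 1. protect the oop first
        may_safepoint();                // 2. then do work that can safepoint
        return ids.value.raw;           // 3. read through the handle;
                                        //    raw_ids may now be stale
    }
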
+@@ -1210,7 +1211,7 @@
+                      (locked_synchronizers ? true : false) /* with locked synchronizers */);
+     VMThread::execute(&op);
+   }
+- 
++
+   int num_snapshots = dump_result.num_snapshots();
+ 
+   // create the result ThreadInfo[] object
+@@ -1227,8 +1228,8 @@
+       continue;
+     }
+ 
+-  
+-   
++
++
+     ThreadStackTrace* stacktrace = ts->get_stack_trace();
+     assert(stacktrace != NULL, "Must have a stack trace dumped");
+ 
+@@ -1257,7 +1258,7 @@
+       typeArrayOop tarray = oopFactory::new_typeArray(T_INT, num_locked_monitors, CHECK_NULL);
+       typeArrayHandle dh(THREAD, tarray);
+       depths_array = dh;
+-  
++
+       int count = 0;
+       int j = 0;
+       for (int depth = 0; depth < num_frames; depth++) {
+@@ -1286,23 +1287,23 @@
+     }
+ 
+     if (locked_synchronizers) {
+-      // Create Object[] filled with locked JSR-166 synchronizers 
++      // Create Object[] filled with locked JSR-166 synchronizers
+       assert(ts->threadObj() != NULL, "Must be a valid JavaThread");
+       ThreadConcurrentLocks* tcl = ts->get_concurrent_locks();
+       GrowableArray<instanceOop>* locks = (tcl != NULL ? tcl->owned_locks() : NULL);
+       int num_locked_synchronizers = (locks != NULL ? locks->length() : 0);
+-  
++
+       objArrayOop array = oopFactory::new_system_objArray(num_locked_synchronizers, CHECK_NULL);
+       objArrayHandle sh(THREAD, array);
+       synchronizers_array = sh;
+-      
++
+       for (int k = 0; k < num_locked_synchronizers; k++) {
+         synchronizers_array->obj_at_put(k, locks->at(k));
+       }
+     }
+ 
+     // Create java.lang.management.ThreadInfo object
+-    instanceOop info_obj = Management::create_thread_info_instance(ts, 
++    instanceOop info_obj = Management::create_thread_info_instance(ts,
+                                                                    monitors_array,
+                                                                    depths_array,
+                                                                    synchronizers_array,
+@@ -1334,7 +1335,7 @@
+ 
+ // Reset statistic.  Return true if the requested statistic is reset.
+ // Otherwise, return false.
+-// 
++//
+ // Input parameters:
+ //  obj  - specify which instance the statistic associated with to be reset
+ //         For PEAK_POOL_USAGE stat, obj is required to be a memory pool object.
+@@ -1428,7 +1429,7 @@
+   }
+ 
+   if (thread_id < 0) {
+-    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), 
++    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
+                "Invalid thread ID", -1);
+   }
+ 
+@@ -1516,7 +1517,7 @@
+   }
+   global->name = (jstring)JNIHandles::make_local(env, flag_name());
+   global->type = JMM_VMGLOBAL_TYPE_UNKNOWN;
+-                           
++
+   if (flag->is_bool()) {
+     global->value.z = flag->get_bool() ? JNI_TRUE : JNI_FALSE;
+     global->type = JMM_VMGLOBAL_TYPE_JBOOLEAN;
+@@ -1535,7 +1536,7 @@
+   global->writeable = flag->is_writeable();
+   global->external = flag->is_external();
+   switch (flag->origin) {
+-    case DEFAULT: 
++    case DEFAULT:
+       global->origin = JMM_VMGLOBAL_ORIGIN_DEFAULT;
+       break;
+     case COMMAND_LINE:
+@@ -1564,9 +1565,9 @@
+ // created in globals.
+ // If a Flag with a given name in an array element does not
+ // exist, globals[i].name will be set to NULL.
+-JVM_ENTRY(jint, jmm_GetVMGlobals(JNIEnv *env, 
++JVM_ENTRY(jint, jmm_GetVMGlobals(JNIEnv *env,
+                                  jobjectArray names,
+-                                 jmmVMGlobal *globals, 
++                                 jmmVMGlobal *globals,
+                                  jint count))
+ 
+ 
+@@ -1583,7 +1584,7 @@
+     // Make sure we have a String array
+     klassOop element_klass = objArrayKlass::cast(names_ah->klass())->element_klass();
+     if (element_klass != SystemDictionary::string_klass()) {
+-      THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), 
++      THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
+                  "Array element type is not String class", 0);
+     }
+ 
+@@ -1622,7 +1623,7 @@
+       }
+     }
+     return num_entries;
+-  }  
++  }
+ JVM_END
+ 
+ JVM_ENTRY(void, jmm_SetVMGlobal(JNIEnv *env, jstring flag_name, jvalue new_value))
+@@ -1630,19 +1631,19 @@
+ 
+   oop fn = JNIHandles::resolve_external_guard(flag_name);
+   if (fn == NULL) {
+-    THROW_MSG(vmSymbols::java_lang_NullPointerException(), 
++    THROW_MSG(vmSymbols::java_lang_NullPointerException(),
+               "The flag name cannot be null.");
+   }
+   char* name = java_lang_String::as_utf8_string(fn);
+   Flag* flag = Flag::find_flag(name, strlen(name));
+   if (flag == NULL) {
+-    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), 
++    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+               "Flag does not exist.");
+-  }  
++  }
+   if (!flag->is_writeable()) {
+-    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), 
++    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+               "This flag is not writeable.");
+-  }  
++  }
+ 
+   bool succeed;
+   if (flag->is_bool()) {
+@@ -1678,7 +1679,7 @@
+   virtual void do_thread(Thread* thread);
+   int count() { return _count; }
+ };
+- 
++
+ ThreadTimesClosure::ThreadTimesClosure(objArrayOop names,
+                                        typeArrayOop times) {
+   assert(names != NULL, "names was NULL");
+@@ -1714,7 +1715,7 @@
+                         os::thread_cpu_time(thread) : -1);
+   _count++;
+ }
+-  
++
+ // Fills names with VM internal thread names and times with the corresponding
+ // CPU times.  If names or times is NULL, a NullPointerException is thrown.
+ // If the element type of names is not String, an IllegalArgumentException is
+@@ -1734,7 +1735,7 @@
+   // Make sure we have a String array
+   klassOop element_klass = objArrayKlass::cast(names_ah->klass())->element_klass();
+   if (element_klass != SystemDictionary::string_klass()) {
+-    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), 
++    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
+                "Array element type is not String class", 0);
+   }
+ 
+@@ -1767,7 +1768,7 @@
+   for (cycle = deadlocks; cycle != NULL; cycle = cycle->next()) {
+     num_threads += cycle->num_threads();
+   }
+-  
++
+   objArrayOop r = oopFactory::new_objArray(SystemDictionary::thread_klass(), num_threads, CHECK_NH);
+   objArrayHandle threads_ah(THREAD, r);
+ 
+@@ -1787,7 +1788,7 @@
+ // and JSR-166 synchronizers.
+ // Returns an array of Thread objects which are in deadlock, if any.
+ // Otherwise, returns NULL.
+-// 
++//
+ // Input parameter:
+ //    object_monitors_only - if true, only check object monitors
+ //
+@@ -1806,7 +1807,7 @@
+ 
+ // Gets the information about GC extension attributes including
+ // the name of the attribute, its type, and a short description.
+-// 
++//
+ // Input parameters:
+ //   mgr   - GC memory manager
+ //   info  - caller allocated array of jmmExtAttributeInfo
+@@ -1851,7 +1852,7 @@
+   klassOop usage_klass = Management::java_lang_management_MemoryUsage_klass(CHECK_0);
+   klassOop element_klass = objArrayKlass::cast(array_h->klass())->element_klass();
+   if (element_klass != usage_klass) {
+-    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), 
++    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
+                "The element type is not MemoryUsage class", 0);
+   }
+ 
+@@ -1862,22 +1863,22 @@
+ // Input parameters:
+ //   obj     - GarbageCollectorMXBean object
+ //   gc_stat - caller allocated jmmGCStat where:
+-//     a. before_gc_usage - array of MemoryUsage objects 
+-//     b. after_gc_usage  - array of MemoryUsage objects 
+-//     c. gc_ext_attributes_values_size is set to the 
++//     a. before_gc_usage - array of MemoryUsage objects
++//     b. after_gc_usage  - array of MemoryUsage objects
++//     c. gc_ext_attributes_values_size is set to the
+ //        gc_ext_attribute_values array allocated
+ //     d. gc_ext_attribute_values is a caller allocated array of jvalue.
+-//   
++//
+ // On return,
+ //   gc_index == 0 indicates no GC statistics available
+ //
+ //   before_gc_usage and after_gc_usage - filled with per memory pool
+ //      before and after GC usage in the same order as the memory pools
+ //      returned by GetMemoryPools for a given GC memory manager.
+-//   num_gc_ext_attributes indicates the number of elements in 
++//   num_gc_ext_attributes indicates the number of elements in
+ //      the gc_ext_attribute_values array is filled; or
+ //      -1 if the gc_ext_attributes_values array is not big enough
+-//   
++//
+ JVM_ENTRY(void, jmm_GetLastGCStat(JNIEnv *env, jobject obj, jmmGCStat *gc_stat))
+   ResourceMark rm(THREAD);
+ 
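The comment block above is effectively the caller contract for jmm_GetLastGCStat: the arrays are caller-allocated, gc_index == 0 means no statistics are available yet, and num_gc_ext_attributes == -1 signals that the values array was too small. A caller-side sketch under those assumptions, with GCStat as a simplified stand-in for jmmGCStat:

    #include <cstdio>
    #include <vector>

    struct GCStat {
        long gc_index;               // 0 => no statistics available yet
        int  num_gc_ext_attributes;  // -1 => values array was too small
        std::vector<long> values;    // caller-allocated attribute values
    };

    void report(GCStat& stat) {
        if (stat.gc_index == 0) {
            std::puts("no collection has completed yet");
        } else if (stat.num_gc_ext_attributes == -1) {
            stat.values.resize(stat.values.size() * 2 + 1);  // grow, retry
        } else {
            std::printf("GC #%ld with %d extension attribute(s)\n",
                        stat.gc_index, stat.num_gc_ext_attributes);
        }
    }
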
+@@ -1907,12 +1908,12 @@
+ 
+   // Fill the arrays of MemoryUsage objects with before and after GC
+   // per pool memory usage
+-  objArrayOop bu = get_memory_usage_objArray(gc_stat->usage_before_gc, 
++  objArrayOop bu = get_memory_usage_objArray(gc_stat->usage_before_gc,
+                                              num_pools,
+                                              CHECK);
+   objArrayHandle usage_before_gc_ah(THREAD, bu);
+ 
+-  objArrayOop au = get_memory_usage_objArray(gc_stat->usage_after_gc, 
++  objArrayOop au = get_memory_usage_objArray(gc_stat->usage_after_gc,
+                                              num_pools,
+                                              CHECK);
+   objArrayHandle usage_after_gc_ah(THREAD, au);
+@@ -1932,7 +1933,7 @@
+     }
+     usage_before_gc_ah->obj_at_put(i, before_usage());
+     usage_after_gc_ah->obj_at_put(i, after_usage());
+-  } 
++  }
+ 
+   if (gc_stat->gc_ext_attribute_values_size > 0) {
+     // Current implementation only has 1 attribute (number of GC threads)
+@@ -1943,15 +1944,16 @@
+ 
+ // Dump heap - Returns 0 if succeeds.
+ JVM_ENTRY(jint, jmm_DumpHeap0(JNIEnv *env, jstring outputfile, jboolean live))
++#ifndef SERVICES_KERNEL
+   ResourceMark rm(THREAD);
+   oop on = JNIHandles::resolve_external_guard(outputfile);
+   if (on == NULL) {
+-    THROW_MSG_(vmSymbols::java_lang_NullPointerException(), 
++    THROW_MSG_(vmSymbols::java_lang_NullPointerException(),
+                "Output file name cannot be null.", -1);
+   }
+   char* name = java_lang_String::as_utf8_string(on);
+   if (name == NULL) {
+-    THROW_MSG_(vmSymbols::java_lang_NullPointerException(), 
++    THROW_MSG_(vmSymbols::java_lang_NullPointerException(),
+                "Output file name cannot be null.", -1);
+   }
+   HeapDumper dumper(live ? true : false);
+@@ -1959,13 +1961,15 @@
+     const char* errmsg = dumper.error_as_C_string();
+     THROW_MSG_(vmSymbols::java_io_IOException(), errmsg, -1);
+   }
+-
+   return 0;
++#else  // SERVICES_KERNEL
++  return -1;
++#endif // SERVICES_KERNEL
+ JVM_END
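
The new #ifndef SERVICES_KERNEL guard compiles the heap dumper out of reduced kernel builds, leaving a stub that returns -1, so callers need no separate configuration probe. The same pattern in isolation (dump_heap is an illustrative name, not the VM's):

    #include <cstdio>

    int dump_heap(const char* path) {
    #ifndef SERVICES_KERNEL
        std::printf("dumping heap to %s\n", path);
        return 0;                  // full build: feature available
    #else  // SERVICES_KERNEL
        (void)path;                // unused in the stub
        return -1;                 // kernel build: feature compiled out
    #endif // SERVICES_KERNEL
    }
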
+ 
+ jlong Management::ticks_to_ms(jlong ticks) {
+   assert(os::elapsed_frequency() > 0, "Must be non-zero");
+-  return (jlong)(((double)ticks / (double)os::elapsed_frequency()) 
++  return (jlong)(((double)ticks / (double)os::elapsed_frequency())
+                  * (double)1000.0);
+ }
+ 
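ticks_to_ms above is the standard tick-to-time conversion: ticks divided by the tick frequency gives seconds, times 1000 gives milliseconds. Restated standalone, with a hypothetical 1 GHz source in place of os::elapsed_frequency():

    #include <cassert>
    #include <cstdint>

    int64_t elapsed_frequency() { return 1000000000; }  // assumed 1 GHz

    int64_t ticks_to_ms(int64_t ticks) {
        assert(elapsed_frequency() > 0 && "Must be non-zero");
        return (int64_t)(((double)ticks / (double)elapsed_frequency())
                         * 1000.0);
    }
    // e.g. ticks_to_ms(2500000000LL) == 2500 under the 1 GHz assumption
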
+diff -ruN openjdk6/hotspot/src/share/vm/services/management.hpp openjdk/hotspot/src/share/vm/services/management.hpp
+--- openjdk6/hotspot/src/share/vm/services/management.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/management.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)management.hpp	1.23 07/05/05 17:07:05 JVM"
+-#endif
+ /*
+  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class OopClosure;
+@@ -50,7 +47,7 @@
+ public:
+   static void init();
+   static void initialize(TRAPS);
+- 
++
+   static jlong ticks_to_ms(jlong ticks);
+   static jlong timestamp();
+ 
+@@ -72,7 +69,7 @@
+   static jlong vm_init_done_time() {
+     return _vm_init_done_time->get_value();
+   }
+- 
++
+   // methods to return a klassOop.
+   static klassOop java_lang_management_ThreadInfo_klass(TRAPS);
+   static klassOop java_lang_management_MemoryUsage_klass(TRAPS);
+@@ -95,15 +92,15 @@
+   TraceVmCreationTime() {}
+   ~TraceVmCreationTime() {}
+ 
+-  void start() 
++  void start()
+   { _timer.update_to(0); _begin_time = os::javaTimeMillis(); }
+ 
+   /**
+-   * Only call this if initialization completes successfully; it will   
++   * Only call this if initialization completes successfully; it will
+    * crash if PerfMemory_exit() has already been called (usually by
+    * os::shutdown() when there was an initialization failure).
+    */
+-  void end()  
++  void end()
+   { Management::record_vm_startup_time(_begin_time, _timer.milliseconds()); }
+ 
+ };
+diff -ruN openjdk6/hotspot/src/share/vm/services/memoryManager.cpp openjdk/hotspot/src/share/vm/services/memoryManager.cpp
+--- openjdk6/hotspot/src/share/vm/services/memoryManager.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/memoryManager.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)memoryManager.cpp	1.28 07/05/05 17:07:05 JVM"
+-#endif
+ /*
+  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -39,7 +36,7 @@
+ }
+ 
+ void MemoryManager::add_pool(MemoryPool* pool) {
+-  assert(_num_pools < MemoryManager::max_num_pools, "_num_pools exceeds the max");  
++  assert(_num_pools < MemoryManager::max_num_pools, "_num_pools exceeds the max");
+   if (_num_pools < MemoryManager::max_num_pools) {
+     _pools[_num_pools] = pool;
+     _num_pools++;
+@@ -84,7 +81,7 @@
+     // Extra manager instances will just be gc'ed.
+     klassOop k = Management::sun_management_ManagementFactory_klass(CHECK_0);
+     instanceKlassHandle ik(THREAD, k);
+-    
++
+     Handle mgr_name = java_lang_String::create_from_str(name(), CHECK_0);
+ 
+     JavaValue result(T_OBJECT);
+@@ -96,7 +93,7 @@
+     if (is_gc_memory_manager()) {
+       method_name = vmSymbolHandles::createGarbageCollector_name();
+       signature = vmSymbolHandles::createGarbageCollector_signature();
+-      args.push_oop(Handle());      // Argument 2 (for future extension) 
++      args.push_oop(Handle());      // Argument 2 (for future extension)
+     } else {
+       method_name = vmSymbolHandles::createMemoryManager_name();
+       signature = vmSymbolHandles::createMemoryManager_signature();
+@@ -209,10 +206,10 @@
+     MemoryPool* pool = MemoryService::get_memory_pool(i);
+     MemoryUsage usage = pool->get_memory_usage();
+     _last_gc_stat->set_before_gc_usage(i, usage);
+-    HS_DTRACE_PROBE8(hotspot, mem__pool__gc__begin, 
++    HS_DTRACE_PROBE8(hotspot, mem__pool__gc__begin,
+       name(), strlen(name()),
+-      pool->name(), strlen(pool->name()), 
+-      usage.init_size(), usage.used(), 
++      pool->name(), strlen(pool->name()),
++      usage.init_size(), usage.used(),
+       usage.committed(), usage.max_size());
+   }
+ }
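
gc_begin above snapshots every pool's usage into the last-GC statistics (and fires a per-pool DTrace probe); gc_end later stores the matching after-GC usage, so each pool ends up with a paired before/after reading. A minimal sketch of that bookkeeping with simplified types:

    #include <cstddef>
    #include <vector>

    struct Usage { size_t init, used, committed, max; };

    struct LastGCStat {
        std::vector<Usage> before_gc;  // one entry per memory pool
        std::vector<Usage> after_gc;
    };

    void gc_begin(const std::vector<Usage>& pools, LastGCStat& stat) {
        stat.before_gc = pools;        // usage before the collection
    }

    void gc_end(const std::vector<Usage>& pools, LastGCStat& stat) {
        stat.after_gc = pools;         // usage after the collection
    }
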
+@@ -227,10 +224,10 @@
+     MemoryPool* pool = MemoryService::get_memory_pool(i);
+     MemoryUsage usage = pool->get_memory_usage();
+ 
+-    HS_DTRACE_PROBE8(hotspot, mem__pool__gc__end, 
+-      name(), strlen(name()), 
++    HS_DTRACE_PROBE8(hotspot, mem__pool__gc__end,
++      name(), strlen(name()),
+       pool->name(), strlen(pool->name()),
+-      usage.init_size(), usage.used(), 
++      usage.init_size(), usage.used(),
+       usage.committed(), usage.max_size());
+ 
+     _last_gc_stat->set_after_gc_usage(i, usage);
+diff -ruN openjdk6/hotspot/src/share/vm/services/memoryManager.hpp openjdk/hotspot/src/share/vm/services/memoryManager.hpp
+--- openjdk6/hotspot/src/share/vm/services/memoryManager.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/memoryManager.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)memoryManager.hpp	1.15 07/05/05 17:07:05 JVM"
+-#endif
+ /*
+  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A memory manager is responsible for managing one or more memory pools.
+@@ -57,7 +54,7 @@
+     ParNew,
+     ConcurrentMarkSweep,
+     PSScavenge,
+-    PSMarkSweep   
++    PSMarkSweep
+   };
+ 
+   MemoryManager();
+@@ -176,7 +173,7 @@
+ 
+ // These subclasses of GCMemoryManager are defined to include
+ // GC-specific information.
+-// TODO: Add GC-specific information 
++// TODO: Add GC-specific information
+ class CopyMemoryManager : public GCMemoryManager {
+ private:
+ public:
+diff -ruN openjdk6/hotspot/src/share/vm/services/memoryPool.cpp openjdk/hotspot/src/share/vm/services/memoryPool.cpp
+--- openjdk6/hotspot/src/share/vm/services/memoryPool.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/memoryPool.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)memoryPool.cpp	1.35 07/05/29 09:44:30 JVM"
+-#endif
+ /*
+  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -65,7 +62,7 @@
+ // Returns an instanceHandle of a MemoryPool object.
+ // It creates a MemoryPool instance when the first time
+ // this function is called.
+-instanceOop MemoryPool::get_memory_pool_instance(TRAPS) { 
++instanceOop MemoryPool::get_memory_pool_instance(TRAPS) {
+   // Must do an acquire so as to force ordering of subsequent
+   // loads from anything _memory_pool_obj points to or implies.
+   instanceOop pool_obj = (instanceOop)OrderAccess::load_ptr_acquire(&_memory_pool_obj);
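
get_memory_pool_instance above is lazy initialization with an acquire load on the fast path; the slow path takes Management_lock, re-checks, and tolerates racing allocations because the losing instances are simply garbage collected. A standalone sketch of the classic lock-based variant of the pattern, with std::atomic in place of OrderAccess and Pool as a stand-in type:

    #include <atomic>
    #include <mutex>

    struct Pool { int capacity; };

    std::atomic<Pool*> g_pool{nullptr};
    std::mutex g_pool_lock;

    Pool* get_pool_instance() {
        Pool* p = g_pool.load(std::memory_order_acquire);   // fast path
        if (p == nullptr) {
            std::lock_guard<std::mutex> ml(g_pool_lock);
            p = g_pool.load(std::memory_order_relaxed);     // re-check
            if (p == nullptr) {
                p = new Pool{64};
                g_pool.store(p, std::memory_order_release); // publish
            }
        }
        return p;
    }
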
+@@ -74,16 +71,16 @@
+     // Extra pool instances will just be gc'ed.
+     klassOop k = Management::sun_management_ManagementFactory_klass(CHECK_NULL);
+     instanceKlassHandle ik(THREAD, k);
+-    
++
+     Handle pool_name = java_lang_String::create_from_str(_name, CHECK_NULL);
+     jlong usage_threshold_value = (_usage_threshold->is_high_threshold_supported() ? 0 : -1L);
+     jlong gc_usage_threshold_value = (_gc_usage_threshold->is_high_threshold_supported() ? 0 : -1L);
+-  
++
+     JavaValue result(T_OBJECT);
+-    JavaCallArguments args; 
++    JavaCallArguments args;
+     args.push_oop(pool_name);           // Argument 1
+     args.push_int((int) is_heap());     // Argument 2
+-  
++
+     symbolHandle method_name = vmSymbolHandles::createMemoryPool_name();
+     symbolHandle signature = vmSymbolHandles::createMemoryPool_signature();
+ 
+@@ -101,7 +98,7 @@
+     instanceHandle pool(THREAD, p);
+ 
+     {
+-      // Get lock since another thread may have create the instance 
++      // Get lock since another thread may have created the instance
+       MutexLocker ml(Management_lock);
+ 
+       // Check if another thread has created the pool.  We reload
+@@ -169,12 +166,12 @@
+   }
+ }
+ 
+-ContiguousSpacePool::ContiguousSpacePool(ContiguousSpace* space, 
+-                                         const char* name, 
+-                                         PoolType type, 
++ContiguousSpacePool::ContiguousSpacePool(ContiguousSpace* space,
++                                         const char* name,
++                                         PoolType type,
+                                          size_t max_size,
+                                          bool support_usage_threshold) :
+-  CollectedMemoryPool(name, type, space->capacity(), max_size, 
++  CollectedMemoryPool(name, type, space->capacity(), max_size,
+                       support_usage_threshold), _space(space) {
+ }
+ 
+@@ -187,11 +184,11 @@
+ }
+ 
+ SurvivorContiguousSpacePool::SurvivorContiguousSpacePool(DefNewGeneration* gen,
+-                                                         const char* name, 
+-                                                         PoolType type, 
++                                                         const char* name,
++                                                         PoolType type,
+                                                          size_t max_size,
+                                                          bool support_usage_threshold) :
+-  CollectedMemoryPool(name, type, gen->from()->capacity(), max_size, 
++  CollectedMemoryPool(name, type, gen->from()->capacity(), max_size,
+                       support_usage_threshold), _gen(gen) {
+ }
+ 
+@@ -204,12 +201,12 @@
+ }
+ 
+ #ifndef SERIALGC
+-CompactibleFreeListSpacePool::CompactibleFreeListSpacePool(CompactibleFreeListSpace* space, 
++CompactibleFreeListSpacePool::CompactibleFreeListSpacePool(CompactibleFreeListSpace* space,
+                                                            const char* name,
+-                                                           PoolType type, 
++                                                           PoolType type,
+                                                            size_t max_size,
+                                                            bool support_usage_threshold) :
+-  CollectedMemoryPool(name, type, space->capacity(), max_size, 
++  CollectedMemoryPool(name, type, space->capacity(), max_size,
+                       support_usage_threshold), _space(space) {
+ }
+ 
+@@ -223,10 +220,10 @@
+ #endif // SERIALGC
+ 
+ GenerationPool::GenerationPool(Generation* gen,
+-                               const char* name, 
++                               const char* name,
+                                PoolType type,
+                                bool support_usage_threshold) :
+-  CollectedMemoryPool(name, type, gen->capacity(), gen->max_capacity(), 
++  CollectedMemoryPool(name, type, gen->capacity(), gen->max_capacity(),
+                       support_usage_threshold), _gen(gen) {
+ }
+ 
+@@ -239,7 +236,7 @@
+ }
+ 
+ CodeHeapPool::CodeHeapPool(CodeHeap* codeHeap, const char* name, bool support_usage_threshold) :
+-  MemoryPool(name, NonHeap, codeHeap->capacity(), codeHeap->max_capacity(), 
++  MemoryPool(name, NonHeap, codeHeap->capacity(), codeHeap->max_capacity(),
+              support_usage_threshold, false), _codeHeap(codeHeap) {
+ }
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/services/memoryPool.hpp openjdk/hotspot/src/share/vm/services/memoryPool.hpp
+--- openjdk6/hotspot/src/share/vm/services/memoryPool.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/memoryPool.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)memoryPool.hpp	1.25 07/05/29 09:44:30 JVM"
+-#endif
+ /*
+  * Copyright 2003-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A memory pool represents the memory area that the VM manages.
+@@ -55,7 +52,7 @@
+   };
+ 
+   // We could make some of the following as performance counters
+-  // for external monitoring. 
++  // for external monitoring.
+   const char*      _name;
+   PoolType         _type;
+   size_t           _initial_size;
+@@ -83,13 +80,13 @@
+              size_t max_size,
+              bool support_usage_threshold,
+              bool support_gc_threshold);
+-  
++
+   const char* name()                       { return _name; }
+   bool        is_heap()                    { return _type == Heap; }
+   bool        is_non_heap()                { return _type == NonHeap; }
+   size_t      initial_size()   const       { return _initial_size; }
+   int         num_memory_managers() const  { return _num_managers; }
+-  // max size could be changed  
++  // max size could be changed
+   virtual size_t max_size()    const       { return _max_size; }
+ 
+   bool is_pool(instanceHandle pool) { return (pool() == _memory_pool_obj); }
+@@ -109,7 +106,7 @@
+   // Records current memory usage if it's a peak usage
+   void record_peak_memory_usage();
+ 
+-  MemoryUsage get_peak_memory_usage() { 
++  MemoryUsage get_peak_memory_usage() {
+     // check current memory usage first and then return peak usage
+     record_peak_memory_usage();
+     return _peak_usage;
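
get_peak_memory_usage above refreshes the peak from current usage immediately before returning it, so the reported peak can never lag behind a spike that happened since the last explicit recording. The idiom in isolation:

    #include <algorithm>
    #include <cstddef>

    class PeakTracker {
        size_t _current = 0;
        size_t _peak    = 0;
    public:
        void set_current(size_t used) { _current = used; }
        void record_peak()            { _peak = std::max(_peak, _current); }
        size_t peak() {
            record_peak();   // check current usage first, then return peak
            return _peak;
        }
    };
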
+@@ -152,9 +149,9 @@
+ public:
+   ContiguousSpacePool(ContiguousSpace* space, const char* name, PoolType type, size_t max_size, bool support_usage_threshold);
+ 
+-  ContiguousSpace* space()		{ return _space; }
++  ContiguousSpace* space()              { return _space; }
+   MemoryUsage get_memory_usage();
+-  size_t used_in_bytes() 		{ return space()->used(); }
++  size_t used_in_bytes()                { return space()->used(); }
+ };
+ 
+ class SurvivorContiguousSpacePool : public CollectedMemoryPool {
+@@ -164,8 +161,8 @@
+ public:
+   SurvivorContiguousSpacePool(DefNewGeneration* gen,
+                               const char* name,
+-                              PoolType type, 
+-                              size_t max_size, 
++                              PoolType type,
++                              size_t max_size,
+                               bool support_usage_threshold);
+ 
+   MemoryUsage get_memory_usage();
+@@ -184,13 +181,13 @@
+   CompactibleFreeListSpace* _space;
+ public:
+   CompactibleFreeListSpacePool(CompactibleFreeListSpace* space,
+-                               const char* name, 
++                               const char* name,
+                                PoolType type,
+                                size_t max_size,
+                                bool support_usage_threshold);
+ 
+   MemoryUsage get_memory_usage();
+-  size_t used_in_bytes() 	    { return _space->used(); }
++  size_t used_in_bytes()            { return _space->used(); }
+ };
+ #endif // SERIALGC
+ 
+@@ -202,7 +199,7 @@
+   GenerationPool(Generation* gen, const char* name, PoolType type, bool support_usage_threshold);
+ 
+   MemoryUsage get_memory_usage();
+-  size_t used_in_bytes()		{ return _gen->used(); }
++  size_t used_in_bytes()                { return _gen->used(); }
+ };
+ 
+ class CodeHeapPool: public MemoryPool {
+@@ -211,5 +208,5 @@
+ public:
+   CodeHeapPool(CodeHeap* codeHeap, const char* name, bool support_usage_threshold);
+   MemoryUsage get_memory_usage();
+-  size_t used_in_bytes()	    { return _codeHeap->allocated_capacity(); }
++  size_t used_in_bytes()            { return _codeHeap->allocated_capacity(); }
+ };
+diff -ruN openjdk6/hotspot/src/share/vm/services/memoryService.cpp openjdk/hotspot/src/share/vm/services/memoryService.cpp
+--- openjdk6/hotspot/src/share/vm/services/memoryService.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/memoryService.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)memoryService.cpp	1.35 07/05/29 09:44:30 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,15 +19,15 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_memoryService.cpp.incl"
+ 
+-GrowableArray<MemoryPool*>* MemoryService::_pools_list = 
++GrowableArray<MemoryPool*>* MemoryService::_pools_list =
+   new (ResourceObj::C_HEAP) GrowableArray<MemoryPool*>(init_pools_list_size, true);
+-GrowableArray<MemoryManager*>* MemoryService::_managers_list = 
++GrowableArray<MemoryManager*>* MemoryService::_managers_list =
+   new (ResourceObj::C_HEAP) GrowableArray<MemoryManager*>(init_managers_list_size, true);
+ 
+ GCMemoryManager* MemoryService::_minor_gc_manager = NULL;
+@@ -101,8 +98,8 @@
+         _minor_gc_manager = MemoryManager::get_copy_memory_manager();
+         break;
+ #ifndef SERIALGC
+-      case Generation::ParNew: 
+-      case Generation::ASParNew: 
++      case Generation::ParNew:
++      case Generation::ASParNew:
+         _minor_gc_manager = MemoryManager::get_parnew_memory_manager();
+         break;
+ #endif // SERIALGC
+@@ -149,7 +146,7 @@
+ }
+ 
+ #ifndef SERIALGC
+-// Add memory pools for ParallelScavengeHeap 
++// Add memory pools for ParallelScavengeHeap
+ // This function currently only supports two generations collected heap.
+ // The collector for ParallelScavengeHeap will have two memory managers.
+ void MemoryService::add_parallel_scavenge_heap_info(ParallelScavengeHeap* heap) {
+@@ -165,11 +162,11 @@
+ }
+ #endif // SERIALGC
+ 
+-MemoryPool* MemoryService::add_gen(Generation* gen, 
+-                                   const char* name, 
++MemoryPool* MemoryService::add_gen(Generation* gen,
++                                   const char* name,
+                                    bool is_heap,
+                                    bool support_usage_threshold) {
+- 
++
+   MemoryPool::PoolType type = (is_heap ? MemoryPool::Heap : MemoryPool::NonHeap);
+   GenerationPool* pool = new GenerationPool(gen, name, type, support_usage_threshold);
+   _pools_list->append(pool);
+@@ -177,25 +174,25 @@
+ }
+ 
+ MemoryPool* MemoryService::add_space(ContiguousSpace* space,
+-                                     const char* name,   
++                                     const char* name,
+                                      bool is_heap,
+                                      size_t max_size,
+                                      bool support_usage_threshold) {
+   MemoryPool::PoolType type = (is_heap ? MemoryPool::Heap : MemoryPool::NonHeap);
+   ContiguousSpacePool* pool = new ContiguousSpacePool(space, name, type, max_size, support_usage_threshold);
+- 
++
+   _pools_list->append(pool);
+   return (MemoryPool*) pool;
+ }
+ 
+ MemoryPool* MemoryService::add_survivor_spaces(DefNewGeneration* gen,
+-                                               const char* name,   
++                                               const char* name,
+                                                bool is_heap,
+                                                size_t max_size,
+                                                bool support_usage_threshold) {
+   MemoryPool::PoolType type = (is_heap ? MemoryPool::Heap : MemoryPool::NonHeap);
+   SurvivorContiguousSpacePool* pool = new SurvivorContiguousSpacePool(gen, name, type, max_size, support_usage_threshold);
+- 
++
+   _pools_list->append(pool);
+   return (MemoryPool*) pool;
+ }
+@@ -214,7 +211,7 @@
+ #endif // SERIALGC
+ 
+ // Add memory pool(s) for one generation
+-void MemoryService::add_generation_memory_pool(Generation* gen, 
++void MemoryService::add_generation_memory_pool(Generation* gen,
+                                                MemoryManager* major_mgr,
+                                                MemoryManager* minor_mgr) {
+   Generation::Name kind = gen->kind();
+@@ -224,15 +221,15 @@
+     case Generation::DefNew: {
+       assert(major_mgr != NULL && minor_mgr != NULL, "Should have two managers");
+       DefNewGeneration* young_gen = (DefNewGeneration*) gen;
+-      // Add a memory pool for each space and young gen doesn't 
++      // Add a memory pool for each space and young gen doesn't
+       // support low memory detection as it is expected to get filled up.
+       MemoryPool* eden = add_space(young_gen->eden(),
+                                    "Eden Space",
+                                    true, /* is_heap */
+                                    young_gen->max_eden_size(),
+                                    false /* support_usage_threshold */);
+-      MemoryPool* survivor = add_survivor_spaces(young_gen, 
+-                                                 "Survivor Space", 
++      MemoryPool* survivor = add_survivor_spaces(young_gen,
++                                                 "Survivor Space",
+                                                  true, /* is_heap */
+                                                  young_gen->max_survivor_size(),
+                                                  false /* support_usage_threshold */);
+@@ -241,15 +238,15 @@
+ 
+ #ifndef SERIALGC
+     case Generation::ParNew:
+-    case Generation::ASParNew: 
++    case Generation::ASParNew:
+     {
+       assert(major_mgr != NULL && minor_mgr != NULL, "Should have two managers");
+-      // Add a memory pool for each space and young gen doesn't 
++      // Add a memory pool for each space and young gen doesn't
+       // support low memory detection as it is expected to get filled up.
+       ParNewGeneration* parnew_gen = (ParNewGeneration*) gen;
+-      MemoryPool* eden = add_space(parnew_gen->eden(), 
+-                                   "Par Eden Space", 
+-                                   true /* is_heap */, 
++      MemoryPool* eden = add_space(parnew_gen->eden(),
++                                   "Par Eden Space",
++                                   true /* is_heap */,
+                                    parnew_gen->max_eden_size(),
+                                    false /* support_usage_threshold */);
+       MemoryPool* survivor = add_survivor_spaces(parnew_gen,
+@@ -257,7 +254,7 @@
+                                                  true, /* is_heap */
+                                                  parnew_gen->max_survivor_size(),
+                                                  false /* support_usage_threshold */);
+-      
++
+       break;
+     }
+ #endif // SERIALGC
+@@ -272,8 +269,8 @@
+     }
+ 
+ #ifndef SERIALGC
+-    case Generation::ConcurrentMarkSweep: 
+-    case Generation::ASConcurrentMarkSweep: 
++    case Generation::ConcurrentMarkSweep:
++    case Generation::ASConcurrentMarkSweep:
+     {
+       assert(major_mgr != NULL && minor_mgr == NULL, "Should have only one manager");
+       ConcurrentMarkSweepGeneration* cms = (ConcurrentMarkSweepGeneration*) gen;
+@@ -308,7 +305,7 @@
+   PermanentGenerationSpec* spec = perm_gen->spec();
+   size_t max_size = spec->max_size() - spec->read_only_size() - spec->read_write_size();
+   MemoryPool* pool = add_space(perm_gen->unshared_space(),
+-                               "Perm Gen", 
++                               "Perm Gen",
+                                 false, /* is_heap */
+                                 max_size,
+                                 true   /* support_usage_threshold */);
+@@ -345,16 +342,16 @@
+ void MemoryService::add_psYoung_memory_pool(PSYoungGen* gen, MemoryManager* major_mgr, MemoryManager* minor_mgr) {
+   assert(major_mgr != NULL && minor_mgr != NULL, "Should have two managers");
+ 
+-  // Add a memory pool for each space and young gen doesn't 
++  // Add a memory pool for each space and young gen doesn't
+   // support low memory detection as it is expected to get filled up.
+   EdenMutableSpacePool* eden = new EdenMutableSpacePool(gen,
+-                                                        gen->eden_space(), 
+-                                                        "PS Eden Space", 
++                                                        gen->eden_space(),
++                                                        "PS Eden Space",
+                                                         MemoryPool::Heap,
+                                                         false /* support_usage_threshold */);
+ 
+   SurvivorMutableSpacePool* survivor = new SurvivorMutableSpacePool(gen,
+-                                                                    "PS Survivor Space", 
++                                                                    "PS Survivor Space",
+                                                                     MemoryPool::Heap,
+                                                                     false /* support_usage_threshold */);
+ 
+@@ -367,17 +364,17 @@
+ }
+ 
+ void MemoryService::add_psOld_memory_pool(PSOldGen* gen, MemoryManager* mgr) {
+-  PSGenerationPool* old_gen = new PSGenerationPool(gen, 
++  PSGenerationPool* old_gen = new PSGenerationPool(gen,
+                                                    "PS Old Gen",
+-                                                   MemoryPool::Heap, 
++                                                   MemoryPool::Heap,
+                                                    true /* support_usage_threshold */);
+   mgr->add_pool(old_gen);
+   _pools_list->append(old_gen);
+ }
+ 
+ void MemoryService::add_psPerm_memory_pool(PSPermGen* gen, MemoryManager* mgr) {
+-  PSGenerationPool* perm_gen = new PSGenerationPool(gen, 
+-                                                    "PS Perm Gen", 
++  PSGenerationPool* perm_gen = new PSGenerationPool(gen,
++                                                    "PS Perm Gen",
+                                                     MemoryPool::NonHeap,
+                                                     true /* support_usage_threshold */);
+   mgr->add_pool(perm_gen);
+@@ -430,7 +427,7 @@
+ void MemoryService::track_memory_pool_usage(MemoryPool* pool) {
+   // Track the peak memory usage
+   pool->record_peak_memory_usage();
+-  
++
+   // Detect low memory
+   if (LowMemoryDetector::is_enabled(pool)) {
+     LowMemoryDetector::detect_low_memory(pool);
+@@ -438,7 +435,7 @@
+ }
+ 
+ void MemoryService::gc_begin(bool fullGC) {
+-  GCMemoryManager* mgr; 
++  GCMemoryManager* mgr;
+   if (fullGC) {
+     mgr = _major_gc_manager;
+   } else {
+@@ -455,7 +452,7 @@
+ }
+ 
+ void MemoryService::gc_end(bool fullGC) {
+-  GCMemoryManager* mgr; 
++  GCMemoryManager* mgr;
+   if (fullGC) {
+     mgr = (GCMemoryManager*) _major_gc_manager;
+   } else {
+@@ -485,7 +482,7 @@
+   // verbose will be set to the previous value
+   bool succeed = CommandLineFlags::boolAtPut((char*)"PrintGC", &verbose, MANAGEMENT);
+   assert(succeed, "Setting PrintGC flag fails");
+-  ClassLoadingService::reset_trace_class_unloading(); 
++  ClassLoadingService::reset_trace_class_unloading();
+ 
+   return verbose;
+ }
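
The routine above (via CommandLineFlags::boolAtPut) swaps in the new value of the PrintGC flag and hands back the previous one so management code can restore it later, then resets the class-unloading trace to stay consistent. The swap-and-return-previous idiom, with a std::map standing in for the VM's flag table:

    #include <cassert>
    #include <map>
    #include <string>

    std::map<std::string, bool> g_flags = { {"PrintGC", false} };

    // Set a named flag; return its previous value for later restore.
    bool set_flag(const std::string& name, bool value) {
        auto it = g_flags.find(name);
        assert(it != g_flags.end() && "Setting PrintGC flag fails");
        bool previous = it->second;
        it->second = value;
        return previous;
    }
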
+@@ -514,18 +511,18 @@
+ }
+ //
+ // GC manager type depends on the type of Generation. Depending the space
+-// availablity and vm option the gc uses major gc manager or minor gc 
+-// manager or both. The type of gc manager depends on the generation kind. 
+-// For DefNew, ParNew and ASParNew generation doing scavange gc uses minor 
+-// gc manager (so _fullGC is set to false ) and for other generation kind 
+-// DOing mark-sweep-compact uses major gc manager (so _fullGC is set 
++// availability and vm option the gc uses major gc manager or minor gc
++// manager or both. The type of gc manager depends on the generation kind.
++// For DefNew, ParNew and ASParNew generations doing scavenge gc uses minor
++// gc manager (so _fullGC is set to false) and for other generation kinds
++// doing mark-sweep-compact uses major gc manager (so _fullGC is set
+ // to true).
+ TraceMemoryManagerStats::TraceMemoryManagerStats(Generation::Name kind) {
+   switch (kind) {
+     case Generation::DefNew:
+ #ifndef SERIALGC
+-    case Generation::ParNew: 
+-    case Generation::ASParNew: 
++    case Generation::ParNew:
++    case Generation::ASParNew:
+ #endif // SERIALGC
+       _fullGC=false;
+       break;
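
The constructor above encodes the rule from the comment: scavenging generations (DefNew, ParNew, ASParNew) are booked against the minor GC manager with _fullGC = false, and everything else against the major one. The dispatch as a standalone function with stand-in enum values:

    enum class Kind { DefNew, ParNew, ASParNew, MarkSweepCompact };

    bool is_full_gc(Kind kind) {
        switch (kind) {
            case Kind::DefNew:
            case Kind::ParNew:
            case Kind::ASParNew:
                return false;   // scavenge: minor GC manager
            default:
                return true;    // mark-sweep-compact: major GC manager
        }
    }
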
+diff -ruN openjdk6/hotspot/src/share/vm/services/memoryService.hpp openjdk/hotspot/src/share/vm/services/memoryService.hpp
+--- openjdk6/hotspot/src/share/vm/services/memoryService.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/memoryService.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)memoryService.hpp	1.16 07/05/05 17:07:05 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Forward declaration
+@@ -70,10 +67,10 @@
+   // Code heap memory pool
+   static MemoryPool*                    _code_heap_pool;
+ 
+-  static void add_generation_memory_pool(Generation* gen, 
++  static void add_generation_memory_pool(Generation* gen,
+                                          MemoryManager* major_mgr,
+                                          MemoryManager* minor_mgr);
+-  static void add_generation_memory_pool(Generation* gen, 
++  static void add_generation_memory_pool(Generation* gen,
+                                          MemoryManager* major_mgr) {
+     add_generation_memory_pool(gen, major_mgr, NULL);
+   }
+@@ -83,30 +80,30 @@
+   static void add_cms_perm_gen_memory_pool(CMSPermGenGen* perm_gen,
+                                            MemoryManager* mgr);
+ 
+-  static void add_psYoung_memory_pool(PSYoungGen* gen, 
+-                                      MemoryManager* major_mgr, 
++  static void add_psYoung_memory_pool(PSYoungGen* gen,
++                                      MemoryManager* major_mgr,
+                                       MemoryManager* minor_mgr);
+-  static void add_psOld_memory_pool(PSOldGen* gen, 
++  static void add_psOld_memory_pool(PSOldGen* gen,
+                                     MemoryManager* mgr);
+-  static void add_psPerm_memory_pool(PSPermGen* perm, 
++  static void add_psPerm_memory_pool(PSPermGen* perm,
+                                      MemoryManager* mgr);
+ 
+ 
+-  static MemoryPool* add_space(ContiguousSpace* space, 
+-                               const char* name, 
+-                               bool is_heap, 
++  static MemoryPool* add_space(ContiguousSpace* space,
++                               const char* name,
++                               bool is_heap,
+                                size_t max_size,
+                                bool support_usage_threshold);
+   static MemoryPool* add_survivor_spaces(DefNewGeneration* gen,
+-                                         const char* name, 
+-                                         bool is_heap, 
++                                         const char* name,
++                                         bool is_heap,
+                                          size_t max_size,
+                                          bool support_usage_threshold);
+-  static MemoryPool* add_gen(Generation* gen, 
+-                             const char* name, 
++  static MemoryPool* add_gen(Generation* gen,
++                             const char* name,
+                              bool is_heap,
+                              bool support_usage_threshold);
+-  static MemoryPool* add_cms_space(CompactibleFreeListSpace* space, 
++  static MemoryPool* add_cms_space(CompactibleFreeListSpace* space,
+                                    const char* name,
+                                    bool is_heap,
+                                    size_t max_size,
+diff -ruN openjdk6/hotspot/src/share/vm/services/memoryUsage.hpp openjdk/hotspot/src/share/vm/services/memoryUsage.hpp
+--- openjdk6/hotspot/src/share/vm/services/memoryUsage.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/memoryUsage.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)memoryUsage.hpp	1.12 07/05/05 17:07:06 JVM"
+-#endif
+ /*
+  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A memory usage contains the following attributes about memory usage:
+diff -ruN openjdk6/hotspot/src/share/vm/services/psMemoryPool.cpp openjdk/hotspot/src/share/vm/services/psMemoryPool.cpp
+--- openjdk6/hotspot/src/share/vm/services/psMemoryPool.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/psMemoryPool.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,46 +1,43 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)psMemoryPool.cpp	1.1 07/05/01 16:48:51 JVM"
+-#endif
+ /*
+  * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+- *   
++ *
+  * This code is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License version 2 only, as
+  * published by the Free Software Foundation.
+- *   
++ *
+  * This code is distributed in the hope that it will be useful, but WITHOUT
+  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+  * version 2 for more details (a copy is included in the LICENSE file that
+  * accompanied this code).
+- *  
++ *
+  * You should have received a copy of the GNU General Public License version
+  * 2 along with this work; if not, write to the Free Software Foundation,
+  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+- *   
++ *
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_psMemoryPool.cpp.incl"
+ 
+-PSGenerationPool::PSGenerationPool(PSOldGen* gen, 
+-                                   const char* name, 
++PSGenerationPool::PSGenerationPool(PSOldGen* gen,
++                                   const char* name,
+                                    PoolType type,
+                                    bool support_usage_threshold) :
+-  CollectedMemoryPool(name, type, gen->capacity_in_bytes(), 
++  CollectedMemoryPool(name, type, gen->capacity_in_bytes(),
+                       gen->reserved().byte_size(), support_usage_threshold), _gen(gen) {
+ }
+ 
+-PSGenerationPool::PSGenerationPool(PSPermGen* gen, 
+-                                   const char* name, 
++PSGenerationPool::PSGenerationPool(PSPermGen* gen,
++                                   const char* name,
+                                    PoolType type,
+                                    bool support_usage_threshold) :
+-  CollectedMemoryPool(name, type, gen->capacity_in_bytes(), 
++  CollectedMemoryPool(name, type, gen->capacity_in_bytes(),
+                       gen->reserved().byte_size(), support_usage_threshold), _gen(gen) {
+ }
+ 
+@@ -60,12 +57,12 @@
+ //
+ EdenMutableSpacePool::EdenMutableSpacePool(PSYoungGen* gen,
+                                            MutableSpace* space,
+-                                           const char* name, 
++                                           const char* name,
+                                            PoolType type,
+                                            bool support_usage_threshold) :
+-  CollectedMemoryPool(name, type, space->capacity_in_bytes(), 
+-                      (gen->max_size() - gen->from_space()->capacity_in_bytes() - gen->to_space()->capacity_in_bytes()), 
+-                       support_usage_threshold), 
++  CollectedMemoryPool(name, type, space->capacity_in_bytes(),
++                      (gen->max_size() - gen->from_space()->capacity_in_bytes() - gen->to_space()->capacity_in_bytes()),
++                       support_usage_threshold),
+   _gen(gen), _space(space) {
+ }
+ 
+@@ -83,7 +80,7 @@
+ // PS from and to survivor spaces could have different sizes.
+ //
+ SurvivorMutableSpacePool::SurvivorMutableSpacePool(PSYoungGen* gen,
+-                                                   const char* name, 
++                                                   const char* name,
+                                                    PoolType type,
+                                                    bool support_usage_threshold) :
+   CollectedMemoryPool(name, type, gen->from_space()->capacity_in_bytes(),
+diff -ruN openjdk6/hotspot/src/share/vm/services/psMemoryPool.hpp openjdk/hotspot/src/share/vm/services/psMemoryPool.hpp
+--- openjdk6/hotspot/src/share/vm/services/psMemoryPool.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/psMemoryPool.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,28 +1,25 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)psMemoryPool.hpp	1.1 07/05/01 16:48:51 JVM"
+-#endif
+ /*
+  * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+- *   
++ *
+  * This code is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License version 2 only, as
+  * published by the Free Software Foundation.
+- *   
++ *
+  * This code is distributed in the hope that it will be useful, but WITHOUT
+  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+  * version 2 for more details (a copy is included in the LICENSE file that
+  * accompanied this code).
+- *  
++ *
+  * You should have received a copy of the GNU General Public License version
+  * 2 along with this work; if not, write to the Free Software Foundation,
+  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+- *   
++ *
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class PSGenerationPool : public CollectedMemoryPool {
+@@ -34,7 +31,7 @@
+   PSGenerationPool(PSPermGen* pool, const char* name, PoolType type, bool support_usage_threshold);
+ 
+   MemoryUsage get_memory_usage();
+-  size_t used_in_bytes()	      { return _gen->used_in_bytes(); }
++  size_t used_in_bytes()              { return _gen->used_in_bytes(); }
+   size_t max_size() const             { return _gen->reserved().byte_size(); }
+ };
+ 
+@@ -44,16 +41,16 @@
+   MutableSpace* _space;
+ 
+ public:
+-  EdenMutableSpacePool(PSYoungGen* gen, 
+-                       MutableSpace* space, 
+-                       const char* name, 
+-                       PoolType type, 
++  EdenMutableSpacePool(PSYoungGen* gen,
++                       MutableSpace* space,
++                       const char* name,
++                       PoolType type,
+                        bool support_usage_threshold);
+ 
+-  MutableSpace* space()			    { return _space; }
++  MutableSpace* space()                     { return _space; }
+   MemoryUsage get_memory_usage();
+-  size_t used_in_bytes()		    { return space()->used_in_bytes(); }
+-  size_t max_size() const { 
++  size_t used_in_bytes()                    { return space()->used_in_bytes(); }
++  size_t max_size() const {
+     // Eden's max_size = max_size of Young Gen - the current committed size of survivor spaces
+     return _gen->max_size() - _gen->from_space()->capacity_in_bytes() - _gen->to_space()->capacity_in_bytes();
+   }
+@@ -77,8 +74,8 @@
+   size_t committed_in_bytes() {
+     return _gen->from_space()->capacity_in_bytes();
+   }
+-  size_t max_size() const { 
++  size_t max_size() const {
+     // Return current committed size of the from-space
+-    return _gen->from_space()->capacity_in_bytes(); 
++    return _gen->from_space()->capacity_in_bytes();
+   }
+ };
+diff -ruN openjdk6/hotspot/src/share/vm/services/runtimeService.cpp openjdk/hotspot/src/share/vm/services/runtimeService.cpp
+--- openjdk6/hotspot/src/share/vm/services/runtimeService.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/runtimeService.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)runtimeService.cpp	1.16 07/05/05 17:07:06 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -42,7 +39,7 @@
+ PerfCounter*  RuntimeService::_interrupted_during_count = NULL;
+ 
+ void RuntimeService::init() {
+-  // Make sure the VM version is initialized 
++  // Make sure the VM version is initialized
+   Abstract_VM_Version::initialize();
+ 
+   if (UsePerfData) {
+@@ -51,43 +48,43 @@
+     _sync_time_ticks =
+               PerfDataManager::create_counter(SUN_RT, "safepointSyncTime",
+                                               PerfData::U_Ticks, CHECK);
+-    
++
+     _total_safepoints =
+               PerfDataManager::create_counter(SUN_RT, "safepoints",
+                                               PerfData::U_Events, CHECK);
+-    
++
+     _safepoint_time_ticks =
+               PerfDataManager::create_counter(SUN_RT, "safepointTime",
+                                               PerfData::U_Ticks, CHECK);
+-    
++
+     _application_time_ticks =
+               PerfDataManager::create_counter(SUN_RT, "applicationTime",
+                                               PerfData::U_Ticks, CHECK);
+ 
+ 
+     // create performance counters for jvm_version and its capabilities
+-    PerfDataManager::create_constant(SUN_RT, "jvmVersion", PerfData::U_None, 
++    PerfDataManager::create_constant(SUN_RT, "jvmVersion", PerfData::U_None,
+                                      (jlong) Abstract_VM_Version::jvm_version(), CHECK);
+ 
+     // I/O interruption related counters
+ 
+     // thread signaling via os::interrupt()
+ 
+-    _thread_interrupt_signaled_count = 
+-    		PerfDataManager::create_counter(SUN_RT, 
+-		 "threadInterruptSignaled", PerfData::U_Events, CHECK);
++    _thread_interrupt_signaled_count =
++                PerfDataManager::create_counter(SUN_RT,
++                 "threadInterruptSignaled", PerfData::U_Events, CHECK);
+ 
+     // OS_INTRPT via "check before" in _INTERRUPTIBLE
+ 
+     _interrupted_before_count =
+-    		PerfDataManager::create_counter(SUN_RT, "interruptedBeforeIO",
+-						PerfData::U_Events, CHECK);
++                PerfDataManager::create_counter(SUN_RT, "interruptedBeforeIO",
++                                                PerfData::U_Events, CHECK);
+ 
+     // OS_INTRPT via "check during" in _INTERRUPTIBLE
+ 
+     _interrupted_during_count =
+-    		PerfDataManager::create_counter(SUN_RT, "interruptedDuringIO",
+-						PerfData::U_Events, CHECK);
++                PerfDataManager::create_counter(SUN_RT, "interruptedDuringIO",
++                                                PerfData::U_Events, CHECK);
+ 
+     // The capabilities counter is a binary representation of the VM capabilities in string.
+     // This string respresentation simplifies the implementation of the client side
+@@ -97,6 +94,9 @@
+     memset((void*) capabilities, '0', len);
+     capabilities[len-1] = '\0';
+     capabilities[0] = AttachListener::is_attach_supported() ? '1' : '0';
++#ifdef KERNEL
++    capabilities[1] = '1';
++#endif // KERNEL
+     PerfDataManager::create_string_constant(SUN_RT, "jvmCapabilities",
+                                             capabilities, CHECK);
+   }
+@@ -139,21 +139,21 @@
+ // the application time counter at VM exit.
+ 
+ jlong RuntimeService::safepoint_sync_time_ms() {
+-  return UsePerfData ? 
++  return UsePerfData ?
+     Management::ticks_to_ms(_sync_time_ticks->get_value()) : -1;
+ }
+ 
+ jlong RuntimeService::safepoint_count() {
+-  return UsePerfData ? 
++  return UsePerfData ?
+     _total_safepoints->get_value() : -1;
+ }
+ jlong RuntimeService::safepoint_time_ms() {
+-  return UsePerfData ? 
++  return UsePerfData ?
+     Management::ticks_to_ms(_safepoint_time_ticks->get_value()) : -1;
+ }
+ 
+ jlong RuntimeService::application_time_ms() {
+-  return UsePerfData ? 
++  return UsePerfData ?
+     Management::ticks_to_ms(_application_time_ticks->get_value()) : -1;
+ }
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/services/runtimeService.hpp openjdk/hotspot/src/share/vm/services/runtimeService.hpp
+--- openjdk6/hotspot/src/share/vm/services/runtimeService.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/runtimeService.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)runtimeService.hpp	1.10 07/05/05 17:07:06 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class RuntimeService : public AllStatic {
+diff -ruN openjdk6/hotspot/src/share/vm/services/serviceUtil.hpp openjdk/hotspot/src/share/vm/services/serviceUtil.hpp
+--- openjdk6/hotspot/src/share/vm/services/serviceUtil.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/serviceUtil.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)serviceUtil.hpp	1.8 07/05/05 17:07:06 JVM"
+-#endif
+ /*
+  * Copyright 2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+@@ -31,7 +28,7 @@
+ //
+ class ServiceUtil : public AllStatic {
+  public:
+-    
++
+   // Return true if oop represents an object that is "visible"
+   // to the java world.
+   static inline bool visible_oop(oop o) {
+@@ -88,5 +85,5 @@
+     // everything else (methodOops, ...) aren't visible
+     return false;
+   };   // end of visible_oop()
+-    
++
+ };
+diff -ruN openjdk6/hotspot/src/share/vm/services/threadService.cpp openjdk/hotspot/src/share/vm/services/threadService.cpp
+--- openjdk6/hotspot/src/share/vm/services/threadService.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/threadService.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)threadService.cpp	1.54 07/05/17 16:07:12 JVM"
+-#endif
+ /*
+  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -57,7 +54,7 @@
+   // that case, they will be allocated on C heap.
+ 
+   _total_threads_count =
+-                PerfDataManager::create_counter(JAVA_THREADS, "started", 
++                PerfDataManager::create_counter(JAVA_THREADS, "started",
+                                                 PerfData::U_Events, CHECK);
+ 
+   _live_threads_count =
+@@ -74,7 +71,7 @@
+ 
+   if (os::is_thread_cpu_time_supported()) {
+     _thread_cpu_time_enabled = true;
+-  } 
++  }
+ }
+ 
+ void ThreadService::reset_peak_thread_count() {
+@@ -86,7 +83,7 @@
+ 
+ void ThreadService::add_thread(JavaThread* thread, bool daemon) {
+   // Do not count VM internal or JVMTI agent threads
+-  if (thread->is_hidden_from_external_view() || 
++  if (thread->is_hidden_from_external_view() ||
+       thread->is_jvmti_agent_thread()) {
+     return;
+   }
+@@ -148,7 +145,7 @@
+       obj = (oop) enter_obj->object();
+     }
+     // If obj == NULL, then ObjectMonitor is raw which doesn't count.
+-  }  
++  }
+ 
+   Handle h(obj);
+   return h;
+@@ -156,7 +153,7 @@
+ 
+ bool ThreadService::set_thread_monitoring_contention(bool flag) {
+   MutexLocker m(Management_lock);
+-  
++
+   bool prev = _thread_monitoring_contention_enabled;
+   _thread_monitoring_contention_enabled = flag;
+ 
+@@ -165,7 +162,7 @@
+ 
+ bool ThreadService::set_thread_cpu_time_enabled(bool flag) {
+   MutexLocker m(Management_lock);
+-  
++
+   bool prev = _thread_cpu_time_enabled;
+   _thread_cpu_time_enabled = flag;
+ 
+@@ -211,7 +208,7 @@
+ // Dump stack trace of threads specified in the given threads array.
+ // Returns StackTraceElement[][] each element is the stack trace of a thread in
+ // the corresponding entry in the given threads array
+-Handle ThreadService::dump_stack_traces(GrowableArray<instanceHandle>* threads, 
++Handle ThreadService::dump_stack_traces(GrowableArray<instanceHandle>* threads,
+                                         int num_threads,
+                                         TRAPS) {
+   assert(num_threads > 0, "just checking");
+@@ -224,22 +221,22 @@
+                    false, /* with locked monitors */
+                    false  /* with locked synchronizers */);
+   VMThread::execute(&op);
+-                                            
++
+   // Allocate the resulting StackTraceElement[][] object
+ 
+-  ResourceMark rm(THREAD); 
++  ResourceMark rm(THREAD);
+   klassOop k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_StackTraceElement_array(), true, CHECK_NH);
+   objArrayKlassHandle ik (THREAD, k);
+   objArrayOop r = oopFactory::new_objArray(ik(), num_threads, CHECK_NH);
+   objArrayHandle result_obj(THREAD, r);
+-                                    
++
+   int num_snapshots = dump_result.num_snapshots();
+   assert(num_snapshots == num_threads, "Must have num_threads thread snapshots");
+   int i = 0;
+   for (ThreadSnapshot* ts = dump_result.snapshots(); ts != NULL; i++, ts = ts->next()) {
+     ThreadStackTrace* stacktrace = ts->get_stack_trace();
+     if (stacktrace == NULL) {
+-      // No stack trace 
++      // No stack trace
+       result_obj->obj_at_put(i, NULL);
+     } else {
+       // Construct an array of java/lang/StackTraceElement object
+@@ -274,12 +271,12 @@
+   bool blocked_on_monitor = false;
+   JavaThread *currentThread, *previousThread;
+   int num_deadlocks = 0;
+-                                                                                 
++
+   for (JavaThread* p = Threads::first(); p != NULL; p = p->next()) {
+     // Initialize the depth-first-number
+     p->set_depth_first_number(-1);
+   }
+-                                                                                 
++
+   DeadlockCycle* deadlocks = NULL;
+   DeadlockCycle* last = NULL;
+   DeadlockCycle* cycle = new DeadlockCycle();
+@@ -288,7 +285,7 @@
+       // this thread was already visited
+       continue;
+     }
+-                                                                                 
++
+     thisDfn = globalDfn;
+     jt->set_depth_first_number(globalDfn++);
+     previousThread = jt;
+@@ -336,7 +333,7 @@
+         // We have a (new) cycle
+         num_deadlocks++;
+ 
+-        cycle->set_deadlock(true); 
++        cycle->set_deadlock(true);
+ 
+         // add this cycle to the deadlocks list
+         if (deadlocks == NULL) {
+@@ -354,7 +351,7 @@
+         waitingToLockBlocker = currentThread->current_park_blocker();
+       }
+     }
+-                                                                                 
++
+   }
+ 
+   return deadlocks;
+@@ -380,7 +377,7 @@
+ 
+   // free all the ThreadSnapshot objects created during
+   // the VM_ThreadDump operation
+-  ThreadSnapshot* ts = _snapshots; 
++  ThreadSnapshot* ts = _snapshots;
+   while (ts != NULL) {
+     ThreadSnapshot* p = ts;
+     ts = ts->next();
+@@ -390,7 +387,7 @@
+ 
+ 
+ void ThreadDumpResult::add_thread_snapshot(ThreadSnapshot* ts) {
+-  assert(_num_threads == 0 || _num_snapshots < _num_threads, 
++  assert(_num_threads == 0 || _num_snapshots < _num_threads,
+          "_num_snapshots must be less than _num_threads");
+   _num_snapshots++;
+   if (_snapshots == NULL) {
+@@ -454,7 +451,7 @@
+   ThreadStackTrace* _stack_trace;
+   Thread* _thread;
+ public:
+-  InflatedMonitorsClosure(Thread* t, ThreadStackTrace* st) { 
++  InflatedMonitorsClosure(Thread* t, ThreadStackTrace* st) {
+     _thread = t;
+     _stack_trace = st;
+   }
+@@ -465,7 +462,7 @@
+         _stack_trace->add_jni_locked_monitor(object);
+       }
+     }
+-  } 
++  }
+ };
+ 
+ ThreadStackTrace::ThreadStackTrace(JavaThread* t, bool with_locked_monitors) {
+@@ -491,7 +488,7 @@
+ }
+ 
+ void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth) {
+-  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");  
++  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
+ 
+   if (_thread->has_last_Java_frame()) {
+     RegisterMap reg_map(_thread);
+@@ -506,8 +503,8 @@
+         // Ignore non-Java frames
+       }
+       if (maxDepth > 0 && count == maxDepth) {
+-        // Skip frames if more than maxDepth 
+-        break; 
++        // Skip frames if more than maxDepth
++        break;
+       }
+     }
+   }
+@@ -522,7 +519,7 @@
+ 
+ 
+ bool ThreadStackTrace::is_owned_monitor_on_stack(oop object) {
+-  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");  
++  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
+ 
+   bool found = false;
+   int num_frames = get_stack_depth();
+@@ -539,7 +536,7 @@
+       }
+     }
+   }
+-  return found; 
++  return found;
+ }
+ 
+ Handle ThreadStackTrace::allocate_fill_stack_trace_element_array(TRAPS) {
+@@ -582,7 +579,7 @@
+   }
+ 
+   for (ThreadConcurrentLocks* t = _map; t != NULL;)  {
+-    ThreadConcurrentLocks* tcl = t; 
++    ThreadConcurrentLocks* tcl = t;
+     t = t->next();
+     delete tcl;
+   }
+@@ -625,7 +622,7 @@
+   if (tcl != NULL) {
+     tcl->add_lock(o);
+     return;
+-  }  
++  }
+ 
+   // First owned lock found for this thread
+   tcl = new ThreadConcurrentLocks(thread);
+@@ -724,18 +721,18 @@
+ 
+     Handle obj = ThreadService::get_current_contended_monitor(thread);
+     if (obj() == NULL) {
+-      // monitor no longer exists; thread is not blocked 
++      // monitor no longer exists; thread is not blocked
+       _thread_status = java_lang_Thread::RUNNABLE;
+     } else {
+       _blocker_object = obj();
+       JavaThread* owner = ObjectSynchronizer::get_lock_owner(obj, false);
+       if ((owner == NULL && _thread_status == java_lang_Thread::BLOCKED_ON_MONITOR_ENTER)
+-	  || (owner != NULL && owner->is_attaching())) {
++          || (owner != NULL && owner->is_attaching())) {
+         // ownership information of the monitor is not available
+         // (may no longer be owned or releasing to some other thread)
+         // make this thread in RUNNABLE state.
+-	// And when the owner thread is in attaching state, the java thread 
+-	// is not completely initialized. For example thread name and id 
++        // And when the owner thread is in attaching state, the java thread
++        // is not completely initialized. For example thread name and id
+         // and may not be set, so hide the attaching thread.
+         _thread_status = java_lang_Thread::RUNNABLE;
+         _blocker_object = NULL;
+@@ -812,7 +809,7 @@
+       if (obj != NULL) {
+         st->print(" (object "INTPTR_FORMAT ", a %s)", (address)obj,
+                    (instanceKlass::cast(obj->klass()))->external_name());
+-        
++
+         if (!currentThread->current_pending_monitor_is_from_java()) {
+           owner_desc = "\n  in JNI, which is held by";
+         }
+@@ -823,7 +820,7 @@
+       currentThread = Threads::owning_thread_from_monitor_owner(
+         (address)waitingToLockMonitor->owner(), false /* no locking needed */);
+     } else {
+-      st->print("  waiting for ownable synchronizer " INTPTR_FORMAT ", (a %s)", 
++      st->print("  waiting for ownable synchronizer " INTPTR_FORMAT ", (a %s)",
+                 (address)waitingToLockBlocker,
+                 (instanceKlass::cast(waitingToLockBlocker->klass()))->external_name());
+       assert(waitingToLockBlocker->is_a(SystemDictionary::abstract_ownable_synchronizer_klass()),
+@@ -846,13 +843,13 @@
+     currentThread = _threads->at(j);
+     st->print_cr("\"%s\":", currentThread->get_thread_name());
+     currentThread->print_stack_on(st);
+-  } 
++  }
+   JavaMonitorsInStackTrace = oldJavaMonitorsInStackTrace;
+ }
+ 
+-ThreadsListEnumerator::ThreadsListEnumerator(Thread* cur_thread, 
+-					     bool include_jvmti_agent_threads,
+-					     bool include_jni_attaching_threads) {
++ThreadsListEnumerator::ThreadsListEnumerator(Thread* cur_thread,
++                                             bool include_jvmti_agent_threads,
++                                             bool include_jni_attaching_threads) {
+   assert(cur_thread == Thread::current(), "Check current thread");
+ 
+   int init_size = ThreadService::get_live_thread_count();
+diff -ruN openjdk6/hotspot/src/share/vm/services/threadService.hpp openjdk/hotspot/src/share/vm/services/threadService.hpp
+--- openjdk6/hotspot/src/share/vm/services/threadService.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/services/threadService.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)threadService.hpp	1.41 07/05/05 17:07:06 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class OopClosure;
+@@ -33,7 +30,7 @@
+ class ThreadConcurrentLocks;
+ class DeadlockCycle;
+ 
+-// VM monitoring and management support for the thread and 
++// VM monitoring and management support for the thread and
+ // synchronization subsystem
+ //
+ // Thread contention monitoring is disabled by default.
+@@ -42,7 +39,7 @@
+ //
+ class ThreadService : public AllStatic {
+ private:
+-  // These counters could be moved to Threads class 
++  // These counters could be moved to Threads class
+   static PerfCounter*  _total_threads_count;
+   static PerfVariable* _live_threads_count;
+   static PerfVariable* _peak_threads_count;
+@@ -88,7 +85,7 @@
+   static Handle get_current_contended_monitor(JavaThread* thread);
+ 
+   // This function is called by JVM_DumpThreads.
+-  static Handle dump_stack_traces(GrowableArray<instanceHandle>* threads, 
++  static Handle dump_stack_traces(GrowableArray<instanceHandle>* threads,
+                                   int num_threads, TRAPS);
+ 
+   static void   reset_peak_thread_count();
+@@ -115,7 +112,7 @@
+   elapsedTimer _sleep_timer;
+ 
+ 
+-  // These two reset flags are set to true when another thread 
++  // These two reset flags are set to true when another thread
+   // requests to reset the statistics.  The actual statistics
+   // are reset when the thread contention occurs and attempts
+   // to update the statistics.
+@@ -128,14 +125,14 @@
+   int          _class_link_recursion_count;
+ 
+   // utility functions
+-  void  check_and_reset_count()            { 
++  void  check_and_reset_count()            {
+                                              if (!_count_pending_reset) return;
+                                              _contended_enter_count = 0;
+                                              _monitor_wait_count = 0;
+                                              _sleep_count = 0;
+                                              _count_pending_reset = 0;
+                                            }
+-  void  check_and_reset_timer()            { 
++  void  check_and_reset_timer()            {
+                                              if (!_timer_pending_reset) return;
+                                              _contended_enter_timer.reset();
+                                              _monitor_wait_timer.reset();
+@@ -164,7 +161,7 @@
+   void contended_enter()                   { check_and_reset_count(); _contended_enter_count++; }
+   void contended_enter_begin()             { check_and_reset_timer(); _contended_enter_timer.start(); }
+   void contended_enter_end()               { _contended_enter_timer.stop(); check_and_reset_timer(); }
+-  
++
+   void reset_count_stat()                  { _count_pending_reset = true; }
+   void reset_time_stat()                   { _timer_pending_reset = true; }
+ 
+@@ -173,7 +170,7 @@
+   int* class_link_recursion_count_addr()   { return &_class_link_recursion_count; }
+ };
+ 
+-// Thread snapshot to represent the thread state and statistics 
++// Thread snapshot to represent the thread state and statistics
+ class ThreadSnapshot : public CHeapObj {
+ private:
+   JavaThread* _thread;
+@@ -189,7 +186,7 @@
+   jlong   _monitor_wait_count;
+   jlong   _sleep_ticks;
+   jlong   _sleep_count;
+-  oop     _blocker_object;     
++  oop     _blocker_object;
+   oop     _blocker_object_owner;
+ 
+   ThreadStackTrace*      _stack_trace;
+@@ -197,7 +194,7 @@
+   ThreadSnapshot*        _next;
+ 
+ public:
+-  // Dummy snapshot 
++  // Dummy snapshot
+   ThreadSnapshot() : _thread(NULL), _threadObj(NULL), _stack_trace(NULL), _concurrent_locks(NULL), _next(NULL),
+                      _blocker_object(NULL), _blocker_object_owner(NULL) {};
+   ThreadSnapshot(JavaThread* thread);
+@@ -291,7 +288,7 @@
+   GrowableArray<instanceOop>* _owned_locks;
+   ThreadConcurrentLocks*      _next;
+   JavaThread*                 _thread;
+- public: 
++ public:
+   ThreadConcurrentLocks(JavaThread* thread);
+   ~ThreadConcurrentLocks();
+ 
+@@ -335,7 +332,7 @@
+   ~ThreadDumpResult();
+ 
+   void                 add_thread_snapshot(ThreadSnapshot* ts);
+-  void                 set_next(ThreadDumpResult* next) { _next = next; } 
++  void                 set_next(ThreadDumpResult* next) { _next = next; }
+   ThreadDumpResult*    next()                           { return _next; }
+   int                  num_threads()                    { return _num_threads; }
+   int                  num_snapshots()                  { return _num_snapshots; }
+@@ -368,9 +365,9 @@
+ private:
+   GrowableArray<instanceHandle>* _threads_array;
+ public:
+-  ThreadsListEnumerator(Thread* cur_thread, 
+-			bool include_jvmti_agent_threads = false,
+-			bool include_jni_attaching_threads = true);
++  ThreadsListEnumerator(Thread* cur_thread,
++                        bool include_jvmti_agent_threads = false,
++                        bool include_jni_attaching_threads = true);
+   int            num_threads()            { return _threads_array->length(); }
+   instanceHandle get_threadObj(int index) { return _threads_array->at(index); }
+ };
+@@ -385,19 +382,24 @@
+ 
+   void save_old_state(JavaThread* java_thread) {
+     _java_thread  = java_thread;
+-    _is_alive = (_java_thread != NULL) && (_java_thread->threadObj() != NULL);
++    _is_alive = is_alive(java_thread);
+     if (is_alive()) {
+       _old_state = java_lang_Thread::get_thread_status(_java_thread->threadObj());
+     }
+   }
+ 
+  public:
++  static void set_thread_status(JavaThread* java_thread,
++                                java_lang_Thread::ThreadStatus state) {
++    java_lang_Thread::set_thread_status(java_thread->threadObj(), state);
++  }
++
+   void set_thread_status(java_lang_Thread::ThreadStatus state) {
+     if (is_alive()) {
+-      java_lang_Thread::set_thread_status(_java_thread->threadObj(), state);
++      set_thread_status(_java_thread, state);
+     }
+   }
+-    
++
+   JavaThreadStatusChanger(JavaThread* java_thread,
+                           java_lang_Thread::ThreadStatus state) {
+     save_old_state(java_thread);
+@@ -411,6 +413,11 @@
+   ~JavaThreadStatusChanger() {
+     set_thread_status(_old_state);
+   }
++
++  static bool is_alive(JavaThread* java_thread) {
++    return java_thread != NULL && java_thread->threadObj() != NULL;
++  }
++
+   bool is_alive() {
+     return _is_alive;
+   }
+@@ -419,12 +426,12 @@
+ // Change status to waiting on an object  (timed or indefinite)
+ class JavaThreadInObjectWaitState : public JavaThreadStatusChanger {
+  private:
+-  ThreadStatistics* _stat; 
++  ThreadStatistics* _stat;
+   bool _active;
+-    
++
+  public:
+   JavaThreadInObjectWaitState(JavaThread *java_thread, bool timed) :
+-    JavaThreadStatusChanger(java_thread, 
++    JavaThreadStatusChanger(java_thread,
+                             timed ? java_lang_Thread::IN_OBJECT_WAIT_TIMED : java_lang_Thread::IN_OBJECT_WAIT) {
+     if (is_alive()) {
+       _stat = java_thread->get_thread_stat();
+@@ -448,12 +455,12 @@
+ // Change status to parked (timed or indefinite)
+ class JavaThreadParkedState : public JavaThreadStatusChanger {
+  private:
+-  ThreadStatistics* _stat; 
++  ThreadStatistics* _stat;
+   bool _active;
+-    
++
+  public:
+   JavaThreadParkedState(JavaThread *java_thread, bool timed) :
+-    JavaThreadStatusChanger(java_thread, 
++    JavaThreadStatusChanger(java_thread,
+                             timed ? java_lang_Thread::PARKED_TIMED : java_lang_Thread::PARKED) {
+     if (is_alive()) {
+       _stat = java_thread->get_thread_stat();
+@@ -477,9 +484,39 @@
+ // Change status to blocked on (re-)entering a synchronization block
+ class JavaThreadBlockedOnMonitorEnterState : public JavaThreadStatusChanger {
+  private:
+-  ThreadStatistics* _stat; 
++  ThreadStatistics* _stat;
+   bool _active;
++
++  static bool contended_enter_begin(JavaThread *java_thread) {
++    set_thread_status(java_thread, java_lang_Thread::BLOCKED_ON_MONITOR_ENTER);
++    ThreadStatistics* stat = java_thread->get_thread_stat();
++    stat->contended_enter();
++    bool active = ThreadService::is_thread_monitoring_contention();
++    if (active) {
++      stat->contended_enter_begin();
++    }
++    return active;
++  }
++
+  public:
++  // java_thread is waiting thread being blocked on monitor reenter.
++  // Current thread is the notifying thread which holds the monitor.
++  static bool wait_reenter_begin(JavaThread *java_thread, ObjectMonitor *obj_m) {
++    assert((java_thread != NULL), "Java thread should not be null here");
++    bool active  = false;
++    if (is_alive(java_thread) && ServiceUtil::visible_oop((oop)obj_m->object())) {
++      active = contended_enter_begin(java_thread);
++    }
++    return active;
++  }
++
++  static void wait_reenter_end(JavaThread *java_thread, bool active) {
++    if (active) {
++      java_thread->get_thread_stat()->contended_enter_end();
++    }
++    set_thread_status(java_thread, java_lang_Thread::RUNNABLE);
++  }
++
+   JavaThreadBlockedOnMonitorEnterState(JavaThread *java_thread, ObjectMonitor *obj_m) :
+     JavaThreadStatusChanger(java_thread) {
+     assert((java_thread != NULL), "Java thread should not be null here");
+@@ -487,17 +524,11 @@
+     // enter done for external java world objects and it is contended. All other cases
+     // like for vm internal objects and for external objects which are not contended
+     // thread status is not changed and contended enter stat is not collected.
++    _active = false;
+     if (is_alive() && ServiceUtil::visible_oop((oop)obj_m->object()) && obj_m->contentions() > 0) {
+-      set_thread_status(java_lang_Thread::BLOCKED_ON_MONITOR_ENTER);
+       _stat = java_thread->get_thread_stat();
+-      _stat->contended_enter();
+-      _active = ThreadService::is_thread_monitoring_contention();
+-      if (_active) {
+-        _stat->contended_enter_begin();
+-      }
+-    } else {
+-      _active = false;
+-    } 
++      _active = contended_enter_begin(java_thread);
++    }
+   }
+ 
+   ~JavaThreadBlockedOnMonitorEnterState() {
+@@ -510,7 +541,7 @@
+ // Change status to sleeping
+ class JavaThreadSleepState : public JavaThreadStatusChanger {
+  private:
+-  ThreadStatistics* _stat; 
++  ThreadStatistics* _stat;
+   bool _active;
+  public:
+   JavaThreadSleepState(JavaThread *java_thread) :
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/accessFlags.cpp openjdk/hotspot/src/share/vm/utilities/accessFlags.cpp
+--- openjdk6/hotspot/src/share/vm/utilities/accessFlags.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/accessFlags.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)accessFlags.cpp	1.25 07/05/05 17:07:07 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -60,7 +57,7 @@
+   if (is_synchronized()) st->print("synchronized ");
+   if (is_volatile    ()) st->print("volatile "    );
+   if (is_transient   ()) st->print("transient "   );
+-  if (is_native      ()) st->print("native "      );	
++  if (is_native      ()) st->print("native "      );
+   if (is_interface   ()) st->print("interface "   );
+   if (is_abstract    ()) st->print("abstract "    );
+   if (is_strict      ()) st->print("strict "      );
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/accessFlags.hpp openjdk/hotspot/src/share/vm/utilities/accessFlags.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/accessFlags.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/accessFlags.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)accessFlags.hpp	1.68 07/05/05 17:07:07 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // AccessFlags is an abstraction over Java access flags.
+@@ -38,8 +35,8 @@
+ 
+   // methodOop flags
+   JVM_ACC_MONITOR_MATCH           = 0x10000000,     // True if we know that monitorenter/monitorexit bytecodes match
+-  JVM_ACC_HAS_MONITOR_BYTECODES   = 0x20000000,     // Method contains monitorenter/monitorexit bytecodes  
+-  JVM_ACC_HAS_LOOPS               = 0x40000000,     // Method has loops    
++  JVM_ACC_HAS_MONITOR_BYTECODES   = 0x20000000,     // Method contains monitorenter/monitorexit bytecodes
++  JVM_ACC_HAS_LOOPS               = 0x40000000,     // Method has loops
+   JVM_ACC_LOOPS_FLAG_INIT         = (int)0x80000000,// The loop flag has been initialized
+   JVM_ACC_QUEUED                  = 0x01000000,     // Queued for compilation
+   JVM_ACC_NOT_TIER1_COMPILABLE    = 0x04000000,
+@@ -53,7 +50,7 @@
+ 
+   // klassOop flags
+   JVM_ACC_HAS_MIRANDA_METHODS     = 0x10000000,     // True if this class has miranda methods in it's vtable
+-  JVM_ACC_HAS_VANILLA_CONSTRUCTOR = 0x20000000,     // True if klass has a vanilla default constructor 
++  JVM_ACC_HAS_VANILLA_CONSTRUCTOR = 0x20000000,     // True if klass has a vanilla default constructor
+   JVM_ACC_HAS_FINALIZER           = 0x40000000,     // True if klass has a non-empty finalize() method
+   JVM_ACC_IS_CLONEABLE            = (int)0x80000000,// True if klass supports the Clonable interface
+   JVM_ACC_HAS_FINAL_METHOD        = 0x01000000,     // True if klass has final method
+@@ -97,17 +94,17 @@
+   bool is_native      () const         { return (_flags & JVM_ACC_NATIVE      ) != 0; }
+   bool is_interface   () const         { return (_flags & JVM_ACC_INTERFACE   ) != 0; }
+   bool is_abstract    () const         { return (_flags & JVM_ACC_ABSTRACT    ) != 0; }
+-  bool is_strict      () const         { return (_flags & JVM_ACC_STRICT      ) != 0; }  
+-  
++  bool is_strict      () const         { return (_flags & JVM_ACC_STRICT      ) != 0; }
++
+   // Attribute flags
+   bool is_synthetic   () const         { return (_flags & JVM_ACC_SYNTHETIC   ) != 0; }
+ 
+   // methodOop flags
+   bool is_monitor_matching     () const { return (_flags & JVM_ACC_MONITOR_MATCH          ) != 0; }
+   bool has_monitor_bytecodes   () const { return (_flags & JVM_ACC_HAS_MONITOR_BYTECODES  ) != 0; }
+-  bool has_loops               () const { return (_flags & JVM_ACC_HAS_LOOPS              ) != 0; }  
+-  bool loops_flag_init         () const { return (_flags & JVM_ACC_LOOPS_FLAG_INIT        ) != 0; }  
+-  bool queued_for_compilation  () const { return (_flags & JVM_ACC_QUEUED                 ) != 0; }  
++  bool has_loops               () const { return (_flags & JVM_ACC_HAS_LOOPS              ) != 0; }
++  bool loops_flag_init         () const { return (_flags & JVM_ACC_LOOPS_FLAG_INIT        ) != 0; }
++  bool queued_for_compilation  () const { return (_flags & JVM_ACC_QUEUED                 ) != 0; }
+   bool is_not_tier1_compilable  () const { return (_flags & JVM_ACC_NOT_TIER1_COMPILABLE  ) != 0; }
+   bool is_not_osr_compilable   () const { return (_flags & JVM_ACC_NOT_OSR_COMPILABLE     ) != 0; }
+   bool has_linenumber_table    () const { return (_flags & JVM_ACC_HAS_LINE_NUMBER_TABLE  ) != 0; }
+@@ -125,8 +122,8 @@
+   bool is_cloneable            () const { return (_flags & JVM_ACC_IS_CLONEABLE           ) != 0; }
+   // klassOop and methodOop flags
+   bool has_localvariable_table () const { return (_flags & JVM_ACC_HAS_LOCAL_VARIABLE_TABLE) != 0; }
+-  void set_has_localvariable_table()	{ atomic_set_bits(JVM_ACC_HAS_LOCAL_VARIABLE_TABLE); }
+-  void clear_has_localvariable_table()	{ atomic_clear_bits(JVM_ACC_HAS_LOCAL_VARIABLE_TABLE); }
++  void set_has_localvariable_table()    { atomic_set_bits(JVM_ACC_HAS_LOCAL_VARIABLE_TABLE); }
++  void clear_has_localvariable_table()  { atomic_clear_bits(JVM_ACC_HAS_LOCAL_VARIABLE_TABLE); }
+ 
+   // field flags
+   bool is_field_access_watched() const  { return (_flags & JVM_ACC_FIELD_ACCESS_WATCHED) != 0; }
+@@ -141,8 +138,8 @@
+   void set_field_flags(jint flags)      { _flags = (flags & JVM_ACC_FIELD_FLAGS); }
+   void set_flags(jint flags)            { _flags = (flags & JVM_ACC_WRITTEN_FLAGS); }
+ 
+-  void set_queued_for_compilation()    { atomic_set_bits(JVM_ACC_QUEUED); }   
+-  void clear_queued_for_compilation()  { atomic_clear_bits(JVM_ACC_QUEUED); }   
++  void set_queued_for_compilation()    { atomic_set_bits(JVM_ACC_QUEUED); }
++  void clear_queued_for_compilation()  { atomic_clear_bits(JVM_ACC_QUEUED); }
+ 
+   // Atomic update of flags
+   void atomic_set_bits(jint bits);
+@@ -152,7 +149,7 @@
+   friend class methodOopDesc;
+   friend class Klass;
+   friend class ClassFileParser;
+-  // the functions below should only be called on the _access_flags inst var directly, 
++  // the functions below should only be called on the _access_flags inst var directly,
+   // otherwise they are just changing a copy of the flags
+ 
+   // attribute flags
+@@ -160,9 +157,9 @@
+ 
+   // methodOop flags
+   void set_monitor_matching()          { atomic_set_bits(JVM_ACC_MONITOR_MATCH);           }
+-  void set_has_monitor_bytecodes()     { atomic_set_bits(JVM_ACC_HAS_MONITOR_BYTECODES);   } 
+-  void set_has_loops()                 { atomic_set_bits(JVM_ACC_HAS_LOOPS);               }   
+-  void set_loops_flag_init()           { atomic_set_bits(JVM_ACC_LOOPS_FLAG_INIT);         }   
++  void set_has_monitor_bytecodes()     { atomic_set_bits(JVM_ACC_HAS_MONITOR_BYTECODES);   }
++  void set_has_loops()                 { atomic_set_bits(JVM_ACC_HAS_LOOPS);               }
++  void set_loops_flag_init()           { atomic_set_bits(JVM_ACC_LOOPS_FLAG_INIT);         }
+   void set_not_tier1_compilable()      { atomic_set_bits(JVM_ACC_NOT_TIER1_COMPILABLE);    }
+   void set_not_osr_compilable()        { atomic_set_bits(JVM_ACC_NOT_OSR_COMPILABLE);      }
+   void set_has_linenumber_table()      { atomic_set_bits(JVM_ACC_HAS_LINE_NUMBER_TABLE);   }
+@@ -173,11 +170,11 @@
+   void set_is_prefixed_native()        { atomic_set_bits(JVM_ACC_IS_PREFIXED_NATIVE);      }
+ 
+   // klassOop flags
+-  void set_has_vanilla_constructor()   { atomic_set_bits(JVM_ACC_HAS_VANILLA_CONSTRUCTOR); }   
+-  void set_has_finalizer()             { atomic_set_bits(JVM_ACC_HAS_FINALIZER);           }   
+-  void set_has_final_method()          { atomic_set_bits(JVM_ACC_HAS_FINAL_METHOD);        }   
+-  void set_is_cloneable()              { atomic_set_bits(JVM_ACC_IS_CLONEABLE);            } 
+-  void set_has_miranda_methods()       { atomic_set_bits(JVM_ACC_HAS_MIRANDA_METHODS);     }  
++  void set_has_vanilla_constructor()   { atomic_set_bits(JVM_ACC_HAS_VANILLA_CONSTRUCTOR); }
++  void set_has_finalizer()             { atomic_set_bits(JVM_ACC_HAS_FINALIZER);           }
++  void set_has_final_method()          { atomic_set_bits(JVM_ACC_HAS_FINAL_METHOD);        }
++  void set_is_cloneable()              { atomic_set_bits(JVM_ACC_IS_CLONEABLE);            }
++  void set_has_miranda_methods()       { atomic_set_bits(JVM_ACC_HAS_MIRANDA_METHODS);     }
+ 
+  public:
+   // field flags
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/array.cpp openjdk/hotspot/src/share/vm/utilities/array.cpp
+--- openjdk6/hotspot/src/share/vm/utilities/array.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/array.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)array.cpp	1.20 07/05/05 17:07:07 JVM"
+-#endif
+ /*
+  * Copyright 2000-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/array.hpp openjdk/hotspot/src/share/vm/utilities/array.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/array.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/array.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)array.hpp	1.15 07/05/05 17:07:07 JVM"
+-#endif
+ /*
+  * Copyright 2000-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // correct linkage required to compile w/o warnings
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/bitMap.cpp openjdk/hotspot/src/share/vm/utilities/bitMap.cpp
+--- openjdk6/hotspot/src/share/vm/utilities/bitMap.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/bitMap.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)bitMap.cpp	1.48 07/05/05 17:07:07 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -64,10 +61,10 @@
+ BitMap::inverted_bit_mask_for_range(idx_t beg, idx_t end) const {
+   assert(end != 0, "does not work when end == 0");
+   assert(beg == end || word_index(beg) == word_index(end - 1),
+-	 "must be a single-word range");
+-  idx_t mask = bit_mask(beg) - 1;	// low (right) bits
++         "must be a single-word range");
++  idx_t mask = bit_mask(beg) - 1;       // low (right) bits
+   if (bit_in_word(end) != 0) {
+-    mask |= ~(bit_mask(end) - 1);	// high (left) bits
++    mask |= ~(bit_mask(end) - 1);       // high (left) bits
+   }
+   return mask;
+ }
+@@ -165,10 +162,10 @@
+ 
+   idx_t beg_full_word = word_index_round_up(beg);
+   idx_t end_full_word = word_index(end);
+-  
++
+   assert(end_full_word - beg_full_word >= 32,
+-	 "the range must include at least 32 bytes");
+-  
++         "the range must include at least 32 bytes");
++
+   // The range includes at least one full word.
+   set_range_within_word(beg, bit_index(beg_full_word));
+   set_large_range_of_words(beg_full_word, end_full_word);
+@@ -180,10 +177,10 @@
+ 
+   idx_t beg_full_word = word_index_round_up(beg);
+   idx_t end_full_word = word_index(end);
+-             
++
+   assert(end_full_word - beg_full_word >= 32,
+-	 "the range must include at least 32 bytes");
+-  
++         "the range must include at least 32 bytes");
++
+   // The range includes at least one full word.
+   clear_range_within_word(beg, bit_index(beg_full_word));
+   clear_large_range_of_words(beg_full_word, end_full_word);
+@@ -195,7 +192,7 @@
+     set_bit(offset);
+   } else {
+     clear_bit(offset);
+-  }  
++  }
+ }
+ 
+ // Return true to indicate that this thread changed
+@@ -269,10 +266,10 @@
+ 
+   idx_t beg_full_word = word_index_round_up(beg);
+   idx_t end_full_word = word_index(end);
+-             
++
+   assert(end_full_word - beg_full_word >= 32,
+-	 "the range must include at least 32 bytes");
+-  
++         "the range must include at least 32 bytes");
++
+   // The range includes at least one full word.
+   par_put_range_within_word(beg, bit_index(beg_full_word), value);
+   if (value) {
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/bitMap.hpp openjdk/hotspot/src/share/vm/utilities/bitMap.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/bitMap.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/bitMap.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)bitMap.hpp	1.45 07/05/05 17:07:06 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Closure for iterating over BitMaps
+@@ -43,7 +40,7 @@
+   friend class BitMap2D;
+ 
+  public:
+-  typedef size_t idx_t;		// Type used for bit and word indices.
++  typedef size_t idx_t;         // Type used for bit and word indices.
+ 
+   // Hints for range sizes.
+   typedef enum {
+@@ -116,6 +113,10 @@
+   }
+ 
+  public:
++
++  // Constructs a bitmap with no map, and size 0.
++  BitMap() : _map(NULL), _size(0) {}
++
+   // Construction
+   BitMap(idx_t* map, idx_t size_in_bits);
+ 
+@@ -190,7 +191,7 @@
+   // Clearing
+   void clear();
+   void clear_large();
+-  
++
+   // Iteration support
+   void iterate(BitMapClosure* blk, idx_t leftIndex, idx_t rightIndex);
+   inline void iterate(BitMapClosure* blk) {
+@@ -297,7 +298,7 @@
+ // Convenience class wrapping BitMap which provides multiple bits per slot.
+ class BitMap2D VALUE_OBJ_CLASS_SPEC {
+  public:
+-  typedef size_t idx_t;		// Type used for bit and word indices.
++  typedef size_t idx_t;         // Type used for bit and word indices.
+ 
+  private:
+   BitMap _map;
+@@ -357,7 +358,7 @@
+     verify_bit_within_slot_index(bit_within_slot_index);
+     _map.at_put_grow(bit_index(slot_index, bit_within_slot_index), value);
+   }
+-  
++
+   void clear() {
+     _map.clear();
+   }
+@@ -393,4 +394,3 @@
+     }
+   }
+ }
+-
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/bitMap.inline.hpp openjdk/hotspot/src/share/vm/utilities/bitMap.inline.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/bitMap.inline.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/bitMap.inline.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)bitMap.inline.hpp	1.8 07/05/05 17:07:07 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ inline bool BitMap::par_set_bit(idx_t bit) {
+@@ -34,15 +31,15 @@
+   do {
+     const idx_t new_val = old_val | mask;
+     if (new_val == old_val) {
+-      return false;	// Someone else beat us to it.
++      return false;     // Someone else beat us to it.
+     }
+     const idx_t cur_val = (idx_t) Atomic::cmpxchg_ptr((void*) new_val,
+-						      (volatile void*) addr,
+-						      (void*) old_val);
++                                                      (volatile void*) addr,
++                                                      (void*) old_val);
+     if (cur_val == old_val) {
+-      return true;	// Success.
++      return true;      // Success.
+     }
+-    old_val = cur_val;	// The value changed, try again.
++    old_val = cur_val;  // The value changed, try again.
+   } while (true);
+ }
+ 
+@@ -55,15 +52,15 @@
+   do {
+     const idx_t new_val = old_val & mask;
+     if (new_val == old_val) {
+-      return false;	// Someone else beat us to it.
++      return false;     // Someone else beat us to it.
+     }
+     const idx_t cur_val = (idx_t) Atomic::cmpxchg_ptr((void*) new_val,
+-						      (volatile void*) addr,
+-						      (void*) old_val);
++                                                      (volatile void*) addr,
++                                                      (void*) old_val);
+     if (cur_val == old_val) {
+-      return true;	// Success.
++      return true;      // Success.
+     }
+-    old_val = cur_val;	// The value changed, try again.
++    old_val = cur_val;  // The value changed, try again.
+   } while (true);
+ }
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/constantTag.cpp openjdk/hotspot/src/share/vm/utilities/constantTag.cpp
+--- openjdk6/hotspot/src/share/vm/utilities/constantTag.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/constantTag.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)constantTag.cpp	1.21 07/05/05 17:07:08 JVM"
+-#endif
+ /*
+  * Copyright 1997-1999 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -80,7 +77,7 @@
+     default:
+       ShouldNotReachHere();
+       break;
+-  }         
++  }
+ }
+ 
+ #endif // PRODUCT
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/constantTag.hpp openjdk/hotspot/src/share/vm/utilities/constantTag.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/constantTag.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/constantTag.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)constantTag.hpp	1.28 07/05/05 17:07:08 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // constant tags in Java .class files
+@@ -32,14 +29,14 @@
+   // See jvm.h for shared JVM_CONSTANT_XXX tags
+   // NOTE: replicated in SA in vm/agent/sun/jvm/hotspot/utilities/ConstantTag.java
+   // Hotspot specific tags
+-  JVM_CONSTANT_Invalid             	= 0,    // For bad value initialization
+-  JVM_CONSTANT_InternalMin		= 100,	// First implementation tag (aside from bad value of course)
+-  JVM_CONSTANT_UnresolvedClass     	= 100,  // Temporary tag until actual use
+-  JVM_CONSTANT_ClassIndex          	= 101,  // Temporary tag while constructing constant pool
+-  JVM_CONSTANT_UnresolvedString    	= 102,  // Temporary tag until actual use
+-  JVM_CONSTANT_StringIndex         	= 103,  // Temporary tag while constructing constant pool
+-  JVM_CONSTANT_UnresolvedClassInError	= 104,	// Error tag due to resolution error
+-  JVM_CONSTANT_InternalMax		= 104	// Last implementation tag
++  JVM_CONSTANT_Invalid                  = 0,    // For bad value initialization
++  JVM_CONSTANT_InternalMin              = 100,  // First implementation tag (aside from bad value of course)
++  JVM_CONSTANT_UnresolvedClass          = 100,  // Temporary tag until actual use
++  JVM_CONSTANT_ClassIndex               = 101,  // Temporary tag while constructing constant pool
++  JVM_CONSTANT_UnresolvedString         = 102,  // Temporary tag until actual use
++  JVM_CONSTANT_StringIndex              = 103,  // Temporary tag while constructing constant pool
++  JVM_CONSTANT_UnresolvedClassInError   = 104,  // Error tag due to resolution error
++  JVM_CONSTANT_InternalMax              = 104   // Last implementation tag
+ };
+ 
+ 
+@@ -65,8 +62,8 @@
+     return _tag == JVM_CONSTANT_UnresolvedClass || _tag == JVM_CONSTANT_UnresolvedClassInError;
+   }
+ 
+-  bool is_unresolved_klass_in_error() const { 
+-    return _tag == JVM_CONSTANT_UnresolvedClassInError; 
++  bool is_unresolved_klass_in_error() const {
++    return _tag == JVM_CONSTANT_UnresolvedClassInError;
+   }
+ 
+   bool is_klass_index() const       { return _tag == JVM_CONSTANT_ClassIndex; }
+@@ -77,14 +74,13 @@
+   bool is_field_or_method() const   { return is_field() || is_method() || is_interface_method(); }
+   bool is_symbol() const            { return is_utf8(); }
+ 
+-  constantTag(jbyte tag) { 
+-    assert((tag >= 0 && tag <= JVM_CONSTANT_NameAndType) || 
++  constantTag(jbyte tag) {
++    assert((tag >= 0 && tag <= JVM_CONSTANT_NameAndType) ||
+            (tag >= JVM_CONSTANT_InternalMin && tag <= JVM_CONSTANT_InternalMax), "Invalid constant tag");
+-    _tag = tag; 
++    _tag = tag;
+   }
+ 
+   jbyte value()                      { return _tag; }
+-    
++
+   void print_on(outputStream* st) const PRODUCT_RETURN;
+ };
+-
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/copy.cpp openjdk/hotspot/src/share/vm/utilities/copy.cpp
+--- openjdk6/hotspot/src/share/vm/utilities/copy.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/copy.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,9 +1,25 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)copy.cpp	1.2 07/04/13 10:35:00 JVM"
+-#endif
+ /*
+- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+- * SUN PROPRIETARY/CONFIDENTIAL.  Use is subject to license terms.
++ * Copyright 2006-2007 Sun Microsystems, Inc.  All Rights Reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
++ * CA 95054 USA or visit www.sun.com if you need additional information or
++ * have any questions.
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/copy.hpp openjdk/hotspot/src/share/vm/utilities/copy.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/copy.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/copy.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)copy.hpp	1.15 07/05/17 16:07:14 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Assembly code for platforms that need it.
+@@ -293,7 +290,7 @@
+   }
+ 
+   static void assert_params_ok(HeapWord* to, intptr_t log_align) {
+-#ifdef ASSERT 
++#ifdef ASSERT
+     if (mask_bits((uintptr_t)to, right_n_bits(log_align)) != 0)
+       basic_fatal("not word aligned");
+ #endif
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/debug.cpp openjdk/hotspot/src/share/vm/utilities/debug.cpp
+--- openjdk6/hotspot/src/share/vm/utilities/debug.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/debug.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)debug.cpp	1.180 07/05/05 17:07:08 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -182,7 +179,7 @@
+   err.report_and_die();
+ }
+ 
+-void report_fatal_vararg(const char* file_name, int line_no, const char* format, ...) {  
++void report_fatal_vararg(const char* file_name, int line_no, const char* format, ...) {
+   char buffer[256];
+   va_list ap;
+   va_start(ap, format);
+@@ -198,15 +195,15 @@
+ // Just passing the flow to VMError to handle error
+ void report_vm_out_of_memory(const char* file_name, int line_no, size_t size, const char* message) {
+   if (Debugging || assert_is_suppressed(file_name, line_no))  return;
+-  
+-  // We try to gather additional information for the first out of memory  
+-  // error only; gathering additional data might cause an allocation and a  
+-  // recursive out_of_memory condition. 
+-   
++
++  // We try to gather additional information for the first out of memory
++  // error only; gathering additional data might cause an allocation and a
++  // recursive out_of_memory condition.
++
+   const jint exiting = 1;
+   // If we succeed in changing the value, we're the first one in.
+   bool first_time_here = Atomic::xchg(exiting, &_exiting_out_of_mem) != exiting;
+-   
++
+   if (first_time_here) {
+     Thread* thread = ThreadLocalStorage::get_thread_slow();
+     VMError(thread, size, message, file_name, line_no).report_and_die();
+@@ -214,7 +211,7 @@
+   vm_abort();
+ }
+ 
+-void report_vm_out_of_memory_vararg(const char* file_name, int line_no, size_t size, const char* format, ...) {  
++void report_vm_out_of_memory_vararg(const char* file_name, int line_no, size_t size, const char* format, ...) {
+   char buffer[256];
+   va_list ap;
+   va_start(ap, format);
+@@ -252,9 +249,8 @@
+ 
+ void report_java_out_of_memory(const char* message) {
+   static jint out_of_memory_reported = 0;
+-  static char path[JVM_MAXPATHLEN];
+ 
+-  // A number of threads may attempt to report OutOfMemoryError at around the 
++  // A number of threads may attempt to report OutOfMemoryError at around the
+   // same time. To avoid dumping the heap or executing the data collection
+   // commands multiple times we just do it once when the first threads reports
+   // the error.
+@@ -262,47 +258,7 @@
+     // create heap dump before OnOutOfMemoryError commands are executed
+     if (HeapDumpOnOutOfMemoryError) {
+       tty->print_cr("java.lang.OutOfMemoryError: %s", message);
+-
+-      // The dump file defaults to java_pid<pid>.hprof in the current working
+-      // directory. HeapDumpPath=<file> can be used to specify an alternative
+-      // dump file name or a directory where dump file is created.    
+-      bool use_default_filename = true;
+-      if (HeapDumpPath == NULL || HeapDumpPath[0] == '\0') {
+-        path[0] = '\0'; // HeapDumpPath=<file> not specified
+-      } else {
+-        assert(strlen(HeapDumpPath) < sizeof(path), "HeapDumpPath too long");
+-        strcpy(path, HeapDumpPath);
+-	// check if the path is a directory (must exist)
+-        DIR* dir = os::opendir(path);        
+-	if (dir == NULL) {	  
+-	  use_default_filename = false; 
+-	} else {
+-	  // HeapDumpPath specified a directory. We append a file separator
+-	  // (if needed).
+-	  os::closedir(dir);
+-	  size_t fs_len = strlen(os::file_separator());
+-	  if (strlen(path) >= fs_len) {
+-	    char* end = path;
+-	    end += (strlen(path) - fs_len);
+-	    if (strcmp(end, os::file_separator()) != 0) {
+-              assert(strlen(path) + strlen(os::file_separator()) < sizeof(path), 
+-	        "HeapDumpPath too long");
+-	      strcat(path, os::file_separator());
+-	    }
+-	  }
+-	}     
+-      }
+-      // If HeapDumpPath wasn't a file name then we append the default name
+-      if (use_default_filename) {
+-        char fn[32];
+-	sprintf(fn, "java_pid%d.hprof", os::current_process_id());
+-	assert(strlen(path) + strlen(fn) < sizeof(path), "HeapDumpPath too long");
+-	strcat(path, fn);
+-      }
+-
+-      HeapDumper dumper(false /* no GC before heap dump */,
+-                        true  /* send to tty */);
+-      dumper.dump(path);
++      HeapDumper::dump_heap();
+     }
+ 
+     if (OnOutOfMemoryError && OnOutOfMemoryError[0]) {
+@@ -354,9 +310,9 @@
+ 
+ int Command::level = 0;
+ 
+-extern "C" void blob(CodeBlob* cb) {  
++extern "C" void blob(CodeBlob* cb) {
+   Command c("blob");
+-  cb->print();  
++  cb->print();
+ }
+ 
+ 
+@@ -452,7 +408,7 @@
+     // If the last_Java_fp is set we are in C land and
+     // can call the standard stack_trace function.
+     p->trace_stack();
+-  } else {    
++  } else {
+     frame f = os::current_frame();
+     RegisterMap reg_map(p);
+     f = f.sender(&reg_map);
+@@ -502,7 +458,7 @@
+ }
+ 
+ 
+-extern "C" void debug() {		// to set things up for compiler debugging
++extern "C" void debug() {               // to set things up for compiler debugging
+   Command c("debug");
+   WizardMode = true;
+   PrintVMMessages = PrintCompilation = true;
+@@ -511,7 +467,7 @@
+ }
+ 
+ 
+-extern "C" void ndebug() {		// undo debug()
++extern "C" void ndebug() {              // undo debug()
+   Command c("ndebug");
+   PrintCompilation = false;
+   PrintInlining = PrintAssembly = false;
+@@ -552,16 +508,16 @@
+ }
+ 
+ 
+-extern "C" methodOop findm(intptr_t pc) { 
++extern "C" methodOop findm(intptr_t pc) {
+   Command c("findm");
+   nmethod* nm = CodeCache::find_nmethod((address)pc);
+-  return (nm == NULL) ? (methodOop)NULL : nm->method(); 
++  return (nm == NULL) ? (methodOop)NULL : nm->method();
+ }
+ 
+ 
+ extern "C" nmethod* findnm(intptr_t addr) {
+   Command c("findnm");
+-  return  CodeCache::find_nmethod((address)addr);  
++  return  CodeCache::find_nmethod((address)addr);
+ }
+ 
+ static address same_page(address x, address y) {
+@@ -584,24 +540,24 @@
+     if (b->is_buffer_blob()) {
+       // the interpreter is generated into a buffer blob
+       InterpreterCodelet* i = Interpreter::codelet_containing(addr);
+-      if (i != NULL) { 
+-        i->print(); 
+-        return; 
++      if (i != NULL) {
++        i->print();
++        return;
+       }
+       if (Interpreter::contains(addr)) {
+         tty->print_cr(INTPTR_FORMAT " is pointing into interpreter code (not bytecode specific)", addr);
+         return;
+       }
+-      // 
++      //
+       if (AdapterHandlerLibrary::contains(b)) {
+         AdapterHandlerLibrary::print_handler(b);
+       }
+       // the stubroutines are generated into a buffer blob
+       StubCodeDesc* d = StubCodeDesc::desc_for(addr);
+-      if (d != NULL) { 
+-        d->print(); 
++      if (d != NULL) {
++        d->print();
+         if (print_pc) tty->cr();
+-        return; 
++        return;
+       }
+       if (StubRoutines::contains(addr)) {
+         tty->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", addr);
+@@ -621,10 +577,10 @@
+     if (print_pc && b->is_nmethod()) {
+       ResourceMark rm;
+       tty->print("%#p: Compiled ", addr);
+-      ((nmethod*)b)->method()->print_value_on(tty); 
++      ((nmethod*)b)->method()->print_value_on(tty);
+       tty->print("  = (CodeBlob*)" INTPTR_FORMAT, b);
+       tty->cr();
+-      return; 
++      return;
+     }
+     if ( b->is_nmethod()) {
+       if (b->is_zombie()) {
+@@ -633,8 +589,8 @@
+         tty->print_cr(INTPTR_FORMAT " is non-entrant nmethod", b);
+       }
+     }
+-    b->print(); 
+-    return; 
++    b->print();
++    return;
+   }
+ 
+   if (Universe::heap()->is_in_reserved(addr)) {
+@@ -649,19 +605,19 @@
+       print = true;
+     }
+     if (print) {
+-      oop(p)->print(); 
++      oop(p)->print();
+       if (p != (HeapWord*)x && oop(p)->is_constMethod() &&
+-	  constMethodOop(p)->contains(addr)) {
+-	Thread *thread = Thread::current();
+-	HandleMark hm(thread);
+-	methodHandle mh (thread, constMethodOop(p)->method());
+-	if (!mh->is_native()) {
+-	  tty->print_cr("bci_from(%p) = %d; print_codes():",
+-			addr, mh->bci_from(address(x)));
+-	  mh->print_codes();
+-	}
++          constMethodOop(p)->contains(addr)) {
++        Thread *thread = Thread::current();
++        HandleMark hm(thread);
++        methodHandle mh (thread, constMethodOop(p)->method());
++        if (!mh->is_native()) {
++          tty->print_cr("bci_from(%p) = %d; print_codes():",
++                        addr, mh->bci_from(address(x)));
++          mh->print_codes();
++        }
+       }
+-      return; 
++      return;
+     }
+   }
+   if (JNIHandles::is_global_handle((jobject) addr)) {
+@@ -689,7 +645,7 @@
+        return;
+     }
+   }
+-  
++
+   // Try an OS specific find
+   if (os::find(addr)) {
+     return;
+@@ -759,7 +715,7 @@
+         if (k->name() != NULL) {
+           ResourceMark rm;
+           const char* ext = k->external_name();
+-          if ( strcmp(_target, ext) == 0 ) { 
++          if ( strcmp(_target, ext) == 0 ) {
+             tty->print_cr("Found " INTPTR_FORMAT, obj);
+             obj->print();
+           }
+@@ -768,7 +724,7 @@
+     }
+ };
+ 
+-// 
++//
+ extern "C" void findclass(const char name[]) {
+   Command c("findclass");
+   if (name != NULL) {
+@@ -808,7 +764,7 @@
+ void pp(intptr_t p)          { pp((void*)p); }
+ void pp(oop p)               { pp((void*)p); }
+ 
+-void help() { 
++void help() {
+   Command c("help");
+   tty->print_cr("basic");
+   tty->print_cr("  pp(void* p)   - try to make sense of p");
+@@ -818,7 +774,7 @@
+   tty->print_cr("  pm(int pc)    - print methodOop given compiled PC");
+   tty->print_cr("  findm(intptr_t pc) - finds methodOop");
+   tty->print_cr("  find(intptr_t x)   - finds & prints nmethod/stub/bytecode/oop based on pointer into it");
+- 
++
+   tty->print_cr("misc.");
+   tty->print_cr("  flush()       - flushes the log file");
+   tty->print_cr("  events()      - dump last 50 events");
+@@ -872,7 +828,7 @@
+   (char *)"thread", CMDID_THREADS, "Dump Info on all Threads",
+   (char *)0, CMDID_ILLEGAL
+ };
+-   
++
+ 
+ // get_debug_command()
+ //
+@@ -913,30 +869,30 @@
+           switch ( CommandList[i].code ) {
+               case CMDID_PS:
+                 ps();
+-		break;
++                break;
+               case CMDID_PSS:
+                 pss();
+-		break;
++                break;
+               case CMDID_PSF:
+                 psf();
+-		break;
++                break;
+               case CMDID_FINDM:
+                 tty->print("Please enter the hex addr to pass to findm: ");
+                 scanf("%I64X", &addr);
+                 m = (methodOop)findm(addr);
+                 tty->print("findm(0x%I64X) returned 0x%I64X\n", addr, m);
+-		break;
++                break;
+               case CMDID_FINDNM:
+                 tty->print("Please enter the hex addr to pass to findnm: ");
+                 scanf("%I64X", &addr);
+                 nm = (nmethod*)findnm(addr);
+                 tty->print("findnm(0x%I64X) returned 0x%I64X\n", addr, nm);
+-		break;
++                break;
+               case CMDID_PP:
+                 tty->print("Please enter the hex addr to pass to pp: ");
+                 scanf("%I64X", &addr);
+                 pp((void*)addr);
+-		break;
++                break;
+               case CMDID_EXIT:
+                 exit(0);
+               case CMDID_HELP:
+@@ -947,35 +903,34 @@
+                   tty->print_cr("  %s --  %s\n", CommandList[j].name,
+                                                  CommandList[j].description );
+                 }
+-		break;
++                break;
+               case CMDID_QUIT:
+                 return;
+-		break;
++                break;
+               case CMDID_BPT:
+                 BREAKPOINT;
+-		break;
++                break;
+               case CMDID_VERIFY:
+                 verify();;
+-		break;
++                break;
+               case CMDID_THREADS:
+                 threads();;
+-		break;
++                break;
+               case CMDID_HSFIND:
+                 tty->print("Please enter the hex addr to pass to hsfind: ");
+                 scanf("%I64X", &addr);
+                 tty->print("Calling hsfind(0x%I64X)\n", addr);
+                 hsfind(addr);
+-		break;
++                break;
+               default:
+               case CMDID_ILLEGAL:
+-		break;
++                break;
+           }
+         }
+-      } 
++      }
+     }
+   }
+ }
+ #endif
+ 
+ #endif // PRODUCT
+-
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/debug.hpp openjdk/hotspot/src/share/vm/utilities/debug.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/debug.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/debug.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)debug.hpp	1.50 07/05/05 17:07:07 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // assertions
+@@ -48,12 +45,12 @@
+     }
+ #endif
+ 
+-// This version of assert is for use with checking return status from 
+-// library calls that return actual error values eg. EINVAL, 
+-// ENOMEM etc, rather than returning -1 and setting errno. 
+-// When the status is not what is expected it is very useful to know 
+-// what status was actually returned, so we pass the status variable as 
+-// an extra arg and use strerror to convert it to a meaningful string 
++// This version of assert is for use with checking return status from
++// library calls that return actual error values eg. EINVAL,
++// ENOMEM etc, rather than returning -1 and setting errno.
++// When the status is not what is expected it is very useful to know
++// what status was actually returned, so we pass the status variable as
++// an extra arg and use strerror to convert it to a meaningful string
+ // like "Invalid argument", "out of memory" etc
+ #define assert_status(p, status, msg)                                     \
+    do {                                                                   \
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/defaultStream.hpp openjdk/hotspot/src/share/vm/utilities/defaultStream.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/defaultStream.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/defaultStream.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)defaultStream.hpp	1.13 07/05/05 17:07:08 JVM"
+-#endif
+ /*
+  * Copyright 2003-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class defaultStream : public xmlTextStream {
+@@ -44,27 +41,27 @@
+  public:
+   // must defer time stamp due to the fact that os::init() hasn't
+   // yet been called and os::elapsed_counter() may not be valid
+-  defaultStream() { 
++  defaultStream() {
+     _log_file = NULL;
+     _inited = false;
+     _writer = -1;
+     _last_writer = -1;
+   }
+ 
+-  ~defaultStream() { 
++  ~defaultStream() {
+     if (has_log_file())  finish_log();
+   }
+ 
+   static inline FILE* output_stream() {
+     return DisplayVMOutputToStderr ? _error_stream : _output_stream;
+   }
+-  static inline FILE* error_stream() { 
++  static inline FILE* error_stream() {
+     return DisplayVMOutputToStdout ? _output_stream : _error_stream;
+   }
+-  static inline int output_fd() { 
++  static inline int output_fd() {
+     return DisplayVMOutputToStderr ? _error_fd : _output_fd;
+   }
+-  static inline int error_fd() { 
++  static inline int error_fd() {
+     return DisplayVMOutputToStdout ? _output_fd : _error_fd;
+   }
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/dtrace.hpp openjdk/hotspot/src/share/vm/utilities/dtrace.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/dtrace.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/dtrace.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)dtrace.hpp	1.11 07/05/05 17:07:08 JVM"
+-#endif
+ /*
+  * Copyright 2005-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #if defined(SOLARIS) && defined(DTRACE_ENABLED)
+@@ -126,4 +123,3 @@
+   HS_DTRACE_PROBE_N(provider,name,((uintptr_t)a0,(uintptr_t)a1,(uintptr_t)a2,\
+     (uintptr_t)a3,(uintptr_t)a4,(uintptr_t)a5,(uintptr_t)a6,(uintptr_t)a7,\
+     (uintptr_t)a8,(uintptr_t)a9))
+-
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/events.cpp openjdk/hotspot/src/share/vm/utilities/events.cpp
+--- openjdk6/hotspot/src/share/vm/utilities/events.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/events.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)events.cpp	1.40 07/05/05 17:07:07 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -36,7 +33,7 @@
+ 
+ typedef u4 EventID;
+ 
+-class Event VALUE_OBJ_CLASS_SPEC  {     
++class Event VALUE_OBJ_CLASS_SPEC  {
+  private:
+   jlong       _time_tick;
+   intx        _thread_id;
+@@ -88,7 +85,7 @@
+ // will get different event id, and then write to different buffer location.
+ // However, it is assumed that add_event() is quick enough (or buffer size
+ // is big enough), so when one thread is adding event, there can't be more
+-// than "size" events created by other threads; otherwise we'll end up having 
++// than "size" events created by other threads; otherwise we'll end up having
+ // two threads writing to the same location.
+ 
+ class EventBuffer : AllStatic {
+@@ -121,7 +118,7 @@
+ 
+   // add a new event to the queue; if EventBuffer is full, this call will
+   // overwrite the oldest event in the queue
+-  static EventID add_event(const char* format, 
++  static EventID add_event(const char* format,
+                            intptr_t arg_1, intptr_t arg_2, intptr_t arg_3) {
+     // assign a unique id
+     EventID id = get_next_event_id();
+@@ -206,7 +203,7 @@
+ }
+ 
+ void Events::print_last(outputStream *st, int number) {
+-  EventBuffer::print_last(st, number);  
++  EventBuffer::print_last(st, number);
+ }
+ 
+ ///////////////////////////////////////////////////////////////////////////
+@@ -250,4 +247,3 @@
+ int print_all_events(outputStream *st) { return 0; }
+ 
+ #endif // PRODUCT
+-
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/events.hpp openjdk/hotspot/src/share/vm/utilities/events.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/events.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/events.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)events.hpp	1.22 07/05/05 17:07:09 JVM"
+-#endif
+ /*
+  * Copyright 1997-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Events and EventMark provide interfaces to log events taking place in the vm.
+@@ -30,7 +27,7 @@
+ // often provides crucial information about events leading up to the crash.
+ //
+ // All arguments past the format string must be passed as an intptr_t.
+-// 
++//
+ // To log a single event use:
+ //    Events::log("New nmethod has been created " INTPTR_FORMAT, nm);
+ //
+@@ -44,7 +41,7 @@
+ //   Max 3 arguments are saved for each logged event.
+ //
+ 
+-class Events : AllStatic {  
++class Events : AllStatic {
+  public:
+   // Logs an event, format as printf
+   static void log(const char* format, ...) PRODUCT_RETURN;
+@@ -65,4 +62,3 @@
+ };
+ 
+ int print_all_events(outputStream *st);
+-
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/exceptions.cpp openjdk/hotspot/src/share/vm/utilities/exceptions.cpp
+--- openjdk6/hotspot/src/share/vm/utilities/exceptions.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/exceptions.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)exceptions.cpp	1.99 07/05/05 17:07:09 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -59,7 +56,7 @@
+ 
+ bool Exceptions::special_exception(Thread* thread, const char* file, int line, Handle h_exception) {
+   // bootstrapping check
+-  if (!Universe::is_fully_initialized()) {     
++  if (!Universe::is_fully_initialized()) {
+    vm_exit_during_initialization(h_exception);
+    ShouldNotReachHere();
+   }
+@@ -77,9 +74,9 @@
+ 
+ bool Exceptions::special_exception(Thread* thread, const char* file, int line, symbolHandle h_name, const char* message) {
+   // bootstrapping check
+-  if (!Universe::is_fully_initialized()) {     
++  if (!Universe::is_fully_initialized()) {
+     if (h_name.is_null()) {
+-      // atleast an informative message. 
++      // atleast an informative message.
+       vm_exit_during_initialization("Exception", message);
+     } else {
+       vm_exit_during_initialization(h_name, message);
+@@ -98,7 +95,7 @@
+   return false;
+ }
+ 
+-// This method should only be called from generated code, 
++// This method should only be called from generated code,
+ // therefore the exception oop should be in the oopmap.
+ void Exceptions::_throw_oop(Thread* thread, const char* file, int line, oop exception) {
+   assert(exception != NULL, "exception should not be NULL");
+@@ -113,20 +110,20 @@
+   if (TraceExceptions) {
+     ttyLocker ttyl;
+     ResourceMark rm;
+-    tty->print_cr("Exception <%s> (" INTPTR_FORMAT " ) \nthrown [%s, line %d]\nfor thread " INTPTR_FORMAT, 
++    tty->print_cr("Exception <%s> (" INTPTR_FORMAT " ) \nthrown [%s, line %d]\nfor thread " INTPTR_FORMAT,
+                       h_exception->print_value_string(), (address)h_exception(), file, line, thread);
+-  }  
++  }
+   // for AbortVMOnException flag
+   NOT_PRODUCT(Exceptions::debug_check_abort(h_exception));
+ 
+   // Check for special boot-strapping/vm-thread handling
+   if (special_exception(thread, file, line, h_exception)) return;
+- 
++
+   assert(h_exception->is_a(SystemDictionary::throwable_klass()), "exception is not a subclass of java/lang/Throwable");
+-    
++
+   // set the pending exception
+   thread->set_pending_exception(h_exception(), file, line);
+-  
++
+   // vm log
+   Events::log("throw_exception " INTPTR_FORMAT, (address)h_exception());
+ }
+@@ -206,9 +203,9 @@
+ void Exceptions::fthrow(Thread* thread, const char* file, int line, symbolHandle h_name, const char* format, ...) {
+   const int max_msg_size = 1024;
+   va_list ap;
+-  va_start(ap, format);  
++  va_start(ap, format);
+   char msg[max_msg_size];
+-  vsnprintf(msg, max_msg_size, format, ap); 
++  vsnprintf(msg, max_msg_size, format, ap);
+   msg[max_msg_size-1] = '\0';
+   va_end(ap);
+   _throw_msg(thread, file, line, h_name, msg);
+@@ -217,77 +214,77 @@
+ // Creates an exception oop, calls the <init> method with the given signature.
+ // and returns a Handle
+ // Initializes the cause if cause non-null
+-Handle Exceptions::new_exception(Thread *thread, symbolHandle h_name, 
+-				 symbolHandle signature, 
+-				 JavaCallArguments *args, 
+-                                 Handle h_cause, Handle h_loader, 
+-				 Handle h_protection_domain) {
+-  assert(Universe::is_fully_initialized(), 
++Handle Exceptions::new_exception(Thread *thread, symbolHandle h_name,
++                                 symbolHandle signature,
++                                 JavaCallArguments *args,
++                                 Handle h_cause, Handle h_loader,
++                                 Handle h_protection_domain) {
++  assert(Universe::is_fully_initialized(),
+     "cannot be called during initialization");
+   assert(thread->is_Java_thread(), "can only be called by a Java thread");
+   assert(!thread->has_pending_exception(), "already has exception");
+ 
+   Handle h_exception;
+-  
++
+   // Resolve exception klass
+   klassOop ik = SystemDictionary::resolve_or_fail(h_name, h_loader, h_protection_domain, true, thread);
+   instanceKlassHandle klass (thread, ik);
+ 
+-  if (!thread->has_pending_exception()) {    
++  if (!thread->has_pending_exception()) {
+     assert(klass.not_null(), "klass must exist");
+     // We are about to create an instance - so make sure that klass is initialized
+     klass->initialize(thread);
+-    if (!thread->has_pending_exception()) {      
++    if (!thread->has_pending_exception()) {
+       // Allocate new exception
+       h_exception = klass->allocate_instance_handle(thread);
+-      if (!thread->has_pending_exception()) {      
+-        JavaValue result(T_VOID);      
++      if (!thread->has_pending_exception()) {
++        JavaValue result(T_VOID);
+         args->set_receiver(h_exception);
+         // Call constructor
+-        JavaCalls::call_special(&result, klass, 
++        JavaCalls::call_special(&result, klass,
+                                          vmSymbolHandles::object_initializer_name(),
+                                          signature,
+                                          args,
+                                          thread);
+-    
++
+       }
+     }
+ 
+     // Future: object initializer should take a cause argument
+     if (h_cause() != NULL) {
+-      assert(h_cause->is_a(SystemDictionary::throwable_klass()), 
++      assert(h_cause->is_a(SystemDictionary::throwable_klass()),
+           "exception cause is not a subclass of java/lang/Throwable");
+-      JavaValue result1(T_OBJECT);      
+-      JavaCallArguments args1; 
++      JavaValue result1(T_OBJECT);
++      JavaCallArguments args1;
+       args1.set_receiver(h_exception);
+-      args1.push_oop(h_cause); 
+-      JavaCalls::call_virtual(&result1, klass, 
++      args1.push_oop(h_cause);
++      JavaCalls::call_virtual(&result1, klass,
+                                      vmSymbolHandles::initCause_name(),
+                                      vmSymbolHandles::throwable_throwable_signature(),
+                                      &args1,
+                                      thread);
+     }
+   }
+-   
++
+   // Check if another exception was thrown in the process, if so rethrow that one
+   if (thread->has_pending_exception()) {
+     h_exception = Handle(thread, thread->pending_exception());
+     thread->clear_pending_exception();
+   }
+-  return h_exception;  
++  return h_exception;
+ }
+ 
+-// Convenience method. Calls either the <init>() or <init>(String) method when 
++// Convenience method. Calls either the <init>() or <init>(String) method when
+ // creating a new exception
+-Handle Exceptions::new_exception(Thread* thread, symbolHandle h_name, 
+-				 const char* message, Handle h_cause,
+-                                 Handle h_loader, 
+-				 Handle h_protection_domain, 
+-				 ExceptionMsgToUtf8Mode to_utf8_safe) {
+-  JavaCallArguments args; 
++Handle Exceptions::new_exception(Thread* thread, symbolHandle h_name,
++                                 const char* message, Handle h_cause,
++                                 Handle h_loader,
++                                 Handle h_protection_domain,
++                                 ExceptionMsgToUtf8Mode to_utf8_safe) {
++  JavaCallArguments args;
+   symbolHandle signature;
+   if (message == NULL) {
+-    signature = vmSymbolHandles::void_method_signature();    
++    signature = vmSymbolHandles::void_method_signature();
+   } else {
+     // We want to allocate storage, but we can't do that if there's
+     // a pending exception, so we preserve any pending exception
+@@ -317,19 +314,19 @@
+     if (incoming_exception.not_null()) {
+       return incoming_exception;
+     }
+-    args.push_oop(msg); 
+-    signature = vmSymbolHandles::string_void_signature();    
++    args.push_oop(msg);
++    signature = vmSymbolHandles::string_void_signature();
+   }
+-  return new_exception(thread, h_name, signature, &args, h_cause, h_loader, h_protection_domain);       
++  return new_exception(thread, h_name, signature, &args, h_cause, h_loader, h_protection_domain);
+ }
+ 
+ // Another convenience method that creates handles for null class loaders and
+-// protection domains and null causes. 
++// protection domains and null causes.
+ // If the last parameter 'to_utf8_mode' is safe_to_utf8,
+-// it means we can safely ignore the encoding scheme of the message string and 
+-// convert it directly to a java UTF8 string. Otherwise, we need to take the 
+-// encoding scheme of the string into account. One thing we should do at some 
+-// point is to push this flag down to class java_lang_String since other 
++// it means we can safely ignore the encoding scheme of the message string and
++// convert it directly to a java UTF8 string. Otherwise, we need to take the
++// encoding scheme of the string into account. One thing we should do at some
++// point is to push this flag down to class java_lang_String since other
+ // classes may need similar functionalities.
+ Handle Exceptions::new_exception(Thread* thread,
+                                  symbolOop name,
+@@ -340,7 +337,7 @@
+   Handle       h_loader(thread, NULL);
+   Handle       h_prot(thread, NULL);
+   Handle       h_cause(thread, NULL);
+-  return Exceptions::new_exception(thread, h_name, message, h_cause, h_loader, 
++  return Exceptions::new_exception(thread, h_name, message, h_cause, h_loader,
+                                    h_prot, to_utf8_safe);
+ }
+ 
+@@ -376,14 +373,14 @@
+ #ifndef PRODUCT
+ // caller frees value_string if necessary
+ void Exceptions::debug_check_abort(const char *value_string) {
+-  if (AbortVMOnException[0] != '\0' && value_string != NULL &&
++  if (AbortVMOnException != NULL && value_string != NULL &&
+       strstr(value_string, AbortVMOnException)) {
+     fatal1("Saw %s, aborting", value_string);
+   }
+ }
+ 
+ void Exceptions::debug_check_abort(Handle exception) {
+-  if (AbortVMOnException[0] != '\0') {
++  if (AbortVMOnException != NULL) {
+     ResourceMark rm;
+     debug_check_abort(instanceKlass::cast(exception()->klass())->external_name());
+   }
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/exceptions.hpp openjdk/hotspot/src/share/vm/utilities/exceptions.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/exceptions.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/exceptions.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)exceptions.hpp	1.51 07/05/05 17:07:09 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This file provides the basic support for exception handling in the VM.
+@@ -98,7 +95,7 @@
+   static bool special_exception(Thread *thread, const char* file, int line, Handle exception);
+   static bool special_exception(Thread* thread, const char* file, int line, symbolHandle name, const char* message);
+  public:
+-  // this enum is defined to indicate whether it is safe to 
++  // this enum is defined to indicate whether it is safe to
+   // ignore the encoding scheme of the original message string.
+   typedef enum {
+     safe_to_utf8 = 0,
+@@ -107,18 +104,18 @@
+   // Throw exceptions: w/o message, w/ message & with formatted message.
+   static void _throw_oop(Thread* thread, const char* file, int line, oop exception);
+   static void _throw(Thread* thread, const char* file, int line, Handle exception);
+-  static void _throw_msg(Thread* thread, const char* file, int line, 
+-			 symbolHandle name, const char* message, Handle loader, 
+-			 Handle protection_domain);
+-  static void _throw_msg(Thread* thread, const char* file, int line, 
+-			 symbolOop name, const char* message);
+-  static void _throw_msg(Thread* thread, const char* file, int line, 
+-			 symbolHandle name, const char* message);
+-  static void _throw_args(Thread* thread, const char* file, int line, 
+-			  symbolHandle name, symbolHandle signature, 
+-			  JavaCallArguments* args);
+-  static void _throw_msg_cause(Thread* thread, const char* file, 
+-                         int line, symbolHandle h_name, const char* message, 
++  static void _throw_msg(Thread* thread, const char* file, int line,
++                         symbolHandle name, const char* message, Handle loader,
++                         Handle protection_domain);
++  static void _throw_msg(Thread* thread, const char* file, int line,
++                         symbolOop name, const char* message);
++  static void _throw_msg(Thread* thread, const char* file, int line,
++                         symbolHandle name, const char* message);
++  static void _throw_args(Thread* thread, const char* file, int line,
++                          symbolHandle name, symbolHandle signature,
++                          JavaCallArguments* args);
++  static void _throw_msg_cause(Thread* thread, const char* file,
++                         int line, symbolHandle h_name, const char* message,
+                          Handle h_cause, Handle h_loader, Handle h_protection_domain);
+   static void _throw_msg_cause(Thread* thread, const char* file, int line,
+                             symbolHandle name, const char* message, Handle cause);
+@@ -126,21 +123,21 @@
+   // There is no THROW... macro for this method. Caller should remember
+   // to do a return after calling it.
+   static void fthrow(Thread* thread, const char* file, int line, symbolHandle name,
+-		     const char* format, ...);
++                     const char* format, ...);
+ 
+   // Create and initialize a new exception
+-  static Handle new_exception(Thread* thread, symbolHandle name, 
+-			      symbolHandle signature, JavaCallArguments* args, 
+-			      Handle cause, Handle loader, 
++  static Handle new_exception(Thread* thread, symbolHandle name,
++                              symbolHandle signature, JavaCallArguments* args,
++                              Handle cause, Handle loader,
+                               Handle protection_domain);
+ 
+-  static Handle new_exception(Thread* thread, symbolHandle name,  
+-			      const char* message, Handle cause, Handle loader, 
+-			      Handle protection_domain, 
+-		              ExceptionMsgToUtf8Mode to_utf8_safe = safe_to_utf8);
++  static Handle new_exception(Thread* thread, symbolHandle name,
++                              const char* message, Handle cause, Handle loader,
++                              Handle protection_domain,
++                              ExceptionMsgToUtf8Mode to_utf8_safe = safe_to_utf8);
+ 
+  static Handle new_exception(Thread* thread, symbolOop name,
+-                             const char* message, 
++                             const char* message,
+                              ExceptionMsgToUtf8Mode to_utf8_safe = safe_to_utf8);
+ 
+   static void throw_stack_overflow_exception(Thread* thread, const char* file, int line);
+@@ -271,10 +268,8 @@
+ // pending exception exists upon entering its scope and tests that no pending exception
+ // exists when leaving the scope.
+ 
+-// See also preserveException.hpp for PRESERVE_EXCEPTION_MARK macro, 
++// See also preserveException.hpp for PRESERVE_EXCEPTION_MARK macro,
+ // which preserves pre-existing exceptions and does not allow new
+ // exceptions.
+ 
+ #define EXCEPTION_MARK                           Thread* THREAD; ExceptionMark __em(THREAD);
+-
+-
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/globalDefinitions.cpp openjdk/hotspot/src/share/vm/utilities/globalDefinitions.cpp
+--- openjdk6/hotspot/src/share/vm/utilities/globalDefinitions.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/globalDefinitions.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)globalDefinitions.cpp	1.48 07/05/05 17:07:09 JVM"
+-#endif
+ /*
+  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -97,13 +94,13 @@
+       case T_ADDRESS:   // random raw pointer
+       case T_CONFLICT:  // might as well support a bottom type
+       case T_VOID:      // padding or other unaddressed word
+-	// layout type must map to itself
+-	assert(vt == ft, "");
+-	break;
++        // layout type must map to itself
++        assert(vt == ft, "");
++        break;
+       default:
+-	// non-layout type must map to a (different) layout type
+-	assert(vt != ft, "");
+-	assert(ft == type2field[ft], "");
++        // non-layout type must map to a (different) layout type
++        assert(vt != ft, "");
++        assert(ft == type2field[ft], "");
+       }
+       // every type must map to same-sized layout type:
+       assert(type2size[vt] == type2size[ft], "");
+@@ -180,14 +177,14 @@
+   (BasicType)0,            // 1,
+   (BasicType)0,            // 2,
+   (BasicType)0,            // 3,
+-  T_BOOLEAN,     	   // T_BOOLEAN  =  4,
+-  T_CHAR,     	  	   // T_CHAR     =  5,
+-  T_FLOAT,   		   // T_FLOAT    =  6,
+-  T_DOUBLE,  		   // T_DOUBLE   =  7,
+-  T_BYTE,     		   // T_BYTE     =  8,
+-  T_SHORT,     		   // T_SHORT    =  9,
+-  T_INT,     		   // T_INT      = 10,
+-  T_LONG,    		   // T_LONG     = 11,
++  T_BOOLEAN,               // T_BOOLEAN  =  4,
++  T_CHAR,                  // T_CHAR     =  5,
++  T_FLOAT,                 // T_FLOAT    =  6,
++  T_DOUBLE,                // T_DOUBLE   =  7,
++  T_BYTE,                  // T_BYTE     =  8,
++  T_SHORT,                 // T_SHORT    =  9,
++  T_INT,                   // T_INT      = 10,
++  T_LONG,                  // T_LONG     = 11,
+   T_OBJECT,                // T_OBJECT   = 12,
+   T_OBJECT,                // T_ARRAY    = 13,
+   T_VOID,                  // T_VOID     = 14,
+@@ -233,7 +230,7 @@
+   T_OBJECT_aelem_bytes,   // T_OBJECT   = 12,
+   T_ARRAY_aelem_bytes,    // T_ARRAY    = 13,
+   0,                      // T_VOID     = 14,
+-  T_INT_aelem_bytes,   	  // T_ADDRESS  = 15,	
++  T_INT_aelem_bytes,      // T_ADDRESS  = 15,
+   0                       // T_CONFLICT = 16,
+ };
+ 
+@@ -281,18 +278,18 @@
+ // least common multiple
+ size_t lcm(size_t a, size_t b) {
+     size_t cur, div, next;
+-   
+-    cur = MAX2(a, b); 
++
++    cur = MAX2(a, b);
+     div = MIN2(a, b);
+-    
++
+     assert(div != 0, "lcm requires positive arguments");
+-    
+-    
++
++
+     while ((next = cur % div) != 0) {
+-	cur = div; div = next;
++        cur = div; div = next;
+     }
+- 
+-    
++
++
+     julong result = julong(a) * b / div;
+     assert(result <= (size_t)max_uintx, "Integer overflow in lcm");
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/globalDefinitions_gcc.hpp openjdk/hotspot/src/share/vm/utilities/globalDefinitions_gcc.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/globalDefinitions_gcc.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/globalDefinitions_gcc.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)globalDefinitions_gcc.hpp	1.47 07/05/05 17:07:10 JVM"
+-#endif
+ /*
+  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This file holds compiler-dependent includes,
+@@ -44,7 +41,7 @@
+ #include <math.h>
+ #ifndef FP_PZERO
+ // Linux doesn't have positive/negative zero
+-#define FP_PZERO FP_ZERO  
++#define FP_PZERO FP_ZERO
+ #endif
+ #if (!defined fpclass) && ((!defined SPARC) || (!defined SOLARIS))
+ #define fpclass fpclassify
+@@ -110,7 +107,7 @@
+ #endif
+ 
+ // NULL vs NULL_WORD:
+-// On Linux NULL is defined as a special type '__null'. Assigning __null to 
++// On Linux NULL is defined as a special type '__null'. Assigning __null to
+ // integer variable will cause gcc warning. Use NULL_WORD in places where a
+ // pointer is stored as integer value.  On some platforms, sizeof(intptr_t) >
+ // sizeof(void*), so here we want something which is integer type, but has the
+@@ -125,7 +122,7 @@
+   #define NULL_WORD  NULL
+ #endif
+ 
+-#ifndef	LINUX
++#ifndef LINUX
+ // Compiler-specific primitive types
+ typedef unsigned short     uint16_t;
+ #ifndef _UINT32_T
+@@ -139,8 +136,8 @@
+ typedef unsigned long long uint64_t;
+ #endif // _UINT64_T
+ // %%%% how to access definition of intptr_t portably in 5.5 onward?
+-typedef int			intptr_t;
+-typedef unsigned int		uintptr_t;
++typedef int                     intptr_t;
++typedef unsigned int            uintptr_t;
+ // If this gets an error, figure out a symbol XXX that implies the
+ // prior definition of intptr_t, and add "&& !defined(XXX)" above.
+ #endif // _SYS_INT_TYPES_H
+@@ -186,11 +183,11 @@
+ //
+ // This also means that pointers to functions can no longer be "hidden"
+ // in opaque types like void * because at the invokation point warnings
+-// will be generated. While this makes perfect sense from a type safety 
++// will be generated. While this makes perfect sense from a type safety
+ // point of view it causes a lot of warnings on old code using C header
+ // files. Here are some typedefs to make the job of silencing warnings
+ // a bit easier.
+-// 
++//
+ // The final kick in the teeth is that you can only have extern "C" linkage
+ // specified at file scope. So these typedefs are here rather than in the
+ // .hpp for the class (os:Solaris usually) that needs them.
+@@ -267,7 +264,7 @@
+ #endif // _LP64
+ 
+ // HACK: gcc warns about applying offsetof() to non-POD object or calculating
+-//       offset directly when base address is NULL. Use 16 to get around the 
++//       offset directly when base address is NULL. Use 16 to get around the
+ //       warning. gcc-3.4 has an option -Wno-invalid-offsetof to suppress
+ //       this warning.
+ #define offset_of(klass,field) (size_t)((intx)&(((klass*)16)->field) - 16)
+@@ -276,4 +273,3 @@
+ # undef offsetof
+ #endif
+ #define offsetof(klass,field) offset_of(klass,field)
+-
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/globalDefinitions.hpp openjdk/hotspot/src/share/vm/utilities/globalDefinitions.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/globalDefinitions.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/globalDefinitions.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)globalDefinitions.hpp	1.217 07/05/23 10:54:27 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This file holds all globally used constants & types, class (forward)
+@@ -60,10 +57,10 @@
+ const int WordAlignmentMask  = (1 << LogBytesPerWord) - 1;
+ const int LongAlignmentMask  = (1 << LogBytesPerLong) - 1;
+ 
+-const int WordsPerLong       = 2;	// Number of stack entries for longs
++const int WordsPerLong       = 2;       // Number of stack entries for longs
+ 
+ const int oopSize            = sizeof(char*);
+-const int wordSize           = sizeof(char*); 
++const int wordSize           = sizeof(char*);
+ const int longSize           = sizeof(jlong);
+ const int jintSize           = sizeof(jint);
+ const int size_tSize         = sizeof(size_t);
+@@ -75,11 +72,11 @@
+ const int LogBitsPerOop      = LogBitsPerWord;
+ const int BytesPerOop        = 1 << LogBytesPerOop;
+ const int BitsPerOop         = 1 << LogBitsPerOop;
+- 
++
+ const int BitsPerJavaInteger = 32;
+ const int BitsPerSize_t      = size_tSize * BitsPerByte;
+ 
+-// In fact this should be 
++// In fact this should be
+ // log2_intptr(sizeof(class JavaThread)) - log2_intptr(64);
+ // see os::set_memory_serialize_page()
+ #ifdef _LP64
+@@ -90,7 +87,7 @@
+ 
+ // An opaque struct of heap-word width, so that HeapWord* can be a generic
+ // pointer into the heap.  We require that object sizes be measured in
+-// units of heap words, so that that 
++// units of heap words, so that that
+ //   HeapWord* hw;
+ //   hw += oop(hw)->foo();
+ // works, where foo is a method (like size or scavenge) that returns the
+@@ -140,9 +137,9 @@
+ // Constants for converting from a base unit to milli-base units.  For
+ // example from seconds to milliseconds and microseconds
+ 
+-const int MILLIUNITS	= 1000;		// milli units per base unit
+-const int MICROUNITS	= 1000000;	// micro units per base unit
+-const int NANOUNITS	= 1000000000;	// nano units per base unit
++const int MILLIUNITS    = 1000;         // milli units per base unit
++const int MICROUNITS    = 1000000;      // micro units per base unit
++const int NANOUNITS     = 1000000000;   // nano units per base unit
+ 
+ inline const char* proper_unit_for_byte_size(size_t s) {
+   if (s >= 10*M) {
+@@ -179,10 +176,10 @@
+ const uintx max_uintx = (uintx)-1;
+ 
+ // Table of values:
+-// 	sizeof intx	    4		    8
+-// min_intx		0x80000000	0x8000000000000000
+-// max_intx		0x7FFFFFFF	0x7FFFFFFFFFFFFFFF
+-// max_uintx		0xFFFFFFFF	0xFFFFFFFFFFFFFFFF
++//      sizeof intx         4               8
++// min_intx             0x80000000      0x8000000000000000
++// max_intx             0x7FFFFFFF      0x7FFFFFFFFFFFFFFF
++// max_uintx            0xFFFFFFFF      0xFFFFFFFFFFFFFFFF
+ 
+ typedef unsigned int uint;   NEEDS_CLEANUP
+ 
+@@ -195,10 +192,10 @@
+ typedef unsigned char u_char;
+ typedef u_char*       address;
+ typedef uintptr_t     address_word; // unsigned integer which will hold a pointer
+-				    // except for some implementations of a C++
+-				    // linkage pointer to function. Should never
+-				    // need one of those to be placed in this
+-				    // type anyway.
++                                    // except for some implementations of a C++
++                                    // linkage pointer to function. Should never
++                                    // need one of those to be placed in this
++                                    // type anyway.
+ 
+ //  Utility functions to "portably" (?) bit twiddle pointers
+ //  Where portable means keep ANSI C++ compilers quiet
+@@ -213,19 +210,19 @@
+ inline address_word  castable_address(void* x)                { return address_word(x) ; }
+ 
+ // Pointer subtraction.
+-// The idea here is to avoid ptrdiff_t, which is signed and so doesn't have 
+-// the range we might need to find differences from one end of the heap 
++// The idea here is to avoid ptrdiff_t, which is signed and so doesn't have
++// the range we might need to find differences from one end of the heap
+ // to the other.
+ // A typical use might be:
+ //     if (pointer_delta(end(), top()) >= size) {
+ //       // enough room for an object of size
+ //       ...
+-// and then additions like 
++// and then additions like
+ //       ... top() + size ...
+ // are safe because we know that top() is at least size below end().
+ inline size_t pointer_delta(const void* left,
+-			    const void* right,
+-			    size_t element_size) {
++                            const void* right,
++                            size_t element_size) {
+   return (((uintptr_t) left) - ((uintptr_t) right)) / element_size;
+ }
+ // A version specialized for HeapWord*'s.
+@@ -302,13 +299,13 @@
+ 
+ #define align_size_up_(size, alignment) (((size) + ((alignment) - 1)) & ~((alignment) - 1))
+ 
+-inline intptr_t align_size_up(intptr_t size, intptr_t alignment) { 
++inline intptr_t align_size_up(intptr_t size, intptr_t alignment) {
+   return align_size_up_(size, alignment);
+ }
+ 
+ #define align_size_down_(size, alignment) ((size) & ~((alignment) - 1))
+ 
+-inline intptr_t align_size_down(intptr_t size, intptr_t alignment) { 
++inline intptr_t align_size_down(intptr_t size, intptr_t alignment) {
+   return align_size_down_(size, alignment);
+ }
+ 
+@@ -316,7 +313,7 @@
+ 
+ #define align_object_size_(size) align_size_up_(size, MinObjAlignment)
+ 
+-inline intptr_t align_object_size(intptr_t size) { 
++inline intptr_t align_object_size(intptr_t size) {
+   return align_size_up(size, MinObjAlignment);
+ }
+ 
+@@ -354,7 +351,7 @@
+ inline jint low (jlong value)                    { return jint(value); }
+ inline jint high(jlong value)                    { return jint(value >> 32); }
+ 
+-// the fancy casts are a hopefully portable way 
++// the fancy casts are a hopefully portable way
+ // to do unsigned 32 to 64 bit type conversion
+ inline void set_low (jlong* value, jint low )    { *value &= (jlong)0xffffffff << 32;
+                                                    *value |= (jlong)(julong)(juint)low; }
+@@ -398,24 +395,24 @@
+ // Convert a char from a classfile signature to a BasicType
+ inline BasicType char2type(char c) {
+   switch( c ) {
+-  case 'B': return T_BYTE;    
+-  case 'C': return T_CHAR;    
+-  case 'D': return T_DOUBLE;  
+-  case 'F': return T_FLOAT;   
+-  case 'I': return T_INT;     
+-  case 'J': return T_LONG;    
+-  case 'S': return T_SHORT;   
+-  case 'Z': return T_BOOLEAN; 
+-  case 'V': return T_VOID;    
+-  case 'L': return T_OBJECT;  
+-  case '[': return T_ARRAY;   
++  case 'B': return T_BYTE;
++  case 'C': return T_CHAR;
++  case 'D': return T_DOUBLE;
++  case 'F': return T_FLOAT;
++  case 'I': return T_INT;
++  case 'J': return T_LONG;
++  case 'S': return T_SHORT;
++  case 'Z': return T_BOOLEAN;
++  case 'V': return T_VOID;
++  case 'L': return T_OBJECT;
++  case '[': return T_ARRAY;
+   }
+   return T_ILLEGAL;
+ }
+ 
+ extern char type2char_tab[T_CONFLICT+1];     // Map a BasicType to a jchar
+ inline char type2char(BasicType t) { return (uint)t < T_CONFLICT+1 ? type2char_tab[t] : 0; }
+-extern int type2size[T_CONFLICT+1];	    // Map BasicType to result stack elements
++extern int type2size[T_CONFLICT+1];         // Map BasicType to result stack elements
+ extern const char* type2name_tab[T_CONFLICT+1];     // Map a BasicType to a jchar
+ inline const char* type2name(BasicType t) { return (uint)t < T_CONFLICT+1 ? type2name_tab[t] : NULL; }
+ extern BasicType name2type(const char* name);
+@@ -482,11 +479,11 @@
+     jlong    l;
+     jobject  h;
+   } JavaCallValue;
+- 
++
+  private:
+   BasicType _type;
+   JavaCallValue _value;
+- 
++
+  public:
+   JavaValue(BasicType t = T_ILLEGAL) { _type = t; }
+ 
+@@ -507,14 +504,14 @@
+  jobject get_jobject() const { return _value.h; }
+  JavaCallValue* get_value_addr() { return &_value; }
+  BasicType get_type() const { return _type; }
+- 
++
+  void set_jfloat(jfloat f) { _value.f = f;}
+  void set_jdouble(jdouble d) { _value.d = d;}
+  void set_jint(jint i) { _value.i = i;}
+  void set_jlong(jlong l) { _value.l = l;}
+  void set_jobject(jobject h) { _value.h = h;}
+  void set_type(BasicType t) { _type = t; }
+- 
++
+  jboolean get_jboolean() const { return (jboolean) (_value.i);}
+  jbyte get_jbyte() const { return (jbyte) (_value.i);}
+  jchar get_jchar() const { return (jchar) (_value.i);}
+@@ -523,12 +520,12 @@
+ };
+ 
+ 
+-#define STACK_BIAS	0
++#define STACK_BIAS      0
+ // V9 Sparc CPU's running in 64 Bit mode use a stack bias of 7ff
+ // in order to extend the reach of the stack pointer.
+ #if defined(SPARC) && defined(_LP64)
+ #undef STACK_BIAS
+-#define STACK_BIAS	0x7ff
++#define STACK_BIAS      0x7ff
+ #endif
+ 
+ 
+@@ -542,14 +539,14 @@
+ // type specific operations (e.g. verification code).
+ 
+ enum TosState {         // describes the tos cache contents
+-  btos = 0, 		// byte, bool tos cached
+-  ctos = 1,		// short, char tos cached
+-  stos = 2,		// short, char tos cached
++  btos = 0,             // byte, bool tos cached
++  ctos = 1,             // short, char tos cached
++  stos = 2,             // short, char tos cached
+   itos = 3,             // int tos cached
+   ltos = 4,             // long tos cached
+   ftos = 5,             // float tos cached
+   dtos = 6,             // double tos cached
+-  atos = 7, 		// object cached
++  atos = 7,             // object cached
+   vtos = 8,             // tos not cached
+   number_of_states,
+   ilgl                  // illegal state: should not occur
+@@ -618,7 +615,7 @@
+   _thread_in_Java           =  8, // running in Java or in stub code
+   _thread_in_Java_trans     =  9, // corresponding transition state (not used, included for completeness)
+   _thread_blocked           = 10, // blocked in vm
+-  _thread_blocked_trans     = 11, // corresponding transition state  
++  _thread_blocked_trans     = 11, // corresponding transition state
+   _thread_max_state         = 12  // maximum thread state+1 - used for statistics allocation
+ };
+ 
+@@ -1043,7 +1040,7 @@
+ 
+ // Printf-style formatters for fixed- and variable-width types as pointers and
+ // integers.
+-// 
++//
+ // Each compiler-specific definitions file (e.g., globalDefinitions_gcc.hpp)
+ // must define the macro FORMAT64_MODIFIER, which is the modifier for '%x' or
+ // '%d' formats to indicate a 64-bit quantity; commonly "l" (in LP64) or "ll"
+@@ -1052,8 +1049,8 @@
+ // Format 32-bit quantities.
+ #define INT32_FORMAT  "%d"
+ #define UINT32_FORMAT "%u"
+-#define INT32_FORMAT_W(width)	"%" #width "d"
+-#define UINT32_FORMAT_W(width)	"%" #width "u"
++#define INT32_FORMAT_W(width)   "%" #width "d"
++#define UINT32_FORMAT_W(width)  "%" #width "u"
+ 
+ #define PTR32_FORMAT  "0x%08x"
+ 
+@@ -1068,28 +1065,28 @@
+ // Format macros that allow the field width to be specified.  The width must be
+ // a string literal (e.g., "8") or a macro that evaluates to one.
+ #ifdef _LP64
+-#define SSIZE_FORMAT_W(width)	INT64_FORMAT_W(width)
+-#define SIZE_FORMAT_W(width)	UINT64_FORMAT_W(width)
++#define SSIZE_FORMAT_W(width)   INT64_FORMAT_W(width)
++#define SIZE_FORMAT_W(width)    UINT64_FORMAT_W(width)
+ #else
+-#define SSIZE_FORMAT_W(width)	INT32_FORMAT_W(width)
+-#define SIZE_FORMAT_W(width)	UINT32_FORMAT_W(width)
++#define SSIZE_FORMAT_W(width)   INT32_FORMAT_W(width)
++#define SIZE_FORMAT_W(width)    UINT32_FORMAT_W(width)
+ #endif // _LP64
+ 
+ // Format pointers and size_t (or size_t-like integer types) which change size
+ // between 32- and 64-bit.
+-#ifdef	_LP64
++#ifdef  _LP64
+ #define PTR_FORMAT    PTR64_FORMAT
+ #define UINTX_FORMAT  UINT64_FORMAT
+ #define INTX_FORMAT   INT64_FORMAT
+ #define SIZE_FORMAT   UINT64_FORMAT
+ #define SSIZE_FORMAT  INT64_FORMAT
+-#else	// !_LP64
++#else   // !_LP64
+ #define PTR_FORMAT    PTR32_FORMAT
+ #define UINTX_FORMAT  UINT32_FORMAT
+ #define INTX_FORMAT   INT32_FORMAT
+ #define SIZE_FORMAT   UINT32_FORMAT
+ #define SSIZE_FORMAT  INT32_FORMAT
+-#endif	// _LP64
++#endif  // _LP64
+ 
+ #define INTPTR_FORMAT PTR_FORMAT
+ 
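
The width-taking macros work purely by token pasting: "%" #width "d" stringizes the width argument and lets the compiler concatenate the pieces into a single format literal, so no runtime '*' width is needed. A standalone sketch with reduced copies of the macros:

    #include <cstdio>

    // Reduced copies of the VM's format macros.
    #define INT32_FORMAT_W(width)  "%" #width "d"
    #define UINT32_FORMAT_W(width) "%" #width "u"

    int main() {
      // "%" "8" "d" concatenates to "%8d" at compile time, so the field
      // width is fixed by the macro argument rather than a runtime value.
      printf(INT32_FORMAT_W(8) "|" UINT32_FORMAT_W(4) "\n", 42, 7u);
      return 0;
    }
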
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp openjdk/hotspot/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)globalDefinitions_sparcWorks.hpp	1.80 07/05/05 17:07:10 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This file holds compiler-dependent includes,
+@@ -98,8 +95,8 @@
+ typedef unsigned long long uint64_t;
+ #endif
+ // %%%% how to access definition of intptr_t portably in 5.5 onward?
+-typedef int			intptr_t;
+-typedef unsigned int		uintptr_t;
++typedef int                     intptr_t;
++typedef unsigned int            uintptr_t;
+ // If this gets an error, figure out a symbol XXX that implies the
+ // prior definition of intptr_t, and add "&& !defined(XXX)" above.
+ #endif
+@@ -141,11 +138,11 @@
+ //
+ // This also means that pointers to functions can no longer be "hidden"
+ // in opaque types like void * because at the invocation point warnings
+-// will be generated. While this makes perfect sense from a type safety 
++// will be generated. While this makes perfect sense from a type safety
+ // point of view it causes a lot of warnings on old code using C header
+ // files. Here are some typedefs to make the job of silencing warnings
+ // a bit easier.
+-// 
++//
+ // The final kick in the teeth is that you can only have extern "C" linkage
+ // specified at file scope. So these typedefs are here rather than in the
+ // .hpp for the class (os:Solaris usually) that needs them.
+@@ -203,7 +200,7 @@
+ 
+ 
+ // Portability macros
+-#define PRAGMA_INTERFACE      
++#define PRAGMA_INTERFACE
+ #define PRAGMA_IMPLEMENTATION
+ #define PRAGMA_IMPLEMENTATION_(arg)
+ #define VALUE_OBJ_CLASS_SPEC    : public _ValueObj
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/globalDefinitions_visCPP.hpp openjdk/hotspot/src/share/vm/utilities/globalDefinitions_visCPP.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/globalDefinitions_visCPP.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/globalDefinitions_visCPP.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)globalDefinitions_visCPP.hpp	1.68 07/05/05 17:07:10 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This file holds compiler-dependent includes,
+@@ -40,7 +37,7 @@
+ # include <time.h>
+ # include <fcntl.h>
+ // Need this on windows to get the math constants (e.g., M_PI).
+-#define	_USE_MATH_DEFINES
++#define _USE_MATH_DEFINES
+ # include <math.h>
+ 
+ // 4810578: varargs unsafe on 32-bit integer/64-bit pointer architectures
+@@ -75,8 +72,8 @@
+ #define NULL_WORD NULL
+ 
+ // Compiler-specific primitive types
+-typedef	unsigned __int8  uint8_t;
+-typedef	unsigned __int16 uint16_t;
++typedef unsigned __int8  uint8_t;
++typedef unsigned __int16 uint16_t;
+ typedef unsigned __int32 uint32_t;
+ typedef unsigned __int64 uint64_t;
+ 
+@@ -85,7 +82,7 @@
+ #else
+ typedef unsigned int uintptr_t;
+ #endif
+-typedef	signed   __int8  int8_t;
++typedef signed   __int8  int8_t;
+ typedef signed   __int16 int16_t;
+ typedef signed   __int32 int32_t;
+ typedef signed   __int64 int64_t;
+@@ -101,7 +98,7 @@
+ // Additional Java basic types
+ 
+ typedef unsigned char    jubyte;
+-typedef	unsigned short   jushort;
++typedef unsigned short   jushort;
+ typedef unsigned int     juint;
+ typedef unsigned __int64 julong;
+ 
+@@ -185,7 +182,7 @@
+ #pragma warning( disable : 4291 ) // no matching operator delete found; memory will not be freed if initialization throws an exception
+ 
+ // Portability macros
+-#define PRAGMA_INTERFACE      
++#define PRAGMA_INTERFACE
+ #define PRAGMA_IMPLEMENTATION
+ #define PRAGMA_IMPLEMENTATION_(arg)
+ #define VALUE_OBJ_CLASS_SPEC    : public _ValueObj
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/growableArray.cpp openjdk/hotspot/src/share/vm/utilities/growableArray.cpp
+--- openjdk6/hotspot/src/share/vm/utilities/growableArray.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/growableArray.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)growableArray.cpp	1.37 07/05/05 17:07:10 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ # include "incls/_precompiled.incl"
+ # include "incls/_growableArray.cpp.incl"
+@@ -35,7 +32,7 @@
+ }
+ 
+ void GenericGrowableArray::check_nesting() {
+-  // Check for insidious allocation bug: if a GrowableArray overflows, the 
++  // Check for insidious allocation bug: if a GrowableArray overflows, the
+   // grown array must be allocated under the same ResourceMark as the original.
+   // Otherwise, the _data array will be deallocated too early.
+   if (on_stack() &&
+@@ -54,4 +51,3 @@
+     return _arena->Amalloc(elementSize * _max);
+   }
+ }
+-
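
The check_nesting guard above protects against a subtle lifetime bug: if the array grows while a younger ResourceMark is active, the reallocated backing store dies when that inner mark is released, leaving the array dangling. A toy model of nested regions (all names hypothetical) that asserts the same invariant:

    #include <cassert>

    // Toy model of nested resource regions (hypothetical, illustration only).
    static int g_depth = 0;
    struct Region {                       // RAII: entering/leaving a region
      Region()  { ++g_depth; }
      ~Region() { --g_depth; }
    };

    struct ToyArray {
      int created_depth;                  // region that owns the backing store
      ToyArray() : created_depth(g_depth) {}
      void grow() {
        // The invariant check_nesting() enforces: reallocating under a
        // deeper region would put the new store in memory that is freed
        // when that inner region exits, leaving this array dangling.
        assert(g_depth == created_depth && "grow only under the original region");
      }
    };

    int main() {
      Region outer;
      ToyArray a;                         // store notionally lives in 'outer'
      {
        Region inner;                     // calling a.grow() here would assert
      }
      a.grow();                           // fine: back in the creating region
      return 0;
    }
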
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/growableArray.hpp openjdk/hotspot/src/share/vm/utilities/growableArray.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/growableArray.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/growableArray.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)growableArray.hpp	1.55 07/05/05 17:07:09 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // A growable array.
+@@ -72,8 +69,8 @@
+ 
+ class GenericGrowableArray : public ResourceObj {
+  protected:
+-  int    _len;		// current length
+-  int    _max;		// maximum length
++  int    _len;          // current length
++  int    _max;          // maximum length
+   Arena* _arena;        // Indicates where allocation occurs:
+                         //   0 means default ResourceArea
+                         //   1 means on C heap
+@@ -90,7 +87,7 @@
+   // Where are we going to allocate memory?
+   bool on_C_heap() { return _arena == (Arena*)1; }
+   bool on_stack () { return _arena == NULL;      }
+-  bool on_arena () { return _arena >  (Arena*)1;  } 
++  bool on_arena () { return _arena >  (Arena*)1;  }
+ 
+   // This GA will use the resource stack for storage if c_heap==false,
+   // Else it will use the C heap.  Use clear_and_deallocate to avoid leaks.
+@@ -118,7 +115,7 @@
+ 
+ template<class E> class GrowableArray : public GenericGrowableArray {
+  private:
+-  E*     _data; 	// data array
++  E*     _data;         // data array
+ 
+   void grow(int j);
+   void raw_at_put_grow(int i, const E& p, const E& fill);
+@@ -152,12 +149,12 @@
+                                 // Does nothing for resource and arena objects
+   ~GrowableArray()              { if (on_C_heap()) clear_and_deallocate(); }
+ 
+-  void  clear()    		{ _len = 0; }
+-  int   length() const  	{ return _len; }
+-  void	trunc_to(int l)		{ assert(l <= _len,"cannot increase length"); _len = l; }
+-  bool  is_empty() const  	{ return _len == 0; }
+-  bool  is_nonempty() const 	{ return _len != 0; }
+-  bool  is_full() const   	{ return _len == _max; }
++  void  clear()                 { _len = 0; }
++  int   length() const          { return _len; }
++  void  trunc_to(int l)         { assert(l <= _len,"cannot increase length"); _len = l; }
++  bool  is_empty() const        { return _len == 0; }
++  bool  is_nonempty() const     { return _len != 0; }
++  bool  is_full() const         { return _len == _max; }
+   DEBUG_ONLY(E* data_addr() const      { return _data; })
+ 
+   void print();
+@@ -187,7 +184,7 @@
+     return _data[0];
+   }
+ 
+-  E top() const {                                                  
++  E top() const {
+     assert(_len > 0, "empty list");
+     return _data[_len-1];
+   }
+@@ -317,7 +314,7 @@
+ // destructor.
+ template<class E> void GrowableArray<E>::clear_and_deallocate() {
+     assert(on_C_heap(),
+-  	   "clear_and_deallocate should only be called when on C heap");
++           "clear_and_deallocate should only be called when on C heap");
+     clear();
+     if (_data != NULL) {
+       for (int i = 0; i < _max; i++) _data[i].~E();
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/hashtable.cpp openjdk/hotspot/src/share/vm/utilities/hashtable.cpp
+--- openjdk6/hotspot/src/share/vm/utilities/hashtable.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/hashtable.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)hashtable.cpp	1.13 07/05/05 17:07:10 JVM"
+-#endif
+ /*
+  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -41,7 +38,7 @@
+ 
+ BasicHashtableEntry* BasicHashtable::new_entry(unsigned int hashValue) {
+   BasicHashtableEntry* entry;
+-  
++
+   if (_free_list) {
+     entry = _free_list;
+     _free_list = _free_list->next();
+@@ -66,7 +63,7 @@
+ 
+   entry = (HashtableEntry*)BasicHashtable::new_entry(hashValue);
+   entry->set_literal(obj);   // clears literal string field
+-  HS_DTRACE_PROBE4(hs_private, hashtable__new_entry, 
++  HS_DTRACE_PROBE4(hs_private, hashtable__new_entry,
+     this, hashValue, obj, entry);
+   return entry;
+ }
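
new_entry takes a two-step path: recycle from the table's free list when possible, otherwise carve fresh storage. A reduced sketch of that free-list-first allocation (Entry and ToyTable are illustrative stand-ins):

    #include <cstdlib>

    // Reduced sketch of the free-list-first path in new_entry().
    struct Entry { Entry* next; unsigned hash; };

    struct ToyTable {
      Entry* _free_list = nullptr;

      Entry* new_entry(unsigned hash) {
        Entry* e;
        if (_free_list) {                // 1) recycle a previously freed entry
          e = _free_list;
          _free_list = _free_list->next;
        } else {                         // 2) otherwise allocate fresh storage
          e = (Entry*)malloc(sizeof(Entry));
        }
        e->hash = hash;
        return e;
      }

      void free_entry(Entry* e) {        // storage returns to the list
        e->next = _free_list;
        _free_list = e;
      }
    };

    int main() {
      ToyTable t;
      Entry* a = t.new_entry(1);
      t.free_entry(a);
      return t.new_entry(2) == a ? 0 : 1;  // the freed entry is recycled
    }
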
+@@ -230,7 +227,7 @@
+ 
+ #ifndef PRODUCT
+ 
+-void Hashtable::print() {  
++void Hashtable::print() {
+   ResourceMark rm;
+ 
+   for (int i = 0; i < table_size(); i++) {
+@@ -264,7 +261,7 @@
+ void BasicHashtable::verify_lookup_length(double load) {
+   if ((double)_lookup_length / (double)_lookup_count > load * 2.0) {
+     warning("Performance bug: SystemDictionary lookup_count=%d "
+-            "lookup_length=%d average=%lf load=%f", 
++            "lookup_length=%d average=%lf load=%f",
+             _lookup_count, _lookup_length,
+             (double) _lookup_length / _lookup_count, load);
+   }
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/hashtable.hpp openjdk/hotspot/src/share/vm/utilities/hashtable.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/hashtable.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/hashtable.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)hashtable.hpp	1.14 07/05/05 17:07:10 JVM"
+-#endif
+ /*
+  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This is a generic hashtable, designed to be used for the symbol
+@@ -219,7 +216,7 @@
+ 
+   // GC support
+   //   Delete pointers to otherwise-unreachable objects.
+-  void unlink(BoolObjectClosure* cl); 
++  void unlink(BoolObjectClosure* cl);
+ 
+   // Reverse the order of elements in each of the buckets. Hashtable
+   // entries which refer to objects at a lower address than 'boundary'
+@@ -268,8 +265,8 @@
+ 
+ public:
+   unsigned int compute_hash(symbolHandle name, Handle loader) {
+-    // Be careful with identity_hash(), it can safepoint and if this 
+-    // were one expression, the compiler could choose to unhandle each 
++    // Be careful with identity_hash(), it can safepoint and if this
++    // were one expression, the compiler could choose to unhandle each
+     // oop before calling identity_hash() for either of them.  If the first
+     // causes a GC, the next would fail.
+     unsigned int name_hash = name->identity_hash();
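
The comment above captures a real evaluation-order trap: within one expression the compiler may extract both raw pointers before either identity_hash() call, and a GC triggered by the first call can move the object behind the second. Writing each call as its own statement forces the handle to be re-read just in time. A schematic sketch (all types here are hypothetical stand-ins, not VM handles):

    // Schematic only: Handle stands in for a GC-safe handle, and
    // identity_hash() for a call that may trigger a moving GC.
    struct Handle { int* raw; };

    unsigned identity_hash(int* obj);   // declaration only; may GC

    unsigned compute_hash(Handle name, Handle loader) {
      // Forbidden form: identity_hash(name.raw) ^ identity_hash(loader.raw)
      // in one expression lets the compiler load both raw pointers first;
      // a GC inside the first call then leaves the second pointer stale.
      //
      // Safe form: one full statement per call, so each raw pointer is
      // read immediately before its own use.
      unsigned name_hash   = identity_hash(name.raw);
      unsigned loader_hash = identity_hash(loader.raw);
      return name_hash ^ loader_hash;
    }
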
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/hashtable.inline.hpp openjdk/hotspot/src/share/vm/utilities/hashtable.inline.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/hashtable.inline.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/hashtable.inline.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)hashtable.inline.hpp	1.9 07/05/05 17:07:10 JVM"
+-#endif
+ /*
+  * Copyright 2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Inline function definitions for hashtable.hpp.
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/histogram.cpp openjdk/hotspot/src/share/vm/utilities/histogram.cpp
+--- openjdk6/hotspot/src/share/vm/utilities/histogram.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/histogram.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)histogram.cpp	1.21 07/05/05 17:07:09 JVM"
+-#endif
+ /*
+  * Copyright 1998-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -73,7 +70,7 @@
+   _title = title;
+   _elements = new (ResourceObj::C_HEAP) GrowableArray<HistogramElement*>(estimatedCount,true);
+ }
+-  
++
+ void Histogram::add_element(HistogramElement* element) {
+   // Note, we need to add locking !
+   elements()->append(element);
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/histogram.hpp openjdk/hotspot/src/share/vm/utilities/histogram.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/histogram.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/histogram.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)histogram.hpp	1.17 07/05/05 17:07:11 JVM"
+-#endif
+ /*
+  * Copyright 1998-2000 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This class provides a framework for collecting various statistics.
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/macros.hpp openjdk/hotspot/src/share/vm/utilities/macros.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/macros.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/macros.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)macros.hpp	1.41 07/05/29 09:44:30 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Use this to mark code that needs to be cleaned up (for development only)
+@@ -38,6 +35,21 @@
+ #ifdef KERNEL
+ #define COMPILER1
+ #define SERIALGC
++
++#define JVMTI_KERNEL
++#define FPROF_KERNEL
++#define VM_STRUCTS_KERNEL
++#define JNICHECK_KERNEL
++#define SERVICES_KERNEL
++
++#define KERNEL_RETURN        {}
++#define KERNEL_RETURN_(code) { code }
++
++#else  // KERNEL
++
++#define KERNEL_RETURN        /* next token must be ; */
++#define KERNEL_RETURN_(code) /* next token must be ; */
++
+ #endif // KERNEL
+ 
+ // COMPILER1 variant
+@@ -81,6 +93,14 @@
+ #define NOT_CHECK_UNHANDLED_OOPS(code)  code
+ #endif // CHECK_UNHANDLED_OOPS
+ 
++#ifdef CC_INTERP
++#define CC_INTERP_ONLY(code) code
++#define NOT_CC_INTERP(code)
++#else
++#define CC_INTERP_ONLY(code)
++#define NOT_CC_INTERP(code) code
++#endif // CC_INTERP
++
+ #ifdef ASSERT
+ #define DEBUG_ONLY(code) code
+ #define NOT_DEBUG(code)
+@@ -159,4 +179,3 @@
+ #define FIX_THIS(code) report_assertion_failure("FIX_THIS",__FILE__, __LINE__, "")
+ 
+ #define define_pd_global(type, name, value) const type pd_##name = value;
+-
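
The new CC_INTERP_ONLY/NOT_CC_INTERP pair (like the KERNEL_RETURN macros above it) follows the VM's standard idiom for compiling code in or out: exactly one expansion survives preprocessing, so there is no runtime check at all. A standalone sketch; build with -DCC_INTERP to flip the branch:

    #include <cstdio>

    // The VM's conditional-code idiom, reduced to one macro pair.
    #ifdef CC_INTERP
    #define CC_INTERP_ONLY(code) code
    #define NOT_CC_INTERP(code)
    #else
    #define CC_INTERP_ONLY(code)
    #define NOT_CC_INTERP(code) code
    #endif

    int main() {
      // Exactly one line survives preprocessing; there is no runtime test.
      CC_INTERP_ONLY(printf("C++ interpreter build\n");)
      NOT_CC_INTERP(printf("template interpreter build\n");)
      return 0;
    }
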
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/ostream.cpp openjdk/hotspot/src/share/vm/utilities/ostream.cpp
+--- openjdk6/hotspot/src/share/vm/utilities/ostream.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/ostream.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)ostream.cpp	1.78 07/06/08 23:17:46 JVM"
+-#endif
+ /*
+  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -111,7 +108,7 @@
+   va_end(ap);
+ }
+ 
+-void outputStream::print_cr(const char* format, ...) { 
++void outputStream::print_cr(const char* format, ...) {
+   char buffer[O_BUFLEN];
+   va_list ap;
+   va_start(ap, format);
+@@ -121,7 +118,7 @@
+   va_end(ap);
+ }
+ 
+-void outputStream::vprint(const char *format, va_list argptr) { 
++void outputStream::vprint(const char *format, va_list argptr) {
+   char buffer[O_BUFLEN];
+   size_t len;
+   const char* str = do_vsnprintf(buffer, O_BUFLEN, format, argptr, false, len);
+@@ -202,7 +199,7 @@
+ stringStream::stringStream(size_t initial_size) : outputStream() {
+   buffer_length = initial_size;
+   buffer        = NEW_RESOURCE_ARRAY(char, buffer_length);
+-  buffer_pos    = 0;  
++  buffer_pos    = 0;
+   buffer_fixed  = false;
+ }
+ 
+@@ -210,7 +207,7 @@
+ stringStream::stringStream(char* fixed_buffer, size_t fixed_buffer_size) : outputStream() {
+   buffer_length = fixed_buffer_size;
+   buffer        = fixed_buffer;
+-  buffer_pos    = 0;  
++  buffer_pos    = 0;
+   buffer_fixed  = true;
+ }
+ 
+@@ -223,7 +220,7 @@
+       end = buffer_length;
+       write_len = end - buffer_pos - 1; // leave room for the final '\0'
+     } else {
+-      // For small overruns, double the buffer.  For larger ones, 
++      // For small overruns, double the buffer.  For larger ones,
+       // increase to the requested size.
+       if (end < buffer_length * 2) {
+         end = buffer_length * 2;
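
The resize rule in this hunk appears twice in the file (stringStream and bufferedStream): double the buffer for small overruns so many small appends amortize, but jump straight to the requested size for one large write so it resizes exactly once. The policy in isolation:

    #include <cstddef>

    // Growth policy from stringStream::write / bufferedStream::write:
    // double for small overruns, jump straight to the request for big ones.
    inline size_t grow_to(size_t current, size_t needed_end) {
      size_t end = needed_end;
      if (end < current * 2) {
        end = current * 2;        // amortize many small appends
      }
      return end;                 // one big append resizes exactly once
    }

    int main() {
      // 100-byte buffer: a 10-byte overrun doubles to 200,
      // a 5000-byte request goes directly to 5000.
      return (grow_to(100, 110) == 200 && grow_to(100, 5000) == 5000) ? 0 : 1;
    }
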
+@@ -314,13 +311,13 @@
+   }
+ }
+ 
+-bool defaultStream::has_log_file() { 
++bool defaultStream::has_log_file() {
+   // lazily create log file (at startup, LogVMOutput is false even
+   // if +LogVMOutput is used, because the flags haven't been parsed yet)
+   // For safer printing during fatal error handling, do not init logfile
+   // if a VM error has been reported.
+   if (!_inited && !is_error_reported())  init();
+-  return _log_file != NULL; 
++  return _log_file != NULL;
+ }
+ 
+ static const char* make_log_name(const char* log_name, const char* force_directory, char* buf) {
+@@ -358,7 +355,7 @@
+ 
+ void defaultStream::init_log() {
+   // %%% Need a MutexLocker?
+-  const char* log_name = strlen(LogFile) > 0 ? LogFile : "hotspot.log";
++  const char* log_name = LogFile != NULL ? LogFile : "hotspot.log";
+   char buf[O_BUFLEN*2];
+   const char* try_name = make_log_name(log_name, NULL, buf);
+   fileStream* file = new(ResourceObj::C_HEAP) fileStream(try_name);
+@@ -459,7 +456,7 @@
+ 
+   delete _outer_xmlStream;
+   _outer_xmlStream = NULL;
+-  
++
+   file->flush();
+   delete file;
+ }
+@@ -480,11 +477,11 @@
+     fileStream* file = _log_file;
+     _log_file = NULL;
+     _outer_xmlStream = NULL;
+-  
++
+     if (file) {
+       file->flush();
+ 
+-      // Can't delete or close the file because delete and fclose aren't 
++      // Can't delete or close the file because delete and fclose aren't
+       // async-safe. We are about to die, so leave it to the kernel.
+       // delete file;
+     }
+@@ -494,23 +491,23 @@
+ intx defaultStream::hold(intx writer_id) {
+   bool has_log = has_log_file();  // check before locking
+   if (// impossible, but who knows?
+-      writer_id == NO_WRITER || 
++      writer_id == NO_WRITER ||
+ 
+       // bootstrap problem
+       tty_lock == NULL ||
+ 
+       // can't grab a lock or call Thread::current() if TLS isn't initialized
+-      ThreadLocalStorage::thread() == NULL || 
++      ThreadLocalStorage::thread() == NULL ||
+ 
+       // developer hook
+-      !SerializeVMOutput || 
++      !SerializeVMOutput ||
+ 
+       // VM already unhealthy
+-      is_error_reported() || 
+-      
++      is_error_reported() ||
++
+       // safepoint == global lock (for VM only)
+-      (SafepointSynchronize::is_synchronizing() && 
+-       Thread::current()->is_VM_thread()) 
++      (SafepointSynchronize::is_synchronizing() &&
++       Thread::current()->is_VM_thread())
+       ) {
+     // do not attempt to lock unless we know the thread and the VM is healthy
+     return NO_WRITER;
+@@ -645,7 +642,7 @@
+   defaultStream::instance->has_log_file();
+ }
+ 
+-// ostream_exit() is called during normal VM exit to finish log files, flush 
++// ostream_exit() is called during normal VM exit to finish log files, flush
+ // output and free resource.
+ void ostream_exit() {
+   static bool ostream_exit_called = false;
+@@ -657,14 +654,14 @@
+   {
+       // we temporarily disable PrintMallocFree here
+       // as otherwise it would lead to using an almost-deleted
+-      // tty or defaultStream::instance in logging facility 
++      // tty or defaultStream::instance in logging facility
+       // of HeapFree(), see 6391258
+       DEBUG_ONLY(FlagSetting fs(PrintMallocFree, false);)
+       if (tty != defaultStream::instance) {
+-	  delete tty;
++          delete tty;
+       }
+       if (defaultStream::instance != NULL) {
+-	  delete defaultStream::instance;
++          delete defaultStream::instance;
+       }
+   }
+   tty = NULL;
+@@ -686,7 +683,7 @@
+ }
+ 
+ staticBufferStream::staticBufferStream(char* buffer, size_t buflen,
+-				       outputStream *outer_stream) {
++                                       outputStream *outer_stream) {
+   _buffer = buffer;
+   _buflen = buflen;
+   _outer_stream = outer_stream;
+@@ -736,7 +733,7 @@
+   buffer_pos    = 0;
+   buffer_fixed  = false;
+ }
+-                                                                                                
++
+ bufferedStream::bufferedStream(char* fixed_buffer, size_t fixed_buffer_size) : outputStream() {
+   buffer_length = fixed_buffer_size;
+   buffer        = fixed_buffer;
+@@ -751,7 +748,7 @@
+       // if buffer cannot resize, silently truncate
+       len = buffer_length - buffer_pos - 1;
+     } else {
+-      // For small overruns, double the buffer.  For larger ones, 
++      // For small overruns, double the buffer.  For larger ones,
+       // increase to the requested size.
+       if (end < buffer_length * 2) {
+         end = buffer_length * 2;
+@@ -762,18 +759,92 @@
+   }
+   memcpy(buffer + buffer_pos, s, len);
+   buffer_pos += len;
++  update_position(s, len);
+ }
+-                                                                                                
++
+ char* bufferedStream::as_string() {
+   char* copy = NEW_RESOURCE_ARRAY(char, buffer_pos+1);
+   strncpy(copy, buffer, buffer_pos);
+   copy[buffer_pos] = 0;  // terminating null
+   return copy;
+ }
+-                                                                                                
++
+ bufferedStream::~bufferedStream() {
+   if (!buffer_fixed) {
+     FREE_C_HEAP_ARRAY(char, buffer);
+   }
+ }
+ 
++#ifndef PRODUCT
++
++#if defined(SOLARIS) || defined(LINUX)
++#include <sys/types.h>
++#include <sys/socket.h>
++#include <netinet/in.h>
++#include <arpa/inet.h>
++#endif
++
++// Network access
++networkStream::networkStream() {
++
++  _socket = -1;
++
++  hpi::initialize_socket_library();
++
++  int result = hpi::socket(AF_INET, SOCK_STREAM, 0);
++  if (result <= 0) {
++    assert(false, "Socket could not be created!");
++  } else {
++    _socket = result;
++  }
++}
++
++int networkStream::read(char *buf, size_t len) {
++  return hpi::recv(_socket, buf, (int)len, 0);
++}
++
++void networkStream::flush() {
++  if (size() != 0) {
++    hpi::send(_socket, (char *)base(), (int)size(), 0);
++  }
++  reset();
++}
++
++networkStream::~networkStream() {
++  close();
++}
++
++void networkStream::close() {
++  if (_socket != -1) {
++    flush();
++    hpi::socket_close(_socket);
++    _socket = -1;
++  }
++}
++
++bool networkStream::connect(const char *ip, short port) {
++
++  struct sockaddr_in server;
++  server.sin_family = AF_INET;
++  server.sin_port = htons(port);
++
++  server.sin_addr.s_addr = inet_addr(ip);
++  if (server.sin_addr.s_addr == (unsigned long)-1) {
++#ifdef _WINDOWS
++    struct hostent* host = hpi::get_host_by_name((char*)ip);
++#else
++    struct hostent* host = gethostbyname(ip);
++#endif
++    if (host != NULL) {
++      memcpy(&server.sin_addr, host->h_addr_list[0], host->h_length);
++    } else {
++      return false;
++    }
++  }
++
++
++  int result = hpi::connect(_socket, (struct sockaddr*)&server, sizeof(struct sockaddr_in));
++  return (result >= 0);
++}
++
++#endif
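
networkStream funnels all socket work through the VM's hpi porting layer. For orientation, a plain POSIX rendering of the same connect() logic (an assumption about equivalent non-VM code, not what the patch builds): try the address as a dotted quad first, fall back to a hostname lookup.

    #include <cstring>
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <netdb.h>

    // POSIX sketch of networkStream::connect(): numeric address first,
    // hostname resolution as the fallback, then connect.
    bool toy_connect(int sock, const char* ip, short port) {
      struct sockaddr_in server;
      memset(&server, 0, sizeof(server));
      server.sin_family = AF_INET;
      server.sin_port = htons(port);

      server.sin_addr.s_addr = inet_addr(ip);        // dotted-quad path
      if (server.sin_addr.s_addr == (in_addr_t)-1) { // not numeric: resolve it
        struct hostent* host = gethostbyname(ip);
        if (host == NULL) return false;
        memcpy(&server.sin_addr, host->h_addr_list[0], host->h_length);
      }
      return ::connect(sock, (struct sockaddr*)&server, sizeof(server)) >= 0;
    }

Comparing against (in_addr_t)-1 rather than (unsigned long)-1 sidesteps a width mismatch on LP64 targets.
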
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/ostream.hpp openjdk/hotspot/src/share/vm/utilities/ostream.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/ostream.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/ostream.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)ostream.hpp	1.43 07/06/08 23:18:20 JVM"
+-#endif
+ /*
+  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,11 +19,11 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Output streams for printing
+-// 
++//
+ // Printing guidelines:
+ // Where possible, please use tty->print() and tty->print_cr().
+ // For product mode VM warnings use warning() which internally uses tty.
+@@ -109,8 +106,8 @@
+ };
+ 
+ // standard output
+-				// ANSI C++ name collision
+-extern outputStream* tty;	    // tty output
++                                // ANSI C++ name collision
++extern outputStream* tty;           // tty output
+ extern outputStream* gclog_or_tty;  // stream for gc log if -Xloggc:<f>, or tty
+ 
+ // advisory locking for the shared tty stream:
+@@ -192,7 +189,7 @@
+   outputStream* _outer_stream;
+  public:
+   staticBufferStream(char* buffer, size_t buflen,
+-		     outputStream *outer_stream);
++                     outputStream *outer_stream);
+   ~staticBufferStream() {};
+   virtual void write(const char* c, size_t len);
+   void flush();
+@@ -222,3 +219,23 @@
+ };
+ 
+ #define O_BUFLEN 2000   // max size of output of individual print() methods
++
++#ifndef PRODUCT
++
++class networkStream : public bufferedStream {
++
++  private:
++    int _socket;
++
++  public:
++    networkStream();
++    ~networkStream();
++
++    bool connect(const char *host, short port);
++    bool is_open() const { return _socket != -1; }
++    int read(char *buf, size_t len);
++    void close();
++    virtual void flush();
++};
++
++#endif
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/preserveException.cpp openjdk/hotspot/src/share/vm/utilities/preserveException.cpp
+--- openjdk6/hotspot/src/share/vm/utilities/preserveException.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/preserveException.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)preserveException.cpp	1.21 07/05/05 17:07:11 JVM"
+-#endif
+ /*
+  * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -34,7 +31,7 @@
+   thread     = Thread::current();
+   _thread    = thread;
+   _preserved_exception_oop = Handle(thread, _thread->pending_exception());
+-  _thread->clear_pending_exception(); // Needed to avoid infinite recursion  
++  _thread->clear_pending_exception(); // Needed to avoid infinite recursion
+   _preserved_exception_line = _thread->exception_line();
+   _preserved_exception_file = _thread->exception_file();
+ }
+@@ -54,7 +51,7 @@
+ 
+ 
+ // This code is cloned from PreserveExceptionMark, except that:
+-//   returned pending exceptions do not cause a crash. 
++//   returned pending exceptions do not cause a crash.
+ //   thread is passed in, not set (not a reference parameter)
+ //   and bug 6431341 has been addressed.
+ 
+@@ -63,14 +60,14 @@
+   _preserved_exception_oop = Handle(thread, _thread->pending_exception());
+   _preserved_exception_line = _thread->exception_line();
+   _preserved_exception_file = _thread->exception_file();
+-  _thread->clear_pending_exception(); // Pending exceptions are checked in the destructor 
++  _thread->clear_pending_exception(); // Pending exceptions are checked in the destructor
+ }
+ 
+ 
+ CautiouslyPreserveExceptionMark::~CautiouslyPreserveExceptionMark() {
+   assert(!_thread->has_pending_exception(), "unexpected exception generated");
+   if (_thread->has_pending_exception()) {
+-    _thread->clear_pending_exception(); 
++    _thread->clear_pending_exception();
+   }
+   if (_preserved_exception_oop() != NULL) {
+     _thread->set_pending_exception(_preserved_exception_oop(), _preserved_exception_file, _preserved_exception_line);
+@@ -82,7 +79,7 @@
+   _preserved_exception_oop = Handle(_thread, _thread->pending_exception());
+   _preserved_exception_line = _thread->exception_line();
+   _preserved_exception_file = _thread->exception_file();
+-  _thread->clear_pending_exception(); 
++  _thread->clear_pending_exception();
+ }
+ 
+ void WeakPreserveExceptionMark::restore() {
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/preserveException.hpp openjdk/hotspot/src/share/vm/utilities/preserveException.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/preserveException.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/preserveException.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)preserveException.hpp	1.20 07/05/05 17:07:11 JVM"
+-#endif
+ /*
+  * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // This file provides more support for exception handling; see also exceptions.hpp
+@@ -64,10 +61,10 @@
+   Handle      _preserved_exception_oop;
+   int         _preserved_exception_line;
+   const char* _preserved_exception_file;
+-  
++
+   void        preserve();
+   void        restore();
+-  
++
+   public:
+     WeakPreserveExceptionMark(Thread* pThread) :  _thread(pThread), _preserved_exception_oop()  {
+       if (pThread->has_pending_exception()) {
+@@ -84,5 +81,5 @@
+ 
+ 
+ // use global exception mark when allowing pending exception to be set and
+-// saving and restoring them 
++// saving and restoring them
+ #define PRESERVE_EXCEPTION_MARK                    Thread* THREAD; PreserveExceptionMark __em(THREAD);
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/sizes.cpp openjdk/hotspot/src/share/vm/utilities/sizes.cpp
+--- openjdk6/hotspot/src/share/vm/utilities/sizes.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/sizes.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)sizes.cpp	1.11 07/05/05 17:07:10 JVM"
+-#endif
+ /*
+  * Copyright 2000 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/sizes.hpp openjdk/hotspot/src/share/vm/utilities/sizes.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/sizes.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/sizes.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)sizes.hpp	1.19 07/05/05 17:07:08 JVM"
+-#endif
+ /*
+  * Copyright 2000-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // The following two classes are used to represent 'sizes' and 'offsets' in the VM;
+@@ -145,4 +142,3 @@
+ // Use the following #define to get C++ field member offsets
+ 
+ #define byte_offset_of(klass,field)   in_ByteSize((int)offset_of(klass, field))
+-
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/taskqueue.cpp openjdk/hotspot/src/share/vm/utilities/taskqueue.cpp
+--- openjdk6/hotspot/src/share/vm/utilities/taskqueue.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/taskqueue.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)taskqueue.cpp	1.25 07/05/05 17:07:10 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -53,9 +50,8 @@
+ ParallelTaskTerminator::
+ ParallelTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set) :
+   _n_threads(n_threads),
+-  _queue_set(queue_set), 
+-  _offered_termination(0), _terminated(0),
+-  _term_monitor(Mutex::leaf+1, "ParTaskTerm", true) {}
++  _queue_set(queue_set),
++  _offered_termination(0) {}
+ 
+ bool ParallelTaskTerminator::peek_in_queue_set() {
+   return _queue_set->peek();
+@@ -111,522 +107,9 @@
+   }
+ }
+ 
+-OopTaskQueue::OopTaskQueue() : TaskQueueSuper() {
+-  assert(sizeof(Age) == sizeof(jint), "Depends on this.");
+-}
+-
+-void OopTaskQueue::initialize() {
+-  _elems = NEW_C_HEAP_ARRAY(Task, n());
+-  guarantee(_elems != NULL, "Allocation failed.");
+-}
+-
+-bool OopTaskQueue::push_slow(Task t, juint dirty_n_elems) {
+-  if (dirty_n_elems == n() - 1) {
+-    // Actually means 0, so do the push.
+-    juint localBot = _bottom;
+-    _elems[localBot] = t;
+-    _bottom = increment_index(localBot);
+-    return true;
+-  } else
+-    return false;
+-}
+-
+-bool OopTaskQueue::
+-pop_local_slow(juint localBot, Age oldAge) {
+-  // This queue was observed to contain exactly one element; either this
+-  // thread will claim it, or a competing "pop_global".  In either case,
+-  // the queue will be logically empty afterwards.  Create a new Age value
+-  // that represents the empty queue for the given value of "_bottom".  (We 
+-  // must also increment "tag" because of the case where "bottom == 1",
+-  // "top == 0".  A pop_global could read the queue element in that case,
+-  // then have the owner thread do a pop followed by another push.  Without
+-  // the incrementing of "tag", the pop_global's CAS could succeed,
+-  // allowing it to believe it has claimed the stale element.)
+-  Age newAge;
+-  newAge._top = localBot;
+-  newAge._tag = oldAge.tag() + 1;
+-  // Perhaps a competing pop_global has already incremented "top", in which 
+-  // case it wins the element.
+-  if (localBot == oldAge.top()) {
+-    Age tempAge;
+-    // No competing pop_global has yet incremented "top"; we'll try to
+-    // install new_age, thus claiming the element.
+-    assert(sizeof(Age) == sizeof(jint) && sizeof(jint) == sizeof(juint),
+-	   "Assumption about CAS unit.");
+-    *(jint*)&tempAge = Atomic::cmpxchg(*(jint*)&newAge, (volatile jint*)&_age, *(jint*)&oldAge);
+-    if (tempAge == oldAge) {
+-      // We win.
+-      assert(dirty_size(localBot, get_top()) != n() - 1,
+-	     "Shouldn't be possible...");
+-      return true;
+-    }
+-  }
+-  // We fail; a completing pop_global gets the element.  But the queue is
+-  // empty (and top is greater than bottom.)  Fix this representation of
+-  // the empty queue to become the canonical one.
+-  set_age(newAge);
+-  assert(dirty_size(localBot, get_top()) != n() - 1,
+-	 "Shouldn't be possible...");
+-  return false;
+-}
+-
+-bool OopTaskQueue::pop_global(Task& t) {
+-  Age newAge;
+-  Age oldAge = get_age();
+-  juint localBot = _bottom;
+-  juint n_elems = size(localBot, oldAge.top());
+-  if (n_elems == 0) {
+-    return false;
+-  }
+-  t = _elems[oldAge.top()];
+-  newAge = oldAge;
+-  newAge._top = increment_index(newAge.top());
+-  if ( newAge._top == 0 ) newAge._tag++;
+-  Age resAge;
+-  *(jint*)&resAge = Atomic::cmpxchg(*(jint*)&newAge, (volatile jint*)&_age, *(jint*)&oldAge);
+-  // Note that using "_bottom" here might fail, since a pop_local might
+-  // have decremented it.
+-  assert(dirty_size(localBot, newAge._top) != n() - 1,
+-	 "Shouldn't be possible...");
+-  return (resAge == oldAge);
+-}
+-
+-OopTaskQueue::~OopTaskQueue() {
+-  FREE_C_HEAP_ARRAY(Task, _elems);
+-}
+-
+-OopStarTaskQueue::OopStarTaskQueue() : TaskQueueSuper() {
+-  assert(sizeof(Age) == sizeof(jint), "Depends on this.");
+-}
+-
+-void OopStarTaskQueue::initialize() {
+-  _elems = NEW_C_HEAP_ARRAY(StarTask, n());
+-  guarantee(_elems != NULL, "Allocation failed.");
+-}
+-
+-bool OopStarTaskQueue::push_slow(StarTask t, juint dirty_n_elems) {
+-  if (dirty_n_elems == n() - 1) {
+-    // Actually means 0, so do the push.
+-    juint localBot = _bottom;
+-    _elems[localBot] = t;
+-    _bottom = increment_index(localBot);
+-    return true;
+-  } else
+-    return false;
+-}
+-
+-bool OopStarTaskQueue::
+-pop_local_slow(juint localBot, Age oldAge) {
+-  // This queue was observed to contain exactly one element; either this
+-  // thread will claim it, or a competing "pop_global".  In either case,
+-  // the queue will be logically empty afterwards.  Create a new Age value
+-  // that represents the empty queue for the given value of "_bottom".  (We 
+-  // must also increment "tag" because of the case where "bottom == 1",
+-  // "top == 0".  A pop_global could read the queue element in that case,
+-  // then have the owner thread do a pop followed by another push.  Without
+-  // the incrementing of "tag", the pop_global's CAS could succeed,
+-  // allowing it to believe it has claimed the stale element.)
+-  Age newAge;
+-  newAge._top = localBot;
+-  newAge._tag = oldAge.tag() + 1;
+-  // Perhaps a competing pop_global has already incremented "top", in which 
+-  // case it wins the element.
+-  if (localBot == oldAge.top()) {
+-    Age tempAge;
+-    // No competing pop_global has yet incremented "top"; we'll try to
+-    // install new_age, thus claiming the element.
+-    assert(sizeof(Age) == sizeof(jint) && sizeof(jint) == sizeof(juint),
+-	   "Assumption about CAS unit.");
+-    *(jint*)&tempAge = Atomic::cmpxchg(*(jint*)&newAge, (volatile jint*)&_age, *(jint*)&oldAge);
+-    if (tempAge == oldAge) {
+-      // We win.
+-      assert(dirty_size(localBot, get_top()) != n() - 1,
+-	     "Shouldn't be possible...");
+-      return true;
+-    }
+-  }
+-  // We fail; a completing pop_global gets the element.  But the queue is
+-  // empty (and top is greater than bottom.)  Fix this representation of
+-  // the empty queue to become the canonical one.
+-  set_age(newAge);
+-  assert(dirty_size(localBot, get_top()) != n() - 1,
+-	 "Shouldn't be possible...");
+-  return false;
+-}
+-
+-bool OopStarTaskQueue::pop_global(StarTask& t) {
+-  Age newAge;
+-  Age oldAge = get_age();
+-  juint localBot = _bottom;
+-  juint n_elems = size(localBot, oldAge.top());
+-  if (n_elems == 0) {
+-    return false;
+-  }
+-  t = _elems[oldAge.top()];
+-  newAge = oldAge;
+-  newAge._top = increment_index(newAge.top());
+-  if ( newAge._top == 0 ) newAge._tag++;
+-  Age resAge;
+-  *(jint*)&resAge = Atomic::cmpxchg(*(jint*)&newAge, (volatile jint*)&_age, *(jint*)&oldAge);
+-  // Note that using "_bottom" here might fail, since a pop_local might
+-  // have decremented it.
+-  assert(dirty_size(localBot, newAge._top) != n() - 1,
+-	 "Shouldn't be possible...");
+-  return (resAge == oldAge);
+-}
+-
+-OopStarTaskQueue::~OopStarTaskQueue() {
+-  FREE_C_HEAP_ARRAY(Task, _elems);
+-}
+-
+-// GenTaskQueue is an exact clone of OopTaskQueue.  See
+-// header file for questions about why GenTaskQueue exists.
+-
+-GenTaskQueue::GenTaskQueue() : TaskQueueSuper() {
+-  assert(sizeof(Age) == sizeof(jint), "Depends on this.");
+-}
+-
+-void GenTaskQueue::initialize() {
+-  _elems = NEW_C_HEAP_ARRAY(GenTask, n());
+-  guarantee(_elems != NULL, "Allocation failed.");
+-}
+-
+-bool GenTaskQueue::push_slow(GenTask t, juint dirty_n_elems) {
+-  if (dirty_n_elems == n() - 1) {
+-    // Actually means 0, so do the push.
+-    juint localBot = _bottom;
+-    _elems[localBot] = t;
+-    _bottom = increment_index(localBot);
+-    return true;
+-  } else
+-    return false;
+-}
+-
+-bool GenTaskQueue::
+-pop_local_slow(juint localBot, Age oldAge) {
+-  // This queue was observed to contain exactly one element; either this
+-  // thread will claim it, or a competing "pop_global".  In either case,
+-  // the queue will be logically empty afterwards.  Create a new Age value
+-  // that represents the empty queue for the given value of "_bottom".  (We 
+-  // must also increment "tag" because of the case where "bottom == 1",
+-  // "top == 0".  A pop_global could read the queue element in that case,
+-  // then have the owner thread do a pop followed by another push.  Without
+-  // the incrementing of "tag", the pop_global's CAS could succeed,
+-  // allowing it to believe it has claimed the stale element.)
+-  Age newAge;
+-  newAge._top = localBot;
+-  newAge._tag = oldAge.tag() + 1;
+-  // Perhaps a competing pop_global has already incremented "top", in which 
+-  // case it wins the element.
+-  if (localBot == oldAge.top()) {
+-    Age tempAge;
+-    // No competing pop_global has yet incremented "top"; we'll try to
+-    // install new_age, thus claiming the element.
+-    assert(sizeof(Age) == sizeof(jint) && sizeof(jint) == sizeof(juint),
+-	   "Assumption about CAS unit.");
+-    *(jint*)&tempAge = Atomic::cmpxchg(*(jint*)&newAge, (volatile jint*)&_age, *(jint*)&oldAge);
+-    if (tempAge == oldAge) {
+-      // We win.
+-      assert(dirty_size(localBot, get_top()) != n() - 1,
+-	     "Shouldn't be possible...");
+-      return true;
+-    }
+-  }
+-  // We fail; a completing pop_global gets the element.  But the queue is
+-  // empty (and top is greater than bottom.)  Fix this representation of
+-  // the empty queue to become the canonical one.
+-  set_age(newAge);
+-  assert(dirty_size(localBot, get_top()) != n() - 1,
+-	 "Shouldn't be possible...");
+-  return false;
+-}
+-
+-bool GenTaskQueue::pop_global(GenTask& t) {
+-  Age newAge;
+-  Age oldAge = get_age();
+-  juint localBot = _bottom;
+-  juint n_elems = size(localBot, oldAge.top());
+-  if (n_elems == 0) {
+-    return false;
+-  }
+-  t = _elems[oldAge.top()];
+-  newAge = oldAge;
+-  newAge._top = increment_index(newAge.top());
+-  if ( newAge._top == 0 ) newAge._tag++;
+-  Age resAge;
+-  *(jint*)&resAge = Atomic::cmpxchg(*(jint*)&newAge, (volatile jint*)&_age, *(jint*)&oldAge);
+-  // Note that using "_bottom" here might fail, since a pop_local might
+-  // have decremented it.
+-  assert(dirty_size(localBot, newAge._top) != n() - 1,
+-	 "Shouldn't be possible...");
+-  return (resAge == oldAge);
+-}
+-
+-GenTaskQueue::~GenTaskQueue() {
+-  FREE_C_HEAP_ARRAY(GenTask, _elems);
+-}
+-// End of GenTaskQueue clone
+-
+-void OopTaskQueueSet::register_queue(int i, OopTaskQueue* q) {
+-  assert(0 <= i && i < _n, "index out of range.");
+-  _queues[i] = q;
+-}
+-
+-OopTaskQueue* OopTaskQueueSet::queue(int i) {
+-  return _queues[i];
+-}
+-
+-bool OopTaskQueueSet::steal(int queue_num, int* seed, Task& t) {
+-  for (int i = 0; i < 2 * _n; i++)
+-    if (steal_best_of_2(queue_num, seed, t))
+-      return true;
+-  return false;
+-}
+-
+-bool OopTaskQueueSet::steal_best_of_all(int queue_num, int* seed, Task& t) {
+-  if (_n > 2) {
+-    int best_k;
+-    jint best_sz = 0;
+-    for (int k = 0; k < _n; k++) {
+-      if (k == queue_num) continue;
+-      jint sz = _queues[k]->size();
+-      if (sz > best_sz) {
+-	best_sz = sz;
+-	best_k = k;
+-      }
+-    }
+-    return best_sz > 0 && _queues[best_k]->pop_global(t);
+-  } else if (_n == 2) {
+-    // Just try the other one.
+-    int k = (queue_num + 1) % 2;
+-    return _queues[k]->pop_global(t);
+-  } else {
+-    assert(_n == 1, "can't be zero.");
+-    return false;
+-  }
+-}
+-
+-bool OopTaskQueueSet::steal_1_random(int queue_num, int* seed, Task& t) {
+-  if (_n > 2) {
+-    int k = queue_num;
+-    while (k == queue_num) k = randomParkAndMiller(seed) % _n;
+-    return _queues[2]->pop_global(t);
+-  } else if (_n == 2) {
+-    // Just try the other one.
+-    int k = (queue_num + 1) % 2;
+-    return _queues[k]->pop_global(t);
+-  } else {
+-    assert(_n == 1, "can't be zero.");
+-    return false;
+-  }
+-}
+-
+-bool OopTaskQueueSet::steal_best_of_2(int queue_num, int* seed, Task& t) {
+-  if (_n > 2) {
+-    int k1 = queue_num;
+-    while (k1 == queue_num) k1 = randomParkAndMiller(seed) % _n;
+-    int k2 = queue_num;
+-    while (k2 == queue_num || k2 == k1) k2 = randomParkAndMiller(seed) % _n;
+-    // Sample both and try the larger.
+-    juint sz1 = _queues[k1]->size();
+-    juint sz2 = _queues[k2]->size();
+-    if (sz2 > sz1) return _queues[k2]->pop_global(t);
+-    else return _queues[k1]->pop_global(t);
+-  } else if (_n == 2) {
+-    // Just try the other one.
+-    int k = (queue_num + 1) % 2;
+-    return _queues[k]->pop_global(t);
+-  } else {
+-    assert(_n == 1, "can't be zero.");
+-    return false;
+-  }
+-}
+-
+-bool OopTaskQueueSet::peek() {
+-  // Try all the queues.
+-  for (int j = 0; j < _n; j++) {
+-    if (_queues[j]->peek())
+-      return true;
+-  }
+-  return false;
+-}
+-
+-void OopStarTaskQueueSet::register_queue(int i, OopStarTaskQueue* q) {
+-  assert(0 <= i && i < _n, "index out of range.");
+-  _queues[i] = q;
+-}
+-
+-OopStarTaskQueue* OopStarTaskQueueSet::queue(int i) {
+-  return _queues[i];
+-}
+-
+-bool OopStarTaskQueueSet::steal(int queue_num, int* seed, StarTask& t) {
+-  for (int i = 0; i < 2 * _n; i++)
+-    if (steal_best_of_2(queue_num, seed, t))
+-      return true;
+-  return false;
+-}
+-
+-bool OopStarTaskQueueSet::steal_best_of_all(int queue_num, int* seed,
+-					    StarTask& t) {
+-  if (_n > 2) {
+-    int best_k;
+-    jint best_sz = 0;
+-    for (int k = 0; k < _n; k++) {
+-      if (k == queue_num) continue;
+-      jint sz = _queues[k]->size();
+-      if (sz > best_sz) {
+-	best_sz = sz;
+-	best_k = k;
+-      }
+-    }
+-    return best_sz > 0 && _queues[best_k]->pop_global(t);
+-  } else if (_n == 2) {
+-    // Just try the other one.
+-    int k = (queue_num + 1) % 2;
+-    return _queues[k]->pop_global(t);
+-  } else {
+-    assert(_n == 1, "can't be zero.");
+-    return false;
+-  }
+-}
+-
+-bool OopStarTaskQueueSet::steal_1_random(int queue_num, int* seed,
+-					 StarTask& t) {
+-  if (_n > 2) {
+-    int k = queue_num;
+-    while (k == queue_num) k = randomParkAndMiller(seed) % _n;
+-    return _queues[2]->pop_global(t);
+-  } else if (_n == 2) {
+-    // Just try the other one.
+-    int k = (queue_num + 1) % 2;
+-    return _queues[k]->pop_global(t);
+-  } else {
+-    assert(_n == 1, "can't be zero.");
+-    return false;
+-  }
+-}
+-
+-bool OopStarTaskQueueSet::steal_best_of_2(int queue_num, int* seed,
+-					  StarTask& t) {
+-  if (_n > 2) {
+-    int k1 = queue_num;
+-    while (k1 == queue_num) k1 = randomParkAndMiller(seed) % _n;
+-    int k2 = queue_num;
+-    while (k2 == queue_num || k2 == k1) k2 = randomParkAndMiller(seed) % _n;
+-    // Sample both and try the larger.
+-    juint sz1 = _queues[k1]->size();
+-    juint sz2 = _queues[k2]->size();
+-    if (sz2 > sz1) return _queues[k2]->pop_global(t);
+-    else return _queues[k1]->pop_global(t);
+-  } else if (_n == 2) {
+-    // Just try the other one.
+-    int k = (queue_num + 1) % 2;
+-    return _queues[k]->pop_global(t);
+-  } else {
+-    assert(_n == 1, "can't be zero.");
+-    return false;
+-  }
+-}
+-
+-bool OopStarTaskQueueSet::peek() {
+-  // Try all the queues.
+-  for (int j = 0; j < _n; j++) {
+-    if (_queues[j]->peek())
+-      return true;
+-  }
+-  return false;
+-}
+-
+-// Clone of OopTaskQueueSet for GenTask
+-void GenTaskQueueSet::register_queue(int i, GenTaskQueue* q) {
+-  assert(0 <= i && i < _n, "index out of range.");
+-  _queues[i] = q;
+-}
+-
+-GenTaskQueue* GenTaskQueueSet::queue(int i) {
+-  return _queues[i];
+-}
+-
+-bool GenTaskQueueSet::steal(int queue_num, int* seed, GenTask& t) {
+-  for (int i = 0; i < 2 * _n; i++)
+-    if (steal_best_of_all(queue_num, seed, t))
+-      return true;
+-  return false;
+-}
+-
+-bool GenTaskQueueSet::steal_best_of_all(int queue_num, int* seed, GenTask& t) {
+-  if (_n > 2) {
+-    int best_k;
+-    jint best_sz = 0;
+-    for (int k = 0; k < _n; k++) {
+-      if (k == queue_num) continue;
+-      jint sz = _queues[k]->size();
+-      if (sz > best_sz) {
+-	best_sz = sz;
+-	best_k = k;
+-      }
+-    }
+-    return best_sz > 0 && _queues[best_k]->pop_global(t);
+-  } else if (_n == 2) {
+-    // Just try the other one.
+-    int k = (queue_num + 1) % 2;
+-    return _queues[k]->pop_global(t);
+-  } else {
+-    assert(_n == 1, "can't be zero.");
+-    return false;
+-  }
+-}
+-
+-bool GenTaskQueueSet::steal_1_random(int queue_num, int* seed, GenTask& t) {
+-  if (_n > 2) {
+-    int k = queue_num;
+-    while (k == queue_num) k = randomParkAndMiller(seed) % _n;
+-    return _queues[2]->pop_global(t);
+-  } else if (_n == 2) {
+-    // Just try the other one.
+-    int k = (queue_num + 1) % 2;
+-    return _queues[k]->pop_global(t);
+-  } else {
+-    assert(_n == 1, "can't be zero.");
+-    return false;
+-  }
+-}
+-
+-bool GenTaskQueueSet::steal_best_of_2(int queue_num, int* seed, GenTask& t) {
+-  if (_n > 2) {
+-    int k1 = queue_num;
+-    while (k1 == queue_num) k1 = randomParkAndMiller(seed) % _n;
+-    int k2 = queue_num;
+-    while (k2 == queue_num || k2 == k1) k2 = randomParkAndMiller(seed) % _n;
+-    // Sample both and try the larger.
+-    juint sz1 = _queues[k1]->size();
+-    juint sz2 = _queues[k2]->size();
+-    if (sz2 > sz1) return _queues[k2]->pop_global(t);
+-    else return _queues[k1]->pop_global(t);
+-  } else if (_n == 2) {
+-    // Just try the other one.
+-    int k = (queue_num + 1) % 2;
+-    return _queues[k]->pop_global(t);
+-  } else {
+-    assert(_n == 1, "can't be zero.");
+-    return false;
+-  }
+-}
+-
+-bool GenTaskQueueSet::peek() {
+-  // Try all the queues.
+-  for (int j = 0; j < _n; j++) {
+-    if (_queues[j]->peek())
+-      return true;
+-  }
+-  return false;
+-}
+-// End clone of OopTaskQueueSet for GenTask
+-
+ bool ChunkTaskQueueWithOverflow::is_empty() {
+   return (_chunk_queue.size() == 0) &&
+-	 (_overflow_stack->length() == 0);
++         (_overflow_stack->length() == 0);
+ }
+ 
+ bool ChunkTaskQueueWithOverflow::stealable_is_empty() {
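
The pop_local_slow/pop_global pair removed above is a work-stealing deque protocol: the owner pushes and pops at _bottom without synchronization, thieves CAS the packed Age word (top plus a tag), and the tag increment on every reset-to-empty prevents a stale CAS from claiming an element that was already taken. A compact standalone sketch of the contested-last-element case (std::atomic and 16-bit fields stand in for the VM's jint-sized Age):

    #include <atomic>
    #include <cstdint>

    // Compact sketch of the removed pop_local_slow() protocol.  Age packs
    // (top, tag) into one 32-bit word, mirroring the jint-sized CAS the
    // real queue performs; the 16-bit fields are an assumption here.
    struct Age { uint16_t top; uint16_t tag; };

    static uint32_t pack(Age a) { return (uint32_t)a.top | ((uint32_t)a.tag << 16); }

    struct ToyDeque {
      std::atomic<uint32_t> _age{0};
      uint16_t _bottom = 0;             // owner's end; no CAS needed there

      // Called when the deque holds exactly one element: the owner races
      // any concurrent pop_global for it.
      bool pop_local_slow(uint16_t localBot, Age oldAge) {
        Age newAge;                     // the empty state for this bottom
        newAge.top = localBot;
        newAge.tag = (uint16_t)(oldAge.tag + 1);  // defeat stale thief CASes
        if (localBot == oldAge.top) {
          // No thief has bumped top yet; installing the empty state and
          // claiming the element are one and the same CAS.
          uint32_t expected = pack(oldAge);
          if (_age.compare_exchange_strong(expected, pack(newAge)))
            return true;                // owner wins the element
        }
        // A thief won; still normalize to the canonical empty encoding so
        // later pushes start from a clean (bottom == top) state.
        _age.store(pack(newAge));
        return false;
      }
    };
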
+@@ -640,7 +123,7 @@
+ void ChunkTaskQueueWithOverflow::initialize() {
+   _chunk_queue.initialize();
+   assert(_overflow_stack == 0, "Creating memory leak");
+-  _overflow_stack = 
++  _overflow_stack =
+     new (ResourceObj::C_HEAP) GrowableArray<ChunkTask>(10, true);
+ }
+ 
+@@ -670,7 +153,7 @@
+ }
+ 
+ bool ChunkTaskQueueWithOverflow::retrieve_from_stealable_queue(
+-				   ChunkTask& chunk_task) {
++                                   ChunkTask& chunk_task) {
+   bool result = _chunk_queue.pop_local(chunk_task);
+   if (TraceChunkTasksQueuing && Verbose) {
+     gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, chunk_task);
+@@ -679,7 +162,7 @@
+ }
+ 
+ bool ChunkTaskQueueWithOverflow::retrieve_from_overflow(
+-					ChunkTask& chunk_task) {
++                                        ChunkTask& chunk_task) {
+   bool result;
+   if (!_overflow_stack->is_empty()) {
+     chunk_task = _overflow_stack->pop();
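
The ChunkTaskQueueWithOverflow methods above pair a fixed-capacity stealable
queue with a growable overflow stack: pushes that would overflow the bounded
deque spill onto the stack, and only the bounded deque is ever visible to
stealing threads. Below is a minimal single-threaded sketch of that shape;
std containers stand in for GenericTaskQueue and GrowableArray, and the
capacity and retrieval order are illustrative choices, not HotSpot's.

    // Illustrative sketch only -- not HotSpot code.
    #include <cstddef>
    #include <deque>
    #include <vector>

    typedef size_t ChunkTask;

    class OverflowQueueSketch {
      std::deque<ChunkTask>  stealable;   // bounded in HotSpot; thieves pop here
      std::vector<ChunkTask> overflow;    // spill area, never stolen from
      static const size_t kCapacity = 8;  // stands in for the deque's fixed size
    public:
      void save(ChunkTask t) {
        // Push to the stealable queue; spill when the bounded queue is full.
        if (stealable.size() < kCapacity) stealable.push_back(t);
        else                              overflow.push_back(t);
      }
      bool retrieve(ChunkTask& t) {
        // A local pop drains the stealable queue first, then the overflow.
        if (!stealable.empty()) { t = stealable.back(); stealable.pop_back(); return true; }
        if (!overflow.empty())  { t = overflow.back();  overflow.pop_back();  return true; }
        return false;
      }
      bool is_empty() const { return stealable.empty() && overflow.empty(); }
    };
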
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/taskqueue.hpp openjdk/hotspot/src/share/vm/utilities/taskqueue.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/taskqueue.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/taskqueue.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)taskqueue.hpp	1.38 07/05/05 17:07:10 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,12 +19,10 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class TaskQueueSuper: public CHeapObj {
+-  friend class ChunkTaskQueue;
+-
+ protected:
+   // The first free element after the last one pushed (mod _n).
+   // (For now we'll assume only 32-bit CAS).
+@@ -94,12 +89,12 @@
+     // from a state in which _bottom == _top+1.  The pop_local could
+     // succeed in decrementing _bottom, and the pop_global in incrementing
+     // _top (in which case the pop_global will be awarded the contested
+-    // queue element.)  The resulting state must be interpreted as an empty 
++    // queue element.)  The resulting state must be interpreted as an empty
+     // queue.  (We only need to worry about one such event: only the queue
+     // owner performs pop_local's, and several concurrent threads
+     // attempting to perform the pop_global will all perform the same CAS,
+     // and only one can succeed.  Any stealing thread that reads after
+-    // either the increment or decrement will seen an empty queue, and will 
++    // either the increment or decrement will see an empty queue, and will
+     // not join the competitors.  The "sz == -1 || sz == _n-1" state will
+     // not be modified  by concurrent queues, so the owner thread can reset
+     // the state to _bottom == top so subsequent pushes will be performed
+@@ -107,7 +102,7 @@
+     if (sz == (n()-1)) return 0;
+     else return sz;
+   }
+-    
++
+ public:
+   TaskQueueSuper() : _bottom(0), _age() {}
+ 
+@@ -131,115 +126,129 @@
+ 
+ };
+ 
+-typedef oop Task;
+-class OopTaskQueue: public TaskQueueSuper {
+-private:
+-  // Slow paths for push, pop_local.  (pop_global has no fast path.)
+-  bool push_slow(Task t, juint dirty_n_elems);
+-  bool pop_local_slow(juint localBot, Age oldAge);
+-
+-public:
+-  // Initializes the queue to empty.
+-  OopTaskQueue();
+-
+-  void initialize();
+-
+-  // Push the task "t" on the queue.  Returns "false" iff the queue is
+-  // full.
+-  inline bool push(Task t);
+-
+-  // If succeeds in claiming a task (from the 'local' end, that is, the
+-  // most recently pushed task), returns "true" and sets "t" to that task.
+-  // Otherwise, the queue is empty and returns false.
+-  inline bool pop_local(Task& t);
+-
+-  // If succeeds in claiming a task (from the 'global' end, that is, the
+-  // least recently pushed task), returns "true" and sets "t" to that task.
+-  // Otherwise, the queue is empty and returns false.
+-  bool pop_global(Task& t);
+-
+-  // Delete any resource associated with the queue.
+-  ~OopTaskQueue();
+-
+-private:
+-  // Element array.
+-  volatile Task* _elems;
+-
+-};
+-
+-typedef oop* StarTask;
+-class OopStarTaskQueue: public TaskQueueSuper {
++template<class E> class GenericTaskQueue: public TaskQueueSuper {
+ private:
+   // Slow paths for push, pop_local.  (pop_global has no fast path.)
+-  bool push_slow(StarTask t, juint dirty_n_elems);
++  bool push_slow(E t, juint dirty_n_elems);
+   bool pop_local_slow(juint localBot, Age oldAge);
+ 
+ public:
+   // Initializes the queue to empty.
+-  OopStarTaskQueue();
++  GenericTaskQueue();
+ 
+   void initialize();
+ 
+   // Push the task "t" on the queue.  Returns "false" iff the queue is
+   // full.
+-  inline bool push(StarTask t);
++  inline bool push(E t);
+ 
+   // If succeeds in claiming a task (from the 'local' end, that is, the
+   // most recently pushed task), returns "true" and sets "t" to that task.
+   // Otherwise, the queue is empty and returns false.
+-  inline bool pop_local(StarTask& t);
++  inline bool pop_local(E& t);
+ 
+   // If succeeds in claiming a task (from the 'global' end, that is, the
+   // least recently pushed task), returns "true" and sets "t" to that task.
+   // Otherwise, the queue is empty and returns false.
+-  bool pop_global(StarTask& t);
++  bool pop_global(E& t);
+ 
+   // Delete any resource associated with the queue.
+-  ~OopStarTaskQueue();
++  ~GenericTaskQueue();
+ 
+ private:
+   // Element array.
+-  volatile StarTask* _elems;
+-
++  volatile E* _elems;
+ };
+ 
+-// Clone of OopTaskQueue with GenTask instead of Task
+-typedef size_t GenTask;  // Generic task
+-class GenTaskQueue: public TaskQueueSuper {
+-private:
+-  // Slow paths for push, pop_local.  (pop_global has no fast path.)
+-  bool push_slow(GenTask t, juint dirty_n_elems);
+-  bool pop_local_slow(juint localBot, Age oldAge);
+-
+-public:
+-  // Initializes the queue to empty.
+-  GenTaskQueue();
+-
+-  void initialize();
+-
+-  // Push the task "t" on the queue.  Returns "false" iff the queue is
+-  // full.
+-  inline bool push(GenTask t);
++template<class E>
++GenericTaskQueue<E>::GenericTaskQueue():TaskQueueSuper() {
++  assert(sizeof(Age) == sizeof(jint), "Depends on this.");
++}
+ 
+-  // If succeeds in claiming a task (from the 'local' end, that is, the
+-  // most recently pushed task), returns "true" and sets "t" to that task.
+-  // Otherwise, the queue is empty and returns false.
+-  inline bool pop_local(GenTask& t);
++template<class E>
++void GenericTaskQueue<E>::initialize() {
++  _elems = NEW_C_HEAP_ARRAY(E, n());
++  guarantee(_elems != NULL, "Allocation failed.");
++}
+ 
+-  // If succeeds in claiming a task (from the 'global' end, that is, the
+-  // least recently pushed task), returns "true" and sets "t" to that task.
+-  // Otherwise, the queue is empty and returns false.
+-  bool pop_global(GenTask& t);
++template<class E>
++bool GenericTaskQueue<E>::push_slow(E t, juint dirty_n_elems) {
++  if (dirty_n_elems == n() - 1) {
++    // Actually means 0, so do the push.
++    juint localBot = _bottom;
++    _elems[localBot] = t;
++    _bottom = increment_index(localBot);
++    return true;
++  } else
++    return false;
++}
+ 
+-  // Delete any resource associated with the queue.
+-  ~GenTaskQueue();
++template<class E>
++bool GenericTaskQueue<E>::
++pop_local_slow(juint localBot, Age oldAge) {
++  // This queue was observed to contain exactly one element; either this
++  // thread will claim it, or a competing "pop_global".  In either case,
++  // the queue will be logically empty afterwards.  Create a new Age value
++  // that represents the empty queue for the given value of "_bottom".  (We
++  // must also increment "tag" because of the case where "bottom == 1",
++  // "top == 0".  A pop_global could read the queue element in that case,
++  // then have the owner thread do a pop followed by another push.  Without
++  // the incrementing of "tag", the pop_global's CAS could succeed,
++  // allowing it to believe it has claimed the stale element.)
++  Age newAge;
++  newAge._top = localBot;
++  newAge._tag = oldAge.tag() + 1;
++  // Perhaps a competing pop_global has already incremented "top", in which
++  // case it wins the element.
++  if (localBot == oldAge.top()) {
++    Age tempAge;
++    // No competing pop_global has yet incremented "top"; we'll try to
++    // install new_age, thus claiming the element.
++    assert(sizeof(Age) == sizeof(jint) && sizeof(jint) == sizeof(juint),
++           "Assumption about CAS unit.");
++    *(jint*)&tempAge = Atomic::cmpxchg(*(jint*)&newAge, (volatile jint*)&_age, *(jint*)&oldAge);
++    if (tempAge == oldAge) {
++      // We win.
++      assert(dirty_size(localBot, get_top()) != n() - 1,
++             "Shouldn't be possible...");
++      return true;
++    }
++  }
++  // We fail; a completing pop_global gets the element.  But the queue is
++  // empty (and top is greater than bottom.)  Fix this representation of
++  // the empty queue to become the canonical one.
++  set_age(newAge);
++  assert(dirty_size(localBot, get_top()) != n() - 1,
++         "Shouldn't be possible...");
++  return false;
++}
+ 
+-private:
+-  // Element array.
+-  volatile GenTask* _elems;
++template<class E>
++bool GenericTaskQueue<E>::pop_global(E& t) {
++  Age newAge;
++  Age oldAge = get_age();
++  juint localBot = _bottom;
++  juint n_elems = size(localBot, oldAge.top());
++  if (n_elems == 0) {
++    return false;
++  }
++  t = _elems[oldAge.top()];
++  newAge = oldAge;
++  newAge._top = increment_index(newAge.top());
++  if ( newAge._top == 0 ) newAge._tag++;
++  Age resAge;
++  *(jint*)&resAge = Atomic::cmpxchg(*(jint*)&newAge, (volatile jint*)&_age, *(jint*)&oldAge);
++  // Note that using "_bottom" here might fail, since a pop_local might
++  // have decremented it.
++  assert(dirty_size(localBot, newAge._top) != n() - 1,
++         "Shouldn't be possible...");
++  return (resAge == oldAge);
++}
+ 
+-};
+-// End clone of OopTaskQueue with GenTask instead of Task
++template<class E>
++GenericTaskQueue<E>::~GenericTaskQueue() {
++  FREE_C_HEAP_ARRAY(E, _elems);
++}
+ 
+ // Inherits the typedef of "Task" from above.
+ class TaskQueueSetSuper: public CHeapObj {
+@@ -250,100 +259,129 @@
+   virtual bool peek() = 0;
+ };
+ 
+-class OopTaskQueueSet: public TaskQueueSetSuper {
++template<class E> class GenericTaskQueueSet: public TaskQueueSetSuper {
+ private:
+   int _n;
+-  OopTaskQueue** _queues;
++  GenericTaskQueue<E>** _queues;
+ 
+-  bool steal_1_random(int queue_num, int* seed, Task& t);
+-  bool steal_best_of_2(int queue_num, int* seed, Task& t);
+-  bool steal_best_of_all(int queue_num, int* seed, Task& t);
+ public:
+-  OopTaskQueueSet(int n) : _n(n) {
+-    typedef OopTaskQueue* OopTaskQueuePtr;
+-    _queues = NEW_C_HEAP_ARRAY(OopTaskQueuePtr, n);
++  GenericTaskQueueSet(int n) : _n(n) {
++    typedef GenericTaskQueue<E>* GenericTaskQueuePtr;
++    _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n);
+     guarantee(_queues != NULL, "Allocation failure.");
+-    for (int i = 0; i < n; i++) _queues[i] = NULL;
++    for (int i = 0; i < n; i++) {
++      _queues[i] = NULL;
++    }
+   }
+ 
+-  void register_queue(int i, OopTaskQueue* q);
+-
+-  OopTaskQueue* queue(int n);
+-
+-  // The thread with queue number "queue_num" (and whose random number seed 
+-  // is at "seed") is trying to steal a task from some other queue.  (It
+-  // may try several queues, according to some configuration parameter.)
+-  // If some steal succeeds, returns "true" and sets "t" the stolen task,
+-  // otherwise returns false.
+-  bool steal(int queue_num, int* seed, Task& t);
+-
+-  bool peek();
+-};
+-
+-class OopStarTaskQueueSet: public TaskQueueSetSuper {
+-private:
+-  int _n;
+-  OopStarTaskQueue** _queues;
+-
+-  bool steal_1_random(int queue_num, int* seed, StarTask& t);
+-  bool steal_best_of_2(int queue_num, int* seed, StarTask& t);
+-  bool steal_best_of_all(int queue_num, int* seed, StarTask& t);
+-public:
+-  OopStarTaskQueueSet(int n) : _n(n) {
+-    typedef OopStarTaskQueue* OopStarTaskQueuePtr;
+-    _queues = NEW_C_HEAP_ARRAY(OopStarTaskQueuePtr, n);
+-    guarantee(_queues != NULL, "Allocation failure.");
+-    for (int i = 0; i < n; i++) _queues[i] = NULL;
+-  }
++  bool steal_1_random(int queue_num, int* seed, E& t);
++  bool steal_best_of_2(int queue_num, int* seed, E& t);
++  bool steal_best_of_all(int queue_num, int* seed, E& t);
+ 
+-  void register_queue(int i, OopStarTaskQueue* q);
++  void register_queue(int i, GenericTaskQueue<E>* q);
+ 
+-  OopStarTaskQueue* queue(int n);
++  GenericTaskQueue<E>* queue(int n);
+ 
+-  // The thread with queue number "queue_num" (and whose random number seed 
++  // The thread with queue number "queue_num" (and whose random number seed
+   // is at "seed") is trying to steal a task from some other queue.  (It
+   // may try several queues, according to some configuration parameter.)
+   // If some steal succeeds, returns "true" and sets "t" the stolen task,
+   // otherwise returns false.
+-  bool steal(int queue_num, int* seed, StarTask& t);
++  bool steal(int queue_num, int* seed, E& t);
+ 
+   bool peek();
+ };
+ 
+-// Clone of OopTaskQueueSet for GenTask
+-class GenTaskQueueSet: public TaskQueueSetSuper {
+-  friend class ChunkTaskQueueSet;
++template<class E>
++void GenericTaskQueueSet<E>::register_queue(int i, GenericTaskQueue<E>* q) {
++  assert(0 <= i && i < _n, "index out of range.");
++  _queues[i] = q;
++}
+ 
+-private:
+-  int _n;
+-  GenTaskQueue** _queues;
++template<class E>
++GenericTaskQueue<E>* GenericTaskQueueSet<E>::queue(int i) {
++  return _queues[i];
++}
+ 
+-protected:
+-  bool steal_1_random(int queue_num, int* seed, GenTask& t);
+-  bool steal_best_of_2(int queue_num, int* seed, GenTask& t);
+-  bool steal_best_of_all(int queue_num, int* seed, GenTask& t);
+-public:
+-  GenTaskQueueSet(int n) : _n(n) {
+-    typedef GenTaskQueue* GenTaskQueuePtr;
+-    _queues = NEW_C_HEAP_ARRAY(GenTaskQueuePtr, n);
+-    guarantee(_queues != NULL, "Allocation failure.");
+-    for (int i = 0; i < n; i++) _queues[i] = NULL;
+-  }
++template<class E>
++bool GenericTaskQueueSet<E>::steal(int queue_num, int* seed, E& t) {
++  for (int i = 0; i < 2 * _n; i++)
++    if (steal_best_of_2(queue_num, seed, t))
++      return true;
++  return false;
++}
+ 
+-  void register_queue(int i, GenTaskQueue* q);
++template<class E>
++bool GenericTaskQueueSet<E>::steal_best_of_all(int queue_num, int* seed, E& t) {
++  if (_n > 2) {
++    int best_k;
++    jint best_sz = 0;
++    for (int k = 0; k < _n; k++) {
++      if (k == queue_num) continue;
++      jint sz = _queues[k]->size();
++      if (sz > best_sz) {
++        best_sz = sz;
++        best_k = k;
++      }
++    }
++    return best_sz > 0 && _queues[best_k]->pop_global(t);
++  } else if (_n == 2) {
++    // Just try the other one.
++    int k = (queue_num + 1) % 2;
++    return _queues[k]->pop_global(t);
++  } else {
++    assert(_n == 1, "can't be zero.");
++    return false;
++  }
++}
+ 
+-  GenTaskQueue* queue(int n);
++template<class E>
++bool GenericTaskQueueSet<E>::steal_1_random(int queue_num, int* seed, E& t) {
++  if (_n > 2) {
++    int k = queue_num;
++    while (k == queue_num) k = randomParkAndMiller(seed) % _n;
++    return _queues[k]->pop_global(t);
++  } else if (_n == 2) {
++    // Just try the other one.
++    int k = (queue_num + 1) % 2;
++    return _queues[k]->pop_global(t);
++  } else {
++    assert(_n == 1, "can't be zero.");
++    return false;
++  }
++}
+ 
+-  // The thread with queue number "queue_num" (and whose random number seed 
+-  // is at "seed") is trying to steal a task from some other queue.  (It
+-  // may try several queues, according to some configuration parameter.)
+-  // If some steal succeeds, returns "true" and sets "t" the stolen task,
+-  // otherwise returns false.
+-  bool steal(int queue_num, int* seed, GenTask& t);
++template<class E>
++bool GenericTaskQueueSet<E>::steal_best_of_2(int queue_num, int* seed, E& t) {
++  if (_n > 2) {
++    int k1 = queue_num;
++    while (k1 == queue_num) k1 = randomParkAndMiller(seed) % _n;
++    int k2 = queue_num;
++    while (k2 == queue_num || k2 == k1) k2 = randomParkAndMiller(seed) % _n;
++    // Sample both and try the larger.
++    juint sz1 = _queues[k1]->size();
++    juint sz2 = _queues[k2]->size();
++    if (sz2 > sz1) return _queues[k2]->pop_global(t);
++    else return _queues[k1]->pop_global(t);
++  } else if (_n == 2) {
++    // Just try the other one.
++    int k = (queue_num + 1) % 2;
++    return _queues[k]->pop_global(t);
++  } else {
++    assert(_n == 1, "can't be zero.");
++    return false;
++  }
++}
+ 
+-  bool peek();
+-};
+-// End clone of OopTaskQueueSet for GenTask
++template<class E>
++bool GenericTaskQueueSet<E>::peek() {
++  // Try all the queues.
++  for (int j = 0; j < _n; j++) {
++    if (_queues[j]->peek())
++      return true;
++  }
++  return false;
++}
+ 
+ // A class to aid in the termination of a set of parallel tasks using
+ // TaskQueueSet's for work stealing.
+@@ -353,8 +391,6 @@
+   int _n_threads;
+   TaskQueueSetSuper* _queue_set;
+   jint _offered_termination;
+-  jint _terminated;
+-  Monitor _term_monitor;
+ 
+   bool peek_in_queue_set();
+ protected:
+@@ -363,7 +399,7 @@
+ 
+ public:
+ 
+-  // "n_threads" is the number of threads to be terminated.  "queue_set" is a 
++  // "n_threads" is the number of threads to be terminated.  "queue_set" is a
+   // queue sets of work queues of other threads.
+   ParallelTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set);
+ 
+@@ -383,7 +419,7 @@
+ 
+ #define SIMPLE_STACK 0
+ 
+-inline bool OopTaskQueue::push(Task t) {
++template<class E> inline bool GenericTaskQueue<E>::push(E t) {
+ #if SIMPLE_STACK
+   juint localBot = _bottom;
+   if (_bottom < max_elems()) {
+@@ -399,7 +435,7 @@
+   jushort top = get_top();
+   juint dirty_n_elems = dirty_size(localBot, top);
+   assert((dirty_n_elems >= 0) && (dirty_n_elems < n()),
+-	 "n_elems out of range.");
++         "n_elems out of range.");
+   if (dirty_n_elems < max_elems()) {
+     _elems[localBot] = t;
+     _bottom = increment_index(localBot);
+@@ -410,7 +446,7 @@
+ #endif
+ }
+ 
+-inline bool OopTaskQueue::pop_local(Task& t) {
++template<class E> inline bool GenericTaskQueue<E>::pop_local(E& t) {
+ #if SIMPLE_STACK
+   juint localBot = _bottom;
+   assert(localBot > 0, "precondition.");
+@@ -435,12 +471,12 @@
+   t = _elems[localBot];
+   // This is a second read of "age"; the "size()" above is the first.
+   // If there's still at least one element in the queue, based on the
+-  // "_bottom" and "age" we've read, then there can be no interference with 
++  // "_bottom" and "age" we've read, then there can be no interference with
+   // a "pop_global" operation, and we're done.
+   juint tp = get_top();
+   if (size(localBot, tp) > 0) {
+     assert(dirty_size(localBot, tp) != n() - 1,
+-	   "Shouldn't be possible...");
++           "Shouldn't be possible...");
+     return true;
+   } else {
+     // Otherwise, the queue contained exactly one element; we take the slow
+@@ -450,167 +486,21 @@
+ #endif
+ }
+ 
+-inline bool OopStarTaskQueue::push(StarTask t) {
+-#if SIMPLE_STACK
+-  juint localBot = _bottom;
+-  if (_bottom < max_elems()) {
+-    _elems[localBot] = t;
+-    _bottom = localBot + 1;
+-    return true;
+-  } else {
+-    return false;
+-  }
+-#else
+-  juint localBot = _bottom;
+-  assert((localBot >= 0) && (localBot < n()), "_bottom out of range.");
+-  jushort top = get_top();
+-  juint dirty_n_elems = dirty_size(localBot, top);
+-  assert((dirty_n_elems >= 0) && (dirty_n_elems < n()),
+-	 "n_elems out of range.");
+-  if (dirty_n_elems < max_elems()) {
+-    _elems[localBot] = t;
+-    _bottom = increment_index(localBot);
+-    return true;
+-  } else {
+-    return push_slow(t, dirty_n_elems);
+-  }
+-#endif
+-}
+-
+-inline bool OopStarTaskQueue::pop_local(StarTask& t) {
+-#if SIMPLE_STACK
+-  juint localBot = _bottom;
+-  assert(localBot > 0, "precondition.");
+-  localBot--;
+-  t = _elems[localBot];
+-  _bottom = localBot;
+-  return true;
+-#else
+-  juint localBot = _bottom;
+-  // This value cannot be n-1.  That can only occur as a result of
+-  // the assignment to bottom in this method.  If it does, this method
+-  // resets the size( to 0 before the next call (which is sequential,
+-  // since this is pop_local.)
+-  juint dirty_n_elems = dirty_size(localBot, get_top());
+-  assert(dirty_n_elems != n() - 1, "Shouldn't be possible...");
+-  if (dirty_n_elems == 0) return false;
+-  localBot = decrement_index(localBot);
+-  _bottom = localBot;
+-  // This is necessary to prevent any read below from being reordered
+-  // before the store just above.
+-  OrderAccess::fence();
+-  t = _elems[localBot];
+-  // This is a second read of "age"; the "size()" above is the first.
+-  // If there's still at least one element in the queue, based on the
+-  // "_bottom" and "age" we've read, then there can be no interference with 
+-  // a "pop_global" operation, and we're done.
+-  juint tp = get_top();
+-  if (size(localBot, tp) > 0) {
+-    assert(dirty_size(localBot, tp) != n() - 1,
+-	   "Shouldn't be possible...");
+-    return true;
+-  } else {
+-    // Otherwise, the queue contained exactly one element; we take the slow
+-    // path.
+-    return pop_local_slow(localBot, get_age());
+-  }
+-#endif
+-}
+-
+-// Clone of OopTaskQueue for GenTask
+-
+-inline bool GenTaskQueue::push(GenTask t) {
+-#if SIMPLE_STACK
+-  juint localBot = _bottom;
+-  if (_bottom < max_elems()) {
+-    _elems[localBot] = t;
+-    _bottom = localBot + 1;
+-    return true;
+-  } else {
+-    return false;
+-  }
+-#else
+-  juint localBot = _bottom;
+-  assert((localBot >= 0) && (localBot < n()), "_bottom out of range.");
+-  jushort top = get_top();
+-  juint dirty_n_elems = dirty_size(localBot, top);
+-  assert((dirty_n_elems >= 0) && (dirty_n_elems < n()),
+-	 "n_elems out of range.");
+-  if (dirty_n_elems < max_elems()) {
+-    _elems[localBot] = t;
+-    _bottom = increment_index(localBot);
+-    return true;
+-  } else {
+-    return push_slow(t, dirty_n_elems);
+-  }
+-#endif
+-}
++typedef oop Task;
++typedef GenericTaskQueue<Task>         OopTaskQueue;
++typedef GenericTaskQueueSet<Task>      OopTaskQueueSet;
+ 
+-inline bool GenTaskQueue::pop_local(GenTask& t) {
+-#if SIMPLE_STACK
+-  juint localBot = _bottom;
+-  assert(localBot > 0, "precondition.");
+-  localBot--;
+-  t = _elems[localBot];
+-  _bottom = localBot;
+-  return true;
+-#else
+-  juint localBot = _bottom;
+-  // This value cannot be n-1.  That can only occur as a result of
+-  // the assignment to bottom in this method.  If it does, this method
+-  // resets the size( to 0 before the next call (which is sequential,
+-  // since this is pop_local.)
+-  juint dirty_n_elems = dirty_size(localBot, get_top());
+-  assert(dirty_n_elems != n() - 1, "Shouldn't be possible...");
+-  if (dirty_n_elems == 0) return false;
+-  localBot = decrement_index(localBot);
+-  _bottom = localBot;
+-  // This is necessary to prevent any read below from being reordered
+-  // before the store just above.
+-  OrderAccess::fence();
+-  t = _elems[localBot];
+-  // This is a second read of "age"; the "size()" above is the first.
+-  // If there's still at least one element in the queue, based on the
+-  // "_bottom" and "age" we've read, then there can be no interference with 
+-  // a "pop_global" operation, and we're done.
+-  juint tp = get_top();
+-  if (size(localBot, tp) > 0) {
+-    assert(dirty_size(localBot, tp) != n() - 1,
+-	   "Shouldn't be possible...");
+-    return true;
+-  } else {
+-    // Otherwise, the queue contained exactly one element; we take the slow
+-    // path.
+-    return pop_local_slow(localBot, get_age());
+-  }
+-#endif
+-}
+-// End clone of OopTaskQueue for GenTask
++typedef oop* StarTask;
++typedef GenericTaskQueue<StarTask>     OopStarTaskQueue;
++typedef GenericTaskQueueSet<StarTask>  OopStarTaskQueueSet;
+ 
+ typedef size_t ChunkTask;  // index for chunk
+-
+-class ChunkTaskQueue: public CHeapObj {
+-private:
+-  GenTaskQueue	_chunk_queue;
+-
+- public:
+-  ChunkTaskQueue() {};
+-  ~ChunkTaskQueue() {};
+-
+-  bool push_slow(ChunkTask t, juint dirty_n_elems);
+-  bool pop_local_slow(juint localBot, TaskQueueSuper::Age oldAge);
+-
+-  inline void initialize() { _chunk_queue.initialize(); }
+-  inline bool push(ChunkTask t) { return _chunk_queue.push((GenTask) t); }
+-  inline bool pop_local(ChunkTask& t) { return _chunk_queue.pop_local((GenTask&) t); }
+-  bool pop_global(ChunkTask& t) { return _chunk_queue.pop_global((GenTask&) t); }  
+-  juint size() { return _chunk_queue.size(); }
+-};
++typedef GenericTaskQueue<ChunkTask>    ChunkTaskQueue;
++typedef GenericTaskQueueSet<ChunkTask> ChunkTaskQueueSet;
+ 
+ class ChunkTaskQueueWithOverflow: public CHeapObj {
+-  friend class ChunkTaskQueueSet;
+  protected:
+-  GenTaskQueue	              _chunk_queue;
++  ChunkTaskQueue              _chunk_queue;
+   GrowableArray<ChunkTask>*   _overflow_stack;
+ 
+  public:
+@@ -629,50 +519,7 @@
+   bool stealable_is_empty();
+   bool overflow_is_empty();
+   juint stealable_size() { return _chunk_queue.size(); }
++  ChunkTaskQueue* task_queue() { return &_chunk_queue; }
+ };
+ 
+-class ChunkTaskQueueSet: public CHeapObj {
+-private:
+-  GenTaskQueueSet _task_queue_set;
+-
+-  bool steal_1_random(int queue_num, int* seed, GenTask& t) {
+-    return _task_queue_set.steal_1_random(queue_num, seed, t);
+-  }
+-  bool steal_best_of_2(int queue_num, int* seed, GenTask& t) {
+-    return _task_queue_set.steal_best_of_2(queue_num, seed, t);
+-  }
+-  bool steal_best_of_all(int queue_num, int* seed, GenTask& t) {
+-    return _task_queue_set.steal_best_of_all(queue_num, seed, t);
+-  }
+-public:
+-  ChunkTaskQueueSet(int n) : _task_queue_set(n) {}
+-
+ #define USE_ChunkTaskQueueWithOverflow
+-  void register_queue(int i, GenTaskQueue* q) {
+-    _task_queue_set.register_queue(i, q);
+-  }
+-
+-  void register_queue(int i, ChunkTaskQueueWithOverflow* q) {
+-    register_queue(i, &q->_chunk_queue);
+-  }
+-
+-  ChunkTaskQueue* queue(int n) { 
+-    return (ChunkTaskQueue*) _task_queue_set.queue(n); 
+-  }
+-
+-  GenTaskQueueSet* task_queue_set() { return &_task_queue_set; }
+-
+-  // The thread with queue number "queue_num" (and whose random number seed 
+-  // is at "seed") is trying to steal a task from some other queue.  (It
+-  // may try several queues, according to some configuration parameter.)
+-  // If some steal succeeds, returns "true" and sets "t" the stolen task,
+-  // otherwise returns false.
+-  bool steal(int queue_num, int* seed, GenTask& t) {
+-    return _task_queue_set.steal(queue_num, seed, t);
+-  }
+-
+-  bool peek() {
+-    return _task_queue_set.peek();
+-  }
+-};
+-// End clone of OopTaskQueueSet for GenTask
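
The hunks above are the heart of this patch: three hand-maintained clones
(OopTaskQueue, OopStarTaskQueue, GenTaskQueue and their queue sets) collapse
into one GenericTaskQueue<E>/GenericTaskQueueSet<E> template plus typedefs.
The queue itself is a work-stealing ring buffer: the owning thread pushes and
pops at _bottom, thieves pop at _top via CAS, and the Age tag guards that CAS
against ABA when the queue drains to a single element. The sketch below
models only the single-threaded index arithmetic -- no CAS, no Age tag; the
capacity and element type are arbitrary demo choices.

    // Single-threaded model of the _bottom/_top ring-buffer scheme.
    #include <cstdio>

    class RingDequeModel {
      static const unsigned N = 8;   // power of two, so masking replaces mod
      unsigned _bottom, _top;        // owner works at bottom, thieves at top
      int _elems[N];
      static unsigned inc(unsigned i) { return (i + 1) & (N - 1); }
    public:
      RingDequeModel() : _bottom(0), _top(0) {}
      unsigned dirty_size() const { return (_bottom - _top) & (N - 1); }
      bool push(int t) {                  // owner end
        // Keep one slot free so full and empty stay distinguishable.
        if (dirty_size() >= N - 1) return false;
        _elems[_bottom] = t;
        _bottom = inc(_bottom);
        return true;
      }
      bool pop_local(int& t) {            // owner end: LIFO, cache-friendly
        if (dirty_size() == 0) return false;
        _bottom = (_bottom + N - 1) & (N - 1);
        t = _elems[_bottom];
        return true;
      }
      bool pop_global(int& t) {           // thief end: FIFO, steals the oldest
        if (dirty_size() == 0) return false;
        t = _elems[_top];
        _top = inc(_top);
        return true;
      }
    };

    int main() {
      RingDequeModel q;
      for (int i = 0; i < 5; i++) q.push(i);
      int t;
      q.pop_local(t);  printf("owner got %d\n", t);   // 4, the newest
      q.pop_global(t); printf("thief got %d\n", t);   // 0, the oldest
      return 0;
    }

Note also that the generic steal() retries steal_best_of_2 where the old
GenTaskQueueSet::steal used steal_best_of_all: sampling two random victims
and popping from the larger is cheaper than scanning every queue on each
attempt.
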
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/top.hpp openjdk/hotspot/src/share/vm/utilities/top.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/top.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/top.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)top.hpp	1.16 07/05/05 17:07:11 JVM"
+-#endif
+ /*
+  * Copyright 1997 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // THIS FILE IS INTESIONALLY LEFT EMPTY
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/utf8.cpp openjdk/hotspot/src/share/vm/utilities/utf8.cpp
+--- openjdk6/hotspot/src/share/vm/utilities/utf8.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/utf8.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)utf8.cpp	1.30 07/05/05 17:07:07 JVM"
+-#endif
+ /*
+  * Copyright 1997-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,18 +19,18 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+ # include "incls/_utf8.cpp.incl"
+ 
+-// Assume the utf8 string is in legal form and has been 
++// Assume the utf8 string is in legal form and has been
+ // checked in the class file parser/format checker.
+ char* UTF8::next(const char* str, jchar* value) {
+   unsigned const char *ptr = (const unsigned char *)str;
+   unsigned char ch, ch2, ch3;
+-  int length = -1;		/* bad length */
++  int length = -1;              /* bad length */
+   jchar result;
+   switch ((ch = ptr[0]) >> 4) {
+     default:
+@@ -45,7 +42,7 @@
+     /* Shouldn't happen. */
+     break;
+ 
+-  case 0xC: case 0xD:	
++  case 0xC: case 0xD:
+     /* 110xxxxx  10xxxxxx */
+     if (((ch2 = ptr[1]) & 0xC0) == 0x80) {
+       unsigned char high_five = ch & 0x1F;
+@@ -53,7 +50,7 @@
+       result = (high_five << 6) + low_six;
+       length = 2;
+       break;
+-    } 
++    }
+     break;
+ 
+   case 0xE:
+@@ -65,13 +62,13 @@
+         unsigned char low_six = ch3 & 0x3f;
+         result = (((high_four << 6) + mid_six) << 6) + low_six;
+         length = 3;
+-      } 
++      }
+     }
+     break;
+   } /* end of switch */
+ 
+   if (length <= 0) {
+-    *value = ptr[0];	/* default bad result; */
++    *value = ptr[0];    /* default bad result; */
+     return (char*)(ptr + 1); // make progress somehow
+   }
+ 
+@@ -84,7 +81,7 @@
+ 
+ char* UTF8::next_character(const char* str, jint* value) {
+   unsigned const char *ptr = (const unsigned char *)str;
+-  /* See if it's legal supplementary character: 
++  /* See if it's legal supplementary character:
+      11101101 1010xxxx 10xxxxxx 11101101 1011xxxx 10xxxxxx */
+   if (is_supplementary_character(ptr)) {
+     *value = get_supplementary_character(ptr);
+@@ -96,8 +93,8 @@
+   return next_ch;
+ }
+ 
+-// Count bytes of the form 10xxxxxx and deduct this count 
+-// from the total byte count.  The utf8 string must be in 
++// Count bytes of the form 10xxxxxx and deduct this count
++// from the total byte count.  The utf8 string must be in
+ // legal form which has been verified in the format checker.
+ int UTF8::unicode_length(const char* str, int len) {
+   int num_chars = len;
+@@ -109,9 +106,9 @@
+   return num_chars;
+ }
+ 
+-// Count bytes of the utf8 string except those in form 
++// Count bytes of the utf8 string except those in form
+ // 10xxxxxx which only appear in multibyte characters.
+-// The utf8 string must be in legal form and has been 
++// The utf8 string must be in legal form and has been
+ // verified in the format checker.
+ int UTF8::unicode_length(const char* str) {
+   int num_chars = 0;
+@@ -130,12 +127,12 @@
+     return base + 1;
+   }
+ 
+-  if (ch <= 0x7FF) { 
++  if (ch <= 0x7FF) {
+     /* 11 bits or less. */
+     unsigned char high_five = ch >> 6;
+     unsigned char low_six = ch & 0x3F;
+     base[0] = high_five | 0xC0; /* 110xxxxx */
+-    base[1] = low_six | 0x80;	/* 10xxxxxx */
++    base[1] = low_six | 0x80;   /* 10xxxxxx */
+     return base + 2;
+   }
+   /* possibly full 16 bits. */
+@@ -168,7 +165,7 @@
+ // Returns NULL if 'c' it not found. This only works as long
+ // as 'c' is an ASCII character
+ jbyte* UTF8::strrchr(jbyte* base, int length, jbyte c) {
+-  assert(length >= 0, "sanity check");    
++  assert(length >= 0, "sanity check");
+   assert(c >= 0, "does not work for non-ASCII characters");
+   // Skip backwards in string until 'c' is found or end is reached
+   while(--length >= 0 && base[length] != c);
+@@ -178,10 +175,10 @@
+ bool UTF8::equal(jbyte* base1, int length1, jbyte* base2, int length2) {
+   // Length must be the same
+   if (length1 != length2) return false;
+-  for (int i = 0; i < length1; i++) {  
++  for (int i = 0; i < length1; i++) {
+     if (base1[i] != base2[i]) return false;
+-  } 
+-  return true;  
++  }
++  return true;
+ }
+ 
+ bool UTF8::is_supplementary_character(const unsigned char* str) {
+@@ -190,7 +187,7 @@
+ }
+ 
+ jint UTF8::get_supplementary_character(const unsigned char* str) {
+-  return 0x10000 + ((str[1] & 0x0f) << 16) + ((str[2] & 0x3f) << 10) 
++  return 0x10000 + ((str[1] & 0x0f) << 16) + ((str[2] & 0x3f) << 10)
+                  + ((str[4] & 0x0f) << 6)  + (str[5] & 0x3f);
+ }
+ 
+@@ -245,6 +242,3 @@
+   }
+   *utf8_buffer = '\0';
+ }
+-
+-
+-
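
UTF8::unicode_length above exploits the fact that in legal (modified) UTF-8
every byte of the form 10xxxxxx is a continuation byte, so the character
count is simply the byte count minus the continuation bytes. A standalone
rendering of that count -- ignoring the supplementary/surrogate handling the
class layers on elsewhere:

    #include <cstdio>
    #include <cstring>

    static bool is_continuation(unsigned char c) { return (c & 0xC0) == 0x80; }

    // Count bytes of the form 10xxxxxx and deduct them from the byte count.
    int unicode_length(const char* str, int len) {
      int num_chars = len;
      for (int i = 0; i < len; i++)
        if (is_continuation((unsigned char)str[i])) --num_chars;
      return num_chars;
    }

    int main() {
      const char* s = "caf\xC3\xA9";   // "cafe" with U+00E9: 5 bytes, 4 chars
      printf("%d bytes, %d chars\n", (int)strlen(s), unicode_length(s, (int)strlen(s)));
      return 0;
    }
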
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/utf8.hpp openjdk/hotspot/src/share/vm/utilities/utf8.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/utf8.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/utf8.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)utf8.hpp	1.22 07/05/05 17:07:11 JVM"
+-#endif
+ /*
+  * Copyright 1997-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Low-level interface for UTF8 strings
+@@ -36,13 +33,13 @@
+   static int unicode_length(const char* uft8_str, int len);
+ 
+   // converts a uft8 string to a unicode string
+-  static void convert_to_unicode(const char* utf8_str, jchar* unicode_buffer, int unicode_length); 
++  static void convert_to_unicode(const char* utf8_str, jchar* unicode_buffer, int unicode_length);
+ 
+   // decodes the current utf8 character, stores the result in value,
+   // and returns the end of the current uft8 chararacter.
+   static char* next(const char* str, jchar* value);
+ 
+-  // decodes the current utf8 character, gets the supplementary character instead of 
++  // decodes the current utf8 character, gets the supplementary character instead of
+   // the surrogate pair when seeing a supplementary character in string,
+   // stores the result in value, and returns the end of the current uft8 chararacter.
+   static char* next_character(const char* str, jint* value);
+@@ -74,6 +71,6 @@
+ 
+   // converts a unicode string to a utf8 string; result is allocated
+   // in resource area unless a buffer is provided.
+-  static char* as_utf8(jchar* base, int length);  
++  static char* as_utf8(jchar* base, int length);
+   static char* as_utf8(jchar* base, int length, char* buf, int buflen);
+ };
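
The supplementary-character helpers declared above handle the JVM's modified
UTF-8, in which characters beyond U+FFFF are stored as a six-byte encoded
surrogate pair (11101101 1010xxxx 10xxxxxx 11101101 1011xxxx 10xxxxxx). A
worked check of the arithmetic from UTF8::get_supplementary_character in the
previous file; the sample bytes encode U+10400 and were derived by hand for
this demo:

    #include <cassert>
    #include <cstdio>

    int get_supplementary_character(const unsigned char* str) {
      // 0x10000 plus the 10+10 payload bits of the high and low surrogates.
      return 0x10000 + ((str[1] & 0x0f) << 16) + ((str[2] & 0x3f) << 10)
                     + ((str[4] & 0x0f) << 6)  + (str[5] & 0x3f);
    }

    int main() {
      // U+10400 as the encoded surrogate pair D801/DC00.
      const unsigned char buf[6] = { 0xED, 0xA0, 0x81, 0xED, 0xB0, 0x80 };
      assert(get_supplementary_character(buf) == 0x10400);
      printf("0x%X\n", get_supplementary_character(buf));
      return 0;
    }
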
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/vmError.cpp openjdk/hotspot/src/share/vm/utilities/vmError.cpp
+--- openjdk6/hotspot/src/share/vm/utilities/vmError.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/vmError.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)vmError.cpp	1.31 07/05/23 10:54:29 JVM"
+-#endif
+ /*
+  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -31,11 +28,11 @@
+ // List of environment variables that should be reported in error log file.
+ const char *env_list[] = {
+   // All platforms
+-  "JAVA_HOME", "JRE_HOME", "JAVA_TOOL_OPTIONS", "_JAVA_OPTIONS", "CLASSPATH", 
++  "JAVA_HOME", "JRE_HOME", "JAVA_TOOL_OPTIONS", "_JAVA_OPTIONS", "CLASSPATH",
+   "JAVA_COMPILER", "PATH", "USERNAME",
+ 
+   // Env variables that are defined on Solaris/Linux
+-  "LD_LIBRARY_PATH", "LD_PRELOAD", "SHELL", "DISPLAY", 
++  "LD_LIBRARY_PATH", "LD_PRELOAD", "SHELL", "DISPLAY",
+   "HOSTTYPE", "OSTYPE", "ARCH", "MACHTYPE",
+ 
+   // defined on Linux
+@@ -51,7 +48,7 @@
+ //
+ // The default behavior of fatal error handler is to print a brief message
+ // to standard out (defaultStream::output_fd()), then save detailed information
+-// into an error report file (hs_err_pid<pid>.log) and abort VM. If multiple 
++// into an error report file (hs_err_pid<pid>.log) and abort VM. If multiple
+ // threads are having troubles at the same time, only one error is reported.
+ // The thread that is reporting error will abort VM when it is done, all other
+ // threads are blocked forever inside report_and_die().
+@@ -71,7 +68,7 @@
+     _message = "";
+     _filename = NULL;
+     _lineno = 0;
+-    
++
+     _size = 0;
+ }
+ 
+@@ -90,7 +87,7 @@
+     _pc = NULL;
+     _siginfo = NULL;
+     _context = NULL;
+-    
++
+     _size = 0;
+ }
+ 
+@@ -101,15 +98,15 @@
+     _filename = filename;
+     _lineno = lineno;
+     _message = message;
+-    
++
+     _verbose = false;
+     _current_step = 0;
+     _current_step_info = NULL;
+-   
++
+     _pc = NULL;
+     _siginfo = NULL;
+     _context = NULL;
+-    
++
+     _size = size;
+ }
+ 
+@@ -129,7 +126,7 @@
+     _pc = NULL;
+     _siginfo = NULL;
+     _context = NULL;
+-    
++
+     _size = 0;
+ }
+ 
+@@ -190,7 +187,7 @@
+ 
+   if (signame) {
+     jio_snprintf(buf, buflen,
+-                 "%s (0x%x) at pc=" PTR_FORMAT ", pid=%d, tid=" UINTX_FORMAT, 
++                 "%s (0x%x) at pc=" PTR_FORMAT ", pid=%d, tid=" UINTX_FORMAT,
+                  signame, _id, _pc,
+                  os::current_process_id(), os::current_thread_id());
+   } else {
+@@ -201,7 +198,7 @@
+ 
+       jio_snprintf(buf, buflen,
+         "Internal Error at %s:%d, pid=%d, tid=" UINTX_FORMAT " \nError: %s",
+-        p ? p + 1 : _filename, _lineno, 
++        p ? p + 1 : _filename, _lineno,
+         os::current_process_id(), os::current_thread_id(),
+         _message ? _message : "");
+     } else {
+@@ -216,30 +213,30 @@
+ 
+ 
+ // This is the main function to report a fatal error. Only one thread can
+-// call this function, so we don't need to worry about MT-safety. But it's 
+-// possible that the error handler itself may crash or die on an internal 
+-// error, for example, when the stack/heap is badly damaged. We must be 
++// call this function, so we don't need to worry about MT-safety. But it's
++// possible that the error handler itself may crash or die on an internal
++// error, for example, when the stack/heap is badly damaged. We must be
+ // able to handle recursive errors that happen inside error handler.
+-// 
++//
+ // Error reporting is done in several steps. If a crash or internal error
+-// occurred when reporting an error, the nested signal/exception handler 
+-// can skip steps that are already (or partially) done. Error reporting will 
+-// continue from the next step. This allows us to retrieve and print 
++// occurred when reporting an error, the nested signal/exception handler
++// can skip steps that are already (or partially) done. Error reporting will
++// continue from the next step. This allows us to retrieve and print
+ // information that may be unsafe to get after a fatal error. If it happens,
+ // you may find nested report_and_die() frames when you look at the stack
+ // in a debugger.
+ //
+ // In general, a hang in error handler is much worse than a crash or internal
+-// error, as it's harder to recover from a hang. Deadlock can happen if we 
+-// try to grab a lock that is already owned by current thread, or if the 
+-// owner is blocked forever (e.g. in os::infinite_sleep()). If possible, the 
+-// error handler and all the functions it called should avoid grabbing any 
++// error, as it's harder to recover from a hang. Deadlock can happen if we
++// try to grab a lock that is already owned by current thread, or if the
++// owner is blocked forever (e.g. in os::infinite_sleep()). If possible, the
++// error handler and all the functions it called should avoid grabbing any
+ // lock. An important thing to notice is that memory allocation needs a lock.
+ //
+-// We should avoid using large stack allocated buffers. Many errors happen 
+-// when stack space is already low. Making things even worse is that there 
++// We should avoid using large stack allocated buffers. Many errors happen
++// when stack space is already low. Making things even worse is that there
+ // could be nested report_and_die() calls on stack (see above). Only one
+-// thread can report error, so large buffers are statically allocated in data 
++// thread can report error, so large buffers are statically allocated in data
+ // segment.
+ 
+ void VMError::report(outputStream* st) {
+@@ -354,7 +351,7 @@
+      }
+ 
+   STEP(65, "(printing bug submit message)")
+-    
++
+      if (_verbose) print_bug_submit_message(st, _thread);
+ 
+   STEP(70, "(printing thread)" )
+@@ -399,7 +396,7 @@
+ 
+      if (_verbose) {
+        st->print("Stack: ");
+- 
++
+        address stack_top;
+        size_t stack_size;
+ 
+@@ -419,7 +416,7 @@
+ 
+        if (fr.sp()) {
+          st->print(",  sp=" PTR_FORMAT, fr.sp());
+-         st->print(",  free space=%dk", 
++         st->print(",  free space=%dk",
+                      ((intptr_t)fr.sp() - (intptr_t)stack_bottom) >> 10);
+        }
+ 
+@@ -444,7 +441,7 @@
+              if (os::is_first_C_frame(&fr)) break;
+              fr = os::get_sender_for_C_frame(&fr);
+           }
+-                                                                                
++
+           if (count > StackPrintLimit) {
+              st->print_cr("...<more frames>...");
+           }
+@@ -636,7 +633,7 @@
+   // An error could happen before tty is initialized or after it has been
+   // destroyed. Here we use a very simple unbuffered fdStream for printing.
+   // Only out.print_raw() and out.print_raw_cr() should be used, as other
+-  // printing methods need to allocate large buffer on stack. To format a 
++  // printing methods need to allocate large buffer on stack. To format a
+   // string, use jio_snprintf() with a static buffer or use staticBufferStream.
+   static fdStream out(defaultStream::output_fd());
+ 
+@@ -663,7 +660,7 @@
+     if (ShowMessageBoxOnError) {
+       show_message_box(buffer, sizeof(buffer));
+ 
+-      // User has asked JVM to abort. Reset ShowMessageBoxOnError so the 
++      // User has asked JVM to abort. Reset ShowMessageBoxOnError so the
+       // WatcherThread can kill JVM if the error handler hangs.
+       ShowMessageBoxOnError = false;
+     }
+@@ -728,8 +725,8 @@
+       // open log file
+       int fd = -1;
+ 
+-      if (strlen(ErrorFile) > 0) {
+-        bool copy_ok = 
++      if (ErrorFile != NULL) {
++        bool copy_ok =
+           Arguments::copy_expand_pid(ErrorFile, strlen(ErrorFile), buffer, sizeof(buffer));
+         if (copy_ok) {
+           fd = open(buffer, O_WRONLY | O_CREAT | O_TRUNC, 0666);
+@@ -741,7 +738,7 @@
+         size_t len = strlen(cwd);
+         // either user didn't specify, or the user's location failed,
+         // so use the default name in the current directory
+-        jio_snprintf(&buffer[len], sizeof(buffer)-len, "%shs_err_pid%u.log", 
++        jio_snprintf(&buffer[len], sizeof(buffer)-len, "%shs_err_pid%u.log",
+                      os::file_separator(), os::current_process_id());
+         fd = open(buffer, O_WRONLY | O_CREAT | O_TRUNC, 0666);
+       }
+@@ -800,9 +797,9 @@
+ #endif
+       out.print_raw   ("\"");
+       out.print_raw   (cmd);
+-      out.print_raw_cr("\" ..."); 
++      out.print_raw_cr("\" ...");
+ 
+-      fork_and_exec(cmd);
++      os::fork_and_exec(cmd);
+     }
+ 
+     // done with OnError
+@@ -851,7 +848,7 @@
+   tty->print_cr("# java.lang.OutOfMemoryError: %s", _err->message());
+   tty->print_cr("# -XX:OnOutOfMemoryError=\"%s\"", OnOutOfMemoryError);
+ 
+-  // make heap parsability 
++  // make heap parsability
+   Universe::heap()->ensure_parsability(false);  // no need to retire TLABs
+ 
+   char* cmd;
+@@ -859,13 +856,13 @@
+   while ((cmd = next_OnError_command(buffer, sizeof(buffer), &ptr)) != NULL){
+     tty->print("#   Executing ");
+ #if defined(LINUX)
+-    tty->print	("/bin/sh -c ");
++    tty->print  ("/bin/sh -c ");
+ #elif defined(SOLARIS)
+-    tty->print	("/usr/bin/sh -c ");
++    tty->print  ("/usr/bin/sh -c ");
+ #endif
+     tty->print_cr("\"%s\"...", cmd);
+ 
+-    _err->fork_and_exec(cmd);
++    os::fork_and_exec(cmd);
+   }
+ }
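
The block comment above explains why VMError::report is a sequence of
numbered steps: if the error handler itself crashes, the nested handler
re-enters report() and the saved step number lets it resume past everything
already attempted instead of looping forever. A toy model of that resumption
-- the STEP/END macros here are this sketch's own, and the "crash" is
simulated with an early return:

    #include <cstdio>

    static int current_step = 0;   // survives re-entry, like VMError's _current_step
    static bool simulated_crash = true;

    #define STEP(n, what)                          \
      if (current_step < n) { current_step = n;    \
        printf("step %d: %s\n", n, what);
    #define END }

    static void report() {
      STEP(10, "printing error type") END
      STEP(20, "printing stack")
        if (simulated_crash) { simulated_crash = false; return; }  // die mid-step
      END
      STEP(30, "printing environment variables") END
    }

    int main() {
      report();   // first attempt dies during step 20
      report();   // re-entry skips steps 10 and 20, resumes at step 30
      return 0;
    }
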
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/vmError.hpp openjdk/hotspot/src/share/vm/utilities/vmError.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/vmError.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/vmError.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)vmError.hpp	1.17 07/05/05 17:07:11 JVM"
+-#endif
+ /*
+  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ 
+@@ -43,7 +40,7 @@
+ 
+   Thread *     _thread;      // NULL if it's native thread
+ 
+-  
++
+   // additional info for crashes
+   address      _pc;          // faulting PC
+   void *       _siginfo;     // ExceptionRecord on Windows,
+@@ -59,13 +56,10 @@
+   int          _current_step;
+   const char * _current_step_info;
+   int          _verbose;
+-    
++
+   // used by reporting about OOM
+   size_t       _size;
+ 
+-  // run cmd in a separate process and return its exit code; or -1 on failures
+-  int fork_and_exec(char* cmd);
+-
+   // set signal handlers on Solaris/Linux or the default exception filter
+   // on Windows, to handle recursive crashes.
+   void reset_signal_handlers();
+@@ -76,8 +70,8 @@
+   // generate an error report
+   void report(outputStream* st);
+ 
+-  // accessor 
+-  const char* message()		{ return _message; }
++  // accessor
++  const char* message()         { return _message; }
+ 
+ public:
+   // Constructor for crashes
+@@ -107,4 +101,3 @@
+   // signal was not changed by error reporter
+   static address get_resetted_sighandler(int sig);
+ };
+-
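
The member removed above ("run cmd in a separate process and return its exit
code; or -1 on failures") moves onto the os class -- vmError.cpp now calls
os::fork_and_exec(cmd). A minimal POSIX sketch of what such a helper does,
not HotSpot's implementation:

    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int fork_and_exec(const char* cmd) {
      pid_t pid = fork();
      if (pid < 0) return -1;                       // fork failed
      if (pid == 0) {                               // child: run cmd via the shell
        execl("/bin/sh", "sh", "-c", cmd, (char*)0);
        _exit(127);                                 // only reached if exec failed
      }
      int status;
      if (waitpid(pid, &status, 0) < 0) return -1;  // parent: reap the child
      return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
    }

    int main() { return fork_and_exec("echo hello") == 0 ? 0 : 1; }
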
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/workgroup.cpp openjdk/hotspot/src/share/vm/utilities/workgroup.cpp
+--- openjdk6/hotspot/src/share/vm/utilities/workgroup.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/workgroup.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)workgroup.cpp	1.36 07/05/05 17:07:11 JVM"
+-#endif
+ /*
+  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -47,8 +44,8 @@
+ }
+ 
+ WorkGang::WorkGang(const char* name,
+-		   int           workers,
+-		   bool          are_GC_threads) :
++                   int           workers,
++                   bool          are_GC_threads) :
+   AbstractWorkGang(name, are_GC_threads) {
+   // Save arguments.
+   _total_workers = workers;
+@@ -111,16 +108,16 @@
+   while (finished_workers() < total_workers()) {
+     if (TraceWorkGang) {
+       tty->print_cr("Waiting in work gang %s: %d/%d finished sequence %d",
+-		    name(), finished_workers(), total_workers(),
+-		    _sequence_number);
++                    name(), finished_workers(), total_workers(),
++                    _sequence_number);
+     }
+     monitor()->wait(/* no_safepoint_check */ true);
+   }
+   _task = NULL;
+   if (TraceWorkGang) {
+     tty->print_cr("/nFinished work gang %s: %d/%d sequence %d",
+-	          name(), finished_workers(), total_workers(),
+-	          _sequence_number);
++                  name(), finished_workers(), total_workers(),
++                  _sequence_number);
+     }
+ }
+ 
+@@ -136,7 +133,7 @@
+   while (finished_workers() < total_workers()) {
+     if (TraceWorkGang) {
+       tty->print_cr("Waiting in work gang %s: %d/%d finished",
+-		    name(), finished_workers(), total_workers());
++                    name(), finished_workers(), total_workers());
+     }
+     monitor()->wait(/* no_safepoint_check */ true);
+   }
+@@ -174,7 +171,7 @@
+   for (uint i = 0; i < num_thr; i++) {
+     tc->do_thread(gang_worker(i));
+   }
+-}  
++}
+ 
+ // GangWorker methods.
+ 
+@@ -195,12 +192,12 @@
+   os::set_priority(this, NearMaxPriority);
+   if (TraceWorkGang) {
+     tty->print_cr("Running gang worker for gang %s id %d",
+-		  gang()->name(), id());
++                  gang()->name(), id());
+   }
+   // The VM thread should not execute here because MutexLocker's are used
+   // as (opposed to MutexLockerEx's).
+   assert(!Thread::current()->is_VM_thread(), "VM thread should not be part"
+-	 " of a work gang");
++         " of a work gang");
+ }
+ 
+ void GangWorker::loop() {
+@@ -217,64 +214,64 @@
+       // in the outer loop.
+       gang()->internal_worker_poll(&data);
+       if (TraceWorkGang) {
+-	tty->print("Polled outside for work in gang %s worker %d",
+-		   gang()->name(), id());
+-	tty->print("  terminate: %s",
+-		   data.terminate() ? "true" : "false");
+-	tty->print("  sequence: %d (prev: %d)",
+-		   data.sequence_number(), previous_sequence_number);
+-	if (data.task() != NULL) {
+-	  tty->print("  task: %s", data.task()->name());
+-	} else {
+-	  tty->print("  task: NULL");
+-	}
+-	tty->cr();
++        tty->print("Polled outside for work in gang %s worker %d",
++                   gang()->name(), id());
++        tty->print("  terminate: %s",
++                   data.terminate() ? "true" : "false");
++        tty->print("  sequence: %d (prev: %d)",
++                   data.sequence_number(), previous_sequence_number);
++        if (data.task() != NULL) {
++          tty->print("  task: %s", data.task()->name());
++        } else {
++          tty->print("  task: NULL");
++        }
++        tty->cr();
+       }
+       for ( ; /* break or return */; ) {
+-	// Terminate if requested.
+-	if (data.terminate()) {
+-	  gang()->internal_note_finish();
+-	  gang_monitor->notify_all();
+-	  return;
+-	}
+-	// Check for new work.
+-	if ((data.task() != NULL) &&
+-	    (data.sequence_number() != previous_sequence_number)) {
+-	  gang()->internal_note_start();
+-	  gang_monitor->notify_all();
+-	  part = gang()->started_workers() - 1;
+-	  break;
+-	}
+-	// Nothing to do.
+-	gang_monitor->wait(/* no_safepoint_check */ true);
+-	gang()->internal_worker_poll(&data);
+-	if (TraceWorkGang) {
+-	  tty->print("Polled inside for work in gang %s worker %d",
+-		     gang()->name(), id());
+-	  tty->print("  terminate: %s",
+-		     data.terminate() ? "true" : "false");
+-	  tty->print("  sequence: %d (prev: %d)",
+-		     data.sequence_number(), previous_sequence_number);
+-	  if (data.task() != NULL) {
+-	    tty->print("  task: %s", data.task()->name());
+-	  } else {
+-	    tty->print("  task: NULL");
+-	  }
+-	  tty->cr();
+-	}
++        // Terminate if requested.
++        if (data.terminate()) {
++          gang()->internal_note_finish();
++          gang_monitor->notify_all();
++          return;
++        }
++        // Check for new work.
++        if ((data.task() != NULL) &&
++            (data.sequence_number() != previous_sequence_number)) {
++          gang()->internal_note_start();
++          gang_monitor->notify_all();
++          part = gang()->started_workers() - 1;
++          break;
++        }
++        // Nothing to do.
++        gang_monitor->wait(/* no_safepoint_check */ true);
++        gang()->internal_worker_poll(&data);
++        if (TraceWorkGang) {
++          tty->print("Polled inside for work in gang %s worker %d",
++                     gang()->name(), id());
++          tty->print("  terminate: %s",
++                     data.terminate() ? "true" : "false");
++          tty->print("  sequence: %d (prev: %d)",
++                     data.sequence_number(), previous_sequence_number);
++          if (data.task() != NULL) {
++            tty->print("  task: %s", data.task()->name());
++          } else {
++            tty->print("  task: NULL");
++          }
++          tty->cr();
++        }
+       }
+       // Drop gang mutex.
+     }
+     if (TraceWorkGang) {
+       tty->print("Work for work gang %s id %d task %s part %d",
+-		 gang()->name(), id(), data.task()->name(), part);
++                 gang()->name(), id(), data.task()->name(), part);
+     }
+     assert(data.task() != NULL, "Got null task");
+     data.task()->work(part);
+     {
+       if (TraceWorkGang) {
+-	tty->print("Finish for work gang %s id %d task %s part %d",
+-		   gang()->name(), id(), data.task()->name(), part);
++        tty->print("Finish for work gang %s id %d task %s part %d",
++                   gang()->name(), id(), data.task()->name(), part);
+       }
+       // Grab the gang mutex.
+       MutexLocker ml(gang_monitor);
+@@ -357,7 +354,7 @@
+ void SubTasksDone::set_par_threads(int t) {
+ #ifdef ASSERT
+   assert(_claimed == 0 || _threads_completed == _n_threads,
+-	 "should not be called while tasks are being processed!");
++         "should not be called while tasks are being processed!");
+ #endif
+   _n_threads = (t == 0 ? 1 : t);
+ }
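
GangWorker::loop and the WorkGang methods above follow a simple dispatch
protocol: the coordinator publishes a task and bumps a sequence number under
the gang monitor, each worker compares that number against the last one it
executed so it never re-runs the same task, and started/finished counters
tell the coordinator when the gang is done. A compact model of the protocol,
with std::thread and std::condition_variable standing in for HotSpot's
Monitor and MutexLocker:

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>
    #include <vector>

    struct GangModel {
      std::mutex m;
      std::condition_variable cv;
      int seq = 0, started = 0, finished = 0, total = 0;
      bool terminate = false;
      void (*task)(int part) = nullptr;

      void run_task(void (*t)(int)) {
        std::unique_lock<std::mutex> l(m);
        task = t; seq++; started = finished = 0;   // publish task + sequence
        cv.notify_all();
        cv.wait(l, [this] { return finished == total; });
        task = nullptr;
      }
      void worker() {
        int prev_seq = 0;
        for (;;) {
          int part; void (*t)(int);
          { std::unique_lock<std::mutex> l(m);
            // Sleep until told to terminate or a new sequence number appears.
            cv.wait(l, [&] { return terminate || (task && seq != prev_seq); });
            if (terminate) return;
            prev_seq = seq; part = started++; t = task;  // claim a part
          }
          t(part);                                 // work outside the monitor
          { std::lock_guard<std::mutex> l(m); finished++; }
          cv.notify_all();
        }
      }
      void stop() {
        { std::lock_guard<std::mutex> l(m); terminate = true; }
        cv.notify_all();
      }
    };

    int main() {
      GangModel g; g.total = 3;
      std::vector<std::thread> ws;
      for (int i = 0; i < g.total; i++) ws.emplace_back([&] { g.worker(); });
      g.run_task([](int part) { printf("worker ran part %d\n", part); });
      g.stop();
      for (auto& w : ws) w.join();
      return 0;
    }
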
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/workgroup.hpp openjdk/hotspot/src/share/vm/utilities/workgroup.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/workgroup.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/workgroup.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)workgroup.hpp	1.22 07/05/05 17:07:11 JVM"
+-#endif
+ /*
+  * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Forward declarations of classes defined here
+@@ -263,10 +260,10 @@
+   jint _threads_completed;
+ #ifdef ASSERT
+   jint _claimed;
+-#endif  
++#endif
+ 
+   // Set all tasks to unclaimed.
+-  void clear(); 
++  void clear();
+ 
+ public:
+   // Initializes "this" to a state in which there are "n" tasks to be
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/xmlstream.cpp openjdk/hotspot/src/share/vm/utilities/xmlstream.cpp
+--- openjdk6/hotspot/src/share/vm/utilities/xmlstream.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/xmlstream.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)xmlstream.cpp	1.17 07/05/05 17:07:12 JVM"
+-#endif
+ /*
+  * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/xmlstream.hpp openjdk/hotspot/src/share/vm/utilities/xmlstream.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/xmlstream.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/xmlstream.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)xmlstream.hpp	1.12 07/05/05 17:07:11 JVM"
+-#endif
+ /*
+  * Copyright 2002-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ class xmlStream;
+@@ -133,17 +130,17 @@
+   }
+ 
+   // commonly used XML attributes
+-  void	        stamp();                 // stamp='1.234'
+-  void	        method(methodHandle m);  // method='k n s' ...
+-  void	        klass(KlassHandle k);    // klass='name'
+-  void	        name(symbolHandle s);    // name='name'
++  void          stamp();                 // stamp='1.234'
++  void          method(methodHandle m);  // method='k n s' ...
++  void          klass(KlassHandle k);    // klass='name'
++  void          name(symbolHandle s);    // name='name'
+   void          object(const char* attr, Handle val);
+ 
+   // print the text alone (sans ''):
+   void          method_text(methodHandle m);
+-  void	        klass_text(KlassHandle k);    // klass='name'
+-  void	        name_text(symbolHandle s);    // name='name'
+-  void	        object_text(Handle x);
++  void          klass_text(KlassHandle k);    // klass='name'
++  void          name_text(symbolHandle s);    // name='name'
++  void          object_text(Handle x);
+ 
+   /*  Example uses:
+ 
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/yieldingWorkgroup.cpp openjdk/hotspot/src/share/vm/utilities/yieldingWorkgroup.cpp
+--- openjdk6/hotspot/src/share/vm/utilities/yieldingWorkgroup.cpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/yieldingWorkgroup.cpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)yieldingWorkgroup.cpp	1.11 07/05/05 17:07:11 JVM"
+-#endif
+ /*
+  * Copyright 2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -70,7 +67,7 @@
+ // immediately with no actual work having been done by the workers.
+ /////////////////////
+ // Implementatiuon notes: remove before checking XXX
+-/* 
++/*
+ Each gang is working on a task at a certain time.
+ Some subset of workers may have yielded and some may
+ have finished their quota of work. Until this task has
+@@ -150,13 +147,13 @@
+   assert(_finished_workers == 0, "Tabula rasa non");
+   assert(_yielded_workers == 0, "Tabula rasa non");
+   yielding_task()->set_status(ACTIVE);
+-  
++
+   // Wake up all the workers, the first few will get to work,
+   // and the rest will go back to sleep
+   monitor()->notify_all();
+   wait_for_gang();
+ }
+-  
++
+ void YieldingFlexibleWorkGang::wait_for_gang() {
+ 
+   assert(monitor()->owned_by_self(), "Data race");
+@@ -236,7 +233,7 @@
+   } else {
+     yielding_task()->set_status(YIELDING);
+   }
+-  
++
+   while (true) {
+     switch (yielding_task()->status()) {
+       case YIELDING:
+@@ -325,7 +322,7 @@
+       assert(gang()->task() == NULL, "No task binding");
+       // set_status(TERMINATED);
+       return;
+-    } else if (data.task() != NULL && 
++    } else if (data.task() != NULL &&
+                data.sequence_number() != previous_sequence_number) {
+       // There is work to be done.
+       // First check if we need to become active or if there
+diff -ruN openjdk6/hotspot/src/share/vm/utilities/yieldingWorkgroup.hpp openjdk/hotspot/src/share/vm/utilities/yieldingWorkgroup.hpp
+--- openjdk6/hotspot/src/share/vm/utilities/yieldingWorkgroup.hpp	2008-02-28 05:02:44.000000000 -0500
++++ openjdk/hotspot/src/share/vm/utilities/yieldingWorkgroup.hpp	2008-01-31 09:19:01.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)yieldingWorkgroup.hpp	1.10 07/05/05 17:07:12 JVM"
+-#endif
+ /*
+  * Copyright 2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ 
+@@ -88,7 +85,7 @@
+     _requested_size(0) { }
+ 
+   virtual ~YieldingFlexibleGangTask() { }
+-  
++
+   friend class YieldingFlexibleWorkGang;
+   friend class YieldingFlexibleGangWorker;
+   NOT_PRODUCT(virtual bool is_YieldingFlexibleGang_task() const {
+@@ -166,7 +163,7 @@
+   }
+   void start_task(YieldingFlexibleGangTask* new_task);
+   void continue_task(YieldingFlexibleGangTask* gang_task);
+-  
++
+   // Abort a currently running task, if any; returns when all the workers
+   // have stopped working on the current task and have returned to their
+   // waiting stations.
+@@ -189,7 +186,7 @@
+   int _active_workers;
+   int _yielded_workers;
+   void wait_for_gang();
+-  
++
+ public:
+   // Accessors for fields
+   int active_workers() const {
+diff -ruN openjdk6/hotspot/src/os/linux/launcher/java.c openjdk/hotspot/src/os/linux/launcher/java.c
+--- openjdk6/hotspot/src/os/linux/launcher/java.c	2008-02-28 05:02:29.000000000 -0500
++++ openjdk/hotspot/src/os/linux/launcher/java.c	2008-01-31 09:19:00.000000000 -0500
+@@ -19,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ /*
+@@ -27,7 +27,7 @@
+  * search "GAMMA" for gamma specific changes.
+  *
+  * GAMMA: gamma launcher is much simpler than regular java launcher in that
+- *        JVM is either statically linked in or it is installed in the 
++ *        JVM is either statically linked in or it is installed in the
+  *        same directory where the launcher exists, so we don't have to
+  *        worry about choosing the right JVM based on command line flag, jar
+  *        file and/or ergonomics. Intead of removing unused logic from source
+@@ -132,9 +132,9 @@
+ static void SetClassPath(char *s);
+ static void SelectVersion(int argc, char **argv, char **main_class);
+ static jboolean ParseArguments(int *pargc, char ***pargv, char **pjarfile,
+-			       char **pclassname, int *pret);
++                               char **pclassname, int *pret);
+ static jboolean InitializeJVM(JavaVM **pvm, JNIEnv **penv,
+-			      InvocationFunctions *ifn);
++                              InvocationFunctions *ifn);
+ static jstring NewPlatformString(JNIEnv *env, char *s);
+ static jobjectArray NewPlatformStringArray(JNIEnv *env, char **strv, int strc);
+ static jclass LoadClass(JNIEnv *env, char *name);
+@@ -156,15 +156,15 @@
+ static void SetPaths(int argc, char **argv);
+ 
+ /* Maximum supported entries from jvm.cfg. */
+-#define INIT_MAX_KNOWN_VMS	10
+-/* Values for vmdesc.flag */ 
+-#define VM_UNKNOWN		-1
+-#define VM_KNOWN		 0
+-#define VM_ALIASED_TO		 1
+-#define VM_WARN			 2
+-#define VM_ERROR		 3
+-#define VM_IF_SERVER_CLASS	 4
+-#define VM_IGNORE		 5
++#define INIT_MAX_KNOWN_VMS      10
++/* Values for vmdesc.flag */
++#define VM_UNKNOWN              -1
++#define VM_KNOWN                 0
++#define VM_ALIASED_TO            1
++#define VM_WARN                  2
++#define VM_ERROR                 3
++#define VM_IF_SERVER_CLASS       4
++#define VM_IGNORE                5
+ struct vmdesc {
+     char *name;
+     int flag;
+@@ -177,7 +177,7 @@
+ 
+ static void GrowKnownVMs();
+ static int  KnownVMIndex(const char* name);
+-static void FreeKnownVMs(); 
++static void FreeKnownVMs();
+ 
+ jboolean ServerClassMachine();
+ 
+@@ -206,7 +206,7 @@
+     char jrepath[MAXPATHLEN], jvmpath[MAXPATHLEN];
+     char ** original_argv = argv;
+ 
+-    /* 
++    /*
+      * Error message to print or display; by default the message will
+      * only be displayed in a window.
+      */
+@@ -214,8 +214,8 @@
+     jboolean messageDest = JNI_FALSE;
+ 
+     if (getenv("_JAVA_LAUNCHER_DEBUG") != 0) {
+-	_launcher_debug = JNI_TRUE;
+-	printf("----_JAVA_LAUNCHER_DEBUG----\n");
++        _launcher_debug = JNI_TRUE;
++        printf("----_JAVA_LAUNCHER_DEBUG----\n");
+     }
+ 
+ #ifndef GAMMA
+@@ -223,18 +223,18 @@
+      * Make sure the specified version of the JRE is running.
+      *
+      * There are three things to note about the SelectVersion() routine:
+-     *	1) If the version running isn't correct, this routine doesn't
+-     *	   return (either the correct version has been exec'd or an error
+-     *	   was issued).
++     *  1) If the version running isn't correct, this routine doesn't
++     *     return (either the correct version has been exec'd or an error
++     *     was issued).
+      *  2) Argc and Argv in this scope are *not* altered by this routine.
+-     *	   It is the responsibility of subsequent code to ignore the
+-     *	   arguments handled by this routine.
++     *     It is the responsibility of subsequent code to ignore the
++     *     arguments handled by this routine.
+      *  3) As a side-effect, the variable "main_class" is guaranteed to
+      *     be set (if it should ever be set).  This isn't exactly the
+-     *	   poster child for structured programming, but it is a small
+-     *	   price to pay for not processing a jar file operand twice.
+-     *	   (Note: This side effect has been disabled.  See comment on
+-     *	   bugid 5030265 below.)
++     *     poster child for structured programming, but it is a small
++     *     price to pay for not processing a jar file operand twice.
++     *     (Note: This side effect has been disabled.  See comment on
++     *     bugid 5030265 below.)
+      */
+     SelectVersion(argc, argv, &main_class);
+ #endif /* ifndef GAMMA */
+@@ -244,13 +244,13 @@
+       int i;
+       original_argv = (char**)MemAlloc(sizeof(char*)*(argc+1));
+       for(i = 0; i < argc+1; i++)
+-	original_argv[i] = argv[i];
++        original_argv[i] = argv[i];
+     }
+ 
+     CreateExecutionEnvironment(&argc, &argv,
+-			       jrepath, sizeof(jrepath),
+-			       jvmpath, sizeof(jvmpath),
+-			       original_argv);
++                               jrepath, sizeof(jrepath),
++                               jvmpath, sizeof(jvmpath),
++                               original_argv);
+     ifn.CreateJavaVM = 0;
+     ifn.GetDefaultJavaVMInitArgs = 0;
+ 
+@@ -262,9 +262,9 @@
+     if (_launcher_debug) {
+       end   = CounterGet();
+       printf("%ld micro seconds to LoadJavaVM\n",
+-	     (long)(jint)Counter2Micros(end-start));
++             (long)(jint)Counter2Micros(end-start));
+     }
+-    
++
+ #ifdef JAVA_ARGS  /* javac, jar and friends. */
+     progname = "java";
+ #else             /* java, oldjava, javaw and friends */
+@@ -273,7 +273,7 @@
+ #else
+     progname = *argv;
+     if ((s = strrchr(progname, FILE_SEPARATOR)) != 0) {
+-	progname = s + 1;
++        progname = s + 1;
+     }
+ #endif /* PROGNAME */
+ #endif /* JAVA_ARGS */
+@@ -284,19 +284,19 @@
+     /* Preprocess wrapper arguments */
+     TranslateDashJArgs(&argc, &argv);
+     if (!AddApplicationOptions()) {
+-	exit(1);
++        exit(1);
+     }
+ #endif
+ 
+     /* Set default CLASSPATH */
+     if ((s = getenv("CLASSPATH")) == 0) {
+-	s = ".";
++        s = ".";
+     }
+ #ifndef JAVA_ARGS
+     SetClassPath(s);
+ #endif
+ 
+-    /* 
++    /*
+      *  Parse command line options; if the return value of
+      *  ParseArguments is false, the program should exit.
+      */
+@@ -306,7 +306,7 @@
+ 
+     /* Override class path if -jar flag was specified */
+     if (jarfile != 0) {
+-	SetClassPath(jarfile);
++        SetClassPath(jarfile);
+     }
+ 
+     /* set the -Dsun.java.command pseudo property */
+@@ -326,34 +326,34 @@
+     /* Initialize the virtual machine */
+ 
+     if (_launcher_debug)
+-	start = CounterGet();
++        start = CounterGet();
+     if (!InitializeJVM(&vm, &env, &ifn)) {
+-	ReportErrorMessage("Could not create the Java virtual machine.",
+-			   JNI_TRUE);
+-	exit(1);
++        ReportErrorMessage("Could not create the Java virtual machine.",
++                           JNI_TRUE);
++        exit(1);
+     }
+ 
+     if (printVersion || showVersion) {
+         PrintJavaVersion(env);
+-	if ((*env)->ExceptionOccurred(env)) {
+-	    ReportExceptionDescription(env);
+-	    goto leave;
+-	}
+-	if (printVersion) {
+-	    ret = 0;
+-	    message = NULL;
+-	    goto leave;
+-	}
+-	if (showVersion) {
+-	    fprintf(stderr, "\n");
+-	}
++        if ((*env)->ExceptionOccurred(env)) {
++            ReportExceptionDescription(env);
++            goto leave;
++        }
++        if (printVersion) {
++            ret = 0;
++            message = NULL;
++            goto leave;
++        }
++        if (showVersion) {
++            fprintf(stderr, "\n");
++        }
+     }
+ 
+     /* If the user specified neither a class name nor a JAR file */
+     if (jarfile == 0 && classname == 0) {
+-	PrintUsage();
+-	message = NULL;
+-	goto leave;
++        PrintUsage();
++        message = NULL;
++        goto leave;
+     }
+ 
+ #ifndef GAMMA
+@@ -361,19 +361,19 @@
+ #endif
+ 
+     if (_launcher_debug) {
+-	end   = CounterGet();
+-	printf("%ld micro seconds to InitializeJVM\n",
+-	       (long)(jint)Counter2Micros(end-start));
++        end   = CounterGet();
++        printf("%ld micro seconds to InitializeJVM\n",
++               (long)(jint)Counter2Micros(end-start));
+     }
+ 
+     /* At this stage, argc/argv have the applications' arguments */
+     if (_launcher_debug) {
+-	int i = 0;
+-	printf("Main-Class is '%s'\n", classname ? classname : "");
+-	printf("Apps' argc is %d\n", argc);
+-	for (; i < argc; i++) {
+-	    printf("    argv[%2d] = '%s'\n", i, argv[i]);
+-	}
++        int i = 0;
++        printf("Main-Class is '%s'\n", classname ? classname : "");
++        printf("Apps' argc is %d\n", argc);
++        for (; i < argc; i++) {
++            printf("    argv[%2d] = '%s'\n", i, argv[i]);
++        }
+     }
+ 
+     ret = 1;
+@@ -392,109 +392,109 @@
+      * launcher should never be enhanced.
+      *
+      * Hence, future work should either:
+-     *	   1)	Correct the local parsing code and verify that the
+-     *		Main-Class attribute gets properly passed through
+-     *		all environments,
+-     *	   2)	Remove the vestages of maintaining main_class through
+-     *		the environment (and remove these comments).
++     *     1)   Correct the local parsing code and verify that the
++     *          Main-Class attribute gets properly passed through
++     *          all environments,
++     *     2)   Remove the vestages of maintaining main_class through
++     *          the environment (and remove these comments).
+      */
+     if (jarfile != 0) {
+-	mainClassName = GetMainClassName(env, jarfile);
+-	if ((*env)->ExceptionOccurred(env)) {
+-	    ReportExceptionDescription(env);
+-	    goto leave;
+-	}
+-	if (mainClassName == NULL) {
+-	  const char * format = "Failed to load Main-Class manifest "
+-	                        "attribute from\n%s";
+-	  message = (char*)MemAlloc((strlen(format) + strlen(jarfile)) *
+-				    sizeof(char));
+-	  sprintf(message, format, jarfile);
+-	  messageDest = JNI_TRUE;
+-	  goto leave;
+-	}
+-	classname = (char *)(*env)->GetStringUTFChars(env, mainClassName, 0);
+-	if (classname == NULL) {
+-	    ReportExceptionDescription(env);
+-	    goto leave;
+-	}
+-	mainClass = LoadClass(env, classname);
+-	if(mainClass == NULL) { /* exception occured */
+-	    ReportExceptionDescription(env);
+-	    message = "Could not find the main class.  Program will exit.";
+-	    goto leave;
+-	}
+-	(*env)->ReleaseStringUTFChars(env, mainClassName, classname);
++        mainClassName = GetMainClassName(env, jarfile);
++        if ((*env)->ExceptionOccurred(env)) {
++            ReportExceptionDescription(env);
++            goto leave;
++        }
++        if (mainClassName == NULL) {
++          const char * format = "Failed to load Main-Class manifest "
++                                "attribute from\n%s";
++          message = (char*)MemAlloc((strlen(format) + strlen(jarfile)) *
++                                    sizeof(char));
++          sprintf(message, format, jarfile);
++          messageDest = JNI_TRUE;
++          goto leave;
++        }
++        classname = (char *)(*env)->GetStringUTFChars(env, mainClassName, 0);
++        if (classname == NULL) {
++            ReportExceptionDescription(env);
++            goto leave;
++        }
++        mainClass = LoadClass(env, classname);
++        if(mainClass == NULL) { /* exception occured */
++            ReportExceptionDescription(env);
++            message = "Could not find the main class.  Program will exit.";
++            goto leave;
++        }
++        (*env)->ReleaseStringUTFChars(env, mainClassName, classname);
+     } else {
+       mainClassName = NewPlatformString(env, classname);
+       if (mainClassName == NULL) {
+-	const char * format = "Failed to load Main Class: %s";
+-	message = (char *)MemAlloc((strlen(format) + strlen(classname)) * 
+-				   sizeof(char) );
+-	sprintf(message, format, classname); 
+-	messageDest = JNI_TRUE;
+-	goto leave;
++        const char * format = "Failed to load Main Class: %s";
++        message = (char *)MemAlloc((strlen(format) + strlen(classname)) *
++                                   sizeof(char) );
++        sprintf(message, format, classname);
++        messageDest = JNI_TRUE;
++        goto leave;
+       }
+       classname = (char *)(*env)->GetStringUTFChars(env, mainClassName, 0);
+       if (classname == NULL) {
+-	ReportExceptionDescription(env);
+-	goto leave;
++        ReportExceptionDescription(env);
++        goto leave;
+       }
+       mainClass = LoadClass(env, classname);
+       if(mainClass == NULL) { /* exception occured */
+-	ReportExceptionDescription(env);
+-	message = "Could not find the main class. Program will exit.";
+-	goto leave;
++        ReportExceptionDescription(env);
++        message = "Could not find the main class. Program will exit.";
++        goto leave;
+       }
+       (*env)->ReleaseStringUTFChars(env, mainClassName, classname);
+     }
+ 
+     /* Get the application's main method */
+     mainID = (*env)->GetStaticMethodID(env, mainClass, "main",
+-				       "([Ljava/lang/String;)V");
++                                       "([Ljava/lang/String;)V");
+     if (mainID == NULL) {
+-	if ((*env)->ExceptionOccurred(env)) {
+-	    ReportExceptionDescription(env);
+-	} else {
+-	  message = "No main method found in specified class.";
+-	  messageDest = JNI_TRUE;
+-	}
+-	goto leave;
++        if ((*env)->ExceptionOccurred(env)) {
++            ReportExceptionDescription(env);
++        } else {
++          message = "No main method found in specified class.";
++          messageDest = JNI_TRUE;
++        }
++        goto leave;
+     }
+ 
+     {    /* Make sure the main method is public */
+-	jint mods;
+-	jmethodID mid;
+-	jobject obj = (*env)->ToReflectedMethod(env, mainClass, 
+-						mainID, JNI_TRUE);
+-
+-	if( obj == NULL) { /* exception occurred */
+-	    ReportExceptionDescription(env);
+-	    goto leave;
+-	}
+-
+-	mid = 
+-	  (*env)->GetMethodID(env, 
+-			      (*env)->GetObjectClass(env, obj),
+-			      "getModifiers", "()I");
+-	if ((*env)->ExceptionOccurred(env)) {
+-	    ReportExceptionDescription(env);
+-	    goto leave;
+-	}
+-
+-	mods = (*env)->CallIntMethod(env, obj, mid);
+-	if ((mods & 1) == 0) { /* if (!Modifier.isPublic(mods)) ... */
+-	    message = "Main method not public.";
+-	    messageDest = JNI_TRUE;
+-	    goto leave;
+-	}
++        jint mods;
++        jmethodID mid;
++        jobject obj = (*env)->ToReflectedMethod(env, mainClass,
++                                                mainID, JNI_TRUE);
++
++        if( obj == NULL) { /* exception occurred */
++            ReportExceptionDescription(env);
++            goto leave;
++        }
++
++        mid =
++          (*env)->GetMethodID(env,
++                              (*env)->GetObjectClass(env, obj),
++                              "getModifiers", "()I");
++        if ((*env)->ExceptionOccurred(env)) {
++            ReportExceptionDescription(env);
++            goto leave;
++        }
++
++        mods = (*env)->CallIntMethod(env, obj, mid);
++        if ((mods & 1) == 0) { /* if (!Modifier.isPublic(mods)) ... */
++            message = "Main method not public.";
++            messageDest = JNI_TRUE;
++            goto leave;
++        }
+     }
+ 
+     /* Build argument array */
+     mainArgs = NewPlatformStringArray(env, argv, argc);
+     if (mainArgs == NULL) {
+-	ReportExceptionDescription(env);
+-	goto leave;
++        ReportExceptionDescription(env);
++        goto leave;
+     }
+ 
+     /* Invoke main method. */
+@@ -514,10 +514,10 @@
+      * launcher's return code except by calling System.exit.
+      */
+     if ((*vm)->DetachCurrentThread(vm) != 0) {
+-	message = "Could not detach main thread.";
+-	messageDest = JNI_TRUE;
+-	ret = 1;
+-	goto leave;
++        message = "Could not detach main thread.";
++        messageDest = JNI_TRUE;
++        ret = 1;
++        goto leave;
+     }
+ 
+     message = NULL;
+@@ -566,48 +566,48 @@
+     newArgv[newArgvIdx++] = (*argv)[0];
+ 
+     for (argi = 1; argi < argc; argi++) {
+-	char *arg = (*argv)[argi];
++        char *arg = (*argv)[argi];
+         isVMType = 0;
+ 
+ #ifdef JAVA_ARGS
+- 	if (arg[0] != '-') {
++        if (arg[0] != '-') {
+             newArgv[newArgvIdx++] = arg;
+             continue;
+         }
+ #else
+- 	if (strcmp(arg, "-classpath") == 0 || 
+- 	    strcmp(arg, "-cp") == 0) {
++        if (strcmp(arg, "-classpath") == 0 ||
++            strcmp(arg, "-cp") == 0) {
+             newArgv[newArgvIdx++] = arg;
+- 	    argi++;
++            argi++;
+             if (argi < argc) {
+                 newArgv[newArgvIdx++] = (*argv)[argi];
+             }
+- 	    continue;
+- 	}
+- 	if (arg[0] != '-') break;
++            continue;
++        }
++        if (arg[0] != '-') break;
+ #endif
+ 
+- 	/* Did the user pass an explicit VM type? */
+-	i = KnownVMIndex(arg);
+-	if (i >= 0) {
+-	    jvmtype = knownVMs[jvmidx = i].name + 1; /* skip the - */
+-	    isVMType = 1;
+-	    *pargc = *pargc - 1;
+-	}
+-
+-	/* Did the user specify an "alternate" VM? */
+-	else if (strncmp(arg, "-XXaltjvm=", 10) == 0 || strncmp(arg, "-J-XXaltjvm=", 12) == 0) {
+-	    isVMType = 1;
+-	    jvmtype = arg+((arg[1]=='X')? 10 : 12);
+-	    jvmidx = -1;
+-	}
++        /* Did the user pass an explicit VM type? */
++        i = KnownVMIndex(arg);
++        if (i >= 0) {
++            jvmtype = knownVMs[jvmidx = i].name + 1; /* skip the - */
++            isVMType = 1;
++            *pargc = *pargc - 1;
++        }
++
++        /* Did the user specify an "alternate" VM? */
++        else if (strncmp(arg, "-XXaltjvm=", 10) == 0 || strncmp(arg, "-J-XXaltjvm=", 12) == 0) {
++            isVMType = 1;
++            jvmtype = arg+((arg[1]=='X')? 10 : 12);
++            jvmidx = -1;
++        }
+ 
+         if (!isVMType) {
+             newArgv[newArgvIdx++] = arg;
+         }
+     }
+ 
+-    /* 
++    /*
+      * Finish copying the arguments if we aborted the above loop.
+      * NOTE that if we aborted via "break" then we did NOT copy the
+      * last argument above, and in addition argi will be less than
+@@ -644,55 +644,55 @@
+       return jvmtype;
+ 
+     /* Resolve aliases first */
+-    {    
++    {
+       int loopCount = 0;
+       while (knownVMs[jvmidx].flag == VM_ALIASED_TO) {
+         int nextIdx = KnownVMIndex(knownVMs[jvmidx].alias);
+ 
+         if (loopCount > knownVMsCount) {
+-	  if (!speculative) {
+-	    ReportErrorMessage("Error: Corrupt jvm.cfg file; cycle in alias list.",
+-			       JNI_TRUE);
+-	    exit(1);
+-	  } else {
+-	    return "ERROR";
+-	    /* break; */
+-	  }
++          if (!speculative) {
++            ReportErrorMessage("Error: Corrupt jvm.cfg file; cycle in alias list.",
++                               JNI_TRUE);
++            exit(1);
++          } else {
++            return "ERROR";
++            /* break; */
++          }
+         }
+ 
+         if (nextIdx < 0) {
+-	  if (!speculative) {
++          if (!speculative) {
+             ReportErrorMessage2("Error: Unable to resolve VM alias %s",
+-				knownVMs[jvmidx].alias, JNI_TRUE);
++                                knownVMs[jvmidx].alias, JNI_TRUE);
+             exit(1);
+-	  } else {
+-	    return "ERROR";
+-	  }
++          } else {
++            return "ERROR";
++          }
+         }
+         jvmidx = nextIdx;
+         jvmtype = knownVMs[jvmidx].name+1;
+-	loopCount++;
++        loopCount++;
+       }
+     }
+ 
+     switch (knownVMs[jvmidx].flag) {
+     case VM_WARN:
+         if (!speculative) {
+-	    fprintf(stderr, "Warning: %s VM not supported; %s VM will be used\n", 
+-		    jvmtype, knownVMs[0].name + 1);
++            fprintf(stderr, "Warning: %s VM not supported; %s VM will be used\n",
++                    jvmtype, knownVMs[0].name + 1);
+         }
+-	/* fall through */
++        /* fall through */
+     case VM_IGNORE:
+-	jvmtype = knownVMs[jvmidx=0].name + 1;
+-	/* fall through */
++        jvmtype = knownVMs[jvmidx=0].name + 1;
++        /* fall through */
+     case VM_KNOWN:
+-	break;
++        break;
+     case VM_ERROR:
+         if (!speculative) {
+-	    ReportErrorMessage2("Error: %s VM not supported", jvmtype, JNI_TRUE);
+-	    exit(1);
++            ReportErrorMessage2("Error: %s VM not supported", jvmtype, JNI_TRUE);
++            exit(1);
+         } else {
+-	    return "ERROR";
++            return "ERROR";
+         }
+     }
+ 
+@@ -711,17 +711,17 @@
+      * VM option.
+      */
+     if (numOptions >= maxOptions) {
+-	if (options == 0) {
+-	    maxOptions = 4;
+-	    options = MemAlloc(maxOptions * sizeof(JavaVMOption));
+-	} else {
+-	    JavaVMOption *tmp;
+-	    maxOptions *= 2;
+-	    tmp = MemAlloc(maxOptions * sizeof(JavaVMOption));
+-	    memcpy(tmp, options, numOptions * sizeof(JavaVMOption));
+-	    free(options);
+-	    options = tmp;
+-	}
++        if (options == 0) {
++            maxOptions = 4;
++            options = MemAlloc(maxOptions * sizeof(JavaVMOption));
++        } else {
++            JavaVMOption *tmp;
++            maxOptions *= 2;
++            tmp = MemAlloc(maxOptions * sizeof(JavaVMOption));
++            memcpy(tmp, options, numOptions * sizeof(JavaVMOption));
++            free(options);
++            options = tmp;
++        }
+     }
+     options[numOptions].optionString = str;
+     options[numOptions++].extraInfo = info;
+@@ -752,7 +752,7 @@
+     char    *version = NULL;
+     char    *jre = NULL;
+     int     jarflag = 0;
+-    int     restrict_search = -1;		/* -1 implies not known */
++    int     restrict_search = -1;               /* -1 implies not known */
+     manifest_info info;
+     char    env_entry[MAXNAMELEN + 24] = ENV_ENTRY "=";
+     char    *env_in;
+@@ -764,9 +764,9 @@
+      * simply return.
+      */
+     if ((env_in = getenv(ENV_ENTRY)) != NULL) {
+-	if (*env_in != '\0')
+-	    *main_class = strdup(env_in);
+-	return;
++        if (*env_in != '\0')
++            *main_class = strdup(env_in);
++        return;
+     }
+ 
+     /*
+@@ -788,36 +788,36 @@
+     argc--;
+     argv++;
+     while ((arg = *argv) != 0 && *arg == '-') {
+-	if (strncmp(arg, "-version:", 9) == 0) {
+-	    version = arg + 9;
+-	} else if (strcmp(arg, "-jre-restrict-search") == 0) {
+-	    restrict_search = 1;
+-	} else if (strcmp(arg, "-no-jre-restrict-search") == 0) {
+-	    restrict_search = 0;
+-	} else {
+-	    if (strcmp(arg, "-jar") == 0)
+-		jarflag = 1;
+-	    /* deal with "unfortunate" classpath syntax */
+-	    if ((strcmp(arg, "-classpath") == 0 || strcmp(arg, "-cp") == 0) &&
+-	      (argc >= 2)) {
+-		*new_argp++ = arg;
+-		argc--;
+-		argv++;
+-		arg = *argv;
+-	    }
+-	    *new_argp++ = arg;
+-	}
+-	argc--;
+-	argv++;
++        if (strncmp(arg, "-version:", 9) == 0) {
++            version = arg + 9;
++        } else if (strcmp(arg, "-jre-restrict-search") == 0) {
++            restrict_search = 1;
++        } else if (strcmp(arg, "-no-jre-restrict-search") == 0) {
++            restrict_search = 0;
++        } else {
++            if (strcmp(arg, "-jar") == 0)
++                jarflag = 1;
++            /* deal with "unfortunate" classpath syntax */
++            if ((strcmp(arg, "-classpath") == 0 || strcmp(arg, "-cp") == 0) &&
++              (argc >= 2)) {
++                *new_argp++ = arg;
++                argc--;
++                argv++;
++                arg = *argv;
++            }
++            *new_argp++ = arg;
++        }
++        argc--;
++        argv++;
+     }
+-    if (argc <= 0) {	/* No operand? Possibly legit with -[full]version */
+-	operand = NULL;
++    if (argc <= 0) {    /* No operand? Possibly legit with -[full]version */
++        operand = NULL;
+     } else {
+-	argc--;
+-	*new_argp++ = operand = *argv++;
++        argc--;
++        *new_argp++ = operand = *argv++;
+     }
+     while (argc-- > 0)  /* Copy over [argument...] */
+-	*new_argp++ = *argv++;
++        *new_argp++ = *argv++;
+     *new_argp = NULL;
+ 
+     /*
+@@ -830,20 +830,20 @@
+      * this data around.
+      */
+     if (jarflag && operand) {
+-	if ((res = parse_manifest(operand, &info)) != 0) {
+-	    if (res == -1)
+-		ReportErrorMessage2("Unable to access jarfile %s",
+-		  operand, JNI_TRUE);
+-	    else
+-		ReportErrorMessage2("Invalid or corrupt jarfile %s",
+-		  operand, JNI_TRUE);
+-	    exit(1);
+-	}
++        if ((res = parse_manifest(operand, &info)) != 0) {
++            if (res == -1)
++                ReportErrorMessage2("Unable to access jarfile %s",
++                  operand, JNI_TRUE);
++            else
++                ReportErrorMessage2("Invalid or corrupt jarfile %s",
++                  operand, JNI_TRUE);
++            exit(1);
++        }
+     } else {
+-	info.manifest_version = NULL;
+-	info.main_class = NULL;
+-	info.jre_version = NULL;
+-	info.jre_restrict_search = 0;
++        info.manifest_version = NULL;
++        info.main_class = NULL;
++        info.jre_version = NULL;
++        info.jre_restrict_search = 0;
+     }
+ 
+     /*
+@@ -851,34 +851,34 @@
+      * manifest are overwritten by any specified on the command line.
+      */
+     if (version != NULL)
+-	info.jre_version = version;
++        info.jre_version = version;
+     if (restrict_search != -1)
+-	info.jre_restrict_search = restrict_search;
++        info.jre_restrict_search = restrict_search;
+ 
+     /*
+      * "Valid" returns (other than unrecoverable errors) follow.  Set
+      * main_class as a side-effect of this routine.
+      */
+     if (info.main_class != NULL)
+-	*main_class = strdup(info.main_class);
++        *main_class = strdup(info.main_class);
+ 
+     /*
+      * If no version selection information is found either on the command
+      * line or in the manifest, simply return.
+      */
+     if (info.jre_version == NULL) {
+-	free_manifest();
+-	free(new_argv);
+-	return;
++        free_manifest();
++        free(new_argv);
++        return;
+     }
+ 
+     /*
+      * Check for correct syntax of the version specification (JSR 56).
+      */
+     if (!valid_version_string(info.jre_version)) {
+-	ReportErrorMessage2("Syntax error in version specification \"%s\"",
+-	  info.jre_version, JNI_TRUE);
+-	exit(1);
++        ReportErrorMessage2("Syntax error in version specification \"%s\"",
++          info.jre_version, JNI_TRUE);
++        exit(1);
+     }
+ 
+     /*
+@@ -894,16 +894,16 @@
+           (info.jre_version?info.jre_version:"null"),
+           (info.jre_restrict_search?"true":"false"), (jre?jre:"null"));
+     if (jre == NULL) {
+-	if (acceptable_release(FULL_VERSION, info.jre_version)) {
+-	    free_manifest();
+-	    free(new_argv);
+-	    return;
+-	} else {
+-	    ReportErrorMessage2(
+-	      "Unable to locate JRE meeting specification \"%s\"",
+-	      info.jre_version, JNI_TRUE);
+-	    exit(1);
+-	}
++        if (acceptable_release(FULL_VERSION, info.jre_version)) {
++            free_manifest();
++            free(new_argv);
++            return;
++        } else {
++            ReportErrorMessage2(
++              "Unable to locate JRE meeting specification \"%s\"",
++              info.jre_version, JNI_TRUE);
++            exit(1);
++        }
+     }
+ 
+     /*
+@@ -919,7 +919,7 @@
+      * times.
+      */
+     if (info.main_class != NULL)
+-	(void)strcat(env_entry, info.main_class);
++        (void)strcat(env_entry, info.main_class);
+     (void)putenv(env_entry);
+     ExecJRE(jre, new_argv);
+     free_manifest();
+@@ -937,7 +937,7 @@
+  */
+ static jboolean
+ ParseArguments(int *pargc, char ***pargv, char **pjarfile,
+-		       char **pclassname, int *pret)
++                       char **pclassname, int *pret)
+ {
+     int argc = *pargc;
+     char **argv = *pargv;
+@@ -946,106 +946,106 @@
+ 
+     *pret = 1;
+     while ((arg = *argv) != 0 && *arg == '-') {
+-	argv++; --argc;
+-	if (strcmp(arg, "-classpath") == 0 || strcmp(arg, "-cp") == 0) {
+-	    if (argc < 1) {
+-	        ReportErrorMessage2("%s requires class path specification",
+-				    arg, JNI_TRUE);
+-		PrintUsage();
+-		return JNI_FALSE;
+-	    }
+-	    SetClassPath(*argv);
+-	    argv++; --argc;
+-	} else if (strcmp(arg, "-jar") == 0) {
+-	    jarflag = JNI_TRUE;
+-	} else if (strcmp(arg, "-help") == 0 ||
+-		   strcmp(arg, "-h") == 0 ||
+-		   strcmp(arg, "-?") == 0) {
+-	    PrintUsage();
+-	    *pret = 0;
+-	    return JNI_FALSE;
+-	} else if (strcmp(arg, "-version") == 0) {
+-	    printVersion = JNI_TRUE;
+-	    return JNI_TRUE;
+-	} else if (strcmp(arg, "-showversion") == 0) {
+-	    showVersion = JNI_TRUE;
+-	} else if (strcmp(arg, "-X") == 0) {
+-	    *pret = PrintXUsage();
+-	    return JNI_FALSE;
++        argv++; --argc;
++        if (strcmp(arg, "-classpath") == 0 || strcmp(arg, "-cp") == 0) {
++            if (argc < 1) {
++                ReportErrorMessage2("%s requires class path specification",
++                                    arg, JNI_TRUE);
++                PrintUsage();
++                return JNI_FALSE;
++            }
++            SetClassPath(*argv);
++            argv++; --argc;
++        } else if (strcmp(arg, "-jar") == 0) {
++            jarflag = JNI_TRUE;
++        } else if (strcmp(arg, "-help") == 0 ||
++                   strcmp(arg, "-h") == 0 ||
++                   strcmp(arg, "-?") == 0) {
++            PrintUsage();
++            *pret = 0;
++            return JNI_FALSE;
++        } else if (strcmp(arg, "-version") == 0) {
++            printVersion = JNI_TRUE;
++            return JNI_TRUE;
++        } else if (strcmp(arg, "-showversion") == 0) {
++            showVersion = JNI_TRUE;
++        } else if (strcmp(arg, "-X") == 0) {
++            *pret = PrintXUsage();
++            return JNI_FALSE;
+ /*
+  * The following case provide backward compatibility with old-style
+  * command line options.
+  */
+-	} else if (strcmp(arg, "-fullversion") == 0) {
+-	    fprintf(stderr, "%s full version \"%s\"\n", progname,
+-		    FULL_VERSION);
+-	    *pret = 0;
+-	    return JNI_FALSE;
+-	} else if (strcmp(arg, "-verbosegc") == 0) {
+-	    AddOption("-verbose:gc", NULL);
+-	} else if (strcmp(arg, "-t") == 0) {
+-	    AddOption("-Xt", NULL);
+-	} else if (strcmp(arg, "-tm") == 0) {
+-	    AddOption("-Xtm", NULL);
+-	} else if (strcmp(arg, "-debug") == 0) {
+-	    AddOption("-Xdebug", NULL);
+-	} else if (strcmp(arg, "-noclassgc") == 0) {
+-	    AddOption("-Xnoclassgc", NULL);
+-	} else if (strcmp(arg, "-Xfuture") == 0) {
+-	    AddOption("-Xverify:all", NULL);
+-	} else if (strcmp(arg, "-verify") == 0) {
+-	    AddOption("-Xverify:all", NULL);
+-	} else if (strcmp(arg, "-verifyremote") == 0) {
+-	    AddOption("-Xverify:remote", NULL);
+-	} else if (strcmp(arg, "-noverify") == 0) {
+-	    AddOption("-Xverify:none", NULL);
+-	} else if (strcmp(arg, "-XXsuppressExitMessage") == 0) {
++        } else if (strcmp(arg, "-fullversion") == 0) {
++            fprintf(stderr, "%s full version \"%s\"\n", progname,
++                    FULL_VERSION);
++            *pret = 0;
++            return JNI_FALSE;
++        } else if (strcmp(arg, "-verbosegc") == 0) {
++            AddOption("-verbose:gc", NULL);
++        } else if (strcmp(arg, "-t") == 0) {
++            AddOption("-Xt", NULL);
++        } else if (strcmp(arg, "-tm") == 0) {
++            AddOption("-Xtm", NULL);
++        } else if (strcmp(arg, "-debug") == 0) {
++            AddOption("-Xdebug", NULL);
++        } else if (strcmp(arg, "-noclassgc") == 0) {
++            AddOption("-Xnoclassgc", NULL);
++        } else if (strcmp(arg, "-Xfuture") == 0) {
++            AddOption("-Xverify:all", NULL);
++        } else if (strcmp(arg, "-verify") == 0) {
++            AddOption("-Xverify:all", NULL);
++        } else if (strcmp(arg, "-verifyremote") == 0) {
++            AddOption("-Xverify:remote", NULL);
++        } else if (strcmp(arg, "-noverify") == 0) {
++            AddOption("-Xverify:none", NULL);
++        } else if (strcmp(arg, "-XXsuppressExitMessage") == 0) {
+             noExitErrorMessage = 1;
+-	} else if (strncmp(arg, "-prof", 5) == 0) {
+-	    char *p = arg + 5;
+-	    char *tmp = MemAlloc(strlen(arg) + 50);
+-	    if (*p) {
+-	        sprintf(tmp, "-Xrunhprof:cpu=old,file=%s", p + 1);
+-	    } else {
+-	        sprintf(tmp, "-Xrunhprof:cpu=old,file=java.prof");
+-	    }
+-	    AddOption(tmp, NULL);
+-	} else if (strncmp(arg, "-ss", 3) == 0 ||
+-		   strncmp(arg, "-oss", 4) == 0 ||
+-		   strncmp(arg, "-ms", 3) == 0 ||
+-		   strncmp(arg, "-mx", 3) == 0) {
+-	    char *tmp = MemAlloc(strlen(arg) + 6);
+-	    sprintf(tmp, "-X%s", arg + 1); /* skip '-' */
+-	    AddOption(tmp, NULL);
+-	} else if (strcmp(arg, "-checksource") == 0 ||
+-		   strcmp(arg, "-cs") == 0 ||
+-		   strcmp(arg, "-noasyncgc") == 0) {
+-	    /* No longer supported */
+-	    fprintf(stderr,
+-		    "Warning: %s option is no longer supported.\n",
+-		    arg);
++        } else if (strncmp(arg, "-prof", 5) == 0) {
++            char *p = arg + 5;
++            char *tmp = MemAlloc(strlen(arg) + 50);
++            if (*p) {
++                sprintf(tmp, "-Xrunhprof:cpu=old,file=%s", p + 1);
++            } else {
++                sprintf(tmp, "-Xrunhprof:cpu=old,file=java.prof");
++            }
++            AddOption(tmp, NULL);
++        } else if (strncmp(arg, "-ss", 3) == 0 ||
++                   strncmp(arg, "-oss", 4) == 0 ||
++                   strncmp(arg, "-ms", 3) == 0 ||
++                   strncmp(arg, "-mx", 3) == 0) {
++            char *tmp = MemAlloc(strlen(arg) + 6);
++            sprintf(tmp, "-X%s", arg + 1); /* skip '-' */
++            AddOption(tmp, NULL);
++        } else if (strcmp(arg, "-checksource") == 0 ||
++                   strcmp(arg, "-cs") == 0 ||
++                   strcmp(arg, "-noasyncgc") == 0) {
++            /* No longer supported */
++            fprintf(stderr,
++                    "Warning: %s option is no longer supported.\n",
++                    arg);
+         } else if (strncmp(arg, "-version:", 9) == 0 ||
+                    strcmp(arg, "-no-jre-restrict-search") == 0 ||
+                    strcmp(arg, "-jre-restrict-search") == 0) {
+-	    ; /* Ignore machine independent options already handled */
+-	} else if (RemovableMachineDependentOption(arg) ) {
+-	    ; /* Do not pass option to vm. */
+-	}
+-	else {
+-	    AddOption(arg, NULL);
+-	}
++            ; /* Ignore machine independent options already handled */
++        } else if (RemovableMachineDependentOption(arg) ) {
++            ; /* Do not pass option to vm. */
++        }
++        else {
++            AddOption(arg, NULL);
++        }
+     }
+ 
+     if (--argc >= 0) {
+         if (jarflag) {
+-	    *pjarfile = *argv++;
+-	    *pclassname = 0;
+-	} else {
+-	    *pjarfile = 0;
+-	    *pclassname = *argv++;
+-	}
+-	*pargc = argc;
+-	*pargv = argv;
++            *pjarfile = *argv++;
++            *pclassname = 0;
++        } else {
++            *pjarfile = 0;
++            *pclassname = *argv++;
++        }
++        *pargc = argc;
++        *pargv = argv;
+     }
+ 
+     return JNI_TRUE;
+@@ -1068,15 +1068,15 @@
+     args.ignoreUnrecognized = JNI_FALSE;
+ 
+     if (_launcher_debug) {
+-	int i = 0;
+-	printf("JavaVM args:\n    ");
+-	printf("version 0x%08lx, ", (long)args.version);
+-	printf("ignoreUnrecognized is %s, ",
+-	       args.ignoreUnrecognized ? "JNI_TRUE" : "JNI_FALSE");
+-	printf("nOptions is %ld\n", (long)args.nOptions);
+-	for (i = 0; i < numOptions; i++)
+-	    printf("    option[%2d] = '%s'\n",
+-		   i, args.options[i].optionString);
++        int i = 0;
++        printf("JavaVM args:\n    ");
++        printf("version 0x%08lx, ", (long)args.version);
++        printf("ignoreUnrecognized is %s, ",
++               args.ignoreUnrecognized ? "JNI_TRUE" : "JNI_FALSE");
++        printf("nOptions is %ld\n", (long)args.nOptions);
++        for (i = 0; i < numOptions; i++)
++            printf("    option[%2d] = '%s'\n",
++                   i, args.options[i].optionString);
+     }
+ 
+     r = ifn->CreateJavaVM(pvm, (void **)penv, &args);
+@@ -1097,8 +1097,8 @@
+ {
+     void *p = malloc(size);
+     if (p == 0) {
+-	perror("malloc");
+-	exit(1);
++        perror("malloc");
++        exit(1);
+     }
+     return p;
+ }
+@@ -1112,12 +1112,12 @@
+             jmethodID mid;
+             NULL_CHECK0 (cls = (*env)->FindClass(env, "java/lang/System"));
+             NULL_CHECK0 (mid = (*env)->GetStaticMethodID(
+-		                   env, cls, 
+-			           "getProperty",
+-			           "(Ljava/lang/String;)Ljava/lang/String;"));
++                                   env, cls,
++                                   "getProperty",
++                                   "(Ljava/lang/String;)Ljava/lang/String;"));
+             platformEncoding = (*env)->CallStaticObjectMethod (
+                                     env, cls, mid, propname);
+-        } 
++        }
+     }
+     return platformEncoding;
+ }
+@@ -1127,9 +1127,9 @@
+     jmethodID mid;
+     NULL_CHECK0 (cls = (*env)->FindClass(env, "java/nio/charset/Charset"));
+     NULL_CHECK0 (mid = (*env)->GetStaticMethodID(
+-	                   env, cls, 
+-		           "isSupported",
+-		           "(Ljava/lang/String;)Z"));
++                           env, cls,
++                           "isSupported",
++                           "(Ljava/lang/String;)Z"));
+     return (*env)->CallStaticBooleanMethod (env, cls, mid, enc);
+ }
+ 
+@@ -1138,7 +1138,7 @@
+  */
+ static jstring
+ NewPlatformString(JNIEnv *env, char *s)
+-{    
++{
+     int len = (int)strlen(s);
+     jclass cls;
+     jmethodID mid;
+@@ -1146,14 +1146,14 @@
+     jstring enc;
+ 
+     if (s == NULL)
+-	return 0;
++        return 0;
+     enc = getPlatformEncoding(env);
+ 
+     ary = (*env)->NewByteArray(env, len);
+     if (ary != 0) {
+         jstring str = 0;
+-	(*env)->SetByteArrayRegion(env, ary, 0, len, (jbyte *)s);
+-	if (!(*env)->ExceptionOccurred(env)) {
++        (*env)->SetByteArrayRegion(env, ary, 0, len, (jbyte *)s);
++        if (!(*env)->ExceptionOccurred(env)) {
+ #ifdef GAMMA
+             /* We support running JVM with older JDK, so here we have to deal */
+             /* with the case that sun.jnu.encoding is undefined (enc == NULL) */
+@@ -1162,25 +1162,25 @@
+             if (isEncodingSupported(env, enc) == JNI_TRUE) {
+ #endif
+                 NULL_CHECK0(cls = (*env)->FindClass(env, "java/lang/String"));
+-                NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "<init>", 
+-	   				  "([BLjava/lang/String;)V"));
+-	        str = (*env)->NewObject(env, cls, mid, ary, enc);
+-	    } else {
+-                /*If the encoding specified in sun.jnu.encoding is not 
+-                  endorsed by "Charset.isSupported" we have to fall back 
++                NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "<init>",
++                                          "([BLjava/lang/String;)V"));
++                str = (*env)->NewObject(env, cls, mid, ary, enc);
++            } else {
++                /*If the encoding specified in sun.jnu.encoding is not
++                  endorsed by "Charset.isSupported" we have to fall back
+                   to use String(byte[]) explicitly here without specifying
+-                  the encoding name, in which the StringCoding class will 
+-                  pickup the iso-8859-1 as the fallback converter for us. 
+-	        */
++                  the encoding name, in which the StringCoding class will
++                  pickup the iso-8859-1 as the fallback converter for us.
++                */
+                 NULL_CHECK0(cls = (*env)->FindClass(env, "java/lang/String"));
+-                NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "<init>", 
+-	   				  "([B)V"));
+-	        str = (*env)->NewObject(env, cls, mid, ary);
++                NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "<init>",
++                                          "([B)V"));
++                str = (*env)->NewObject(env, cls, mid, ary);
+             }
+-	    (*env)->DeleteLocalRef(env, ary);
+-	    return str;
++            (*env)->DeleteLocalRef(env, ary);
++            return str;
+         }
+-    } 
++    }
+     return 0;
+ }
+ 
+@@ -1198,10 +1198,10 @@
+     NULL_CHECK0(cls = (*env)->FindClass(env, "java/lang/String"));
+     NULL_CHECK0(ary = (*env)->NewObjectArray(env, strc, cls, 0));
+     for (i = 0; i < strc; i++) {
+-	jstring str = NewPlatformString(env, *strv++);
+-	NULL_CHECK0(str);
+-	(*env)->SetObjectArrayElement(env, ary, i, str);
+-	(*env)->DeleteLocalRef(env, str);
++        jstring str = NewPlatformString(env, *strv++);
++        NULL_CHECK0(str);
++        (*env)->SetObjectArrayElement(env, ary, i, str);
++        (*env)->DeleteLocalRef(env, str);
+     }
+     return ary;
+ }
+@@ -1218,20 +1218,20 @@
+     jlong start, end;
+ 
+     if (_launcher_debug)
+-	start = CounterGet();
++        start = CounterGet();
+ 
+     do {
+         c = *t++;
+-	*s++ = (c == '.') ? '/' : c;
++        *s++ = (c == '.') ? '/' : c;
+     } while (c != '\0');
+     cls = (*env)->FindClass(env, buf);
+     free(buf);
+ 
+     if (_launcher_debug) {
+-	end   = CounterGet();
+-	printf("%ld micro seconds to load main class\n",
+-	       (long)(jint)Counter2Micros(end-start));
+-	printf("----_JAVA_LAUNCHER_DEBUG----\n");
++        end   = CounterGet();
++        printf("%ld micro seconds to load main class\n",
++               (long)(jint)Counter2Micros(end-start));
++        printf("----_JAVA_LAUNCHER_DEBUG----\n");
+     }
+ 
+     return cls;
+@@ -1252,26 +1252,26 @@
+ 
+     NULL_CHECK0(cls = (*env)->FindClass(env, "java/util/jar/JarFile"));
+     NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "<init>",
+-					  "(Ljava/lang/String;)V"));
++                                          "(Ljava/lang/String;)V"));
+     NULL_CHECK0(str = NewPlatformString(env, jarname));
+     NULL_CHECK0(jar = (*env)->NewObject(env, cls, mid, str));
+     NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "getManifest",
+-					  "()Ljava/util/jar/Manifest;"));
++                                          "()Ljava/util/jar/Manifest;"));
+     man = (*env)->CallObjectMethod(env, jar, mid);
+     if (man != 0) {
+-	NULL_CHECK0(mid = (*env)->GetMethodID(env,
+-				    (*env)->GetObjectClass(env, man),
+-				    "getMainAttributes",
+-				    "()Ljava/util/jar/Attributes;"));
+-	attr = (*env)->CallObjectMethod(env, man, mid);
+-	if (attr != 0) {
+-	    NULL_CHECK0(mid = (*env)->GetMethodID(env,
+-				    (*env)->GetObjectClass(env, attr),
+-				    "getValue",
+-				    "(Ljava/lang/String;)Ljava/lang/String;"));
+-	    NULL_CHECK0(str = NewPlatformString(env, MAIN_CLASS));
+-	    result = (*env)->CallObjectMethod(env, attr, mid, str);
+-	}
++        NULL_CHECK0(mid = (*env)->GetMethodID(env,
++                                    (*env)->GetObjectClass(env, man),
++                                    "getMainAttributes",
++                                    "()Ljava/util/jar/Attributes;"));
++        attr = (*env)->CallObjectMethod(env, man, mid);
++        if (attr != 0) {
++            NULL_CHECK0(mid = (*env)->GetMethodID(env,
++                                    (*env)->GetObjectClass(env, attr),
++                                    "getValue",
++                                    "(Ljava/lang/String;)Ljava/lang/String;"));
++            NULL_CHECK0(str = NewPlatformString(env, MAIN_CLASS));
++            result = (*env)->CallObjectMethod(env, attr, mid, str);
++        }
+     }
+     return result;
+ }
+@@ -1298,45 +1298,45 @@
+ 
+     /* Copy the VM arguments (i.e. prefixed with -J) */
+     for (i = 0; i < NUM_ARGS; i++) {
+-	char *arg = java_args[i];
+-	if (arg[0] == '-' && arg[1] == 'J') {
+-	    *nargv++ = arg + 2;
+-	}
++        char *arg = java_args[i];
++        if (arg[0] == '-' && arg[1] == 'J') {
++            *nargv++ = arg + 2;
++        }
+     }
+ 
+     for (i = 0; i < argc; i++) {
+-	char *arg = argv[i];
+-	if (arg[0] == '-' && arg[1] == 'J') {
+-	    if (arg[2] == '\0') {
+-		ReportErrorMessage("Error: the -J option should not be "
+-				   "followed by a space.", JNI_TRUE);
+-		exit(1);
+-	    }
+-	    *nargv++ = arg + 2;
+-	}
++        char *arg = argv[i];
++        if (arg[0] == '-' && arg[1] == 'J') {
++            if (arg[2] == '\0') {
++                ReportErrorMessage("Error: the -J option should not be "
++                                   "followed by a space.", JNI_TRUE);
++                exit(1);
++            }
++            *nargv++ = arg + 2;
++        }
+     }
+ 
+     /* Copy the rest of the arguments */
+     for (i = 0; i < NUM_ARGS; i++) {
+-	char *arg = java_args[i];
+-	if (arg[0] != '-' || arg[1] != 'J') {
+-	    *nargv++ = arg;
+-	}
++        char *arg = java_args[i];
++        if (arg[0] != '-' || arg[1] != 'J') {
++            *nargv++ = arg;
++        }
+     }
+     for (i = 0; i < argc; i++) {
+-	char *arg = argv[i];
+-	if (arg[0] != '-' || arg[1] != 'J') {
+-	    *nargv++ = arg;
+-	}
++        char *arg = argv[i];
++        if (arg[0] != '-' || arg[1] != 'J') {
++            *nargv++ = arg;
++        }
+     }
+     *nargv = 0;
+ }
+ 
+ /*
+  * For our tools, we try to add 3 VM options:
+- *	-Denv.class.path=<envcp>
+- *	-Dapplication.home=<apphome>
+- *	-Djava.class.path=<appcp>
++ *      -Denv.class.path=<envcp>
++ *      -Dapplication.home=<apphome>
++ *      -Djava.class.path=<appcp>
+  * <envcp>   is the user's setting of CLASSPATH -- for instance the user
+  *           tells javac where to find binary classes through this environment
+  *           variable.  Notice that users will be able to compile against our
+@@ -1357,15 +1357,15 @@
+ 
+     s = getenv("CLASSPATH");
+     if (s) {
+-	/* 40 for -Denv.class.path= */
+-	envcp = (char *)MemAlloc(strlen(s) + 40);
+-	sprintf(envcp, "-Denv.class.path=%s", s);
+-	AddOption(envcp, NULL);
++        /* 40 for -Denv.class.path= */
++        envcp = (char *)MemAlloc(strlen(s) + 40);
++        sprintf(envcp, "-Denv.class.path=%s", s);
++        AddOption(envcp, NULL);
+     }
+ 
+     if (!GetApplicationHome(home, sizeof(home))) {
+-	ReportErrorMessage("Can't determine application home", JNI_TRUE);
+-	return JNI_FALSE;
++        ReportErrorMessage("Can't determine application home", JNI_TRUE);
++        return JNI_FALSE;
+     }
+ 
+     /* 40 for '-Dapplication.home=' */
+@@ -1377,14 +1377,14 @@
+     size = 40;                                 /* 40: "-Djava.class.path=" */
+     strlenHome = (int)strlen(home);
+     for (i = 0; i < NUM_APP_CLASSPATH; i++) {
+-	size += strlenHome + (int)strlen(app_classpath[i]) + 1; /* 1: separator */
++        size += strlenHome + (int)strlen(app_classpath[i]) + 1; /* 1: separator */
+     }
+     appcp = (char *)MemAlloc(size + 1);
+     strcpy(appcp, "-Djava.class.path=");
+     for (i = 0; i < NUM_APP_CLASSPATH; i++) {
+-	strcat(appcp, home);			/* c:\program files\myapp */
+-	strcat(appcp, app_classpath[i]);	/* \lib\myapp.jar	  */
+-	strcat(appcp, separator);		/* ;			  */
++        strcat(appcp, home);                    /* c:\program files\myapp */
++        strcat(appcp, app_classpath[i]);        /* \lib\myapp.jar         */
++        strcat(appcp, separator);               /* ;                      */
+     }
+     appcp[strlen(appcp)-1] = '\0';  /* remove trailing path separator */
+     AddOption(appcp, NULL);
+@@ -1403,7 +1403,7 @@
+  */
+ void
+ SetJavaCommandLineProp(char *classname, char *jarfile,
+-		       int argc, char **argv)
++                       int argc, char **argv)
+ {
+ 
+     int i = 0;
+@@ -1486,14 +1486,14 @@
+     int i;
+ 
+     fprintf(stdout,
+-	"Usage: %s [-options] class [args...]\n"
+-	"           (to execute a class)\n"
+-	"   or  %s [-options] -jar jarfile [args...]\n"
+-	"           (to execute a jar file)\n"
+-	"\n"
+-	"where options include:\n",
+-	progname,
+-	progname);
++        "Usage: %s [-options] class [args...]\n"
++        "           (to execute a class)\n"
++        "   or  %s [-options] -jar jarfile [args...]\n"
++        "           (to execute a jar file)\n"
++        "\n"
++        "where options include:\n",
++        progname,
++        progname);
+ 
+ #ifndef GAMMA
+     PrintMachineDependentOptions();
+@@ -1502,17 +1502,17 @@
+         (knownVMs[0].flag == VM_IF_SERVER_CLASS)) {
+       fprintf(stdout, "    %s\t  to select the \"%s\" VM\n",
+               knownVMs[0].name, knownVMs[0].name+1);
+-    }        
++    }
+     for (i=1; i<knownVMsCount; i++) {
+-	if (knownVMs[i].flag == VM_KNOWN)
+-	    fprintf(stdout, "    %s\t  to select the \"%s\" VM\n",
+-		    knownVMs[i].name, knownVMs[i].name+1);
++        if (knownVMs[i].flag == VM_KNOWN)
++            fprintf(stdout, "    %s\t  to select the \"%s\" VM\n",
++                    knownVMs[i].name, knownVMs[i].name+1);
+     }
+     for (i=1; i<knownVMsCount; i++) {
+-	if (knownVMs[i].flag == VM_ALIASED_TO)
+-	    fprintf(stdout, "    %s\t  is a synonym for "
+-		    "the \"%s\" VM  [deprecated]\n",
+-		    knownVMs[i].name, knownVMs[i].alias+1);
++        if (knownVMs[i].flag == VM_ALIASED_TO)
++            fprintf(stdout, "    %s\t  is a synonym for "
++                    "the \"%s\" VM  [deprecated]\n",
++                    knownVMs[i].name, knownVMs[i].alias+1);
+     }
+ 
+     /* The first known VM is the default */
+@@ -1526,18 +1526,18 @@
+         punctuation = ", ";
+         reason = "because you are running on a server-class machine.\n";
+       }
+-      fprintf(stdout, "                  The default VM is %s%s\n", 
++      fprintf(stdout, "                  The default VM is %s%s\n",
+               defaultVM, punctuation);
+       fprintf(stdout, "                  %s\n",
+               reason);
+-    }       
++    }
+ #endif /* ifndef GAMMA */
+ 
+     fprintf(stdout,
+ "    -cp <class search path of directories and zip/jar files>\n"
+ "    -classpath <class search path of directories and zip/jar files>\n"
+ "                  A %c separated list of directories, JAR archives,\n"
+-"                  and ZIP archives to search for class files.\n" 
++"                  and ZIP archives to search for class files.\n"
+ "    -D<name>=<value>\n"
+ "                  set a system property\n"
+ "    -verbose[:class|gc|jni]\n"
+@@ -1568,7 +1568,7 @@
+ "    -javaagent:<jarpath>[=<options>]\n"
+ "                  load Java programming language agent, see java.lang.instrument\n"
+ 
+-	    ,PATH_SEPARATOR);
++            ,PATH_SEPARATOR);
+ }
+ 
+ /*
+@@ -1586,7 +1586,7 @@
+     fp = fopen(path, "r");
+     if (fp == 0) {
+         fprintf(stderr, "Can't open %s\n", path);
+-	return 1;
++        return 1;
+     }
+     while ((n = fread(buf, 1, sizeof(buf), fp)) != 0) {
+         fwrite(buf, 1, n, stdout);
+@@ -1600,17 +1600,17 @@
+ /*
+  * Read the jvm.cfg file and fill the knownJVMs[] array.
+  *
+- * The functionality of the jvm.cfg file is subject to change without 
++ * The functionality of the jvm.cfg file is subject to change without
+  * notice and the mechanism will be removed in the future.
+  *
+  * The lexical structure of the jvm.cfg file is as follows:
+  *
+  *     jvmcfg         :=  { vmLine }
+- *     vmLine         :=  knownLine 
+- *                    |   aliasLine 
+- *                    |   warnLine 
+- *                    |   ignoreLine 
+- *                    |   errorLine 
++ *     vmLine         :=  knownLine
++ *                    |   aliasLine
++ *                    |   warnLine
++ *                    |   ignoreLine
++ *                    |   errorLine
+  *                    |   predicateLine
+  *                    |   commentLine
+  *     knownLine      :=  flag  "KNOWN"                  EOL
+@@ -1623,28 +1623,28 @@
+  *     flag           :=  "-" identifier
+  *
+  * The semantics are that when someone specifies a flag on the command line:
+- * - if the flag appears on a knownLine, then the identifier is used as 
++ * - if the flag appears on a knownLine, then the identifier is used as
+  *   the name of the directory holding the JVM library (the name of the JVM).
+- * - if the flag appears as the first flag on an aliasLine, the identifier 
++ * - if the flag appears as the first flag on an aliasLine, the identifier
+  *   of the second flag is used as the name of the JVM.
+- * - if the flag appears on a warnLine, the identifier is used as the 
++ * - if the flag appears on a warnLine, the identifier is used as the
+  *   name of the JVM, but a warning is generated.
+- * - if the flag appears on an ignoreLine, the identifier is recognized as the 
++ * - if the flag appears on an ignoreLine, the identifier is recognized as the
+  *   name of a JVM, but the identifier is ignored and the default vm used
+  * - if the flag appears on an errorLine, an error is generated.
+- * - if the flag appears as the first flag on a predicateLine, and 
+- *   the machine on which you are running passes the predicate indicated, 
+- *   then the identifier of the second flag is used as the name of the JVM, 
++ * - if the flag appears as the first flag on a predicateLine, and
++ *   the machine on which you are running passes the predicate indicated,
++ *   then the identifier of the second flag is used as the name of the JVM,
+  *   otherwise the identifier of the first flag is used as the name of the JVM.
+- * If no flag is given on the command line, the first vmLine of the jvm.cfg 
++ * If no flag is given on the command line, the first vmLine of the jvm.cfg
+  * file determines the name of the JVM.
+- * PredicateLines are only interpreted on first vmLine of a jvm.cfg file, 
+- * since they only make sense if someone hasn't specified the name of the 
++ * PredicateLines are only interpreted on first vmLine of a jvm.cfg file,
++ * since they only make sense if someone hasn't specified the name of the
+  * JVM on the command line.
+  *
+- * The intent of the jvm.cfg file is to allow several JVM libraries to 
+- * be installed in different subdirectories of a single JRE installation, 
+- * for space-savings and convenience in testing.  
++ * The intent of the jvm.cfg file is to allow several JVM libraries to
++ * be installed in different subdirectories of a single JRE installation,
++ * for space-savings and convenience in testing.
+  * The intent is explicitly not to provide a full aliasing or predicate
+  * mechanism.
+  */
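For concreteness, a jvm.cfg exercising this grammar might read as follows. The keywords are inferred from the VM_KNOWN, VM_ALIASED_TO and VM_IF_SERVER_CLASS cases handled in ReadKnownVMs below; which lines a real installation carries depends on the VMs it ships, so treat this purely as an illustration:

    -client IF_SERVER_CLASS -server
    -server KNOWN
    -hotspot ALIASED_TO -client
    -classic WARN
    -green ERROR

Here the first vmLine is a predicateLine, so a bare invocation selects "server" on a server-class machine and "client" otherwise, exactly as described above.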
+@@ -1670,15 +1670,15 @@
+     strcat(jvmCfgName, FILESEP "lib" FILESEP);
+     strcat(jvmCfgName, arch);
+     strcat(jvmCfgName, FILESEP "jvm.cfg");
+-    
++
+     jvmCfg = fopen(jvmCfgName, "r");
+     if (jvmCfg == NULL) {
+       if (!speculative) {
+         ReportErrorMessage2("Error: could not open `%s'", jvmCfgName,
+-			    JNI_TRUE);
+-	exit(1);
++                            JNI_TRUE);
++        exit(1);
+       } else {
+-	return -1;
++        return -1;
+       }
+     }
+     while (fgets(line, sizeof(line), jvmCfg) != NULL) {
+@@ -1765,14 +1765,14 @@
+             case VM_ALIASED_TO:
+                 knownVMs[cnt].alias = strdup(altVMName);
+                 if (_launcher_debug) {
+-                    printf("    name: %s  vmType: %s  alias: %s\n", 
++                    printf("    name: %s  vmType: %s  alias: %s\n",
+                            knownVMs[cnt].name, "VM_ALIASED_TO", knownVMs[cnt].alias);
+                 }
+                 break;
+             case VM_IF_SERVER_CLASS:
+                 knownVMs[cnt].server_class = strdup(serverClassVMName);
+                 if (_launcher_debug) {
+-                    printf("    name: %s  vmType: %s  server_class: %s\n", 
++                    printf("    name: %s  vmType: %s  server_class: %s\n",
+                            knownVMs[cnt].name, "VM_IF_SERVER_CLASS", knownVMs[cnt].server_class);
+                 }
+                 break;
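The VM_* cases above populate knownVMs[], which CheckJvmType later consults to turn a command-line flag into a JVM directory name. The following is a compressed, illustrative model of that lookup; the field names mirror the debug output above, but the enum values, the warning text and the main() driver are invented for this sketch and differ from the real java.c:

    #include <stdio.h>
    #include <string.h>

    enum vmflag { VM_KNOWN, VM_ALIASED_TO, VM_WARN, VM_IF_SERVER_CLASS };

    struct vmentry {
        const char *name;          /* flag, e.g. "-client" */
        const char *alias;         /* target flag when VM_ALIASED_TO */
        const char *server_class;  /* target flag when VM_IF_SERVER_CLASS */
        enum vmflag flag;
    };

    /* Resolve a command-line flag to a JVM directory name (the flag minus
     * its leading '-'), following the jvm.cfg semantics documented above. */
    static const char *resolve_vm(const struct vmentry *vms, int n,
                                  const char *opt, int server_class_machine)
    {
        int i;
        for (i = 0; i < n; i++) {
            if (strcmp(vms[i].name, opt) != 0)
                continue;
            switch (vms[i].flag) {
            case VM_KNOWN:
                return vms[i].name + 1;
            case VM_WARN:
                fprintf(stderr, "Warning: %s VM is deprecated\n", opt);
                return vms[i].name + 1;
            case VM_ALIASED_TO:
                return vms[i].alias + 1;
            case VM_IF_SERVER_CLASS:
                return server_class_machine ? vms[i].server_class + 1
                                            : vms[i].name + 1;
            }
        }
        return NULL;  /* unknown flag; the real launcher reports an error */
    }

    int main(void)
    {
        static const struct vmentry vms[] = {
            { "-client",  NULL,      "-server", VM_IF_SERVER_CLASS },
            { "-server",  NULL,      NULL,      VM_KNOWN },
            { "-hotspot", "-client", NULL,      VM_ALIASED_TO },
        };
        /* "-hotspot" is an alias, so this prints "client" */
        printf("%s\n", resolve_vm(vms, 3, "-hotspot", 0));
        return 0;
    }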
+diff -ruN openjdk6/hotspot/src/os/linux/launcher/java.h openjdk/hotspot/src/os/linux/launcher/java.h
+--- openjdk6/hotspot/src/os/linux/launcher/java.h	2008-02-28 05:02:29.000000000 -0500
++++ openjdk/hotspot/src/os/linux/launcher/java.h	2008-01-31 09:19:00.000000000 -0500
+@@ -19,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ /*
+@@ -64,12 +64,12 @@
+ GetArch();
+ 
+ void CreateExecutionEnvironment(int *_argc,
+-				       char ***_argv,
+-				       char jrepath[],
+-				       jint so_jrepath,
+-				       char jvmpath[],
+-				       jint so_jvmpath,
+-				       char **original_argv);
++                                       char ***_argv,
++                                       char jrepath[],
++                                       jint so_jrepath,
++                                       char jvmpath[],
++                                       jint so_jvmpath,
++                                       char **original_argv);
+ 
+ /*
+  * Report an error message to stderr or a window as appropriate.  The
+@@ -89,10 +89,10 @@
+ jboolean RemovableMachineDependentOption(char * option);
+ void PrintMachineDependentOptions();
+ 
+-/* 
++/*
+  * Functions defined in java.c and used in java_md.c.
+  */
+-jint ReadKnownVMs(const char *jrepath, char * arch, jboolean speculative); 
++jint ReadKnownVMs(const char *jrepath, char * arch, jboolean speculative);
+ char *CheckJvmType(int *argc, char ***argv, jboolean speculative);
+ void* MemAlloc(size_t size);
+ 
+diff -ruN openjdk6/hotspot/src/os/linux/launcher/java_md.c openjdk/hotspot/src/os/linux/launcher/java_md.c
+--- openjdk6/hotspot/src/os/linux/launcher/java_md.c	2008-02-28 05:02:29.000000000 -0500
++++ openjdk/hotspot/src/os/linux/launcher/java_md.c	2008-01-31 09:19:00.000000000 -0500
+@@ -19,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ /*
+@@ -113,11 +113,11 @@
+  *      entries, but actual strings can be more efficient (with many compilers).
+  */
+ #ifdef __linux__
+-static const char *system_dir	= "/usr/java";
+-static const char *user_dir	= "/java";
++static const char *system_dir   = "/usr/java";
++static const char *user_dir     = "/java";
+ #else /* Solaris */
+-static const char *system_dir	= "/usr/jdk";
+-static const char *user_dir	= "/jdk";
++static const char *system_dir   = "/usr/jdk";
++static const char *user_dir     = "/jdk";
+ #endif
+ 
+ #endif  /* ifndef GAMMA */
+@@ -151,9 +151,9 @@
+  * (removes -client, -server, etc.)            |
+  *                                            \|/
+  *                                            CreateExecutionEnvironment
+- *                                            (removes -d32 and -d64, 
++ *                                            (removes -d32 and -d64,
+  *                                             determines desired data model,
+- *                                             sets up LD_LIBRARY_PATH, 
++ *                                             sets up LD_LIBRARY_PATH,
+  *                                             and exec's)
+  *                                             |
+  *  --------------------------------------------
+@@ -166,8 +166,8 @@
+  * (removes -client, -server, etc.)            |
+  *  |                                         \|/
+  *  |                                          CreateExecutionEnvironment
+- *  |                                          (verifies desired data model 
+- *  |                                           is running and acceptable 
++ *  |                                          (verifies desired data model
++ *  |                                           is running and acceptable
+  *  |                                           LD_LIBRARY_PATH;
+  *  |                                           no-op in child)
+  *  |
+@@ -181,15 +181,15 @@
+  * ParseArguments
+  * (ignores -d32 and -d64,
+  *  processes version options,
+- *  creates argument list for vm, 
++ *  creates argument list for vm,
+  *  etc.)
+- * 
++ *
+  */
+ 
+ static char *SetExecname(char **argv);
+ static char * GetExecname();
+ static jboolean GetJVMPath(const char *jrepath, const char *jvmtype,
+-			   char *jvmpath, jint jvmpathsize, char * arch);
++                           char *jvmpath, jint jvmpathsize, char * arch);
+ static jboolean GetJREPath(char *path, jint pathsize, char * arch, jboolean speculative);
+ 
+ const char *
+@@ -198,7 +198,7 @@
+     static char *arch = NULL;
+     static char buf[12];
+     if (arch) {
+-	return arch;
++        return arch;
+     }
+ 
+ #ifdef ARCH
+@@ -212,12 +212,12 @@
+ 
+ void
+ CreateExecutionEnvironment(int *_argcp,
+-			   char ***_argvp,
+-			   char jrepath[],
+-			   jint so_jrepath,
+-			   char jvmpath[],
+-			   jint so_jvmpath,
+-			   char **original_argv) {
++                           char ***_argvp,
++                           char jrepath[],
++                           jint so_jrepath,
++                           char jvmpath[],
++                           jint so_jvmpath,
++                           char **original_argv) {
+   /*
+    * First, determine if we are running the desired data model.  If we
+    * are running the desired data model, all the error messages
+@@ -239,39 +239,39 @@
+     /* Set the LD_LIBRARY_PATH environment variable, check data model
+        flags, and exec process, if needed */
+     {
+-      char *arch	= (char *)GetArch(); /* like sparc or sparcv9 */
+-      char * jvmtype 	= NULL;
+-      int argc		= *_argcp;
+-      char **argv	= original_argv;
+-
+-      char *runpath	= NULL; /* existing effective LD_LIBRARY_PATH
+-				   setting */
+-
+-      int running	=	/* What data model is being ILP32 =>
+-				   32 bit vm; LP64 => 64 bit vm */
+-#ifdef _LP64 
+-	64;
++      char *arch        = (char *)GetArch(); /* like sparc or sparcv9 */
++      char * jvmtype    = NULL;
++      int argc          = *_argcp;
++      char **argv       = original_argv;
++
++      char *runpath     = NULL; /* existing effective LD_LIBRARY_PATH
++                                   setting */
++
++      int running       =       /* What data model is being ILP32 =>
++                                   32 bit vm; LP64 => 64 bit vm */
++#ifdef _LP64
++        64;
+ #else
+       32;
+ #endif
+ 
+-      int wanted	= running;	/* What data mode is being
+-					   asked for? Current model is
+-					   fine unless another model
+-					   is asked for */
+-
+-      char* new_runpath	= NULL; /* desired new LD_LIBRARY_PATH string */
+-      char* newpath	= NULL; /* path on new LD_LIBRARY_PATH */
+-      char* lastslash	= NULL;
++      int wanted        = running;      /* What data mode is being
++                                           asked for? Current model is
++                                           fine unless another model
++                                           is asked for */
++
++      char* new_runpath = NULL; /* desired new LD_LIBRARY_PATH string */
++      char* newpath     = NULL; /* path on new LD_LIBRARY_PATH */
++      char* lastslash   = NULL;
+ 
+-      char** newenvp	= NULL; /* current environment */
++      char** newenvp    = NULL; /* current environment */
+ 
+-      char** newargv	= NULL;
+-      int    newargc	= 0;
++      char** newargv    = NULL;
++      int    newargc    = 0;
+ #ifdef __sun
+-      char*  dmpath	= NULL;  /* data model specific LD_LIBRARY_PATH,
+-				    Solaris only */
+-#endif    
++      char*  dmpath     = NULL;  /* data model specific LD_LIBRARY_PATH,
++                                    Solaris only */
++#endif
+ 
+       /*
+        * Starting in 1.5, all unix platforms accept the -d32 and -d64
+@@ -281,118 +281,118 @@
+        */
+ 
+       { /* open new scope to declare local variables */
+-	int i;
++        int i;
+ 
+-	newargv = (char **)MemAlloc((argc+1) * sizeof(*newargv));
+-	newargv[newargc++] = argv[0];
++        newargv = (char **)MemAlloc((argc+1) * sizeof(*newargv));
++        newargv[newargc++] = argv[0];
+ 
+-	/* scan for data model arguments and remove from argument list;
+-	   last occurrence determines desired data model */
+-	for (i=1; i < argc; i++) {
+-
+-	  if (strcmp(argv[i], "-J-d64") == 0 || strcmp(argv[i], "-d64") == 0) {
+-	    wanted = 64;
+-	    continue;
+-	  }
+-	  if (strcmp(argv[i], "-J-d32") == 0 || strcmp(argv[i], "-d32") == 0) {
+-	    wanted = 32;
+-	    continue;
+-	  }
+-	  newargv[newargc++] = argv[i];
++        /* scan for data model arguments and remove from argument list;
++           last occurrence determines desired data model */
++        for (i=1; i < argc; i++) {
++
++          if (strcmp(argv[i], "-J-d64") == 0 || strcmp(argv[i], "-d64") == 0) {
++            wanted = 64;
++            continue;
++          }
++          if (strcmp(argv[i], "-J-d32") == 0 || strcmp(argv[i], "-d32") == 0) {
++            wanted = 32;
++            continue;
++          }
++          newargv[newargc++] = argv[i];
+ 
+ #ifdef JAVA_ARGS
+-	  if (argv[i][0] != '-')
+-	    continue;
++          if (argv[i][0] != '-')
++            continue;
+ #else
+-	  if (strcmp(argv[i], "-classpath") == 0 || strcmp(argv[i], "-cp") == 0) {
+-	    i++;
+-	    if (i >= argc) break;
+-	    newargv[newargc++] = argv[i];
+-	    continue;
+-	  }
+-	  if (argv[i][0] != '-') { i++; break; }
+-#endif
+-	}
+-
+-	/* copy rest of args [i .. argc) */
+-	while (i < argc) {
+-	  newargv[newargc++] = argv[i++];
+-	}
+-	newargv[newargc] = NULL;
+-
+-	/* 
+-	 * newargv has all proper arguments here
+-	 */
+-    
+-	argc = newargc;
+-	argv = newargv;
++          if (strcmp(argv[i], "-classpath") == 0 || strcmp(argv[i], "-cp") == 0) {
++            i++;
++            if (i >= argc) break;
++            newargv[newargc++] = argv[i];
++            continue;
++          }
++          if (argv[i][0] != '-') { i++; break; }
++#endif
++        }
++
++        /* copy rest of args [i .. argc) */
++        while (i < argc) {
++          newargv[newargc++] = argv[i++];
++        }
++        newargv[newargc] = NULL;
++
++        /*
++         * newargv has all proper arguments here
++         */
++
++        argc = newargc;
++        argv = newargv;
+       }
+ 
+       /* If the data model is not changing, it is an error if the
+-	 jvmpath does not exist */
++         jvmpath does not exist */
+       if (wanted == running) {
+-	/* Find out where the JRE is that we will be using. */
+-	if (!GetJREPath(jrepath, so_jrepath, arch, JNI_FALSE) ) {
+-	  fprintf(stderr, "Error: could not find Java 2 Runtime Environment.\n");
+-	  exit(2);
+-	}
+-
+-	/* Find the specified JVM type */
+-	if (ReadKnownVMs(jrepath, arch, JNI_FALSE) < 1) {
+-	  fprintf(stderr, "Error: no known VMs. (check for corrupt jvm.cfg file)\n");
+-	  exit(1);
+-	}
+-
+-	jvmpath[0] = '\0';
+-	jvmtype = CheckJvmType(_argcp, _argvp, JNI_FALSE);
+-
+-	if (!GetJVMPath(jrepath, jvmtype, jvmpath, so_jvmpath, arch )) {
+-	  fprintf(stderr, "Error: no `%s' JVM at `%s'.\n", jvmtype, jvmpath);
+-	  exit(4);
+-	}
++        /* Find out where the JRE is that we will be using. */
++        if (!GetJREPath(jrepath, so_jrepath, arch, JNI_FALSE) ) {
++          fprintf(stderr, "Error: could not find Java 2 Runtime Environment.\n");
++          exit(2);
++        }
++
++        /* Find the specified JVM type */
++        if (ReadKnownVMs(jrepath, arch, JNI_FALSE) < 1) {
++          fprintf(stderr, "Error: no known VMs. (check for corrupt jvm.cfg file)\n");
++          exit(1);
++        }
++
++        jvmpath[0] = '\0';
++        jvmtype = CheckJvmType(_argcp, _argvp, JNI_FALSE);
++
++        if (!GetJVMPath(jrepath, jvmtype, jvmpath, so_jvmpath, arch )) {
++          fprintf(stderr, "Error: no `%s' JVM at `%s'.\n", jvmtype, jvmpath);
++          exit(4);
++        }
+       } else {  /* do the same speculatively or exit */
+ #ifdef DUAL_MODE
+-	if (running != wanted) {
+-	  /* Find out where the JRE is that we will be using. */
+-	  if (!GetJREPath(jrepath, so_jrepath, ((wanted==64)?BIG_ARCH:SMALL_ARCH), JNI_TRUE)) {
+-	    goto EndDataModelSpeculate;
+-	  }
+-
+-	  /*
+-	   * Read in jvm.cfg for target data model and process vm
+-	   * selection options.
+-	   */
+-	  if (ReadKnownVMs(jrepath, ((wanted==64)?BIG_ARCH:SMALL_ARCH), JNI_TRUE) < 1) {
+-	    goto EndDataModelSpeculate;
+-	  }
+-	  jvmpath[0] = '\0';
+-	  jvmtype = CheckJvmType(_argcp, _argvp, JNI_TRUE);
+-	  /* exec child can do error checking on the existence of the path */
+-	  jvmpathExists = GetJVMPath(jrepath, jvmtype, jvmpath, so_jvmpath, 
+-				     ((wanted==64)?BIG_ARCH:SMALL_ARCH));
++        if (running != wanted) {
++          /* Find out where the JRE is that we will be using. */
++          if (!GetJREPath(jrepath, so_jrepath, ((wanted==64)?BIG_ARCH:SMALL_ARCH), JNI_TRUE)) {
++            goto EndDataModelSpeculate;
++          }
++
++          /*
++           * Read in jvm.cfg for target data model and process vm
++           * selection options.
++           */
++          if (ReadKnownVMs(jrepath, ((wanted==64)?BIG_ARCH:SMALL_ARCH), JNI_TRUE) < 1) {
++            goto EndDataModelSpeculate;
++          }
++          jvmpath[0] = '\0';
++          jvmtype = CheckJvmType(_argcp, _argvp, JNI_TRUE);
++          /* exec child can do error checking on the existence of the path */
++          jvmpathExists = GetJVMPath(jrepath, jvmtype, jvmpath, so_jvmpath,
++                                     ((wanted==64)?BIG_ARCH:SMALL_ARCH));
+ 
+-	}
++        }
+       EndDataModelSpeculate: /* give up and let other code report error message */
+-	;
++        ;
+ #else
+-	fprintf(stderr, "Running a %d-bit JVM is not supported on this platform.\n", wanted);
+-	exit(1);
++        fprintf(stderr, "Running a %d-bit JVM is not supported on this platform.\n", wanted);
++        exit(1);
+ #endif
+       }
+ 
+       /*
+        * We will set the LD_LIBRARY_PATH as follows:
+        *
+-       *     o		$JVMPATH (directory portion only)
+-       *     o		$JRE/lib/$ARCH
+-       *     o		$JRE/../lib/$ARCH
++       *     o          $JVMPATH (directory portion only)
++       *     o          $JRE/lib/$ARCH
++       *     o          $JRE/../lib/$ARCH
+        *
+        * followed by the user's previous effective LD_LIBRARY_PATH, if
+        * any.
+        */
+ 
+ #ifdef __sun
+-      /* 
++      /*
+        * Starting in Solaris 7, ld.so.1 supports three LD_LIBRARY_PATH
+        * variables:
+        *
+@@ -417,39 +417,39 @@
+ 
+       switch(wanted) {
+       case 0:
+-	if(running == 32) {
+-	  dmpath = getenv("LD_LIBRARY_PATH_32");
+-	  wanted = 32;
+-	}
+-	else {
+-	  dmpath = getenv("LD_LIBRARY_PATH_64");
+-	  wanted = 64;
+-	}
+-	break;
++        if(running == 32) {
++          dmpath = getenv("LD_LIBRARY_PATH_32");
++          wanted = 32;
++        }
++        else {
++          dmpath = getenv("LD_LIBRARY_PATH_64");
++          wanted = 64;
++        }
++        break;
+ 
+       case 32:
+-	dmpath = getenv("LD_LIBRARY_PATH_32");
+-	break;
++        dmpath = getenv("LD_LIBRARY_PATH_32");
++        break;
+ 
+       case 64:
+-	dmpath = getenv("LD_LIBRARY_PATH_64");
+-	break;
+-      
++        dmpath = getenv("LD_LIBRARY_PATH_64");
++        break;
++
+       default:
+-	fprintf(stderr, "Improper value at line %d.", __LINE__);
+-	exit(1); /* unknown value in wanted */
+-	break;
++        fprintf(stderr, "Improper value at line %d.", __LINE__);
++        exit(1); /* unknown value in wanted */
++        break;
+       }
+-    
+-      /* 
++
++      /*
+        * If dmpath is NULL, the relevant data model specific variable is
+        * not set and normal LD_LIBRARY_PATH should be used.
+        */
+       if( dmpath == NULL) {
+-	runpath = getenv("LD_LIBRARY_PATH");
++        runpath = getenv("LD_LIBRARY_PATH");
+       }
+       else {
+-	runpath = dmpath;
++        runpath = dmpath;
+       }
+ #else
+       /*
+@@ -473,16 +473,16 @@
+        * be found must be handled through other mechanisms.
+        */
+       if((getgid() != getegid()) || (getuid() != geteuid()) ) {
+-	return;
++        return;
+       }
+-#endif    
++#endif
+ 
+       /* runpath contains current effective LD_LIBRARY_PATH setting */
+ 
+       jvmpath = strdup(jvmpath);
+-      new_runpath = MemAlloc( ((runpath!=NULL)?strlen(runpath):0) + 
+-			      2*strlen(jrepath) + 2*strlen(arch) +
+-			      strlen(jvmpath) + 52);
++      new_runpath = MemAlloc( ((runpath!=NULL)?strlen(runpath):0) +
++                              2*strlen(jrepath) + 2*strlen(arch) +
++                              strlen(jvmpath) + 52);
+       newpath = new_runpath + strlen("LD_LIBRARY_PATH=");
+ 
+ 
+@@ -490,63 +490,63 @@
+        * Create desired LD_LIBRARY_PATH value for target data model.
+        */
+       {
+-	/* remove the name of the .so from the JVM path */
+-	lastslash = strrchr(jvmpath, '/');
+-	if (lastslash)
+-	  *lastslash = '\0';
++        /* remove the name of the .so from the JVM path */
++        lastslash = strrchr(jvmpath, '/');
++        if (lastslash)
++          *lastslash = '\0';
+ 
+ 
+-	/* jvmpath, ((running != wanted)?((wanted==64)?"/"BIG_ARCH:"/.."):""), */
+-
+-	sprintf(new_runpath, "LD_LIBRARY_PATH="
+-		"%s:"
+-		"%s/lib/%s:"
+-		"%s/../lib/%s",
+-		jvmpath,
++        /* jvmpath, ((running != wanted)?((wanted==64)?"/"BIG_ARCH:"/.."):""), */
++
++        sprintf(new_runpath, "LD_LIBRARY_PATH="
++                "%s:"
++                "%s/lib/%s:"
++                "%s/../lib/%s",
++                jvmpath,
+ #ifdef DUAL_MODE
+-		jrepath, ((wanted==64)?BIG_ARCH:SMALL_ARCH),
+-		jrepath, ((wanted==64)?BIG_ARCH:SMALL_ARCH)
++                jrepath, ((wanted==64)?BIG_ARCH:SMALL_ARCH),
++                jrepath, ((wanted==64)?BIG_ARCH:SMALL_ARCH)
+ #else
+-		jrepath, arch,
+-		jrepath, arch
++                jrepath, arch,
++                jrepath, arch
+ #endif
+-		);
++                );
+ 
+ 
+-	/* 
+-	 * Check to make sure that the prefix of the current path is the 
+-	 * desired environment variable setting.
+-	 */
+-	if (runpath != NULL && 
+-	    strncmp(newpath, runpath, strlen(newpath))==0 &&
+-	    (runpath[strlen(newpath)] == 0 || runpath[strlen(newpath)] == ':') &&
+-	    (running == wanted) /* data model does not have to be changed */
++        /*
++         * Check to make sure that the prefix of the current path is the
++         * desired environment variable setting.
++         */
++        if (runpath != NULL &&
++            strncmp(newpath, runpath, strlen(newpath))==0 &&
++            (runpath[strlen(newpath)] == 0 || runpath[strlen(newpath)] == ':') &&
++            (running == wanted) /* data model does not have to be changed */
+ #ifdef __sun
+-	    && (dmpath == NULL)    /* data model specific variables not set  */
++            && (dmpath == NULL)    /* data model specific variables not set  */
+ #endif
+-	    ) {
++            ) {
+ 
+-	  return;
++          return;
+ 
+-	}
++        }
+       }
+-    
+-      /* 
++
++      /*
+        * Place the desired environment setting onto the prefix of
+        * LD_LIBRARY_PATH.  Note that this prevents any possible infinite
+        * loop of execv() because we test for the prefix, above.
+        */
+       if (runpath != 0) {
+-	strcat(new_runpath, ":");
+-	strcat(new_runpath, runpath);
++        strcat(new_runpath, ":");
++        strcat(new_runpath, runpath);
+       }
+-    
++
+       if( putenv(new_runpath) != 0) {
+-	exit(1); /* problem allocating memory; LD_LIBRARY_PATH not set
+-		    properly */
++        exit(1); /* problem allocating memory; LD_LIBRARY_PATH not set
++                    properly */
+       }
+ 
+-      /* 
++      /*
+        * Unix systems document that they look at LD_LIBRARY_PATH only
+        * once at startup, so we have to re-exec the current executable
+        * to get the changed environment variable to have an effect.
+@@ -559,63 +559,63 @@
+        */
+ 
+       if( dmpath != NULL)
+-	(void)UnsetEnv((wanted==32)?"LD_LIBRARY_PATH_32":"LD_LIBRARY_PATH_64");
++        (void)UnsetEnv((wanted==32)?"LD_LIBRARY_PATH_32":"LD_LIBRARY_PATH_64");
+ #endif
+ 
+       newenvp = environ;
+ 
+       {
+-	char *newexec = execname;
++        char *newexec = execname;
+ #ifdef DUAL_MODE
+-	/* 
+-	 * If the data model is being changed, the path to the
+-	 * executable must be updated accordingly; the executable name
+-	 * and directory the executable resides in are separate.  In the
+-	 * case of 32 => 64, the new bits are assumed to reside in, e.g.
+-	 * "olddir/BIGARCH/execname"; in the case of 64 => 32,
+-	 * the bits are assumed to be in "olddir/../execname".  For example,
+-	 *
+-	 * olddir/sparcv9/execname
+-	 * olddir/amd64/execname
+-	 *
+-	 * for Solaris SPARC and Linux amd64, respectively.
+-	 */
+-
+-	if (running != wanted) {
+-	  char *oldexec = strcpy(MemAlloc(strlen(execname) + 1), execname);
+-	  char *olddir = oldexec;
+-	  char *oldbase = strrchr(oldexec, '/');
+-
+-	
+-	  newexec = MemAlloc(strlen(execname) + 20);
+-	  *oldbase++ = 0;
+-	  sprintf(newexec, "%s/%s/%s", olddir, 
+-		  ((wanted==64) ? BIG_ARCH : ".."), oldbase);
+-	  argv[0] = newexec;
+-	} 
++        /*
++         * If the data model is being changed, the path to the
++         * executable must be updated accordingly; the executable name
++         * and directory the executable resides in are separate.  In the
++         * case of 32 => 64, the new bits are assumed to reside in, e.g.
++         * "olddir/BIGARCH/execname"; in the case of 64 => 32,
++         * the bits are assumed to be in "olddir/../execname".  For example,
++         *
++         * olddir/sparcv9/execname
++         * olddir/amd64/execname
++         *
++         * for Solaris SPARC and Linux amd64, respectively.
++         */
++
++        if (running != wanted) {
++          char *oldexec = strcpy(MemAlloc(strlen(execname) + 1), execname);
++          char *olddir = oldexec;
++          char *oldbase = strrchr(oldexec, '/');
++
++
++          newexec = MemAlloc(strlen(execname) + 20);
++          *oldbase++ = 0;
++          sprintf(newexec, "%s/%s/%s", olddir,
++                  ((wanted==64) ? BIG_ARCH : ".."), oldbase);
++          argv[0] = newexec;
++        }
+ #endif
+ 
+-	execve(newexec, argv, newenvp);
+-	perror("execve()");
++        execve(newexec, argv, newenvp);
++        perror("execve()");
+ 
+-	fprintf(stderr, "Error trying to exec %s.\n", newexec);
+-	fprintf(stderr, "Check if file exists and permissions are set correctly.\n");
++        fprintf(stderr, "Error trying to exec %s.\n", newexec);
++        fprintf(stderr, "Check if file exists and permissions are set correctly.\n");
+ 
+ #ifdef DUAL_MODE
+-	if (running != wanted) {
+-	  fprintf(stderr, "Failed to start a %d-bit JVM process from a %d-bit JVM.\n",
+-		  wanted, running);
++        if (running != wanted) {
++          fprintf(stderr, "Failed to start a %d-bit JVM process from a %d-bit JVM.\n",
++                  wanted, running);
+ #  ifdef __sun
+ 
+ #    ifdef __sparc
+-	  fprintf(stderr, "Verify all necessary J2SE components have been installed.\n" );
+-	  fprintf(stderr,
+-		  "(Solaris SPARC 64-bit components must be installed after 32-bit components.)\n" );
+-#    else 
+-	  fprintf(stderr, "Either 64-bit processes are not supported by this platform\n");
+-	  fprintf(stderr, "or the 64-bit components have not been installed.\n");
++          fprintf(stderr, "Verify all necessary J2SE components have been installed.\n" );
++          fprintf(stderr,
++                  "(Solaris SPARC 64-bit components must be installed after 32-bit components.)\n" );
++#    else
++          fprintf(stderr, "Either 64-bit processes are not supported by this platform\n");
++          fprintf(stderr, "or the 64-bit components have not been installed.\n");
+ #    endif
+-	}
++        }
+ #  endif
+ #endif
+ 
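One detail of these hunks deserves a note: the early return a few hunks up, taken when the current LD_LIBRARY_PATH already begins with the entries the launcher would prepend, is what makes the execve() above safe. The re-exec'd child observes its own prefix and returns instead of exec'ing again. A distilled sketch of that guard, with the surrounding Solaris dmpath and data-model conditions elided:

    #include <string.h>

    /* The new LD_LIBRARY_PATH entries are only prepended (followed by a
     * re-exec) when they are not already the prefix of the current value;
     * the exec'd child then sees its own prefix and returns early, so the
     * execve() loop terminates. */
    int already_prefixed(const char *runpath, const char *newpath)
    {
        size_t n = strlen(newpath);
        return runpath != NULL
            && strncmp(newpath, runpath, n) == 0
            && (runpath[n] == '\0' || runpath[n] == ':');
    }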
+@@ -654,15 +654,15 @@
+  */
+ static jboolean
+ GetJVMPath(const char *jrepath, const char *jvmtype,
+-	   char *jvmpath, jint jvmpathsize, char * arch)
++           char *jvmpath, jint jvmpathsize, char * arch)
+ {
+     struct stat s;
+-    
++
+ #ifndef GAMMA
+     if (strchr(jvmtype, '/')) {
+-	sprintf(jvmpath, "%s/" JVM_DLL, jvmtype);
++        sprintf(jvmpath, "%s/" JVM_DLL, jvmtype);
+     } else {
+-	sprintf(jvmpath, "%s/lib/%s/%s/" JVM_DLL, jrepath, arch, jvmtype);
++        sprintf(jvmpath, "%s/lib/%s/%s/" JVM_DLL, jrepath, arch, jvmtype);
+     }
+ #else
+     /* For gamma launcher, JVM is either built-in or in the same directory. */
+@@ -671,11 +671,11 @@
+ 
+     char *p;
+ 
+-    snprintf(jvmpath, jvmpathsize, GetExecname());
++    snprintf(jvmpath, jvmpathsize, "%s", GetExecname());
+     p = strrchr(jvmpath, '/');
+     if (p) {
+        /* replace executable name with libjvm.so */
+-       snprintf(p + 1, jvmpathsize - (p + 1 - jvmpath), JVM_DLL);
++       snprintf(p + 1, jvmpathsize - (p + 1 - jvmpath), "%s", JVM_DLL);
+     } else {
+        /* this case shouldn't happen */
+        snprintf(jvmpath, jvmpathsize, "%s", JVM_DLL);
+@@ -686,13 +686,13 @@
+       printf("Does `%s' exist ... ", jvmpath);
+ 
+     if (stat(jvmpath, &s) == 0) {
+-	if (_launcher_debug) 
+-	  printf("yes.\n");
+-	return JNI_TRUE;
++        if (_launcher_debug)
++          printf("yes.\n");
++        return JNI_TRUE;
+     } else {
+-	if (_launcher_debug)
+-	  printf("no.\n");
+-	return JNI_FALSE;
++        if (_launcher_debug)
++          printf("no.\n");
++        return JNI_FALSE;
+     }
+ }
+ 
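Nearly every hunk in this patch is a tab-to-space conversion, but the GAMMA branch of GetJVMPath above is a genuine fix: the executable path was previously passed to snprintf() as the format string, so any '%' in the path would be interpreted as a conversion specifier. A minimal demonstration of the difference, using a hypothetical path rather than launcher code:

    #include <stdio.h>

    int main(void)
    {
        char buf[64];
        const char *path = "/opt/jdk%d/bin/java";  /* hypothetical path with '%' */

        /* snprintf(buf, sizeof buf, path);     old pattern: '%d' is parsed
         *                                      as a conversion, reading a
         *                                      nonexistent argument        */
        snprintf(buf, sizeof buf, "%s", path);  /* patched: copied verbatim */
        puts(buf);
        return 0;
    }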
+@@ -705,21 +705,21 @@
+     char libjava[MAXPATHLEN];
+ 
+     if (GetApplicationHome(path, pathsize)) {
+-	/* Is JRE co-located with the application? */
+-	sprintf(libjava, "%s/lib/%s/" JAVA_DLL, path, arch);
+-	if (access(libjava, F_OK) == 0) {
+-	    goto found;
+-	}
+-
+-	/* Does the app ship a private JRE in <apphome>/jre directory? */
+-	sprintf(libjava, "%s/jre/lib/%s/" JAVA_DLL, path, arch);
+-	if (access(libjava, F_OK) == 0) {
+-	    strcat(path, "/jre");
+-	    goto found;
+-	}
++        /* Is JRE co-located with the application? */
++        sprintf(libjava, "%s/lib/%s/" JAVA_DLL, path, arch);
++        if (access(libjava, F_OK) == 0) {
++            goto found;
++        }
++
++        /* Does the app ship a private JRE in <apphome>/jre directory? */
++        sprintf(libjava, "%s/jre/lib/%s/" JAVA_DLL, path, arch);
++        if (access(libjava, F_OK) == 0) {
++            strcat(path, "/jre");
++            goto found;
++        }
+     }
+ 
+-    if (!speculative) 
++    if (!speculative)
+       fprintf(stderr, "Error: could not find " JAVA_DLL "\n");
+     return JNI_FALSE;
+ 
+@@ -742,7 +742,7 @@
+     void *libjvm;
+ 
+     if (_launcher_debug) {
+-	printf("JVM path is %s\n", jvmpath);
++        printf("JVM path is %s\n", jvmpath);
+     }
+ 
+     libjvm = dlopen(jvmpath, RTLD_NOW + RTLD_GLOBAL);
+@@ -752,18 +752,18 @@
+       Elf32_Ehdr elf_head;
+       int count;
+       int location;
+-      
++
+       fp = fopen(jvmpath, "r");
+       if(fp == NULL)
+-	goto error;
+-    
++        goto error;
++
+       /* read in elf header */
+       count = fread((void*)(&elf_head), sizeof(Elf32_Ehdr), 1, fp);
+       fclose(fp);
+       if(count < 1)
+-	goto error;
++        goto error;
+ 
+-      /* 
++      /*
+        * Check for running a server vm (compiled with -xarch=v8plus)
+        * on a stock v8 processor.  In this case, the machine type in
+        * the elf header would not be included the architecture list
+@@ -773,23 +773,23 @@
+        * model.
+        */
+       if(elf_head.e_machine == EM_SPARC32PLUS) {
+-	char buf[257];  /* recommended buffer size from sysinfo man
+-			   page */
+-	long length;
+-	char* location;
+-	
+-	length = sysinfo(SI_ISALIST, buf, 257);
+-	if(length > 0) {
+-	  location = strstr(buf, "sparcv8plus ");
+-	  if(location == NULL) {
+-	    fprintf(stderr, "SPARC V8 processor detected; Server compiler requires V9 or better.\n");
+-	    fprintf(stderr, "Use Client compiler on V8 processors.\n");
+-	    fprintf(stderr, "Could not create the Java virtual machine.\n");
+-	    return JNI_FALSE;
+-	  }
+-	}
++        char buf[257];  /* recommended buffer size from sysinfo man
++                           page */
++        long length;
++        char* location;
++
++        length = sysinfo(SI_ISALIST, buf, 257);
++        if(length > 0) {
++          location = strstr(buf, "sparcv8plus ");
++          if(location == NULL) {
++            fprintf(stderr, "SPARC V8 processor detected; Server compiler requires V9 or better.\n");
++            fprintf(stderr, "Use Client compiler on V8 processors.\n");
++            fprintf(stderr, "Could not create the Java virtual machine.\n");
++            return JNI_FALSE;
++          }
++        }
+       }
+-#endif 
++#endif
+       fprintf(stderr, "dl failure on line %d", __LINE__);
+       goto error;
+     }
+@@ -797,10 +797,10 @@
+     ifn->CreateJavaVM = (CreateJavaVM_t)
+       dlsym(libjvm, "JNI_CreateJavaVM");
+     if (ifn->CreateJavaVM == NULL)
+-	goto error;
++        goto error;
+ 
+     ifn->GetDefaultJavaVMInitArgs = (GetDefaultJavaVMInitArgs_t)
+-	dlsym(libjvm, "JNI_GetDefaultJavaVMInitArgs");
++        dlsym(libjvm, "JNI_GetDefaultJavaVMInitArgs");
+     if (ifn->GetDefaultJavaVMInitArgs == NULL)
+       goto error;
+ 
+@@ -820,7 +820,7 @@
+ {
+     static const char Xusage_txt[] = "/Xusage.txt";
+     Dl_info dlinfo;
+-   
++
+     /* we use RTLD_NOW because of problems with ld.so.1 and green threads */
+     dladdr(dlsym(dlopen(JVM_DLL, RTLD_NOW), "JNI_CreateJavaVM"), &dlinfo);
+     strncpy(buf, (char *)dlinfo.dli_fname, bufsize - sizeof(Xusage_txt));
+@@ -839,18 +839,18 @@
+ #ifdef __linux__
+     char *execname = GetExecname();
+     if (execname) {
+-	strncpy(buf, execname, bufsize-1);
+-	buf[bufsize-1] = '\0';
++        strncpy(buf, execname, bufsize-1);
++        buf[bufsize-1] = '\0';
+     } else {
+-	return JNI_FALSE;
++        return JNI_FALSE;
+     }
+ #else
+     Dl_info dlinfo;
+ 
+     dladdr((void *)GetApplicationHome, &dlinfo);
+     if (realpath(dlinfo.dli_fname, buf) == NULL) {
+-	fprintf(stderr, "Error: realpath(`%s') failed.\n", dlinfo.dli_fname);
+-	return JNI_FALSE;
++        fprintf(stderr, "Error: realpath(`%s') failed.\n", dlinfo.dli_fname);
++        return JNI_FALSE;
+     }
+ #endif
+ 
+@@ -866,21 +866,21 @@
+     }
+ #else
+     if (strrchr(buf, '/') == 0) {
+-	buf[0] = '\0';
+-	return JNI_FALSE;
++        buf[0] = '\0';
++        return JNI_FALSE;
+     }
+-    *(strrchr(buf, '/')) = '\0';	/* executable file      */
++    *(strrchr(buf, '/')) = '\0';        /* executable file      */
+     if (strlen(buf) < 4 || strrchr(buf, '/') == 0) {
+-	buf[0] = '\0';
+-	return JNI_FALSE;
++        buf[0] = '\0';
++        return JNI_FALSE;
+     }
+-    if (strcmp("/bin", buf + strlen(buf) - 4) != 0) 
+-	*(strrchr(buf, '/')) = '\0';	/* sparcv9 or amd64     */
++    if (strcmp("/bin", buf + strlen(buf) - 4) != 0)
++        *(strrchr(buf, '/')) = '\0';    /* sparcv9 or amd64     */
+     if (strlen(buf) < 4 || strcmp("/bin", buf + strlen(buf) - 4) != 0) {
+-	buf[0] = '\0';
+-	return JNI_FALSE;
++        buf[0] = '\0';
++        return JNI_FALSE;
+     }
+-    *(strrchr(buf, '/')) = '\0';	/* bin                  */
++    *(strrchr(buf, '/')) = '\0';        /* bin                  */
+ #endif /* GAMMA */
+ 
+     return JNI_TRUE;
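To see what the strrchr chain above computes in the non-GAMMA case, consider two hypothetical installs:

    /usr/jdk/bin/java          -> strip "java"                  -> "/usr/jdk/bin" ends in /bin -> home "/usr/jdk"
    /usr/jdk/bin/sparcv9/java  -> strip "java", strip "sparcv9" -> "/usr/jdk/bin" ends in /bin -> home "/usr/jdk"

Any path that does not end this way fails the checks, zeroes the buffer and returns JNI_FALSE.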
+@@ -912,8 +912,8 @@
+     sprintf(name, "%s%c%s", indir, FILE_SEPARATOR, cmd);
+     if (!ProgramExists(name)) return 0;
+     real = MemAlloc(PATH_MAX + 2);
+-    if (!realpath(name, real)) 
+-	strcpy(real, name);
++    if (!realpath(name, real))
++        strcpy(real, name);
+     return real;
+ }
+ 
+@@ -931,14 +931,14 @@
+     char *result = NULL;
+ 
+     /* absolute path? */
+-    if (*program == FILE_SEPARATOR || 
+-	(FILE_SEPARATOR=='\\' && strrchr(program, ':')))
+-	return Resolve("", program+1);
++    if (*program == FILE_SEPARATOR ||
++        (FILE_SEPARATOR=='\\' && strrchr(program, ':')))
++        return Resolve("", program+1);
+ 
+     /* relative path? */
+     if (strrchr(program, FILE_SEPARATOR) != 0) {
+-	char buf[PATH_MAX+2];
+-	return Resolve(getcwd(cwdbuf, sizeof(cwdbuf)), program);
++        char buf[PATH_MAX+2];
++        return Resolve(getcwd(cwdbuf, sizeof(cwdbuf)), program);
+     }
+ 
+     /* from search path? */
+@@ -948,19 +948,19 @@
+     strcpy(tmp_path, path);
+ 
+     for (f=tmp_path; *f && result==0; ) {
+-	char *s = f;
+-	while (*f && (*f != PATH_SEPARATOR)) ++f;
+-	if (*f) *f++ = 0;
+-	if (*s == FILE_SEPARATOR)
+-	    result = Resolve(s, program);
+-	else {
+-	    /* relative path element */
+-	    char dir[2*PATH_MAX];
+-	    sprintf(dir, "%s%c%s", getcwd(cwdbuf, sizeof(cwdbuf)), 
+-		    FILE_SEPARATOR, s);
+-	    result = Resolve(dir, program);
+-	}
+-	if (result != 0) break;
++        char *s = f;
++        while (*f && (*f != PATH_SEPARATOR)) ++f;
++        if (*f) *f++ = 0;
++        if (*s == FILE_SEPARATOR)
++            result = Resolve(s, program);
++        else {
++            /* relative path element */
++            char dir[2*PATH_MAX];
++            sprintf(dir, "%s%c%s", getcwd(cwdbuf, sizeof(cwdbuf)),
++                    FILE_SEPARATOR, s);
++            result = Resolve(dir, program);
++        }
++        if (result != 0) break;
+     }
+ 
+     free(tmp_path);
+@@ -989,37 +989,37 @@
+ {
+     char* exec_path = NULL;
+ 
+-    if (execname != NULL)	/* Already determined */
+-	return (execname);
+-   
++    if (execname != NULL)       /* Already determined */
++        return (execname);
++
+ #if defined(__sun)
+     {
+         Dl_info dlinfo;
+         if (dladdr((void*)&SetExecname, &dlinfo)) {
+-	    char *resolved = (char*)MemAlloc(PATH_MAX+1);
+-   	    if (resolved != NULL) {
+-		exec_path = realpath(dlinfo.dli_fname, resolved);
+-		if (exec_path == NULL) {
+-		    free(resolved);
+-		}
+-	    }
++            char *resolved = (char*)MemAlloc(PATH_MAX+1);
++            if (resolved != NULL) {
++                exec_path = realpath(dlinfo.dli_fname, resolved);
++                if (exec_path == NULL) {
++                    free(resolved);
++                }
++            }
+         }
+     }
+ #elif defined(__linux__)
+     {
+-	const char* self = "/proc/self/exe";
++        const char* self = "/proc/self/exe";
+         char buf[PATH_MAX+1];
+         int len = readlink(self, buf, PATH_MAX);
+         if (len >= 0) {
+-	    buf[len] = '\0';		/* readlink doesn't nul terminate */
+-	    exec_path = strdup(buf);
+-	}
++            buf[len] = '\0';            /* readlink doesn't nul terminate */
++            exec_path = strdup(buf);
++        }
+     }
+ #else /* !__sun && !__linux */
+     {
+         /* Not implemented */
+     }
+-#endif 
++#endif
+ 
+     if (exec_path == NULL) {
+         exec_path = FindExecName(argv[0]);
+@@ -1066,13 +1066,13 @@
+ jboolean RemovableMachineDependentOption(char * option) {
+   /*
+    * Unconditionally remove both -d32 and -d64 options since only
+-   * the last such options has an effect; e.g. 
++   * the last such options has an effect; e.g.
+    * java -d32 -d64 -d32 -version
+-   * is equivalent to 
++   * is equivalent to
+    * java -d32 -version
+    */
+ 
+-  if( (strcmp(option, "-d32")  == 0 ) || 
++  if( (strcmp(option, "-d32")  == 0 ) ||
+       (strcmp(option, "-d64")  == 0 ))
+     return JNI_TRUE;
+   else
+@@ -1081,15 +1081,15 @@
+ 
+ void PrintMachineDependentOptions() {
+       fprintf(stdout,
+-	"    -d32          use a 32-bit data model if available\n"
+-	"\n"
+-	"    -d64          use a 64-bit data model if available\n");
++        "    -d32          use a 32-bit data model if available\n"
++        "\n"
++        "    -d64          use a 64-bit data model if available\n");
+       return;
+ }
+ 
+ #ifndef GAMMA  /* gamma launcher does not have ergonomics */
+ 
+-/* 
++/*
+  * The following methods (down to ServerClassMachine()) answer
+  * the question about whether a machine is a "server-class"
+  * machine.  A server-class machine is loosely defined as one
+@@ -1099,7 +1099,7 @@
+  * The definition of memory is also somewhat fuzzy, since x86
+  * machines seem not to report all the memory in their DIMMs, we
+  * think because of memory mapping of graphics cards, etc.
+- * 
++ *
+  * This code is somewhat more confused with #ifdef's than we'd
+  * like because this file is used by both Solaris and Linux
+  * platforms, and so needs to be parameterized for SPARC and
+@@ -1118,7 +1118,7 @@
+   const uint64_t page_size = (uint64_t) sysconf(_SC_PAGESIZE);
+   const uint64_t result    = pages * page_size;
+ # define UINT64_FORMAT "%" PRIu64
+-    
++
+   if (_launcher_debug) {
+     printf("pages: " UINT64_FORMAT
+            "  page_size: " UINT64_FORMAT
+@@ -1133,7 +1133,7 @@
+ /* Methods for solaris-sparc: these are easy. */
+ 
+ /* Ask the OS how many processors there are. */
+-unsigned long 
++unsigned long
+ physical_processors(void) {
+   const unsigned long sys_processors = sysconf(_SC_NPROCESSORS_CONF);
+ 
+@@ -1175,7 +1175,7 @@
+  * There's a corresponding version of linux-i586
+  * because the compilers are different.
+  */
+-void 
++void
+ get_cpuid(uint32_t arg,
+           uint32_t* eaxp,
+           uint32_t* ebxp,
+@@ -1227,7 +1227,7 @@
+  * There's a corresponding version of solaris-i586
+  * because the compilers are different.
+  */
+-void 
++void
+ get_cpuid(uint32_t arg,
+           uint32_t* eaxp,
+           uint32_t* ebxp,
+@@ -1235,12 +1235,12 @@
+           uint32_t* edxp) {
+ #ifdef _LP64
+   __asm__ volatile (/* Instructions */
+-                    "	movl	%4, %%eax  \n"
+-                    "	cpuid              \n"
+-                    "	movl    %%eax, (%0)\n"
+-                    "	movl    %%ebx, (%1)\n"
+-                    "	movl    %%ecx, (%2)\n"
+-                    "	movl    %%edx, (%3)\n"
++                    "   movl    %4, %%eax  \n"
++                    "   cpuid              \n"
++                    "   movl    %%eax, (%0)\n"
++                    "   movl    %%ebx, (%1)\n"
++                    "   movl    %%ecx, (%2)\n"
++                    "   movl    %%edx, (%3)\n"
+                     : /* Outputs */
+                     : /* Inputs */
+                     "r" (eaxp),
+@@ -1260,12 +1260,12 @@
+                         /* ebx is callee-save, so push it */
+                         /* even though it's in the clobbers section */
+                     "   pushl   %%ebx      \n"
+-                    "	movl	%4, %%eax  \n"
+-                    "	cpuid              \n"
+-                    "	movl    %%eax, %0  \n"
+-                    "	movl    %%ebx, %1  \n"
+-                    "	movl    %%ecx, %2  \n"
+-                    "	movl    %%edx, %3  \n"
++                    "   movl    %4, %%eax  \n"
++                    "   cpuid              \n"
++                    "   movl    %%eax, %0  \n"
++                    "   movl    %%ebx, %1  \n"
++                    "   movl    %%ecx, %2  \n"
++                    "   movl    %%edx, %3  \n"
+                         /* restore ebx */
+                     "   popl    %%ebx      \n"
+ 
+@@ -1289,7 +1289,7 @@
+ #endif /* __linux__ && i586 */
+ 
+ #ifdef i586
+-/* 
++/*
+  * Routines shared by solaris-i586 and linux-i586.
+  */
+ 
+@@ -1326,17 +1326,17 @@
+   get_cpuid(0, &dummy, &vendor_id[0], &vendor_id[2], &vendor_id[1]);
+   if (_launcher_debug) {
+     printf("vendor: %c %c %c %c %c %c %c %c %c %c %c %c \n",
+-           ((vendor_id[0] >>  0) & 0xff), 
+-           ((vendor_id[0] >>  8) & 0xff), 
+-           ((vendor_id[0] >> 16) & 0xff), 
+-           ((vendor_id[0] >> 24) & 0xff), 
+-           ((vendor_id[1] >>  0) & 0xff), 
+-           ((vendor_id[1] >>  8) & 0xff), 
+-           ((vendor_id[1] >> 16) & 0xff), 
+-           ((vendor_id[1] >> 24) & 0xff), 
+-           ((vendor_id[2] >>  0) & 0xff), 
+-           ((vendor_id[2] >>  8) & 0xff), 
+-           ((vendor_id[2] >> 16) & 0xff), 
++           ((vendor_id[0] >>  0) & 0xff),
++           ((vendor_id[0] >>  8) & 0xff),
++           ((vendor_id[0] >> 16) & 0xff),
++           ((vendor_id[0] >> 24) & 0xff),
++           ((vendor_id[1] >>  0) & 0xff),
++           ((vendor_id[1] >>  8) & 0xff),
++           ((vendor_id[1] >> 16) & 0xff),
++           ((vendor_id[1] >> 24) & 0xff),
++           ((vendor_id[2] >>  0) & 0xff),
++           ((vendor_id[2] >>  8) & 0xff),
++           ((vendor_id[2] >> 16) & 0xff),
+            ((vendor_id[2] >> 24) & 0xff));
+   }
+   get_cpuid(1, &value_of_eax, &dummy, &dummy, &value_of_edx);
+@@ -1346,17 +1346,17 @@
+   }
+   if ((((value_of_eax >> FAMILY_ID_SHIFT) & FAMILY_ID_MASK) == PENTIUM4_FAMILY_ID) ||
+       (((value_of_eax >> EXT_FAMILY_ID_SHIFT) & EXT_FAMILY_ID_MASK) != 0)) {
+-    if ((((vendor_id[0] >>  0) & 0xff) == 'G') && 
+-        (((vendor_id[0] >>  8) & 0xff) == 'e') && 
+-        (((vendor_id[0] >> 16) & 0xff) == 'n') && 
+-        (((vendor_id[0] >> 24) & 0xff) == 'u') && 
+-        (((vendor_id[1] >>  0) & 0xff) == 'i') && 
+-        (((vendor_id[1] >>  8) & 0xff) == 'n') && 
+-        (((vendor_id[1] >> 16) & 0xff) == 'e') && 
+-        (((vendor_id[1] >> 24) & 0xff) == 'I') && 
+-        (((vendor_id[2] >>  0) & 0xff) == 'n') && 
+-        (((vendor_id[2] >>  8) & 0xff) == 't') && 
+-        (((vendor_id[2] >> 16) & 0xff) == 'e') && 
++    if ((((vendor_id[0] >>  0) & 0xff) == 'G') &&
++        (((vendor_id[0] >>  8) & 0xff) == 'e') &&
++        (((vendor_id[0] >> 16) & 0xff) == 'n') &&
++        (((vendor_id[0] >> 24) & 0xff) == 'u') &&
++        (((vendor_id[1] >>  0) & 0xff) == 'i') &&
++        (((vendor_id[1] >>  8) & 0xff) == 'n') &&
++        (((vendor_id[1] >> 16) & 0xff) == 'e') &&
++        (((vendor_id[1] >> 24) & 0xff) == 'I') &&
++        (((vendor_id[2] >>  0) & 0xff) == 'n') &&
++        (((vendor_id[2] >>  8) & 0xff) == 't') &&
++        (((vendor_id[2] >> 16) & 0xff) == 'e') &&
+         (((vendor_id[2] >> 24) & 0xff) == 'l')) {
+       if (((value_of_edx >> HT_BIT_SHIFT) & HT_BIT_MASK) == HT_BIT_MASK) {
+         if (_launcher_debug) {
+@@ -1388,14 +1388,14 @@
+ unsigned int
+ logical_processors_per_package(void) {
+   /*
+-   * After CPUID with EAX==1, register EBX bits 23 through 16 
++   * After CPUID with EAX==1, register EBX bits 23 through 16
+    * indicate the number of logical processors per package
+    */
+ # define NUM_LOGICAL_SHIFT 16
+ # define NUM_LOGICAL_MASK 0xff
+   unsigned int result                        = 1U;
+   const HyperThreadingSupport hyperthreading = hyperthreading_support();
+-  
++
+   if (hyperthreading == hts_supported) {
+     uint32_t value_of_ebx = 0U;
+     uint32_t dummy        = 0U;
+@@ -1410,7 +1410,7 @@
+ }
+ 
+ /* Compute the number of physical processors, not logical processors */
+-unsigned long 
++unsigned long
+ physical_processors(void) {
+   const long sys_processors = sysconf(_SC_NPROCESSORS_CONF);
+   unsigned long result      = sys_processors;
+@@ -1522,15 +1522,15 @@
+ #ifndef GAMMA /* gamma launcher does not choose JDK/JRE/JVM */
+ 
+ /*
+- *	Since using the file system as a registry is a bit risky, perform
+- *	additional sanity checks on the identified directory to validate
+- *	it as a valid jre/sdk.
++ *      Since using the file system as a registry is a bit risky, perform
++ *      additional sanity checks on the identified directory to validate
++ *      it as a valid jre/sdk.
+  *
+- *	Return 0 if the tests fail; otherwise return non-zero (true).
++ *      Return 0 if the tests fail; otherwise return non-zero (true).
+  *
+- *	Note that checking for anything more than the existence of an
+- *	executable object at bin/java relative to the path being checked
+- *	will break the regression tests.
++ *      Note that checking for anything more than the existence of an
++ *      executable object at bin/java relative to the path being checked
++ *      will break the regression tests.
+  */
+ static int
+ CheckSanity(char *path, char *dir)
+@@ -1538,110 +1538,110 @@
+     char    buffer[PATH_MAX];
+ 
+     if (strlen(path) + strlen(dir) + 11 > PATH_MAX)
+-	return (0);	/* Silently reject "impossibly" long paths */
++        return (0);     /* Silently reject "impossibly" long paths */
+ 
+     (void)strcat(strcat(strcat(strcpy(buffer, path), "/"), dir), "/bin/java");
+     return ((access(buffer, X_OK) == 0) ? 1 : 0);
+ }
+ 
+ /*
+- *	Determine if there is an acceptable JRE in the directory dirname.
+- *	Upon locating the "best" one, return a fully qualified path to
+- *	it. "Best" is defined as the most advanced JRE meeting the
+- *	constraints contained in the manifest_info. If no JRE in this
+- *	directory meets the constraints, return NULL.
++ *      Determine if there is an acceptable JRE in the directory dirname.
++ *      Upon locating the "best" one, return a fully qualified path to
++ *      it. "Best" is defined as the most advanced JRE meeting the
++ *      constraints contained in the manifest_info. If no JRE in this
++ *      directory meets the constraints, return NULL.
+  *
+- *	Note that we don't check for errors in reading the directory
+- *	(which would be done by checking errno).  This is because it
+- *	doesn't matter if we get an error reading the directory, or
+- *	we just don't find anything interesting in the directory.  We
+- *	just return NULL in either case.
++ *      Note that we don't check for errors in reading the directory
++ *      (which would be done by checking errno).  This is because it
++ *      doesn't matter if we get an error reading the directory, or
++ *      we just don't find anything interesting in the directory.  We
++ *      just return NULL in either case.
+  *
+- *	The historical names of j2sdk and j2re were changed to jdk and
+- *	jre respecively as part of the 1.5 rebranding effort.  Since the
+- *	former names are legacy on Linux, they must be recognized for
+- *	all time.  Fortunately, this is a minor cost.
++ *      The historical names of j2sdk and j2re were changed to jdk and
++ *      jre respecively as part of the 1.5 rebranding effort.  Since the
++ *      former names are legacy on Linux, they must be recognized for
++ *      all time.  Fortunately, this is a minor cost.
+  */
+ static char
+ *ProcessDir(manifest_info *info, char *dirname)
+ {
+-    DIR	    *dirp;
++    DIR     *dirp;
+     struct dirent *dp;
+     char    *best = NULL;
+     int     offset;
+-    int	    best_offset = 0;
++    int     best_offset = 0;
+     char    *ret_str = NULL;
+     char    buffer[PATH_MAX];
+ 
+     if ((dirp = opendir(dirname)) == NULL)
+-	return (NULL);
++        return (NULL);
+ 
+     do {
+-	if ((dp = readdir(dirp)) != NULL) {
+-	    offset = 0;
+-	    if ((strncmp(dp->d_name, "jre", 3) == 0) ||
+-	        (strncmp(dp->d_name, "jdk", 3) == 0))
+-		offset = 3;
+-	    else if (strncmp(dp->d_name, "j2re", 4) == 0)
+-		offset = 4;
+-	    else if (strncmp(dp->d_name, "j2sdk", 5) == 0)
+-		offset = 5;
+-	    if (offset > 0) {
+-	    	if ((acceptable_release(dp->d_name + offset,
+-		    info->jre_version)) && CheckSanity(dirname, dp->d_name))
+-	    	    if ((best == NULL) || (exact_version_id(
+-		      dp->d_name + offset, best + best_offset) > 0)) {
+-			if (best != NULL)
+-			    free(best);
+-			best = strdup(dp->d_name);
+-			best_offset = offset;
+-		    }
+-	    }
+-	}
++        if ((dp = readdir(dirp)) != NULL) {
++            offset = 0;
++            if ((strncmp(dp->d_name, "jre", 3) == 0) ||
++                (strncmp(dp->d_name, "jdk", 3) == 0))
++                offset = 3;
++            else if (strncmp(dp->d_name, "j2re", 4) == 0)
++                offset = 4;
++            else if (strncmp(dp->d_name, "j2sdk", 5) == 0)
++                offset = 5;
++            if (offset > 0) {
++                if ((acceptable_release(dp->d_name + offset,
++                    info->jre_version)) && CheckSanity(dirname, dp->d_name))
++                    if ((best == NULL) || (exact_version_id(
++                      dp->d_name + offset, best + best_offset) > 0)) {
++                        if (best != NULL)
++                            free(best);
++                        best = strdup(dp->d_name);
++                        best_offset = offset;
++                    }
++            }
++        }
+     } while (dp != NULL);
+     (void) closedir(dirp);
+     if (best == NULL)
+-	return (NULL);
++        return (NULL);
+     else {
+-	ret_str = MemAlloc(strlen(dirname) + strlen(best) + 2);
+-	ret_str = strcat(strcat(strcpy(ret_str, dirname), "/"), best);
+-	free(best);
+-	return (ret_str);
++        ret_str = MemAlloc(strlen(dirname) + strlen(best) + 2);
++        ret_str = strcat(strcat(strcpy(ret_str, dirname), "/"), best);
++        free(best);
++        return (ret_str);
+     }
+ }
+ 
+ /*
+- *	This is the global entry point. It examines the host for the optimal
+- *	JRE to be used by scanning a set of directories.  The set of directories
+- *	is platform dependent and can be overridden by the environment
+- *	variable JAVA_VERSION_PATH.
++ *      This is the global entry point. It examines the host for the optimal
++ *      JRE to be used by scanning a set of directories.  The set of directories
++ *      is platform dependent and can be overridden by the environment
++ *      variable JAVA_VERSION_PATH.
+  *
+- *	This routine itself simply determines the set of appropriate
+- *	directories before passing control onto ProcessDir().
++ *      This routine itself simply determines the set of appropriate
++ *      directories before passing control onto ProcessDir().
+  */
+ char*
+ LocateJRE(manifest_info* info)
+ {
+-    char	*path;
+-    char	*home;
+-    char	*target = NULL;
+-    char	*dp;
+-    char	*cp;
++    char        *path;
++    char        *home;
++    char        *target = NULL;
++    char        *dp;
++    char        *cp;
+ 
+     /*
+      * Start by getting JAVA_VERSION_PATH
+      */
+     if (info->jre_restrict_search)
+-	path = strdup(system_dir);
++        path = strdup(system_dir);
+     else if ((path = getenv("JAVA_VERSION_PATH")) != NULL)
+-	path = strdup(path);
++        path = strdup(path);
+     else
+-	if ((home = getenv("HOME")) != NULL) {
+-	    path = (char *)MemAlloc(strlen(home) + 13);
+-	    path = strcat(strcat(strcat(strcpy(path, home),
+-	        user_dir), ":"), system_dir);
+-	} else
+-	    path = strdup(system_dir);
++        if ((home = getenv("HOME")) != NULL) {
++            path = (char *)MemAlloc(strlen(home) + 13);
++            path = strcat(strcat(strcat(strcpy(path, home),
++                user_dir), ":"), system_dir);
++        } else
++            path = strdup(system_dir);
+ 
+     /*
+      * Step through each directory on the path. Terminate the scan with
+@@ -1649,14 +1649,14 @@
+      */
+     cp = dp = path;
+     while (dp != NULL) {
+-	cp = strchr(dp, (int)':');
+-	if (cp != NULL)
+-	    *cp = (char)NULL;
+-	if ((target = ProcessDir(info, dp)) != NULL)
+-	    break;
+-	dp = cp;
+-	if (dp != NULL)
+-	    dp++;
++        cp = strchr(dp, (int)':');
++        if (cp != NULL)
++            *cp = (char)NULL;
++        if ((target = ProcessDir(info, dp)) != NULL)
++            break;
++        dp = cp;
++        if (dp != NULL)
++            dp++;
+     }
+     free(path);
+     return (target);
+@@ -1682,8 +1682,8 @@
+      * Resolve the real path to the directory containing the selected JRE.
+      */
+     if (realpath(jre, wanted) == NULL) {
+-	fprintf(stderr, "Unable to resolve %s\n", jre);
+-	exit(1);
++        fprintf(stderr, "Unable to resolve %s\n", jre);
++        exit(1);
+     }
+ 
+     /*
+@@ -1691,8 +1691,8 @@
+      */
+     execname = SetExecname(argv);
+     if (execname == NULL) {
+-	fprintf(stderr, "Unable to resolve current executable\n");
+-	exit(1);
++        fprintf(stderr, "Unable to resolve current executable\n");
++        exit(1);
+     }
+ 
+     /*
+@@ -1701,7 +1701,7 @@
+      * If so, just return.
+      */
+     if (strncmp(wanted, execname, strlen(wanted)) == 0)
+-	return;			/* I am the droid you were looking for */
++        return;                 /* I am the droid you were looking for */
+ 
+     /*
+      * If this isn't the selected version, exec the selected version.
+@@ -1725,8 +1725,8 @@
+      * can be so deadly.
+      */
+     if (strlen(wanted) + strlen(progname) + 6 > PATH_MAX) {
+-	fprintf(stderr, "Path length exceeds maximum length (PATH_MAX)\n");
+-	exit(1);
++        fprintf(stderr, "Path length exceeds maximum length (PATH_MAX)\n");
++        exit(1);
+     }
+ 
+     /*
+@@ -1735,11 +1735,11 @@
+     (void)strcat(strcat(wanted, "/bin/"), progname);
+     argv[0] = progname;
+     if (_launcher_debug) {
+-	int i;
+-	printf("execv(\"%s\"", wanted);
+-	for (i = 0; argv[i] != NULL; i++)
+-	    printf(", \"%s\"", argv[i]);
+-	printf(")\n");
++        int i;
++        printf("execv(\"%s\"", wanted);
++        for (i = 0; argv[i] != NULL; i++)
++            printf(", \"%s\"", argv[i]);
++        printf(")\n");
+     }
+     execv(wanted, argv);
+     fprintf(stderr, "Exec of %s failed\n", wanted);
+@@ -1775,13 +1775,13 @@
+ static int
+ match_noeq(const char *s1, const char *s2)
+ {
+-	while (*s1 == *s2++) {
+-		if (*s1++ == '=')
+-			return (1);
+-	}
+-	if (*s1 == '=' && s2[-1] == '\0')
+-		return (1);
+-	return (0);
++        while (*s1 == *s2++) {
++                if (*s1++ == '=')
++                        return (1);
++        }
++        if (*s1 == '=' && s2[-1] == '\0')
++                return (1);
++        return (0);
+ }
+ 
+ /*
+@@ -1794,27 +1794,27 @@
+ static int
+ borrowed_unsetenv(const char *name)
+ {
+-	long	idx;		/* index into environ */
++        long    idx;            /* index into environ */
+ 
+-	if (name == NULL || *name == '\0' ||
+-	    strchr(name, '=') != NULL) {
+-		return (-1);
+-	}
+-
+-	for (idx = 0; environ[idx] != NULL; idx++) {
+-		if (match_noeq(environ[idx], name))
+-			break;
+-	}
+-	if (environ[idx] == NULL) {
+-		/* name not found but still a success */
+-		return (0);
+-	}
+-	/* squeeze up one entry */
+-	do {
+-		environ[idx] = environ[idx+1];
+-	} while (environ[++idx] != NULL);
++        if (name == NULL || *name == '\0' ||
++            strchr(name, '=') != NULL) {
++                return (-1);
++        }
++
++        for (idx = 0; environ[idx] != NULL; idx++) {
++                if (match_noeq(environ[idx], name))
++                        break;
++        }
++        if (environ[idx] == NULL) {
++                /* name not found but still a success */
++                return (0);
++        }
++        /* squeeze up one entry */
++        do {
++                environ[idx] = environ[idx+1];
++        } while (environ[++idx] != NULL);
+ 
+-	return (0);
++        return (0);
+ }
+ /* --- End of "borrowed" code --- */
+ 
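[Annotation, not part of the patch] The "borrowed" code above deletes an entry from environ by matching the name up to '=' and shifting the remaining pointers down one slot. The same squeeze condensed into one function; strncmp plays the role of the patch's match_noeq, and this is a sketch rather than the patched code:

    #include <cstring>

    extern char **environ;

    // Remove NAME from environ; 0 on success (absent is fine), -1 on bad input.
    static int unset_env(const char *name) {
        if (name == NULL || *name == '\0' || strchr(name, '=') != NULL)
            return -1;
        size_t n = strlen(name);
        long idx;
        for (idx = 0; environ[idx] != NULL; idx++) {
            // strncmp here stands in for match_noeq(): match "NAME=" exactly
            if (strncmp(environ[idx], name, n) == 0 && environ[idx][n] == '=')
                break;
        }
        if (environ[idx] == NULL)
            return 0;                     // not found, but still a success
        do {                              // squeeze up one entry
            environ[idx] = environ[idx + 1];
        } while (environ[++idx] != NULL);
        return 0;
    }

As in the original, an absent name still counts as success; only NULL, empty, or '='-containing names are rejected.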
+diff -ruN openjdk6/hotspot/src/os/linux/launcher/java_md.h openjdk/hotspot/src/os/linux/launcher/java_md.h
+--- openjdk6/hotspot/src/os/linux/launcher/java_md.h	2008-02-28 05:02:29.000000000 -0500
++++ openjdk/hotspot/src/os/linux/launcher/java_md.h	2008-01-31 09:19:00.000000000 -0500
+@@ -19,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ /*
+@@ -37,11 +37,11 @@
+ #include "manifest_info.h"
+ #endif
+ 
+-#define PATH_SEPARATOR		':'
+-#define FILESEP			"/"
+-#define FILE_SEPARATOR		'/'
+-#ifndef	MAXNAMELEN
+-#define MAXNAMELEN		PATH_MAX
++#define PATH_SEPARATOR          ':'
++#define FILESEP                 "/"
++#define FILE_SEPARATOR          '/'
++#ifndef MAXNAMELEN
++#define MAXNAMELEN              PATH_MAX
+ #endif
+ 
+ #ifdef JAVA_ARGS
+@@ -60,11 +60,11 @@
+  * Support for doing cheap, accurate interval timing.
+  */
+ #include <sys/time.h>
+-#define CounterGet()           	  (gethrtime()/1000)
+-#define Counter2Micros(counts) 	  (counts)
++#define CounterGet()              (gethrtime()/1000)
++#define Counter2Micros(counts)    (counts)
+ #else
+-#define CounterGet()		  (0)
+-#define Counter2Micros(counts)	  (1)
++#define CounterGet()              (0)
++#define Counter2Micros(counts)    (1)
+ #endif /* HAVE_GETHRTIME */
+ 
+ /*
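[Annotation, not part of the patch] The CounterGet/Counter2Micros pair retabbed above is a cheap interval timer: microseconds via gethrtime when HAVE_GETHRTIME is set, stubs otherwise. A rough portable analogue using clock_gettime(CLOCK_MONOTONIC); an illustration of the idea, not what this header compiles to:

    #include <cstdio>
    #include <ctime>

    // Microsecond counter in the spirit of CounterGet()/Counter2Micros().
    static long long counter_get_us(void) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (long long)ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
    }

    int main() {
        long long t0 = counter_get_us();
        /* ... interval being measured ... */
        printf("elapsed: %lld us\n", counter_get_us() - t0);
    }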
+diff -ruN openjdk6/hotspot/src/os/linux/vm/attachListener_linux.cpp openjdk/hotspot/src/os/linux/vm/attachListener_linux.cpp
+--- openjdk6/hotspot/src/os/linux/vm/attachListener_linux.cpp	2008-02-28 05:02:29.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/attachListener_linux.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)attachListener_linux.cpp	1.14 07/05/05 17:04:34 JVM"
+-#endif
+ /*
+  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -40,9 +37,9 @@
+ // the client tool. The attach listener creates a socket and binds it to a file
+ // in the filesystem. The attach listener then acts as a simple (single-
+ // threaded) server - it waits for a client to connect, reads the request,
+-// executes it, and returns the response to the client via the socket 
++// executes it, and returns the response to the client via the socket
+ // connection.
+-// 
++//
+ // As the socket is a UNIX domain socket it means that only clients on the
+ // local machine can connect. In addition there are two other aspects to
+ // the security:
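[Annotation, not part of the patch] As the comment block above describes, the listener is a single-threaded server behind a Unix domain socket bound to a file, speaking NUL-delimited strings. A client-side sketch of that handshake; the /tmp/.java_pid<pid> path, the pid, the command name, and the three empty argument slots are assumptions drawn from the surrounding code, not guaranteed constants:

    #include <sys/socket.h>
    #include <sys/un.h>
    #include <unistd.h>
    #include <cstdio>
    #include <cstring>

    int main() {
        int s = socket(AF_UNIX, SOCK_STREAM, 0);
        if (s < 0) { perror("socket"); return 1; }

        struct sockaddr_un addr;
        memset(&addr, 0, sizeof(addr));
        addr.sun_family = AF_UNIX;
        // Path format used by LinuxAttachListener::init() below;
        // 12345 is a made-up target pid.
        snprintf(addr.sun_path, sizeof(addr.sun_path),
                 "/tmp/.java_pid%d", 12345);

        if (connect(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
            perror("connect"); close(s); return 1;
        }
        // <ver> <cmd> <args...> as NUL-delimited strings: version "1",
        // a command, and (assumed) three empty argument slots.
        const char req[] = "1\0properties\0\0\0";
        (void)write(s, req, sizeof(req));
        close(s);
        return 0;
    }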
+@@ -72,26 +69,26 @@
+       _has_path = true;
+     }
+   }
+- 
+-  static void set_listener(int s)		{ _listener = s; }
++
++  static void set_listener(int s)               { _listener = s; }
+ 
+   // reads a request from the given connected socket
+   static LinuxAttachOperation* read_request(int s);
+ 
+  public:
+   enum {
+-    ATTACH_PROTOCOL_VER = 1			// protocol version
++    ATTACH_PROTOCOL_VER = 1                     // protocol version
+   };
+   enum {
+-    ATTACH_ERROR_BADVERSION     = 101		// error codes
++    ATTACH_ERROR_BADVERSION     = 101           // error codes
+   };
+ 
+   // initialize the listener, returns 0 if okay
+   static int init();
+ 
+-  static char* path() 			{ return _path; }
+-  static bool has_path()		{ return _has_path; }
+-  static int listener()			{ return _listener; }
++  static char* path()                   { return _path; }
++  static bool has_path()                { return _has_path; }
++  static int listener()                 { return _listener; }
+ 
+   // write the given buffer to a socket
+   static int write_fully(int s, char* buf, int len);
+@@ -102,13 +99,13 @@
+ class LinuxAttachOperation: public AttachOperation {
+  private:
+   // the connection to the client
+-  int _socket;	
++  int _socket;
+ 
+  public:
+   void complete(jint res, bufferedStream* st);
+ 
+-  void set_socket(int s)				{ _socket = s; }
+-  int socket() const					{ return _socket; }
++  void set_socket(int s)                                { _socket = s; }
++  int socket() const                                    { return _socket; }
+ 
+   LinuxAttachOperation(char* name) : AttachOperation(name) {
+     set_socket(-1);
+@@ -154,7 +151,7 @@
+       cleanup_done = 1;
+       int s = LinuxAttachListener::listener();
+       if (s != -1) {
+-	::close(s);
++        ::close(s);
+       }
+       if (LinuxAttachListener::has_path()) {
+         ::unlink(LinuxAttachListener::path());
+@@ -166,8 +163,8 @@
+ // Initialization - create a listener socket and bind it to a file
+ 
+ int LinuxAttachListener::init() {
+-  char path[PATH_MAX+1];	// socket file
+-  int listener;			// listener socket (file descriptor)
++  char path[PATH_MAX+1];        // socket file
++  int listener;                 // listener socket (file descriptor)
+ 
+   // register function to cleanup
+   ::atexit(listener_cleanup);
+@@ -198,7 +195,7 @@
+     sprintf(path, "%s/.java_pid%d", os::get_temp_directory(), os::current_process_id());
+     strcpy(addr.sun_path, path);
+     ::unlink(path);
+-    res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr)); 
++    res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr));
+   }
+   if (res == -1) {
+     RESTARTABLE(::close(listener), res);
+@@ -206,7 +203,7 @@
+   }
+   set_path(path);
+ 
+-  // put in listen mode and set permission 
++  // put in listen mode and set permission
+   if ((::listen(listener, 5) == -1) || (::chmod(path, S_IREAD|S_IWRITE) == -1)) {
+     RESTARTABLE(::close(listener), res);
+     ::unlink(path);
+@@ -238,7 +235,7 @@
+   int max_len = (strlen(ver_str) + 1) + (AttachOperation::name_length_max + 1) +
+     AttachOperation::arg_count_max*(AttachOperation::arg_length_max + 1);
+ 
+-  char buf[max_len];	
++  char buf[max_len];
+   int str_count = 0;
+ 
+   // Read until all (expected) strings have been read, the buffer is
+@@ -249,29 +246,29 @@
+ 
+   do {
+     int n;
+-    RESTARTABLE(read(s, buf+off, left), n); 
++    RESTARTABLE(read(s, buf+off, left), n);
+     if (n == -1) {
+-      return NULL;	// reset by peer or other error
+-    } 
++      return NULL;      // reset by peer or other error
++    }
+     if (n == 0) {
+       break;
+     }
+     for (int i=0; i<n; i++) {
+       if (buf[off+i] == 0) {
+-	// EOS found
+- 	str_count++;
++        // EOS found
++        str_count++;
+ 
+-	// The first string is <ver> so check it now to
+-	// check for protocol mis-match
+-	if (str_count == 1) {
+-	  if ((strlen(buf) != strlen(ver_str)) || 
++        // The first string is <ver> so check it now to
++        // check for protocol mis-match
++        if (str_count == 1) {
++          if ((strlen(buf) != strlen(ver_str)) ||
+               (atoi(buf) != ATTACH_PROTOCOL_VER)) {
+             char msg[32];
+             sprintf(msg, "%d\n", ATTACH_ERROR_BADVERSION);
+             write_fully(s, msg, strlen(msg));
+             return NULL;
+-	  }
+-	}
++          }
++        }
+       }
+     }
+     off += n;
+@@ -279,23 +276,23 @@
+   } while (left > 0 && str_count < expected_str_count);
+ 
+   if (str_count != expected_str_count) {
+-    return NULL;	// incomplete request
++    return NULL;        // incomplete request
+   }
+ 
+   // parse request
+-                                                                                                   
++
+   ArgumentIterator args(buf, (max_len)-left);
+ 
+   // version already checked
+   char* v = args.next();
+-                                                                                                   
++
+   char* name = args.next();
+   if (name == NULL || strlen(name) > AttachOperation::name_length_max) {
+     return NULL;
+   }
+ 
+   LinuxAttachOperation* op = new LinuxAttachOperation(name);
+-                                                                                                   
++
+   for (int i=0; i<AttachOperation::arg_count_max; i++) {
+     char* arg = args.next();
+     if (arg == NULL) {
+@@ -317,7 +314,7 @@
+ // Dequeue an operation
+ //
+ // In the Linux implementation there is only a single operation and clients
+-// cannot queue commands (except at the socket level). 
++// cannot queue commands (except at the socket level).
+ //
+ LinuxAttachOperation* LinuxAttachListener::dequeue() {
+   for (;;) {
+@@ -328,7 +325,7 @@
+     socklen_t len = sizeof(addr);
+     RESTARTABLE(::accept(listener(), &addr, &len), s);
+     if (s == -1) {
+-      return NULL;	// log a warning?
++      return NULL;      // log a warning?
+     }
+ 
+     // get the credentials of the peer and check the effective uid/gid
+@@ -383,7 +380,7 @@
+ // default send buffer is sufficient to buffer everything. In the future
+ // if there are operations that involve a very big reply then the
+ // socket could be made non-blocking and a timeout could be used.
+-                                                                                                   
++
+ void LinuxAttachOperation::complete(jint result, bufferedStream* st) {
+   JavaThread* thread = JavaThread::current();
+   ThreadBlockInVM tbivm(thread);
+@@ -397,13 +394,13 @@
+   sprintf(msg, "%d\n", result);
+   int rc = LinuxAttachListener::write_fully(this->socket(), msg, strlen(msg));
+ 
+-  // write any result data 
++  // write any result data
+   if (rc == 0) {
+     LinuxAttachListener::write_fully(this->socket(), (char*) st->base(), st->size());
+     ::shutdown(this->socket(), 2);
+   }
+ 
+-  // done 
++  // done
+   RESTARTABLE(::close(this->socket()), rc);
+ 
+   // were we externally suspended while we were waiting?
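[Annotation, not part of the patch] read_request above accumulates the whole request into one bounded buffer and then replays it through an ArgumentIterator. A toy version of that NUL-delimited iteration; the class here is illustrative, not HotSpot's ArgumentIterator:

    #include <cstdio>
    #include <cstring>

    // Walks NUL-delimited strings packed into buf[0..len).
    class ArgIter {
        char *pos;
        char *end;
    public:
        ArgIter(char *buf, int len) : pos(buf), end(buf + len) {}
        char *next() {
            if (pos >= end) return NULL;
            char *s = pos;
            pos += strlen(s) + 1;         // step past this string's NUL
            return s;
        }
    };

    int main() {
        char buf[] = "1\0properties\0\0\0";   // ver, cmd, empty args
        ArgIter it(buf, (int)sizeof(buf));
        for (char *s; (s = it.next()) != NULL; )
            printf("token: \"%s\"\n", s);
    }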
+diff -ruN openjdk6/hotspot/src/os/linux/vm/c1_globals_linux.hpp openjdk/hotspot/src/os/linux/vm/c1_globals_linux.hpp
+--- openjdk6/hotspot/src/os/linux/vm/c1_globals_linux.hpp	2008-02-28 05:02:29.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/c1_globals_linux.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c1_globals_linux.hpp	1.12 07/05/05 17:04:35 JVM"
+-#endif
+ /*
+  * Copyright 2000-2001 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+diff -ruN openjdk6/hotspot/src/os/linux/vm/c2_globals_linux.hpp openjdk/hotspot/src/os/linux/vm/c2_globals_linux.hpp
+--- openjdk6/hotspot/src/os/linux/vm/c2_globals_linux.hpp	2008-02-28 05:02:29.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/c2_globals_linux.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)c2_globals_linux.hpp	1.12 07/05/05 17:04:35 JVM"
+-#endif
+ /*
+  * Copyright 2000-2001 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+diff -ruN openjdk6/hotspot/src/os/linux/vm/chaitin_linux.cpp openjdk/hotspot/src/os/linux/vm/chaitin_linux.cpp
+--- openjdk6/hotspot/src/os/linux/vm/chaitin_linux.cpp	2008-02-28 05:02:29.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/chaitin_linux.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)chaitin_linux.cpp	1.11 07/05/05 17:04:35 JVM"
+-#endif
+ /*
+  * Copyright 1999-2001 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -40,5 +37,5 @@
+ 
+ 
+ // Reconciliation History
+-// chaitin_solaris.cpp	1.7 99/07/12 23:54:22
++// chaitin_solaris.cpp  1.7 99/07/12 23:54:22
+ // End
+diff -ruN openjdk6/hotspot/src/os/linux/vm/globals_linux.hpp openjdk/hotspot/src/os/linux/vm/globals_linux.hpp
+--- openjdk6/hotspot/src/os/linux/vm/globals_linux.hpp	2008-02-28 05:02:29.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/globals_linux.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)globals_linux.hpp	1.12 07/05/05 17:04:35 JVM"
+-#endif
+ /*
+  * Copyright 2005-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+@@ -30,7 +27,7 @@
+ //
+ #define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
+   product(bool, UseOprofile, false,                                 \
+-	"enable support for Oprofile profiler")                     \
++        "enable support for Oprofile profiler")                     \
+                                                                     \
+   product(bool, UseLinuxPosixThreadCPUClocks, false,                \
+           "enable fast Linux Posix clocks where available")         \
+diff -ruN openjdk6/hotspot/src/os/linux/vm/hpi_linux.cpp openjdk/hotspot/src/os/linux/vm/hpi_linux.cpp
+--- openjdk6/hotspot/src/os/linux/vm/hpi_linux.cpp	2008-02-28 05:02:29.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/hpi_linux.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)hpi_linux.cpp	1.16 07/05/05 17:04:35 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -44,9 +41,9 @@
+       buf[JVM_MAXPATHLEN - 1] = '\0';
+     } else {
+       const char *thread_type = "native_threads";
+-      
++
+       os::jvm_path(buf, JVM_MAXPATHLEN);
+-      
++
+ #ifdef PRODUCT
+       const char * hpi_lib = "/libhpi.so";
+ #else
+@@ -61,7 +58,7 @@
+       strcat(buf, thread_type);
+       strcat(buf, hpi_lib);
+     }
+-    
++
+     if (TraceHPI) tty->print_cr("Loading HPI %s ", buf);
+ #ifdef SPARC
+     // On 64-bit Ubuntu Sparc RTLD_NOW leads to unresolved deps in libpthread.so
+@@ -74,15 +71,15 @@
+ #undef OPEN_MODE
+ 
+     if (hpi_handle == NULL) {
+-	if (TraceHPI) tty->print_cr("HPI dlopen failed: %s", dlerror());
++        if (TraceHPI) tty->print_cr("HPI dlopen failed: %s", dlerror());
+         return;
+     }
+-    DLL_Initialize = CAST_TO_FN_PTR(jint (JNICALL *)(GetInterfaceFunc *, void *),  
++    DLL_Initialize = CAST_TO_FN_PTR(jint (JNICALL *)(GetInterfaceFunc *, void *),
+                                     dlsym(hpi_handle, "DLL_Initialize"));
+     if (TraceHPI && DLL_Initialize == NULL) tty->print_cr("HPI dlsym of DLL_Initialize failed: %s", dlerror());
+     if (DLL_Initialize == NULL ||
+         (*DLL_Initialize)(&getintf, callbacks) < 0) {
+-	if (TraceHPI) tty->print_cr("HPI DLL_Initialize failed");
++        if (TraceHPI) tty->print_cr("HPI DLL_Initialize failed");
+         return;
+     }
+     if (TraceHPI)  tty->print_cr("HPI loaded successfully");
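[Annotation, not part of the patch] The loader above composes the libhpi.so path, dlopens it, and resolves DLL_Initialize through a function-pointer cast before calling it (with RTLD_LAZY substituted on SPARC, per the comment). The same shape in isolation; the library name and entry point below are placeholders, not the HPI contract:

    #include <dlfcn.h>
    #include <cstdio>

    // Placeholder entry point; HPI's real hook is DLL_Initialize.
    typedef int (*init_fn)(void **intf, void *callbacks);

    int main() {
        void *handle = dlopen("./libexample.so", RTLD_NOW);
        if (handle == NULL) {
            fprintf(stderr, "dlopen failed: %s\n", dlerror());
            return 1;
        }
        init_fn init = (init_fn)dlsym(handle, "Example_Initialize");
        if (init == NULL) {
            fprintf(stderr, "dlsym failed: %s\n", dlerror());
            dlclose(handle);
            return 1;
        }
        void *intf = NULL;
        if (init(&intf, NULL) < 0) {   // mirrors (*DLL_Initialize)(...) < 0
            fprintf(stderr, "initialization failed\n");
            dlclose(handle);
            return 1;
        }
        puts("library initialized");
        return 0;
    }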
+diff -ruN openjdk6/hotspot/src/os/linux/vm/hpi_linux.hpp openjdk/hotspot/src/os/linux/vm/hpi_linux.hpp
+--- openjdk6/hotspot/src/os/linux/vm/hpi_linux.hpp	2008-02-28 05:02:29.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/hpi_linux.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)hpi_linux.hpp	1.18 07/05/05 17:04:34 JVM"
+-#endif
+ /*
+  * Copyright 1999-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ //
+@@ -85,7 +82,7 @@
+ 
+     pfd.fd = fd;
+     pfd.events = POLLIN | POLLERR;
+-  
++
+     int res = ::poll(&pfd, 1, timeout);
+ 
+     if (res == OS_ERR && errno == EINTR) {
+@@ -93,12 +90,12 @@
+       // On Linux any value < 0 means "forever"
+ 
+       if(timeout >= 0) {
+-	gettimeofday(&t, NULL);
+-	newtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec / 1000;
+-	timeout -= newtime - prevtime;
+-	if(timeout <= 0)
+-	  return OS_OK;
+-	prevtime = newtime;
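[Annotation, not part of the patch] available_memory() above multiplies freeram by mem_unit because sysinfo reports counts of mem_unit-sized blocks, not bytes. Standalone, for reference:

    #include <sys/sysinfo.h>
    #include <cstdio>

    int main() {
        struct sysinfo si;
        if (sysinfo(&si) != 0) {
            perror("sysinfo");
            return 1;
        }
        // freeram is counted in mem_unit-sized blocks, not bytes.
        unsigned long long free_bytes =
            (unsigned long long)si.freeram * si.mem_unit;
        printf("free memory: %llu bytes\n", free_bytes);
        return 0;
    }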
++        gettimeofday(&t, NULL);
++        newtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec / 1000;
++        timeout -= newtime - prevtime;
++        if(timeout <= 0)
++          return OS_OK;
++        prevtime = newtime;
+       }
+     } else
+       return res;
+@@ -125,12 +122,12 @@
+ }
+ 
+ inline int hpi::recvfrom(int fd, char *buf, int nBytes, int flags,
+-		         sockaddr *from, int *fromlen) {
++                         sockaddr *from, int *fromlen) {
+   RESTARTABLE_RETURN_INT(::recvfrom(fd, buf, nBytes, (unsigned int) flags, from, (socklen_t *)fromlen));
+ }
+ 
+ inline int hpi::sendto(int fd, char *buf, int len, int flags,
+-			struct sockaddr *to, int tolen) {
++                        struct sockaddr *to, int tolen) {
+   RESTARTABLE_RETURN_INT(::sendto(fd, buf, len, (unsigned int) flags, to, tolen));
+ }
+ 
+@@ -144,22 +141,22 @@
+ }
+ 
+ 
+-// following methods have been updated to avoid problems in 
++// following methods have been updated to avoid problems in
+ // hpi's sockets calls based on sys_api_td.c (JDK1.3)
+ 
+ /*
+-HPIDECL(socket_shutdown, "socket_shutdown", _socket, SocketShutdown, 
++HPIDECL(socket_shutdown, "socket_shutdown", _socket, SocketShutdown,
+         int, "%d",
+         (int fd, int howto),
+         ("fd = %d, howto = %d", fd, howto),
+         (fd, howto));
+-	*/
++        */
+ inline int hpi::socket_shutdown(int fd, int howto){
+   return ::shutdown(fd, howto);
+ }
+ 
+ /*
+-HPIDECL(bind, "bind", _socket, Bind, 
++HPIDECL(bind, "bind", _socket, Bind,
+         int, "%d",
+         (int fd, struct sockaddr *him, int len),
+         ("fd = %d, him = %p, len = %d",
+@@ -171,13 +168,13 @@
+ }
+ 
+ /*
+-HPIDECL(get_sock_name, "get_sock_name", _socket, GetSocketName, 
++HPIDECL(get_sock_name, "get_sock_name", _socket, GetSocketName,
+         int, "%d",
+         (int fd, struct sockaddr *him, int *len),
+         ("fd = %d, him = %p, len = %p",
+          fd, him, len),
+         (fd, him, len));
+-	*/
++        */
+ inline int hpi::get_sock_name(int fd, struct sockaddr *him, int *len){
+   return ::getsockname(fd, him, (socklen_t *)len);
+ }
+@@ -188,7 +185,7 @@
+         ("hostname = %p, namelen = %d",
+          hostname, namelen),
+         (hostname, namelen));
+-	*/
++        */
+ inline int hpi::get_host_name(char* name, int namelen){
+   return ::gethostname(name, namelen);
+ }
+@@ -199,9 +196,9 @@
+         ("fd = %d, level = %d, optname = %d, optval = %p, optlen = %p",
+          fd, level, optname, optval, optlen),
+         (fd, level, optname, optval, optlen));
+-	*/
+-inline int hpi::get_sock_opt(int fd, int level, int optname, 
+-			     char *optval, int* optlen){
++        */
++inline int hpi::get_sock_opt(int fd, int level, int optname,
++                             char *optval, int* optlen){
+   return ::getsockopt(fd, level, optname, optval, (socklen_t *)optlen);
+ }
+ 
+@@ -211,13 +208,13 @@
+         ("fd = %d, level = %d, optname = %d, optval = %p, optlen = %d",
+          fd, level, optname, optval, optlen),
+         (fd, level, optname, optval, optlen));
+-	*/
+-inline int hpi::set_sock_opt(int fd, int level, int optname, 
+-			     const char *optval, int optlen){
++        */
++inline int hpi::set_sock_opt(int fd, int level, int optname,
++                             const char *optval, int optlen){
+   return ::setsockopt(fd, level, optname, optval, optlen);
+ }
+ 
+ 
+ // Reconciliation History
+-// hpi_solaris.hpp	1.9 99/08/30 16:31:23
++// hpi_solaris.hpp      1.9 99/08/30 16:31:23
+ // End
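[Annotation, not part of the patch] The poll wrapper reindented above restarts on EINTR but first charges the elapsed time against the caller's timeout, so a stream of signals cannot stretch the wait. The same pattern extracted into a sketch; return conventions follow poll itself rather than the hpi OS_* codes:

    #include <poll.h>
    #include <sys/time.h>
    #include <cerrno>

    // poll() that survives EINTR without stretching the timeout.
    // timeout_ms < 0 means "forever", as with poll itself.
    static int poll_with_deadline(int fd, int timeout_ms) {
        struct pollfd pfd;
        pfd.fd = fd;
        pfd.events = POLLIN | POLLERR;

        struct timeval t;
        gettimeofday(&t, NULL);
        long long prev = (long long)t.tv_sec * 1000 + t.tv_usec / 1000;

        for (;;) {
            int res = poll(&pfd, 1, timeout_ms);
            if (res >= 0 || errno != EINTR)
                return res;               // events, timeout, or real error
            if (timeout_ms < 0)
                continue;                 // infinite wait: just retry
            gettimeofday(&t, NULL);
            long long now = (long long)t.tv_sec * 1000 + t.tv_usec / 1000;
            timeout_ms -= (int)(now - prev);
            if (timeout_ms <= 0)
                return 0;                 // timed out across interrupts
            prev = now;
        }
    }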
+diff -ruN openjdk6/hotspot/src/os/linux/vm/interfaceSupport_linux.hpp openjdk/hotspot/src/os/linux/vm/interfaceSupport_linux.hpp
+--- openjdk6/hotspot/src/os/linux/vm/interfaceSupport_linux.hpp	2008-02-28 05:02:29.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/interfaceSupport_linux.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)interfaceSupport_linux.hpp	1.6 07/05/05 17:04:35 JVM"
+-#endif
+ /*
+  * Copyright 2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Contains inlined functions for class InterfaceSupport
+@@ -30,4 +27,3 @@
+ static inline void serialize_memory(JavaThread *thread) {
+   os::write_memory_serialize_page(thread);
+ }
+-
+diff -ruN openjdk6/hotspot/src/os/linux/vm/jsig.c openjdk/hotspot/src/os/linux/vm/jsig.c
+--- openjdk6/hotspot/src/os/linux/vm/jsig.c	2008-02-28 05:02:29.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/jsig.c	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)jsig.c	1.12 07/05/05 17:04:34 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ /* CopyrightVersion 1.2 */
+@@ -119,7 +116,7 @@
+ 
+     signal_unlock();
+     return oldhandler;
+-  } else if (jvm_signal_installing) { 
++  } else if (jvm_signal_installing) {
+     /* jvm is installing its signal handlers. Install the new
+      * handlers and save the old ones. jvm uses sigaction().
+      * Leave the piece here just in case. */
+@@ -181,7 +178,7 @@
+ 
+     signal_unlock();
+     return 0;
+-  } else if (jvm_signal_installing) { 
++  } else if (jvm_signal_installing) {
+     /* jvm is installing its signal handlers. Install the new
+      * handlers and save the old ones. */
+     res = call_os_sigaction(sig, act, &oldAct);
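[Annotation, not part of the patch] jsig.c above interposes on signal()/sigaction() so handlers installed before, during, and after VM startup can be chained instead of clobbered. A much-reduced sketch of the core move, saving the displaced disposition so it can be re-dispatched; unlike libjsig this ignores sa_sigaction-style handlers and all locking:

    #include <csignal>
    #include <cstdio>

    static struct sigaction saved_old[NSIG];   // dispositions we displaced

    static int install_chained(int sig, void (*handler)(int)) {
        struct sigaction act;
        sigemptyset(&act.sa_mask);
        act.sa_flags = 0;
        act.sa_handler = handler;
        // Save the previous disposition so it can be chained to later.
        return sigaction(sig, &act, &saved_old[sig]);
    }

    static void my_handler(int sig) {
        /* ... VM-side handling first ... */
        if (saved_old[sig].sa_handler != SIG_DFL &&
            saved_old[sig].sa_handler != SIG_IGN)
            saved_old[sig].sa_handler(sig);    // then chain to the old one
    }

    int main() {
        install_chained(SIGUSR1, my_handler);
        raise(SIGUSR1);
        puts("survived SIGUSR1");
    }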
+diff -ruN openjdk6/hotspot/src/os/linux/vm/jvm_linux.cpp openjdk/hotspot/src/os/linux/vm/jvm_linux.cpp
+--- openjdk6/hotspot/src/os/linux/vm/jvm_linux.cpp	2008-02-28 05:02:29.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/jvm_linux.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)jvm_linux.cpp	1.21 07/05/05 17:04:36 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -37,7 +34,7 @@
+ int _JVM_native_threads = 1;
+ 
+ // sun.misc.Signal ///////////////////////////////////////////////////////////
+-// Signal code is mostly copied from classic vm, signals_md.c	1.4 98/08/23
++// Signal code is mostly copied from classic vm, signals_md.c   1.4 98/08/23
+ /*
+  * This function is included primarily as a debugging aid. If Java is
+  * running in a console window, then pressing <CTRL-\\> will cause
+@@ -47,7 +44,7 @@
+ 
+ JVM_ENTRY_NO_ENV(void*, JVM_RegisterSignal(jint sig, void* handler))
+   // Copied from classic vm
+-  // signals_md.c	1.4 98/08/23
++  // signals_md.c       1.4 98/08/23
+   void* newHandler = handler == (void *)2
+                    ? os::user_handler()
+                    : handler;
+@@ -66,14 +63,14 @@
+       return (void *)-1;
+ 
+     /* The following signals are used for Shutdown Hooks support. However, if
+-       ReduceSignalUsage (-Xrs) is set, Shutdown Hooks must be invoked via 
+-       System.exit(), Java is not allowed to use these signals, and the the 
++       ReduceSignalUsage (-Xrs) is set, Shutdown Hooks must be invoked via
++       System.exit(), Java is not allowed to use these signals, and the
+        user is allowed to set his own _native_ handler for these signals and
+-       invoke System.exit() as needed. Terminator.setup() is avoiding 
+-       registration of these signals when -Xrs is present. 
++       invoke System.exit() as needed. Terminator.setup() is avoiding
++       registration of these signals when -Xrs is present.
+        - If the HUP signal is ignored (from the nohup) command, then Java
+          is not allowed to use this signal.
+-     */ 
++     */
+ 
+     case SHUTDOWN1_SIGNAL:
+     case SHUTDOWN2_SIGNAL:
+@@ -115,17 +112,17 @@
+   return JNI_TRUE;
+ JVM_END
+ 
+-/* 
+-  All the defined signal names for Linux. 
++/*
++  All the defined signal names for Linux.
+ 
+   NOTE that not all of these names are accepted by our Java implementation
+ 
+   Via an existing claim by the VM, sigaction restrictions, or
+   the "rules of Unix" some of these names will be rejected at runtime.
+-  For example the VM sets up to handle USR1, sigaction returns EINVAL for 
++  For example the VM sets up to handle USR1, sigaction returns EINVAL for
+   STOP, and Linux simply doesn't allow catching of KILL.
+ 
+-  Here are the names currently accepted by a user of sun.misc.Signal with 
++  Here are the names currently accepted by a user of sun.misc.Signal with
+   1.4.1 (ignoring potential interaction with use of chaining, etc):
+ 
+     HUP, INT, TRAP, ABRT, IOT, BUS, USR2, PIPE, ALRM, TERM, STKFLT,
+@@ -141,43 +138,43 @@
+ 
+ struct siglabel siglabels[] = {
+   /* derived from /usr/include/bits/signum.h on RH7.2 */
+-   "HUP",	SIGHUP,		/* Hangup (POSIX).  */
+-  "INT",	SIGINT,		/* Interrupt (ANSI).  */
+-  "QUIT",	SIGQUIT,	/* Quit (POSIX).  */
+-  "ILL",	SIGILL,		/* Illegal instruction (ANSI).  */
+-  "TRAP",	SIGTRAP,	/* Trace trap (POSIX).  */
+-  "ABRT",	SIGABRT,	/* Abort (ANSI).  */
+-  "IOT",	SIGIOT,		/* IOT trap (4.2 BSD).  */
+-  "BUS",	SIGBUS,		/* BUS error (4.2 BSD).  */
+-  "FPE",	SIGFPE,		/* Floating-point exception (ANSI).  */
+-  "KILL",	SIGKILL,	/* Kill, unblockable (POSIX).  */
+-  "USR1",	SIGUSR1,	/* User-defined signal 1 (POSIX).  */
+-  "SEGV",	SIGSEGV,	/* Segmentation violation (ANSI).  */
+-  "USR2",	SIGUSR2,	/* User-defined signal 2 (POSIX).  */
+-  "PIPE",	SIGPIPE,	/* Broken pipe (POSIX).  */
+-  "ALRM",	SIGALRM,	/* Alarm clock (POSIX).  */
+-  "TERM",	SIGTERM,	/* Termination (ANSI).  */
++   "HUP",       SIGHUP,         /* Hangup (POSIX).  */
++  "INT",        SIGINT,         /* Interrupt (ANSI).  */
++  "QUIT",       SIGQUIT,        /* Quit (POSIX).  */
++  "ILL",        SIGILL,         /* Illegal instruction (ANSI).  */
++  "TRAP",       SIGTRAP,        /* Trace trap (POSIX).  */
++  "ABRT",       SIGABRT,        /* Abort (ANSI).  */
++  "IOT",        SIGIOT,         /* IOT trap (4.2 BSD).  */
++  "BUS",        SIGBUS,         /* BUS error (4.2 BSD).  */
++  "FPE",        SIGFPE,         /* Floating-point exception (ANSI).  */
++  "KILL",       SIGKILL,        /* Kill, unblockable (POSIX).  */
++  "USR1",       SIGUSR1,        /* User-defined signal 1 (POSIX).  */
++  "SEGV",       SIGSEGV,        /* Segmentation violation (ANSI).  */
++  "USR2",       SIGUSR2,        /* User-defined signal 2 (POSIX).  */
++  "PIPE",       SIGPIPE,        /* Broken pipe (POSIX).  */
++  "ALRM",       SIGALRM,        /* Alarm clock (POSIX).  */
++  "TERM",       SIGTERM,        /* Termination (ANSI).  */
+ #ifdef SIGSTKFLT
+-  "STKFLT",	SIGSTKFLT,	/* Stack fault.  */
++  "STKFLT",     SIGSTKFLT,      /* Stack fault.  */
+ #endif
+-  "CLD",	SIGCLD,		/* Same as SIGCHLD (System V).  */
+-  "CHLD",	SIGCHLD,	/* Child status has changed (POSIX).  */
+-  "CONT",	SIGCONT,	/* Continue (POSIX).  */
+-  "STOP",	SIGSTOP,	/* Stop, unblockable (POSIX).  */
+-  "TSTP",	SIGTSTP,	/* Keyboard stop (POSIX).  */
+-  "TTIN",	SIGTTIN,	/* Background read from tty (POSIX).  */
+-  "TTOU",	SIGTTOU,	/* Background write to tty (POSIX).  */
+-  "URG",	SIGURG,		/* Urgent condition on socket (4.2 BSD).  */
+-  "XCPU",	SIGXCPU,	/* CPU limit exceeded (4.2 BSD).  */
+-  "XFSZ",	SIGXFSZ,	/* File size limit exceeded (4.2 BSD).  */
+-  "VTALRM",	SIGVTALRM,	/* Virtual alarm clock (4.2 BSD).  */
+-  "PROF",	SIGPROF,	/* Profiling alarm clock (4.2 BSD).  */
+-  "WINCH",	SIGWINCH,	/* Window size change (4.3 BSD, Sun).  */
+-  "POLL",	SIGPOLL,	/* Pollable event occurred (System V).  */
+-  "IO",		SIGIO,		/* I/O now possible (4.2 BSD).  */
+-  "PWR",	SIGPWR,		/* Power failure restart (System V).  */
++  "CLD",        SIGCLD,         /* Same as SIGCHLD (System V).  */
++  "CHLD",       SIGCHLD,        /* Child status has changed (POSIX).  */
++  "CONT",       SIGCONT,        /* Continue (POSIX).  */
++  "STOP",       SIGSTOP,        /* Stop, unblockable (POSIX).  */
++  "TSTP",       SIGTSTP,        /* Keyboard stop (POSIX).  */
++  "TTIN",       SIGTTIN,        /* Background read from tty (POSIX).  */
++  "TTOU",       SIGTTOU,        /* Background write to tty (POSIX).  */
++  "URG",        SIGURG,         /* Urgent condition on socket (4.2 BSD).  */
++  "XCPU",       SIGXCPU,        /* CPU limit exceeded (4.2 BSD).  */
++  "XFSZ",       SIGXFSZ,        /* File size limit exceeded (4.2 BSD).  */
++  "VTALRM",     SIGVTALRM,      /* Virtual alarm clock (4.2 BSD).  */
++  "PROF",       SIGPROF,        /* Profiling alarm clock (4.2 BSD).  */
++  "WINCH",      SIGWINCH,       /* Window size change (4.3 BSD, Sun).  */
++  "POLL",       SIGPOLL,        /* Pollable event occurred (System V).  */
++  "IO",         SIGIO,          /* I/O now possible (4.2 BSD).  */
++  "PWR",        SIGPWR,         /* Power failure restart (System V).  */
+ #ifdef SIGSYS
+-  "SYS",	SIGSYS		/* Bad system call. Only on some Linuxen! */
++  "SYS",        SIGSYS          /* Bad system call. Only on some Linuxen! */
+ #endif
+   };
+ 
+@@ -186,7 +183,7 @@
+   /* find and return the named signal's number */
+ 
+   for(uint i=0; i<ARRAY_SIZE(siglabels); i++)
+-    if(!strcmp(name, siglabels[i].name))  
++    if(!strcmp(name, siglabels[i].name))
+       return siglabels[i].number;
+ 
+   return -1;
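[Annotation, not part of the patch] The siglabels table and jvm_find_signal above give sun.misc.Signal its name-to-number mapping via a linear scan. The same lookup in isolation, with a deliberately truncated table:

    #include <csignal>
    #include <cstdio>
    #include <cstring>

    struct siglabel { const char *name; int number; };

    static const siglabel labels[] = {
        { "HUP",  SIGHUP  }, { "INT",  SIGINT  },
        { "TERM", SIGTERM }, { "USR2", SIGUSR2 },
        // ... the real table covers every Linux signal name ...
    };

    static int find_signal(const char *name) {
        for (unsigned i = 0; i < sizeof(labels) / sizeof(labels[0]); i++)
            if (strcmp(name, labels[i].name) == 0)
                return labels[i].number;
        return -1;                         // unknown name
    }

    int main() {
        printf("TERM -> %d\n", find_signal("TERM"));
        printf("BOGUS -> %d\n", find_signal("BOGUS"));
    }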
+diff -ruN openjdk6/hotspot/src/os/linux/vm/jvm_linux.h openjdk/hotspot/src/os/linux/vm/jvm_linux.h
+--- openjdk6/hotspot/src/os/linux/vm/jvm_linux.h	2008-02-28 05:02:29.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/jvm_linux.h	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)jvm_linux.h	1.15 07/05/05 17:04:35 JVM"
+-#endif
+ /*
+  * Copyright 1999-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ /*
+@@ -42,14 +39,14 @@
+  * JNI conversion, which should be sorted out later.
+  */
+ 
+-#include <dirent.h>		/* For DIR */
+-#include <sys/param.h>		/* For MAXPATHLEN */
+-#include <unistd.h>		/* For F_OK, R_OK, W_OK */
++#include <dirent.h>             /* For DIR */
++#include <sys/param.h>          /* For MAXPATHLEN */
++#include <unistd.h>             /* For F_OK, R_OK, W_OK */
+ 
+ #define JNI_ONLOAD_SYMBOLS      {"JNI_OnLoad"}
+ #define JNI_ONUNLOAD_SYMBOLS    {"JNI_OnUnload"}
+-#define JVM_ONLOAD_SYMBOLS      {"JVM_OnLoad"} 
+-#define AGENT_ONLOAD_SYMBOLS    {"Agent_OnLoad"} 
++#define JVM_ONLOAD_SYMBOLS      {"JVM_OnLoad"}
++#define AGENT_ONLOAD_SYMBOLS    {"Agent_OnLoad"}
+ #define AGENT_ONUNLOAD_SYMBOLS  {"Agent_OnUnload"}
+ #define AGENT_ONATTACH_SYMBOLS  {"Agent_OnAttach"}
+ 
+@@ -96,5 +93,5 @@
+ #endif /* JVM_MD_H */
+ 
+ // Reconciliation History
+-// jvm_solaris.h	1.6 99/06/22 16:38:47
++// jvm_solaris.h        1.6 99/06/22 16:38:47
+ // End
+diff -ruN openjdk6/hotspot/src/os/linux/vm/mutex_linux.cpp openjdk/hotspot/src/os/linux/vm/mutex_linux.cpp
+--- openjdk6/hotspot/src/os/linux/vm/mutex_linux.cpp	2008-02-28 05:02:29.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/mutex_linux.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,8 +1,5 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)mutex_linux.cpp	1.48 07/05/29 11:38:16 JVM"
+-#endif
+ /*
+- * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
++ * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+  *
+  * This code is free software; you can redistribute it and/or modify it
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -30,470 +27,3 @@
+ 
+ // put OS-includes here
+ # include <signal.h>
+-
+-// Implementation of Mutex
+-
+-// A simple Mutex for VM locking: it is not guaranteed to interoperate with
+-// the fast object locking, so exclusively use Mutex locking or exclusively
+-// use fast object locking.
+-
+-Mutex::Mutex(int rank, const char *name, bool allow_vm_block)
+-  debug_only( : _rank(rank) )
+-{
+-  _lock_event     = new os::Linux::Event;
+-  _suppress_signal = false;
+-  _owner          = INVALID_THREAD;
+-  _name           = name;
+-
+-#ifdef ASSERT
+-  if (CountVMLocks) {
+-    _histogram         = new MutexHistogramElement(name);
+-    _contend_histogram = new MutexContentionHistogramElement(name);
+-  }
+-#endif
+-
+-#ifndef PRODUCT
+-  _lock_count     = -1; // unused in solaris
+-  _allow_vm_block = allow_vm_block;
+-  debug_only(_next = NULL;)
+-  debug_only(_last_owner = INVALID_THREAD;)
+-#endif
+-}
+-
+-
+-Mutex::~Mutex() {  
+-  os::Linux::Event* const _Lock_Event = (os::Linux::Event*)_lock_event;
+-
+-  assert(_owner == INVALID_THREAD, "Owned Mutex being deleted");
+-  assert(_lock_count == -1, "Mutex being deleted with non -1 lock count");
+-  delete _Lock_Event;
+-}
+-
+-
+-void Mutex::unlock() {
+-  os::Linux::Event* const _Lock_Event = (os::Linux::Event*)_lock_event;
+-
+-  assert(_owner == Thread::current(), "Mutex not being unlocked by owner");
+-
+-  set_owner(INVALID_THREAD);
+-
+-  if (_suppress_signal) {
+-    assert(SafepointSynchronize::is_at_safepoint() &&
+-           Thread::current()->is_VM_thread(), "can't sneak");
+-    _suppress_signal = false;
+-  }
+-  else {
+-    assert(_lock_count >= 0, "Mutex being unlocked without positive lock count");
+-    debug_only(_lock_count--;)
+-    _Lock_Event->unlock();
+-  }
+-}
+-
+-
+-// Can be called by non-Java threads (JVM_RawMonitorExit)
+-void Mutex::jvm_raw_unlock() {
+-  os::Linux::Event* const _Lock_Event = (os::Linux::Event*)_lock_event;
+-  // Do not call set_owner, as this would break.
+-  _owner = INVALID_THREAD;
+-  if (_suppress_signal) {
+-    assert(SafepointSynchronize::is_at_safepoint() &&
+-           Thread::current()->is_VM_thread(), "can't sneak");
+-    _suppress_signal = false;
+-  }
+-  else {
+-    debug_only(_lock_count--;)
+-    _Lock_Event->unlock();
+-  }
+-}
+-
+-
+-void Mutex::wait_for_lock_blocking_implementation(JavaThread *thread) {
+-  ThreadBlockInVM tbivm(thread);
+-
+-  wait_for_lock_implementation();
+-}
+-
+-
+-#ifndef PRODUCT
+-void Mutex::print_on(outputStream* st) const {
+-  os::Linux::Event* const _Lock_Event = (os::Linux::Event*)_lock_event;
+-
+-  st->print_cr("Mutex: [0x%x/0x%x] %s - owner: 0x%x", this, _Lock_Event, _name, _owner);
+-}
+-#endif
+-
+-
+-//
+-// Monitor
+-//
+-
+-
+-Monitor::Monitor(int rank, const char *name, bool allow_vm_block) : Mutex(rank, name, allow_vm_block) {
+-  _event   = NULL;		
+-  _counter = 0;
+-  _tickets = 0;
+-  _waiters = 0;
+-}
+-
+-
+-Monitor::~Monitor() {
+-}  
+-
+-
+-bool Monitor::wait(bool no_safepoint_check, long timeout,
+-                   bool as_suspend_equivalent) {
+-  os::Linux::Event* const _Lock_Event = (os::Linux::Event*)_lock_event;
+-  Thread* thread = Thread::current();
+-
+-  assert(_owner != INVALID_THREAD, "Wait on unknown thread");
+-  assert(_owner == thread, "Wait on Monitor not by owner");
+-
+-  // The design rule for use of mutexes of rank special or less is
+-  // that we are guaranteed not to block while holding such mutexes.
+-  // Here we verify that the least ranked mutex that we hold,
+-  // modulo the mutex we are about to relinquish, satisfies that
+-  // constraint, since we are about to block in a wait.
+-  #ifdef ASSERT
+-    Mutex* least = get_least_ranked_lock_besides_this(thread->owned_locks());
+-    assert(least != this, "Specification of get_least_... call above");
+-    if (least != NULL && least->rank() <= special) {
+-      tty->print("Attempting to wait on monitor %s/%d while holding"
+-                 " lock %s/%d -- possible deadlock",
+-                 name(), rank(), least->name(), least->rank());
+-      assert(false,
+-             "Shouldn't block(wait) while holding a lock of rank special");
+-    }
+-  #endif // ASSERT
+-
+-  long c = _counter;
+-
+-  #ifdef ASSERT
+-    // Don't catch signals while blocked; let the running threads have the signals.
+-    // (This allows a debugger to break into the running thread.)
+-    sigset_t oldsigs;
+-    sigset_t* allowdebug_blocked = os::Linux::allowdebug_blocked_signals();
+-    pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
+-  #endif
+-
+-  _waiters++;
+-  // Loop until condition variable is signaled.  Tickets will
+-  // reflect the number of threads which have been notified. The counter
+-  // field is used to make sure we don't respond to notifications that
+-  // have occurred *before* we started waiting, and is incremented each
+-  // time the condition variable is signaled.
+-  // Use a ticket scheme to guard against spurious wakeups.
+-  int wait_status;
+-
+-  while (true) {
+-
+-    if (no_safepoint_check) {
+-
+-      // conceptually set the owner to INVALID_THREAD in anticipation of yielding the lock in wait
+-      set_owner(Mutex::INVALID_THREAD);
+-
+-      // (SafepointTimeout is not implemented)
+-      if(timeout == 0) {
+-	wait_status = _Lock_Event->wait();
+-      }
+-      else {
+-	wait_status = _Lock_Event->timedwait(timeout);
+-      }
+-    } else {
+-      JavaThread *jt = (JavaThread *)thread;
+-
+-      // conceptually set the owner to INVALID_THREAD in anticipation of yielding the lock in wait
+-      set_owner(Mutex::INVALID_THREAD);
+-
+-      // Enter safepoint region
+-      ThreadBlockInVM tbivm(jt);
+-      OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
+-
+-      if (as_suspend_equivalent) {
+-        jt->set_suspend_equivalent();
+-        // cleared by handle_special_suspend_equivalent_condition() or
+-        // java_suspend_self()
+-      }
+-
+-      if(timeout == 0) {
+-	wait_status = _Lock_Event->wait();
+-      }
+-      else {
+-	wait_status = _Lock_Event->timedwait(timeout);
+-      }
+-
+-      // were we externally suspended while we were waiting?
+-      if (as_suspend_equivalent &&
+-          jt->handle_special_suspend_equivalent_condition()) {
+-        //
+-        // Our event wait has finished and we own the _Lock_Event, but
+-        // while we were waiting another thread suspended us. We don't
+-        // want to hold the _Lock_Event while suspended because that
+-        // would surprise the thread that suspended us.
+-        //
+-        _Lock_Event->unlock();
+-        jt->java_suspend_self();
+-        _Lock_Event->lock();
+-      }
+-    } // if no_safepoint_check
+-
+-    // conceptually reacquire the lock (the actual Linux lock is already reacquired after waiting)
+-    set_owner(thread);
+-
+-    // We get to this point if either:
+-    // a) a notify has been executed by some other thread and woke us up
+-    // b) a signal has been delivered to this thread and terminated wait
+-    // c) the above two events happened while we were waiting - that is a signal
+-    //    was delivered while notify was executed by some other thread.
+-
+-    // Handle cases a) and c) here. We consume one ticket even in case c) when notify
+-    // and a signal arrive together
+-    if (_tickets != 0 && _counter != c) {
+-      break;
+-    }
+-    
+-    // If wait was interrupted by a signal or timeout, do not use up a ticket
+-    if (wait_status == EINTR || wait_status == ETIME || wait_status == ETIMEDOUT) {
+-      ++_tickets;		// will be decremented again below
+-      break;
+-    }
+-
+-
+-  }
+-  _waiters--;
+-  _tickets--;
+-
+-#ifdef ASSERT
+-  pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
+-#endif
+-   
+-  // return true if timed out
+-  return (wait_status == ETIME || wait_status == ETIMEDOUT);
+-}
+-
+-
+-// Notify a single thread waiting on this condition variable
+-bool Monitor::notify() {
+-  os::Linux::Event* const _Lock_Event = (os::Linux::Event*)_lock_event;
+-
+-  assert(_owner != INVALID_THREAD, "notify on unknown thread");
+-  assert(_owner == Thread::current(), "notify on Monitor not by owner");
+-
+-  if (_waiters > _tickets) {
+-    
+-    _Lock_Event->signal();
+-    
+-    _tickets++;
+-    _counter++;
+-  }
+-
+-  return true;
+-
+-}
+-
+-
+-// Notify all threads waiting on this ConditionVariable
+-bool Monitor::notify_all() {
+-  os::Linux::Event* const _Lock_Event = (os::Linux::Event*)_lock_event;
+-
+-  assert(_owner != INVALID_THREAD, "notify on unknown thread");
+-  assert(_owner == Thread::current(), "notify on Monitor not by owner");
+-
+-  if (_waiters > 0) {
+-
+-    _Lock_Event->broadcast();
+-
+-    _tickets = _waiters;
+-    _counter++;
+-  }
+-
+-  return true;
+-}
+-
+-// JSR166
+-// -------------------------------------------------------
+-
+-/*
+- * The solaris and linux implementations of park/unpark are fairly
+- * conservative for now, but can be improved. They currently use a
+- * mutex/condvar pair, plus a count.
+- * Park decrements count if > 0, else does a condvar wait.  Unpark
+- * sets count to 1 and signals condvar.  Only one thread ever waits 
+- * on the condvar. Contention seen when trying to park implies that someone 
+- * is unparking you, so don't wait. And spurious returns are fine, so there 
+- * is no need to track notifications.
+- */
+-
+-#define NANOSECS_PER_SEC 1000000000
+-#define NANOSECS_PER_MILLISEC 1000000
+-#define MAX_SECS 100000000
+-/*
+- * This code is common to linux and solaris and will be moved to a
+- * common place in dolphin.
+- *
+- * The passed in time value is either a relative time in nanoseconds
+- * or an absolute time in milliseconds. Either way it has to be unpacked
+- * into suitable seconds and nanoseconds components and stored in the
+- * given timespec structure. 
+- * Given time is a 64-bit value and the time_t used in the timespec is only 
+- * a signed-32-bit value (except on 64-bit Linux) we have to watch for
+- * overflow if times way in the future are given. Further on Solaris versions
+- * prior to 10 there is a restriction (see cond_timedwait) that the specified
+- * number of seconds, in abstime, is less than current_time  + 100,000,000.
+- * As it will be 28 years before "now + 100000000" will overflow we can
+- * ignore overflow and just impose a hard-limit on seconds using the value
+- * of "now + 100,000,000". This places a limit on the timeout of about 3.17
+- * years from "now".
+- */
+-static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
+-  assert (time > 0, "convertTime");
+-
+-  struct timeval now;
+-  int status = gettimeofday(&now, NULL);
+-  assert(status == 0, "gettimeofday");
+-
+-  time_t max_secs = now.tv_sec + MAX_SECS;
+-
+-  if (isAbsolute) {
+-    jlong secs = time / 1000;
+-    if (secs > max_secs) {
+-      absTime->tv_sec = max_secs;
+-    }
+-    else {
+-      absTime->tv_sec = secs;
+-    }
+-    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;   
+-  }
+-  else {
+-    jlong secs = time / NANOSECS_PER_SEC;
+-    if (secs >= MAX_SECS) {
+-      absTime->tv_sec = max_secs;
+-      absTime->tv_nsec = 0;
+-    }
+-    else {
+-      absTime->tv_sec = now.tv_sec + secs;
+-      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
+-      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
+-        absTime->tv_nsec -= NANOSECS_PER_SEC;
+-        ++absTime->tv_sec; // note: this must be <= max_secs
+-      }
+-    }
+-  }
+-  assert(absTime->tv_sec >= 0, "tv_sec < 0");
+-  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
+-  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
+-  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
+-}
+-
+-void Parker::park(bool isAbsolute, jlong time) {
+-  // Optional fast-path check:
+-  // Return immediately if a permit is available.
+-  if (_counter > 0) { 
+-      _counter = 0 ;  
+-      return ;  
+-  }
+-
+-  Thread* thread = Thread::current();
+-  assert(thread->is_Java_thread(), "Must be JavaThread");
+-  JavaThread *jt = (JavaThread *)thread;
+-
+-  // Optional optimization -- avoid state transitions if there's an interrupt pending.
+-  // Check interrupt before trying to wait
+-  if (Thread::is_interrupted(thread, false)) {
+-    return;
+-  }
+-
+-  // Next, demultiplex/decode time arguments
+-  timespec absTime;
+-  if (time < 0) { // don't wait at all
+-    return; 
+-  }
+-  if (time > 0) {
+-    unpackTime(&absTime, isAbsolute, time);
+-  }
+-
+-
+-  // Enter safepoint region
+-  // Beware of deadlocks such as 6317397. 
+-  // The per-thread Parker:: mutex is a classic leaf-lock.
+-  // In particular a thread must never block on the Threads_lock while
+-  // holding the Parker:: mutex.  If safepoints are pending both the
+-  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.  
+-  ThreadBlockInVM tbivm(jt);
+-
+-  // Don't wait if cannot get lock since interference arises from
+-  // unblocking.  Also, check interrupt before trying to wait
+-  if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
+-    return;
+-  }
+-
+-  int status ; 
+-  if (_counter > 0)  { // no wait needed
+-    _counter = 0;
+-    status = pthread_mutex_unlock(_mutex);
+-    assert (status == 0, "invariant") ; 
+-    return;
+-  }
+-
+-#ifdef ASSERT
+-  // Don't catch signals while blocked; let the running threads have the signals.
+-  // (This allows a debugger to break into the running thread.)
+-  sigset_t oldsigs;
+-  sigset_t* allowdebug_blocked = os::Linux::allowdebug_blocked_signals();
+-  pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
+-#endif
+-  
+-  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
+-  jt->set_suspend_equivalent();
+-  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
+-  
+-  if (time == 0) {
+-    status = pthread_cond_wait (_cond, _mutex) ; 
+-  } else {
+-    status = os::Linux::safe_cond_timedwait (_cond, _mutex, &absTime) ; 
+-    if (status != 0 && WorkAroundNPTLTimedWaitHang) { 
+-      pthread_cond_destroy (_cond) ; 
+-      pthread_cond_init    (_cond, NULL); 
+-    }
+-  }
+-  assert_status(status == 0 || status == EINTR || 
+-                status == ETIME || status == ETIMEDOUT, 
+-                status, "cond_timedwait");
+-
+-#ifdef ASSERT
+-  pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
+-#endif
+-
+-  _counter = 0 ; 
+-  status = pthread_mutex_unlock(_mutex) ;
+-  assert_status(status == 0, status, "invariant") ; 
+-  // If externally suspended while waiting, re-suspend
+-  if (jt->handle_special_suspend_equivalent_condition()) {
+-    jt->java_suspend_self();
+-  }
+-
+-}
+-
+-void Parker::unpark() {
+-  int s, status ; 
+-  status = pthread_mutex_lock(_mutex);
+-  assert (status == 0, "invariant") ; 
+-  s = _counter;
+-  _counter = 1;
+-  if (s < 1) { 
+-     if (WorkAroundNPTLTimedWaitHang) { 
+-        status = pthread_cond_signal (_cond) ; 
+-        assert (status == 0, "invariant") ; 
+-        status = pthread_mutex_unlock(_mutex);
+-        assert (status == 0, "invariant") ; 
+-     } else {
+-        status = pthread_mutex_unlock(_mutex);
+-        assert (status == 0, "invariant") ; 
+-        status = pthread_cond_signal (_cond) ; 
+-        assert (status == 0, "invariant") ; 
+-     }
+-  } else {
+-    pthread_mutex_unlock(_mutex);
+-    assert (status == 0, "invariant") ; 
+-  }
+-}
+-
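[Annotation, not part of the patch] The Monitor code deleted above (this patch guts the old mutex_linux.cpp for the C++-interpreter build) guarded against spurious wakeups with a ticket scheme: notify() bumps _tickets and _counter, and a waiter only consumes a ticket after seeing the counter move past the value it snapshotted before blocking. A condensed pthread rendering of that scheme, dropping the timeouts, safepoint transitions, and suspend handling; a sketch of the deleted logic, not a replacement for it:

    #include <pthread.h>

    class TicketMonitor {
        pthread_mutex_t _mu;
        pthread_cond_t  _cv;
        long _counter;     // bumped on every notify
        int  _tickets;     // notifications not yet consumed
        int  _waiters;
    public:
        TicketMonitor() : _counter(0), _tickets(0), _waiters(0) {
            pthread_mutex_init(&_mu, NULL);
            pthread_cond_init(&_cv, NULL);
        }
        void lock()   { pthread_mutex_lock(&_mu); }
        void unlock() { pthread_mutex_unlock(&_mu); }

        void wait() {                       // caller holds the lock
            long c = _counter;              // snapshot before blocking
            _waiters++;
            // Only a notify issued after our snapshot counts for us.
            while (!(_tickets != 0 && _counter != c))
                pthread_cond_wait(&_cv, &_mu);
            _waiters--;
            _tickets--;                     // consume one notification
        }
        void notify() {                     // caller holds the lock
            if (_waiters > _tickets) {
                _tickets++;
                _counter++;
                pthread_cond_signal(&_cv);
            }
        }
        void notify_all() {
            if (_waiters > 0) {
                _tickets = _waiters;
                _counter++;
                pthread_cond_broadcast(&_cv);
            }
        }
    };

The deleted Parker code further up used the simpler one-permit protocol instead, with WorkAroundNPTLTimedWaitHang selecting whether the condvar is signalled before or after the mutex is released.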
+diff -ruN openjdk6/hotspot/src/os/linux/vm/mutex_linux.inline.hpp openjdk/hotspot/src/os/linux/vm/mutex_linux.inline.hpp
+--- openjdk6/hotspot/src/os/linux/vm/mutex_linux.inline.hpp	2008-02-28 05:02:29.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/mutex_linux.inline.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)mutex_linux.inline.hpp	1.12 07/05/05 17:04:36 JVM"
+-#endif
+ /*
+  * Copyright 1999-2002 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,30 +19,10 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+-inline bool Mutex::lock_implementation() {
+-  int status = ((os::Linux::Event*)_lock_event)->trylock();
+-  if (status != 0) { 
+-    debug_only(_lock_count++); 
+-    return true; 
+-  } 
+-  return false;
+-}
+-
+-inline bool Mutex::try_lock_implementation() {
+-  // Same on Linux.
+-  return lock_implementation();
+-}
+-
+-
+-inline void Mutex::wait_for_lock_implementation() {
+-  assert(!owned_by_self(), "deadlock");
+-  ((os::Linux::Event*)_lock_event)->lock();
+-  debug_only(_lock_count++;)
+-}
+ 
+ // Reconciliation History
+-// mutex_solaris.inline.hpp	1.5 99/06/22 16:38:49
++// mutex_solaris.inline.hpp     1.5 99/06/22 16:38:49
+ // End
+diff -ruN openjdk6/hotspot/src/os/linux/vm/objectMonitor_linux.cpp openjdk/hotspot/src/os/linux/vm/objectMonitor_linux.cpp
+--- openjdk6/hotspot/src/os/linux/vm/objectMonitor_linux.cpp	2008-02-28 05:02:29.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/objectMonitor_linux.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)objectMonitor_linux.cpp	1.69 07/05/05 17:04:36 JVM"
+-#endif
+ 
+ /*
+  * Copyright 1999-2005 Sun Microsystems, Inc.  All Rights Reserved.
+@@ -23,6 +20,5 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+-
+diff -ruN openjdk6/hotspot/src/os/linux/vm/objectMonitor_linux.hpp openjdk/hotspot/src/os/linux/vm/objectMonitor_linux.hpp
+--- openjdk6/hotspot/src/os/linux/vm/objectMonitor_linux.hpp	2008-02-28 05:02:29.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/objectMonitor_linux.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)objectMonitor_linux.hpp	1.18 07/05/05 17:04:36 JVM"
+-#endif
+ /*
+  * Copyright 1999-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,8 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+  private:
+-
+diff -ruN openjdk6/hotspot/src/os/linux/vm/objectMonitor_linux.inline.hpp openjdk/hotspot/src/os/linux/vm/objectMonitor_linux.inline.hpp
+--- openjdk6/hotspot/src/os/linux/vm/objectMonitor_linux.inline.hpp	2008-02-28 05:02:29.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/objectMonitor_linux.inline.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)objectMonitor_linux.inline.hpp	1.14 07/05/05 17:04:36 JVM"
+-#endif
+ /*
+  * Copyright 1999-2005 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,6 +19,5 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+-
+diff -ruN openjdk6/hotspot/src/os/linux/vm/os_linux.cpp openjdk/hotspot/src/os/linux/vm/os_linux.cpp
+--- openjdk6/hotspot/src/os/linux/vm/os_linux.cpp	2008-02-28 05:02:30.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/os_linux.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)os_linux.cpp	1.257 07/05/17 15:48:43 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // do not include  precompiled  header file
+@@ -111,7 +108,7 @@
+   // values in struct sysinfo are "unsigned long"
+   struct sysinfo si;
+   sysinfo(&si);
+-  
++
+   return (julong)si.freeram * si.mem_unit;
+ }
+ 
+@@ -149,14 +146,14 @@
+ #ifndef SYS_gettid
+ // i386: 224, ia64: 1105, amd64: 186, sparc 143
+ #ifdef __ia64__
+-#define SYS_gettid 1105     
++#define SYS_gettid 1105
+ #elif __i386__
+-#define SYS_gettid 224     
++#define SYS_gettid 224
+ #elif __amd64__
+ #define SYS_gettid 186
+ #elif __sparc__
+ #define SYS_gettid 143
+-#else 
++#else
+ #error define gettid for the arch
+ #endif
+ #endif
+@@ -189,7 +186,7 @@
+ //
+ pid_t os::Linux::gettid() {
+   int rslt = syscall(SYS_gettid);
+-  if (rslt == -1) {        
++  if (rslt == -1) {
+      // old kernel, no NPTL support
+      return getpid();
+   } else {
+@@ -233,8 +230,8 @@
+   // This library should be located at:
+   // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm[_g].so.
+   //
+-  // If "/jre/lib/" appears at the right place in the path, then we 
+-  // assume libjvm[_g].so is installed in a JDK and we use this path. 
++  // If "/jre/lib/" appears at the right place in the path, then we
++  // assume libjvm[_g].so is installed in a JDK and we use this path.
+   //
+   // Otherwise exit with message: "Could not create the Java virtual machine."
+   //
+@@ -248,9 +245,9 @@
+   // it looks like libjvm[_g].so is installed there
+   // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm[_g].so.
+   //
+-  // Otherwise exit. 
++  // Otherwise exit.
+   //
+-  // Important note: if the location of libjvm.so changes this 
++  // Important note: if the location of libjvm.so changes this
+   // code needs to be changed accordingly.
+ 
+   // The next few definitions allow the code to be verbatim:
+@@ -259,57 +256,57 @@
+ 
+ /*
+  * See ld(1):
+- *	The linker uses the following search paths to locate required
+- *	shared libraries:
+- *	  1: ...
+- *	  ...
+- *	  7: The default directories, normally /lib and /usr/lib.
++ *      The linker uses the following search paths to locate required
++ *      shared libraries:
++ *        1: ...
++ *        ...
++ *        7: The default directories, normally /lib and /usr/lib.
+  */
+-#define DEFAULT_LIBPATH	"/lib:/usr/lib"
++#define DEFAULT_LIBPATH "/lib:/usr/lib"
+ 
+-#define EXTENSIONS_DIR	"/lib/ext"
+-#define ENDORSED_DIR	"/lib/endorsed"
+-#define REG_DIR		"/usr/java/packages"
++#define EXTENSIONS_DIR  "/lib/ext"
++#define ENDORSED_DIR    "/lib/endorsed"
++#define REG_DIR         "/usr/java/packages"
+ 
+   {
+     /* sysclasspath, java_home, dll_dir */
+     {
+         char *home_path;
+-	char *dll_path;
+-	char *pslash;
++        char *dll_path;
++        char *pslash;
+         char buf[MAXPATHLEN];
+-	os::jvm_path(buf, sizeof(buf));
++        os::jvm_path(buf, sizeof(buf));
+ 
+-	// Found the full path to libjvm.so. 
+-	// Now cut the path to <java_home>/jre if we can. 
+-	*(strrchr(buf, '/')) = '\0';  /* get rid of /libjvm.so */
+-	pslash = strrchr(buf, '/');
+-	if (pslash != NULL)
+-	    *pslash = '\0';           /* get rid of /{client|server|hotspot} */
+-	dll_path = malloc(strlen(buf) + 1);
+-	if (dll_path == NULL)
+-	    return;
+-	strcpy(dll_path, buf);
++        // Found the full path to libjvm.so.
++        // Now cut the path to <java_home>/jre if we can.
++        *(strrchr(buf, '/')) = '\0';  /* get rid of /libjvm.so */
++        pslash = strrchr(buf, '/');
++        if (pslash != NULL)
++            *pslash = '\0';           /* get rid of /{client|server|hotspot} */
++        dll_path = malloc(strlen(buf) + 1);
++        if (dll_path == NULL)
++            return;
++        strcpy(dll_path, buf);
+         Arguments::set_dll_dir(dll_path);
+ 
+-	if (pslash != NULL) {
+-	    pslash = strrchr(buf, '/');
+-	    if (pslash != NULL) {
+-		*pslash = '\0';       /* get rid of /<arch> */ 
+-		pslash = strrchr(buf, '/');
+-		if (pslash != NULL)
+-		    *pslash = '\0';   /* get rid of /lib */
+-	    }
+-	}
+-
+-	home_path = malloc(strlen(buf) + 1);
+-	if (home_path == NULL)
+-	    return;
+-	strcpy(home_path, buf);
++        if (pslash != NULL) {
++            pslash = strrchr(buf, '/');
++            if (pslash != NULL) {
++                *pslash = '\0';       /* get rid of /<arch> */
++                pslash = strrchr(buf, '/');
++                if (pslash != NULL)
++                    *pslash = '\0';   /* get rid of /lib */
++            }
++        }
++
++        home_path = malloc(strlen(buf) + 1);
++        if (home_path == NULL)
++            return;
++        strcpy(home_path, buf);
+         Arguments::set_java_home(home_path);
+ 
+-	if (!set_boot_path('/', ':'))
+-	    return;
++        if (!set_boot_path('/', ':'))
++            return;
+     }
+ 
+     /*
+@@ -325,31 +322,31 @@
+      * Eventually, all the library path setting will be done here.
+      */
+     {
+-	char *ld_library_path;
++        char *ld_library_path;
+ 
+-	/*
+-	 * Construct the invariant part of ld_library_path. Note that the
+-	 * space for the colon and the trailing null are provided by the
+-	 * nulls included by the sizeof operator (so actually we allocate
+-	 * a byte more than necessary).
+-	 */
+-	ld_library_path = (char *) malloc(sizeof(REG_DIR) + sizeof("/lib/") +
+-	    strlen(cpu_arch) + sizeof(DEFAULT_LIBPATH));
+-	sprintf(ld_library_path, REG_DIR "/lib/%s:" DEFAULT_LIBPATH, cpu_arch);
+-
+-	/*
+-	 * Get the user setting of LD_LIBRARY_PATH, and prepended it.  It
+-	 * should always exist (until the legacy problem cited above is
+-	 * addressed).
+-	 */
+-	char *v = getenv("LD_LIBRARY_PATH");
+-	if (v != NULL) {
+-	    char *t = ld_library_path;
+-	    /* That's +1 for the colon and +1 for the trailing '\0' */
+-	    ld_library_path = (char *) malloc(strlen(v) + 1 + strlen(t) + 1);
+-	    sprintf(ld_library_path, "%s:%s", v, t);
+-	}
+-	Arguments::set_library_path(ld_library_path);
++        /*
++         * Construct the invariant part of ld_library_path. Note that the
++         * space for the colon and the trailing null are provided by the
++         * nulls included by the sizeof operator (so actually we allocate
++         * a byte more than necessary).
++         */
++        ld_library_path = (char *) malloc(sizeof(REG_DIR) + sizeof("/lib/") +
++            strlen(cpu_arch) + sizeof(DEFAULT_LIBPATH));
++        sprintf(ld_library_path, REG_DIR "/lib/%s:" DEFAULT_LIBPATH, cpu_arch);
++
++        /*
++         * Get the user setting of LD_LIBRARY_PATH, and prepended it.  It
++         * should always exist (until the legacy problem cited above is
++         * addressed).
++         */
++        char *v = getenv("LD_LIBRARY_PATH");
++        if (v != NULL) {
++            char *t = ld_library_path;
++            /* That's +1 for the colon and +1 for the trailing '\0' */
++            ld_library_path = (char *) malloc(strlen(v) + 1 + strlen(t) + 1);
++            sprintf(ld_library_path, "%s:%s", v, t);
++        }
++        Arguments::set_library_path(ld_library_path);
+     }
+ 
+     /*
+@@ -360,18 +357,18 @@
+      * than necessary is allocated).
+      */
+     {
+-	char *buf = malloc(strlen(Arguments::get_java_home()) +
+-	    sizeof(EXTENSIONS_DIR) + sizeof(REG_DIR) + sizeof(EXTENSIONS_DIR));
+-	sprintf(buf, "%s" EXTENSIONS_DIR ":" REG_DIR EXTENSIONS_DIR,
+-	    Arguments::get_java_home());
+-	Arguments::set_ext_dirs(buf);
++        char *buf = malloc(strlen(Arguments::get_java_home()) +
++            sizeof(EXTENSIONS_DIR) + sizeof(REG_DIR) + sizeof(EXTENSIONS_DIR));
++        sprintf(buf, "%s" EXTENSIONS_DIR ":" REG_DIR EXTENSIONS_DIR,
++            Arguments::get_java_home());
++        Arguments::set_ext_dirs(buf);
+     }
+ 
+     /* Endorsed standards default directory. */
+     {
+-	char * buf;
+-	buf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
+-	sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
++        char * buf;
++        buf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
++        sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
+         Arguments::set_endorsed_dirs(buf);
+     }
+   }
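
Both hunks above lean on the same allocation idiom: sizeof applied to a
string-literal macro counts the trailing '\0', so summing sizeofs also pays
for the ':' separator and the final terminator (one spare byte, as the
comment notes). A minimal sketch of the idiom, with cpu_arch hardwired purely
for illustration:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define REG_DIR         "/usr/java/packages"
    #define DEFAULT_LIBPATH "/lib:/usr/lib"

    int main() {
      const char *cpu_arch = "amd64";       /* stand-in value */

      /* sizeof("...") includes the '\0', covering ':' and terminator */
      char *ld_library_path = (char *) malloc(sizeof(REG_DIR) +
          sizeof("/lib/") + strlen(cpu_arch) + sizeof(DEFAULT_LIBPATH));
      if (ld_library_path == NULL) return 1;
      sprintf(ld_library_path, REG_DIR "/lib/%s:" DEFAULT_LIBPATH, cpu_arch);

      const char *v = getenv("LD_LIBRARY_PATH");
      if (v != NULL) {
        char *t = ld_library_path;
        /* +1 for the colon, +1 for the trailing '\0' */
        ld_library_path = (char *) malloc(strlen(v) + 1 + strlen(t) + 1);
        if (ld_library_path == NULL) return 1;
        sprintf(ld_library_path, "%s:%s", v, t);
        free(t);
      }
      printf("%s\n", ld_library_path);
      free(ld_library_path);
      return 0;
    }
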
+@@ -409,7 +406,7 @@
+                                      : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
+       if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
+            return true;
+-      else 
++      else
+            return false;
+ }
+ 
+@@ -484,7 +481,7 @@
+   //Save caller's signal mask before setting VM signal mask
+   sigset_t caller_sigmask;
+   pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);
+- 
++
+   OSThread* osthread = thread->osthread();
+   osthread->set_caller_sigmask(caller_sigmask);
+ 
+@@ -506,7 +503,7 @@
+ 
+ void os::Linux::libpthread_init() {
+   // Save glibc and pthread version strings. Note that _CS_GNU_LIBC_VERSION
+-  // and _CS_GNU_LIBPTHREAD_VERSION are supported in glibc >= 2.3.2. Use a 
++  // and _CS_GNU_LIBPTHREAD_VERSION are supported in glibc >= 2.3.2. Use a
+   // generic name for earlier versions.
+   // Define macros here so we can build HotSpot on old systems.
+ # ifndef _CS_GNU_LIBC_VERSION
+@@ -524,7 +521,7 @@
+   } else {
+      // _CS_GNU_LIBC_VERSION is not supported, try gnu_get_libc_version()
+      static char _gnu_libc_version[32];
+-     jio_snprintf(_gnu_libc_version, sizeof(_gnu_libc_version), 
++     jio_snprintf(_gnu_libc_version, sizeof(_gnu_libc_version),
+               "glibc %s %s", gnu_get_libc_version(), gnu_get_libc_release());
+      os::Linux::set_glibc_version(_gnu_libc_version);
+   }
+@@ -561,7 +558,7 @@
+      os::Linux::set_is_LinuxThreads();
+   }
+ 
+-  // LinuxThreads have two flavors: floating-stack mode, which allows variable 
++  // LinuxThreads have two flavors: floating-stack mode, which allows variable
+   // stack size; and fixed-stack mode. NPTL is always floating-stack.
+   if (os::Linux::is_NPTL() || os::Linux::supports_variable_stack_size()) {
+      os::Linux::set_is_floating_stack();
+@@ -575,30 +572,30 @@
+ // to the stack guard, caller should block all signals.
+ //
+ // MAP_GROWSDOWN:
+-//   A special mmap() flag that is used to implement thread stacks. It tells 
+-//   kernel that the memory region should extend downwards when needed. This 
+-//   allows early versions of LinuxThreads to only mmap the first few pages 
++//   A special mmap() flag that is used to implement thread stacks. It tells
++//   kernel that the memory region should extend downwards when needed. This
++//   allows early versions of LinuxThreads to only mmap the first few pages
+ //   when creating a new thread. Linux kernel will automatically expand thread
+-//   stack as needed (on page faults). 
++//   stack as needed (on page faults).
+ //
+ //   However, because the memory region of a MAP_GROWSDOWN stack can grow on
+ //   demand, if a page fault happens outside an already mapped MAP_GROWSDOWN
+-//   region, it's hard to tell if the fault is due to a legitimate stack 
+-//   access or because of reading/writing non-exist memory (e.g. buffer 
+-//   overrun). As a rule, if the fault happens below current stack pointer, 
+-//   Linux kernel does not expand stack, instead a SIGSEGV is sent to the 
++//   region, it's hard to tell if the fault is due to a legitimate stack
++//   access or because of reading/writing non-exist memory (e.g. buffer
++//   overrun). As a rule, if the fault happens below current stack pointer,
++//   Linux kernel does not expand stack, instead a SIGSEGV is sent to the
+ //   application (see Linux kernel fault.c).
+ //
+ //   This Linux feature can cause SIGSEGV when VM bangs thread stack for
+ //   stack overflow detection.
+ //
+-//   Newer version of LinuxThreads (since glibc-2.2, or, RH-7.x) and NPTL do 
++//   Newer version of LinuxThreads (since glibc-2.2, or, RH-7.x) and NPTL do
+ //   not use this flag. However, the stack of initial thread is not created
+-//   by pthread, it is still MAP_GROWSDOWN. Also it's possible (though 
++//   by pthread, it is still MAP_GROWSDOWN. Also it's possible (though
+ //   unlikely) that user code can create a thread with MAP_GROWSDOWN stack
+ //   and then attach the thread to JVM.
+ //
+-// To get around the problem and allow stack banging on Linux, we need to 
++// To get around the problem and allow stack banging on Linux, we need to
+ // manually expand thread stack after receiving the SIGSEGV.
+ //
+ // There are two ways to expand thread stack to address "bottom", we used
+@@ -614,10 +611,10 @@
+ // That will destroy the mmap() frame and cause VM to crash.
+ //
+ // The following code works by adjusting sp first, then accessing the "bottom"
+-// page to force a page fault. Linux kernel will then automatically expand the 
+-// stack mapping. 
++// page to force a page fault. Linux kernel will then automatically expand the
++// stack mapping.
+ //
+-// _expand_stack_to() assumes its frame size is less than page size, which 
++// _expand_stack_to() assumes its frame size is less than page size, which
+ // should always be true if the function is not inlined.
+ 
+ #if __GNUC__ < 3    // gcc 2.x does not support noinline attribute
+@@ -639,7 +636,7 @@
+   bottom += os::Linux::page_size() - 1;
+ 
+   // sp might be slightly above current stack pointer; if that's the case, we
+-  // will alloca() a little more space than necessary, which is OK. Don't use 
++  // will alloca() a little more space than necessary, which is OK. Don't use
+   // os::current_stack_pointer(), as its result can be slightly below current
+   // stack pointer, causing us to not alloca enough to reach "bottom".
+   sp = (address)&sp;
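
A toy version of the alloca-and-touch technique the comment describes (not
the HotSpot implementation; the function name and the 64K figure are made up,
and the out-of-bounds pointer arithmetic is undefined behaviour by the letter
of the standard, tolerated here only because the whole point is to probe a
downward-growing Linux stack). noinline matters for the reason given above:
the allocation must happen in a fresh, small frame.

    #include <alloca.h>
    #include <stdio.h>

    __attribute__((noinline))
    static void expand_stack_to(char *bottom) {
      char *sp = (char *) alloca(16);
      if (sp > bottom) {
        sp = (char *) alloca(sp - bottom);  /* reach down past 'bottom' */
      }
      sp[0] = 0;    /* the touch that makes the kernel grow the mapping */
    }

    int main() {
      char probe;
      expand_stack_to(&probe - 64 * 1024);  /* ~64K below the current sp */
      printf("stack expanded\n");
      return 0;
    }
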
+@@ -680,11 +677,11 @@
+     //   Heap is mmap'ed at lower end of memory space. Thread stacks are
+     //   allocated (MAP_FIXED) from high address space. Every thread stack
+     //   occupies a fixed size slot (usually 2Mbytes, but user can change
+-    //   it to other values if they rebuild LinuxThreads). 
++    //   it to other values if they rebuild LinuxThreads).
+     //
+     // Problem with MAP_FIXED is that mmap() can still succeed even part of
+-    // the memory region has already been mmap'ed. That means if we have too 
+-    // many threads and/or very large heap, eventually thread stack will 
++    // the memory region has already been mmap'ed. That means if we have too
++    // many threads and/or very large heap, eventually thread stack will
+     // collide with heap.
+     //
+     // Here we try to prevent heap/stack collision by comparing current
+@@ -809,10 +806,10 @@
+           break;
+         } // else fall through:
+           // use VMThreadStackSize if CompilerThreadStackSize is not defined
+-      case os::vm_thread: 
+-      case os::pgc_thread: 
+-      case os::cgc_thread: 
+-      case os::watcher_thread: 
++      case os::vm_thread:
++      case os::pgc_thread:
++      case os::cgc_thread:
++      case os::watcher_thread:
+         if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
+         break;
+       }
+@@ -929,7 +926,7 @@
+     // It is also useful to get around the heap-stack-gap problem on SuSE
+     // kernel (see 4821821 for details). We first expand stack to the top
+     // of yellow zone, then enable stack yellow zone (order is significant,
+-    // enabling yellow zone first will crash JVM on SuSE Linux), so there 
++    // enabling yellow zone first will crash JVM on SuSE Linux), so there
+     // is no gap between the last two virtual memory regions.
+ 
+     JavaThread *jt = (JavaThread *)thread;
+@@ -960,13 +957,13 @@
+ // Free Linux resources related to the OSThread
+ void os::free_thread(OSThread* osthread) {
+   assert(osthread != NULL, "osthread not set");
+- 
++
+   if (Thread::current()->osthread() == osthread) {
+     // Restore caller's signal mask
+     sigset_t sigmask = osthread->caller_sigmask();
+     pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
+    }
+- 
++
+   delete osthread;
+ }
+ 
+@@ -1039,7 +1036,7 @@
+ }
+ 
+ // Locate initial thread stack. This special handling of initial thread stack
+-// is needed because pthread_getattr_np() on most (all?) Linux distros returns 
++// is needed because pthread_getattr_np() on most (all?) Linux distros returns
+ // bogus value for initial thread.
+ void os::Linux::capture_initial_stack(size_t max_size) {
+   // stack size is the easy part, get it from RLIMIT_STACK
+@@ -1070,21 +1067,21 @@
+   // Try to figure out where the stack base (top) is. This is harder.
+   //
+   // When an application is started, glibc saves the initial stack pointer in
+-  // a global variable "__libc_stack_end", which is then used by system 
++  // a global variable "__libc_stack_end", which is then used by system
+   // libraries. __libc_stack_end should be pretty close to stack top. The
+   // variable is available since the very early days. However, because it is
+   // a private interface, it could disappear in the future.
+   //
+   // Linux kernel saves start_stack information in /proc/<pid>/stat. Similar
+   // to __libc_stack_end, it is very close to stack top, but isn't the real
+-  // stack top. Note that /proc may not exist if VM is running as a chroot 
++  // stack top. Note that /proc may not exist if VM is running as a chroot
+   // program, so reading /proc/<pid>/stat could fail. Also the contents of
+   // /proc/<pid>/stat could change in the future (though unlikely).
+   //
+   // We try __libc_stack_end first. If that doesn't work, look for
+   // /proc/<pid>/stat. If neither of them works, we use current stack pointer
+   // as a hint, which should work well in most cases.
+-  
++
+   uintptr_t stack_start;
+ 
+   // try __libc_stack_end first
+@@ -1137,8 +1134,8 @@
+       // Skip pid and the command string. Note that we could be dealing with
+       // weird command names, e.g. user could decide to rename java launcher
+       // to "java 1.4.2 :)", then the stat file would look like
+-      //                1234 (java 1.4.2 :)) R ... ... 
+-      // We don't really need to know the command string, just find the last 
++      //                1234 (java 1.4.2 :)) R ... ...
++      // We don't really need to know the command string, just find the last
+       // occurrence of ")" and then start parsing from there. See bug 4726580.
+       char * s = strrchr(stat, ')');
+ 
+@@ -1149,33 +1146,33 @@
+ 
+         /*                                     1   1   1   1   1   1   1   1   1   1   2   2   2   2   2   2   2   2   2 */
+         /*              3  4  5  6  7  8   9   0   1   2   3   4   5   6   7   8   9   0   1   2   3   4   5   6   7   8 */
+-        i = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld %lu %lu %ld %lu %lu %lu %lu", 
+-	     &state,          /* 3  %c  */
+-	     &ppid,           /* 4  %d  */
+-	     &pgrp,           /* 5  %d  */
+-	     &session,        /* 6  %d  */
+-	     &nr,             /* 7  %d  */
+-	     &tpgrp,          /* 8  %d  */
+-	     &flags,          /* 9  %lu  */
+-	     &minflt,         /* 10 %lu  */
+-	     &cminflt,        /* 11 %lu  */
+-	     &majflt,         /* 12 %lu  */
+-	     &cmajflt,        /* 13 %lu  */
+-	     &utime,          /* 14 %lu  */
+-	     &stime,          /* 15 %lu  */
+-	     &cutime,         /* 16 %ld  */
+-	     &cstime,         /* 17 %ld  */
+-	     &prio,           /* 18 %ld  */
+-	     &nice,           /* 19 %ld  */
+-	     &junk,           /* 20 %ld  */
+-	     &it_real,        /* 21 %ld  */
+-	     &start,          /* 22 %lu  */
+-	     &vsize,          /* 23 %lu  */
+-	     &rss,            /* 24 %ld  */
+-	     &rsslim,         /* 25 %lu  */
+-	     &scodes,         /* 26 %lu  */
+-	     &ecode,          /* 27 %lu  */
+-	     &stack_start);   /* 28 %lu  */
++        i = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld %lu %lu %ld %lu %lu %lu %lu",
++             &state,          /* 3  %c  */
++             &ppid,           /* 4  %d  */
++             &pgrp,           /* 5  %d  */
++             &session,        /* 6  %d  */
++             &nr,             /* 7  %d  */
++             &tpgrp,          /* 8  %d  */
++             &flags,          /* 9  %lu  */
++             &minflt,         /* 10 %lu  */
++             &cminflt,        /* 11 %lu  */
++             &majflt,         /* 12 %lu  */
++             &cmajflt,        /* 13 %lu  */
++             &utime,          /* 14 %lu  */
++             &stime,          /* 15 %lu  */
++             &cutime,         /* 16 %ld  */
++             &cstime,         /* 17 %ld  */
++             &prio,           /* 18 %ld  */
++             &nice,           /* 19 %ld  */
++             &junk,           /* 20 %ld  */
++             &it_real,        /* 21 %ld  */
++             &start,          /* 22 %lu  */
++             &vsize,          /* 23 %lu  */
++             &rss,            /* 24 %ld  */
++             &rsslim,         /* 25 %lu  */
++             &scodes,         /* 26 %lu  */
++             &ecode,          /* 27 %lu  */
++             &stack_start);   /* 28 %lu  */
+       }
+ 
+       if (i != 28 - 2) {
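
The ')' trick above (see the bug id in the comment) is easy to exercise on
its own: read /proc/self/stat whole, find the LAST ')', and only then hand
the tail to sscanf, so a command name containing spaces or parentheses cannot
derail the field count. A cut-down sketch reading just fields 3-6:

    #include <stdio.h>
    #include <string.h>

    int main() {
      FILE *fp = fopen("/proc/self/stat", "r");
      if (fp == NULL) return 1;

      char stat[2048];
      size_t len = fread(stat, 1, sizeof(stat) - 1, fp);
      fclose(fp);
      stat[len] = '\0';

      char *s = strrchr(stat, ')');     /* skip pid and "(comm)" safely */
      if (s == NULL) return 1;

      char state;
      int ppid, pgrp, session;
      if (sscanf(s + 1, " %c %d %d %d", &state, &ppid, &pgrp, &session) == 4) {
        printf("state=%c ppid=%d pgrp=%d session=%d\n",
               state, ppid, pgrp, session);
      }
      return 0;
    }
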
+@@ -1187,20 +1184,20 @@
+       }
+     } else {
+       // For some reason we can't open /proc/self/stat (for example, running on
+-      // FreeBSD with a Linux emulator, or inside chroot), this should work for 
++      // FreeBSD with a Linux emulator, or inside chroot), this should work for
+       // most cases, so don't abort:
+       warning("Can't detect initial thread stack location - no /proc/self/stat");
+       stack_start = (uintptr_t) &rlim;
+     }
+   }
+ 
+-  // Now we have a pointer (stack_start) very close to the stack top, the 
++  // Now we have a pointer (stack_start) very close to the stack top, the
+   // next thing to do is to figure out the exact location of stack top. We
+-  // can find out the virtual memory area that contains stack_start by 
++  // can find out the virtual memory area that contains stack_start by
+   // reading /proc/self/maps, it should be the last vma in /proc/self/maps,
+-  // and its upper limit is the real stack top. (again, this would fail if 
++  // and its upper limit is the real stack top. (again, this would fail if
+   // running inside chroot, because /proc may not exist.)
+-  
++
+   uintptr_t stack_top;
+   address low, high;
+   if (find_vma((address)stack_start, &low, &high)) {
+@@ -1210,7 +1207,7 @@
+   } else {
+     // failed, likely because /proc/self/maps does not exist
+     warning("Can't detect initial thread stack location - find_vma failed");
+-    // best effort: stack_start is normally within a few pages below the real 
++    // best effort: stack_start is normally within a few pages below the real
+     // stack top, use it as stack top, and reduce stack size so we won't put
+     // guard page outside stack.
+     stack_top = stack_start;
+@@ -1276,9 +1273,9 @@
+   }
+ 
+   if (handle) {
+-    int (*clock_getres_func)(clockid_t, struct timespec*) = 
++    int (*clock_getres_func)(clockid_t, struct timespec*) =
+            (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_getres");
+-    int (*clock_gettime_func)(clockid_t, struct timespec*) = 
++    int (*clock_gettime_func)(clockid_t, struct timespec*) =
+            (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_gettime");
+     if (clock_getres_func && clock_gettime_func) {
+       // See if monotonic clock is supported by the kernel. Note that some
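
The probing pattern in this hunk, reduced to a standalone sketch: resolve
clock_getres/clock_gettime dynamically (historically they lived in librt
rather than libc) and confirm the kernel really supports CLOCK_MONOTONIC
before trusting it. Link with -ldl; everything outside the dlopen/dlsym calls
is scaffolding for the example.

    #include <dlfcn.h>
    #include <stdio.h>
    #include <time.h>

    typedef int (*clock_fn)(clockid_t, struct timespec *);

    int main() {
      void *handle = dlopen("librt.so.1", RTLD_LAZY);
      if (handle == NULL) handle = dlopen("librt.so", RTLD_LAZY);
      if (handle == NULL) return 1;

      clock_fn getres  = (clock_fn) dlsym(handle, "clock_getres");
      clock_fn gettime = (clock_fn) dlsym(handle, "clock_gettime");

      struct timespec tp;
      if (getres && gettime &&
          getres(CLOCK_MONOTONIC, &tp) == 0 &&
          gettime(CLOCK_MONOTONIC, &tp) == 0) {
        printf("monotonic clock: %ld.%09ld\n", (long) tp.tv_sec, tp.tv_nsec);
      } else {
        printf("no monotonic clock support\n");
      }
      dlclose(handle);
      return 0;
    }
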
+@@ -1321,7 +1318,7 @@
+   }
+   clockid_t clockid;
+   struct timespec tp;
+-  int (*pthread_getcpuclockid_func)(pthread_t, clockid_t *) = 
++  int (*pthread_getcpuclockid_func)(pthread_t, clockid_t *) =
+       (int(*)(pthread_t, clockid_t *)) dlsym(RTLD_DEFAULT, "pthread_getcpuclockid");
+ 
+   // Switch to using fast clocks for thread cpu time if
+@@ -1360,18 +1357,18 @@
+ 
+ void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
+   if (Linux::supports_monotonic_clock()) {
+-    info_ptr->max_value = ALL_64_BITS; 
++    info_ptr->max_value = ALL_64_BITS;
+ 
+     // CLOCK_MONOTONIC - amount of time since some arbitrary point in the past
+     info_ptr->may_skip_backward = false;      // not subject to resetting or drifting
+     info_ptr->may_skip_forward = false;       // not subject to resetting or drifting
+   } else {
+     // gettimeofday - based on time in seconds since the Epoch thus does not wrap
+-    info_ptr->max_value = ALL_64_BITS;  
++    info_ptr->max_value = ALL_64_BITS;
+ 
+     // gettimeofday is a real time clock so it skips
+-    info_ptr->may_skip_backward = true;  
+-    info_ptr->may_skip_forward = true; 
++    info_ptr->may_skip_backward = true;
++    info_ptr->may_skip_forward = true;
+   }
+ 
+   info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
+@@ -1404,7 +1401,7 @@
+   time(&long_time);
+   localtime_r(&long_time, &t);
+   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
+-               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, 
++               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
+                t.tm_hour, t.tm_min, t.tm_sec);
+   return buf;
+ }
+@@ -1438,7 +1435,7 @@
+ // called from signal handler. Before adding something to os::abort(), make
+ // sure it is async-safe and can handle partially initialized VM.
+ void os::abort(bool dump_core) {
+-  os::shutdown(); 
++  os::shutdown();
+   if (dump_core) {
+ #ifndef PRODUCT
+     fdStream out(defaultStream::output_fd());
+@@ -1456,7 +1453,7 @@
+ 
+ // Die immediately, no exit hook, no abort hook, no cleanup.
+ void os::die() {
+-  // _exit() on LinuxThreads only kills current thread 
++  // _exit() on LinuxThreads only kills current thread
+   ::abort();
+ }
+ 
+@@ -1466,16 +1463,16 @@
+ intx os::current_thread_id() { return (intx)pthread_self(); }
+ int os::current_process_id() {
+ 
+-  // Under the old linux thread library, linux gives each thread 
+-  // its own process id. Because of this each thread will return 
+-  // a different pid if this method were to return the result 
+-  // of getpid(2). Linux provides no api that returns the pid 
+-  // of the launcher thread for the vm. This implementation 
+-  // returns a unique pid, the pid of the launcher thread 
++  // Under the old linux thread library, linux gives each thread
++  // its own process id. Because of this each thread will return
++  // a different pid if this method were to return the result
++  // of getpid(2). Linux provides no api that returns the pid
++  // of the launcher thread for the vm. This implementation
++  // returns a unique pid, the pid of the launcher thread
+   // that starts the vm 'process'.
+ 
+-  // Under the NPTL, getpid() returns the same pid as the 
+-  // launcher thread rather than a unique pid per thread. 
++  // Under the NPTL, getpid() returns the same pid as the
++  // launcher thread rather than a unique pid per thread.
+   // Use gettid() if you want the old pre NPTL behaviour.
+ 
+   // if you are looking for the result of a call to getpid() that
+@@ -1535,7 +1532,7 @@
+   address base;          //         library base addr
+ };
+ 
+-static int address_to_library_name_callback(struct dl_phdr_info *info, 
++static int address_to_library_name_callback(struct dl_phdr_info *info,
+                                             size_t size, void *data) {
+   int i;
+   bool found = false;
+@@ -1552,7 +1549,7 @@
+         libbase = segbase;
+       }
+       // see if 'addr' is within current segment
+-      if (segbase <= d->addr && 
++      if (segbase <= d->addr &&
+           d->addr < segbase + info->dlpi_phdr[i].p_memsz) {
+         found = true;
+       }
+@@ -1603,7 +1600,7 @@
+   }
+ }
+ 
+-  // Loads .dll/.so and 
++  // Loads .dll/.so and
+   // in case of error it checks if .dll/.so was built for the
+   // same architecture as Hotspot is running on
+ 
+@@ -1659,7 +1656,7 @@
+   #define EM_486          6               /* Intel 80486 */
+   #endif
+ 
+-  static const arch_t arch_array[]={ 
++  static const arch_t arch_array[]={
+     {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
+     {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
+     {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
+@@ -1672,19 +1669,19 @@
+   };
+ 
+   #if  (defined IA32)
+-    static  Elf32_Half running_arch_code=EM_386; 
++    static  Elf32_Half running_arch_code=EM_386;
+   #elif   (defined AMD64)
+-    static  Elf32_Half running_arch_code=EM_X86_64; 
+-  #elif  (defined IA64) 
+-    static  Elf32_Half running_arch_code=EM_IA_64; 
+-  #elif  (defined __sparc) && (defined _LP64)  
+-    static  Elf32_Half running_arch_code=EM_SPARCV9; 
+-  #elif  (defined __sparc) && (!defined _LP64)  
+-    static  Elf32_Half running_arch_code=EM_SPARC; 
+-  #elif  (defined __powerpc64__)  
+-    static  Elf32_Half running_arch_code=EM_PPC64; 
+-  #elif  (defined __powerpc__)  
+-    static  Elf32_Half running_arch_code=EM_PPC; 
++    static  Elf32_Half running_arch_code=EM_X86_64;
++  #elif  (defined IA64)
++    static  Elf32_Half running_arch_code=EM_IA_64;
++  #elif  (defined __sparc) && (defined _LP64)
++    static  Elf32_Half running_arch_code=EM_SPARCV9;
++  #elif  (defined __sparc) && (!defined _LP64)
++    static  Elf32_Half running_arch_code=EM_SPARC;
++  #elif  (defined __powerpc64__)
++    static  Elf32_Half running_arch_code=EM_PPC64;
++  #elif  (defined __powerpc__)
++    static  Elf32_Half running_arch_code=EM_PPC;
+   #else
+     #error Method os::dll_load requires that one of following is defined:\
+          IA32, AMD64, IA64, __sparc, __powerpc__
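
After a dlopen() failure the code above reads the candidate library's ELF
header and compares it against the running platform via arch_array. A
simplified free-standing version of that check, with the x86-64 values
hardwired (the real code walks every row of the table):

    #include <elf.h>
    #include <stdio.h>
    #include <string.h>

    int main(int argc, char **argv) {
      if (argc < 2) { fprintf(stderr, "usage: %s lib.so\n", argv[0]); return 2; }

      FILE *fp = fopen(argv[1], "rb");
      if (fp == NULL) { perror("fopen"); return 1; }

      Elf64_Ehdr ehdr;  /* e_ident/e_machine sit at the same offsets in ELF32 */
      size_t n = fread(&ehdr, 1, sizeof(ehdr), fp);
      fclose(fp);

      if (n < EI_NIDENT + 4 || memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0) {
        fprintf(stderr, "not an ELF file\n");
        return 1;
      }

      if (ehdr.e_ident[EI_CLASS] != ELFCLASS64)
        printf("architecture word width mismatch\n");
      else if (ehdr.e_ident[EI_DATA] != ELFDATA2LSB)
        printf("endianness mismatch\n");
      else if (ehdr.e_machine != EM_X86_64)
        printf("machine type mismatch (e_machine = %u)\n",
               (unsigned) ehdr.e_machine);
      else
        printf("matches a 64-bit little-endian x86-64 process\n");
      return 0;
    }
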
+@@ -1706,28 +1703,28 @@
+     }
+   }
+ 
+-  assert(running_arch_index != -1, 
++  assert(running_arch_index != -1,
+     "Didn't find running architecture code (running_arch_code) in arch_array");
+   if (running_arch_index == -1) {
+     // Even though running architecture detection failed
+     // we may still continue with reporting dlerror() message
+-    return NULL; 
++    return NULL;
+   }
+ 
+   if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
+     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
+-    return NULL; 
++    return NULL;
+   }
+ 
+   if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
+     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
+-    return NULL; 
++    return NULL;
+   }
+ 
+   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
+     if ( lib_arch.name!=NULL ) {
+       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
+-        " (Possible cause: can't load %s-bit .so on a %s-bit platform)", 
++        " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
+         lib_arch.name, arch_array[running_arch_index].name);
+     } else {
+       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
+@@ -1779,7 +1776,7 @@
+ 
+   // Try to identify popular distros.
+   // Most Linux distributions have /etc/XXX-release file, which contains
+-  // the OS version string. Some have more than one /etc/XXX-release file 
++  // the OS version string. Some have more than one /etc/XXX-release file
+   // (e.g. Mandrake has both /etc/mandrake-release and /etc/redhat-release.),
+   // so the order is important.
+   if (!_print_ascii_file("/etc/mandrake-release", st) &&
+@@ -1857,14 +1854,14 @@
+ }
+ 
+ void os::print_memory_info(outputStream* st) {
+-    
++
+   st->print("Memory:");
+   st->print(" %dk page", os::vm_page_size()>>10);
+ 
+   // values in struct sysinfo are "unsigned long"
+   struct sysinfo si;
+   sysinfo(&si);
+-  
++
+   st->print(", physical " UINT64_FORMAT "k",
+             os::physical_memory() >> 10);
+   st->print("(" UINT64_FORMAT "k free)",
+@@ -1940,7 +1937,7 @@
+ }
+ 
+ 
+-static void print_signal_handler(outputStream* st, int sig, 
++static void print_signal_handler(outputStream* st, int sig,
+                                  char* buf, size_t buflen);
+ 
+ void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
+@@ -2007,18 +2004,18 @@
+         realpath(java_home_var, buf);
+         sprintf(buf + strlen(buf), "/jre/lib/%s", cpu_arch);
+         if (0 == access(buf, F_OK)) {
+-	  // Use current module name "libjvm[_g].so" instead of 
+-	  // "libjvm"debug_only("_g")".so" since for fastdebug version
+-	  // we should have "libjvm.so" but debug_only("_g") adds "_g"!
+-	  // It is used when we are choosing the HPI library's name 
+-	  // "libhpi[_g].so" in hpi::initialize_get_interface().
+-	  sprintf(buf + strlen(buf), "/hotspot/libjvm%s.so", p);
++          // Use current module name "libjvm[_g].so" instead of
++          // "libjvm"debug_only("_g")".so" since for fastdebug version
++          // we should have "libjvm.so" but debug_only("_g") adds "_g"!
++          // It is used when we are choosing the HPI library's name
++          // "libhpi[_g].so" in hpi::initialize_get_interface().
++          sprintf(buf + strlen(buf), "/hotspot/libjvm%s.so", p);
+         } else {
+           // Go back to path of .so
+           realpath(dli_fname, buf);
+         }
+       }
+-    } 
++    }
+   }
+ 
+   strcpy(saved_jvm_path, buf);
+@@ -2042,10 +2039,10 @@
+   // 4511530 - sem_post is serialized and handled by the manager thread. When
+   // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
+   // don't want to flood the manager thread with sem_post requests.
+-  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1) 
++  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
+       return;
+ 
+-  // Ctrl-C is pressed during error reporting, likely because the error 
++  // Ctrl-C is pressed during error reporting, likely because the error
+   // handler fails to abort. Let VM die immediately.
+   if (sig == SIGINT && is_error_reported()) {
+      os::die();
+@@ -2172,14 +2169,14 @@
+   return os::Linux::page_size();
+ }
+ 
+-// Rationale behind this function: 
++// Rationale behind this function:
+ //  current (Mon Apr 25 20:12:18 MSD 2005) oprofile drops samples without executable
+ //  mapping for address (see lookup_dcookie() in the kernel module), thus we cannot get
+ //  samples for JITted code. Here we create private executable mapping over the code cache
+ //  and then we can use standard (well, almost, as mapping can change) way to provide
+ //  info for the reporting script by storing timestamp and location of symbol
+ void linux_wrap_code(char* base, size_t size) {
+-  static volatile jint cnt = 0;  
++  static volatile jint cnt = 0;
+ 
+   if (!UseOprofile) {
+     return;
+@@ -2190,16 +2187,16 @@
+ 
+   sprintf(buf, "/tmp/hs-vm-%d-%d", os::current_process_id(), num);
+   unlink(buf);
+- 
++
+   int fd = open(buf, O_CREAT | O_RDWR, S_IRWXU);
+ 
+   if (fd != -1) {
+     off_t rv = lseek(fd, size-2, SEEK_SET);
+     if (rv != (off_t)-1) {
+       if (write(fd, "", 1) == 1) {
+-	mmap(base, size,
+-	     PROT_READ|PROT_WRITE|PROT_EXEC,
+-	     MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, fd, 0);
++        mmap(base, size,
++             PROT_READ|PROT_WRITE|PROT_EXEC,
++             MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, fd, 0);
+       }
+     }
+     close(fd);
+@@ -2207,10 +2204,10 @@
+   }
+ }
+ 
+-// NOTE: Linux kernel does not really reserve the pages for us. 
+-//       All it does is to check if there are enough free pages 
+-//       left at the time of mmap(). This could be a potential  
+-//       problem.                                               
++// NOTE: Linux kernel does not really reserve the pages for us.
++//       All it does is to check if there are enough free pages
++//       left at the time of mmap(). This could be a potential
++//       problem.
+ bool os::commit_memory(char* addr, size_t size) {
+   uintptr_t res = (uintptr_t) ::mmap(addr, size,
+                                    PROT_READ|PROT_WRITE|PROT_EXEC,
+@@ -2247,8 +2244,8 @@
+ 
+ bool os::uncommit_memory(char* addr, size_t size) {
+   return ::mmap(addr, size,
+-		PROT_READ|PROT_WRITE|PROT_EXEC,
+-		MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0)
++                PROT_READ|PROT_WRITE|PROT_EXEC,
++                MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0)
+     != MAP_FAILED;
+ }
+ 
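
The note above is the reason reserve and commit are separate steps: mmap with
MAP_NORESERVE merely claims address space, and physical pages appear only on
first touch. A minimal round trip over one region (protections simplified to
read/write here; the hunks above use rwx):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main() {
      const size_t size = 1 << 20;                 /* 1 MB */

      /* reserve: inaccessible, no backing store promised */
      char *base = (char *) mmap(NULL, size, PROT_NONE,
                                 MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS,
                                 -1, 0);
      if (base == MAP_FAILED) { perror("reserve"); return 1; }

      /* commit: MAP_FIXED replaces the reservation in place */
      if (mmap(base, size, PROT_READ | PROT_WRITE,
               MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0) == MAP_FAILED) {
        perror("commit"); return 1;
      }
      memset(base, 0xAB, size);                    /* pages fault in here */

      /* uncommit: back to a bare reservation */
      if (mmap(base, size, PROT_NONE,
               MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE | MAP_ANONYMOUS,
               -1, 0) == MAP_FAILED) {
        perror("uncommit"); return 1;
      }
      munmap(base, size);
      printf("reserve/commit/uncommit ok\n");
      return 0;
    }
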
+@@ -2289,12 +2286,13 @@
+ // Don't update _highest_vm_reserved_address, because there might be memory
+ // regions above addr + size. If so, releasing a memory region only creates
+ // a hole in the address space, it doesn't help prevent heap-stack collision.
+-// 
++//
+ static int anon_munmap(char * addr, size_t size) {
+   return ::munmap(addr, size) == 0;
+ }
+ 
+-char* os::reserve_memory(size_t bytes, char* requested_addr) {
++char* os::reserve_memory(size_t bytes, char* requested_addr,
++                         size_t alignment_hint) {
+   return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
+ }
+ 
+@@ -2310,10 +2308,10 @@
+   // Linux wants the mprotect address argument to be page aligned.
+   char* bottom = (char*)align_size_down((intptr_t)addr, os::Linux::page_size());
+ 
+-  // According to SUSv3, mprotect() should only be used with mappings 
++  // According to SUSv3, mprotect() should only be used with mappings
+   // established by mmap(), and mmap() always maps whole pages. Unaligned
+-  // 'addr' likely indicates problem in the VM (e.g. trying to change 
+-  // protection of malloc'ed or statically allocated memory). Check the 
++  // 'addr' likely indicates problem in the VM (e.g. trying to change
++  // protection of malloc'ed or statically allocated memory). Check the
+   // caller if you hit this assert.
+   assert(addr == bottom, "sanity check");
+ 
+@@ -2344,9 +2342,9 @@
+     _large_page_size = LargePageSizeInBytes;
+   } else {
+     // large_page_size on Linux is used to round up heap size. x86 uses either
+-    // 2M or 4M page, depending on whether PAE (Physical Address Extensions) 
+-    // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use 
+-    // page as large as 256M. 
++    // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
++    // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
++    // page as large as 256M.
+     //
+     // Here we try to figure out page size by parsing /proc/meminfo and looking
+     // for a line with the following format:
+@@ -2380,6 +2378,13 @@
+     }
+   }
+ 
++  const size_t default_page_size = (size_t)Linux::page_size();
++  if (_large_page_size > default_page_size) {
++    _page_sizes[0] = _large_page_size;
++    _page_sizes[1] = default_page_size;
++    _page_sizes[2] = 0;
++  }
++
+   // Large page support is available on 2.6 or newer kernel, some vendors
+   // (e.g. Redhat) have backported it to their 2.4 based distributions.
+   // We optimistically assume the support is available. If later it turns out
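
The /proc/meminfo parse described above, as a free-standing probe
(scan_hugepage_size is a name invented for this sketch; it looks for a line
of the form "Hugepagesize:     2048 kB" and converts it to bytes):

    #include <stdio.h>

    static size_t scan_hugepage_size() {
      FILE *fp = fopen("/proc/meminfo", "r");
      if (fp == NULL) return 0;

      size_t result = 0;
      char line[128];
      while (fgets(line, sizeof(line), fp) != NULL) {
        unsigned long kb;
        /* %lu skips the variable run of spaces after the colon */
        if (sscanf(line, "Hugepagesize: %lu kB", &kb) == 1) {
          result = (size_t) kb * 1024;
          break;
        }
      }
      fclose(fp);
      return result;
    }

    int main() {
      printf("huge page size: %lu bytes\n",
             (unsigned long) scan_hugepage_size());
      return 0;
    }
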
+@@ -2397,7 +2402,7 @@
+   key_t key = IPC_PRIVATE;
+   char *addr;
+ 
+-  bool warn_on_failure = UseLargePages && 
++  bool warn_on_failure = UseLargePages &&
+                         (!FLAG_IS_DEFAULT(UseLargePages) ||
+                          !FLAG_IS_DEFAULT(LargePageSizeInBytes)
+                         );
+@@ -2419,7 +2424,7 @@
+      //            e.g. on Redhat AS-3 it is "hugetlb_pool".
+      //      Note 2: it's possible there's enough physical memory available but
+      //            they are so fragmented after a long run that they can't
+-     //            coalesce into large pages. Try to reserve large pages when 
++     //            coalesce into large pages. Try to reserve large pages when
+      //            the system is still "fresh".
+      if (warn_on_failure) {
+        jio_snprintf(msg, sizeof(msg), "Failed to reserve shared memory (errno = %d).", errno);
+@@ -2461,7 +2466,7 @@
+ 
+ // Linux does not support anonymous mmap with large page memory. The only way
+ // to reserve large page memory without file backing is through SysV shared
+-// memory API. The entire memory region is committed and pinned upfront. 
++// memory API. The entire memory region is committed and pinned upfront.
+ // Hopefully this will change in the future...
+ bool os::can_commit_large_page_memory() {
+   return false;
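
A hedged sketch of that SysV path: shmget(SHM_HUGETLB) hands back fully
committed, pinned large pages, which is exactly the limitation
can_commit_large_page_memory() reports. It succeeds only where a hugetlb pool
is configured and the process has the required privileges; the fallback
define uses the Linux kernel's value in case older libc headers lack it.

    #include <stdio.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    #ifndef SHM_HUGETLB
    #define SHM_HUGETLB 04000     /* from the Linux kernel headers */
    #endif

    int main() {
      const size_t size = 2 * 1024 * 1024;     /* one 2M huge page */

      int shmid = shmget(IPC_PRIVATE, size, SHM_HUGETLB | SHM_R | SHM_W);
      if (shmid == -1) {
        perror("shmget(SHM_HUGETLB)");         /* e.g. empty hugetlb pool */
        return 1;
      }

      char *addr = (char *) shmat(shmid, NULL, 0);
      shmctl(shmid, IPC_RMID, NULL);           /* destroy on last detach */
      if (addr == (char *) -1) {
        perror("shmat");
        return 1;
      }

      addr[0] = 42;                            /* already committed */
      shmdt(addr);
      return 0;
    }
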
+@@ -2484,13 +2489,13 @@
+   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
+ 
+   // Repeatedly allocate blocks until the block is allocated at the
+-  // right spot. Give up after max_tries. Note that reserve_memory() will 
+-  // automatically update _highest_vm_reserved_address if the call is 
++  // right spot. Give up after max_tries. Note that reserve_memory() will
++  // automatically update _highest_vm_reserved_address if the call is
+   // successful. The variable tracks the highest memory address every reserved
+   // by JVM. It is used to detect heap-stack collision if running with
+   // fixed-stack LinuxThreads. Because here we may attempt to reserve more
+-  // space than needed, it could confuse the collision detecting code. To 
+-  // solve the problem, save current _highest_vm_reserved_address and 
++  // space than needed, it could confuse the collision detecting code. To
++  // solve the problem, save current _highest_vm_reserved_address and
+   // calculate the correct value before return.
+   address old_highest = _highest_vm_reserved_address;
+ 
+@@ -2505,7 +2510,7 @@
+      // mmap() is successful but it fails to reserve at the requested address
+      anon_munmap(addr, bytes);
+   }
+-  
++
+   int i;
+   for (i = 0; i < max_tries; ++i) {
+     base[i] = reserve_memory(bytes);
+@@ -2556,10 +2561,10 @@
+ 
+ size_t os::read(int fd, void *buf, unsigned int nBytes) {
+   return ::read(fd, buf, nBytes);
+-}  
++}
+ 
+ // TODO-FIXME: reconcile Solaris' os::sleep with the linux variation.
+-// Solaris uses poll(), linux uses park().  
++// Solaris uses poll(), linux uses park().
+ // Poll() is likely a better choice, assuming that Thread.interrupt()
+ // generates a SIGUSRx signal. Note that SIGUSR1 can interfere with
+ // SIGSEGV, see 4355769.
+@@ -2569,12 +2574,11 @@
+ int os::sleep(Thread* thread, jlong millis, bool interruptible) {
+   assert(thread == Thread::current(),  "thread consistency check");
+ 
+-  if (interruptible) {
+-    OSThread* osthread = thread->osthread();
+-    Linux::Event* event = (Linux::Event*) osthread->interrupt_event();
+-    event->reset() ; 
+-    OrderAccess::fence() ; 
++  ParkEvent * const slp = thread->_SleepEvent ;
++  slp->reset() ;
++  OrderAccess::fence() ;
+ 
++  if (interruptible) {
+     jlong prevtime = javaTimeNanos();
+ 
+     for (;;) {
+@@ -2608,7 +2612,7 @@
+         // cleared by handle_special_suspend_equivalent_condition() or
+         // java_suspend_self() via check_and_wait_while_suspended()
+ 
+-        event->park(millis);
++        slp->park(millis);
+ 
+         // were we externally suspended while we were waiting?
+         jt->check_and_wait_while_suspended();
+@@ -2616,11 +2620,27 @@
+     }
+   } else {
+     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
+-    Linux::Event event;
+-    event.lock();
+-    int rslt = event.timedwait(millis);
+-    event.unlock();
+-    return rslt;
++    jlong prevtime = javaTimeNanos();
++
++    for (;;) {
++      // It'd be nice to avoid the back-to-back javaTimeNanos() calls on
++      // the 1st iteration ...
++      jlong newtime = javaTimeNanos();
++
++      if (newtime - prevtime < 0) {
++        // time moving backwards, should only happen if no monotonic clock
++        // not a guarantee() because JVM should not abort on kernel/glibc bugs
++        assert(!Linux::supports_monotonic_clock(), "time moving backwards");
++      } else {
++        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISECS;
++      }
++
++      if(millis <= 0) break ;
++
++      prevtime = newtime;
++      slp->park(millis);
++    }
++    return OS_OK ;
+   }
+ }
+ 
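
The shape of the rewritten loop, boiled down: any wakeup may be early or
spurious, so each iteration deducts elapsed monotonic time and re-parks for
the remainder, and time moving backwards is tolerated rather than fatal. In
this sketch nanosleep() (which can return early on EINTR) stands in for
slp->park(millis); link with -lrt on older glibc.

    #include <stdio.h>
    #include <time.h>

    #define NANOSECS_PER_MILLISECS 1000000LL

    static long long now_nanos() {
      struct timespec tp;
      clock_gettime(CLOCK_MONOTONIC, &tp);
      return (long long) tp.tv_sec * 1000000000LL + tp.tv_nsec;
    }

    static void sleep_millis(long long millis) {
      long long prevtime = now_nanos();
      for (;;) {
        long long newtime = now_nanos();
        if (newtime - prevtime >= 0) {        /* clock did not go backwards */
          millis -= (newtime - prevtime) / NANOSECS_PER_MILLISECS;
        }
        if (millis <= 0) break;
        prevtime = newtime;

        struct timespec req;
        req.tv_sec  = (time_t) (millis / 1000);
        req.tv_nsec = (long) ((millis % 1000) * NANOSECS_PER_MILLISECS);
        nanosleep(&req, NULL);                /* stand-in for park(millis) */
      }
    }

    int main() {
      sleep_millis(250);
      printf("slept\n");
      return 0;
    }
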
+@@ -2645,7 +2665,7 @@
+   sched_yield();
+ }
+ 
+-void os::NakedYield() { sched_yield(); } 
++os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN ;}
+ 
+ void os::yield_all(int attempts) {
+   // Yields to all threads, including threads with lower priorities
+@@ -2743,7 +2763,7 @@
+ //  code that used to be used.
+ //
+ //  The protocol is quite simple:
+-//  - suspend: 
++//  - suspend:
+ //      - sends a signal to the target thread
+ //      - polls the suspend state of the osthread using a yield loop
+ //      - target thread signal handler (SR_handler) sets suspend state
+@@ -2753,7 +2773,7 @@
+ //      - sends signal to end the sigsuspend loop in the SR_handler
+ //
+ //  Note that the SR_lock plays no role in this suspend/resume protocol.
+-// 
++//
+ 
+ static void resume_clear_context(OSThread *osthread) {
+   osthread->set_ucontext(NULL);
+@@ -2771,7 +2791,7 @@
+ //
+ // Handler function invoked when a thread's execution is suspended or
+ // resumed. We have to be careful that only async-safe functions are
+-// called here (Note: most pthread functions are not async safe and 
++// called here (Note: most pthread functions are not async safe and
+ // should be avoided.)
+ //
+ // Note: sigwait() is a more natural fit than sigsuspend() from an
+@@ -2834,7 +2854,7 @@
+   if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
+     int sig = ::strtol(s, 0, 10);
+     if (sig > 0 || sig < _NSIG) {
+-	SR_signum = sig;
++        SR_signum = sig;
+     }
+   }
+ 
+@@ -2847,7 +2867,7 @@
+   /* Set up signal handler for suspend/resume */
+   act.sa_flags = SA_RESTART|SA_SIGINFO;
+   act.sa_handler = (void (*)(int)) SR_handler;
+-  
++
+   // SR_signum is blocked by default.
+   // 4528190 - We also need to block pthread restart signal (32 on all
+   // supported Linux platforms). Note that LinuxThreads need to block
+@@ -2916,21 +2936,21 @@
+   OSThread* osthread = thread->osthread();
+ 
+   if (!osthread->interrupted()) {
+-    Linux::Event* event = (Linux::Event*) osthread->interrupt_event();
+     osthread->set_interrupted(true);
+     // More than one thread can get here with the same value of osthread,
+     // resulting in multiple notifications.  We do, however, want the store
+     // to interrupted() to be visible to other threads before we execute unpark().
+     OrderAccess::fence();
+-    event->unpark();
++    ParkEvent * const slp = thread->_SleepEvent ;
++    if (slp != NULL) slp->unpark() ;
+   }
+ 
+   // For JSR166. Unpark even if interrupt status already was set
+-  if (thread->is_Java_thread()) 
++  if (thread->is_Java_thread())
+     ((JavaThread*)thread)->parker()->unpark();
+ 
+-  ParkEvent * ev = thread->_ParkEvent ; 
+-  if (ev != NULL) ev->unpark() ; 
++  ParkEvent * ev = thread->_ParkEvent ;
++  if (ev != NULL) ev->unpark() ;
+ 
+ }
+ 
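
The ordering in os::interrupt() above is the load-bearing part: the
interrupted flag is published first, the fence makes the store visible, and
only then is the sleeper unparked, so a thread that wakes and re-checks the
flag cannot miss the interrupt and go back to sleep. A toy C++ model of the
handshake (this ParkEvent is a simple mutex/condvar stand-in, not HotSpot's;
build with -std=c++11 -pthread):

    #include <atomic>
    #include <chrono>
    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <thread>

    struct ParkEvent {
      std::mutex mu;
      std::condition_variable cv;
      bool signaled = false;

      void park(std::chrono::milliseconds timeout) {
        std::unique_lock<std::mutex> lk(mu);
        cv.wait_for(lk, timeout, [this] { return signaled; });
        signaled = false;
      }
      void unpark() {
        { std::lock_guard<std::mutex> lk(mu); signaled = true; }
        cv.notify_one();
      }
    };

    std::atomic<bool> interrupted(false);
    ParkEvent sleep_event;

    int main() {
      std::thread sleeper([] {
        while (!interrupted.load(std::memory_order_acquire)) {
          sleep_event.park(std::chrono::milliseconds(100));
        }
        std::cout << "woke up interrupted\n";
      });

      std::this_thread::sleep_for(std::chrono::milliseconds(50));
      interrupted.store(true, std::memory_order_release);  // store first...
      sleep_event.unpark();                                // ...then unpark
      sleeper.join();
      return 0;
    }
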
+@@ -2943,9 +2963,8 @@
+   bool interrupted = osthread->interrupted();
+ 
+   if (interrupted && clear_interrupted) {
+-    Linux::Event* event = (Linux::Event*) osthread->interrupt_event();
+     osthread->set_interrupted(false);
+-    event->reset();
++    // consider thread->_SleepEvent->reset() ... optional optimization
+   }
+ 
+   return interrupted;
+@@ -3002,7 +3021,7 @@
+ 
+ struct sigaction* os::Linux::get_chained_signal_action(int sig) {
+   struct sigaction *actp = NULL;
+- 
++
+   if (libjsig_is_loaded) {
+     // Retrieve the old signal handler from libjsig
+     actp = (*get_signal_action)(sig);
+@@ -3105,7 +3124,7 @@
+ 
+   void* oldhand = oldAct.sa_sigaction
+                 ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
+-		: CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
++                : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
+   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
+       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
+       oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)signalHandler)) {
+@@ -3186,14 +3205,14 @@
+     // and if UserSignalHandler is installed all bets are off
+     if (CheckJNICalls) {
+       if (libjsig_is_loaded) {
+-	tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
+-	check_signals = false;
++        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
++        check_signals = false;
+       }
+       if (AllowUserSignalHandlers) {
+-	tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
+-	check_signals = false;
++        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
++        check_signals = false;
+       }
+-    }      
++    }
+   }
+ }
+ 
+@@ -3216,15 +3235,15 @@
+ // glibc on Linux platform uses non-documented flag
+ // to indicate, that some special sort of signal
+ // trampoline is used.
+-// We will never set this flag, and we should 
++// We will never set this flag, and we should
+ // ignore this flag in our diagnostic
+ #ifdef SIGNIFICANT_SIGNAL_MASK
+ #undef SIGNIFICANT_SIGNAL_MASK
+ #endif
+ #define SIGNIFICANT_SIGNAL_MASK (~0x04000000)
+ 
+-static const char* get_signal_handler_name(address handler, 
+-					   char* buf, int buflen) {
++static const char* get_signal_handler_name(address handler,
++                                           char* buf, int buflen) {
+   int offset;
+   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
+   if (found) {
+@@ -3240,12 +3259,12 @@
+   return buf;
+ }
+ 
+-static void print_signal_handler(outputStream* st, int sig, 
++static void print_signal_handler(outputStream* st, int sig,
+                                  char* buf, size_t buflen) {
+   struct sigaction sa;
+ 
+   sigaction(sig, NULL, &sa);
+-  
++
+   // See comment for SIGNIFICANT_SIGNAL_MASK define
+   sa.sa_flags &= SIGNIFICANT_SIGNAL_MASK;
+ 
+@@ -3281,8 +3300,8 @@
+     // check for flags, reset system-used one!
+     if((int)sa.sa_flags != os::Linux::get_our_sigflags(sig)) {
+       st->print(
+-		", flags was changed from " PTR32_FORMAT ", consider using jsig library",
+-		os::Linux::get_our_sigflags(sig));
++                ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
++                os::Linux::get_our_sigflags(sig));
+     }
+   }
+   st->cr();
+@@ -3299,9 +3318,9 @@
+ void os::run_periodic_checks() {
+ 
+   if (check_signals == false) return;
+-  
+-  // SEGV and BUS if overridden could potentially prevent 
+-  // generation of hs*.log in the event of a crash, debugging 
++
++  // SEGV and BUS if overridden could potentially prevent
++  // generation of hs*.log in the event of a crash, debugging
+   // such a case can be very challenging, so we absolutely
+   // check the following for a good measure:
+   DO_SIGNAL_CHECK(SIGSEGV);
+@@ -3314,7 +3333,7 @@
+ 
+   // ReduceSignalUsage allows the user to override these handlers
+   // see comments at the very top and jvm_solaris.h
+-  if (!ReduceSignalUsage) { 
++  if (!ReduceSignalUsage) {
+     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
+     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
+     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
+@@ -3326,12 +3345,12 @@
+ }
+ 
+ typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
+-  
++
+ static os_sigaction_t os_sigaction = NULL;
+ 
+ void os::Linux::check_signal_handler(int sig) {
+   char buf[O_BUFLEN];
+-  address jvmHandler = NULL; 
++  address jvmHandler = NULL;
+ 
+ 
+   struct sigaction act;
+@@ -3340,13 +3359,13 @@
+     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
+     if (os_sigaction == NULL) return;
+   }
+- 
++
+   os_sigaction(sig, (struct sigaction*)NULL, &act);
+ 
+ 
+   act.sa_flags &= SIGNIFICANT_SIGNAL_MASK;
+- 
+-  address thisHandler = (act.sa_flags & SA_SIGINFO) 
++
++  address thisHandler = (act.sa_flags & SA_SIGINFO)
+     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
+     : CAST_FROM_FN_PTR(address, act.sa_handler) ;
+ 
+@@ -3364,7 +3383,7 @@
+   case SHUTDOWN1_SIGNAL:
+   case SHUTDOWN2_SIGNAL:
+   case SHUTDOWN3_SIGNAL:
+-  case BREAK_SIGNAL:   
++  case BREAK_SIGNAL:
+     jvmHandler = (address)user_handler();
+     break;
+ 
+@@ -3376,15 +3395,15 @@
+     if (sig == SR_signum) {
+       jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
+     } else {
+-      return; 
++      return;
+     }
+     break;
+-  } 
++  }
+ 
+   if (thisHandler != jvmHandler) {
+     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
+-    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN)); 
+-    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN)); 
++    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
++    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
+     // No need to check this sig any longer
+     sigaddset(&check_signal_done, sig);
+   } else if(os::Linux::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Linux::get_our_sigflags(sig)) {
+@@ -3395,7 +3414,7 @@
+     sigaddset(&check_signal_done, sig);
+   }
+ 
+-  // Dump all the signal 
++  // Dump all the signal
+   if (sigismember(&check_signal_done, sig)) {
+     print_signal_handlers(tty, buf, O_BUFLEN);
+   }
+@@ -3419,7 +3438,7 @@
+ 
+ // this is called _before_ the most of global arguments have been parsed
+ void os::init(void) {
+-  char dummy;	/* used to get a guess on initial stack address */
++  char dummy;   /* used to get a guess on initial stack address */
+ //  first_hrtime = gethrtime();
+ 
+   // With LinuxThreads the JavaMain thread pid (primordial thread)
+@@ -3439,10 +3458,10 @@
+   ThreadCritical::initialize();
+ 
+   Linux::set_page_size(sysconf(_SC_PAGESIZE));
+-
+   if (Linux::page_size() == -1) {
+     fatal1("os_linux.cpp: os::init: sysconf failed (%s)", strerror(errno));
+   }
++  init_page_sizes((size_t) Linux::page_size());
+ 
+   Linux::initialize_system_info();
+ 
+@@ -3516,8 +3535,8 @@
+ 
+   Linux::libpthread_init();
+   if (PrintMiscellaneous && (Verbose || WizardMode)) {
+-     tty->print_cr("[HotSpot is running with %s, %s(%s)]\n", 
+-          Linux::glibc_version(), Linux::libpthread_version(), 
++     tty->print_cr("[HotSpot is running with %s, %s(%s)]\n",
++          Linux::glibc_version(), Linux::libpthread_version(),
+           Linux::is_floating_stack() ? "floating stack" : "fixed stack");
+   }
+ 
+@@ -3604,7 +3623,7 @@
+   return false;
+ }
+ 
+-/// 
++///
+ 
+ // Suspends the target using the signal mechanism and then grabs the PC before
+ // resuming the target. Used by the flat-profiler only
+@@ -3694,7 +3713,7 @@
+       if (begin < lowest)  begin = lowest;
+       Dl_info dlinfo2;
+       if (dladdr(end, &dlinfo2) && dlinfo2.dli_saddr != dlinfo.dli_saddr
+-	  && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
++          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
+         end = (address) dlinfo2.dli_saddr;
+       Disassembler::decode(begin, end);
+     }
+@@ -3834,7 +3853,7 @@
+                        bool allow_exec) {
+   // same as map_memory() on this OS
+   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
+-			allow_exec);
++                        allow_exec);
+ }
+ 
+ 
+@@ -3855,7 +3874,7 @@
+   return clockid;
+ }
+ 
+-// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool) 
++// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
+ // are used by JVM M&M and JVMTI to get user+sys or user CPU time
+ // of a thread.
+ //
+@@ -3898,7 +3917,7 @@
+ 
+ //
+ //  -1 on error.
+-// 
++//
+ 
+ static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
+   static bool proc_pid_cpu_avail = true;
+@@ -3973,9 +3992,9 @@
+   // Skip blank chars
+   do s++; while (isspace(*s));
+ 
+-  count = sscanf(s,"%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu", 
+-		 &idummy, &idummy, &idummy, &idummy, &idummy, &idummy, 
+-                 &ldummy, &ldummy, &ldummy, &ldummy, &ldummy, 
++  count = sscanf(s,"%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu",
++                 &idummy, &idummy, &idummy, &idummy, &idummy, &idummy,
++                 &ldummy, &ldummy, &ldummy, &ldummy, &ldummy,
+                  &user_time, &sys_time);
+   if ( count != 13 ) return -1;
+   if (user_sys_cpu_time) {
+@@ -4026,31 +4045,11 @@
+       (void)::poll(NULL, 0, 100);
+     }
+   } else {
+-    jio_fprintf(stderr, 
++    jio_fprintf(stderr,
+       "Could not open pause file '%s', continuing immediately.\n", filename);
+   }
+ }
+ 
+-#ifndef PRODUCT
+-void os::Linux::Event::verify() {
+-  guarantee(!Universe::is_fully_initialized() ||
+-            !Universe::heap()->is_in_reserved((oop)this),
+-            "Mutex must be in C heap only.");
+-}
+-
+-void os::Linux::OSMutex::verify() {
+-  guarantee(!Universe::is_fully_initialized() || 
+-    	    !Universe::heap()->is_in_reserved((oop)this), 
+-    	    "OSMutex must be in C heap only.");
+-}
+-
+-void os::Linux::OSMutex::verify_locked() {
+-  pthread_t my_id = pthread_self();
+-  assert(_is_owned, "OSMutex should be locked");
+-  assert(pthread_equal(_owner, my_id), "OSMutex should be locked by me");
+-}
+-#endif
+-
+ extern "C" {
+ 
+ /**
+@@ -4092,8 +4091,8 @@
+ 
+ // Refer to the comments in os_solaris.cpp park-unpark.
+ //
+-// Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can 
+-// hang indefinitely.  For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.  
++// Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
++// hang indefinitely.  For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
+ // For specifics regarding the bug see GLIBC BUGID 261237 :
+ //    http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
+ // Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
+@@ -4102,7 +4101,7 @@
+ // hang).  The JVM is vulernable via sleep(), Object.wait(timo), LockSupport.parkNanos()
+ // and monitorenter when we're using 1-0 locking.  All those operations may result in
+ // calls to pthread_cond_timedwait().  Using LD_ASSUME_KERNEL to use an older version
+-// of libpthread avoids the problem, but isn't practical.  
++// of libpthread avoids the problem, but isn't practical.
+ //
+ // Possible remedies:
+ //
+@@ -4111,21 +4110,21 @@
+ //      between the call to compute_abstime() and pthread_cond_timedwait(), more
+ //      than the minimum period may have passed, and the abstime may be stale (in the
+ //      past) resulting in a hang.   Using this technique reduces the odds of a hang
+-//      but the JVM is still vulnerable, particularly on heavily loaded systems.  
++//      but the JVM is still vulnerable, particularly on heavily loaded systems.
+ //
+-// 2.   Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead 
+-//      of the usual flag-condvar-mutex idiom.  The write side of the pipe is set 
++// 2.   Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
++//      of the usual flag-condvar-mutex idiom.  The write side of the pipe is set
+ //      NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
+ //      reduces to poll()+read().  This works well, but consumes 2 FDs per extant
+-//      thread.  
+-//      
++//      thread.
++//
+ // 3.   Embargo pthread_cond_timedwait() and implement a native "chron" thread
+ //      that manages timeouts.  We'd emulate pthread_cond_timedwait() by enqueuing
+ //      a timeout request to the chron thread and then blocking via pthread_cond_wait().
+ //      This also works well.  In fact it avoids kernel-level scalability impediments
+ //      on certain platforms that don't handle lots of active pthread_cond_timedwait()
+-//      timers in a graceful fashion.  
+-// 
++//      timers in a graceful fashion.
++//
+ // 4.   When the abstime value is in the past it appears that control returns
+ //      correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
+ //      Subsequent timedwait/wait calls may hang indefinitely.  Given that, we
+@@ -4137,101 +4136,137 @@
+ //      within critical sections protected by the adjunct mutex.  This prevents
+ //      cond_signal() from "seeing" a condvar that's in the midst of being
+ //      reinitialized or that is corrupt.  Sadly, this invariant obviates the
+-//      desirable signal-after-unlock optimization that avoids futile context switching. 
++//      desirable signal-after-unlock optimization that avoids futile context switching.
+ //
+ //      I'm also concerned that some versions of NPTL might allocate an auxiliary
+-//      structure when a condvar is used or initialized.  cond_destroy()  would 
++//      structure when a condvar is used or initialized.  cond_destroy()  would
+ //      release the helper structure.  Our reinitialize-after-timedwait fix
+-//      put excessive stress on malloc/free and locks protecting the c-heap. 
++//      put excessive stress on malloc/free and locks protecting the c-heap.
+ //
+ // We currently use (4).  See the WorkAroundNTPLTimedWaitHang flag.
+ // It may be possible to refine (4) by checking the kernel and NPTL versions
+-// and only enabling the work-around for vulnerable environments. 
++// and only enabling the work-around for vulnerable environments.
+ 
++// utility to compute the abstime argument to timedwait:
++// millis is the relative timeout time
++// abstime will be the absolute timeout time
++// TODO: replace compute_abstime() with unpackTime()
++
++static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
++  if (millis < 0)  millis = 0;
++  struct timeval now;
++  int status = gettimeofday(&now, NULL);
++  assert(status == 0, "gettimeofday");
++  jlong seconds = millis / 1000;
++  millis %= 1000;
++  if (seconds > 50000000) { // see man cond_timedwait(3T)
++    seconds = 50000000;
++  }
++  abstime->tv_sec = now.tv_sec  + seconds;
++  long       usec = now.tv_usec + millis * 1000;
++  if (usec >= 1000000) {
++    abstime->tv_sec += 1;
++    usec -= 1000000;
++  }
++  abstime->tv_nsec = usec * 1000;
++  return abstime;
++}
++
++
++// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
++// Conceptually TryPark() should be equivalent to park(0).
++
++int os::PlatformEvent::TryPark() {
++  for (;;) {
++    const int v = _Event ;
++    guarantee ((v == 0) || (v == 1), "invariant") ;
++    if (Atomic::cmpxchg (0, &_Event, v) == v) return v  ;
++  }
++}
+ 
+ void os::PlatformEvent::park() {       // AKA "down()"
+   // Invariant: Only the thread associated with the Event/PlatformEvent
+-  // may call park().  
++  // may call park().
+   // TODO: assert that _Assoc != NULL or _Assoc == Self
+-  int v ; 
+-  for (;;) { 
+-      v = _Event ; 
+-      if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ; 
++  int v ;
++  for (;;) {
++      v = _Event ;
++      if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
+   }
+-  guarantee (v >= 0, "invariant") ; 
+-  if (v == 0) { 
++  guarantee (v >= 0, "invariant") ;
++  if (v == 0) {
+      // Do this the hard way by blocking ...
+      int status = pthread_mutex_lock(_mutex);
+      assert_status(status == 0, status, "mutex_lock");
+-     guarantee (_nParked == 0, "invariant") ; 
+-     ++ _nParked ; 
++     guarantee (_nParked == 0, "invariant") ;
++     ++ _nParked ;
+      while (_Event < 0) {
+         status = pthread_cond_wait(_cond, _mutex);
+         // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
+         // Treat this the same as if the wait was interrupted
+-        if (status == ETIME) { status = EINTR; } 
++        if (status == ETIME) { status = EINTR; }
+         assert_status(status == 0 || status == EINTR, status, "cond_wait");
+      }
+-     -- _nParked ; 
++     -- _nParked ;
+ 
+     // In theory we could move the ST of 0 into _Event past the unlock(),
+-    // but then we'd need a MEMBAR after the ST. 
+-    _Event = 0 ; 
++    // but then we'd need a MEMBAR after the ST.
++    _Event = 0 ;
+      status = pthread_mutex_unlock(_mutex);
+      assert_status(status == 0, status, "mutex_unlock");
+   }
+-  guarantee (_Event >= 0, "invariant") ; 
++  guarantee (_Event >= 0, "invariant") ;
+ }
+-    
++
+ int os::PlatformEvent::park(jlong millis) {
+-  guarantee (_nParked == 0, "invariant") ; 
++  guarantee (_nParked == 0, "invariant") ;
+ 
+-  int v ; 
+-  for (;;) { 
+-      v = _Event ; 
+-      if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ; 
++  int v ;
++  for (;;) {
++      v = _Event ;
++      if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
+   }
+-  guarantee (v >= 0, "invariant") ; 
+-  if (v != 0) return OS_OK ; 
++  guarantee (v >= 0, "invariant") ;
++  if (v != 0) return OS_OK ;
+ 
+   // We do this the hard way, by blocking the thread.
+-  // Consider enforcing a minimum timeout value.  
++  // Consider enforcing a minimum timeout value.
+   struct timespec abst;
+-  os::Linux::Event::compute_abstime(&abst, millis);
++  compute_abstime(&abst, millis);
+ 
+   int ret = OS_TIMEOUT;
+   int status = pthread_mutex_lock(_mutex);
+   assert_status(status == 0, status, "mutex_lock");
+-  guarantee (_nParked == 0, "invariant") ; 
++  guarantee (_nParked == 0, "invariant") ;
+   ++_nParked ;
+ 
+   // Object.wait(timo) will return because of
+   // (a) notification
+   // (b) timeout
+   // (c) thread.interrupt
+-  // 
+-  // Thread.interrupt and object.notify{All} both call Event::set.  	
+-  // That is, we treat thread.interrupt as a special case of notification.  
+-  // The underlying Solaris implementation, cond_timedwait, admits 
++  //
++  // Thread.interrupt and object.notify{All} both call Event::set.
++  // That is, we treat thread.interrupt as a special case of notification.
++  // The underlying Solaris implementation, cond_timedwait, admits
+   // spurious/premature wakeups, but the JLS/JVM spec prevents the
+   // JVM from making those visible to Java code.  As such, we must
+-  // filter out spurious wakeups.  We assume all ETIME returns are valid. 
++  // filter out spurious wakeups.  We assume all ETIME returns are valid.
+   //
+-  // TODO: properly differentiate simultaneous notify+interrupt. 
+-  // In that case, we should propagate the notify to another waiter. 
++  // TODO: properly differentiate simultaneous notify+interrupt.
++  // In that case, we should propagate the notify to another waiter.
+ 
+-  while (_Event < 0) { 		
++  while (_Event < 0) {
+     status = os::Linux::safe_cond_timedwait(_cond, _mutex, &abst);
+-    if (status != 0 && WorkAroundNPTLTimedWaitHang) { 
+-      pthread_cond_destroy (_cond); 
++    if (status != 0 && WorkAroundNPTLTimedWaitHang) {
++      pthread_cond_destroy (_cond);
+       pthread_cond_init (_cond, NULL) ;
+     }
+-    assert_status(status == 0 || status == EINTR || 
+-		  status == ETIME || status == ETIMEDOUT, 
+-		  status, "cond_timedwait");
+-    if (!FilterSpuriousWakeups) break ; 		// previous semantics
+-    if (status == ETIME || status == ETIMEDOUT) break ; 
+-    // We consume and ignore EINTR and spurious wakeups.   
++    assert_status(status == 0 || status == EINTR ||
++                  status == ETIME || status == ETIMEDOUT,
++                  status, "cond_timedwait");
++    if (!FilterSpuriousWakeups) break ;                 // previous semantics
++    if (status == ETIME || status == ETIMEDOUT) break ;
++    // We consume and ignore EINTR and spurious wakeups.
+   }
+   --_nParked ;
+   if (_Event >= 0) {
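
The loop above is remedy (4) in action: whenever safe_cond_timedwait() returns
nonzero and WorkAroundNPTLTimedWaitHang is set, the condvar is destroyed and
reinitialized while the mutex is still held, so a condvar corrupted by the NPTL
bug cannot wedge later waiters and no signalling thread can observe the
half-initialized state. A minimal standalone sketch of the same idiom in raw
pthreads (illustrative only; work_around_hang stands in for the HotSpot flag):

    // Timedwait that rebuilds a possibly-corrupt NPTL condvar on failure.
    // The caller holds *mutex on entry, as pthread_cond_timedwait() requires.
    #include <pthread.h>
    #include <time.h>

    static bool work_around_hang = true;  // stand-in for WorkAroundNPTLTimedWaitHang

    int guarded_timedwait(pthread_cond_t* cond, pthread_mutex_t* mutex,
                          const struct timespec* abstime) {
      int status = pthread_cond_timedwait(cond, mutex, abstime);
      if (status != 0 && work_around_hang) {
        // Reinitialize under the mutex so no signaller sees a torn condvar.
        pthread_cond_destroy(cond);
        pthread_cond_init(cond, NULL);
      }
      return status;
    }
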
+@@ -4240,34 +4275,34 @@
+   _Event = 0 ;
+   status = pthread_mutex_unlock(_mutex);
+   assert_status(status == 0, status, "mutex_unlock");
+-  assert (_nParked == 0, "invariant") ;  
++  assert (_nParked == 0, "invariant") ;
+   return ret;
+ }
+ 
+ void os::PlatformEvent::unpark() {
+-  int v, AnyWaiters ; 
+-  for (;;) { 
+-      v = _Event ; 
+-      if (v > 0) { 
++  int v, AnyWaiters ;
++  for (;;) {
++      v = _Event ;
++      if (v > 0) {
+          // The LD of _Event could have reordered or be satisfied
+          // by a read-aside from this processor's write buffer.
+          // To avoid problems execute a barrier and then
+          // ratify the value.
+-         OrderAccess::fence() ; 
+-         if (_Event == v) return ; 
+-         continue ; 
++         OrderAccess::fence() ;
++         if (_Event == v) return ;
++         continue ;
+       }
+-      if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ; 
++      if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ;
+   }
+   if (v < 0) {
+      // Wait for the thread associated with the event to vacate
+      int status = pthread_mutex_lock(_mutex);
+      assert_status(status == 0, status, "mutex_lock");
+-     AnyWaiters = _nParked ; 
+-     assert (AnyWaiters == 0 || AnyWaiters == 1, "invariant") ; 
+-     if (AnyWaiters != 0 && WorkAroundNPTLTimedWaitHang) { 
+-        AnyWaiters = 0 ; 
+-        pthread_cond_signal (_cond); 
++     AnyWaiters = _nParked ;
++     assert (AnyWaiters == 0 || AnyWaiters == 1, "invariant") ;
++     if (AnyWaiters != 0 && WorkAroundNPTLTimedWaitHang) {
++        AnyWaiters = 0 ;
++        pthread_cond_signal (_cond);
+      }
+      status = pthread_mutex_unlock(_mutex);
+      assert_status(status == 0, status, "mutex_unlock");
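
The retry loop at the top of unpark() re-reads _Event after a full fence to
"ratify" a value that may have been satisfied from this processor's write
buffer. The same load/fence/reload idiom in portable C++11 atomics (a sketch;
HotSpot's OrderAccess::fence() is modeled here by a seq_cst fence, and 'event'
stands in for _Event):

    #include <atomic>

    std::atomic<int> event(0);

    bool already_signaled() {
      for (;;) {
        int v = event.load(std::memory_order_relaxed);
        if (v > 0) {
          std::atomic_thread_fence(std::memory_order_seq_cst);
          if (event.load(std::memory_order_relaxed) == v) return true;  // ratified
          continue;  // the value moved under us; re-read and retry
        }
        return false;  // caller falls through to the cmpxchg(v+1) path
      }
    }
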
+@@ -4277,10 +4312,283 @@
+      }
+   }
+ 
+-  // Note that we signal() _after dropping the lock for "immortal" Events.  
+-  // This is safe and avoids a common class of  futile wakeups.  In rare 
+-  // circumstances this can cause a thread to return prematurely from 
+-  // cond_{timed}wait() but the spurious wakeup is benign and the victim will 
+-  // simply re-test the condition and re-park itself.  
++  // Note that we signal() _after dropping the lock for "immortal" Events.
++  // This is safe and avoids a common class of  futile wakeups.  In rare
++  // circumstances this can cause a thread to return prematurely from
++  // cond_{timed}wait() but the spurious wakeup is benign and the victim will
++  // simply re-test the condition and re-park itself.
+ }
+ 
++
++// JSR166
++// -------------------------------------------------------
++
++/*
++ * The solaris and linux implementations of park/unpark are fairly
++ * conservative for now, but can be improved. They currently use a
++ * mutex/condvar pair, plus a count.
++ * Park decrements count if > 0, else does a condvar wait.  Unpark
++ * sets count to 1 and signals condvar.  Only one thread ever waits
++ * on the condvar. Contention seen when trying to park implies that someone
++ * is unparking you, so don't wait. And spurious returns are fine, so there
++ * is no need to track notifications.
++ */
++
++
++#define NANOSECS_PER_SEC 1000000000
++#define NANOSECS_PER_MILLISEC 1000000
++#define MAX_SECS 100000000
++/*
++ * This code is common to linux and solaris and will be moved to a
++ * common place in dolphin.
++ *
++ * The passed in time value is either a relative time in nanoseconds
++ * or an absolute time in milliseconds. Either way it has to be unpacked
++ * into suitable seconds and nanoseconds components and stored in the
++ * given timespec structure.
++ * Given that time is a 64-bit value and the time_t used in the timespec is
++ * only a signed 32-bit value (except on 64-bit Linux), we have to watch for
++ * overflow if times way in the future are given. Further, on Solaris versions
++ * prior to 10 there is a restriction (see cond_timedwait) that the specified
++ * number of seconds, in abstime, is less than current_time  + 100,000,000.
++ * As it will be 28 years before "now + 100000000" will overflow we can
++ * ignore overflow and just impose a hard-limit on seconds using the value
++ * of "now + 100,000,000". This places a limit on the timeout of about 3.17
++ * years from "now".
++ */
++
++static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
++  assert (time > 0, "convertTime");
++
++  struct timeval now;
++  int status = gettimeofday(&now, NULL);
++  assert(status == 0, "gettimeofday");
++
++  time_t max_secs = now.tv_sec + MAX_SECS;
++
++  if (isAbsolute) {
++    jlong secs = time / 1000;
++    if (secs > max_secs) {
++      absTime->tv_sec = max_secs;
++    }
++    else {
++      absTime->tv_sec = secs;
++    }
++    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
++  }
++  else {
++    jlong secs = time / NANOSECS_PER_SEC;
++    if (secs >= MAX_SECS) {
++      absTime->tv_sec = max_secs;
++      absTime->tv_nsec = 0;
++    }
++    else {
++      absTime->tv_sec = now.tv_sec + secs;
++      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
++      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
++        absTime->tv_nsec -= NANOSECS_PER_SEC;
++        ++absTime->tv_sec; // note: this must be <= max_secs
++      }
++    }
++  }
++  assert(absTime->tv_sec >= 0, "tv_sec < 0");
++  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
++  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
++  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
++}
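
unpackTime() accepts either an absolute deadline in milliseconds or a relative
timeout in nanoseconds, and clamps the result to now + MAX_SECS so a 32-bit
signed time_t cannot overflow; 100,000,000 seconds divided by roughly
31,536,000 seconds per year gives the ~3.17-year ceiling the comment mentions.
A worked example of the clamping arithmetic for the relative case (standalone,
illustrative only):

    #include <sys/time.h>
    #include <cstdio>

    #define NANOSECS_PER_SEC 1000000000LL
    #define MAX_SECS 100000000

    int main() {
      timeval now;
      gettimeofday(&now, NULL);

      long long rel_ns = 2 * NANOSECS_PER_SEC;       // park for two seconds
      long long secs   = rel_ns / NANOSECS_PER_SEC;  // == 2, far below MAX_SECS
      time_t deadline  = (secs >= MAX_SECS) ? now.tv_sec + MAX_SECS
                                            : now.tv_sec + (time_t)secs;
      printf("now=%ld deadline=%ld (clamped=%d)\n",
             (long)now.tv_sec, (long)deadline, secs >= MAX_SECS);
      return 0;
    }
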
++
++void Parker::park(bool isAbsolute, jlong time) {
++  // Optional fast-path check:
++  // Return immediately if a permit is available.
++  if (_counter > 0) {
++      _counter = 0 ;
++      return ;
++  }
++
++  Thread* thread = Thread::current();
++  assert(thread->is_Java_thread(), "Must be JavaThread");
++  JavaThread *jt = (JavaThread *)thread;
++
++  // Optional optimization -- avoid state transitions if there's an interrupt pending.
++  // Check interrupt before trying to wait
++  if (Thread::is_interrupted(thread, false)) {
++    return;
++  }
++
++  // Next, demultiplex/decode time arguments
++  timespec absTime;
++  if (time < 0) { // don't wait at all
++    return;
++  }
++  if (time > 0) {
++    unpackTime(&absTime, isAbsolute, time);
++  }
++
++
++  // Enter safepoint region
++  // Beware of deadlocks such as 6317397.
++  // The per-thread Parker:: mutex is a classic leaf-lock.
++  // In particular a thread must never block on the Threads_lock while
++  // holding the Parker:: mutex.  If safepoints are pending both the
++  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
++  ThreadBlockInVM tbivm(jt);
++
++  // Don't wait if we cannot get the lock, since interference arises from
++  // unblocking.  Also, check interrupt before trying to wait.
++  if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
++    return;
++  }
++
++  int status ;
++  if (_counter > 0)  { // no wait needed
++    _counter = 0;
++    status = pthread_mutex_unlock(_mutex);
++    assert (status == 0, "invariant") ;
++    return;
++  }
++
++#ifdef ASSERT
++  // Don't catch signals while blocked; let the running threads have the signals.
++  // (This allows a debugger to break into the running thread.)
++  sigset_t oldsigs;
++  sigset_t* allowdebug_blocked = os::Linux::allowdebug_blocked_signals();
++  pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
++#endif
++
++  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
++  jt->set_suspend_equivalent();
++  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
++
++  if (time == 0) {
++    status = pthread_cond_wait (_cond, _mutex) ;
++  } else {
++    status = os::Linux::safe_cond_timedwait (_cond, _mutex, &absTime) ;
++    if (status != 0 && WorkAroundNPTLTimedWaitHang) {
++      pthread_cond_destroy (_cond) ;
++      pthread_cond_init    (_cond, NULL);
++    }
++  }
++  assert_status(status == 0 || status == EINTR ||
++                status == ETIME || status == ETIMEDOUT,
++                status, "cond_timedwait");
++
++#ifdef ASSERT
++  pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
++#endif
++
++  _counter = 0 ;
++  status = pthread_mutex_unlock(_mutex) ;
++  assert_status(status == 0, status, "invariant") ;
++  // If externally suspended while waiting, re-suspend
++  if (jt->handle_special_suspend_equivalent_condition()) {
++    jt->java_suspend_self();
++  }
++
++}
++
++void Parker::unpark() {
++  int s, status ;
++  status = pthread_mutex_lock(_mutex);
++  assert (status == 0, "invariant") ;
++  s = _counter;
++  _counter = 1;
++  if (s < 1) {
++     if (WorkAroundNPTLTimedWaitHang) {
++        status = pthread_cond_signal (_cond) ;
++        assert (status == 0, "invariant") ;
++        status = pthread_mutex_unlock(_mutex);
++        assert (status == 0, "invariant") ;
++     } else {
++        status = pthread_mutex_unlock(_mutex);
++        assert (status == 0, "invariant") ;
++        status = pthread_cond_signal (_cond) ;
++        assert (status == 0, "invariant") ;
++     }
++  } else {
++    pthread_mutex_unlock(_mutex);
++    assert (status == 0, "invariant") ;
++  }
++}
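
Parker::park()/unpark() above implement the JSR166 permit protocol from the
block comment: park() consumes the permit if one is available, otherwise
blocks; unpark() sets the permit to one and signals, so any number of unpark()
calls coalesce into a single permit. A condensed portable sketch of that
protocol (std::mutex/std::condition_variable stand in for the pthread pair;
HotSpot's interrupt checks, safepoint transitions, and trylock fast path are
omitted):

    #include <condition_variable>
    #include <mutex>

    class MiniParker {
      std::mutex mu_;
      std::condition_variable cv_;
      int counter_;                  // 0 = no permit, 1 = permit available

     public:
      MiniParker() : counter_(0) {}

      void park() {                  // spurious returns are acceptable
        std::unique_lock<std::mutex> lk(mu_);
        if (counter_ > 0) { counter_ = 0; return; }  // consume the permit
        cv_.wait(lk);                // real callers re-check their own condition
        counter_ = 0;
      }

      void unpark() {
        std::lock_guard<std::mutex> lk(mu_);
        counter_ = 1;                // repeated unparks collapse into one permit
        cv_.notify_one();            // at most one thread ever waits here
      }
    };
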
++
++
++extern char** environ;
++
++#ifndef __NR_fork
++#define __NR_fork IA32_ONLY(2) IA64_ONLY(not defined) AMD64_ONLY(57)
++#endif
++
++#ifndef __NR_execve
++#define __NR_execve IA32_ONLY(11) IA64_ONLY(1033) AMD64_ONLY(59)
++#endif
++
++// Run the specified command in a separate process. Return its exit value,
++// or -1 on failure (e.g. can't fork a new process).
++// Unlike system(), this function can be called from signal handler. It
++// doesn't block SIGINT et al.
++int os::fork_and_exec(char* cmd) {
++  char * argv[4];
++  argv[0] = "sh";
++  argv[1] = "-c";
++  argv[2] = cmd;
++  argv[3] = NULL;
++
++  // fork() in LinuxThreads/NPTL is not async-safe. It needs to run
++  // pthread_atfork handlers and reset pthread library. All we need is a
++  // separate process to execve. Make a direct syscall to fork process.
++  // On IA64 there's no fork syscall, we have to use fork() and hope for
++  // the best...
++  pid_t pid = NOT_IA64(syscall(__NR_fork);)
++              IA64_ONLY(fork();)
++
++  if (pid < 0) {
++    // fork failed
++    return -1;
++
++  } else if (pid == 0) {
++    // child process
++
++    // execve() in LinuxThreads will call pthread_kill_other_threads_np()
++    // first to kill every thread on the thread list. Because this list is
++    // not reset by fork() (see notes above), execve() will instead kill
++    // every thread in the parent process. We know this is the only thread
++    // in the new process, so make a system call directly.
++    // IA64 should use normal execve() from glibc to match the glibc fork()
++    // above.
++    NOT_IA64(syscall(__NR_execve, "/bin/sh", argv, environ);)
++    IA64_ONLY(execve("/bin/sh", argv, environ);)
++
++    // execve failed
++    _exit(-1);
++
++  } else  {
++    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
++    // care about the actual exit code, for now.
++
++    int status;
++
++    // Wait for the child process to exit.  This returns immediately if
++    // the child has already exited.
++    while (waitpid(pid, &status, 0) < 0) {
++        switch (errno) {
++        case ECHILD: return 0;
++        case EINTR: break;
++        default: return -1;
++        }
++    }
++
++    if (WIFEXITED(status)) {
++       // The child exited normally; get its exit code.
++       return WEXITSTATUS(status);
++    } else if (WIFSIGNALED(status)) {
++       // The child exited because of a signal
++       // The best value to return is 0x80 + signal number,
++       // because that is what all Unix shells do, and because
++       // it allows callers to distinguish between process exit and
++       // process death by signal.
++       return 0x80 + WTERMSIG(status);
++    } else {
++       // Unknown exit code; pass it through
++       return status;
++    }
++  }
++}
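
os::fork_and_exec() forks via a direct syscall because fork() in
LinuxThreads/NPTL runs pthread_atfork handlers and is not async-signal-safe,
which matters because the error reporter may invoke it from a signal handler.
The waitpid() status decoding it uses can be exercised on its own; the sketch
below uses plain libc fork()/execl() for brevity (the raw-syscall dance belongs
to the patch, not to this illustration):

    // Run a shell command and decode its exit status the way fork_and_exec()
    // does: 0x80 + signal number for signal deaths, mirroring Unix shells.
    #include <sys/wait.h>
    #include <unistd.h>
    #include <cerrno>
    #include <cstdio>

    static int run(const char* cmd) {
      pid_t pid = fork();                       // plain fork(); see caveat above
      if (pid < 0) return -1;
      if (pid == 0) {
        execl("/bin/sh", "sh", "-c", cmd, (char*)NULL);
        _exit(-1);                              // exec failed
      }
      int status;
      while (waitpid(pid, &status, 0) < 0) {    // retry if interrupted
        if (errno == ECHILD) return 0;
        if (errno != EINTR)  return -1;
      }
      if (WIFEXITED(status))   return WEXITSTATUS(status);
      if (WIFSIGNALED(status)) return 0x80 + WTERMSIG(status);
      return status;                            // unknown; pass it through
    }

    int main() { printf("exit=%d\n", run("exit 3")); return 0; }
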
+diff -ruN openjdk6/hotspot/src/os/linux/vm/os_linux.hpp openjdk/hotspot/src/os/linux/vm/os_linux.hpp
+--- openjdk6/hotspot/src/os/linux/vm/os_linux.hpp	2008-02-28 05:02:30.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/os_linux.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)os_linux.hpp	1.70 07/05/05 17:04:37 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Linux_OS defines the interface to Linux operating systems
+@@ -187,257 +184,11 @@
+ 
+   // Stack repair handling
+ 
+-  // none present 
++  // none present
+ 
+   // LinuxThreads work-around for 6292965
+   static int safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime);
+ 
+-  // An event is a condition variable with associated mutex.
+-  // (A cond_t is only usable in combination with a mutex_t.)
+-  class Event : public CHeapObj {
+-   private:
+-    volatile int    _count;
+-    volatile int _nParked ; 
+-    double cachePad [4] ; 
+-    pthread_mutex_t _mutex[1];
+-    pthread_cond_t  _cond[1];
+-
+-   public:
+-    Event * FreeNext ;                  // TSM free list linkage
+-    int Immortal ;                         
+-    
+-   public:
+-    Event() {
+-      verify();
+-      int status;
+-      status = pthread_cond_init(_cond, NULL);
+-      assert_status(status == 0, status, "cond_init");
+-      status = pthread_mutex_init(_mutex, NULL);
+-      assert_status(status == 0, status, "mutex_init");
+-      _count = 0;
+-      _nParked = 0 ; 
+-      FreeNext = NULL ; 
+-      Immortal = 0 ; 
+-    }
+-    ~Event() {
+-      int status;
+-      guarantee (Immortal == 0, "invariant") ; 
+-      guarantee (_nParked == 0, "invariant") ; 
+-      status = pthread_cond_destroy(_cond);
+-      assert_status(status == 0, status, "cond_destroy");
+-      status = pthread_mutex_destroy(_mutex);
+-      assert_status(status == 0, status, "mutex_destroy");
+-    }
+-    // hook to check for mutex corruption:
+-    void verify() PRODUCT_RETURN;
+-    // for use in critical sections:
+-    void lock() {
+-      verify();
+-      int status = pthread_mutex_lock(_mutex);
+-      assert_status(status == 0, status,  "mutex_lock");
+-    }
+-    bool trylock() {
+-      verify();
+-      int status = pthread_mutex_trylock(_mutex);
+-      if (status == EBUSY) {
+-	return false;
+-      }
+-      assert_status(status == 0, status, "mutex_lock");
+-      return true;
+-    }
+-    void unlock() {
+-      verify();
+-      int status = pthread_mutex_unlock(_mutex);
+-      assert_status(status == 0, status, "mutex_unlock");
+-    }
+-    int timedwait(timespec* abstime) {
+-      verify();
+-      ++_nParked ; 
+-      int status = safe_cond_timedwait(_cond, _mutex, abstime);
+-      --_nParked ; 
+-      if (status != 0 && _nParked == 0 && WorkAroundNPTLTimedWaitHang) {
+-         // Beware: if the condvar is currupted by the NPTL bug but we have
+-         // multiple threads parked in timedwait() -- as can happen with
+-         // Monitor::wait() -- then we don't have much recourse.  
+-         // Reinitializing the condvar would likely orphan the other waiters.  
+-         pthread_cond_destroy (_cond) ; 
+-         pthread_cond_init (_cond, NULL) ; 
+-      }
+-      assert_status(status == 0 || status == EINTR || 
+-		    status == ETIME || status == ETIMEDOUT, 
+-		    status, "cond_timedwait");
+-      return status;
+-    }
+-    int timedwait(jlong millis) {
+-      timespec abst;
+-      Event::compute_abstime(&abst, millis);
+-      return timedwait(&abst);
+-    }
+-    int wait() {
+-      verify();
+-      ++_nParked ; 
+-      int status = pthread_cond_wait(_cond, _mutex);
+-      --_nParked ; 
+-      // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
+-      // Treat this the same as if the wait was interrupted
+-      if(status == ETIME) {
+-	status = EINTR;
+-      }
+-      assert_status(status == 0 || status == EINTR, status, "cond_wait");
+-      return status;
+-    }
+-    void signal() {
+-      verify();
+-      int status = pthread_cond_signal(_cond);
+-      assert_status(status == 0, status, "cond_signal");
+-    }
+-    void broadcast() {
+-      verify();
+-      int status = pthread_cond_broadcast(_cond);
+-      assert_status(status == 0, status, "cond_broadcast");
+-    }
+-
+-    // TODO-FIXME: eliminate park, unpark and reset as well as interrupt_event().
+-    // Convert from interrupt_interrupt() to Self->ParkEvent. 
+-
+-    // functions used to support monitor and interrupt
+-    // Note: park() may wake up spuriously. Use it in a loop.
+-    void park() {
+-      verify();
+-      lock();
+-      while (_count <= 0) {
+-        wait();
+-      }
+-      _count = 0;
+-      unlock();
+-    }
+-
+-    int park(jlong millis) {
+-      verify();
+-      int ret = OS_TIMEOUT;
+-      lock();
+-      if (_count <= 0) {
+-        timedwait(millis);
+-      }
+-      if (_count > 0) {
+-        _count = 0;
+-        ret = OS_OK;
+-      }
+-      unlock();
+-      return ret;
+-    }
+-
+-    void unpark() {
+-      verify();
+-      lock();
+-      int AnyWaiters = _nParked - _count ; 
+-      _count = 1;
+-      // Refer to the comments in os_solaris.hpp
+-      // Try to avoid the call to signal(), and, if possible, 
+-      // call signal() after dropping the lock.  
+-      if (AnyWaiters > 0) { 
+-         if (Immortal && WorkAroundNPTLTimedWaitHang == 0) { 
+-            unlock(); signal(); 
+-         } else { 
+-            signal(); unlock();
+-         }
+-      } else { 
+-         unlock(); 
+-      }
+-    }
+-
+-    void reset() {
+-     verify();
+-     assert (_nParked == 0, "invariant") ; 
+-     _count = 0;
+-    }
+-
+-    // utility to compute the abstime argument to timedwait:
+-    static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
+-      // millis is the relative timeout time
+-      // abstime will be the absolute timeout time
+-      if (millis < 0)  millis = 0;
+-      struct timeval now;
+-      int status = gettimeofday(&now, NULL);
+-      assert(status == 0, "gettimeofday");
+-      jlong seconds = millis / 1000;
+-      millis %= 1000;
+-      if (seconds > 50000000) { // see man cond_timedwait(3T)
+-        seconds = 50000000;
+-      }
+-      abstime->tv_sec = now.tv_sec  + seconds;
+-      long       usec = now.tv_usec + millis * 1000;
+-      if (usec >= 1000000) {
+-        abstime->tv_sec += 1;
+-        usec -= 1000000;
+-      }
+-      abstime->tv_nsec = usec * 1000;
+-      return abstime;
+-    }
+-  };
+-
+-  // An OSMutex is an abstraction used in the implementation of
+-  // ObjectMonitor; needed to abstract over the different thread
+-  // libraries' mutexes on Solaris.
+-  class OSMutex : public CHeapObj {
+-   private:
+-    #ifndef PRODUCT
+-    debug_only(volatile pthread_t _owner;)
+-    debug_only(volatile bool      _is_owned;)
+-    #endif
+-    pthread_mutex_t _mutex[1];
+-
+-   public:
+-    OSMutex() {
+-      verify();
+-      int status = pthread_mutex_init(_mutex, NULL);
+-      assert_status(status == 0, status, "pthread_mutex_init");
+-      #ifndef PRODUCT
+-      debug_only(_is_owned = false;)
+-      #endif
+-    }
+-    ~OSMutex() {
+-      int status = pthread_mutex_destroy(_mutex);
+-      assert_status(status == 0, status, "pthread_mutex_destroy");
+-    }
+-    // for use in critical sections:
+-    void lock() {
+-      verify();
+-      int status = pthread_mutex_lock(_mutex);
+-      assert_status(status == 0, status, "pthread_mutex_lock");
+-      #ifndef PRODUCT
+-      assert(_is_owned == false, "mutex_lock should not have had owner");
+-      debug_only(_owner = pthread_self();)
+-      debug_only(_is_owned = true;)
+-      #endif
+-    }
+-    bool trylock() {
+-      verify();
+-      int status = pthread_mutex_trylock(_mutex);
+-      if (status == EBUSY)
+-	return false;
+-      assert_status(status == 0, status, "pthread_mutex_trylock");
+-      #ifndef PRODUCT
+-      debug_only(_owner = pthread_self();)
+-      debug_only(_is_owned = true;)
+-      #endif
+-      return true;
+-    }
+-    void unlock() {
+-      verify();
+-      #ifndef PRODUCT
+-      debug_only(pthread_t my_id = pthread_self();)
+-      assert(pthread_equal(_owner, my_id), "mutex_unlock");
+-      debug_only(_is_owned = false;)
+-      #endif
+-      int status = pthread_mutex_unlock(_mutex);
+-      assert_status(status == 0, status, "pthread_mutex_unlock");
+-    }
+-
+-    // hook to check for mutex corruption:
+-    void verify() PRODUCT_RETURN;
+-    void verify_locked() PRODUCT_RETURN;
+-  };
+ 
+   // Linux suspend/resume support - this helper is a shadow of its former
+   // self now that low-level suspension is barely used, and old workarounds
+@@ -460,18 +211,18 @@
+     void set_suspend_action(int x) { _suspend_action = x;    }
+ 
+     // atomic updates for _state
+-    void set_suspended()           { 
++    void set_suspended()           {
+       jint temp, temp2;
+       do {
+-	temp = _state;
+-	temp2 = Atomic::cmpxchg(temp | SR_SUSPENDED, &_state, temp);
++        temp = _state;
++        temp2 = Atomic::cmpxchg(temp | SR_SUSPENDED, &_state, temp);
+       } while (temp2 != temp);
+     }
+-    void clear_suspended()        { 
++    void clear_suspended()        {
+       jint temp, temp2;
+       do {
+-	temp = _state;
+-	temp2 = Atomic::cmpxchg(temp & ~SR_SUSPENDED, &_state, temp);
++        temp = _state;
++        temp2 = Atomic::cmpxchg(temp & ~SR_SUSPENDED, &_state, temp);
+       } while (temp2 != temp);
+     }
+     bool is_suspended()            { return _state & SR_SUSPENDED;       }
+@@ -480,7 +231,7 @@
+   };
+ };
+ 
+-  
++
+ class PlatformEvent : public CHeapObj {
+   private:
+     double CachePad [4] ;   // increase odds that _mutex is sole occupant of cache line
+@@ -488,9 +239,9 @@
+     volatile int _nParked ;
+     pthread_mutex_t _mutex  [1] ;
+     pthread_cond_t  _cond   [1] ;
+-    double PostPad  [2] ;  
+-    Thread * _Assoc ; 
+-    
++    double PostPad  [2] ;
++    Thread * _Assoc ;
++
+   public:       // TODO-FIXME: make dtor private
+     ~PlatformEvent() { guarantee (0, "invariant") ; }
+ 
+@@ -503,16 +254,17 @@
+       assert_status(status == 0, status, "mutex_init");
+       _Event   = 0 ;
+       _nParked = 0 ;
+-      _Assoc   = NULL ; 
++      _Assoc   = NULL ;
+     }
+-  
++
+     // Use caution with reset() and fired() -- they may require MEMBARs
+-    void reset() { _Event = 0 ; } 
+-    int  fired() { return _Event; } 
+-    void park () ; 
++    void reset() { _Event = 0 ; }
++    int  fired() { return _Event; }
++    void park () ;
+     void unpark () ;
++    int  TryPark () ;
+     int  park (jlong millis) ;
+-    void SetAssociation (Thread * a) { _Assoc = a ; } 
++    void SetAssociation (Thread * a) { _Assoc = a ; }
+ } ;
+ 
+ class PlatformParker : public CHeapObj {
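
The set_suspended()/clear_suspended() helpers retained above flip one bit of
_state with an Atomic::cmpxchg retry loop. The equivalent read-modify-write in
C++11 atomics is a compare_exchange loop, or simply fetch_or/fetch_and (a
sketch; the names and the bit value are illustrative, not HotSpot's):

    #include <atomic>

    enum { SR_SUSPENDED = 0x01 };    // illustrative bit value
    std::atomic<int> state(0);

    void set_suspended() {
      int old = state.load();
      // Retry until the CAS installs old|SR_SUSPENDED over an unchanged old;
      // a failed compare_exchange_weak refreshes 'old' with the current value.
      while (!state.compare_exchange_weak(old, old | SR_SUSPENDED)) {
      }
      // Equivalent one-liner: state.fetch_or(SR_SUSPENDED);
    }
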
+diff -ruN openjdk6/hotspot/src/os/linux/vm/os_linux.inline.hpp openjdk/hotspot/src/os/linux/vm/os_linux.inline.hpp
+--- openjdk6/hotspot/src/os/linux/vm/os_linux.inline.hpp	2008-02-28 05:02:30.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/os_linux.inline.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)os_linux.inline.hpp	1.30 07/05/05 17:04:36 JVM"
+-#endif
+ /*
+  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ inline void* os::thread_local_storage_at(int index) {
+diff -ruN openjdk6/hotspot/src/os/linux/vm/os_share_linux.hpp openjdk/hotspot/src/os/linux/vm/os_share_linux.hpp
+--- openjdk6/hotspot/src/os/linux/vm/os_share_linux.hpp	2008-02-28 05:02:30.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/os_share_linux.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)os_share_linux.hpp	1.11 07/05/05 17:04:36 JVM"
+-#endif
+ /*
+  * Copyright 1999-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // misc
+diff -ruN openjdk6/hotspot/src/os/linux/vm/osThread_linux.cpp openjdk/hotspot/src/os/linux/vm/osThread_linux.cpp
+--- openjdk6/hotspot/src/os/linux/vm/osThread_linux.cpp	2008-02-28 05:02:29.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/osThread_linux.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)osThread_linux.cpp	1.24 07/05/05 17:04:36 JVM"
+-#endif
+ /*
+  * Copyright 1999-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,24 +19,13 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // do not include  precompiled  header file
+ # include "incls/_osThread_linux.cpp.incl"
+ 
+ 
+-// Events associated with threads via "interrupt_event" must
+-// reside in a TSM (type-stable memory) pool.  
+-// The relationship between the interrupt_event and a thread
+-// must be stable for the lifetime of the thread.  
+-//
+-// A slightly better implementation would be to subclass Event
+-// with a "TSMEvent" that added the FreeNext field.  
+- 
+-static os::Linux::Event * EventFreeList = NULL ;     
+-static pthread_mutex_t EventFreeLock = PTHREAD_MUTEX_INITIALIZER ;
+- 
+ void OSThread::pd_initialize() {
+   assert(this != NULL, "check");
+   _thread_id        = 0;
+@@ -51,37 +37,10 @@
+ 
+   sigemptyset(&_caller_sigmask);
+ 
+-  // Try to allocate an Event from the TSM list, otherwise
+-  // instantiate a new Event.
+-  pthread_mutex_lock (&EventFreeLock) ;
+-  os::Linux::Event * ie = EventFreeList ;
+-  if (ie != NULL) {
+-     guarantee (ie->Immortal, "invariant") ;
+-     EventFreeList = ie->FreeNext ;
+-  }
+-  pthread_mutex_unlock (&EventFreeLock) ;
+-  if (ie == NULL) {
+-     ie = new os::Linux::Event();
+-  } else { 
+-     ie->reset () ;
+-  }
+-  ie->FreeNext = (os::Linux::Event *) 0xBAD ;
+-  ie->Immortal = 1 ;
+-  _interrupt_event = ie ;
+-
+   _startThread_lock = new Monitor(Mutex::event, "startThread_lock", true);
+   assert(_startThread_lock !=NULL, "check");
+ }
+ 
+ void OSThread::pd_destroy() {
+-  os::Linux::Event * ie = _interrupt_event ;
+-  _interrupt_event = NULL ;
+-  guarantee (ie != NULL, "invariant") ;
+-  guarantee (ie->Immortal, "invariant") ;
+-  pthread_mutex_lock (&EventFreeLock) ;
+-  ie->FreeNext = EventFreeList ;
+-  EventFreeList = ie ;
+-  pthread_mutex_unlock (&EventFreeLock) ;
+-
+   delete _startThread_lock;
+ }
+diff -ruN openjdk6/hotspot/src/os/linux/vm/osThread_linux.hpp openjdk/hotspot/src/os/linux/vm/osThread_linux.hpp
+--- openjdk6/hotspot/src/os/linux/vm/osThread_linux.hpp	2008-02-28 05:02:29.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/osThread_linux.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)osThread_linux.hpp	1.37 07/05/05 17:04:36 JVM"
+-#endif
+ /*
+  * Copyright 1999-2004 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+  private:
+@@ -51,7 +48,7 @@
+   sigset_t _caller_sigmask; // Caller's signal mask
+ 
+  public:
+-  
++
+   // Methods to save/restore caller's signal mask
+   sigset_t  caller_sigmask() const       { return _caller_sigmask; }
+   void    set_caller_sigmask(sigset_t sigmask)  { _caller_sigmask = sigmask; }
+@@ -106,7 +103,7 @@
+ private:
+   void* _siginfo;
+   ucontext_t* _ucontext;
+-  int _expanding_stack;			/* non zero if manually expanding stack */
++  int _expanding_stack;                 /* non zero if manually expanding stack */
+   address _alt_sig_stack;               /* address of base of alternate signal stack */
+ 
+ public:
+@@ -121,25 +118,11 @@
+   void set_alt_sig_stack(address val)     { _alt_sig_stack = val; }
+   address alt_sig_stack(void)             { return _alt_sig_stack; }
+ 
+-  // ***************************************************************
+-  // The interrupt_event is used to implement java.lang.Thread.interrupt,
+-  // which on Linux can interrupt sleep and ObjectMonitor::wait().
+-  // ***************************************************************
+-
+ private:
+-
+-  os::Linux::Event* _interrupt_event;
+   Monitor* _startThread_lock;     // sync parent and child in thread creation
+ 
+ public:
+ 
+-  os::Linux::Event* interrupt_event() const {
+-    return _interrupt_event;
+-  }
+-  void set_interrupt_event(os::Linux::Event* ptr) {
+-    _interrupt_event = ptr;
+-  }
+-
+   Monitor* startThread_lock() const {
+     return _startThread_lock;
+   }
+@@ -154,5 +137,5 @@
+   void pd_destroy();
+ 
+ // Reconciliation History
+-// osThread_solaris.hpp	1.24 99/08/27 13:11:54
++// osThread_solaris.hpp 1.24 99/08/27 13:11:54
+ // End
+diff -ruN openjdk6/hotspot/src/os/linux/vm/perfMemory_linux.cpp openjdk/hotspot/src/os/linux/vm/perfMemory_linux.cpp
+--- openjdk6/hotspot/src/os/linux/vm/perfMemory_linux.cpp	2008-02-28 05:02:30.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/perfMemory_linux.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)perfMemory_linux.cpp	1.30 07/05/05 17:04:35 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -85,9 +82,9 @@
+ //
+ static void save_memory_to_file(char* addr, size_t size) {
+ 
+- const char* destfile = PerfMemory::get_perfdata_file_path(); 
+- assert(destfile[0] != '\0', "invalid PerfData file path"); 
+- 
++ const char* destfile = PerfMemory::get_perfdata_file_path();
++ assert(destfile[0] != '\0', "invalid PerfData file path");
++
+   int result;
+ 
+   RESTARTABLE(::open(destfile, O_CREAT|O_WRONLY|O_TRUNC, S_IREAD|S_IWRITE),
+@@ -95,7 +92,7 @@
+   if (result == OS_ERR) {
+     if (PrintMiscellaneous && Verbose) {
+       warning("Could not create Perfdata save file: %s: %s\n",
+-	      destfile, strerror(errno));
++              destfile, strerror(errno));
+     }
+   } else {
+     int fd = result;
+@@ -453,7 +450,7 @@
+   if (PrintMiscellaneous && Verbose && result == OS_ERR) {
+     if (errno != ENOENT) {
+       warning("Could not unlink shared memory backing"
+-	      " store file %s : %s\n", path, strerror(errno));
++              " store file %s : %s\n", path, strerror(errno));
+     }
+   }
+ }
+@@ -584,7 +581,7 @@
+       //
+       if (PrintMiscellaneous && Verbose) {
+         warning("could not create directory %s: %s\n",
+-		dirname, strerror(errno));
++                dirname, strerror(errno));
+       }
+       return false;
+     }
+@@ -620,7 +617,7 @@
+   // save the file descriptor
+   int fd = result;
+ 
+-  // set the file size 
++  // set the file size
+   RESTARTABLE(::ftruncate(fd, (off_t)size), result);
+   if (result == OS_ERR) {
+     if (PrintMiscellaneous && Verbose) {
+@@ -756,7 +753,7 @@
+ 
+   if (backing_store_file_name != NULL) {
+     remove_file(backing_store_file_name);
+-    // Don't.. Free heap memory could deadlock os::abort() if it is called 
++    // Don't. Freeing heap memory could deadlock os::abort() if it is called
+     // from signal handler. OS will reclaim the heap memory.
+     // FREE_C_HEAP_ARRAY(char, backing_store_file_name);
+     backing_store_file_name = NULL;
+@@ -890,7 +887,7 @@
+ 
+   if (PerfTraceMemOps) {
+     tty->print("mapped " SIZE_FORMAT " bytes for vmid %d at "
+-	       INTPTR_FORMAT "\n", size, vmid, (void*)mapAddress);
++               INTPTR_FORMAT "\n", size, vmid, (void*)mapAddress);
+   }
+ }
+ 
+@@ -940,9 +937,9 @@
+ 
+   // If user specifies PerfDataSaveFile, it will save the performance data
+   // to the specified file name no matter whether PerfDataSaveToFile is specified
+-  // or not. In other word, -XX:PerfDataSaveFile=.. overrides flag 
++  // or not. In other words, -XX:PerfDataSaveFile=.. overrides flag
+   // -XX:+PerfDataSaveToFile.
+-  if (PerfDataSaveToFile || PerfDataSaveFile[0] != '\0') {
++  if (PerfDataSaveToFile || PerfDataSaveFile != NULL) {
+     save_memory_to_file(start(), capacity());
+   }
+ 
+@@ -976,7 +973,7 @@
+      *sizep = capacity();
+      return;
+   }
+-  
++
+   mmap_attach_shared(user, vmid, mode, addrp, sizep, CHECK);
+ }
+ 
+@@ -1012,4 +1009,3 @@
+ char* PerfMemory::backing_store_filename() {
+   return backing_store_file_name;
+ }
+-
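
Throughout this file, open()/ftruncate() and friends are wrapped in RESTARTABLE
so that calls interrupted by a signal (EINTR) are retried instead of being
reported as failures. The macro itself is defined elsewhere in the HotSpot
sources; the usual shape of the idiom looks like this (an assumed sketch with
an illustrative name, not the verbatim HotSpot definition):

    #include <cerrno>
    #include <fcntl.h>

    // Retry a syscall-like expression while it fails with EINTR.
    #define RETRY_EINTR(expr, result)            \
      do {                                       \
        result = (expr);                         \
      } while ((result) == -1 && errno == EINTR)

    int open_backing_store(const char* path) {
      int fd;
      RETRY_EINTR(::open(path, O_CREAT | O_WRONLY | O_TRUNC, 0600), fd);
      return fd;  // -1 only for genuine errors, never a bare EINTR
    }
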
+diff -ruN openjdk6/hotspot/src/os/linux/vm/stubRoutines_linux.cpp openjdk/hotspot/src/os/linux/vm/stubRoutines_linux.cpp
+--- openjdk6/hotspot/src/os/linux/vm/stubRoutines_linux.cpp	2008-02-28 05:02:30.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/stubRoutines_linux.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)stubRoutines_linux.cpp	1.10 07/05/05 17:04:36 JVM"
+-#endif
+ /*
+  * Copyright 2001 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+diff -ruN openjdk6/hotspot/src/os/linux/vm/threadCritical_linux.cpp openjdk/hotspot/src/os/linux/vm/threadCritical_linux.cpp
+--- openjdk6/hotspot/src/os/linux/vm/threadCritical_linux.cpp	2008-02-28 05:02:30.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/threadCritical_linux.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)threadCritical_linux.cpp	1.16 07/05/05 17:04:37 JVM"
+-#endif
+ /*
+  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -67,4 +64,3 @@
+     guarantee(ret == 0, "fatal error with pthread_mutex_unlock()");
+   }
+ }
+-
+diff -ruN openjdk6/hotspot/src/os/linux/vm/thread_linux.inline.hpp openjdk/hotspot/src/os/linux/vm/thread_linux.inline.hpp
+--- openjdk6/hotspot/src/os/linux/vm/thread_linux.inline.hpp	2008-02-28 05:02:30.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/thread_linux.inline.hpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_HDR
+-#pragma ident "@(#)thread_linux.inline.hpp	1.10 07/05/05 17:04:37 JVM"
+-#endif
+ /*
+  * Copyright 2002-2003 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ // Contains inlined functions for class Thread and ThreadLocalStorage
+diff -ruN openjdk6/hotspot/src/os/linux/vm/vmError_linux.cpp openjdk/hotspot/src/os/linux/vm/vmError_linux.cpp
+--- openjdk6/hotspot/src/os/linux/vm/vmError_linux.cpp	2008-02-28 05:02:30.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/vmError_linux.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)vmError_linux.cpp	1.13 07/05/05 17:04:37 JVM"
+-#endif
+ /*
+  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ # include "incls/_precompiled.incl"
+@@ -34,88 +31,6 @@
+ #include <unistd.h>
+ #include <signal.h>
+ 
+-extern char** environ;
+-
+-#ifndef __NR_fork
+-#define __NR_fork IA32_ONLY(2) IA64_ONLY(not defined) AMD64_ONLY(57)
+-#endif
+-
+-#ifndef __NR_execve
+-#define __NR_execve IA32_ONLY(11) IA64_ONLY(1033) AMD64_ONLY(59)
+-#endif
+-
+-// Run the specified command in a separate process. Return its exit value,
+-// or -1 on failure (e.g. can't fork a new process).
+-// Unlike system(), this function can be called from signal handler. It
+-// doesn't block SIGINT et al.
+-int VMError::fork_and_exec(char* cmd) {
+-  char * argv[4];
+-  argv[0] = "sh";
+-  argv[1] = "-c";
+-  argv[2] = cmd;
+-  argv[3] = NULL;
+-
+-  // fork() in LinuxThreads/NPTL is not async-safe. It needs to run 
+-  // pthread_atfork handlers and reset pthread library. All we need is a 
+-  // separate process to execve. Make a direct syscall to fork process.
+-  // On IA64 there's no fork syscall, we have to use fork() and hope for
+-  // the best...
+-  pid_t pid = NOT_IA64(syscall(__NR_fork);) 
+-              IA64_ONLY(fork();)
+-
+-  if (pid < 0) {
+-    // fork failed
+-    return -1;
+-
+-  } else if (pid == 0) {
+-    // child process
+-
+-    // execve() in LinuxThreads will call pthread_kill_other_threads_np() 
+-    // first to kill every thread on the thread list. Because this list is 
+-    // not reset by fork() (see notes above), execve() will instead kill 
+-    // every thread in the parent process. We know this is the only thread 
+-    // in the new process, so make a system call directly.
+-    // IA64 should use normal execve() from glibc to match the glibc fork() 
+-    // above.
+-    NOT_IA64(syscall(__NR_execve, "/bin/sh", argv, environ);)
+-    IA64_ONLY(execve("/bin/sh", argv, environ);)
+-
+-    // execve failed
+-    _exit(-1);
+-
+-  } else  {
+-    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
+-    // care about the actual exit code, for now.
+-   
+-    int status;
+-
+-    // Wait for the child process to exit.  This returns immediately if
+-    // the child has already exited. */
+-    while (waitpid(pid, &status, 0) < 0) {
+-        switch (errno) {
+-        case ECHILD: return 0;
+-        case EINTR: break;
+-        default: return -1;
+-        }
+-    }
+-
+-    if (WIFEXITED(status)) {
+-       // The child exited normally; get its exit code.
+-       return WEXITSTATUS(status);
+-    } else if (WIFSIGNALED(status)) {
+-       // The child exited because of a signal
+-       // The best value to return is 0x80 + signal number,
+-       // because that is what all Unix shells do, and because
+-       // it allows callers to distinguish between process exit and
+-       // process death by signal.
+-       return 0x80 + WTERMSIG(status);
+-    } else {
+-       // Unknown exit code; pass it through
+-       return status;
+-    }
+-  }
+-}
+-
+ void VMError::show_message_box(char *buf, int buflen) {
+   bool yes;
+   do {
+@@ -136,17 +51,18 @@
+ 
+     if (yes) {
+       // yes, user asked VM to launch debugger
+-      jio_snprintf(buf, buflen, "gdb /proc/%d/exe %d", 
++      jio_snprintf(buf, buflen, "gdb /proc/%d/exe %d",
+                    os::current_process_id(), os::current_process_id());
+ 
+-      fork_and_exec(buf);
++      os::fork_and_exec(buf);
++      yes = false;
+     }
+   } while (yes);
+ }
+ 
+ // Space for our "saved" signal flags and handlers
+ static int resettedSigflags[2];
+-static address resettedSighandler[2]; 
++static address resettedSighandler[2];
+ 
+ static void save_signal(int idx, int sig)
+ {
+@@ -160,7 +76,7 @@
+ 
+ int VMError::get_resetted_sigflags(int sig) {
+   if(SIGSEGV == sig) {
+-    return resettedSigflags[0];  
++    return resettedSigflags[0];
+   } else if(SIGBUS == sig) {
+     return resettedSigflags[1];
+   }
+@@ -169,7 +85,7 @@
+ 
+ address VMError::get_resetted_sighandler(int sig) {
+   if(SIGSEGV == sig) {
+-    return resettedSighandler[0];  
++    return resettedSighandler[0];
+   } else if(SIGBUS == sig) {
+     return resettedSighandler[1];
+   }
+diff -ruN openjdk6/hotspot/src/os/linux/vm/vtune_linux.cpp openjdk/hotspot/src/os/linux/vm/vtune_linux.cpp
+--- openjdk6/hotspot/src/os/linux/vm/vtune_linux.cpp	2008-02-28 05:02:30.000000000 -0500
++++ openjdk/hotspot/src/os/linux/vm/vtune_linux.cpp	2008-01-31 09:19:00.000000000 -0500
+@@ -1,6 +1,3 @@
+-#ifdef USE_PRAGMA_IDENT_SRC
+-#pragma ident "@(#)vtune_linux.cpp	1.12 07/05/05 17:04:35 JVM"
+-#endif
+ /*
+  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@@ -22,7 +19,7 @@
+  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+  * CA 95054 USA or visit www.sun.com if you need additional information or
+  * have any questions.
+- *  
++ *
+  */
+ 
+ #include "incls/_precompiled.incl"
+@@ -44,5 +41,5 @@
+ 
+ 
+ // Reconciliation History
+-// vtune_solaris.cpp	1.8 99/07/12 23:54:21
++// vtune_solaris.cpp    1.8 99/07/12 23:54:21
+ // End
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/patches/icedtea-hotspot7-build-fixes.patch	Tue Mar 04 07:02:39 2008 -0500
@@ -0,0 +1,64 @@
+diff -r 2323cafebabe openjdk/hotspot/src/share/vm/runtime/vm_version.cpp
+--- openjdk/hotspot/src/share/vm/runtime/vm_version.cpp	2008-01-31 09:19:01.000000000 -0500
++++ openjdk/hotspot/src/share/vm/runtime/vm_version.cpp	2008-02-29 13:12:39.000000000 -0500
+@@ -86,16 +86,12 @@
+   #define VMLP ""
+ #endif
+ 
+-#ifdef KERNEL
+-  #define VMTYPE "Kernel"
+-#else // KERNEL
+ #ifdef TIERED
+   #define VMTYPE "Server"
+ #else
+   #define VMTYPE COMPILER1_PRESENT("Client")   \
+                  COMPILER2_PRESENT("Server")
+ #endif // TIERED
+-#endif // KERNEL
+ 
+ #ifndef HOTSPOT_VM_DISTRO
+   #error HOTSPOT_VM_DISTRO must be defined
+diff -r 848a72e3bc9b openjdk/hotspot/build/linux/makefiles/top.make
+--- openjdk/hotspot/build/linux/makefiles/top.make	Fri Feb 29 17:02:22 2008 +0000
++++ openjdk/hotspot/build/linux/makefiles/top.make	Fri Feb 29 17:04:07 2008 +0000
+@@ -67,7 +67,9 @@ Include_DBs/GC          = $(VM)/includeD
+                           $(VM)/gc_implementation/includeDB_gc_serial \
+                           $(VM)/gc_implementation/includeDB_gc_shared
+ 
+-Include_DBs/CORE        = $(VM)/includeDB_core   $(Include_DBs/GC)
++Include_DBs/CORE        = $(VM)/includeDB_core   $(Include_DBs/GC) \
++                          $(VM)/includeDB_jvmti \
++                          $(VM)/includeDB_features
+ Include_DBs/COMPILER1   = $(Include_DBs/CORE) $(VM)/includeDB_compiler1
+ Include_DBs/COMPILER2   = $(Include_DBs/CORE) $(VM)/includeDB_compiler2
+ Include_DBs/TIERED      = $(Include_DBs/CORE) $(VM)/includeDB_compiler1 $(VM)/includeDB_compiler2
+diff -r 2323cafebabe openjdk/hotspot/build/linux/makefiles/jvmti.make
+--- openjdk/hotspot/build/linux/makefiles/jvmti.make	2008-02-29 16:50:25.000000000 +0000
++++ openjdk/hotspot/build/linux/makefiles/jvmti.make	2008-02-29 10:21:43.000000000 +0000
+@@ -36,6 +36,7 @@
+ JvmtiOutDir = $(GENERATED)/jvmtifiles
+ 
+ JvmtiSrcDir = $(GAMMADIR)/src/share/vm/prims
++InterpreterSrcDir = $(GAMMADIR)/src/share/vm/interpreter
+ Src_Dirs += $(JvmtiSrcDir)
+ 
+ # set VPATH so make knows where to look for source files
+@@ -47,6 +48,7 @@
+         jvmtiEnter.cpp \
+         jvmtiEnterTrace.cpp \
+         jvmtiEnvRecommended.cpp\
++        bytecodeInterpreterWithChecks.cpp \
+         jvmti.h \
+ 
+ JvmtiEnvFillSource = $(JvmtiSrcDir)/jvmtiEnvFill.java
+@@ -77,6 +79,10 @@
+ 	@echo Generating $@
+ 	$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiEnter.xsl -OUT $(JvmtiOutDir)/jvmtiEnter.cpp -PARAM interface jvmti
+ 
++$(JvmtiOutDir)/bytecodeInterpreterWithChecks.cpp: $(JvmtiGenClass) $(InterpreterSrcDir)/bytecodeInterpreter.cpp $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xml $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xsl
++	@echo Generating $@
++	$(XSLT) -IN $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xml -XSL $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xsl -OUT $(JvmtiOutDir)/bytecodeInterpreterWithChecks.cpp 
++
+ $(JvmtiOutDir)/jvmtiEnterTrace.cpp: $(both) $(JvmtiSrcDir)/jvmtiEnter.xsl
+ 	@echo Generating $@
+ 	$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiEnter.xsl -OUT $(JvmtiOutDir)/jvmtiEnterTrace.cpp -PARAM interface jvmti -PARAM trace Trace
--- a/patches/icedtea-linker-options.patch	Tue Mar 04 00:06:10 2008 -0500
+++ b/patches/icedtea-linker-options.patch	Tue Mar 04 07:02:39 2008 -0500
@@ -22,6 +22,18 @@
  LDFLAGS_COMMON  += $(LDFLAGS_DEFS_OPTION)
  
  #
+diff -ru openjdk.orig/jdk/make/common/Program.gmk openjdk/jdk/make/common/Program.gmk
+--- openjdk.orig/jdk/make/common/Program.gmk	2007-11-08 13:45:46.000000000 +0000
++++ openjdk/jdk/make/common/Program.gmk	2007-11-08 13:42:44.000000000 +0000
+@@ -85,7 +85,7 @@
+ 	endif
+     endif
+     ifeq ($(PLATFORM), linux)
+-	LDFLAGS += -lz -z origin
++	LDFLAGS += -lz -Wl,-z -Wl,origin
+ 	LDFLAGS += -Wl,--allow-shlib-undefined
+ 	LDFLAGS += -Wl,-rpath -Wl,\$$ORIGIN/../lib/$(LIBARCH)/jli
+ 	LDFLAGS += -Wl,-rpath -Wl,\$$ORIGIN/../jre/lib/$(LIBARCH)/jli
 diff -ru openjdk.orig/jdk/make/java/instrument/Makefile openjdk/jdk/make/java/instrument/Makefile
 --- openjdk.orig/jdk/make/java/instrument/Makefile	2007-11-08 13:45:46.000000000 +0000
 +++ openjdk/jdk/make/java/instrument/Makefile	2007-11-08 13:42:25.000000000 +0000
--- a/patches/icedtea-ports.patch	Tue Mar 04 00:06:10 2008 -0500
+++ b/patches/icedtea-ports.patch	Tue Mar 04 07:02:39 2008 -0500
@@ -10,6 +10,33 @@
  
  JAVA_FLAG/32 = -d32
  JAVA_FLAG/64 = -d64
+diff -ru openjdk.orig/hotspot/build/linux/makefiles/defs.make openjdk/hotspot/build/linux/makefiles/defs.make
+--- openjdk.orig/hotspot/build/linux/makefiles/defs.make	2007-11-08 11:34:54.000000000 +0000
++++ openjdk/hotspot/build/linux/makefiles/defs.make	2007-11-08 11:36:15.000000000 +0000
+@@ -87,6 +87,23 @@
+   HS_ARCH          = i486
+ endif
+ 
++# ppc
++ifeq ($(ARCH), ppc)
++  ARCH_DATA_MODEL  = 32
++  PLATFORM         = linux-ppc
++  VM_PLATFORM      = linux_ppc
++  HS_ARCH          = zero
++endif
++
++# ppc64
++ifeq ($(ARCH), ppc64)
++  ARCH_DATA_MODEL  = 64
++  MAKE_ARGS        += LP64=1
++  PLATFORM         = linux-ppc64
++  VM_PLATFORM      = linux_ppc64
++  HS_ARCH          = zero
++endif
++
+ JDK_INCLUDE_SUBDIR=linux
+ 
+ # FIXUP: The subdirectory for a debug build is NOT the same on all platforms
 diff -ru openjdk.orig/hotspot/build/linux/makefiles/gcc.make openjdk/hotspot/build/linux/makefiles/gcc.make
 --- openjdk.orig/hotspot/build/linux/makefiles/gcc.make	2007-11-08 11:34:54.000000000 +0000
 +++ openjdk/hotspot/build/linux/makefiles/gcc.make	2007-11-08 11:36:15.000000000 +0000
@@ -54,6 +81,88 @@
  	   $(MAKE) -f vm.make $(LIBSAPROC); \
  	fi
  
+diff -ru openjdk.orig/hotspot/make/defs.make openjdk/hotspot/make/defs.make
+--- openjdk.orig/hotspot/make/defs.make	2007-11-08 11:34:54.000000000 +0000
++++ openjdk/hotspot/make/defs.make	2007-11-08 11:36:15.000000000 +0000
+@@ -199,26 +199,31 @@
+   # Use uname output for SRCARCH, but deal with platform differences. If ARCH
+   # is not explicitly listed below, it is treated as i486. Also note amd64 is
+   # a separate src arch, so LP64 && i486 ==> amd64.
+-  SRCARCH     = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64,$(ARCH)))
++  SRCARCH     = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 ppc ppc64,$(ARCH)))
+   ARCH/       = i486
+   ARCH/sparc  = sparc
+   ARCH/sparc64= sparc
+   ARCH/ia64   = ia64
+   ARCH/amd64  = amd64
+   ARCH/x86_64 = amd64
++  ARCH/ppc    = zero
++  ARCH/ppc64  = zero
+   ifdef LP64
+     ifeq ($(SRCARCH), i486)
+       SRCARCH = amd64
+     endif
+   endif
+ 
+-  # BUILDARCH is usually the same as SRCARCH, except for sparcv9
++  # BUILDARCH is usually the same as SRCARCH, except for sparcv9 and zero
+   BUILDARCH = $(SRCARCH)
+   ifdef LP64
+     ifeq ($(BUILDARCH), sparc)
+       BUILDARCH = sparcv9
+     endif
+   endif
++  ifeq ($(BUILDARCH), zero)
++    BUILDARCH = $(ARCH)
++  endif
+ 
+   # LIBARCH is 1:1 mapping from BUILDARCH
+   LIBARCH         = $(LIBARCH/$(BUILDARCH))
+@@ -227,8 +232,10 @@
+   LIBARCH/sparc   = sparc
+   LIBARCH/sparcv9 = sparcv9
+   LIBARCH/ia64    = ia64
++  LIBARCH/ppc     = ppc
++  LIBARCH/ppc64   = ppc64
+ 
+-  LP64_ARCH = sparcv9 amd64 ia64
++  LP64_ARCH = sparcv9 amd64 ia64 ppc64
+ endif
+ 
+ # Required make macro settings for all platforms
+diff -r e95eb2395f60 openjdk/hotspot/build/linux/makefiles/vm.make
+--- openjdk/hotspot/build/linux/makefiles/vm.make	Fri Nov 09 16:35:08 2007 +0000
++++ openjdk/hotspot/build/linux/makefiles/vm.make	Mon Nov 12 09:09:28 2007 +0000
+@@ -149,6 +149,12 @@
+ 	rm -f $@
+ 	cat $^ > $@
+ 
++ifeq ($(BUILDARCH), ppc64)
++  STATIC_CXX = false
++else
++  STATIC_CXX = true
++endif
++
+ ifeq ($(LINK_INTO),AOUT)
+   LIBJVM.o                 =
+   LIBJVM_MAPFILE           =
+@@ -162,8 +168,14 @@
+   # JVM is statically linked with libgcc[_s] and libstdc++; this is needed to
+   # get around library dependency and compatibility issues. Must use gcc not
+   # g++ to link.
+-  LFLAGS_VM                += $(STATIC_LIBGCC)
+-  LIBS_VM                  += $(STATIC_STDCXX) $(LIBS)
++  ifeq ($(STATIC_CXX), true)
++    LFLAGS_VM              += $(STATIC_LIBGCC)
++    LIBS_VM                += $(STATIC_STDCXX)
++  else
++    LIBS_VM                += -lstdc++
++  endif
++
++  LIBS_VM                  += $(LIBS)
+ endif
+ 
+ LINK_VM = $(LINK_LIB.c)
 diff -ru openjdk.orig/hotspot/src/os/linux/vm/os_linux.cpp openjdk/hotspot/src/os/linux/vm/os_linux.cpp
 --- openjdk.orig/hotspot/src/os/linux/vm/os_linux.cpp	2007-11-08 11:34:54.000000000 +0000
 +++ openjdk/hotspot/src/os/linux/vm/os_linux.cpp	2007-11-08 11:36:15.000000000 +0000
@@ -80,6 +189,29 @@
  
      FILE *fp = fopen("/proc/meminfo", "r");
      if (fp) {
+diff -ru openjdk.orig/hotspot/src/share/vm/runtime/mutex.hpp openjdk/hotspot/src/share/vm/runtime/mutex.hpp
+--- openjdk.orig/hotspot/src/share/vm/runtime/mutex.hpp	2007-11-08 11:34:54.000000000 +0000
++++ openjdk/hotspot/src/share/vm/runtime/mutex.hpp	2007-11-08 11:36:15.000000000 +0000
+@@ -61,18 +61,10 @@ union SplitWord {   // full-word with se
+ } ;
+ 
+ // Endian-ness ... index of least-significant byte in SplitWord.Bytes[]
+-#ifdef AMD64        // little
++#ifdef VM_LITTLE_ENDIAN
+  #define _LSBINDEX 0
+ #else
+-#if IA32            // little
+- #define _LSBINDEX 0
+-#else
+-#ifdef SPARC        // big
+  #define _LSBINDEX (sizeof(intptr_t)-1)
+-#else
+- #error "unknown architecture"
+-#endif
+-#endif
+ #endif
+ 
+ class ParkEvent ;
 diff -ru openjdk.orig/hotspot/src/share/vm/runtime/vm_version.cpp openjdk/hotspot/src/share/vm/runtime/vm_version.cpp
 --- openjdk.orig/hotspot/src/share/vm/runtime/vm_version.cpp	2007-11-08 11:34:54.000000000 +0000
 +++ openjdk/hotspot/src/share/vm/runtime/vm_version.cpp	2007-11-08 11:36:15.000000000 +0000
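The mutex.hpp hunk above replaces a hard-coded CPU chain (AMD64 and IA32
little-endian, SPARC big-endian, anything else a compile error) with a
single VM_LITTLE_ENDIAN test, so a new port selects its byte order by
defining one macro instead of extending the #ifdef ladder.  A
hypothetical fragment showing how a little-endian port might supply the
macro (the real definition lives in the port's platform files):

    # little-endian target: define the macro; big-endian ports leave it unset
    CFLAGS += -DVM_LITTLE_ENDIAN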
@@ -354,77 +486,3 @@
          ARCH_DATA_MODEL=32
        else
          ARCH_DATA_MODEL=64
---- openjdk6/hotspot/make/defs.make	2008-02-12 04:14:16.000000000 -0500
-+++ openjdk/hotspot/make/defs.make	2008-02-14 22:18:56.000000000 -0500
-@@ -199,26 +199,31 @@
-   # Use uname output for SRCARCH, but deal with platform differences. If ARCH
-   # is not explicitly listed below, it is treated as i486. Also note amd64 is
-   # a separate src arch, so LP64 && i486 ==> amd64.
--  SRCARCH     = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64,$(ARCH)))
-+  SRCARCH     = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 ppc ppc64,$(ARCH)))
-   ARCH/       = i486
-   ARCH/sparc  = sparc
-   ARCH/sparc64= sparc
-   ARCH/ia64   = ia64
-   ARCH/amd64  = amd64
-   ARCH/x86_64 = amd64
-+  ARCH/ppc    = zero
-+  ARCH/ppc64  = zero
-   ifdef LP64
-     ifeq ($(SRCARCH), i486)
-       SRCARCH = amd64
-     endif
-   endif
- 
--  # BUILDARCH is usually the same as SRCARCH, except for sparcv9
-+  # BUILDARCH is usually the same as SRCARCH, except for sparcv9 and zero
-   BUILDARCH = $(SRCARCH)
-   ifdef LP64
-     ifeq ($(BUILDARCH), sparc)
-       BUILDARCH = sparcv9
-     endif
-   endif
-+  ifeq ($(BUILDARCH), zero)
-+    BUILDARCH = $(ARCH)
-+  endif
- 
-   # LIBARCH is 1:1 mapping from BUILDARCH
-   LIBARCH         = $(LIBARCH/$(BUILDARCH))
-@@ -227,8 +232,10 @@
-   LIBARCH/sparc   = sparc
-   LIBARCH/sparcv9 = sparcv9
-   LIBARCH/ia64    = ia64
-+  LIBARCH/ppc     = ppc
-+  LIBARCH/ppc64   = ppc64
- 
--  LP64_ARCH = sparcv9 amd64 ia64
-+  LP64_ARCH = sparcv9 amd64 ia64 ppc64
- endif
- 
- # Required make macro settings for all platforms
---- defs.make	2008-02-14 22:26:27.000000000 -0500
-+++ openjdk/hotspot/build/linux/makefiles/defs.make	2008-02-14 22:26:58.000000000 -0500
-@@ -87,6 +87,23 @@
-   HS_ARCH          = i486
- endif
- 
-+# ppc
-+ifeq ($(ARCH), ppc)
-+  ARCH_DATA_MODEL  = 32
-+  PLATFORM         = linux-ppc
-+  VM_PLATFORM      = linux_ppc
-+  HS_ARCH          = zero
-+endif
-+
-+# ppc64
-+ifeq ($(ARCH), ppc64)
-+  ARCH_DATA_MODEL  = 64
-+  MAKE_ARGS        += LP64=1
-+  PLATFORM         = linux-ppc64
-+  VM_PLATFORM      = linux_ppc64
-+  HS_ARCH          = zero
-+endif
-+
- JDK_INCLUDE_SUBDIR=linux
- 
- # FIXUP: The subdirectory for a debug build is NOT the same on all platforms
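The net effect of the reinstated arch-mapping hunks is easiest to see by
tracing one architecture through them.  A standalone sketch (hypothetical
file trace.mk, not part of the tree) that mirrors the defs.make logic
above, trimmed to the ppc-relevant lines:

    # trace.mk -- illustrative only; run as: make -f trace.mk ARCH=ppc64
    ARCH ?= ppc64

    # unlisted architectures fall through to i486; ppc and ppc64 map to zero
    SRCARCH     = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 ppc ppc64,$(ARCH)))
    ARCH/       = i486
    ARCH/ppc    = zero
    ARCH/ppc64  = zero

    # zero builds keep the real architecture name for build directories
    BUILDARCH = $(SRCARCH)
    ifeq ($(BUILDARCH), zero)
      BUILDARCH = $(ARCH)
    endif

    LIBARCH       = $(LIBARCH/$(BUILDARCH))
    LIBARCH/ppc   = ppc
    LIBARCH/ppc64 = ppc64

    trace: ; @echo "ARCH=$(ARCH) SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH)"

For ARCH=ppc64 this prints SRCARCH=zero BUILDARCH=ppc64 LIBARCH=ppc64:
the C++-interpreter (zero) sources are compiled, while the build and
library directories keep the real architecture name.  Adding ppc64 to
LP64_ARCH keeps the shared 64-bit handling consistent with the LP64=1
that the linux defs.make hunk passes via MAKE_ARGS.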
--- a/patches/icedtea-zero.patch	Tue Mar 04 00:06:10 2008 -0500
+++ b/patches/icedtea-zero.patch	Tue Mar 04 07:02:39 2008 -0500
@@ -1,3 +1,27 @@
+diff -ru openjdk/hotspot/build/linux/makefiles/gcc.make openjdk/hotspot/build/linux/makefiles/gcc.make
+--- openjdk/hotspot/build/linux/makefiles/gcc.make	2008-02-17 15:37:26.000000000 -0500
++++ openjdk/hotspot/build/linux/makefiles/gcc.make	2008-02-17 15:37:58.000000000 -0500
+@@ -57,6 +57,7 @@
+ 
+ VM_PICFLAG        = $(VM_PICFLAG/$(LINK_INTO))
+ 
++CFLAGS += $(LIBFFI_CFLAGS)
+ CFLAGS += $(VM_PICFLAG)
+ CFLAGS += -fno-rtti
+ CFLAGS += -fno-exceptions
+diff -ru openjdk/hotspot/build/linux/makefiles/vm.make openjdk/hotspot/build/linux/makefiles/vm.make
+--- openjdk/hotspot/build/linux/makefiles/vm.make	2008-02-29 09:46:55.000000000 -0500
++++ openjdk/hotspot/build/linux/makefiles/vm.make	2008-02-29 09:47:30.000000000 -0500
+@@ -177,6 +177,9 @@
+ 
+   LIBS_VM                  += $(LIBS)
+ endif
++ifeq ($(SRCARCH), zero)
++LIBS_VM += $(LIBFFI_LIBS)
++endif
+ 
+ LINK_VM = $(LINK_LIB.c)
+ 
 diff -ru openjdk/hotspot/src/share/vm/runtime/icache.cpp openjdk/hotspot/src/share/vm/runtime/icache.cpp
 --- openjdk/hotspot/src/share/vm/runtime/icache.cpp	2007-10-30 08:46:35.000000000 +0000
 +++ openjdk/hotspot/src/share/vm/runtime/icache.cpp	2008-01-21 15:18:48.000000000 +0000
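The gcc.make and vm.make hunks above reference LIBFFI_CFLAGS and
LIBFFI_LIBS but leave their definition to the surrounding build.  A
plausible way to supply them, assuming a distribution libffi with a
pkg-config file (the values actually used come from IcedTea's configure
machinery):

    LIBFFI_CFLAGS := $(shell pkg-config --cflags libffi)
    LIBFFI_LIBS   := $(shell pkg-config --libs libffi)

Zero is the only SRCARCH that needs this: it has no hand-written
assembler stubs, so the C++ interpreter uses libffi to construct native
method calls at runtime, hence the SRCARCH-conditional LIBS_VM addition.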
@@ -86,19 +110,6 @@
       }
  
    STEP(140, "(printing VM operation)" )
---- vm.make	2008-02-14 22:30:03.000000000 -0500
-+++ openjdk/hotspot/build/linux/makefiles/vm.make	2008-02-14 22:30:11.000000000 -0500
-@@ -165,6 +165,10 @@
-   LFLAGS_VM                += $(STATIC_LIBGCC)
-   LIBS_VM                  += $(STATIC_STDCXX) $(LIBS)
- endif
-+ifeq ($(SRCARCH), zero)
-+LIBS_VM += $(LIBFFI_LIBS)
-+endif
-+
- 
- LINK_VM = $(LINK_LIB.c)
- 
 diff -r 8e9761ee6dd6 openjdk/hotspot/build/linux/makefiles/buildtree.make
 --- openjdk/hotspot/build/linux/makefiles/buildtree.make	Mon Feb 18 11:56:36 2008 +0000
 +++ openjdk/hotspot/build/linux/makefiles/buildtree.make	Mon Feb 18 12:03:06 2008 +0000